diff --git "a/493.jsonl" "b/493.jsonl" new file mode 100644--- /dev/null +++ "b/493.jsonl" @@ -0,0 +1,1496 @@ +{"seq_id":"27911645771","text":"#!/usr/local/bioinfo/python/2.7.9/bin/python\n# -*- coding: utf-8 -*-\n# @package summary_stat_egglib.py\n# @author Florian CHARRIAT\n\n\"\"\"\n\tThe summary_stat_egglib script\n\t===========================\n\t:author: Florian CHARRIAT & Pierre Gladieux\n\t:contact: florian.charriat@inra.fr\n\t:date: 08/07/2018\n\t:version: 0.1\n\tScript description\n\t------------------\n\tThis Programme is used to correct vcf file with multi sample create with show-snp\n\tExample\n\t-------\n\t>>> summary_stat_egglib.py -f multiSample.vcf -o output.vcf -d show-snp.result\n\n\n\tHelp Programm\n\t-------------\n\toptional arguments:\n\t\t- \\-h, --help\n\t\t\t\t\t\tshow this help message and exit\n\t\t- \\-v, --version\n\t\t\t\t\t\tdisplay summary_stat_egglib.py version number and exit\n\n\tInput mandatory infos for running:\n\t\t- \\-f , --file \n\t\t\t\t\t\tpath of the vcf file with multi sample\n\t\t- \\-d , --directory \n\t\t\t\t\t\tpath of the directory which contains all show-snp file without -I and -C option.\n\t\t- \\-o , --output \n\t\t\t\t\t\tpath of the output file ( format vcf)\n\n\"\"\"\n\n\n##################################################\n## Modules\n##################################################\n#Import \nimport sys, os\n# Import module_Flo\nfrom module_Flo import verifDir, createDir, form, verifFichier, fasta2dict, openfile, sort_human,is_number\n\n## Python modules\nimport argparse, egglib\nfrom time import gmtime, strftime\n# Import BioPython\n\nfrom Bio import SeqIO\nfrom Bio.SeqRecord import SeqRecord\nfrom Bio.Seq import Seq\n\n##################################################\n## Functions\n\n\n##################################################\n## Main code\n##################################################\nif __name__ == \"__main__\":\n\n\t# Initializations\n\tversion = '0.1'\n\t# Parameters 
recovery\n\tparser = argparse.ArgumentParser(prog=__file__, description='''This Programme is used to calculate diversity statistics between groups with the Egglib package''')\n\tparser.add_argument('-v', '--version', action='version', version='You are using %(prog)s version: ' + version, help=\\\n\t\t\t\t\t\t'display '+__file__+' version number and exit')\n\n\tfilesreq = parser.add_argument_group('Input mandatory infos for running')\n\tfilesreq.add_argument('-d', '--directory',type = str, required=True, dest = 'directory', help = 'path of the directory which contains all the fasta file to proceed')\n\tfilesreq.add_argument('-g', '--groups',type = str, required=True, dest = 'groups', help = 'path of the file which contain per line : Name of Isolat\\tGroups')\n\tfilesreq.add_argument('--focal_groups', type=str,nargs='+', default = 'None', dest='target',\n\t\t\t\t\t\t help='One or more groups to analyse')\n\tfilesreq.add_argument('--min_length', type=str, required=True, dest='minLength',\n\t\t\t\t\t\t help='the minimum length of sequence to analyse')\n\tfilesreq.add_argument('--min_size', type=str, required=True, dest='minSize',\n\t\t\t\t\t\t help='the minimum size of exploitable samples')\n\tfilesreq.add_argument('--translatorX', action = 'store_true', dest='translatorX',\n\t\t\t\t\t\t help=\"If in your fasta file, your sequence havn't the same length please use this option for use transltorX option for align all sequence\")\n\tfilesreq.add_argument('--MISS', type=str, required=True, dest='miss',\n\t\t\t\t\t\t help='maximum proportion of missing data (if there are more missing data at a site, the site is ignored)')\n\tfilesreq.add_argument('-o', '--output',type = str, required=True, dest = 'output', help = 'path of the output file ( format vcf)')\n\n\n######### Recuperation arguments ###########\n\targs = parser.parse_args()\n\tdirectory = os.path.abspath(args.directory)\n\tgroups = os.path.abspath(args.groups)\n\ttarget = args.target\n\ttranslatorX = 
args.translatorX\n\tMISS = args.miss\n\tminLength = args.minLength\n\tminSize = args.minSize\n\toutput = os.path.abspath(args.output)\n########### Gestion directory ##############\n\tdirectory = verifDir(directory,True)\n\toutput = verifDir(output)\n\tcreateDir([output,output+'/translatorX',output+'/fastaByGroup'])\n############### start message ########################\n\n\tprint(form(\"\\n\\t---------------------------------------------------------\", 'yellow', 'bold'))\n\tprint(\n\t\t\t\"\\t\" + form(\"|\", 'yellow', 'bold') + form(\" Welcome in summary_stat_egglib (Version \" + version + \") \",\n\t\t\t\t\t\t\t\t\t\t\t\t\t type='bold') + form(\"|\", 'yellow', 'bold'))\n\tprint(form(\"\\t---------------------------------------------------------\", 'yellow', 'bold') + '\\n')\n\n########### Main #####################################\n\tentete_file = 'Name\\tNb samples\\tNb analysed sites\\tAverage nb ofsamples used\\tNb segregating sites\\tNb haplotypes\\tExpected heterozygosity\\tWatterson’s estimator of theta\\tNucleotide diversity\\tTajima’s D\\tNb Non-synonimous\\tNb synonimous\\tNSseg\\tSseg\\tNucleotide diversity of non-synonimous site (Pi)\\tNucleotide diversity of synonimous site (Pi)\\n'\n\tsummary_stat = {}\n\tdico_group = {}\n\tOUTs = {}\n\tall_stats = ['n', 'lseff', 'nseff', 'S', 'K', 'He', 'thetaW', 'Pi', 'D', 'NSsites', 'Ssites', 'NSseg', 'Sseg',\n\t\t\t\t 'PiNS', 'PiS']\n\n\tprint('Lecture du fichier : %s'% groups.split('/')[-1])\n\twith open(groups,'r') as group_file :\n\t\tfor line in group_file :\n\t\t\tisolat,group = line.strip().split()\n\t\t\tif group in target or target == 'None':\n\t\t\t\tif group not in dico_group.keys() :\n\t\t\t\t\t# Initialisation du dico summary pour donner à la fin les moyennes par groupes\n\t\t\t\t\tsummary_stat[group] = {}\n\t\t\t\t\tfor stat in all_stats:\n\t\t\t\t\t\tsummary_stat[group][stat] = []\n\t\t\t\t\t# Création dico contenant tous les isolats (value) par groupe (keys)\n\t\t\t\t\tdico_group[group] = 
[isolat]\n\t\t\t\t\t# Création d'un dico de output\n\t\t\t\t\tOUTs[group] = open(output + group + '.csv', 'w')\n\t\t\t\t\tOUTs[group].write(entete_file)\n\t\t\t\telse :\n\t\t\t\t\tdico_group[group].append(isolat)\n\n\ttotal = len([file for file in os.listdir(directory) if file.endswith('.fasta')])\n\tcount = 0\n\n\n\n\n\tfor file in os.listdir(directory):\n\t\tcount +=1\n\t\tprint('Traitement du fichier : %s (%s/%s)'%(file,count,total))\n\n\t\t#Ouverture du fichier fasta pour le mettre dans un dixo et le split par groupe\n\t\ttry :\n\t\t\tdico_fasta_all = fasta2dict(directory + file)\n\t\texcept :\n\t\t\tprint('\\t-The %s file have two sequence with the same \\n'%file)\n\t\t\tcontinue\n\t\tfile_exist = False\n\t\ti = 0\n\t\thaplotype = {}\n\t\tfor gp in sorted(dico_group.keys()) :\n\t\t\tfasta_group = output+'fastaByGroup/' + '%s_%s.fasta'%(file.replace(\".fasta\",\"\"),gp)\n\t\t\twith open(fasta_group,'w') as fasta_file :\n\t\t\t\tfor elt in dico_fasta_all.keys() :\n\t\t\t\t\tname = elt.replace('Mo_','').split('_')[0]\n\t\t\t\t\tif name in dico_group[gp] :\n\t\t\t\t\t\trecord = SeqRecord(dico_fasta_all[elt].seq, id=str(elt), name=str(elt), description='')\n\t\t\t\t\t\tSeqIO.write(record, fasta_file, \"fasta\")\n\t\t\t\t\t\tfile_exist = True\n\t\t\tif file_exist == False :\n\t\t\t\tos.system('rm %s'%fasta_group)\n\t\t\t\tcontinue\n\t\t\tfile_exist = False\n\n\n\t\t\tif translatorX :\n\t\t\t\tfile_aln = output + 'translatorX/' + '%s_%s.fasta'%(file.replace(\".fasta\",\"\"),gp)\n\t\t\t\tos.system('translatorx_vLocal.pl -i %s -o %s > stdout.txt 2> stdout.txt' % (fasta_group,file_aln ))\n\t\t\t\tfile_aln = file_aln + '.nt_ali.fasta'\n\n\t\t\telse :\n\t\t\t\tfile_aln = directory + file\n\n\t\t\tdico_fasta = fasta2dict(file_aln)\n\t\t\ti += 1\n\t\t\thaplotype[gp] = [[elt,str(dico_fasta[elt].seq),i] for elt in dico_fasta.keys() if elt.replace('Mo_','').split('_')[0] in dico_group[gp]]\n\t\t\tliste = [len(dico_fasta[elt].seq) for elt in dico_fasta.keys() if 
elt.replace('Mo_','').split('_')[0] in dico_group[gp]]\n\t\t\taln = egglib.Align.create(haplotype[gp])\n\t\t\tcs = egglib.stats.ComputeStats()\n\t\t\tcs.add_stats('lseff', 'nseff', 'S', 'K', 'He', 'thetaW', 'Pi', 'D')\n\t\t\tstat_order = ['lseff', 'nseff', 'S', 'K', 'He', 'thetaW', 'Pi', 'D']\n\t\t\tstats = cs.process_align(aln, max_missing=float(MISS))\n\t\t\tif stats['lseff']>=int(minLength) and stats['nseff']>=int(minSize):\n\t\t\t\tif stats['Pi'] != None:\n\t\t\t\t\tPi = stats['Pi'] / float(stats['lseff'])\n\t\t\t\telse:\n\t\t\t\t\tPi = 0\n\t\t\t\tif stats['thetaW'] != 'None':\n\t\t\t\t\tthetaW = stats['thetaW'] / float(stats['lseff'])\n\t\t\t\telse:\n\t\t\t\t\tthetaW = 0\n\t\t\t\tstats_treated = stats\n\t\t\t\t# for stat in stats:\n\t\t\t\t# \tif stat == 'thetaW':\n\t\t\t\t# \t\tstats_treated['thetaW'] = thetaW\n\t\t\t\t# \telif stat == 'Pi':\n\t\t\t\t# \t\tstats_treated['Pi'] = Pi\n\t\t\t\t# \telse:\n\t\t\t\t# \t\tstats_treated[stat] = stats[stat]\n\t\t\t\trf = egglib.tools.ReadingFrame([(0, stats['lseff'] - 1)])\n\t\t\t\tcd = egglib.stats.CodingDiversity(aln, frame=rf) # Probleme Rf prend le 0 de l'alignement donc prend pas toute le séquence mais les GAP aussi (donc manque des séquence + ajout plein de gap)\n\t\t\t\talign_S = cd.mk_align_S()\n\t\t\t\talign_NS = cd.mk_align_NS()\n\t\t\t\tnumS = cd.num_sites_S\n\t\t\t\tnumNS = cd.num_sites_NS\n\t\t\t\tcodon_filter = egglib.stats.Filter(rng=(0, 63), missing=64)\n\t\t\t\tcs = egglib.stats.ComputeStats()\n\t\t\t\tcs.add_stats('Pi', 'lseff')\n\t\t\t\tstatsS = cs.process_align(align_S, filtr=codon_filter, max_missing=float(MISS))\n\t\t\t\tstatsNS = cs.process_align(align_NS, filtr=codon_filter, max_missing=float(MISS))\n\t\t\t\tif statsS['Pi'] != None:\n\t\t\t\t\tPiS = statsS['Pi'] / numS\n\t\t\t\telse:\n\t\t\t\t\tPiS = 0\n\t\t\t\tif statsNS['Pi'] != None:\n\t\t\t\t\tPiNS = statsNS['Pi'] / numNS\n\t\t\t\telse:\n\t\t\t\t\tPiNS = 
0\n\t\t\t\tOUTs[gp].write('%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n'%(file.replace('.fasta', ''),aln.ns,'\\t'.join([str(stats_treated[x]) for x in stat_order]),round(cd.num_sites_NS, 2),round(cd.num_sites_S, 2),cd.num_pol_NS,cd.num_pol_S,PiNS,PiS))\n\n\n\t\t\t\t# Création des données pour donner la moyenne des statistiques par groupe\n\t\t\t\tsummary_stat[gp]['n'].append(aln.ns)\n\t\t\t\tfor stat in stat_order :\n\t\t\t\t\tif stats_treated[stat] != None:\n\t\t\t\t\t\tsummary_stat[gp][stat].append(stats_treated[stat])\n\t\t\t\t\telse:\n\t\t\t\t\t\tsummary_stat[gp][stat].append(0)\n\t\t\t\tsummary_stat[gp]['NSsites'].append(round(cd.num_sites_NS, 2))\n\t\t\t\tsummary_stat[gp]['Ssites'].append(round(cd.num_sites_S, 2))\n\t\t\t\tsummary_stat[gp]['NSseg'].append(cd.num_pol_NS)\n\t\t\t\tsummary_stat[gp]['Sseg'].append(cd.num_pol_S)\n\t\t\t\tsummary_stat[gp]['PiNS'].append(PiNS)\n\t\t\t\tsummary_stat[gp]['PiS'].append(PiS)\n\n\tentete_file = 'Name\\tNb sequences used\\tNb samples\\tNb analysed sites\\tAverage nb ofsamples used\\tNb segregating sites\\tNb haplotypes\\tExpected heterozygosity\\tWatterson’s estimator of theta\\tNucleotide diversity\\tTajima’s D\\tNb Non-synonimous\\tNb synonimous\\tNSseg\\tSseg\\tNucleotide diversity of non-synonimous site (Pi)\\tNucleotide diversity of synonimous site (Pi)\\n'\n\twith open(output+'summary_stat.csv','w') as summary_file :\n\t\tsummary_file.write(entete_file)\n\t\tfor gp in OUTs.keys():\n\t\t\tOUTs[gp].close()\n\t\t\tstats_mean = [str(len(summary_stat[gp]['n']))]\n\t\t\tfor stat in all_stats :\n\t\t\t\tliste_stat = [float(x) for x in summary_stat[gp][stat]]\n\t\t\t\tif len(liste_stat) != 0 :\n\t\t\t\t\tstats_mean.append(str(sum(liste_stat) / len(liste_stat)))\n\t\t\t\telse :\n\t\t\t\t\tstats_mean.append('No data')\n\t\t\tsummary_file.write('%s\\t%s\\n'%(gp,'\\t'.join(stats_mean)))\n\n\n\n\n\t############## end message 
###########################\n\n\tprint(form(\"\\n\\t---------------------------------------------------------\",'yellow','bold'))\n\tprint(\"\\t\"+form(\"|\",'yellow','bold')+form(\" End of execution \",type='bold')+form(\"|\",'yellow','bold'))\n\tprint(form(\"\\t---------------------------------------------------------\",'yellow','bold'))","repo_name":"FlorianCHA/Script","sub_path":"Cluster/Stat_summary_egglib.py","file_name":"Stat_summary_egglib.py","file_ext":"py","file_size_in_byte":11178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"21787548860","text":"import os\nimport sys\nimport models\nimport numpy as np\nfrom joblib import dump, load\n\nrootpath = os.path.dirname(os.path.dirname(__file__))\nsys.path.append(rootpath)\n\nif __name__ == \"__main__\":\n X_train = np.load('data/X_train_hog.npy')\n X_test = np.load('data/X_test_hog.npy')\n y_train = np.load('data/y_train.npy')\n y_test = np.load('data/y_test.npy')\n print(\"Loading data!\")\n print(X_train.shape, X_test.shape)\n print(y_train.shape, y_test.shape)\n \n # print('Logistic regression')\n # model = models.LogisticRegression(X_train.shape[1])\n # model.setOptimizer('Langevin', 0.08, 7000, 1)\n # # model.setOptimizer('SGD', 0.001, 1500)\n # model.fit(X_train, y_train)\n # print(\"Test on test data:\")\n # test_loss = model.lossFunction(X_test, y_test)\n # test_acc = model.evaluation(X_test, y_test)\n # print(\"loss: {} \\nacc: {}\".format(test_loss, test_acc))\n # model.save('data/logistic_v3.pkl')\n\n\n # print('LDA classification')\n # model = models.LDA(X_train.shape[1], 30, 0)\n # model.fit(X_train, y_train)\n # print(\"Test on test data:\")\n # test_acc = model.evaluation(X_test, y_test)\n # print(\"Test acc: {}\".format(test_acc))\n\n\n print('svm classification')\n kernel = 'rbf'\n model = models.SVM(kernel)\n best_model = model.fit(X_train, y_train)\n acc = model.evaluation(X_test, y_test)\n 
print(best_model.support_vectors_.shape)\n print(best_model.support_.shape)\n print(\"acc is {}\".format(acc))\n print('save model...')\n dump(best_model, 'data/{}_SVM_v2.joblib'.format(kernel))\n\n # svm = load('data/rbf_SVM.joblib')\n # preds = svm.predict(X_test[:100])\n # print(preds)","repo_name":"k160438/ML-Project1","sub_path":"code/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"28609450663","text":"#coding:utf-8\nfrom flask import *\nimport db,modules,newsubmit\n\ndef Run(problem_id):\n\tproblem = db.Read_Problem(problem_id)\n\tif problem == None:\n\t\tflash(r'### 题目 P%d 没找着! \\n 可能是因为编号不对.'%problem_id,'error')\n\t\treturn modules.Page_Back()\n\tif not problem[9] and not modules.Current_User_Privilege(2):\n\t\tflash(r'无此权限','error')\n\t\treturn modules.Page_Back()\n\n\tif request.method == 'GET':\n\t\treturn render_template('problem.html',problem=problem)\n\telse:\n\t\treturn newsubmit.Submit(problem_id,request.form)\n","repo_name":"interestingLSY/intOJ","sub_path":"web/sites/problem.py","file_name":"problem.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"54"} +{"seq_id":"9054369302","text":"from django.shortcuts import render , redirect # added manually\nfrom .models import * \nfrom .forms import New_ad\nfrom django.contrib.auth.decorators import login_required\n# Create your views here.\n# def home_page(request):\n# ads = Car.objects.all()\n# data = {'ads' : ads}\n# return render(request , 'Car_Platform/home.html' , context=data)\n\ndef home_page(request):\n ads = Car.objects.all()\n \n # Get the selected car model from query parameters\n car_model_id = request.GET.get('car_model')\n if car_model_id:\n ads = ads.filter(car_make_id=car_model_id)\n \n # Handle sorting based on selected options\n selected_options = 
request.GET.getlist('sort-option')\n sort_mapping = {\n 'year_asc': 'date',\n \n 'price_asc': 'price',\n 'price_desc': '-price',\n 'date_asc': 'date',\n 'date_desc': '-date',\n }\n sort_criteria = [sort_mapping[option] for option in selected_options]\n ads = ads.order_by(*sort_criteria)\n\n car_models = Car_make.objects.all()\n\n context = {\n 'ads': ads,\n 'car_models': car_models,\n }\n return render(request, 'Car_Platform/home.html', context)\n\n@login_required\ndef car_detail(request , ad_id):\n car = Car.objects.get(id=ad_id)\n context = {'car':car}\n return render(request , 'Car_Platform/detail_page.html' , context)\n\n\n@login_required\ndef new_ad(request):\n if request.method == 'POST':\n ad_form = New_ad(request.POST , request.FILES)\n\n if ad_form.is_valid():\n ad_form.save()\n return redirect( 'Car_Platform:home_page')\n\n else:\n ad_form = New_ad() \n\n context = {'ad_form' : ad_form}\n return render(request , 'Car_Platform/new_ad.html' , context) \n\n","repo_name":"hanzla56/Car-selling-Platform","sub_path":"Car_Platform/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"5645544684","text":"# CoffeeShop\n\n\nfrom trail_exam.shop import Shop\nclass CoffeeShop(Shop):\n\n# can be created by their name and the price of its served coffee\n#\n# should work the same way as a general shop\n\n def __init__(self, shop_name, price):\n super().__init__(shop_name)\n self.price = price\n\n# can serve coffee as well on the given price\n#\n# when a coffee is served, the price of the coffee is added to the income of the shop,\\\n# and we print: «shop name» served coffee for «coffee price»$\n\n def serve_coffee(self, product_name, quantity):\n super().sell(product_name, quantity)\n coffee_served = quantity * self.price\n self.income += coffee_served # the price of the coffee is added to the income of the shop\n print(self.shop_name+ \" 
served coffee for \"+ self.price + \" $.\")\n\n","repo_name":"talathkhaleel1/learning","sub_path":"application_development/app_dev_with_inheritance/grocery_shop/coffee_shop.py","file_name":"coffee_shop.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2874285291","text":"# save data function for the group and feature files\ndef save_data(group_data, output_feature, output_group, n_grams_flag):\n if len(group_data) == 0:\n return\n\n output_group.write(str(len(group_data))+\"\\n\")\n for data in group_data:\n # only include nonzero features\n if n_grams_flag:\n # n-grams and other features\n feats = [p for p in data[2:] if float(p.split(':')[1]) != 0.0]\n else:\n # only other features\n feats = [p for p in data[8:] if float(p.split(':')[1]) != 0.0]\n\n output_feature.write(data[0] + \" \" + \" \".join(feats) + \"\\n\")\n\n\n# transform the svmlight data to the format required by xgboost's lambdaMART\ndef trans_data(in_file, out_feature, out_group, n_grams_flag):\n # open the files\n fi = open(in_file, encoding=\"utf8\")\n output_feature = open(out_feature, \"w\")\n output_group = open(out_group, \"w\")\n # initialize the group lists\n group_data = []\n group = \"\"\n # for every line in the file extract feature and group data\n for line in fi:\n if not line:\n break\n if \"#\" in line:\n line = line[:line.index(\"#\")]\n splits = line.strip().split(\" \")\n if splits[1] != group:\n save_data(group_data, output_feature, output_group, n_grams_flag)\n group_data = []\n group = splits[1]\n group_data.append(splits)\n\n save_data(group_data, output_feature, output_group, n_grams_flag)\n\n # close the opened files\n fi.close()\n output_feature.close()\n 
output_group.close()\n","repo_name":"kPsarakis/Core-IR-Group-11","sub_path":"LambdaMART/data_preparation.py","file_name":"data_preparation.py","file_ext":"py","file_size_in_byte":1572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"40595523011","text":"import argparse\nimport os\nimport time\nfrom helpers import read_strings\n\n\n\n# cat \tCatalan Numbers and RNA Secondary Structures\n# motz \tMotzkin Numbers and RNA Secondary Structures\n\n# partition\n#\n# Ensure that matching are non-crossing by splitting set into two partitions,\n# one between i and and j, one outside\n#\n# Parameters:\n# indices The set (of indices) that are to be partitioned\n# i One end of bond\n# j The other end of bond\n\ndef partition(indices,i,j):\n I1 = []\n I2 = []\n\n for k in indices:\n if k==i: continue\n if k==j: continue\n if i img.size[1]:\n raise ValueError(\"text doesn't fit\")\n y = (img.size[1] - height) // 2\n for t, w, h in pieces:\n x = (img.size[0] - w) // 2\n draw.text((x, y), t, font=font, fill=color)\n y += h\n\n def get_complementary(color):\n color = color[1:]\n color = int(color, 16)\n comp_color = 0xFFFFFF ^ color\n comp_color = \"#%06X\" % comp_color\n return comp_color\n\n color_text = get_complementary(color_cover)\n\n W, H = size\n img = Image.new(\"RGB\", (W, H), color=color_cover)\n fit_text(img, book_name, color_text, ImageFont.truetype(\"impact.ttf\", 17))\n\n draw = ImageDraw.Draw(img)\n\n # Имя автора\n font = ImageFont.truetype(os.path.abspath(r\"fonts\\Raleway-Regular.ttf\"), 15)\n w, h = draw.textsize(author, font=font)\n draw.text(((W - w) - 6, (H - h) - 6), author, font=font, fill=color_text)\n\n # Название категории\n font = ImageFont.truetype(os.path.abspath(r\"fonts\\Raleway-Bold.ttf\"), 12)\n w, h = draw.textsize(category_name, font=font)\n draw.text((6, 6), category_name, font=font, fill=color_text)\n\n return img\n\n def text2pdf_bytes(self, text):\n pdf = FPDF()\n pdf.add_page()\n 
pdf.add_font(\"ArialAll\", \"\", r\"fonts\\ARIALUNI.TTF\", uni=True)\n pdf.set_font(\"ArialAll\", size=14)\n pdf.set_top_margin(10)\n text_clear = bytes(text, \"utf8\").decode(\"utf8\", \"ignore\")\n\n pdf.multi_cell(190, 5, txt=text_clear, align=\"L\")\n pdf.output(\"default.pdf\")\n with open(\"default.pdf\", \"rb\") as f:\n bytes_ = f.read()\n os.remove(\"default.pdf\")\n return bytes_\n\n def add_book(self, author, title, category, color=\"#c8c8c8\", cover_book=None, text=None, text_txt_path=None):\n if cover_book == None:\n cover_book = self.get_cover_bytes(self.generate_cover(title, author, category, color))\n else:\n try:\n image = Image.open(cover_book)\n image.thumbnail((190, 288), Image.ANTIALIAS)\n cover_book = self.get_cover_bytes(image)\n except FileNotFoundError:\n cover_book = cover_book\n except:\n cover_book = self.get_cover_bytes(self.generate_cover(title, author, category, color))\n if text_txt_path and not text:\n text = open(text_txt_path, \"r\", encoding=\"utf8\").read()\n\n print(\"Start generate data\")\n try:\n pdf_ = self.text2pdf_bytes(text).hex()\n except:\n import traceback\n print(traceback.format_exc())\n return \"ERROR: INVALID TEXT\"\n data = {\n \"author\": author,\n \"author_lower\": author.lower(),\n \"title\": title,\n \"title_lower\": title.lower(),\n \"category\": category,\n \"cover\": cover_book.hex(),\n \"pdf\": pdf_\n }\n\n check_exist = self.es.multi_search([{\"author_lower\": author.lower()},\n {\"title_lower\": title.lower()}])[\"hits\"][\"hits\"]\n if check_exist != []:\n new_id = check_exist[0][\"_id\"]\n self.es.delete(id_=new_id)\n else:\n new_id = self.es.count({})[\"count\"]\n print(\"Book added\")\n self.es.create(id_=new_id, body_=data)\n\n return False\n\n def get_pdf_bytes_book(self, author, title):\n return hex2bytes(self.es.multi_search([\n {\"author\": author},\n {\"title\": title}\n ])[\"hits\"][\"hits\"][0][\"_source\"][\"pdf\"])\n\n def info_book(self, author, title, id_=None):\n try:\n if id_ != None:\n 
res = self.es.get(id_)[\"_source\"]\n author, title, category, cover = res[\"author\"], res[\"title\"], res[\"category\"], res[\"cover\"]\n else:\n res = \\\n self.es.multi_search([{\"author\": author}, {\"title\": title}], [\"_id\", \"category\", \"cover\"])[\"hits\"][\n \"hits\"][0]\n id_, category, cover = res[\"_id\"], res[\"_source\"][\"category\"], res[\"_source\"][\"cover\"]\n return {\"author\": author,\n \"title\": title,\n \"category\": category,\n \"cover_base64\": self.get_cover_base64(hex2bytes(cover)),\n \"id\": id_\n }\n except:\n return None\n\n def get_book(self, author, title):\n try:\n return self.es.multi_search([{\"author_lower\": author.lower()},\n {\"title_lower\": title.lower()}])[\"hits\"][\"hits\"][0][\"_source\"]\n except:\n return None\n\n def find_book(self, title):\n try:\n return [x[\"_source\"][\"author\"] for x in\n self.es.search({\"query\": {\"match\": {\"title_lower\": title.lower()}}, \"_source\": [\"author\"]})[\"hits\"][\n \"hits\"]]\n except:\n return None\n\n def author_books(self, author):\n try:\n return [x[\"_source\"][\"title\"] for x in\n self.es.search({\"query\": {\"match\": {\"author_lower\": author.lower()}}, \"_source\": [\"title\"]})[\n \"hits\"][\"hits\"]]\n except:\n return None\n\n def similarity_search(self, query):\n try:\n query = f'*{\"*\".join(query.lower())}*'.split()\n\n authors = self.es.search(body_={\"query\": {\"bool\": {\"must\":\n [{\"wildcard\": {\"author_lower\": q_a}} for q_a in\n query]\n }},\n \"_source\": [\"author\"]}\n )\n authors = [x[\"_source\"][\"author\"] for x in authors[\"hits\"][\"hits\"]] if authors else []\n\n titles = self.es.search(body_={\"query\": {\"bool\": {\"must\":\n [{\"wildcard\": {\"title_lower\": q_a}} for q_a in query]\n }},\n \"_source\": [\"title\"]}\n )\n titles = [x[\"_source\"][\"title\"] for x in titles[\"hits\"][\"hits\"]] if titles else []\n\n return list(set(authors)), list(set(titles))\n except:\n import traceback\n return [], []\n\n def 
get_random_book_category(self, category, count=5):\n book = self.es.search(body_={\"query\": {\"function_score\":\n {\"query\": {\"match\": {\"category\": category}},\n \"functions\": [{\"random_score\": {\"seed\": getrandbits(50)}}]}},\n \"_source\": [\"_id\"], \"size\": count})[\"hits\"][\"hits\"]\n return [self.get_book_id(id_[\"_id\"]) for id_ in book]\n\n def get_cover_bytes(self, image: Image = None, author=None, title=None):\n if author == None:\n img = image\n img_bytes = BytesIO()\n img.save(img_bytes, format=\"PNG\")\n img_bytes = img_bytes.getvalue()\n else:\n img = Image.open(self.info_book(author, title)[\"cover_bytes\"])\n img_bytes = BytesIO()\n img.save(img_bytes, format=\"PNG\")\n img_bytes = img_bytes.getvalue()\n\n return img_bytes\n\n def get_cover_base64(self, bytes_):\n return base64.b64encode(bytes_).decode(\"utf8\")\n\n def get_book_id(self, id_):\n return self.info_book(\"\", \"\", id_)\n\n def del_book(self, author, title):\n id_ = int(self.es.multi_search([{\"author\": author}, {\"title\": title}], [\"_id\"])[\"hits\"][\"hits\"][0][\"_id\"])\n self.es.delete(id_=id_)\n return True\n\n def get_category_book(self, category, page, page_count):\n get_books = [x[\"_id\"] for x in self.es.search(body_={\"query\": {\"match\": {\"category\": category}},\n \"from\": int(page_count * page),\n \"size\": int(page_count),\n \"_source\": [\"_id\"]})[\"hits\"][\"hits\"]]\n result = []\n for id_ in get_books:\n result.append(self.get_book_id(id_))\n return result\n\n def count_category(self, category):\n return self.es.count({\"query\": {\"match\": {\"category\": category}}})[\"count\"]\n","repo_name":"ascorblack/library","sub_path":"mainElastic.py","file_name":"mainElastic.py","file_ext":"py","file_size_in_byte":10147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"34872991922","text":"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom models.utils import 
preprocess_data\nimport numpy as np\nimport argparse\nimport torchvision\nimport cv2\nfrom torchvision import datasets, models, transforms\nimport sys\nimport numpy\nfrom models.vgg_face import get_pretrained_model, get_prediction\nimport matplotlib.pyplot as plt # D.Griesser: import added\nfrom image_utils import save_image, show_image #\nimport torch.nn.functional as F\n#uncomment to see some images \nnumpy.set_printoptions(threshold=sys.maxsize)\n\n\n\ndef choose_color(model,X,y,glass,mean):\n model.eval()\n potential_starting_color0 = [128,220,160,200,220]\n potential_starting_color1 = [128,130,105,175,210]\n potential_starting_color2 = [128, 0, 55, 30, 50]\n\n max_loss = torch.zeros(y.shape[0]).to(y.device)\n max_delta = torch.zeros_like(X)\n \n \n for i in range(len(potential_starting_color0)):\n delta1 = torch.zeros(X.size()).to(y.device)\n\n delta1[:,0,:,:] = glass[0,:,:]*potential_starting_color2[i]\n delta1[:,1,:,:] = glass[1,:,:]*potential_starting_color1[i]\n delta1[:,2,:,:] = glass[2,:,:]*potential_starting_color0[i]\n\n # X + delta1 --> applies new color to eyeglass frame (frame is already added to the image)\n all_loss = nn.CrossEntropyLoss(reduction='none')(model(X+delta1-mean),y)\n \n max_delta[all_loss >= max_loss] = delta1.detach()[all_loss >= max_loss]\n max_loss = torch.max(max_loss, all_loss)\n\n print(\"choose_color() Iteration %d / %d\" % (i + 1, len(potential_starting_color0))) # D.Griesser: print-statement added\n \n \n\n print(\"choose_color() finished\") # D.Griesser: print-statement added\n \n # returns colored glass silhouttes for the whole batch\n return max_delta\n\n\ndef glass_attack(model, X, y, glass, alpha=1, num_iter=20 ,momentum=0.4, y_target=None):\n \"\"\" Construct glass frame adversarial examples on the examples X\"\"\"\n\n targeted = y_target is not None\n\n model.eval()\n mean = torch.Tensor(np.array([129.1863 , 104.7624,93.5940])).view(1, 3, 1, 1)\n de = torch.device(\"cuda:0\" if torch.cuda.is_available() else 
\"cpu\")\n mean = mean.to(de)\n X1 = torch.zeros_like(X,requires_grad = True)\n X1.data = (X+mean)*(1-glass) # add glasses to images\n\n with torch.set_grad_enabled(False):\n print(\"glass_attack() calls choose_color()\") # D.Griesser: print-statement added\n color_glass = choose_color(model,X1,y,glass,mean) # sole purpose is to pick a starting color\n \n with torch.set_grad_enabled(True):\n X1.data = X1.data+color_glass-mean\n delta =torch.zeros_like(X)\n \n #D.Griesser: change this area (incorporate the different algorithms)? \n for t in range(num_iter):\n print(\"glass_attack() Iteration %d / %d\" % (t + 1, num_iter)) # D.Griesser: print-statement added\n loss = nn.CrossEntropyLoss()(model(X1), y_target if targeted else y)\n loss.backward()\n\n delta_change = X1.grad.detach()*glass\n max_val,indice = torch.max(torch.abs(delta_change.view(delta_change.shape[0], -1)),1)\n r = alpha * delta_change /max_val[:,None,None,None]\n\n if t == 0:\n delta.data = r\n\n else:\n if targeted:\n delta.data = momentum * delta.detach() - r\n else:\n delta.data = momentum * delta.detach() + r\n\n delta.data[(delta.detach() + X1.detach() + mean) > 255] = 0 \n delta.data[(delta.detach() + X1.detach() + mean) < 0 ] = 0 \n\n X1.data = (X1.detach() + delta.detach())\n X1.data = torch.round(X1.detach()+mean) - mean\n X1.grad.zero_()\n \n print(\"glass_attack() finished\") # D.Griesser: print-statement added\n return (X1).detach()\n\n\nif __name__ == \"__main__\":\n\n names = [line.rstrip('\\n') for line in open('data/names.txt')]\n \n alpha = 20\n iterations = 50\n batch_size = 10\n\n glass = cv2.imread('data/glasses/silhouette.png')\n\n glasses = transforms.ToTensor()(glass)\n\n model = get_pretrained_model()\n\n model.eval()\n\n # torch.manual_seed(1234)\n \n dataloaders, dataset_sizes = preprocess_data(batch_size)\n\n dataiter = iter(dataloaders)\n data = dataiter.next()\n\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n images, labels = data\n\n 
images = images[:,[2,1,0],:,:] #rgb to bgr\n\n model.to(device)\n images.to(device)\n labels.to(device)\n glasses.to(device)\n print(images.device)\n\n values, indices = get_prediction(images, model)\n\n predictions = [(names[index], value) for index, value in zip(indices.tolist(), values.tolist())]\n ground_truth = [names[label] for label in labels.tolist()] \n\n print(f\"Predictions: {predictions}\")\n print(f\"Labels: {ground_truth}\")\n\n # show_image(images)\n\n target = torch.full((batch_size,), 3) # target class = 3\n \n target.to(device)\n\n x_adv = glass_attack(\n model=model,\n X=images,\n y=labels,\n glass=glasses,\n alpha=alpha,\n num_iter=iterations,\n y_target=target\n )\n \n print(\"Expectation: %s\" %names[target[0].item()])\n\n values, indices = get_prediction(x_adv, model)\n \n predictions = [(names[index], value) for index, value in zip(indices.tolist(), values.tolist())]\n print(f\"Predictions: {predictions}\")\n\n show_image(x_adv)\n \n ","repo_name":"grieDan/ba-experiment","sub_path":"vgg-face-pytorch/glass_attack.py","file_name":"glass_attack.py","file_ext":"py","file_size_in_byte":5477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"9921036186","text":"\"\"\"\nMódulo para envio do link temporario no email do usuario\n\"\"\"\nfrom django.core.mail import send_mail\n\nclass SendEmail():\n\n def dispatch(self, subject:str, message:str, \n email:str, list_email:list, \n fail_silently = False):\n \"\"\"\n `subject`: O assunto do email\n\n `message`: A mensagem do email\n\n `email`: O email do usuario\n\n `list_email`: Tem o tipo`list` contendo uma lista de email, dentro dessa \n lista voce pode colocar seu prório e-mail\n\n `fail_silently`: Por padrão ela é False, mas pode ser alterado para True\n \"\"\"\n send_mail(subject, message, email, list_email, fail_silently = 
False)\n\n","repo_name":"JonasBarros1998/ecommerce","sub_path":"authentication/utils/sendEmail.py","file_name":"sendEmail.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"29149069983","text":"from fastapi.testclient import TestClient\n\nfrom ..main import app\n\nclient = TestClient(app)\n\ndef test_root():\n response = client.get(\"/?token=jessica\")\n assert response.status_code == 200\n assert response.json() == {\"message\": \"Hello Bigger Applications!\"}\n\ndef test_root_invalid_token():\n response = client.get(\"/?token=invalid\")\n assert response.status_code == 400\n assert response.json() == {\"detail\": \"No Jessica token provided\"}\n","repo_name":"adjaunzemis/fastapi","sub_path":"app/tests/test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"70182100001","text":"import math\nimport torch\nimport torch.nn as nn\nimport os\nimport numpy as np\nfrom timm.models.layers import DropPath\nfrom timm.models.vision_transformer import PatchEmbed, Mlp\n\n\nfrom .utils import auto_grad_checkpoint, to_2tuple\nfrom .PixArt_blocks import t2i_modulate, CaptionEmbedder, WindowAttention, MultiHeadCrossAttention, T2IFinalLayer, TimestepEmbedder, LabelEmbedder, FinalLayer\n\n\nclass PixArtBlock(nn.Module):\n \"\"\"\n A PixArt block with adaptive layer norm (adaLN-single) conditioning.\n \"\"\"\n\n def __init__(self, hidden_size, num_heads, mlp_ratio=4.0, drop_path=0., window_size=0, input_size=None, use_rel_pos=False, **block_kwargs):\n super().__init__()\n self.norm1 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)\n self.attn = WindowAttention(hidden_size, num_heads=num_heads, qkv_bias=True,\n input_size=input_size if window_size == 0 else (window_size, window_size),\n use_rel_pos=use_rel_pos, **block_kwargs)\n 
self.cross_attn = MultiHeadCrossAttention(hidden_size, num_heads, **block_kwargs)\n self.norm2 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)\n # to be compatible with lower version pytorch\n approx_gelu = lambda: nn.GELU(approximate=\"tanh\")\n self.mlp = Mlp(in_features=hidden_size, hidden_features=int(hidden_size * mlp_ratio), act_layer=approx_gelu, drop=0)\n self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()\n self.window_size = window_size\n self.scale_shift_table = nn.Parameter(torch.randn(6, hidden_size) / hidden_size ** 0.5)\n\n def forward(self, x, y, t, mask=None):\n B, N, C = x.shape\n\n shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = (self.scale_shift_table[None] + t.reshape(B, 6, -1)).chunk(6, dim=1)\n x = x + self.drop_path(gate_msa * self.attn(t2i_modulate(self.norm1(x), shift_msa, scale_msa)).reshape(B, N, C))\n x = x + self.cross_attn(x, y, mask)\n x = x + self.drop_path(gate_mlp * self.mlp(t2i_modulate(self.norm2(x), shift_mlp, scale_mlp)))\n\n return x\n\n\n#############################################################################\n# Core PixArt Model #\n#################################################################################\nclass PixArt(nn.Module):\n \"\"\"\n Diffusion model with a Transformer backbone.\n \"\"\"\n\n def __init__(\n self,\n input_size=32,\n patch_size=2,\n in_channels=4,\n hidden_size=1152,\n depth=28,\n num_heads=16,\n mlp_ratio=4.0,\n class_dropout_prob=0.1,\n pred_sigma=True,\n drop_path: float = 0.,\n window_size=0,\n window_block_indexes=[],\n use_rel_pos=False,\n caption_channels=4096,\n lewei_scale=1.0,\n config=None,\n **kwargs,\n ):\n super().__init__()\n self.pred_sigma = pred_sigma\n self.in_channels = in_channels\n self.out_channels = in_channels * 2 if pred_sigma else in_channels\n self.patch_size = patch_size\n self.num_heads = num_heads\n self.lewei_scale = lewei_scale,\n self.dtype = torch.get_default_dtype()\n\n self.x_embedder = 
PatchEmbed(input_size, patch_size, in_channels, hidden_size, bias=True)\n self.t_embedder = TimestepEmbedder(hidden_size)\n num_patches = self.x_embedder.num_patches\n self.base_size = input_size // self.patch_size\n # Will use fixed sin-cos embedding:\n self.register_buffer(\"pos_embed\", torch.zeros(1, num_patches, hidden_size))\n\n approx_gelu = lambda: nn.GELU(approximate=\"tanh\")\n self.t_block = nn.Sequential(\n nn.SiLU(),\n nn.Linear(hidden_size, 6 * hidden_size, bias=True)\n )\n self.y_embedder = CaptionEmbedder(in_channels=caption_channels, hidden_size=hidden_size, uncond_prob=class_dropout_prob, act_layer=approx_gelu)\n drop_path = [x.item() for x in torch.linspace(0, drop_path, depth)] # stochastic depth decay rule\n self.blocks = nn.ModuleList([\n PixArtBlock(hidden_size, num_heads, mlp_ratio=mlp_ratio, drop_path=drop_path[i],\n input_size=(input_size // patch_size, input_size // patch_size),\n window_size=window_size if i in window_block_indexes else 0,\n use_rel_pos=use_rel_pos if i in window_block_indexes else False)\n for i in range(depth)\n ])\n self.final_layer = T2IFinalLayer(hidden_size, patch_size, self.out_channels)\n\n self.initialize_weights()\n\n print(f'Warning: lewei scale: {self.lewei_scale}, base size: {self.base_size}')\n\n def forward_raw(self, x, t, y, mask=None, data_info=None):\n \"\"\"\n Original forward pass of PixArt.\n x: (N, C, H, W) tensor of spatial inputs (images or latent representations of images)\n t: (N,) tensor of diffusion timesteps\n y: (N, 1, 120, C) tensor of class labels\n \"\"\"\n self.h, self.w = x.shape[-2]//self.patch_size, x.shape[-1]//self.patch_size\n x = self.x_embedder(x) + self.pos_embed # (N, T, D), where T = H * W / patch_size ** 2\n t = self.t_embedder(t) # (N, D)\n t0 = self.t_block(t)\n y = self.y_embedder(y, self.training) # (N, 1, L, D)\n if mask is not None:\n if mask.shape[0] != y.shape[0]:\n mask = mask.repeat(y.shape[0] // mask.shape[0], 1)\n mask = mask.squeeze(1).squeeze(1)\n y = 
y.squeeze(1).masked_select(mask.unsqueeze(-1) != 0).view(1, -1, x.shape[-1])\n y_lens = mask.sum(dim=1).tolist()\n else:\n y_lens = [y.shape[2]] * y.shape[0]\n y = y.squeeze(1).view(1, -1, x.shape[-1])\n for block in self.blocks:\n x = auto_grad_checkpoint(block, x, y, t0, y_lens) # (N, T, D) #support grad checkpoint\n x = self.final_layer(x, t) # (N, T, patch_size ** 2 * out_channels)\n x = self.unpatchify(x) # (N, out_channels, H, W)\n return x\n\n def forward(self, x, timesteps, context, y=None, **kwargs):\n \"\"\"\n Forward pass that adapts comfy input to original forward function\n x: (N, C, H, W) tensor of spatial inputs (images or latent representations of images)\n timesteps: (N,) tensor of diffusion timesteps\n context: (N, 1, 120, C) conditioning\n y: extra conditioning.\n \"\"\"\n ## Still accepts the input w/o that dim but returns garbage\n if len(context.shape) == 3:\n context = context.unsqueeze(1)\n\n ## run original forward pass\n out = self.forward_raw(\n x = x.to(self.dtype),\n t = timesteps.to(self.dtype),\n y = context.to(self.dtype),\n )\n\n ## only return EPS\n out = out.to(torch.float)\n eps, rest = out[:, :self.in_channels], out[:, self.in_channels:]\n return eps\n\n def forward_with_dpmsolver(self, x, t, y, mask=None, **kwargs):\n \"\"\"\n dpm solver donnot need variance prediction\n \"\"\"\n # https://github.com/openai/glide-text2im/blob/main/notebooks/text2im.ipynb\n model_out = self.forward(x, t, y, mask)\n return model_out.chunk(2, dim=1)[0]\n\n def forward_with_cfg(self, x, t, y, cfg_scale, **kwargs):\n \"\"\"\n Forward pass of PixArt, but also batches the unconditional forward pass for classifier-free guidance.\n \"\"\"\n # https://github.com/openai/glide-text2im/blob/main/notebooks/text2im.ipynb\n half = x[: len(x) // 2]\n combined = torch.cat([half, half], dim=0)\n model_out = self.forward(combined, t, y, kwargs)\n model_out = model_out['x'] if isinstance(model_out, dict) else model_out\n eps, rest = model_out[:, :3], model_out[:, 
3:]\n cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)\n half_eps = uncond_eps + cfg_scale * (cond_eps - uncond_eps)\n eps = torch.cat([half_eps, half_eps], dim=0)\n return torch.cat([eps, rest], dim=1)\n\n def unpatchify(self, x):\n \"\"\"\n x: (N, T, patch_size**2 * C)\n imgs: (N, H, W, C)\n \"\"\"\n c = self.out_channels\n p = self.x_embedder.patch_size[0]\n h = w = int(x.shape[1] ** 0.5)\n assert h * w == x.shape[1]\n\n x = x.reshape(shape=(x.shape[0], h, w, p, p, c))\n x = torch.einsum('nhwpqc->nchpwq', x)\n imgs = x.reshape(shape=(x.shape[0], c, h * p, h * p))\n return imgs\n\n def initialize_weights(self):\n # Initialize transformer layers:\n def _basic_init(module):\n if isinstance(module, nn.Linear):\n torch.nn.init.xavier_uniform_(module.weight)\n if module.bias is not None:\n nn.init.constant_(module.bias, 0)\n\n self.apply(_basic_init)\n\n # Initialize (and freeze) pos_embed by sin-cos embedding:\n pos_embed = get_2d_sincos_pos_embed(self.pos_embed.shape[-1], int(self.x_embedder.num_patches ** 0.5), lewei_scale=self.lewei_scale, base_size=self.base_size)\n self.pos_embed.data.copy_(torch.from_numpy(pos_embed).unsqueeze(0).to(self.dtype))\n\n # Initialize patch_embed like nn.Linear (instead of nn.Conv2d):\n w = self.x_embedder.proj.weight.data\n nn.init.xavier_uniform_(w.view([w.shape[0], -1]))\n\n # Initialize timestep embedding MLP:\n nn.init.normal_(self.t_embedder.mlp[0].weight, std=0.02)\n nn.init.normal_(self.t_embedder.mlp[2].weight, std=0.02)\n nn.init.normal_(self.t_block[1].weight, std=0.02)\n\n # Initialize caption embedding MLP:\n nn.init.normal_(self.y_embedder.y_proj.fc1.weight, std=0.02)\n nn.init.normal_(self.y_embedder.y_proj.fc2.weight, std=0.02)\n\n # Zero-out adaLN modulation layers in PixArt blocks:\n for block in self.blocks:\n nn.init.constant_(block.cross_attn.proj.weight, 0)\n nn.init.constant_(block.cross_attn.proj.bias, 0)\n\n # Zero-out output layers:\n nn.init.constant_(self.final_layer.linear.weight, 0)\n 
nn.init.constant_(self.final_layer.linear.bias, 0)\n\n\ndef get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False, extra_tokens=0, lewei_scale=1.0, base_size=16):\n \"\"\"\n grid_size: int of the grid height and width\n return:\n pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token)\n \"\"\"\n if isinstance(grid_size, int):\n grid_size = to_2tuple(grid_size)\n grid_h = np.arange(grid_size[0], dtype=np.float32) / (grid_size[0]/base_size) / lewei_scale\n grid_w = np.arange(grid_size[1], dtype=np.float32) / (grid_size[1]/base_size) / lewei_scale\n grid = np.meshgrid(grid_w, grid_h) # here w goes first\n grid = np.stack(grid, axis=0)\n grid = grid.reshape([2, 1, grid_size[1], grid_size[0]])\n\n pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)\n if cls_token and extra_tokens > 0:\n pos_embed = np.concatenate([np.zeros([extra_tokens, embed_dim]), pos_embed], axis=0)\n return pos_embed\n\n\ndef get_2d_sincos_pos_embed_from_grid(embed_dim, grid):\n assert embed_dim % 2 == 0\n\n # use half of dimensions to encode grid_h\n emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0]) # (H*W, D/2)\n emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1]) # (H*W, D/2)\n\n emb = np.concatenate([emb_h, emb_w], axis=1) # (H*W, D)\n return emb\n\n\ndef get_1d_sincos_pos_embed_from_grid(embed_dim, pos):\n \"\"\"\n embed_dim: output dimension for each position\n pos: a list of positions to be encoded: size (M,)\n out: (M, D)\n \"\"\"\n assert embed_dim % 2 == 0\n omega = np.arange(embed_dim // 2, dtype=np.float64)\n omega /= embed_dim / 2.\n omega = 1. 
/ 10000 ** omega # (D/2,)\n\n pos = pos.reshape(-1) # (M,)\n out = np.einsum('m,d->md', pos, omega) # (M, D/2), outer product\n\n emb_sin = np.sin(out) # (M, D/2)\n emb_cos = np.cos(out) # (M, D/2)\n\n emb = np.concatenate([emb_sin, emb_cos], axis=1) # (M, D)\n return emb\n","repo_name":"city96/ComfyUI_ExtraModels","sub_path":"PixArt/models/PixArt.py","file_name":"PixArt.py","file_ext":"py","file_size_in_byte":12243,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"54"} +{"seq_id":"13141600968","text":"from django import forms\nfrom django.utils.safestring import mark_safe\n\nfrom xanmel.modules.xonotic.models import *\n\n\nclass NewsFeedFilterForm(forms.Form):\n event_types = forms.MultipleChoiceField(choices=EventType.choices(),\n initial=[i.value for i in EventType],\n widget=forms.CheckboxSelectMultiple(),\n required=False)\n maps = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'map name matches'}),\n required=False)\n players = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'player name matches'}),\n required=False)\n position_lte = forms.IntegerField(required=False,\n widget=forms.NumberInput(attrs={'placeholder': 'position <='}))\n\n def __init__(self, *args, **kwargs):\n servers = list(XDFServer.select(XDFServer.id, XDFServer.name))\n server_ids = [i.id for i in servers]\n kwargs['data'] = kwargs['data'].copy()\n if 'event_types' not in kwargs['data']:\n for i in EventType:\n kwargs['data'].update(event_types=i.value)\n if 'servers' not in kwargs['data']:\n for i in server_ids:\n kwargs['data'].update(servers=i)\n super().__init__(*args, **kwargs)\n self.fields['servers'] = forms.MultipleChoiceField(\n choices=[(i.id, i.name) for i in servers],\n initial=server_ids,\n widget=forms.CheckboxSelectMultiple(),\n required=False)\n\n\nclass MapListOrder(EChoice):\n MAP_NAME_ASC = (0, mark_safe('Map Name ↑'))\n MAP_NAME_DESC = (1, mark_safe('Map Name ↓'))\n PLAYER_ASC = (3, mark_safe('Record 
Holder ↑'))\n PLAYER_DESC = (4, mark_safe('Record Holder ↓'))\n TIME_ASC = (5, mark_safe('Time ↑'))\n TIME_DESC = (6, mark_safe('Time ↓'))\n TOP_SPEED_PLAYER_ASC = (7, mark_safe('Top Speed Holder ↑'))\n TOP_SPEED_PLAYER_DESC = (8, mark_safe('Top Speed Holder ↓'))\n TOP_SPEED_ASC = (9, mark_safe('Top Speed ↑'))\n TOP_SPEED_DESC = (10, mark_safe('Top Speed ↓'))\n\n\nclass MapListFilterForm(forms.Form):\n maps = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'map name matches'}),\n required=False)\n\n def __init__(self, *args, **kwargs):\n servers = list(XDFServer.select(XDFServer.id, XDFServer.name))\n server_ids = [i.id for i in servers]\n if 'servers' not in kwargs['data']:\n kwargs['data'] = kwargs['data'].copy()\n for i in server_ids:\n kwargs['data'].update(servers=i)\n super().__init__(*args, **kwargs)\n\n self.fields['servers'] = forms.MultipleChoiceField(\n choices=[(i.id, i.name) for i in servers],\n initial=server_ids,\n widget=forms.CheckboxSelectMultiple(),\n required=False)\n\n\nclass MapFilterForm(forms.Form):\n def __init__(self, *args, **kwargs):\n servers = list(XDFServer.select(XDFServer.id, XDFServer.name))\n server_ids = [i.id for i in servers]\n if 'servers' not in kwargs['data']:\n kwargs['data'] = kwargs['data'].copy()\n for i in server_ids:\n kwargs['data'].update(servers=i)\n super().__init__(*args, **kwargs)\n self.fields['servers'] = forms.MultipleChoiceField(\n choices=[(i.id, i.name) for i in servers],\n initial=server_ids,\n widget=forms.CheckboxSelectMultiple(),\n required=False)\n\n\nclass LadderFilterForm(forms.Form):\n players = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'player name matches'}),\n required=False)\n ladder_type = forms.ChoiceField(choices=LadderType.choices(), required=False)\n\n def __init__(self, *args, **kwargs):\n servers = list(XDFServer.select(XDFServer.id, XDFServer.name))\n super().__init__(*args, **kwargs)\n self.fields['server'] = forms.ChoiceField(choices=[(i.id, i.name) for 
i in servers], required=False)\n\n\nclass PlayerRecordSearchForm(MapListFilterForm):\n position_gte = forms.IntegerField(widget=forms.NumberInput(attrs={'placeholder': 'position >='}),\n required=False)\n position_lte = forms.IntegerField(widget=forms.NumberInput(attrs={'placeholder': 'position <='}),\n required=False)\n\n\nclass CompareWithForm(forms.Form):\n player1 = forms.IntegerField(widget=forms.HiddenInput())\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n players = XDFPlayer.select().order_by(XDFPlayer.nickname)\n self.fields['player2'] = forms.ChoiceField(choices=[(i.id, i.nickname) for i in players])\n\n\nclass SearchType(EChoice):\n CRYPTO_IDFP = ('crypto_idfp', 'Crypto IDFP')\n STATS_ID = ('stats_id', 'Xonstats ID')\n NICKNAME = ('nickname', 'Nickname')\n\n\nclass SearchPlayerForm(forms.Form):\n query = forms.CharField()\n query_type = forms.ChoiceField(choices=SearchType.choices())\n","repo_name":"nsavch/xanmel-web","sub_path":"xdf/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":5175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"7234888142","text":"from django.urls import path\nfrom .views import *\nfrom rest_framework.urlpatterns import format_suffix_patterns\n\napp_name = 'files'\n\nurlpatterns = [\n # File \n path('v1/file', Files_APIView.as_view()), \n path('v1/file/', Files_APIView_Detail.as_view()),\n path('v1/export1', ExportWithPandas.as_view()), \n path('v1/export2', ExportWithRestPandas.as_view()), \n]\n\nurlpatterns = format_suffix_patterns(urlpatterns)","repo_name":"davidcasr/file-management","sub_path":"files/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"1780993966","text":"import argparse\nimport json\nimport logging\nimport os\nimport time\nfrom typing import Dict\nimport re\nfrom shutil 
import which\n\nfrom kubernetes import config, client\nfrom kubernetes.client import V1Node\nfrom kubernetes.client.rest import ApiException\n\n\ndef get_cuda_version(api: client.CoreV1Api, node_name: str) -> (str, str):\n # Set the label \"capability.skippy.io/nvidia-cuda\" with the version of CUDA as a value (if installed)\n try:\n with open('/usr/local/cuda/version.txt', 'r') as version_file:\n version_str = version_file.read()\n version_re = re.search(r'^CUDA Version ([0-9]+).[0-9]+.[0-9]+', version_str)\n if len(version_re.groups()) == 1:\n version = version_re.group(1)\n logging.debug('capability.skippy.io/nvidia-cuda: Found NVidia CUDA Version %s', version)\n return 'capability.skippy.io/nvidia-cuda', version\n except FileNotFoundError:\n pass\n\n logging.debug('capability.skippy.io/nvidia-cuda: No valid /usr/local/cuda/version.txt found. '\n 'Assuming no CUDA installed.')\n return 'capability.skippy.io/nvidia-cuda', None\n\n\ndef check_nvidia_gpu(api: client.CoreV1Api, node_name: str) -> (str, str):\n # Set the label \"capability.skippy.io/nvidia-gpu\" if the binary \"nvidia-smi\" is found\n if which('nvidia-smi'):\n logging.debug('capability.skippy.io/nvidia-gpu: Found nvidia-smi')\n return 'capability.skippy.io/nvidia-gpu', ''\n else:\n logging.debug('capability.skippy.io/nvidia-gpu: No nvidia-smi available. Assuming no NVidia GPU')\n return 'capability.skippy.io/nvidia-gpu', None\n\n\ndef edge_label(api: client.CoreV1Api, node_name: str) -> (str, str):\n # Set the edge label if no locality type is set yet.\n status: V1Node = api.read_node(node_name)\n try:\n # noinspection PyStatementEffect\n status.metadata.labels['locality.skippy.io/type']\n logging.debug('locality.skippy.io/type: Already present, not doing anything here...')\n except KeyError:\n logging.debug('locality.skippy.io/type: Not yet set. 
Using default value (edge).')\n return 'locality.skippy.io/type', 'edge'\n\n\ndef storage_node_label(api: client.CoreV1Api, node_name: str) -> (str, str):\n # Set the edge label if no locality type is set yet.\n field_selector = 'spec.nodeName=' + node_name\n label_selector = 'app=minio'\n pods = api.list_pod_for_all_namespaces(watch=False, field_selector=field_selector, label_selector=label_selector)\n if len(pods.items) > 0:\n logging.debug('data.skippy.io/storage-node: Found a storage pod on the node. Setting label.')\n return 'data.skippy.io/storage-node', ''\n else:\n logging.debug('data.skippy.io/storage-node: No storage pod found on the node. Removing label.')\n return 'data.skippy.io/storage-node', None\n\n\nlabelling_functions = [get_cuda_version, check_nvidia_gpu, edge_label, storage_node_label]\n\n\ndef set_labels(api: client.CoreV1Api, node_name: str, labels: Dict[str, str]):\n try:\n logging.info(f'Updating labels for node {node_name}: {labels}...')\n body = {\n \"metadata\": {\n \"labels\": labels\n }\n }\n api.patch_node(node_name, body)\n logging.info('Update was successful.')\n except ApiException as e:\n # Parse the JSON message body of the exception\n logging.exception('ApiExceptionMessage: %s', json.loads(e.body)['message'])\n raise e\n\n\ndef main():\n # Parse the arguments\n parser = argparse.ArgumentParser(description='Skippy Daemon - Doing the dirty work away from the spotlight')\n parser.add_argument('-c', '--kube-config', action='store_true', dest='kube_config',\n help='Load kube-config from home dir instead of in-cluster-config from envs.', default=False)\n parser.add_argument('-n', '--node', action='store', dest='node_name',\n help='Node name to use (instead of environment variable NODE_NAME)')\n parser.add_argument('-d', '--debug', action='store_true', dest='debug',\n help='Enable debug logs.', default=False)\n args = parser.parse_args()\n level = logging.DEBUG if args.debug else logging.INFO\n node_name = args.node_name\n\n # Set the 
log level\n logging.getLogger().setLevel(level)\n\n # Load the kubernetes API config\n if args.kube_config:\n # Load the configuration from ~/.kube\n logging.debug('Loading kube config...')\n config.load_kube_config()\n else:\n # Load the configuration when running inside the cluster (by reading envs set by k8s)\n logging.debug('Loading in-cluster config...')\n config.load_incluster_config()\n\n api = client.CoreV1Api()\n\n old_labels = None\n while True:\n try:\n if node_name is None:\n node_name = os.environ['NODE_NAME']\n\n # Create the dict with all labels\n labels = {}\n for fn in labelling_functions:\n label = fn(api, node_name)\n if label is not None:\n labels[label[0]] = label[1]\n\n # Only patch the labels if they've changed\n if labels != old_labels:\n # Set the labels on the current node\n set_labels(api, node_name, labels)\n old_labels = labels\n else:\n logging.debug('Labels have not changed. No update necessary.')\n except KeyError:\n logging.exception('The name of the node could not be found! 
'\n 'Make sure to expose spec.nodeName as env var NODE_NAME.')\n\n # Wait for an hour until we re-check the node caps\n logging.debug('Waiting for 1 hour until re-check.')\n time.sleep(3600)\n\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"edgerun/skippy-daemon","sub_path":"skippy-daemon.py","file_name":"skippy-daemon.py","file_ext":"py","file_size_in_byte":5852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"46190166241","text":"import pandas as pd\r\nimport numpy as np\r\nfrom pylab import *\r\nfrom scipy import *\r\nfrom scipy import sparse\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.feature_extraction.text import TfidfTransformer\r\nfrom sklearn.decomposition import PCA\r\n\r\n# creates a dictionary with cuisines and all ingredients present in that cuisine\r\n# includes duplicates\r\ndef create_cuisine_ingredient_dict(data):\r\n dict_cuisine_ing = {}\r\n cuisines = []\r\n ingredients = []\r\n\r\n for i in range(len(data)):\r\n c = data['cuisine'][i]\r\n ing = data['ingredients'][i]\r\n if c not in dict_cuisine_ing.keys():\r\n cuisines.append(c)\r\n dict_cuisine_ing[c] = ing\r\n else : \r\n ing_list = dict_cuisine_ing[c]\r\n ing_list.extend(ing)\r\n dict_cuisine_ing[c] = ing_list\r\n ingredients.extend(ing)\r\n\r\n return dict_cuisine_ing, list(set(cuisines)), list(set(ingredients))\r\n\r\n\r\ndef count_matrix(dictionary, cuisines, ingredients):\r\n\t# we are counting number of times each ingredient occurs in a cuisine\r\n\tmatrix = np.zeros((len(cuisines), len(ingredients)))\r\n\ti = 0\r\n\tfor c in cuisines:\r\n\t\ting = dictionary[c]\r\n\t\tfor ingre in ing:\r\n\t\t\t# getting the index of ingreient\r\n\t\t\tj = ingredients.index(ingre)\r\n\t\t\tmatrix[i][j] += 1\r\n\t\ti += 1\r\n\r\n\treturn matrix\r\n\r\n\r\ndef tf_idf_from_count_matrix(count_matrix):\r\n \r\n countsMatrix = sparse.csr_matrix(count_matrix)\r\n transformer = TfidfTransformer()\r\n tfidf = 
transformer.fit_transform(count_matrix)\r\n tfidf.toarray() \r\n return tfidf.toarray()\r\n\r\n\r\ndef diffusion(cuisine_ingredient_dict, cuisines, labels, reduced_data) :\r\n\ti = 0 \r\n\tj = 0 \r\n\r\n\teffect_on_cluster = [0 for cuisine in cuisines]\r\n\r\n\t# Jaccard Index = (the number in both sets) / (the number in either set) * 100\r\n\tfor cuisineA in cuisines: \r\n\r\n\t A_intersection = 0\r\n\t numInClusterBesidesA = 0\r\n\t setA = set(cuisine_ingredient_dict[cuisineA])\r\n\t setB_forA = []\r\n\t j = 0\r\n\t \r\n\t for cuisineB in cuisines:\r\n\t \t# if it is A itself - we obviously wouldn't want this (will be exactly 1)\r\n\t if cuisineB != cuisineA: \r\n\t \t#determines if then they are both in the same cluster\r\n\t if labels[j] == labels[i]: \r\n\t setB_forA.extend(set(cuisine_ingredient_dict[cuisineB]))\r\n\t numInClusterBesidesA += 1\r\n\t j += 1\r\n\t \r\n\t A_intersection = len(set(setA & set(setB_forA))) / float(len(set(setA.union(setB_forA))))\r\n\t effect_on_cluster[i] = A_intersection\r\n\t \r\n\t i += 1\r\n\r\n\t#plot the data\r\n\trdata = reduced_data\r\n\ti=0\r\n\tfigureRatios = (15,20)\r\n\tx = []\r\n\ty = []\r\n\tcolor = []\r\n\tarea = []\r\n\r\n\t#creating a color palette:\r\n\tcolorPalette = ['#ff6300','#2c3e50', '#660033'] \r\n\t# green,blue, orange, grey, purple\r\n\r\n\tplt.figure(1, figsize=figureRatios)\r\n\r\n\tfor data in rdata:\r\n\t x.append(data[0]) \r\n\t y.append(data[1]) \r\n\t color.append(colorPalette[labels[i]]) \r\n\t area.append(effect_on_cluster[i]*27000) # magnifying the bubble's sizes (all by the same unit)\r\n\t # plotting the name of the cuisine:\r\n\t text(data[0], data[1], cuisines[i], size=10.6,horizontalalignment='center', fontweight = 'bold', color='w')\r\n\t i += 1\r\n\r\n\tplt.scatter(x, y, c=color, s=area, linewidths=2, edgecolor='w', alpha=0.80) \r\n\r\n\tplt.axis([-0.45,0.65,-0.55,0.55])\r\n\tplt.axes().set_aspect(0.8, 'box')\r\n\r\n\tplt.xlabel('PC1')\r\n\tplt.ylabel('PC2')\r\n\tplt.axis('off') # 
removing the PC axes\r\n\r\n\tplt.show()\r\n","repo_name":"MandyK94/Whats_Cooking","sub_path":"cuisine_diffusion.py","file_name":"cuisine_diffusion.py","file_ext":"py","file_size_in_byte":3446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74940234400","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\nfrom jspider.queue.base import BaseQueue\nimport asyncio\n\n__author__ = 'golden'\n__create_date__ = '2018/5/26 23:27'\n\n\nclass DefaultQueue(BaseQueue):\n def __init__(self, name, data_format='json', data_filter=None):\n super(DefaultQueue, self).__init__(name=name, data_format=data_format, data_filter=data_filter)\n self._queue = [] # asyncio.Queue()\n\n @classmethod\n def from_spider(cls, spider, name, data_format='json', data_filter=None):\n \"\"\"\n :param spider: spider object\n :param data_format: data_format\n :param data_filter: data_filter\n :return:\n \"\"\"\n return cls(name=name, data_format=data_format, data_filter=data_filter)\n\n async def push(self, request):\n self._queue.append(request)\n\n async def pop(self):\n if not await self.is_empty():\n return self._queue.pop()\n\n async def is_empty(self):\n return len(self._queue) == 0\n","repo_name":"goodking-bq/Jspider","sub_path":"jspider/queue/default.py","file_name":"default.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"6430763816","text":"import random\n\nhands = ['グー', 'チョキ', 'パー']\n\ndef start_message():\n print('じゃんけんスタート')\n\ndef get_my_hand():\n print('自分の手を入力してください')\n input_message = ''\n index = 0\n for hand in hands:\n input_message += str(index) + ':' + hand\n if index < 2:\n input_message += ', '\n index += 1\n return int(input(input_message))\n\ndef get_you_hand():\n return random.randint(0, 2)\n\ndef get_hand_name(hand_number):\n return hands[hand_number]\n\ndef view_hand(my_hand, you_hand):\n print('自分の手は ' + 
get_hand_name(my_hand))\n print('相手の手は ' + get_hand_name(you_hand))\n\ndef view_result(hand_diff):\n if hand_diff == 0:\n print('あいこ')\n elif hand_diff == -1 or hand_diff == 2:\n print('勝ち')\n else:\n print('負け')\n\nstart_message()\n\nmy_hand = get_my_hand()\nyou_hand = get_you_hand()\nhand_diff = my_hand - you_hand\n\nview_hand(my_hand, you_hand)\nview_result(hand_diff)\n","repo_name":"techgymjp/techgym_python","sub_path":"L2rT.py","file_name":"L2rT.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"54"} +{"seq_id":"69860416482","text":"from .hunter import SysctlHunter\nfrom binaryninja import *\n\n\ndef find_sysctls(bv: BinaryView):\n hunter = SysctlHunter(bv)\n hunter.run()\n\n\nPluginCommand.register(\n \"Sysctl Hunter\\\\Identify OIDs\",\n \"Find Sysctl OIDs\",\n find_sysctls,\n is_valid=lambda v: \"_sysctl_register_oid\" in v.symbols,\n)\n","repo_name":"xpcmdshell/SysctlHunter","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"54"} +{"seq_id":"28675140398","text":"from flask import Flask, Response, render_template\nfrom imutils.video import VideoStream\nfrom fastai.vision.all import *\nimport threading\nimport imutils\nimport cv2\nimport time\n\noutput_frame = None\npred = 'neutral'\nlock = threading.Lock()\n\napp = Flask(__name__)\n\nlearn = load_learner('convnet_vision_model.pkl')\nvs = VideoStream(src=0).start()\nface_cascade = cv2.CascadeClassifier(\"haarcascade_frontalface_default.xml\") \n\ndef detect_expression():\n global vs, output_frame, lock, pred\n\n n_frames = 20\n frame_count = 0\n\n while True:\n frame = vs.read() # read video data\n frame = imutils.resize(frame, width=600) # resive window\n frame = cv2.flip(frame, 1) # flip the frame horizontally\n\n # convert to greyscale for model\n gray = cv2.cvtColor(frame, 
cv2.COLOR_BGR2GRAY)\n face_coord = face_cascade.detectMultiScale(gray, 1.1, 5, minSize=(30, 30))\n\n # prevents error if no face detected\n for coord in face_coord:\n # extract co-ordinates from face detector \n X, Y, w, h = coord\n H, W, _ = frame.shape\n X_1, X_2 = (max(0, X - int(w * 0.15)), min(X + int(1.15 * w), W))\n Y_1, Y_2 = (max(0, Y - int(h * 0.15)), min(Y + int(1.15 * h), H))\n img_cp = gray[Y_1:Y_2, X_1:X_2].copy()\n\n # model prediction - no transforms required\n if not frame_count % n_frames:\n pred, idx, probability = learn.predict(img_cp)\n\n # determine size of prediction label\n padding = 5\n text_size, _ = cv2.getTextSize(pred, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)\n text_w, text_h = text_size\n\n # render background for prediction label\n cv2.rectangle(\n frame,\n (X_1, Y_1), \n (X_1 + text_w + padding * 2, Y_1 - text_h - padding * 2), \n (0, 255, 200), -1)\n\n # render prediction label\n cv2.putText(\n frame, pred, \n (X_1 + padding, Y_1 - padding), \n cv2.FONT_HERSHEY_SIMPLEX, \n 0.5, (0, 0, 0), 1)\n \n # render box around face\n cv2.rectangle(frame, (X_1, Y_1), (X_2, Y_2), (0, 255, 200), 2)\n\n frame_count += 1\n frame_count = frame_count % n_frames\n\n with lock:\n output_frame = frame.copy()\n\ndef generate_img():\n global output_frame, lock\n\n while True:\n # wait until lock is acquired\n with lock:\n # check if output frame is available\n if not output_frame.size:\n # bug CHECK THIS ####\n continue\n \n # encode output frame to .jpg\n flag, encoded_img = cv2.imencode('.jpg', output_frame)\n\n # ensure frame was successfully encoded\n if not flag:\n continue\n\n yield(b'--frame\\r\\n' b'Content-Type: image/jpeg\\r\\n\\r\\n' + \n\t\t\tbytearray(encoded_img) + b'\\r\\n')\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n@app.route('/camera_feed')\ndef camera_feed():\n return Response(generate_img(), \n mimetype = \"multipart/x-mixed-replace; boundary=frame\")\n\n\nif __name__ == \"__main__\":\n t = 
threading.Thread(target=detect_expression)\n t.daemon = True\n t.start()\n\n app.run(debug=True, threaded=True, use_reloader=False, port=8000)\n\nvs.stop()","repo_name":"finnformica/video-facial-recognition","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"20158482103","text":"\"\"\"Core admin.\"\"\"\n\n# Django\nfrom django.contrib import admin\n\n# Project\nfrom core.models import TestModel\n\n# Register your models here.\n\n\n@admin.register(TestModel)\nclass TestModelAdmin(admin.ModelAdmin): # noqa: D101\n list_display = [\n 'name',\n 'number',\n ]\n","repo_name":"JerzyDeb/ajax_helpers","sub_path":"core/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"25526449732","text":"import random\n\nimport pygame\n\nfrom properties import *\n\n\nclass Tile(pygame.sprite.Sprite):\n \"\"\"Основной класс для тайлов\"\"\"\n\n def __init__(self, pos: tuple, groups: pygame.sprite.Group, surface,\n z: int = LAYERS['ground']):\n super().__init__(groups)\n self.z = z\n self.image = surface\n self.rect = self.image.get_rect(topleft=pos)\n self.__hitbox = self.rect.copy()\n\n @property\n def hitbox(self):\n return self.__hitbox\n\n\nclass NotTiledImage(Tile):\n def __init__(self, pos: tuple, groups: pygame.sprite.Group, surface, z: int = LAYERS['ground']):\n super().__init__(pos, groups,\n surface, z)\n offset_y = pos[1] + TILE_SIZE\n self.rect = self.image.get_rect(bottomleft=(pos[0], offset_y))\n\n\nclass Trigger(Tile):\n \"\"\"Tile c возможностью запуска скриптов\"\"\"\n\n def __init__(self,\n pos: tuple,\n groups: pygame.sprite.Group,\n surface,\n script,\n z: int = LAYERS['ground'],\n chance: int = 100\n ):\n super().__init__(pos, groups, surface, z)\n self.triggered = False\n self.script = script\n 
self.chance = chance\n\n def check(self):\n # log.debug(f\"function is {self.func}\")\n if not self.triggered and self.chance >= random.randint(0, 100):\n self.script.execute()\n self.triggered = True\n","repo_name":"AyurDondokov/Game_on_python","sub_path":"tile.py","file_name":"tile.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"39067226477","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# ## Library Prerequisites\nimport random\nimport numpy as np\nnp.random.seed(42)\nfrom collections import defaultdict\nimport matplotlib.pyplot as plt\nfrom sklearn.datasets import fetch_openml\nfrom sklearn.model_selection import train_test_split\n\n\n# ## One-hot Encoding\n\ndef to_categorical(y, num_classes=None, dtype='float32'):\n y = np.array(y, dtype='int')\n input_shape = y.shape\n if input_shape and input_shape[-1] == 1 and len(input_shape) > 1:\n input_shape = tuple(input_shape[:-1])\n y = y.ravel()\n if not num_classes:\n num_classes = np.max(y) + 1\n n = y.shape[0]\n categorical = np.zeros((n, num_classes), dtype=dtype)\n categorical[np.arange(n), y] = 1\n output_shape = input_shape + (num_classes,)\n categorical = np.reshape(categorical, output_shape)\n return categorical\n\n\n# ## Preprocessing\n\nx, y = fetch_openml('mnist_784', version=1, return_X_y=True)\n\nx = (x/255.).astype(np.float32)\ny = to_categorical(y)\n\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.15, random_state=42)\n\n\nplt.imshow(x_train[0].reshape(28, 28), cmap='gray')\n\n\n# ## Network Implementation\n\nclass NumpyNet():\n def __init__(self, nodes, epochs=20, lr=0.5):\n self.nodes = nodes # neurons in each layer\n self.epochs = epochs\n self.lr = lr # learning rate\n self.params = {}\n self.create()\n \n def create(self): # init network weights\n params = self.params\n # three network weights\n params['W1'] = np.random.randn(self.nodes[1], self.nodes[0]) * 
np.sqrt(1./ self.nodes[1]) #multiply with small values\n params['W2'] = np.random.randn(self.nodes[2], self.nodes[1]) * np.sqrt(1./ self.nodes[2]) \n params['W3'] = np.random.randn(self.nodes[3], self.nodes[2]) * np.sqrt(1./ self.nodes[3]) \n \n def relu(self, x, deriv = False):\n if deriv:\n return x > 0 #derivative of relu\n return np.maximum(0, x)\n \n def softmax(self, x, deriv = False):\n exps = np.exp(x - x.max())\n if deriv:\n return exps/np.sum(exps, axis=0) * (1- exps/np.sum(exps, axis=0)) #derivative of softmax\n return exps / np.sum(exps, axis=0)\n \n def forward(self, x):#forward path\n params = self.params\n params['O0'] = x\n params['Z1'] = np.dot(params['W1'], x)\n params['O1'] = self.relu(params['Z1']) #input --> hidden layer 1\n \n params['Z2'] = np.dot(params['W2'], params['O1'])\n params['O2'] = self.relu(params['Z2']) #hidden layer 1 -> 2\n \n params['Z3'] = np.dot(params['W3'],params['O2'])\n params['O3'] = self.softmax(params['Z3']) #hidden layer 2 -> 3\n return params['O3']\n \n def backward(self, y, output):\n wparams = {}\n params = self.params\n error = 2 * (output - y)/output.shape[0] * self.softmax(params['Z3'], deriv=True)\n wparams['W3'] = np.outer(error, params['O2']) #weight changes for W3\n \n error = np.dot(params['W3'].T, error) * self.relu(params['Z2'], deriv=True)\n wparams['W2'] = np.outer(error, params['O1']) #weight changes for W2\n \n error = np.dot(params['W2'].T, error) * self.relu(params['Z1'], deriv=True)\n wparams['W1'] = np.outer(error, params['O0'])#weight changes for W1\n \n return wparams\n \n def update(self, wparams, lr):\n for key, v in wparams.items():\n self.params[key] -= lr * v #update network weight with changes calculated before\n \n def accuracy(self, x_test, y_test):\n predictions = []\n for x, y in zip(x_test, y_test):\n output = self.forward(x)\n pred = np.argmax(output)\n predictions.append(pred == np.argmax(y))\n return np.mean(predictions)\n \n def train_(self, x_train, y_train, x_test, 
y_test):#train with SGD\n for iteration in range(self.epochs):\n for x, y in zip(x_train, y_train):\n output = self.forward(x)\n wparams = self.backward(y, output)\n self.update(wparams)\n \n acc = self.accuracy(x_test, y_test)\n print(\"Epoch {0} -- Accuracy {1:.05f}%\".format(iteration, acc))\n \n def train(self, x_train, y_train, x_test, y_test, batch_size=1, lr_decay = False):#train with mini-batch SGD\n if batch_size < 2:\n return self.train_(x_train, y_train, x_test, y_test)\n \n for iteration in range(self.epochs):\n traindata = np.concatenate((x_train, y_train), axis=1)\n random.shuffle(traindata)\n batches = [traindata[k:k+batch_size] for k in range(0, len(traindata), batch_size)] # mini batch\n \n lr = self.lr* (self.epochs-iteration)/self.epochs if lr_decay else self.lr # learning rate decay\n for batch in batches:\n wparams_ = defaultdict(list)\n wparams = {}\n for data in batch:\n x, y = data[:784], data[784:]\n output = self.forward(x)\n wps = self.backward(y, output)\n for key, v in wps.items():\n wparams_[key].append(v)\n \n for key, v in wparams_.items(): \n wparams[key] = np.mean(wparams_[key],axis=0)#mean of mini batch weight changes\n self.update(wparams, lr) # update weights in each mini batch\n \n acc = self.accuracy(x_test, y_test)\n print(\"Epoch {0} -- LR {1:.05f} -- Accuracy {2:.03f}%\".format(iteration,lr, acc*100)) \n \n\n\nmodel = NumpyNet(nodes = [784, 128, 64, 10], epochs = 20, lr = 0.5)\nmodel.train(x_train,y_train,x_test,y_test, batch_size = 4, lr_decay = True)\n\n\n\nplt.imshow(x_test[110].reshape(28, 28), cmap='gray') #test a image\n\n\nnp.argmax(model.forward(x_test[110])) #predict the class of that image\n\n","repo_name":"guoqingbao/NumpyNet","sub_path":"example/TwoHiddenLayers.py","file_name":"TwoHiddenLayers.py","file_ext":"py","file_size_in_byte":5991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"13491890072","text":"import websocket\nimport json\nfrom kafka 
import KafkaProducer \n\nproducer = KafkaProducer(bootstrap_servers=\"localhost:9092\")\n\"\"\" spark = SparkSession.builder.appName(\"SimpleApp\").getOrCreate() \"\"\"\n\n\ndef on_open(ws):\n print(\"the socket is open\")\n subscribe_message = {\n \"type\": \"subscribe\",\n \"channels\": [{\"name\": \"ticker\", \"product_ids\": [\"BTC-USD\", \"ETH-USD\"]}],\n }\n ws.send(json.dumps(subscribe_message))\n\n\ndef on_message(ws, message):\n cur_data = json.loads(message)\n # print(cur_data)\n if cur_data[\"type\"] == \"ticker\":\n producer.send(cur_data[\"product_id\"], value=json.dumps(message).encode(\"utf-8\"))\n\n\n# url=\"wss://ws-feed-public.sandbox.exchange.coinbase.com\"\nurl = \"wss://ws-feed.pro.coinbase.com\"\nws = websocket.WebSocketApp(url, on_open=on_open, on_message=on_message)\n\n\nws.run_forever()\n","repo_name":"pavan-kumar-nuthi/Spark-streaming-batch","sub_path":"Coinbase_Connection.py","file_name":"Coinbase_Connection.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"6327744949","text":"# 해시/ 전화번호 목록\nfrom collections import Counter\n\ndef solution(phone_book):\n pb = Counter(phone_book)\n\n for i in pb:\n s = ''\n for j in i:\n s += j\n if s in pb and s != i:\n return False\n\n return True\n\nif __name__ == '__main__':\n print(solution([\"119\", \"97674223\", \"1195524421\"]))\n print(solution([\"123\",\"456\",\"789\"]))\n print(solution([\"12\",\"123\",\"1235\",\"567\",\"88\"]))","repo_name":"202002538/algorithm","sub_path":"programmers/p42577.py","file_name":"p42577.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"41283781157","text":"from detectron2 import model_zoo\nimport os, math\n\n\ndef add_test_config(cfg, args):\n cfg.merge_from_file(model_zoo.get_config_file(\"COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x.yaml\"))\n 
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7\n cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1\n cfg.MODEL.WEIGHTS = '/projects/patho1/Kechun/NestDetection/result_corrected/ablations_10_times/ITER_3_WFOCAL_w_2_nest_5x_cat_2/model_final.pth'\n\n cfg.DATALOADER.NUM_WORKERS = 8\n cfg.INPUT.ResizeShortestEdge = True\n\n cfg.DATASETS.TEST = (\"test\", \"valid\", \"train\")\n\n assert hasattr(args, 'num_classes')\n cfg.MODEL.ROI_HEADS.NUM_CLASSES = args.num_classes # foreground\n\n assert hasattr(args, 'loss')\n assert hasattr(args, 'weight')\n loss_type = args.loss\n\n # Loss for RPN BBOX CLASSIFICATION. options are ['CE', 'WCE', 'WFOCAL']\n cfg.MODEL.RPN.BBOX_CLS_LOSS_TYPE = loss_type\n cfg.MODEL.RPN.BBOX_CLS_LOSS_WEIGHT = args.weight # weight for object\n\n # Loss for ROI BBOX CLASSIFICATION. options are ['CE', 'WCE', 'WFOCAL']\n cfg.MODEL.ROI_BOX_HEAD.BBOX_CLS_LOSS_TYPE = loss_type\n cfg.MODEL.ROI_BOX_HEAD.BBOX_CLS_LOSS_WEIGHT = [args.weight] * cfg.MODEL.ROI_HEADS.NUM_CLASSES + [1] # weight for class\n\n cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5\n\n cfg.FOCAL_LOSS_GAMMA = 2\n\n # -----------------------------------------------------------------------------\n # CUSTOM parameters\n # -----------------------------------------------------------------------------\n assert hasattr(args, \"instance\")\n cfg.instance = args.instance\n\n assert hasattr(args, \"patch_dir\")\n assert hasattr(args, \"output_dir\")\n cfg.OUTPUT_DIR = args.output_dir\n cfg.PATCH_DIR = args.patch_dir\n assert hasattr(args, \"resize_dir\")\n cfg.RESIZE_DIR = args.resize_dir\n os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)","repo_name":"kechunl/Nest-Detection","sub_path":"config/test_config.py","file_name":"test_config.py","file_ext":"py","file_size_in_byte":1832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31137316355","text":"def get_spacing(depth):\n first_num_spacing = 2 ** depth\n other_num_spacing = 0\n if (depth == 1):\n other_num_spacing = 
3\n elif (depth == 2):\n other_num_spacing = 7\n elif (depth == 3):\n other_num_spacing = 15\n elif (depth == 4):\n other_num_spacing = 31\n return [first_num_spacing, other_num_spacing]\n \n\ndef print_tree(node, depth):\n layer_nodes = [node]\n while (len(layer_nodes) > 0):\n layer_has_nodes = False\n next_layer_nodes = []\n current_values = []\n spacing = get_spacing(depth)\n first_num_spacing = spacing[0]\n other_num_spacing = spacing[1]\n for i in range(len(layer_nodes)):\n n = layer_nodes[i]\n if (n is not None):\n layer_has_nodes = True\n if (n.left != None):\n next_layer_nodes.append(n.left)\n else:\n next_layer_nodes.append(None)\n if (n.right != None):\n next_layer_nodes.append(n.right)\n else:\n next_layer_nodes.append(None)\n #make every number two chars long\n svalue = str(n.value).rjust(2, ' ')\n else:\n svalue = ' '\n #spacing for layers\n if (len(current_values) == 0):\n svalue = svalue.rjust(first_num_spacing, ' ')\n else:\n svalue = svalue.rjust(other_num_spacing, ' ')\n current_values.append(svalue)\n if (layer_has_nodes):\n print (' '.join(current_values))\n layer_nodes = next_layer_nodes\n depth = depth - 1\n\nclass Node():\n def __init__(self, value, left = None, right = None):\n self.value = value\n self.left = left\n self.right = right\n\nclass Choice():\n def __init__(self, move, value):\n self.move = move\n self.value = value\n\n def __str__(self):\n return self.move + \": \" + str(self.value)\n\nclass mmBot():\n def __init__(self):\n self.nodes_explored = 0\n\n def minimax(self, node, is_max): \n self.nodes_explored = self.nodes_explored + 1\n\n # base case, if no sub nodes, just return the value\n if (node.left is None and node.right is None):\n return Choice(\"end\", node.value)\n\n # if node has only one child\n if (node.right is None):\n l_choice = self.minimax(node.left, not is_max)\n return Choice(\"left\", l_choice.value)\n elif (node.left is None):\n r_choice = self.minimax(node.right, not is_max)\n return Choice(\"right\", 
r_choice.value)\n\n # if child nodes exist, run minimax on each child nodes\n l_choice = self.minimax(node.left, not is_max)\n r_choice = self.minimax(node.right, not is_max)\n\n # compare results\n if (is_max):\n if (l_choice.value > r_choice.value):\n return Choice(\"left\", l_choice.value)\n else:\n return Choice(\"right\", r_choice.value)\n else:\n if (l_choice.value < r_choice.value):\n return Choice(\"left\", l_choice.value)\n else:\n return Choice(\"right\", r_choice.value)\n\n def select_move(self, node):\n self.nodes_explored = 0\n return self.minimax(node, True)\n\nclass abBot():\n def __init__(self):\n self.nodes_explored = 0\n\n def minimax(self, node, is_max, alpha, beta): \n self.nodes_explored = self.nodes_explored + 1\n\n # base case, if no sub nodes, just return the value\n if (node.left is None and node.right is None):\n return Choice(\"end\", node.value)\n # if node has only one child\n if (node.right is None):\n l_choice = self.minimax(node.left, not is_max, alpha, beta)\n return Choice(\"left\", l_choice.value)\n elif (node.left is None):\n r_choice = self.minimax(node.right, not is_max, alpha, beta)\n return Choice(\"right\", r_choice.value)\n\n if (is_max):\n # if child nodes exist, run minimax on each child nodes \n l_choice = self.minimax(node.left, not is_max, alpha, beta)\n alpha = max(l_choice.value, alpha)\n if (alpha >= beta):\n return Choice(\"left\", l_choice.value)\n r_choice = self.minimax(node.right, not is_max, alpha, beta)\n if (l_choice.value > r_choice.value):\n return Choice(\"left\", l_choice.value)\n else:\n return Choice(\"right\", r_choice.value)\n else:\n l_choice = self.minimax(node.left, not is_max, alpha, beta)\n beta = min(l_choice.value, beta)\n if (alpha >= beta):\n return Choice(\"left\", l_choice.value)\n r_choice = self.minimax(node.right, not is_max, alpha, beta)\n if (l_choice.value < r_choice.value):\n return Choice(\"left\", l_choice.value)\n else:\n return Choice(\"right\", r_choice.value)\n\n\n def 
select_move(self, node):\n self.nodes_explored = 0\n return self.minimax(node, True, -1000, 1000)\n\nroot = Node(\"na\", \n Node(3,\n Node(-2,\n Node(20),\n Node(15)),\n Node(4,\n Node(-7),\n Node(7))\n ),\n Node(7,\n Node(9,\n Node(-8),\n Node(-7)),\n Node(99,\n Node(36),\n Node(23))\n )\n)\n\nprint_tree(root, 4)\n\none_bot = mmBot()\nmove = one_bot.select_move(root)\nprint (\"mmBot\")\nprint (move)\nprint (one_bot.nodes_explored)\n\ntwo_bot = abBot()\nmove = two_bot.select_move(root)\nprint (\"\\nabBot\")\nprint (move)\nprint (two_bot.nodes_explored)","repo_name":"morgankenyon/RandomML","sub_path":"src/alphabetapruning.py","file_name":"alphabetapruning.py","file_ext":"py","file_size_in_byte":5731,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"54"} +{"seq_id":"73002601763","text":"import numpy as np\nimport utility\nimport matplotlib.pyplot as plt\nfrom scipy import interpolate\nimport os\nimport csv\n\ndef findWindow(node, branch, window_size=4, scale=(0.22, 0.22, 0.3)):\n if isinstance(node, np.ndarray):\n node = node.tolist()\n node[1] = 2\n parent_distance = 0\n child_distance = 0\n parent_nodes = []\n child_nodes = []\n # select nodes in distance window_size/2 around node\n current_parent_node = node\n while parent_distance < window_size/2:\n parent_nodes.append(current_parent_node)\n next_parent_node = utility.nextNode(current_parent_node, branch).tolist()\n if not isinstance(next_parent_node, list):\n #print('not enough parent nodes!')\n break\n parent_distance += utility.dist3D(current_parent_node, next_parent_node, scale=scale)\n current_parent_node = next_parent_node\n current_child_node = node\n while child_distance < window_size/2:\n child_nodes.append(current_child_node)\n next_child_node = utility.prevNode(current_child_node, branch).tolist()\n if not next_child_node == []:\n next_child_node = next_child_node[0]\n if not isinstance(next_child_node, list):\n #print('not enough child nodes!')\n break\n 
child_distance += utility.dist3D(current_child_node, next_child_node, scale=scale)\n current_child_node = next_child_node\n child_nodes = child_nodes[1:]\n try:\n window_nodes = np.concatenate((np.array(parent_nodes), np.array(child_nodes)), axis=0)\n except ValueError:\n if parent_nodes==[]:\n window_nodes = child_nodes\n elif child_nodes==[]:\n window_nodes = parent_nodes\n else:\n raise NameError('NoWindowFound')\n return np.array(window_nodes)\n \ndef calculateAnglesWithLinearRegression(node, branch, window_size=3.2, scale=(0.22, 0.22, 0.3), visualize=True, fixed_node=False):\n if isinstance(node, np.ndarray):\n node = node.tolist()\n parent_distance = 0\n child_distance = 0\n parent_nodes = []\n child_nodes = []\n \n #select parent nodes in given window\n current_parent_node = node\n while parent_distance < window_size/2:\n parent_nodes.append(current_parent_node)\n try:\n next_parent_node = utility.nextNode(current_parent_node, branch).tolist()\n except:\n return 180\n if not isinstance(next_parent_node, list):\n #print('not enough parent nodes!')\n return 180\n parent_distance += utility.dist3D(current_parent_node, next_parent_node, scale=scale)\n current_parent_node = next_parent_node\n \n #select child nodes in given window \n current_child_node = node\n while child_distance < window_size/2:\n child_nodes.append(current_child_node)\n next_child_node = utility.prevNode(current_child_node, branch).tolist()\n if not next_child_node ==[]:\n next_child_node = next_child_node[0]\n \n if not isinstance(next_child_node, list):\n #print('not enough child nodes!')\n return 180\n try:\n child_distance += utility.dist3D(current_child_node, next_child_node, scale=scale)\n current_child_node = next_child_node\n except:\n return 180\n \n #take the coordinates from the nodes\n parent_nodes = np.array(parent_nodes)\n child_nodes = np.array(child_nodes)\n parent_points = parent_nodes[:, 2:5]\n child_points = child_nodes[:, 2:5]\n \n #calculate the mean of the points\n 
parent_mean = parent_points.mean(axis=0)\n child_mean = child_points.mean(axis=0)\n \n #calculate svd's\n parent_uu, parent_dd, parent_vv = np.linalg.svd(parent_points - parent_mean)\n child_uu, child_dd, child_vv = np.linalg.svd(child_points - child_mean)\n \n parent_uu_fixednode, parent_dd_fixednode, parent_vv_fixednode = np.linalg.svd(parent_points - parent_points[0])\n child_uu_fixednode, child_dd_fixednode, child_vv_fixednode = np. linalg.svd(child_points - child_points[0])\n \n #calculate vectors and angle\n parent_vector = parent_vv[0]\n if utility.dist3D(parent_points[0]+parent_vector, parent_points[-1]) > utility.dist3D(parent_points[0]-parent_vector, parent_points[-1]):\n parent_vector *= -1\n child_vector = child_vv[0]\n if utility.dist3D(child_points[0]+child_vector, child_points[-1]) > utility.dist3D(child_points[0]-child_vector, child_points[-1]):\n child_vector *= -1\n angle = utility.vectorAngle3D(parent_vector, child_vector)\n \n parent_vector_fixednode = parent_vv_fixednode[0]\n if utility.dist3D(parent_points[0]+parent_vector_fixednode, parent_points[-1]) > utility.dist3D(parent_points[0]-parent_vector_fixednode, parent_points[-1]):\n parent_vector_fixednode *= -1\n child_vector_fixednode = child_vv_fixednode[0]\n if utility.dist3D(child_points[0]+child_vector_fixednode, child_points[-1]) > utility.dist3D(child_points[0]-child_vector_fixednode, child_points[-1]):\n child_vector_fixednode *= -1 \n angle_fixednode = utility.vectorAngle3D(parent_vector_fixednode, child_vector_fixednode)\n \n \n \n #visualization\n if visualize:\n linspace = np.reshape(np.linspace(-10,10,2), (2,1))\n parent_line = parent_vector * linspace\n child_line = child_vector * linspace\n parent_line += parent_mean\n child_line += child_mean\n \n linspace_fixednode = np.reshape(np.linspace(-20,0,2), (2,1))\n parent_line_fixednode = parent_vector_fixednode * linspace_fixednode\n child_line_fixednode = child_vector_fixednode * linspace_fixednode\n parent_line_fixednode += 
parent_points[0]\n child_line_fixednode += child_points[0]\n\n import matplotlib.pyplot as plt\n import mpl_toolkits.mplot3d as m3d\n lins = np.reshape(np.linspace(0,1,2), (2,1))\n a = parent_points - parent_mean\n a_line = parent_vv[0] * lins\n b = child_points - child_mean\n b_line = child_vv[0] * lins\n c = parent_points - parent_points[0]\n c_line = parent_vv_fixednode[0] * lins\n d = child_points - child_points[0]\n d_line = child_vv_fixednode[0] * lins\n \n ax = m3d.Axes3D(plt.figure())\n ax.scatter3D(*parent_points.T, color='red')\n ax.quiver(parent_points[0][0], parent_points[0][1], parent_points[0][2], parent_vector[0], parent_vector[1], parent_vector[2], color='red')\n ax.quiver(parent_points[0][0], parent_points[0][1], parent_points[0][2], parent_vv_fixednode[0][0], parent_vv_fixednode[0][1], parent_vv_fixednode[0][2], color='orangered')\n ax.scatter3D(*child_points.T, color='blue')\n ax.quiver(child_points[0][0], child_points[0][1], child_points[0][2], child_vector[0], child_vector[1], child_vector[2], color='blue')\n ax.quiver(child_points[0][0], child_points[0][1], child_points[0][2], child_vv_fixednode[0][0], child_vv_fixednode[0][1], child_vv_fixednode[0][2], color='cyan')\n string = 'angles: ' + str(angle) + '/' + str(angle_fixednode)\n ax.text(child_points[0][0]+1, child_points[0][1], child_points[0][2], s=string)\n plt.show()\n if fixed_node:\n return angle_fixednode\n else:\n return angle\n \n\ndef wavyness(infilename_or_mainbranch='data/trees/annotated_mainbranch.swc',\n outfilename_tree='data/trees/wavytree.swc',\n outfilename_kinks='data/trees/kinks.swc',\n angle_threshold=145,\n window_size_linear_regression=4.0,\n window_size_maximum_supression=4.0,\n n_colors=10,\n scale=(0.223,0.223,0.3),\n fix_node=False,\n plot_cdf=False):\n \n if plot_cdf:\n fig, ax = plt.subplots(figsize=(8, 4))\n ax.grid(True)\n ax.set_title('Cumulative distribution')\n ax.set_xlabel('Angle (deg)')\n ax.set_ylabel('Percentage')\n #ax.axis([50,cutoff_angle,0,1]) \n \n 
if isinstance(infilename_or_mainbranch, str):\n tree = utility.readSWC(infilename_or_mainbranch)\n elif isinstance(infilename_or_mainbranch, (list, np.ndarray)):\n tree = np.array(infilename_or_mainbranch)\n tree[:,5] = 0.5\n annotated_mainbranch=tree.copy()\n angles = np.zeros(len(tree))\n for i in range(len(tree)):\n angles[i] = calculateAnglesWithLinearRegression(tree[i], tree, window_size=window_size_linear_regression, visualize=False, fixed_node=fix_node)\n angles = np.reshape(angles, (len(angles), 1))\n angles[np.where(np.isnan(angles))] = 180\n angles[:20]=180 # set the first and last 20 nodes to 180 as they generally don't correspond to real kinks\n angles[-20:]=180\n data = np.concatenate((tree, angles), axis=1)\n sample_numbers = data[:,0]\n \n kinks_count = 0\n kinks = []\n while min(data[:,7]) < angle_threshold:\n annotated_mainbranch[np.argwhere(annotated_mainbranch[:,0]==data[data[:,7].argmin()][0])[0][0],5]=3\n kinks.append(data[data[:,7].argmin()].tolist())\n kinks_count += 1\n w = findWindow(data[data[:,7].argmin()][:7], tree, window_size=window_size_maximum_supression, scale=scale)\n indices = np.argwhere(np.isin(data[:,0], w[:,0])).reshape(len(w))\n for index in indices:\n data[index,7] = 180\n \n kinks = np.array(kinks)\n try:\n kinks[:,5] = 3\n utility.saveSWC(outfilename_kinks, kinks)\n except IndexError:\n pass\n \n if plot_cdf:\n n, bins, patches = ax.hist(kinks[:,7], bins=10000, normed=1, histtype='step',\n cumulative=True, label='neuronname')\n patches[0].set_xy(patches[0].get_xy()[:-1])\n ax.legend(loc='center left')\n \n\n print(angles.min()) \n m = interpolate.interp1d([0, 180], [1, n_colors])\n normalized_angles = np.round(m(angles)).reshape(len(angles))\n tree[:, 1] = normalized_angles\n utility.saveSWC(outfilename_tree, tree)\n \n \n if plot_cdf:\n plt.show()\n return kinks_count, angle_threshold, annotated_mainbranch\n \nif __name__ == '__main__':\n 
wavyness()","repo_name":"DonMaks/quantification_aging_neurons","sub_path":"wavyness.py","file_name":"wavyness.py","file_ext":"py","file_size_in_byte":10161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"29335252951","text":"from matplotlib import cm\nfrom mpl_toolkits.mplot3d import Axes3D\n\nimport matplotlib.pyplot as plt\nimport imageio\nimport numpy as np\n\ndef plot_1d(function, points, file_path=False):\n # plot 1d function\n\n plt.ion()\n fig = plt.figure(figsize=(3, 2), dpi=300)\n ax = fig.add_subplot(111)\n plt.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=0, hspace=0)\n params = {'legend.fontsize': 3,\n 'legend.handlelength': 3}\n plt.rcParams.update(params)\n\n ax.set_xlim(-100, 500)\n ax.set_ylim(-10, 22)\n \n # lower the size of the axis ticks\n ax.tick_params(axis='both', which='major', labelsize=3)\n \n\n # input (x, y) and output (z) nodes of cost-function graph\n function_x = np.linspace(-100, 500, 10000)\n function_y = function(function_x)\n\n # plot cost-function graph\n ax.plot(function_x, function_y, color='blue', linewidth=0.5)\n\n # plot starting points\n ax.scatter(points, function(points), color='red', marker='o', s=1)\n\n if file_path:\n # save image to file \n plt.savefig(file_path, bbox_inches='tight', pad_inches=0)\n\n # do not display image\n plt.close()\n else:\n plt.show()\n\ndef plot_2d(function, history):\n # plot Ackley function in 2D with CBO optimization path and colorbar\n plt.ion()\n fig = plt.figure(figsize=(3, 2), dpi=300)\n ax = fig.add_subplot(111)\n plt.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=0, hspace=0)\n params = {'legend.fontsize': 3,\n 'legend.handlelength': 3}\n plt.rcParams.update(params)\n\n ax.set_xlim(-7, 7)\n ax.set_ylim(-7, 7)\n\n # input (x, y) and output (z) nodes of cost-function graph\n function_x = np.linspace(-7, 7, 100)\n function_y = np.linspace(-7, 7, 100)\n\n x_mesh, y_mesh = np.meshgrid(function_x, 
function_y)\n z_mesh = function((x_mesh, y_mesh))\n\n # plot initial point\n ax.scatter(history[0][0], history[0][1], color='black', marker='o', s=10)\n\n # plot sgf optimization path\n ax.plot([el[0] for el in history], [el[1] for el in history], color='black', marker='x', linewidth=0.5,\n markersize=1)\n \n # plot cost-function graph\n ax.contourf(x_mesh, y_mesh, z_mesh, cmap=cm.coolwarm, alpha=.4)\n\n # plot final point\n ax.scatter(history[-1][0], history[-1][1], color='black', marker='o', s=10)\n\n\ndef save_gif(history, function, file_path, step=10):\n for i in range(0, len(history), step):\n plot_1d(function, history[i], file_path='./images/1d_function_{}1.png'.format(i))\n\n images = []\n for i in range(0, len(history), step):\n filename = f'./images/1d_function_{i}1.png'\n images.append(imageio.imread(filename))\n\n for _ in range(10):\n images.append(imageio.imread(filename))\n\n imageio.mimsave(file_path, images, duration=0.1)\n","repo_name":"GargoDan/CBO","sub_path":"src/visualisation/vis.py","file_name":"vis.py","file_ext":"py","file_size_in_byte":2821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"38400391551","text":"\nfrom qgs.params.params import QgParams\nfrom qgs.integrators.integrator import RungeKuttaIntegrator, RungeKuttaTglsIntegrator\nfrom qgs.functions.tendencies import create_tendencies\n\nimport unittest\nimport numpy as np\n\nreal_eps = 1.e-3\n\n\nclass TestTlAd(unittest.TestCase):\n\n # Model parameters instantiation with some non-default specs\n model_parameters = QgParams({'phi0_npi': np.deg2rad(50.) 
/ np.pi, 'hd': 0.3})\n # Mode truncation at the wavenumber 2 in both x and y spatial coordinate\n model_parameters.set_atmospheric_channel_fourier_modes(2, 2)\n\n # Changing (increasing) the orography depth and the meridional temperature gradient\n model_parameters.ground_params.set_orography(0.4, 1)\n model_parameters.atemperature_params.set_thetas(0.2, 0)\n\n f, Df = create_tendencies(model_parameters)\n\n integrator = RungeKuttaIntegrator()\n integrator.set_func(f)\n\n ic = np.random.rand(model_parameters.ndim) * 0.01\n integrator.integrate(0., 200000., 0.1, ic=ic, write_steps=0)\n _, ic = integrator.get_trajectories()\n\n tgls_integrator = RungeKuttaTglsIntegrator()\n tgls_integrator.set_func(f, Df)\n\n def test_taylor(self):\n\n for n in range(0, 7):\n\n y0 = self.ic\n dy = np.full_like(y0, 2. ** (-n)/np.sqrt(float(self.model_parameters.ndim)))\n y0prime = y0 + dy\n self.integrator.integrate(0., 0.1, 0.1, ic=y0, write_steps=0)\n _, y1 = self.integrator.get_trajectories()\n self.integrator.integrate(0., 0.1, 0.1, ic=y0prime, write_steps=0)\n _, y1prime = self.integrator.get_trajectories()\n\n dy1 = y1prime - y1\n\n self.tgls_integrator.integrate(0., 0.1, dt=0.1, write_steps=0, ic=y0, tg_ic=dy)\n _, _, dy1_tl = self.tgls_integrator.get_trajectories()\n\n print(\"Resulting difference in trajectory: (epsilon ~ 2^-\", n, \"= \", dy[0], \")\")\n print(\"diff: \", np.dot(dy1, dy1))\n print(\"tl: \", np.dot(dy1_tl, dy1_tl))\n print(\"ratio: \", np.dot(dy1, dy1)/np.dot(dy1_tl, dy1_tl))\n self.assertTrue(self.close_match(np.dot(dy1, dy1)/np.dot(dy1_tl, dy1_tl), 1., dy[0]/10))\n\n def test_adjoint_identity(self):\n\n y0 = self.ic\n for i in range(100):\n dy = np.random.randn(self.model_parameters.ndim)\n dy_bis = np.random.randn(self.model_parameters.ndim)\n\n # Calculate M(TL).x in dy1_tl\n self.tgls_integrator.integrate(0., 0.1, dt=0.1, write_steps=0, ic=y0, tg_ic=dy)\n _, _, dy1_tl = self.tgls_integrator.get_trajectories()\n\n # Calculate M(AD).x in dy1_ad\n 
self.tgls_integrator.integrate(0., 0.1, dt=0.1, write_steps=0, ic=y0, tg_ic=dy, adjoint=True)\n _, _, dy1_ad = self.tgls_integrator.get_trajectories()\n\n # Calculate M(AD).x in dy1_ad\n self.tgls_integrator.integrate(0., 0.1, dt=0.1, write_steps=0, ic=y0, tg_ic=dy_bis)\n _, _, dy1_bis_tl = self.tgls_integrator.get_trajectories()\n\n # Calculate M(AD).y in dy1_bis_ad\n self.tgls_integrator.integrate(0., 0.1, dt=0.1, write_steps=0, ic=y0, tg_ic=dy_bis, adjoint=True)\n _, _, dy1_bis_ad = self.tgls_integrator.get_trajectories()\n\n # Calculate norm < M(TL).x, y >\n norm1 = np.dot(dy1_tl, dy_bis)\n # Calculate norm < x, M(AD).y >\n norm2 = np.dot(dy, dy1_bis_ad)\n\n print(\" = \", norm1)\n print(\" = \", norm2)\n print(\"Ratio = \", norm1 / norm2)\n self.assertTrue(self.close_match(norm1, norm2))\n\n # Calculate norm \n norm1 = np.dot(dy1_bis_tl,dy)\n # Calculate norm \n norm2 = np.dot(dy_bis,dy1_ad)\n\n print(\" = \", norm1)\n print(\" = \", norm2)\n print(\"Ratio = \", norm1 / norm2)\n self.assertTrue(self.close_match(norm1, norm2))\n\n @staticmethod\n def close_match(v1, v2, eps=real_eps):\n return abs(v1 - v2) < eps\n","repo_name":"Climdyn/qgs","sub_path":"model_test/test_tlad.py","file_name":"test_tlad.py","file_ext":"py","file_size_in_byte":4073,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"54"} +{"seq_id":"12682902098","text":"from bs4 import BeautifulSoup\nimport requests\n\n# Конфиг ТГ бота\nchatId = ''\ntokenBot = ''\n\n# Категория для парсинга\nurlKwork = 'https://kwork.ru/projects?c=41'\n\n# Лист кворков\njobsList = []\n\n# первые кворки\nfirst = True\n\n# Отправить сообщение в ТГ\ndef send_msg(text, parse_mode='HTML') -> None:\n requests.post(f\"https://api.telegram.org/bot{tokenBot}/sendMessage?chat_id={chatId}&text={text}&parse_mode={parse_mode}&disable_web_page_preview=true\")\n\n# Основной цикл\nwhile True:\n kwork = requests.get(urlKwork)\n\n bsUse = BeautifulSoup(kwork.text, 'html.parser')\n blocks = 
bsUse.find_all('div', class_='card')\n\n # Парсинг кворка из биржи\n for block in blocks:\n infoBlock = BeautifulSoup(str(block), 'html.parser')\n\n kwork = infoBlock.find_all('div', class_='wants-card__header-title')[0]\n kworkGetLink = BeautifulSoup(str(kwork), 'html.parser')\n kworkLink = kworkGetLink.find('a').attrs['href']\n\n price = infoBlock.find_all('div', class_='wants-card__header-price')[0]\n workInfo = infoBlock.find_all('div', class_='breakwords')[0]\n\n author = infoBlock.find_all('a', class_='v-align-t')[0]\n authorLink = author.attrs['href']\n \n if not kwork in jobsList: \n jobsList.append(kwork)\n if first: pass\n else: send_msg(f'Новый кворк от {author.text}!\\n{authorLink}\\n\\n{kwork.text}\\n{kworkLink}\\n{price.text}\\n{workInfo.text}')\n \n first = False\n\n \n\n# /coded by @loveappless","repo_name":"love-apples/kwork-listening","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"9145404213","text":"# Author:zhaoyanqi\n\nfrom db import modles\nfrom lib import common\nfrom db import dbhandler\nimport time\nfrom conf import settings\nimport os\n\n\n\n@common.login_auth\ndef release_notice(user_dic,conn):\n notice = modles.Notice(name=user_dic['notice_name'],\n content = user_dic['content'],\n user_id=user_dic['user_id'],\n create_time=common.get_nowtime())\n back_dic = {'flag':True,'msg':'发布工作成功!'}\n common.send_back(back_dic,conn)\n\n\n@common.login_auth\ndef delete_movie(user_dic,conn):\n movie = dbhandler.select(name=user_dic['movie_name'],type='movie')\n movie.is_delete = 1\n movie.save()\n back_dic = {'flag': True, 'msg': '删除成功!'}\n common.send_back(back_dic, conn)\n\n@common.login_auth\ndef upload_movie(user_dic,conn):\n print('user_dic',user_dic)\n recv_size = 0\n file_name = common.get_uuid(user_dic['file_name'])+user_dic['file_name']\n file_path = 
os.path.join(settings.BASE_MOVIE_DIR,file_name)\n with open(file_path, 'wb') as f:\n while recv_size < user_dic['filesize']:\n recv_data = conn.recv(1024)\n f.write(recv_data)\n recv_size+=len(recv_data)\n print(recv_size)\n\n movie = modles.Movie(name=file_name,\n path=file_path,\n is_free=user_dic['is_free'],\n is_delete=0,\n create_time=common.get_nowtime(),\n user_id=user_dic['user_id'],\n file_md5=user_dic['file_md5'],\n )\n back_dic = {'flag':True,'msg':'上传成功'}\n common.send_back(back_dic,conn)\n","repo_name":"doraqiqi/youku","sub_path":"youkuServer/interface/admin_interface.py","file_name":"admin_interface.py","file_ext":"py","file_size_in_byte":1706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"8341871687","text":"import asyncio\n\nfrom rest_framework import viewsets, mixins\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import action\n\nfrom ..serializers import PollAnswerSerializer, PollQuestionSerializer\nfrom ..models import PollAnswer\n\n\nclass PollAnswerViewset(\n mixins.RetrieveModelMixin,\n mixins.CreateModelMixin,\n mixins.UpdateModelMixin,\n mixins.DestroyModelMixin,\n mixins.ListModelMixin,\n viewsets.GenericViewSet,\n ):\n\n queryset = PollAnswer.objects.all()\n serializer_class = PollAnswerSerializer\n\n @action(methods=['get'], detail=True)\n def question(self, request, pk=None):\n answer = self.get_object()\n question = answer.question\n question_serializer = PollQuestionSerializer(question, many=False)\n return Response(question_serializer.data)\n\n","repo_name":"hermesthecat/letsvpn","sub_path":"api/letsvpn/polls/viewsets/answer.py","file_name":"answer.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"8625911352","text":"import os\nimport time\nfrom multiprocessing import Pipe\nfrom queue import Queue\n\nfrom PyQt5.QtGui import QIcon\nfrom 
PyQt5.QtWidgets import QAction\n\nerror_msg_prefix = \"error: tools_common: \"\n\n\nclass BufferPipe:\n\n def __init__(self, pipe=None, role='', duplex=True):\n self.pipe = pipe\n self.role = role\n self.duplex = duplex\n\n def write(self, content=None):\n error_msg = ''\n try:\n self.pipe.send(content)\n except Exception as e:\n error_msg = get_error_msg(e.args, error_msg_prefix + \"BufferPipe: write\")\n return error_msg\n\n def get(self):\n return self.pipe.recv()\n\n def put(self, content=None):\n pass\n\n def get_all(self):\n pass\n\n def close(self):\n self.pipe.close()\n\n def readable(self):\n return self.pipe.readable\n\n def writable(self):\n return self.pipe.writable\n\n def closed(self):\n return self.pipe.closed\n\n def empty(self):\n return not self.pipe.poll()\n\n\ndef new_pipe_buffer(duplex=True):\n \"\"\"创建新的pipe,返回的是两个类\"\"\"\n parent, child = Pipe(duplex)\n return BufferPipe(parent, 'parent', duplex), BufferPipe(child, 'child', duplex)\n\n\nclass BufferQueue(Queue):\n\n def __init__(self):\n Queue.__init__(self)\n\n def write(self, content):\n try:\n self.put(content)\n except Exception as e:\n error_msg = get_error_msg(e.args, error_msg_prefix + 'BufferQueue: write')\n print(error_msg)\n\n\ndef get_error_msg(e_args=None, msg=''):\n \"\"\"将错误信息组合\"\"\"\n error_msg = msg + '\\n'\n if isinstance(e_args, tuple):\n for arg in e_args:\n error_msg += str(arg) + '\\n'\n return error_msg\n\n\ndef check_file_exist(path, file_name):\n \"\"\"检查path是否存在file_name\"\"\"\n try:\n if not os.path.exists(os.path.join(path, file_name)):\n return False\n else:\n return True\n except Exception as e:\n print(get_error_msg(e.args, error_msg_prefix + \"check_file_exist\"))\n return False\n\n\ndef find_all_positions_in_src(str_src='', str_dst=''):\n \"\"\"找到dst在src的所有位置,按从小到大排列\"\"\"\n positions = list()\n if len(str_src) <= 0 or len(str_dst) <= 0 or len(str_dst) > len(str_src):\n return positions\n for i in range(len(str_src) - len(str_dst) + 1):\n src_temp = 
str_src[i:(i + len(str_dst))]\n if src_temp == str_dst:\n positions.append(i)\n return positions\n\n\ndef load_icon(file=''):\n \"\"\"加载图标\"\"\"\n icon = None\n error_msg = ''\n try:\n if os.path.exists(file):\n pos = find_all_positions_in_src(file, '.')\n if pos:\n file_type = file[pos[len(pos) - 1] + 1:]\n if file_type == 'ico':\n icon = QIcon(file)\n elif file_type == 'svg':\n icon = QIcon()\n icon.addFile(file)\n except Exception as e:\n error_msg = get_error_msg(e.args, error_msg_prefix + 'load_icon')\n return icon, error_msg\n\n\ndef action_objectname(name='', tree_widget=None, objectname=''):\n \"\"\"返回一个带有ObjectName的QAction\"\"\"\n action = QAction(name, tree_widget)\n action.setObjectName(objectname)\n return action\n\n\ndef get_all_tree_child(item):\n \"\"\"获取所有的子类\"\"\"\n children = list()\n count = item.childCount()\n if not count:\n children.append(item.text(0))\n else:\n for i in range(count):\n child_item = item.child(i)\n children += get_all_tree_child(child_item)\n return children\n\n\ndef get_text_x(text=''):\n \"\"\"文本的起始点\"\"\"\n midpoint = 50\n x = int(midpoint - midpoint * len(text) / 14)\n if x < 0:\n x = 1\n return x + 5\n\n\ndef get_device_no(last_no=''):\n \"\"\"计算设备编号\"\"\"\n next_no = ''\n if last_no:\n if '-' in last_no:\n nos = last_no.split('-')\n if len(nos) == 2:\n if nos[0].isdigit() and nos[1].isdigit():\n b = int(nos[1]) + 1\n a = int(nos[0])\n if b >= 100:\n b = 0\n a += 1\n next_no = str(a) + '-' + str(b)\n else:\n next_no = '0-0'\n return next_no\n\n\ndef get_timestamp():\n \"\"\"生成时间戳\"\"\"\n rec_time_now = time.strftime(\"%Y%m%d%H%M%S\", time.localtime()) + get_ms()\n return str(rec_time_now)\n\n\ndef get_ms():\n \"\"\"get millisecond of now in string of length 3\"\"\"\n ms = str(int(time.time() * 1000) % 1000)\n if len(ms) == 1:\n return '00' + ms\n if len(ms) == 2:\n return '0' + ms\n return ms\n\n\ndef get_date_time_ms():\n rec_time_now = time.strftime(\"%Y-%m-%d_%H.%M.%S.\", time.localtime()) + get_ms()\n return 
str(rec_time_now)\n\n\ndef time_sleep(time_interval=0, to_stop=False):\n \"\"\"等待时间\"\"\"\n if time_interval > 0:\n time_interval_ms = time_interval * 10\n for i in range(time_interval_ms):\n time.sleep(0.1)\n # self.msleep(100)\n if to_stop:\n break\n\n\ndef str_find_pos(str_src=''):\n \"\"\"返回str_dst在str_src的所有位置\"\"\"\n pos = list()\n str_dst = '\\\\'\n # 如果str_src,或者str_dst为空,返回\n # str_dst比str_src长也返回\n if len(str_src) <= 0 or len(str_dst) <= 0 or len(str_dst) > len(str_src):\n return pos\n for i in range(len(str_src)):\n src_temp = str_src[i:(i + 1)]\n if src_temp == str_dst:\n pos.append(i)\n return pos\n\n\ndef check_dir(path=''):\n \"\"\"检查path是否存在,如果不存在就新建\"\"\"\n # 'C:\\\\Users\\\\PycharmProjects\\\\'\n pos = str_find_pos(path)\n pos += [len(path)]\n for i in range(len(pos)):\n temp_path = path[0:pos[i]]\n if not os.path.exists(temp_path):\n try:\n os.mkdir(temp_path)\n except Exception as e:\n print('创建目录失败!')\n print(e.args)\n return False\n return True\n\n\ndef string_lstrip(src='', dst='', once=True):\n # s.lstrip(rm) 删除s字符串中开头处,位于 rm删除序列的字符\n # once:True表示只删除第一个,False表示删除所有\n if src == dst:\n return ''\n elif len(dst) > len(src):\n return src\n if once:\n if src[0: len(dst)] == dst:\n return src[len(dst):]\n else:\n return src\n else:\n pos = 0\n for i in range(int(len(src) / len(dst))):\n if src[i * len(dst): i * len(dst) + len(dst)] == dst:\n pos = i * len(dst) + len(dst)\n else:\n break\n return src[pos:]\n\n\ndef copy_dict_except(src_dict=None, except_keys=None):\n \"\"\"copy a dict\"\"\"\n if except_keys is None:\n except_keys = list()\n dst_dict = {}\n if isinstance(src_dict, dict) and isinstance(except_keys, list):\n for key, value in src_dict.items():\n if key not in except_keys:\n key_temp = key\n value_temp = value\n dst_dict[key_temp] = value_temp\n return 
dst_dict\n","repo_name":"AlvinsFish/UiExample","sub_path":"py_tools/tools_common.py","file_name":"tools_common.py","file_ext":"py","file_size_in_byte":7171,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"30148563816","text":"class Solution:\n def maxProfit(self, prices: List[int]) -> int:\n ans = 0\n mprice = 0\n \n for i in range(1,len(prices)):\n mprice += prices[i]-prices[i-1]\n mprice = max (0,mprice)\n ans = max(ans,mprice)\n \n return ans","repo_name":"NotAdityaPawar/Leetcode-solutions","sub_path":"0121-best-time-to-buy-and-sell-stock/0121-best-time-to-buy-and-sell-stock.py","file_name":"0121-best-time-to-buy-and-sell-stock.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33827060745","text":"\"\"\"\nCreated on January 2021\n\n@author: Niko Suchowitz\n\"\"\"\nimport numpy as np\nimport pandas as pd\nfrom datetime import datetime as dt\nfrom datetime import date\nimport calendar\nimport os\n\n\ndef checkForGaps(raw_df, f_df_name, areatypecode, areaname, mapcode):\n\t\"\"\"\n\tthe main function to check for missing data in the csv files\n\t:param raw_df: the dataframe of the csv-file\n\t:param f_df_name: name for saving\n\t:param areatypecode: MBA, BZN, CTA or CTY\n\t:param areaname: mapcode + areatypecode\n\t:param mapcode: code for the country\n\t:return:\n\t\"\"\"\n\n\t# gaps are: missing 'DateTime' for that day/hour\n\t# DateTime is in 'YYYY-MM-DD HH:MM:00:00.000' and once per full hour\n\n\t# only take rows into 'sorted_df' which are equal to our wanted attributes\n\tsorted_df = raw_df.loc[(raw_df[\"MapCode\"] == mapcode) & (raw_df[\"AreaTypeCode\"] == areatypecode)]\n\tsorted_df[\"DateTime\"] = pd.to_datetime(sorted_df[\"DateTime\"])\n\t# sort by DateTime-column\n\tsorted_df.sort_values(by='DateTime', inplace=True)\n\n\t# if the 'DateTime' is not in hourly steps, we downsample to 
hours\n\t# first we have to set the 'DateTime' as index\n\tsorted_df = sorted_df.set_index(['DateTime'])\n\t# now resample\n\tsorted_df['TotalLoadValue'] = round(sorted_df.resample('H').mean()['TotalLoadValue'], 2)\n\t# now drop the unnecessary rows\n\tsorted_df.dropna(subset=[\"TotalLoadValue\"], inplace=True)\n\t# now set 'DateTime' back as column\n\tsorted_df.reset_index(inplace=True)\n\n\t# now we also set up ResolutionCode and ResolutionCode, so we can later save the csv properly\n\tresolutioncode = sorted_df['ResolutionCode'].iloc[1]\n\tareacode = sorted_df['AreaCode'].iloc[1]\n\n\t\"\"\"check if start and end of month is in data\"\"\"\n\t# find out if first day is in list- first fill days, then hours\n\tfirsttimestamp = (sorted_df['DateTime']).iloc[0]\n\t# check if 'firsttimestamp' is not first of the month\n\tif firsttimestamp.day != 1 or firsttimestamp.hour != 0:\n\t\t# if so create a datetime obj of the first day of month\n\t\tdayone = firsttimestamp.replace(day=1, hour=0)\n\t\t# now add to the sorted DF sorted_df, add in -1 pos and then add one to overall-index\n\t\tsorted_df.loc[-1] = (dayone, resolutioncode, areacode, areatypecode, areaname, mapcode, np.nan,\n\t\t dt.now())\n\t\tsorted_df.index = sorted_df.index+1\n\t\tsorted_df.sort_values(by='DateTime', inplace=True)\n\t# just to check if if-clause is working\n\t#else:\n\t#\tprint(\"first of the month is in list\")\n\n\t# now we check for the last of the month\n\tlastindex = len(sorted_df.index)-1\n\tlast_timestamp = sorted_df['DateTime'].iloc[lastindex]\n\t# calendar.monthrange return a tuple\n\t# (weekday of first day of the month, number of days in month)\n\tlast_day_of_month = calendar.monthrange(last_timestamp.year, last_timestamp.month)[1]\n\t# checks if date is not last day of month or not last hour\n\tif last_timestamp.date() != date(last_timestamp.year, last_timestamp.month, last_day_of_month) or \\\n\t\tlast_timestamp.hour != int('23'):\n\t\t# if so we create a datetime with the last day 
and add it to the dataframe\n\t\tlast_day_as_date = dt(last_timestamp.year, last_timestamp.month, last_day_of_month, 23)\n\t\tsorted_df.loc[-1] = (last_day_as_date, resolutioncode, areacode, areatypecode, areaname, mapcode,\n\t\t np.nan, dt.now())\n\t\tsorted_df.index = sorted_df.index+1\n\t\tsorted_df.sort_values(by='DateTime', inplace=True)\n\t# just to check if if-clause is working\n\t#else:\n\t#\tprint(\"last of the month is in list\")\n\n\t# print the auxiliary-dataframe into a csv\n\t# first check if folder exists and create if not\n\tisExist = os.path.exists('data/own_data')\n\tif not isExist:\n\t\tos.makedirs('data/own_data')\n\t\tprint(\"The new directory own_data created!\")\n\tsorted_df.to_csv(\"data/own_data/sortedTotalLoad.csv\", sep='\\t', encoding='utf-8', index=False,\n\t header=[\"DateTime\", \"ResolutionCode\", \"AreaCode\",\n\t \"AreaTypeCode\", \"AreaName\", \"MapCode\", \"TotalLoadValue\", \"UpdateTime\"])\n\n\t\"\"\"iterate to find the gaps\"\"\"\n\t# compare the date then time\n\t# init old datetime as first datetime of dataframe and create gap-list\n\told_date = firsttimestamp\n\tgap_list = []\n\t# loop over every datetime-obj check if gap by comparing new and old\n\tfor datetime in sorted_df['DateTime']:\n\t\t# set new_date to current datetime\n\t\tnew_date = datetime\n\t\t# compare the time of the dates\n\t\tgap_list = gap_list_creator(old_date, new_date, gap_list, resolutioncode, areacode, areatypecode,\n\t\t areaname, mapcode)\n\t\t# set the current datetime as old\n\t\told_date = datetime\n\n\t\"\"\"create a csv with all gaps included\"\"\"\n\t# convert list with the gaps to a dataframe\n\tgap_df = pd.DataFrame(gap_list)\n\t# first check if folder exists and create if not\n\tisExist = os.path.exists('data/own_data/gaplists')\n\tif not isExist:\n\t\tos.makedirs('data/own_data/gaplists')\n\t\tprint(\"The new directory gaplists created!\")\n\t# check if the gap-df is empty\n\tif gap_df.empty:\n\t\t#print(\"there are no 
gaps\")\n\t\tgap_df.to_csv('data/own_data/gaplists/'+f_df_name+'_gap.csv', sep='\\t', encoding='utf-8', index=False)\n\t\t# no gaps so final-version stays the same\n\t\tfinal_df = sorted_df\n\telse:\n\t\tgap_df.to_csv('data/own_data/gaplists/'+f_df_name+'_gap.csv', sep='\\t', encoding='utf-8', index=False,\n\t\t header=[\"DateTime\", \"ResolutionCode\", \"AreaCode\", \"AreaTypeCode\", \"AreaName\",\n\t\t \"MapCode\", \"TotalLoadValue\", \"UpdateTime\"])\n\t\t# concat both csv to have a list with filled in gaps then save as csv\n\t\tsorted_totalload_csv = pd.read_csv(\"data/own_data/sortedTotalLoad.csv\", sep='\\t', encoding='utf-8')\n\t\tgap_list_csv = pd.read_csv('data/own_data/gaplists/'+f_df_name+'_gap.csv', sep='\\t', encoding='utf-8')\n\t\tdataframes = [sorted_totalload_csv, gap_list_csv]\n\t\tfinal_df = pd.concat(dataframes)\n\n\t# sort everything on the DateTime-column and save as csv\n\tfinal_df.sort_values(by='DateTime', inplace=True)\n\tfinal_df.reset_index(drop=True, inplace=True)\n\t# save the final df as csv, check first if folder exists\n\t# first check if folder exists and create if not\n\tisExist = os.path.exists('data/own_data/ActualTotalLoad_edited')\n\tif not isExist:\n\t\tos.makedirs('data/own_data/ActualTotalLoad_edited')\n\t\tprint(\"The new directory ActualTotalLoad_edited created!\")\n\tisExist = os.path.exists('data/own_data/ActualTotalLoad_edited/'+mapcode)\n\tif not isExist:\n\t\tos.makedirs('data/own_data/ActualTotalLoad_edited/'+mapcode)\n\t\tprint(\"The new directory for \"+mapcode+\" is created!\")\n\t# now safe\n\tfinal_df.to_csv('data/own_data/ActualTotalLoad_edited/'+mapcode+'/'+f_df_name+'.csv', sep='\\t', encoding='utf-8',\n\t index=False,\n\t header=[\"DateTime\", \"ResolutionCode\", \"AreaCode\", \"AreaTypeCode\", \"AreaName\",\n\t \"MapCode\", \"TotalLoadValue\", \"UpdateTime\"])\n\n\ndef gap_list_creator(old_date, new_date, gap_list, resolutioncode, areacode, areatypecode, areaname, mapcode):\n\t\"\"\"\n\tfind gaps between 
the start and end datetime and return the whole list of gaps\n\n\t:param old_date: the date from which we start to check for gaps\n\t:param new_date: the final date\n\t:param gap_list: list with already found gaps\n\n\tThe following inputs are just for proper saving\n\t:param resolutioncode:\n\t:param areacode:\n\t:param areatypecode: MBA, BZN, CTA or CTY\n\t:param areaname: mapcode + areatypecode\n\t:param mapcode: code for the country\n\t:return: updated list of gaps found in data\n\t\"\"\"\n\t# add an hour to check for gap\n\told_h_added = old_date+pd.Timedelta(1, unit='H')\n\n\t# if old_h_added is same or bigger than new_date we have no gap\n\tif old_h_added >= new_date:\n\t\treturn gap_list\n\telse:\n\t\t# add every missing datetime between start(old_date) and end(new_date); start exclusive\n\t\tfor timestamp in pd.date_range(old_date, new_date, freq='H', closed='right'):\n\t\t\t# because end is inclusive we have to check if we reached the end\n\t\t\tif timestamp != new_date:\n\t\t\t\t# create a datetime obj from the timestamp\n\t\t\t\tdatetime_obj = timestamp.to_pydatetime()\n\t\t\t\t# saves the gap with null-value\n\t\t\t\tgap_list.append((datetime_obj, resolutioncode, areacode, areatypecode, areaname, mapcode,\n\t\t\t\t np.nan, dt.now()))\n\t\treturn gap_list\n","repo_name":"nSucho/comparison_gapfilling","sub_path":"gap_finder.py","file_name":"gap_finder.py","file_ext":"py","file_size_in_byte":8062,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"40645702334","text":"import numpy as np\nimport os\nimport ml_args\n\n\ndef ml_raster_import( ml_path ):\n ''' Import a raster found at given the path '''\n \"\"\"\n Args:\n ml_path(string): the full path of the raster .ras to import\n \"\"\"\n\n # check consistency #\n if ( not os.path.exists( ml_path ) ):\n\n # send message #\n sys.exit( 'turing : error : unable to access raster' )\n\n # retrieve raster size #\n ml_size = os.path.getsize( ml_path )\n\n 
# compute raster width #\n ml_width = int( round( ml_size ** ( 1.0 / 3.0 ) ) )\n\n # import raster data #\n with open( ml_path, 'rb' ) as ml_file:\n\n # read raster bytes #\n ml_byte = ml_file.read( ml_size )\n\n # convert to numpy array #\n ml_data = np.frombuffer( ml_byte, dtype=np.uint8 )\n\n # return raster array #\n return ml_data.reshape( ml_width, ml_width, ml_width )\n\n\ndef get_ml_data(ml_args_raster):\n ''' Calls ml_raster_import to import all rasters '''\n \"\"\"\n Args:\n ml_args_raster(string): the rasters folder path\n \"\"\"\n\n ml_data = []\n \n for raster_id in range(0, int(ml_args.count)):\n \n raster_path = ml_args_raster + '/raster-{:06d}.ras'.format(raster_id)\n \n # import raster array #\n data = ml_raster_import(raster_path)\n \n ml_data.append(data)\n \n return ml_data\n\n","repo_name":"RPetitpierre/3D_pointclouds_super-resolution","sub_path":"load_data.py","file_name":"load_data.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"71400157921","text":"import re\nfrom typing import Tuple, List, Union\n\nfrom bs4 import BeautifulSoup\n\nimport cqwu\nfrom cqwu.types.calendar import AiCourse\n\n\nclass GetSelectedCourses:\n async def get_selected_courses(\n self: \"cqwu.Client\",\n use_model: bool = False,\n ) -> Union[str, List[AiCourse]]:\n \"\"\" 获取选课结果 \"\"\"\n jw_html = await self.login_jwmis()\n jw_host = self.get_web_vpn_host(jw_html.url)\n jw_url = f\"{jw_host}/cqwljw/student/wsxk.zxjg.jsp\"\n headers = {\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',\n 'Accept-Language': 'zh-CN,zh;q=0.9',\n 'Connection': 'keep-alive',\n 'Referer': f'{jw_host}/cqwljw/frame/homes.html',\n 'Sec-Fetch-Dest': 'iframe',\n 'Sec-Fetch-Mode': 'navigate',\n 'Sec-Fetch-Site': 'same-origin',\n 'Sec-Fetch-User': '?1',\n 'Upgrade-Insecure-Requests': '1',\n 'User-Agent': 
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36 Edg/111.0.1661.41',\n 'sec-ch-ua': '\"Microsoft Edge\";v=\"111\", \"Not(A:Brand\";v=\"8\", \"Chromium\";v=\"111\"',\n 'sec-ch-ua-mobile': '?0',\n 'sec-ch-ua-platform': '\"Windows\"',\n }\n jw_html = await self.request.get(jw_url, headers=headers, timeout=60, follow_redirects=True)\n jw_html = jw_html.text.replace(\"\"\"\"\"\", \"\")\n return (\n parse_courses(jw_html)\n if use_model\n else jw_html.replace(\"\", '')\n )\n\n\ndef format_text(text: str) -> str:\n return text.replace(\"\\n\", \"\").replace(\"\\t\", \"\").replace(\"\\r\", \"\").strip()\n\n\ndef parse_courses(jw_html: str) -> List[AiCourse]:\n courses = []\n courses_keys = []\n soup = BeautifulSoup(jw_html, \"lxml\")\n trs = soup.find_all(\"tbody\")[2].find_all(\"tr\")\n for tr in trs:\n tds = tr.find_all(\"td\")\n name = format_text(tds[1].get_text()).split(\"]\")[1]\n teacher = format_text(tds[4].get_text())\n calendars = str(tds[12]).replace(\"\\u2002\", \"\").split(\"
\")\n for calendar in calendars:\n text = (BeautifulSoup(calendar, \"lxml\")).text.strip()\n try:\n position, weeks, day, start_num, sections = parse_weeks_and_sections(text)\n except Exception:\n continue\n item = AiCourse(\n name=name,\n teacher=teacher,\n position=position,\n weeks=weeks,\n day=day,\n start_num=start_num,\n sections=sections,\n )\n if item.key not in courses_keys:\n courses_keys.append(item.key)\n courses.append(item)\n return courses\n\n\ndef parse_weeks_and_sections(text: str) -> Tuple[str, List[int], int, int, int]:\n # text: [1-3,5-17]周二[3-4节]格致-C305\n position, weeks_list, day, start_num, sections = \"\", [], 0, 0, 0\n weeks, days = text.split(\"周\")\n position = days.split(\"]\")[1]\n for week in re.findall(r\"\\d+-\\d+\", weeks):\n for i in range(int(week.split(\"-\")[0]), int(week.split(\"-\")[1]) + 1):\n weeks_list.append(i)\n day = \"一二三四五六日\".index(days[0]) + 1\n starts = re.findall(r\"\\d+-\\d+\", days)\n start_num = int(starts[0].split(\"-\")[0])\n sections = int(starts[0].split(\"-\")[1]) - start_num + 1\n return position, weeks_list, day, start_num, sections\n","repo_name":"cqwu-ehall/cqwu-ehall","sub_path":"cqwu/methods/webvpn/get_selected_courses.py","file_name":"get_selected_courses.py","file_ext":"py","file_size_in_byte":3680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73657515683","text":"import pandas as pd\nimport numpy as np\nfrom multiprocessing import Pool\nimport os\n\ndata_path = '/app2/kevin_workspace/data0304/'\nexport_path = '/app2/kevin_workspace/data0304_encrypt/'\nitem_list = [i[:i.index('.pk.gz')] for i in os.listdir(data_path)]\n\nmapper_df = pd.read_pickle('/app/python-scripts/kevin_workspace/assist_data/fea_mapper.pk.gz',compression='gzip')\nmapper_df.code_type.replace({'loc_idnt':'store_id','item_idnt':'item_code'},inplace=True)\nfeatures = list(mapper_df.code_type.unique())\n\ndef get_mapper(feat):\n\n mapper = mapper_df[mapper_df.code_type 
== feat][['key_id','value_id']]\n mapper.rename({'key_id':feat+'_new','value_id':feat},axis=1,inplace=True)\n\n return mapper\n\ndef main(item):\n\n print(\"ENCRYPTING ITEM: \" + item)\n df = pd.read_pickle(data_path + item + '.pk.gz',compression='gzip')\n df['store_id'], df['item_code'] = df['store_id'].astype(str), df['item_code'].astype(str)\n common_feat = list(set(df.columns) & set(features))\n print(\"ENCRYPT FEATURES: \" + str(common_feat))\n\n for feat in common_feat:\n print('ENCRYPTING ' + feat)\n feat_mapper = get_mapper(feat)\n df = df.merge(feat_mapper, how='left', on=feat, indicator=True)\n if 'both' not in df._merge.astype(str).unique(): print(\"NO DATA WAS ENCRYPTED >>>>>>>>>>>>>>>>>\")\n df.drop(['_merge'],axis=1,inplace=True)\n\n # DROP COLUMNS\n df.drop(common_feat, axis=1, inplace=True)\n print(\"FEATURES: \" + str(list(df.columns)))\n df.to_csv(export_path + item + '.csv.gz', header=False, index=False, compression='gzip')\n print(f\"ITEM {item} EXPORTED TO {export_path}\")\n\n# RUN MULTIPROCESSING\npool = Pool(8)\npool.map(main, item_list)\npool.close()\npool.join()\n\nprint(f\"TRAINING COMPLETED AT {pd.Timestamp().now()}\")","repo_name":"kevingao1136/9zdata_code","sub_path":"ETL_library/encrypt_data.py","file_name":"encrypt_data.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"15692318145","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# ![](https://www.revell.de/fileadmin/_processed_/csm_05210__I_RMS_TITANIC_347cc45361.jpg)\n# *Image Credit: https://www.revell.de/en/products/model-building/ships/civilian-ships/id/05210.html*\n\n# Introduction\n# ====\n# I will be doing a exploratory analysis on the Titanic dataset, and predicting which passengers survived based on the given features in the dataset.\n\n# In[30]:\n\n\n# This Python 3 environment comes with many helpful analytics libraries installed\n# It is defined by the kaggle/python 
docker image: https://github.com/kaggle/docker-python\n# For example, here's several helpful packages to load in \n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\nimport sklearn as sk # models\nimport seaborn as sns# visualizations\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.tree import DecisionTreeClassifier\nfrom matplotlib import pyplot as plt\nget_ipython().magic(u'matplotlib inline')\n\n\n# Input data files are available in the \"../input/\" directory.\n# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory\n\nimport os\nprint(os.listdir(\"../input\"))\n\n# Any results you write to the current directory are saved as output.\n\ndata_train=pd.read_csv('../input/train.csv') #Read train data\ndata_test=pd.read_csv('../input/test.csv')#Read test data\n\n\n# In[31]:\n\n\nprint(\"SHAPE\")\nprint(\"Training data: \", data_train.shape) #Examine shape of data\nprint(\"Testing data: \", data_test.shape)#Examine shape of data\nprint()\n\n#Examine first 10 rows of data\ndata_train.head(10)\n\n\n# In[32]:\n\n\ndata_test.info()\n\n\n# In[33]:\n\n\n#Check to see how many null values are in dataframe for each column.\nprint(\"NUMBER OF NULLS IN COLUMNS data_train: \")\ndata_train.isnull().sum()#Takes all null values and displays ammount for each coloumn\n\n\n# In[34]:\n\n\n#Check to see how mnay null values are in dataframe for each column.\nprint(\"NUMBER OF NULLS IN COLUMNS data_test: \")\ndata_test.isnull().sum()\n\n\n# There is only null/NaN values in three columns. 
\n# \n# * For **Age** I will probably take the average age on the ship and fill in those values.\n# * For **Embarked** I will most likely just drop those rows considering there are only two...\n# * Im not quite sure what is best to do with **Cabin**. Part of me thinks I should try to do something about the missing values, but there are quite a few missing values and that usually means it should be dropped... I will assess it further.\n# \n\n# ![Cabin Layout](http://www.visualisingdata.com/blog/wp-content/uploads/2013/04/TITANIC.jpg)\n\n# *The approximate time that the Titanic struck the iceberg was at 11:40pm, it then sank entirely at 2:20am. At these times majority of people would be in bed so I feel like it is a crucial to obtain some sort of values for **Cabin**. As you can see the first class was on the upper decks. As you go further down you have majority of second and third class on the lower decks. I think the best approach for dealing with the missing values in the **Cabin** column could potentially be to instead have the deck level 1st, 2nd, or 3rd corresponding to Upper, Middle, and Lower decks.*\n# \n# **EDIT: It seems that there is to much data missing in the Cabin feature for it to be beneicial. I initially thought that I could do some sort of feature engineering to fix this, but it did not seem to be benefical. I will remove the Cabin feature and see how much it improves.**\n# 1. Dropped Cabin Feature.\n# \n# **EDIT2: A new idea came to mind while going through other peoples kernels. It seems that the cabin numbers could be associated with a higher class, and more likely to survive. Thank you** @Nadin Tamer\n# 1. Create a boolean column named **CabinBool** showing whether or not each row inside **Cabin** column with a value survived or died.\n# \n# \n# \n# \n# \n# My next thought is that there is probably a relationship between **Fare** + **Pclass**. 
I first want to visually check for any obvious outliers in a plot...\n\n# In[35]:\n\n\n# Create CabinBool feature\ndata_train[\"CabinBool\"] = (data_train[\"Cabin\"].notnull().astype('int'))\ndata_test[\"CabinBool\"] = (data_test[\"Cabin\"].notnull().astype('int'))\n\n\n# In[36]:\n\n\nsns.lmplot(x=\"PassengerId\", y=\"Fare\", data=data_train, fit_reg=True)\n\n\n# In[37]:\n\n\ndata_train.loc[data_train['Fare'] > 300] #Show all passengers that paid more than 300\n\n\n# Not sure why they paid so much more... They all paid the same amount as well. Weird. Either way they are outliers in this data and have some unorderly information so they must go.\n\n# In[38]:\n\n\ndata_train = data_train[data_train.Fare < 300]\n\n\n# In[39]:\n\n\nsns.lmplot(x=\"PassengerId\", y=\"Fare\", data=data_train, fit_reg=True)\n\n\n# There we go that looks better!\n# \n# Now I will go ahead and drop the **Cabin** column as it has to many missing values.\n# I will also go ahead and drop the **PassengerID** column from the training set as it is not benefical for the prediction.\n\n# In[40]:\n\n\ndata_train.drop('Cabin', axis = 1, inplace = True)\ndata_test.drop('Cabin', axis = 1, inplace = True)\n\ndata_train.head() # Check to see if the replacement worked...\n\n\n# Now I have to replace the empty **Age** column values with a reasonable input. 
Lets look at the heatmap to see what effects **Age** the most.\n\n# In[41]:\n\n\n#Calculate correlations\ncorr=data_train.corr()\n\n#Heatmap\nsns.heatmap(corr, cmap=\"Blues\")\n\n\n# Heatmap analysis\n# ========\n# The correlations that stand out to me the most in relation to the **Survived** column is **Survived**+**Fare** and **Survived**+**Parch**.\n\n# In[42]:\n\n\ndata_train[\"Age\"].fillna(data_train.groupby(\"Sex\")[\"Age\"].transform(\"mean\"), inplace=True)\ndata_test['Age'].fillna(data_test.groupby('Sex')['Age'].transform(\"mean\"), inplace=True)\n\nbins = [-1, 0, 5, 12, 18, 24, 35, 60, np.inf]\nlabels = ['Unknown', 'Baby', 'Child', 'Teenager', 'Student', 'Young Adult', 'Adult', 'Senior']\ndata_train['AgeGroup'] = pd.cut(data_train[\"Age\"], bins, labels = labels)\ndata_test['AgeGroup'] = pd.cut(data_test[\"Age\"], bins, labels = labels)\n\n# Map each age value into a numerical value\nage_mapping = {'Baby': 1, 'Child': 2, 'Teenager': 3, 'Student': 4, 'Young Adult': 5, 'Adult': 6, 'Senior': 7}\ndata_train['AgeGroup'] = data_train['AgeGroup'].map(age_mapping)\ndata_test['AgeGroup'] = data_test['AgeGroup'].map(age_mapping)\n\n\n# Drop Age column from each dataset now that new column 'FareGroups' has been made.\ndata_train = data_train.drop(['Age'], axis = 1)\ndata_test = data_test.drop(['Age'], axis = 1)\n\n\n# Above I determined the missing **Age** values by taking the mean of each **Sex** value and filling them in.\n# \n# Now its time to fix the values that are missing in the **Embarked** and **Fare** columns in the train & test datatsets...\n# I will fill the **Embarked** value with \"S\" because it is the most reoccuring value in that column. 
For missing value in the **Fare** column I will replace it with the mean value.\n\n# In[43]:\n\n\ndata_train.loc[data_train.Embarked.isnull()]\n\n\n# In[44]:\n\n\ndata_train['Embarked'].fillna(\"S\", inplace = True)\ndata_test['Fare'].fillna(data_test['Fare'].mean(), inplace = True)\n \n#Check to see how many null values are in dataframe for each column.\nprint(\"NUMBER OF NULLS IN COLUMNS: \")\ndata_train.isnull().sum()\n\n\n# *Next I will go ahead and split the **Fare** feature into groupings.\n# This idea was originally presented to me by Nadin Tamer, Thank you! (https://www.kaggle.com/nadintamer/titanic-survival-predictions-beginner)*\n# \n# **EDIT**: This actually wound up lowering my previous score. Will keep it in for future use if needed, but did not actually implement what was mentioned above. **Fare** feature remains the same as prior\n\n# In[45]:\n\n\n# Split Fare column in each dataset into four different labels.\ndata_train['FareGroups'] = pd.qcut(data_train['Fare'], 4, labels = [1, 2, 3, 4])\ndata_test['FareGroups'] = pd.qcut(data_test['Fare'], 4, labels = [1, 2, 3, 4])\n\n\n# Drop Fare column from each dataset now that new column 'FareGroups' has been made.\ndata_train = data_train.drop(['Fare'], axis = 1)\ndata_test = data_test.drop(['Fare'], axis = 1)\n\n\n# There we go! 
No empty or out of the ordinary data.\n# \n# The last thing we need to do is convert the **Embarked** and **Sex** columns into numerical values.\n# I will also be dropping the **PassengerID, Name,** and **Ticket** features as they have do not have a large coorelation to the survival rate.\n\n# In[46]:\n\n\ndata_train = pd.get_dummies(data_train, columns=['Sex', 'Embarked'], drop_first=True)\ndata_test = pd.get_dummies(data_test, columns=['Sex', 'Embarked'], drop_first=True)\n\ndata_train = data_train.drop([\"PassengerId\",\"Name\",\"Ticket\"], axis=1)\ndata_test = data_test.drop(['Name','Ticket'], axis=1)\ndata_test.tail()\n\n\n# I am going to go ahead and take one last look at each of the datasets before we move on to the modeling to make sure everything looks good.\n\n# In[47]:\n\n\ndata_train.head()\n\n\n# In[48]:\n\n\ndata_test.tail()\n\n\n# Now it is time to begin testing different models!\n# I need to split my training data into different variables and create a variable for my test data for fitting to the models.\n\n# In[49]:\n\n\nX_train= data_train.drop([\"Survived\"], axis=1)\nY_train= data_train.Survived\nX_test= data_test.drop(['PassengerId'], axis=1).copy()\n\nX_train.shape, Y_train.shape, X_test.shape\n\n\n# In[50]:\n\n\n# Logistic Regression\nlogreg = LogisticRegression()\nlogreg.fit(X_train, Y_train)\nY_pred = logreg.predict(X_test)\nacc_log = round(logreg.score(X_train, Y_train) * 100, 2)\nacc_log\n\n\n# In[51]:\n\n\nrandom_forest = RandomForestClassifier()\nrandom_forest.fit(X_train, Y_train)\nY_pred = random_forest.predict(X_test)\nacc_random_forest = round(random_forest.score(X_train, Y_train) * 100, 2)\nacc_random_forest\n\n\n# In[52]:\n\n\nsvc = SVC()\nsvc.fit(X_train, Y_train)\nY_pred = svc.predict(X_test)\nacc_svc = round(svc.score(X_train, Y_train) * 100, 2)\nacc_svc\n\n\n# In[53]:\n\n\ndecision_tree = DecisionTreeClassifier()\ndecision_tree.fit(X_train, Y_train)\nY_pred_sub = decision_tree.predict(X_test)\nacc_decision_tree = 
round(decision_tree.score(X_train, Y_train) * 100, 2)\nacc_decision_tree\n\n\n# In[54]:\n\n\nknn = KNeighborsClassifier()\nknn.fit(X_train, Y_train)\nY_pred = knn.predict(X_test)\nacc_knn = round(knn.score(X_train, Y_train) * 100, 2)\nacc_knn\n\n\n# In[55]:\n\n\nnaive_bayes = GaussianNB()\nnaive_bayes.fit(X_train, Y_train)\nY_pred = naive_bayes.predict(X_test)\nacc_naive_bayes = round(naive_bayes.score(X_train, Y_train) * 100, 2)\nacc_naive_bayes\n\n\n# In[56]:\n\n\n#from xgboost import XGBClassifier\n\n#xgb = XGBClassifier(n_estimators=200)\n#xgb.fit(X_train, Y_train)\n#Y_pred = xgb.predict(X_test)\n#acc_xgb = round(xgb.score(X_train, Y_train)*100, 2)\n#acc_xgb\n\n\n# In[57]:\n\n\nmodels = pd.DataFrame({\n 'Model': ['Support Vector Machines', 'KNN', 'Logistic Regression', \n 'Random Forest', 'Naive Bayes', 'Decision Tree'], \n \n 'Score': [acc_svc, acc_knn, acc_log, acc_random_forest, acc_naive_bayes, acc_decision_tree]})\n\nmodels.sort_values(by='Score', ascending=False)\n\n\n# Looking at the models it looks like **Decision Tree** scores the best! With **Random Forest** close behind it. 
\n# In previous submissions I have used **Decision Tree**, but for this submission I will try **Random Forest** to see if it will result in a higher prediction score.\n\n# Time to submit.\n\n# In[58]:\n\n\nsubmission = pd.DataFrame({\"PassengerId\": data_test[\"PassengerId\"],\n \"Survived\": Y_pred_sub\n })\nsubmission.to_csv('submit.csv', index=False)\n\n\n# Finished!\n# =====\n","repo_name":"nischalshrestha/automatic_wat_discovery","sub_path":"Notebooks/py/christianbh/titanic-analysis-and-survival-prediction/titanic-analysis-and-survival-prediction.py","file_name":"titanic-analysis-and-survival-prediction.py","file_ext":"py","file_size_in_byte":11771,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"2534333783","text":"# app.py\nimport base64\nimport io\nimport json\nimport torch\nimport torchvision.transforms as transforms\nfrom PIL import Image\nfrom flask import Flask, render_template, request\nfrom torch.nn.functional import softmax\n\napp = Flask(__name__)\nmodel = torch.load(\"mnist.pth\").to('cpu')\ntorch.no_grad()\n\n\ndef pic_pred(data):\n global model\n with torch.no_grad():\n image = Image.open(io.BytesIO(data))\n r, g, b, a = image.split()\n lr = a.point(lambda i: 255 if i > 0 else 0)\n image = Image.merge('RGB', (lr, lr, lr)).convert('1')\n image = image.resize((28, 28))\n inp = transforms.Compose([transforms.ToTensor()])(image)\n predict = softmax(model(inp), dim=1)\n pred = torch.argmax(predict).item()\n probability = predict[0][pred].item()\n return pred, probability\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/image', methods=['POST']) # 定义一个接收图片的路由,只允许POST方法\ndef receive_image():\n url = request.get_json()\n if url:\n data = url.replace(\"data:image/png;base64,\", \"\")\n data = base64.b64decode(data)\n label, p = pic_pred(data)\n print(label, p)\n return json.dumps({'label': label, 'p': p})\n else:\n return \"Image not 
received.\"\n\n\nif __name__ == '__main__':\n app.run(debug=True) # 运行flask应用\n","repo_name":"moslian-off/A-Web-App-to-Recognizing-Handwritten-Numbers","sub_path":"visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"14182380861","text":"from calendar import c\nimport os\nfrom threading import Thread\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport cv2 as cv\nfrom sqlalchemy import false\n\n# start X index to search for edge\nSEED_X = 70\n# start Y index to search for edge\nSEED_Y = 100\n\nEDGE_X = 20\nEDGE_Y = 100\n# Threshold of value when a pixel is considered an edge\nTHRESHOLD = 70\n# the Input Picture path\nPICTURE_INPUT = \"Rauschbild.png\"\n\nUSE_BOUNDARY_FILL = True\n\ndef is_in_bounds(img, pixel):\n if pixel[0] < 0 or pixel[0] >= img.shape[1] or pixel[1] < 0 or pixel[1] >= img.shape[0]:\n return False\n return True\n\ndef boundary_fill(img, seed, edge_seed, edge_threshold):\n painted_img = np.zeros((img.shape[0], img.shape[1], 3)).astype(int)\n for i in range(img.shape[0]):\n for j in range(img.shape[1]):\n painted_img[i,j] = np.array([img[i,j], img[i,j], img[i,j]])\n stack = [edge_seed]\n edge = []\n while len(stack) > 0:\n pixel = stack.pop()\n if is_in_bounds(img, pixel) and img[pixel[1], pixel[0]] < edge_threshold and pixel not in edge:\n edge.append(pixel)\n for x in range(pixel[0]-1, pixel[0]+2):\n for y in range(pixel[1]-1, pixel[1]+2):\n if x == pixel[0] and y == pixel[1]:\n continue\n stack.append((x, y))\n region = []\n region_stack = [seed]\n while len(region_stack)> 0:\n pixel = region_stack.pop()\n if is_in_bounds(img, pixel) and pixel not in edge and pixel not in region:\n region.append(pixel)\n for x in range(pixel[0]-1, pixel[0]+2):\n for y in range(pixel[1]-1, pixel[1]+2):\n if x == pixel[0] and y == pixel[1]:\n continue\n region_stack.append((x, y))\n for r in region:\n painted_img[r[1], 
r[0]] = np.array([255,0,0])\n for e in edge:\n painted_img[e[1], e[0]] = np.array([0,0,255])\n return painted_img\n\n\n\ndef paint_area(img, seed, threshold):\n # create a copy of the image in rgb\n painted_img = np.zeros((img.shape[0], img.shape[1], 3)).astype(int)\n for i in range(img.shape[0]):\n for j in range(img.shape[1]):\n painted_img[i,j] = np.array([img[i,j], img[i,j], img[i,j]])\n # used for saving the region\n region = [seed]\n # stack for visiting neighbours\n visited = [seed]\n print(\"Finding region\")\n while len(visited) > 0:\n x,y = visited.pop()\n painted_img[y,x] = np.array([255,0,0])\n for i in range(y-1, y+2):\n for j in range(x-1, x+2):\n if i >= 0 and i < img.shape[0] and j >= 0 and j < img.shape[1]:\n if img[i,j] > threshold:\n if not np.array_equal(painted_img[i,j], np.array([255,0,0])):\n painted_img[i,j] = np.array([255,0,0])\n if (j, i) not in region:\n region.append((j, i))\n visited.append((j,i))\n print(\"Finding region edges\")\n regionEdge = []\n for r in region:\n found_edge = False\n for i in range(r[0]-1, r[0]+2):\n for j in range(r[1]-1, r[1]+2):\n if i == r[0] and j == r[1]:\n continue\n if (i,j) not in region:\n found_edge = True\n break\n if found_edge:\n break\n if found_edge:\n regionEdge.append(r)\n print(\"Printing Edge Region\")\n for r in regionEdge:\n painted_img[r[1], r[0]] = np.array([0,0,255])\n return painted_img\n\n\ndef main():\n # read in image\n img = plt.imread(os.path.join(os.path.dirname(__file__), PICTURE_INPUT))\n if img.ndim == 3:\n img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n if img.max() <= 1:\n img = img*255\n plt.subplot(221)\n plt.imshow(img, cmap=\"gray\")\n plt.title(\"Original\")\n plt.subplot(222)\n if USE_BOUNDARY_FILL:\n plt.imshow(boundary_fill(img, (SEED_X,SEED_Y), (EDGE_X, EDGE_Y), THRESHOLD).astype(int))\n else:\n plt.imshow(paint_area(img, (SEED_X,SEED_Y), THRESHOLD).astype(int))\n plt.title(\"Painted\")\n plt.show()\n\n\nif __name__ == \"__main__\":\n 
main()","repo_name":"scorixear/bildverarbeitung","sub_path":"5/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":3802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"36670299281","text":"import numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport statsmodels.api as sm\nfrom scipy.stats import shapiro, kstest\nimport math\nfrom scipy.stats import chi2, norm\nimport tqdm\nimport matplotlib.pyplot as plt\nfrom gessaman.gessaman import Gessaman\nfrom gessaman.utils import nn_estimator\nfrom glob import glob\nimport os\nimport warnings\n\nwarnings.filterwarnings(\"ignore\")\n\n\ndef closest(lst, k):\n return lst[min(range(len(lst)), key=lambda i: abs(lst[i] - k))]\n\n\ndef del_activation_files():\n for f in glob(\"/tmp/*.txt\"):\n os.remove(f)\n\n\ndef r2_score(y_test, predictions, y_hat):\n return 1 - np.mean((predictions - y_test) ** 2) / np.mean((y_test - y_hat) ** 2)\n\n\ndef data_sigmoid(nb_row, sigma2):\n x = np.array([[1 / n * i] for i in range(0, nb_row)])\n y = np.sqrt(x * (1 - x)) * np.sin((2 * math.pi * 1.05) / (x + 0.05)) + 0.5\n y = y.flatten()\n y += np.random.normal(0, math.sqrt(sigma2), nb_row)\n\n return x, y\n\n\ndef data_rulefit(nb_cols, nb_row, sigma2):\n x = np.random.randint(10, size=(nb_row, nb_cols))\n x = x / 10.0\n\n y_true = 0.8 * np.exp(-2 * (x[:, 0] - x[:, 1])) + 2 * np.sin(math.pi * x[:, 2]) ** 2\n y = y_true + np.random.normal(0, math.sqrt(sigma2), nb_row)\n\n return x, y\n\n\ndef data_flag(nb_cols, nb_row, sigma2):\n x = np.random.uniform(low=-1, high=1, size=(nb_row, nb_cols))\n th_min = -0.4\n th_max = 0.4\n y_true = [\n -2 if x_val <= th_min else 0 if x_val <= th_max else 2 for x_val in x[:, 0]\n ]\n y = y_true + np.random.normal(0, math.sqrt(sigma2), nb_row)\n\n return x, y\n\n\ndef data_diag(nb_cols, nb_row, sigma2):\n assert nb_cols >= 2, \"You need at least 2 features for the diagonal simulations\"\n x = np.random.uniform(low=-1, high=1, 
size=(nb_row, nb_cols))\n th_min = -0.4\n th_max = 0.4\n y_true = [\n -2\n if x_val[0] + x_val[1] <= th_min\n else 0\n if x_val[0] + x_val[1] <= th_max\n else 2\n for x_val in x\n ]\n y = y_true + np.random.normal(0, math.sqrt(sigma2), nb_row)\n\n return x, y\n\n\ndef data_circle(nb_cols, nb_row, sigma2):\n assert nb_cols >= 2, \"You need at least 2 features for the diagonal simulations\"\n x = np.random.uniform(low=-1, high=1, size=(nb_row, nb_cols))\n th_min = 0.5\n th_max = 0.8\n y_true = [\n -2\n if x_val[0] ** 2 + x_val[1] ** 2 <= th_min\n else 0\n if x_val[0] ** 2 + x_val[1] ** 2 <= th_max\n else 2\n for x_val in x\n ]\n y = y_true + np.random.normal(0, math.sqrt(sigma2), nb_row)\n\n return x, y\n\n\ndef data_plateau(nb_cols, nb_row, sigma2):\n dmin = -1\n dmax = 1\n r = dmax - dmin\n x = np.random.uniform(low=dmin, high=dmax, size=(nb_row, nb_cols))\n v_max = dmin + r / (2 ** (1 / nb_cols))\n y_true = [1 if x_val == nb_cols else 0 for x_val in (x <= v_max).sum(1)]\n y = y_true + np.random.normal(0, math.sqrt(sigma2), nb_row)\n\n return x, y\n\n\ndef calc_chi2_approx(p):\n chi2_approx = chi2.ppf(1 - (1 / 2 ** (1 / (np.ceil(n / p) - 1))), p - 1) / p - 1\n return chi2_approx\n\n\ndef calc_normal_approx(p, n, noise):\n factor = math.sqrt(2) * noise\n normal_approx = norm.ppf(1 - (1 / 2) ** (1 / (np.ceil(n / p) - 1)))\n normal_approx = noise + factor / math.sqrt(p) * normal_approx\n return normal_approx\n\n\ndef calc_logit_approx(p, n, noise):\n factor = math.sqrt(2) * noise\n logit = math.log(2 ** (1 / (np.ceil(n / p) - 1)) - 1)\n logit *= math.sqrt(math.pi / 8)\n logit *= factor / math.sqrt(p)\n return noise + logit\n\n\ndef unbiased_gessaman(p, n, estimator, var):\n f = norm.ppf(1 - (1 / 2) ** (1 / (np.ceil(n / p) - 1)))\n # deno = 1 + math.sqrt(2/p) * f\n # return m / deno\n return estimator - np.sqrt(var) * f\n\n\ndef unbiased_gessaman_old(p, n, m, var):\n f = norm.ppf(1 - (1 / 2) ** (1 / (np.ceil(n / p) - 1)))\n deno = 1 + math.sqrt(2/p) * f\n return m / 
deno\n # return m - var ** (1/2) * f\n\n\ndef find_gap(s):\n # gap = None\n # s -= np.average(s)\n # step = np.hstack((np.ones(len(s)), -1 * np.ones(len(s))))\n # s_step = np.convolve(s, step, mode='valid')\n # gap = np.argmax(s_step) # TODO Find the first gap not the max !!!!\n\n s_diff = np.diff(s) / s[1:]\n # sub_diff = np.extract(s_diff > 0, s_diff)\n # gap_tests = list(s_diff > np.mean(sub_diff))\n temp = np.cumsum(s_diff) / np.array(range(1, len(s_diff) + 1))\n gap_tests = np.diff(temp) > 0\n if True in gap_tests:\n gaps = np.where(gap_tests)[0]\n for gap in gaps:\n if s_diff[gap + 2] == 0:\n return gap\n return -1\n else:\n return -1\n # gap = np.argmax(s_diff)\n # s_cum_mean = np.cumsum(s_diff) / np.array(range(1, len(s_diff)+1))\n # gaps = np.diff(s_cum_mean) < 0 # [g > m for g, m in zip(s_diff, s_cum_mean)]\n # gapidentifier un saut dans les donnéess_cumsum = list(np.cumsum(gaps))\n # gaps_cumsum = moving_average(np.round(s_diff, 2) >= 0, w)\n # plateau = s_diff == 0\n # gap_list = [False] + (s_diff > s_diff[0])[:-1]\n # gaps_cumsum = moving_average(plateau, w)\n # temp = [a*b for a, b in zip(gaps_cumsum, gap_list)]\n # if w in temp:\n # # gap = list(gaps_cumsum).index(w)\n # gap = temp.index(w)\n\n\ndef find_best_estimator(s, w):\n gap = find_gap(s, w)\n if gap is None:\n print('Last')\n return -1\n else:\n print(gap)\n return gap # No -1 because the gap is calculated on the diff, hence we drop 1 dimension.\n\n\ndef extract_partition(rs, r_id):\n sub_rs = np.extract([len(r) == len(rs[r_id]) for r in rs], rs)\n sub_rs = np.extract([r.condition.features_indexes == rs[r_id].condition.features_indexes for r in sub_rs], sub_rs)\n return sub_rs\n\n\ndef do_qqplot(data, data_type, d):\n fig = sm.qqplot(data, line='45')\n fig.savefig(\n f\"/home/vmargot/Documents/Jussieu/new/{data_type}_{n}_d={d}_qqplot\",\n format=\"svg\",\n dpi=300,\n )\n plt.close(fig)\n\n\ndef do_box_plots(sigma2_ratio_df, title, data_type, d, k_list):\n alpha = d / (2 * (d + 2))\n cov_min = 
k_list.index(closest(k_list, n ** (1-alpha)))\n\n fig = plt.figure(figsize=(25, 17))\n sns.boxplot(x=\"% of points\", y=\"Sigma2 ratio\", hue=\"Type\", data=sigma2_ratio_df)\n plt.xlabel(\"% of points in each cell\", fontsize=20)\n plt.ylabel(\"Sigma2 ratio\", fontsize=20)\n plt.xticks(fontsize=10, rotation=45)\n plt.yticks(fontsize=10)\n plt.title(title, fontsize=20)\n plt.legend(loc=\"upper left\", fontsize=15)\n plt.axhline(y=0, color=\"r\", linestyle=\":\")\n plt.axvline(x=cov_min, color=\"r\", linestyle=\":\")\n fig.savefig(\n f\"/home/vmargot/Documents/Jussieu/new/{data_type}_{n}_d={d}_sigma2ratio\",\n format=\"svg\",\n dpi=300,\n )\n plt.close(fig)\n\n\ndef do_approx_graph(sigma2_ratio_df, k_list, title, data_type, n, d, do_approx):\n alpha = d / (2 * (d + 2))\n cov_min = k_list.index(closest(k_list, n ** (1-alpha)))\n\n temp = sigma2_ratio_df.loc[sigma2_ratio_df[\"Type\"] == \"Gessaman\"][\n [\"Sigma2 ratio\", \"% of points\"]\n ].astype(float)\n m = temp.groupby(\"% of points\")[\"Sigma2 ratio\"].median()\n\n temp = sigma2_ratio_df.loc[sigma2_ratio_df[\"Type\"] == \"NN on sample\"][\n [\"Sigma2 ratio\", \"% of points\"]\n ].astype(float)\n m2 = temp.groupby(\"% of points\")[\"Sigma2 ratio\"].median()\n q1 = temp.groupby(\"% of points\")[\"Sigma2 ratio\"].quantile(0.25)\n q3 = temp.groupby(\"% of points\")[\"Sigma2 ratio\"].quantile(0.75)\n\n alpha = 1 / 2\n th_min = k_list.index(closest(k_list, n ** (1-alpha)))\n\n sigma2_ratio_df = sigma2_ratio_df.loc[sigma2_ratio_df[\"Type\"] == \"Gessaman\"]\n fig = plt.figure(figsize=(25, 17))\n sns.boxplot(x=\"% of points\", y=\"Sigma2 ratio\", hue=\"Type\", data=sigma2_ratio_df)\n plt.axhline(y=0, color=\"k\", linestyle=\":\")\n plt.axvline(x=cov_min, color=\"r\", linestyle=\":\", label=\"a = d/(2(d+2))\")\n plt.axvline(x=th_min, color=\"g\", linestyle=\":\", label=\"a = 1/2\")\n plt.plot(m.values, \"--\", linewidth=1, color='black', label=\"Gessaman median\")\n plt.plot(m2.values, \"--\", linewidth=1, 
color=\"orange\", label=\"1-NN median\")\n plt.plot(q1.values, \":\", linewidth=1, color=\"orange\")\n plt.plot(q3.values, \":\", linewidth=1, color=\"orange\")\n if do_approx:\n # sigma2_estimates_m = (m + 1) * noise\n # sigma2_gessaman = (sigma2_ratio_df['Sigma2 ratio'] + 1) * noise\n\n vals_chi2 = [calc_chi2_approx(p) for p in k_list]\n vals_normal = [calc_normal_approx(p, n, noise) / noise - 1 for p in k_list]\n vals_logit = [calc_logit_approx(p, n, noise) / noise - 1 for p in k_list]\n # sigma2_approx = [unbiased_gessaman(p, n, m, np.var(sigma2_gessaman)) / noise - 1\n # for p, m in zip(k_list, sigma2_estimates_m.values)]\n\n plt.plot(vals_chi2, linestyle=(0, (5, 2, 1, 2)), linewidth=1, color=\"red\", label=\"Chi2\")\n plt.plot(vals_logit, linestyle=(0, (5, 2, 1, 2)), linewidth=1, color=\"green\", label=\"Logit\")\n plt.plot(vals_normal, linestyle=(0, (5, 2, 1, 2)), linewidth=1, color=\"blue\", label=\"Normal\")\n\n # plt.plot(sigma2_approx, linewidth=1.5, color=\"c\", label=\"Gessaman unbiased (med)\")\n\n plt.xlabel(\"% of points in each cell\", fontsize=20)\n plt.ylabel(\"Sigma2 ratio\", fontsize=20)\n plt.xticks(fontsize=10, rotation=45)\n plt.yticks(fontsize=10)\n plt.title(title, fontsize=20)\n plt.legend(loc=\"lower right\", fontsize=15)\n fig.savefig(\n f\"/home/vmargot/Documents/Jussieu/new/{data_type}_{n}_d={d}_sigma2ratio_approx\",\n format=\"svg\",\n dpi=300,\n )\n plt.close(fig)\n\n\ndef do_graph(n, noise, step, nb_simu, data_type, d=1, do_approx=False):\n # min_points = int(n * step)\n min_points = int(n**(1/2) + 1)\n k_list = list(range(min_points, int(n / 2), int(n * step)))\n\n types_list = [\"Gessaman\", \"Gessaman unbiased\", \"NN on rule\", \"NN on sample\"]\n resume_df_list = []\n g_unbiased_estim_list = []\n\n title = f\"Points sensitivity for {data_type} dataset with sigma2={noise}, n={n} and for {nb_simu} simulations\"\n for i in tqdm.tqdm(range(nb_simu)):\n resume_df = pd.DataFrame(index=list(range(len(k_list) * len(types_list))),\n 
columns=[\"% of points\", \"Sigma2 ratio\", \"Type\"])\n types = types_list * len(k_list)\n resume_df['Type'] = np.sort(types)\n pct_points = list(np.round(np.array(k_list) / n * 100, 2))\n\n resume_df[\"% of points\"] = pct_points * len(types_list)\n\n nn_estimate_on_rule_list = []\n gessaman_estimator_list = []\n gessaman_estimator_var = []\n pts_partition = []\n\n if data_type == \"sigmoid\":\n x, y = data_sigmoid(n, noise)\n elif data_type == \"plateau\":\n x, y = data_plateau(d, n, noise)\n elif data_type == \"rulefit\":\n x, y = data_rulefit(d, n, noise)\n elif data_type == \"flag\":\n x, y = data_flag(d, n, noise)\n elif data_type == \"diag\":\n x, y = data_diag(d, n, noise)\n elif data_type == \"circle\":\n x, y = data_circle(d, n, noise)\n else:\n raise \"Not implemented data_type\"\n\n for k in k_list:\n g = Gessaman(k=k, nb_jobs=4, verbose=False)\n g.fit(x, y, False)\n suitable_rs = g.significant_rs + g.insignificant_rs\n noise_estimators = [rule.std ** 2 for rule in suitable_rs]\n gessaman_estimate = min(noise_estimators)\n gessaman_estimator_list.append(gessaman_estimate)\n\n partition = extract_partition(suitable_rs, noise_estimators.index(gessaman_estimate))\n gessaman_estimator_var.append(np.var([rule.std ** 2 for rule in partition]))\n\n pts_partition.append(len(partition))\n nn_estimate_on_rule = min([rule._nn_estimate for rule in g.ruleset])\n nn_estimate_on_rule_list.append(nn_estimate_on_rule)\n\n best_id = find_gap(gessaman_estimator_list)\n print('Index of the estimator: ', str(best_id))\n g_unbiased_estim = unbiased_gessaman(p=k_list[best_id], n=n, estimator=gessaman_estimator_list[best_id],\n var=np.var(noise_estimators))\n # var=gessaman_estimator_var[best_id])\n g_unbiased_estim_list.append(g_unbiased_estim)\n\n neigh_estimator = nn_estimator.calc_1nn_noise_estimator(x, y)\n\n sigma2_ratio_list = list(np.array(gessaman_estimator_list) / noise - 1) +\\\n [g_unbiased_estim / noise - 1] * len(k_list)\\\n + 
list(np.array(nn_estimate_on_rule_list) / noise - 1) +\\\n [neigh_estimator / noise - 1] * len(k_list)\n resume_df[\"Sigma2 ratio\"] = sigma2_ratio_list\n resume_df_list.append(resume_df)\n\n full_df = pd.concat(resume_df_list)\n do_box_plots(full_df, title, data_type, d=d, k_list=k_list)\n do_approx_graph(full_df, k_list, title, data_type, n, d, do_approx)\n\n del_activation_files()\n\n\nif __name__ == \"__main__\":\n nb_simu = 10\n step = 0.02\n n_list = [1125, 2500, 5000] # , 10000] # , 20000]\n # n_list = [2500]\n exp_list = ['sigmoid', 'plateau', 'rulefit', 'flag', 'diag', 'circle']\n # exp_list = ['sigmoid']\n for exp in exp_list:\n # Choose among ['sigmoid', 'plateau', 'rulefit', 'flag', 'diag', 'circle']\n # The noise has been set to have a signal / noise ratio > 2\n if exp == 'sigmoid':\n noise = 0.05\n for n in n_list:\n do_graph(n, noise, step, nb_simu, \"sigmoid\")\n\n if exp == 'plateau':\n d_list = [2, 3, 5] # , 8, 10]\n # d_list = [3]\n noise = 0.125\n for d in d_list:\n for n in n_list:\n do_graph(n, noise, step, nb_simu, exp, d=d, do_approx=True)\n\n if exp == 'rulefit':\n d = 5\n noise = 0.3\n for n in n_list:\n do_graph(n, noise, step, nb_simu, exp, d)\n\n if exp == 'flag':\n d = 3\n noise = 1.0\n for n in n_list:\n do_graph(n, noise, step, nb_simu, exp, d)\n\n if exp == 'diag':\n d = 3\n noise = 1.2\n for n in n_list:\n do_graph(n, noise, step, nb_simu, exp, d)\n\n if exp == 'circle':\n d = 3\n noise = 1.5\n for n in n_list:\n do_graph(n, noise, step, nb_simu, \"circle\", d)\n","repo_name":"VMargot/gessaman","sub_path":"points_sensitivity_graphs.py","file_name":"points_sensitivity_graphs.py","file_ext":"py","file_size_in_byte":14395,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"34499872724","text":"\"\"\"\nRun-length encoding is a fast and simple method of encoding strings. The basic idea is to represent repeated successive\ncharacters as a single count and character. 
For example, the string \"AAAABBBCCDAA\" would be encoded as \"4A3B2C1D2A\".\n\"\"\"\n\ndef encode(s: str):\n ret = \"\"\n count = 0\n i = 0\n while(i < len(s)):\n c = s[i]\n while(i+1 < len(s) and s[i] == s[i+1]):\n print(s[i])\n count += 1\n i = i+1\n if count > 0:\n ret = ret + c + str(count)\n else:\n ret = ret + c\n count = 0\n i = i +1\n print(ret)\n return ret\n\ndef decode(s: str):\n ret = \"\"\n char = None\n count = -1\n i = 0\n # A3B2C1DA1\n while(i < len(s)):\n char = s[i]\n print(char)\n i = i+1\n c = s[i]\n if c > '0' and c <'9':\n count = int(c)\n ret = ret + char + char * count\n i = i+1\n else:\n ret = ret + char\n return ret\n\nprint(\"AAAABBBCCDAA\" == decode(encode(\"AAAABBBCCDAA\")))\n","repo_name":"gyan42/interview-preparation-qna","sub_path":"python/string/run_length_enc_dec.py","file_name":"run_length_enc_dec.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"23121210382","text":"#!/usr/bin/env python\n\n# Built-in modules\nfrom datetime import datetime\nimport sys\n\n# 3ed party modules\n\n# Local modules\n\nmessage = \"\"\n\n\ndef log(mes, log_w_timestamp=True):\n # note - send_email uses message\n global message\n\n date = datetime.today().strftime(\"%y-%m-%d-(%H:%M:%S)\")\n if log_w_timestamp:\n str = f\"{date} - {mes}\"\n else:\n str = mes\n message = f\"{message}{str}\\n\"\n\n str = str.rstrip()\n print(str)\n sys.stdout.flush()\n sys.stderr.flush()\n\n\nif __name__ == \"__main__\":\n log(\"foo\")\n","repo_name":"Avilir/vm-backup","sub_path":"logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72102390562","text":"ANIM = False\n\nqr_bytes = [\n\t16, 28, 44, 64, 86, 108, 124, 154, 182, 216, 254, 290, 334, 365, 415, 453, 507, 563, 627,\n\t669, 714, 782, 860, 914, 1000, 1062, 1128, 1193, 1267, 1373, 
1455, 1541, 1631, 1725, 1812, 1914, 1992, 2102, 2216, 2334,\n]\nqr_bytes_ex = [\n\t2434, 2566, 2702, 2812, 2956,\n]\n\nSWIRL = None\n\ndef to_qr(s, rainbow=False):\n\tglobal SWIRL\n\tif type(s) is str:\n\t\ts = s.encode(\"utf-8\")\n\tsize = len(s)\n\terr = \"M\" if size <= 2334 else \"L\"\n\tver = None\n\t# if size > 125:\n\t#\t if size <= 2334:\n\t#\t\t for i, n in enumerate(qr_bytes):\n\t#\t\t\t if n >= size:\n\t#\t\t\t\t ver = i + 1\n\t#\t\t\t\t break\n\t#\t if ver is None:\n\t#\t\t for i, n in enumerate(qr_bytes_ex):\n\t#\t\t\t if n >= size:\n\t#\t\t\t\t ver = i + 36\n\t#\t\t\t\t err = \"L\"\n\t#\t if ver is None:\n\t#\t\t raise OverflowError(\"Input string too large for QR code encoding.\")\n\timport pyqrcode\n\timg = pyqrcode.create(s, error=err, version=ver, mode=None, encoding=\"utf-8\" if max(s) >= 80 else \"ascii\")\n\tfn = f\"cache/{time.time_ns() // 1000}.png\"\n\tif not os.path.exists(fn):\n\t\timg.png(fn, scale=1, module_color=(255,) * 3, background=(0,) * 4)\n\timo = Image.open(fn)\n\tim = imo.convert(\"1\")\n\timo.close()\n\tim = im.resize((512, 512), resample=Resampling.NEAREST)\n\tif rainbow:\n\t\tif SWIRL is None:\n\t\t\timo = Image.open(\"misc/swirl.png\")\n\t\t\tSWIRL = imo.resize((512, 512), resample=Resampling.BILINEAR)\n\t\t\timo.close()\n\t\tcount = 128\n\n\t\tdef qr_iterator(image):\n\t\t\tfilt1 = filt2 = SWIRL\n\t\t\t# spl = hsv_split(SWIRL, convert=False)\n\t\t\tspl = SWIRL.convert(\"HSV\").split()\n\t\t\tfor i in range(count):\n\t\t\t\tif i:\n\t\t\t\t\t# hue1 = spl[0] + round(i * 256 / count)\n\t\t\t\t\t# hue2 = spl[0] - round(i * 256 / count)\n\t\t\t\t\t# filt1 = hsv_merge(hue1, *spl[1:])\n\t\t\t\t\t# filt2 = hsv_merge(hue2, *spl[1:])\n\t\t\t\t\thue1 = spl[0].point(lambda x: round(x + 256 * i / count) & 255)\n\t\t\t\t\thue2 = spl[0].point(lambda x: round(x - 256 * i / count) & 255)\n\t\t\t\t\tfilt1 = Image.merge(\"HSV\", (hue1, spl[1], spl[2])).convert(\"RGB\")\n\t\t\t\t\tfilt2 = Image.merge(\"HSV\", (hue2, spl[1], 
spl[2])).convert(\"RGB\")\n\t\t\t\tfilt1 = ImageEnhance.Brightness(ImageEnhance.Contrast(filt1).enhance(0.5)).enhance(2)\n\t\t\t\tfilt2 = ImageChops.invert(ImageEnhance.Brightness(ImageEnhance.Contrast(filt2).enhance(0.5)).enhance(2)).transpose(Transpose.FLIP_LEFT_RIGHT)\n\t\t\t\tfilt1.paste(filt2, mask=image)\n\t\t\t\tyield filt1\n\n\t\treturn dict(duration=4800, count=count, frames=qr_iterator(im))\n\treturn ImageChops.invert(im).convert(\"RGBA\")\n\n\ndef rainbow_gif2(image, duration):\n\ttotal = 0\n\tfor f in range(2147483648):\n\t\ttry:\n\t\t\timage.seek(f)\n\t\texcept EOFError:\n\t\t\tbreak\n\t\ttotal += max(image.info.get(\"duration\", 0), 50)\n\tlength = f\n\tloops = total / duration / 1000\n\tscale = 1\n\twhile abs(loops * scale) < 1:\n\t\tscale <<= 1\n\t\tif length * scale >= 64:\n\t\t\tloops = 1 if loops >= 0 else -1\n\t\t\tbreak\n\tloops = round(loops * scale) / scale\n\tif abs(loops) < 1:\n\t\tloops = 1 if loops >= 0 else -1\n\tsize = image.size\n\t# print(image, length, scale, loops)\n\n\tdef rainbow_gif_iterator(image):\n\t\tfor f in range(length * scale):\n\t\t\timage.seek(f % length)\n\t\t\tif str(image.mode) == \"P\":\n\t\t\t\ttemp = image.convert(\"RGBA\")\n\t\t\telse:\n\t\t\t\ttemp = image\n\t\t\tif str(image.mode) == \"RGBA\":\n\t\t\t\tA = temp.getchannel(\"A\")\n\t\t\telse:\n\t\t\t\tA = None\n\t\t\tif temp.size[0] != size[0] or temp.size[1] != size[1]:\n\t\t\t\ttemp = temp.resize(size, Resampling.HAMMING)\n\t\t\tchannels = list(temp.convert(\"HSV\").split())\n\t\t\t# channels = hsv_split(temp, convert=False)\n\t\t\t# hue = channels[0] + round(f / length / scale * loops * 256)\n\t\t\t# temp = hsv_merge(hue, *channels[1:])\n\t\t\tchannels[0] = channels[0].point(lambda x: int(((f / length / scale * loops + x / 256) % 1) * 256))\n\t\t\ttemp = Image.merge(\"HSV\", channels).convert(\"RGB\")\n\t\t\tif A:\n\t\t\t\ttemp.putalpha(A)\n\t\t\tyield temp\n\n\treturn dict(duration=total * scale, count=length * scale, 
frames=rainbow_gif_iterator(image))\n\ndef rainbow_gif(image, duration):\n\ttry:\n\t\timage.seek(1)\n\texcept EOFError:\n\t\timage.seek(0)\n\telse:\n\t\treturn rainbow_gif2(image, duration)\n\t# image = resize_max(image, 960, resample=Image.HAMMING)\n\t# size = list(image.size)\n\tsize = image.size\n\tif duration == 0:\n\t\tfps = 0\n\telse:\n\t\tfps = round(256 / abs(duration))\n\trate = 1\n\twhile fps > 48 and rate < 8:\n\t\tfps >>= 1\n\t\trate <<= 1\n\twhile fps >= 64:\n\t\tfps >>= 1\n\t\trate <<= 1\n\tif fps <= 0:\n\t\traise ValueError(\"Invalid framerate value.\")\n\tif str(image.mode) == \"P\":\n\t\timage = image.convert(\"RGBA\")\n\tif str(image.mode) == \"RGBA\":\n\t\tA = image.getchannel(\"A\")\n\telse:\n\t\tA = None\n\tchannels = list(image.convert(\"HSV\").split())\n\t# channels = hsv_split(image, convert=False)\n\tif duration < 0:\n\t\trate = -rate\n\tcount = 256 // abs(rate)\n\tfunc = lambda x: (x + rate) & 255\n\n\t# Repeatedly hueshift image and return copies\n\tdef rainbow_gif_iterator(image):\n\t\tfor i in range(0, 256, abs(rate)):\n\t\t\tif i:\n\t\t\t\t# hue = channels[0] + i\n\t\t\t\t# image = hsv_merge(hue, *channels[1:])\n\t\t\t\tchannels[0] = channels[0].point(func)\n\t\t\t\timage = Image.merge(\"HSV\", channels).convert(\"RGBA\")\n\t\t\t\tif A is not None:\n\t\t\t\t\timage.putalpha(A)\n\t\t\tyield image\n\n\treturn dict(duration=1000 / fps * count, count=count, frames=rainbow_gif_iterator(image))\n\n\ndef spin_gif2(image, duration):\n\ttotal = 0\n\tfor f in range(2147483648):\n\t\ttry:\n\t\t\timage.seek(f)\n\t\texcept EOFError:\n\t\t\tbreak\n\t\ttotal += max(image.info.get(\"duration\", 0), 50)\n\tlength = f\n\tloops = total / duration / 1000\n\tscale = 1\n\twhile abs(loops * scale) < 1:\n\t\tscale *= 2\n\t\tif length * scale >= 64:\n\t\t\tloops = 1 if loops >= 0 else -1\n\t\t\tbreak\n\tloops = round(loops * scale) / scale\n\tif abs(loops) < 1:\n\t\tloops = 1 if loops >= 0 else -1\n\tsize = image.size\n\n\tdef 
def spin_gif(image, duration):
	"""Build a rotating, circle-masked animation from a still image.

	Multi-frame inputs are delegated to spin_gif2(); for a still image
	this returns a dict of the total duration in milliseconds, the frame
	count, and a lazy generator of rotated frames.

	Raises:
		ValueError: if the derived frame rate works out to zero or less.
	"""
	# Animated input? Hand it off to the multi-frame implementation.
	try:
		image.seek(1)
	except EOFError:
		image.seek(0)
	else:
		return spin_gif2(image, duration)
	frame_rate = 0 if duration == 0 else round(256 / abs(duration))
	step = 1
	# Halve the frame rate (doubling the rotation step) until manageable.
	while frame_rate > 32 and step < 8:
		frame_rate >>= 1
		step <<= 1
	while frame_rate >= 64:
		frame_rate >>= 1
		step <<= 1
	if frame_rate <= 0:
		raise ValueError("Invalid framerate value.")
	if duration < 0:
		step = -step
	count = 256 // abs(step)

	# Repeatedly rotate the source and yield circle-masked copies.
	def frame_iterator(source):
		for offset in range(0, 256, abs(step)):
			rotated = rotate_to(source, offset * 360 / 256, expand=False) if offset else source
			yield to_circle(rotated)

	return dict(duration=1000 / frame_rate * count, count=count, frames=frame_iterator(image))
max(sources[0].size)\n\t\tscale2 = orbitals / pi * (sqrt(5) + 1) / 2 + 0.5\n\t\tdiameter = min(diameter, round(2048 / scale2))\n\t\tsize = (round(diameter * scale2),) * 2\n\t\tfor f in range(0, length * scale):\n\t\t\tim = Image.new(\"RGBA\", size, (0,) * 4)\n\t\t\tif orbitals > 1:\n\t\t\t\tim2 = Image.new(\"RGBA\", size, (0,) * 4)\n\t\t\t\tif orbitals & 1:\n\t\t\t\t\tim3 = Image.new(\"RGBA\", size, (0,) * 4)\n\t\t\tfor j in range(orbitals):\n\t\t\t\timage = sources[j % len(sources)]\n\t\t\t\tif hasattr(image, \"length\"):\n\t\t\t\t\tg = f % image.length\n\t\t\t\telse:\n\t\t\t\t\tg = f\n\t\t\t\ttry:\n\t\t\t\t\timage.seek(g)\n\t\t\t\texcept EOFError:\n\t\t\t\t\timage.length = f\n\t\t\t\t\timage.seek(0)\n\t\t\t\timage = resize_max(image, diameter, force=True)\n\t\t\t\tangle = f / length / scale * loops * tau / x + j / orbitals * tau\n\t\t\t\tpos = im.width / 2 + np.array((cos(angle), sin(angle))) * (diameter * scale2 / 2 - diameter / 2) - (image.width / 2, image.height / 2)\n\t\t\t\tpos = list(map(round, pos))\n\t\t\t\tif j == orbitals - 1 and orbitals & 1 and orbitals > 1:\n\t\t\t\t\tim3.paste(image, pos)\n\t\t\t\telif not j & 1:\n\t\t\t\t\tim.paste(image, pos)\n\t\t\t\telse:\n\t\t\t\t\tim2.paste(image, pos)\n\t\t\tif orbitals > 1:\n\t\t\t\tif orbitals & 1:\n\t\t\t\t\tim2 = Image.alpha_composite(im3, im2)\n\t\t\t\tim = Image.alpha_composite(im, im2)\n\t\t\tyield im\n\n\treturn dict(duration=total * scale, count=length * scale, frames=orbit_gif_iterator(sources))\n\n\ndef orbit_gif(image, orbitals, duration, extras):\n\tif extras:\n\t\textras = [get_image(url) for url in extras[:orbitals]]\n\telse:\n\t\tduration /= orbitals\n\ttry:\n\t\timage.seek(1)\n\texcept EOFError:\n\t\timage.seek(0)\n\telse:\n\t\treturn orbit_gif2(image, orbitals, duration, extras)\n\tmaxsize = 960\n\tsize = list(image.size)\n\tif duration == 0:\n\t\tfps = 0\n\telse:\n\t\tfps = round(256 / abs(duration))\n\trate = 1\n\twhile fps > 32 and rate < 8:\n\t\tfps >>= 1\n\t\trate <<= 1\n\twhile fps >= 
def to_square(image):
	"""Center-crop *image* to a square along its longer axis.

	Already-square images are returned unchanged (same object). The crop
	removes floor(d/2) pixels from one edge and ceil(d/2) from the other,
	where d is the width/height difference.
	"""
	width, height = image.size
	diff = width - height
	if diff == 0:
		return image
	if diff > 0:
		# Wider than tall: trim the left and right edges.
		left = diff >> 1
		right = width - ((1 + diff) >> 1)
		return image.crop((left, 0, right, height))
	# Taller than wide: trim the top and bottom edges.
	top = (-diff) >> 1
	bottom = height - ((1 - diff) >> 1)
	return image.crop((0, top, width, bottom))
# Map direction aliases (full words, single letters, digit strings) to the
# internal direction codes 0..3 (left/up/right/down).
DIRECTIONS = {
	"left": 0, "up": 1, "right": 2, "down": 3,
	"l": 0, "u": 1, "r": 2, "d": 3,
	"0": 0, "1": 1, "2": 2, "3": 3,
}

def scroll_gif2(image, direction, duration):
	"""Scroll an already-animated image one full wrap over its own length.

	Returns a dict of total duration (ms, summed from the source frames
	with a 50 ms floor each), frame count, and a lazy frame generator.
	"""
	total = 0
	frame_count = 0
	# Probe frames until seek() fails, counting them and summing durations.
	for frame_count in range(2147483647):
		try:
			image.seek(frame_count)
		except EOFError:
			break
		total += max(image.info.get("duration", 0), 50)

	def frame_iterator(source):
		# Bit 0 of the direction selects the axis; bit 1 selects the sign.
		if direction & 1:
			dx, dy = 0, (direction & 2) - 1
		else:
			dx, dy = (direction & 2) - 1, 0
		for index in range(frame_count):
			source.seek(index)
			frame = resize_max(source, 960, resample=Resampling.HAMMING)
			if index:
				frame = ImageChops.offset(
					frame,
					round(dx * frame.width / frame_count * index),
					round(dy * frame.height / frame_count * index),
				)
			yield frame

	return dict(duration=total, count=frame_count, frames=frame_iterator(image))
def magik_gif2(image, cell_count, grid_distance, iterations):
	"""Apply a progressively stronger 'magik' mesh distortion to an
	already-animated image.

	Returns a dict of total duration (ms), frame count, and a lazy frame
	generator; the animation may be replayed `scale` times so the effect
	completes at least one full cycle.
	"""
	total = 0
	# Probe frames until seek() fails, counting them and summing durations
	# (with a 50 ms floor per frame).
	for f in range(2147483648):
		try:
			image.seek(f)
		except EOFError:
			break
		total += max(image.info.get("duration", 0), 50)
	length = f
	# `total` is in milliseconds; this targets roughly a 2-second cycle
	# (presumably — same structure as the duration-driven variants above).
	loops = total / 2 / 1000
	scale = 1
	# Double `scale` until at least one full loop fits, capping the frame
	# count at 32 replays.
	while abs(loops * scale) < 1:
		scale *= 2
		if length * scale >= 32:
			loops = 1 if loops >= 0 else -1
			break
	loops = round(loops * scale) / scale
	if abs(loops) < 1:
		loops = 1 if loops >= 0 else -1
	size = image.size

	def magik_gif_iterator(image):
		ts = time.time_ns() // 1000
		for f in range(length * scale):
			# Re-seed with the same timestamp-derived seed on every frame so
			# each frame replays the identical distortion sequence — just
			# more steps of it — keeping the animation coherent.
			np.random.seed(ts & 4294967295)
			image.seek(f % length)
			temp = image
			if temp.size[0] != size[0] or temp.size[1] != size[1]:
				temp = temp.resize(size, Resampling.HAMMING)
			# Distortion strength ramps up linearly with frame position.
			for _ in range(int(31 * iterations * f / length / scale)):
				dst_grid = griddify(shape_to_rect(image.size), cell_count, cell_count)
				src_grid = distort_grid(dst_grid, grid_distance)
				mesh = grid_to_mesh(src_grid, dst_grid)
				temp = temp.transform(temp.size, Transform.MESH, mesh, resample=Resampling.NEAREST)
			yield temp

	return dict(duration=total * scale, count=length * scale, frames=magik_gif_iterator(image))
def quad_as_rect(quad):
	"""Return True when the 8-tuple *quad* is axis-aligned (a rectangle)."""
	return bool(
		quad[0] == quad[2]
		and quad[1] == quad[7]
		and quad[4] == quad[6]
		and quad[3] == quad[5]
	)

def quad_to_rect(quad):
	"""Collapse an axis-aligned quad into an (x0, y0, x1, y1) rectangle."""
	assert(len(quad) == 8)
	assert(quad_as_rect(quad))
	return (quad[0], quad[1], quad[4], quad[3])

def rect_to_quad(rect):
	"""Expand an (x0, y0, x1, y1) rectangle into its four corners,
	ordered top-left, bottom-left, bottom-right, top-right."""
	assert(len(rect) == 4)
	x0, y0, x1, y1 = rect
	return (x0, y0, x0, y1, x1, y1, x1, y0)

def shape_to_rect(shape):
	"""Turn a (width, height) pair into a rectangle anchored at the origin."""
	assert(len(shape) == 2)
	return (0, 0, shape[0], shape[1])

def griddify(rect, w_div, h_div):
	"""Build an (h_div+1, w_div+1, 2) integer array of vertex coordinates
	evenly subdividing *rect* into w_div x h_div cells."""
	x_step = (rect[2] - rect[0]) / float(w_div)
	y_step = (rect[3] - rect[1]) / float(h_div)
	rows = []
	y = rect[1]
	for _ in range(h_div + 1):
		row = []
		x = rect[0]
		for _ in range(w_div + 1):
			row.append([int(x), int(y)])
			x += x_step
		rows.append(row)
		y += y_step
	return np.array(rows)

def distort_grid(org_grid, max_shift):
	"""Return a copy of *org_grid* with every vertex jittered by up to
	±max_shift, clamped to the original grid's bounding box."""
	grid = np.copy(org_grid)
	x_lo, x_hi = np.min(grid[:, :, 0]), np.max(grid[:, :, 0])
	y_lo, y_hi = np.min(grid[:, :, 1]), np.max(grid[:, :, 1])
	grid += np.random.randint(-max_shift, max_shift + 1, grid.shape)
	grid[:, :, 0] = np.clip(grid[:, :, 0], x_lo, x_hi)
	grid[:, :, 1] = np.clip(grid[:, :, 1], y_lo, y_hi)
	return grid

def grid_to_mesh(src_grid, dst_grid):
	"""Pair each destination cell rectangle with its source quad, producing
	the [(dst_rect, src_quad), ...] list Image.transform(MESH) expects."""
	assert(src_grid.shape == dst_grid.shape)
	mesh = []
	for i in range(src_grid.shape[0] - 1):
		for j in range(src_grid.shape[1] - 1):
			src_quad = [
				src_grid[i, j, 0], src_grid[i, j, 1],
				src_grid[i + 1, j, 0], src_grid[i + 1, j, 1],
				src_grid[i + 1, j + 1, 0], src_grid[i + 1, j + 1, 1],
				src_grid[i, j + 1, 0], src_grid[i, j + 1, 1],
			]
			dst_quad = [
				dst_grid[i, j, 0], dst_grid[i, j, 1],
				dst_grid[i + 1, j, 0], dst_grid[i + 1, j, 1],
				dst_grid[i + 1, j + 1, 0], dst_grid[i + 1, j + 1, 1],
				dst_grid[i, j + 1, 0], dst_grid[i, j + 1, 1],
			]
			mesh.append([quad_to_rect(dst_quad), src_quad])
	return mesh
def laplacian(image):
	"""Apply pygame's laplacian edge-detection filter to *image*.

	Palette images are first converted to RGBA so the raw pixel buffer can
	round-trip through a pygame surface losslessly.

	Raises:
		ImportError: if the optional pygame dependency is not installed.
			(Previously the ImportError was swallowed and pygame set to
			None, which surfaced later as a confusing AttributeError.)
	"""
	try:
		import pygame
	except ImportError as ex:
		raise ImportError("The laplacian filter requires the optional 'pygame' package.") from ex
	if str(image.mode) == "P":
		image = image.convert("RGBA")
	# Round-trip the raw pixel buffer through a pygame surface.
	buf = image.tobytes()
	surf = pygame.image.frombuffer(buf, image.size, image.mode)
	surf = pygame.transform.laplacian(surf)
	buf = pygame.image.tostring(surf, image.mode)
	return Image.frombuffer(image.mode, image.size, buf)
dest == \"cmy\":\n\t\t\tout = invert(im)\n\t\telif dest == \"hsv\":\n\t\t\tim = im.convert(\"HSV\")\n\t\t\tout = Image.frombuffer(\"RGB\", im.size, im.tobytes())\n\t\telif dest == \"lab\":\n\t\t\tim = ImageCms.applyTransform(im, rgb2lab)\n\t\t\tout = Image.frombuffer(\"RGB\", im.size, im.tobytes())\n\tif not out:\n\t\traise NotImplementedError(f\"Image conversion from {source} to {dest} is not currently supported.\")\n\tif A is not None:\n\t\tout.putalpha(A)\n\treturn out\n\n\ndef get_colour(image):\n\tif \"A\" in str(image.mode):\n\t\tspl = deque(image.split())\n\t\tA = np.divide(spl.pop(), 255)\n\t\tsumA = np.sum(A)\n\t\tif sumA == 0:\n\t\t\tcol = [0, 0, 0]\n\t\telse:\n\t\t\tcol = [np.sum(np.multiply(channel, A)) / sumA for channel in spl]\n\telse:\n\t\tspl = image.split()\n\t\tcol = [np.mean(channel) for channel in spl]\n\treturn str(col)\n\n\nchannel_map = {\n\t\"alpha\": -1,\n\t\"a\": -1,\n\t\"red\": 0,\n\t\"r\": 0,\n\t\"green\": 1,\n\t\"g\": 1,\n\t\"blue\": 2,\n\t\"b\": 2,\n\t\"cyan\": 3,\n\t\"c\": 3,\n\t\"magenta\": 4,\n\t\"m\": 4,\n\t\"yellow\": 5,\n\t\"y\": 5,\n\t\"hue\": 6,\n\t\"h\": 6,\n\t\"saturation\": 7,\n\t\"sat\": 7,\n\t\"s\": 7,\n\t\"value\": 8,\n\t\"v\": 8,\n\t\"lightness\": 9,\n\t\"luminance\": 9,\n\t\"lum\": 9,\n\t\"l\": 9,\n}\n\ndef fill_channels(image, colour, *channels):\n\tchannels = list(channels)\n\tops = {}\n\tfor c in channels:\n\t\ttry:\n\t\t\tcid = channel_map[c]\n\t\texcept KeyError:\n\t\t\tif len(c) <= 1:\n\t\t\t\traise TypeError(\"invalid colour identifier: \" + c)\n\t\t\tchannels.extend(c)\n\t\telse:\n\t\t\tops[cid] = None\n\tch = Image.new(\"L\", image.size, colour)\n\tif \"RGB\" not in str(image.mode):\n\t\timage = image.convert(\"RGB\")\n\tif -1 in ops:\n\t\timage.putalpha(ch)\n\tmode = image.mode\n\trgb = False\n\tfor i in range(3):\n\t\tif i in ops:\n\t\t\trgb = True\n\tif rgb:\n\t\tspl = list(image.split())\n\t\tfor i in range(3):\n\t\t\tif i in ops:\n\t\t\t\tspl[i] = ch\n\t\timage = Image.merge(mode, spl)\n\tcmy = 
False\n\tfor i in range(3, 6):\n\t\tif i in ops:\n\t\t\tcmy = True\n\tif cmy:\n\t\tspl = list(ImageChops.invert(image).split())\n\t\tfor i in range(3, 6):\n\t\t\tif i in ops:\n\t\t\t\tspl[i - 3] = ch\n\t\timage = ImageChops.invert(Image.merge(mode, spl))\n\thsv = False\n\tfor i in range(6, 9):\n\t\tif i in ops:\n\t\t\thsv = True\n\tif hsv:\n\t\tif str(image.mode) == \"P\":\n\t\t\timage = image.convert(\"RGBA\")\n\t\tif str(image.mode) == \"RGBA\":\n\t\t\tA = image.getchannel(\"A\")\n\t\telse:\n\t\t\tA = None\n\t\tspl = list(image.convert(\"HSV\").split())\n\t\t# spl = hsv_split(image, convert=False)\n\t\tfor i in range(6, 9):\n\t\t\tif i in ops:\n\t\t\t\tspl[i - 6] = ch\n\t\t# image = hsv_merge(*spl)\n\t\timage = Image.merge(\"HSV\", spl).convert(\"RGB\")\n\t\tif A is not None:\n\t\t\timage.putalpha(A)\n\tif 9 in ops:\n\t\tif str(image.mode) == \"P\":\n\t\t\timage = image.convert(\"RGBA\")\n\t\tif str(image.mode) == \"RGBA\":\n\t\t\tA = image.getchannel(\"A\")\n\t\telse:\n\t\t\tA = None\n\t\tspl = hsl_split(image, convert=False)\n\t\tspl[-1] = np.full(tuple(reversed(image.size)), colour)\n\t\timage = hsl_merge(*spl)\n\t\tif A is not None:\n\t\t\timage.putalpha(A)\n\treturn image\n\n\n# Image blend operations (this is a bit of a mess)\nblenders = {\n\t\"normal\": \"blend\",\n\t\"blt\": \"blend\",\n\t\"blit\": \"blend\",\n\t\"blend\": \"blend\",\n\t\"replace\": \"replace\",\n\t\"+\": \"add\",\n\t\"add\": \"add\",\n\t\"addition\": \"add\",\n\t\"-\": \"subtract\",\n\t\"sub\": \"subtract\",\n\t\"subtract\": \"subtract\",\n\t\"subtraction\": \"subtract\",\n\t\"*\": \"multiply\",\n\t\"mul\": \"multiply\",\n\t\"mult\": \"multiply\",\n\t\"multiply\": \"multiply\",\n\t\"multiplication\": \"multiply\",\n\t\"/\": blend_modes.divide,\n\t\"div\": blend_modes.divide,\n\t\"divide\": blend_modes.divide,\n\t\"division\": blend_modes.divide,\n\t\"mod\": \"OP_X%Y\",\n\t\"modulo\": \"OP_X%Y\",\n\t\"%\": \"OP_X%Y\",\n\t\"and\": \"OP_X&Y\",\n\t\"&\": \"OP_X&Y\",\n\t\"or\": 
\"OP_X|Y\",\n\t\"|\": \"OP_X|Y\",\n\t\"xor\": \"OP_X^Y\",\n\t\"^\": \"OP_X^Y\",\n\t\"nand\": \"OP_255-(X&Y)\",\n\t\"~&\": \"OP_255-(X&Y)\",\n\t\"nor\": \"OP_255-(X|Y)\",\n\t\"~|\": \"OP_255-(X|Y)\",\n\t\"xnor\": \"OP_255-(X^Y)\",\n\t\"~^\": \"OP_255-(X^Y)\",\n\t\"xand\": \"OP_255-(X^Y)\",\n\t\"diff\": \"difference\",\n\t\"difference\": \"difference\",\n\t\"overlay\": blend_modes.overlay,\n\t\"screen\": \"screen\",\n\t\"soft\": blend_modes.soft_light,\n\t\"softlight\": blend_modes.soft_light,\n\t\"hard\": blend_modes.hard_light,\n\t\"hardlight\": blend_modes.hard_light,\n\t\"lighter\": \"lighter\",\n\t\"lighten\": \"lighter\",\n\t\"darker\": \"darker\",\n\t\"darken\": \"darker\",\n\t\"plusdarker\": \"OP_X+Y-255\",\n\t\"plusdarken\": \"OP_X+Y-255\",\n\t\"overflow\": \"OVERFLOW\",\n\t\"lighting\": \"LIGHTING\",\n\t\"extract\": blend_modes.grain_extract,\n\t\"grainextract\": blend_modes.grain_extract,\n\t\"merge\": blend_modes.grain_merge,\n\t\"grainmerge\": blend_modes.grain_merge,\n\t\"burn\": \"OP_255*(1-((255-Y)/X))\",\n\t\"colorburn\": \"OP_255*(1-((255-Y)/X))\",\n\t\"colourburn\": \"OP_255*(1-((255-Y)/X))\",\n\t\"linearburn\": \"OP_(X+Y)-255\",\n\t\"dodge\": blend_modes.dodge,\n\t\"colordodge\": blend_modes.dodge,\n\t\"colourdodge\": blend_modes.dodge,\n\t\"lineardodge\": \"add\",\n\t\"hue\": \"SP_HUE\",\n\t\"sat\": \"SP_SAT\",\n\t\"saturation\": \"SP_SAT\",\n\t\"lightness\": \"SP_LIT\",\n\t\"brightness\": \"SP_LIT\",\n\t\"lum\": \"SP_LUM\",\n\t\"luminosity\": \"SP_LUM\",\n\t\"val\": \"SP_VAL\",\n\t\"value\": \"SP_VAL\",\n\t\"color\": \"SP_COL\",\n\t\"colour\": \"SP_COL\",\n\t\"alpha\": \"SP_ALP\",\n}\nhalve = (np.arange(1, 257) >> 1).astype(np.uint8)\ndarken = np.concatenate((np.zeros(128, dtype=np.uint8), np.arange(128, dtype=np.uint8)))\n\ndef blend_op(image, url, operation, amount, recursive=True):\n\top = operation.casefold().replace(\" \", \"\").replace(\"_\", \"\")\n\tif op in blenders:\n\t\tfilt = blenders[op]\n\telif op == \"auto\":\n\t\tfilt = 
\"blend\"\n\telse:\n\t\traise TypeError(\"Invalid image operation: \\\"\" + op + '\"')\n\ttry:\n\t\timage2 = get_image(url)\n\texcept TypeError as ex:\n\t\ts = ex.args[0]\n\t\tsearch = 'Filetype \"audio/'\n\t\tif not s.startswith(search):\n\t\t\traise\n\t\ts = s[len(search):]\n\t\timage.audio = dict(url=url, codec=s[:s.index('\"')])\n\t\treturn image\n\tif recursive:\n\t\tif not globals()[\"ANIM\"]:\n\t\t\ttry:\n\t\t\t\timage2.seek(1)\n\t\t\texcept EOFError:\n\t\t\t\timage2.seek(0)\n\t\t\telse:\n\t\t\t\tdur = 0\n\t\t\t\tfor f in range(2147483648):\n\t\t\t\t\ttry:\n\t\t\t\t\t\timage2.seek(f)\n\t\t\t\t\texcept EOFError:\n\t\t\t\t\t\tbreak\n\t\t\t\t\tdur += max(image2.info.get(\"duration\", 0), 50)\n\t\t\t\tcount = f\n\n\t\t\t\tdef blend_op_iterator(image, image2, operation, amount):\n\t\t\t\t\tfor f in range(2147483648):\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\timage2.seek(f)\n\t\t\t\t\t\texcept EOFError:\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\tif str(image.mode) == \"P\":\n\t\t\t\t\t\t\timage = image.convert(\"RGBA\")\n\t\t\t\t\t\telif str(image.mode) != \"RGBA\":\n\t\t\t\t\t\t\ttemp = image.convert(\"RGBA\")\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\ttemp = image\n\t\t\t\t\t\ttemp2 = image2._images[image2._position]\n\t\t\t\t\t\t# print(image2._position)\n\t\t\t\t\t\t# image2._images[image2._position].save(f\"temp{f}.png\")\n\t\t\t\t\t\tyield blend_op(temp, temp2, operation, amount, recursive=False)\n\n\t\t\t\treturn dict(duration=dur, count=count, frames=blend_op_iterator(image, image2, operation, amount))\n\t\ttry:\n\t\t\tn_frames = 1\n\t\t\tfor f in range(CURRENT_FRAME + 1):\n\t\t\t\ttry:\n\t\t\t\t\timage2.seek(f)\n\t\t\t\texcept EOFError:\n\t\t\t\t\tbreak\n\t\t\t\tn_frames += 1\n\t\t\timage2.seek(CURRENT_FRAME % n_frames)\n\t\texcept EOFError:\n\t\t\timage2.seek(0)\n\tif image2.width != image.width or image2.height != image.height:\n\t\timage2 = resize_to(image2, image.width, image.height, \"auto\")\n\tif type(filt) is not str:\n\t\tif str(image.mode) == \"P\":\n\t\t\timage = 
image.convert(\"RGBA\")\n\t\tif str(image.mode) != \"RGBA\":\n\t\t\timage = image.convert(\"RGBA\")\n\t\tif str(image2.mode) == \"P\" and \"transparency\" in image2.info:\n\t\t\timage2 = image2.convert(\"RGBA\")\n\t\tif str(image2.mode) != \"RGBA\":\n\t\t\timage2 = image2.convert(\"RGBA\")\n\t\timgA = np.array(image).astype(np.float64)\n\t\timgB = np.array(image2).astype(np.float64)\n\t\tout = fromarray(np.uint8(filt(imgA, imgB, amount)), image.mode)\n\telse:\n\t\t# Basic blend, use second image\n\t\tif filt in (\"blend\", \"replace\"):\n\t\t\tout = image2\n\t\t# Image operation, use ImageMath.eval\n\t\telif filt.startswith(\"OP_\"):\n\t\t\tf = filt[3:]\n\t\t\tif str(image.mode) != str(image2.mode):\n\t\t\t\tif str(image.mode) == \"P\":\n\t\t\t\t\timage = image.convert(\"RGBA\")\n\t\t\t\tif str(image.mode) != \"RGBA\":\n\t\t\t\t\timage = image.convert(\"RGBA\")\n\t\t\t\tif str(image2.mode) == \"P\" and \"transparency\" in image2.info:\n\t\t\t\t\timage2 = image2.convert(\"RGBA\")\n\t\t\t\tif str(image2.mode) != \"RGBA\":\n\t\t\t\t\timage2 = image2.convert(\"RGBA\")\n\t\t\tmode = image.mode\n\t\t\tch1 = image.split()\n\t\t\tch2 = image2.split()\n\t\t\tc = len(ch1)\n\t\t\tch3 = [ImageMath.eval(f, dict(X=ch1[i], Y=ch2[i])).convert(\"L\") for i in range(3)]\n\t\t\tif c > 3:\n\t\t\t\tch3.append(ImageMath.eval(\"max(X,Y)\", dict(X=ch1[-1], Y=ch2[-1])).convert(\"L\"))\n\t\t\tout = Image.merge(mode, ch3)\n\t\t# Special operation, use HSV channels\n\t\telif filt.startswith(\"SP_\"):\n\t\t\tf = filt[3:]\n\t\t\tif f == \"ALP\":\n\t\t\t\tif \"A\" in image2.mode:\n\t\t\t\t\tif amount % 1:\n\t\t\t\t\t\tout = image.copy()\n\t\t\t\t\telse:\n\t\t\t\t\t\tout = image\n\t\t\t\t\tout.putalpha(image2.getchannel(\"A\"))\n\t\t\t\telse:\n\t\t\t\t\tout = image\n\t\t\t\t\tamount = 0\n\t\t\telse:\n\t\t\t\tif str(image.mode) == \"P\":\n\t\t\t\t\timage = image.convert(\"RGBA\")\n\t\t\t\tif str(image.mode) == \"RGBA\":\n\t\t\t\t\tA = image.getchannel(\"A\")\n\t\t\t\telse:\n\t\t\t\t\tA = 
None\n\t\t\t\tif f == \"LUM\":\n\t\t\t\t\tchannels1 = yuv_split(image, convert=False)\n\t\t\t\t\tchannels2 = yuv_split(image2, convert=False)\n\t\t\t\telif f == \"LIT\":\n\t\t\t\t\tchannels1 = hsl_split(image, convert=False)\n\t\t\t\t\tchannels2 = hsl_split(image2, convert=False)\n\t\t\t\telse:\n\t\t\t\t\tchannels1 = image.convert(\"HSV\").split()\n\t\t\t\t\tchannels2 = image2.convert(\"HSV\").split()\n\t\t\t\tif f in (\"HUE\", \"LUM\"):\n\t\t\t\t\tchannels = [channels2[0], channels1[1], channels1[2]]\n\t\t\t\telif f == \"SAT\":\n\t\t\t\t\tchannels = [channels1[0], channels2[1], channels1[2]]\n\t\t\t\telif f in (\"LIT\", \"VAL\"):\n\t\t\t\t\tchannels = [channels1[0], channels1[1], channels2[2]]\n\t\t\t\telif f == \"COL\":\n\t\t\t\t\tchannels = [channels2[0], channels2[1], channels1[2]]\n\t\t\t\tif f == \"LUM\":\n\t\t\t\t\tout = yuv_merge(channels)\n\t\t\t\telif f == \"LIT\":\n\t\t\t\t\tout = hsl_merge(*channels)\n\t\t\t\telse:\n\t\t\t\t\tout = Image.merge(\"HSV\", channels).convert(\"RGB\")\n\t\t\t\tif A:\n\t\t\t\t\tout.putalpha(A)\n\t\telif filt in (\"OVERFLOW\", \"LIGHTING\"):\n\t\t\tif str(image.mode) != str(image2.mode):\n\t\t\t\tif image.mode == \"RGBA\" or image2.mode == \"RGBA\":\n\t\t\t\t\tif image.mode != \"RGBA\":\n\t\t\t\t\t\timage = image.convert(\"RGBA\")\n\t\t\t\t\telse:\n\t\t\t\t\t\timage2 = image2.convert(\"RGBA\")\n\t\t\t\telse:\n\t\t\t\t\tmode = image.mode if image.mode != \"P\" else \"RGBA\" if \"transparency\" in image2.info else \"RGB\"\n\t\t\t\t\timage2 = image2.convert(mode)\n\t\t\t\t\tif image.mode != mode:\n\t\t\t\t\t\timage = image.convert(mode)\n\t\t\tif \"A\" in image.mode:\n\t\t\t\tspl, spl2 = image.split(), image2.split()\n\t\t\t\tA = ImageChops.add(spl[-1], spl2[-1])\n\t\t\t\timage = Image.merge(\"RGB\", spl[:-1])\n\t\t\t\timage2 = Image.merge(\"RGB\", spl2[:-1])\n\t\t\telse:\n\t\t\t\tA = None\n\t\t\timage = Image.blend(image, image2, 0.5)\n\t\t\tspl = hsl_split(image, convert=False, dtype=np.uint16)\n\t\t\tif filt == 
def remove_matte(image, colour):
	"""Subtract a solid matte *colour* from *image*, recovering transparency.

	For each pixel the largest multiple of the matte colour that fits under
	the pixel value is removed; the residual colour is rescaled back up and
	the alpha channel reduced to compensate. Pixels that are pure matte
	become fully transparent.

	Fixes relative to the previous revision:
	- ``np.asanyarrayarray`` was a typo for ``np.asanyarray`` and raised
	  AttributeError at runtime;
	- the matte colour was overwritten by the per-pixel residual
	  (``col = cell[:t] - r * col``), corrupting every subsequent pixel.

	NOTE(review): assumes *colour* has no zero components — a zero would
	divide by zero in the ratio computation below; confirm against callers.
	"""
	if str(image.mode) == "P":
		image = image.convert("RGBA")
	if str(image.mode) != "RGBA":
		image = image.convert("RGBA")
	arr = np.asanyarray(image, dtype=np.float32)
	matte = np.array(colour)
	t = len(matte)
	for row in arr:
		for cell in row:
			# Fraction of the matte colour contained in this pixel, capped at 1.
			r = min(1, np.min(cell[:t] / matte))
			if r > 0:
				residual = cell[:t] - r * matte
				if max(residual) > 0:
					ratio = sum(cell) / max(residual)
					cell[:t] = np.clip(residual * ratio, 0, 255)
					cell[3] /= ratio
				else:
					# Pixel was pure matte: make it fully transparent.
					cell[3] = 0
	return fromarray(arr.astype(np.uint8))
# Colour-deficiency simulation matrices. For each deficiency type,
# index [0] holds the full-deficiency RGB mixing matrix and [1] the 50%
# (anomalous trichromacy) matrix; each row gives the output R/G/B channel
# as weights over the input R/G/B channels.
colour_blind_map = dict(
	protan=(
		(
			(0.56667, 0.43333, 0),
			(0.55833, 0.44167, 0),
			(0.24167, 0.75833, 0),
		),
		(
			(0.81667, 0.18333, 0),
			(0.33333, 0.66667, 0),
			(0, 0.125, 0.875),
		),
	),
	deutan=(
		(
			(0.625, 0.375, 0),
			(0.7, 0.3, 0),
			(0, 0.3, 0.7),
		),
		(
			(0.8, 0.2, 0),
			(0.25833, 0.74167, 0),
			(0, 0.14167, 0.85833),
		),
	),
	tritan=(
		(
			(0.95, 0.05, 0),
			(0, 0.43333, 0.56667),
			(0, 0.475, 0.525),
		),
		(
			(0.96667, 0.03333, 0),
			(0, 0.73333, 0.26667),
			(0, 0.18333, 0.81667),
		),
	),
	achro=(
		(
			(0.299, 0.587, 0.114),
			(0.299, 0.587, 0.114),
			(0.299, 0.587, 0.114),
		),
		(
			(0.618, 0.32, 0.062),
			(0.163, 0.775, 0.062),
			(0.163, 0.32, 0.516),
		),
	),
)

# Identity mixing matrix (normal colour vision), used as the blend anchor.
colour_normal_map = (
	(1, 0, 0),
	(0, 1, 0),
	(0, 0, 1),
)

def colour_deficiency(image, operation, value=None):
	"""Simulate a colour-vision deficiency on *image*.

	*operation* is either a base type ("protan", "deutan", "tritan",
	"achro") or a full name ("protanopia", "deuteranomaly", ...); full
	names imply a severity (…opia -> 1, …omaly -> 0.5). *value*, when
	given, sets the severity in [0, 1]: 0 = normal vision, 0.5 = the
	anomalous matrix, 1 = full deficiency, with linear blending between.

	Raises:
		TypeError: if *operation* is not a recognised deficiency type.

	Fixes relative to the previous revision:
	- the name-to-severity chain restarted with a fresh ``if`` at
	  "deuteranopia", so "protanomaly" fell through to the trailing
	  ``else`` and lost its 0.5 severity;
	- the matrix interpolation multiplied whole tuples by floats
	  (a TypeError for any fractional severity); it now blends each
	  matrix component individually.
	"""
	if value is None:
		if operation == "protanopia":
			operation, value = "protan", 1
		elif operation == "protanomaly":
			operation, value = "protan", 0.5
		elif operation == "deuteranopia":
			operation, value = "deutan", 1
		elif operation == "deuteranomaly":
			operation, value = "deutan", 0.5
		elif operation == "tritanopia":
			operation, value = "tritan", 1
		elif operation == "tritanomaly":
			operation, value = "tritan", 0.5
		elif operation in ("monochromacy", "achromatopsia"):
			operation, value = "achro", 1
		elif operation == "achromatonomaly":
			operation, value = "achro", 0.5
		else:
			value = 1
	try:
		table = colour_blind_map[operation]
	except KeyError:
		raise TypeError(f"Invalid filter {operation}.")
	if value < 0.5:
		# Blend between normal vision and the anomalous matrix.
		weight = value * 2
		ratios = [
			tuple(table[1][i][j] * weight + colour_normal_map[i][j] * (1 - weight) for j in range(3))
			for i in range(3)
		]
	else:
		# Blend between the full-deficiency and the anomalous matrix.
		weight = value * 2 - 1
		ratios = [
			tuple(table[0][i][j] * weight + table[1][i][j] * (1 - weight) for j in range(3))
			for i in range(3)
		]
	# Flatten into the 12-element (3x4 affine) matrix PIL's convert() expects.
	colourmatrix = []
	for row in ratios:
		colourmatrix.extend(row)
		colourmatrix.append(0)
	if image.mode == "P":
		image = image.convert("RGBA")
	if image.mode == "RGBA":
		# convert(mode, matrix) only supports RGB/L, so detach the alpha band.
		spl = image.split()
		image = Image.merge("RGB", spl[:3])
		A = spl[-1]
	else:
		A = None
	image = image.convert(image.mode, colourmatrix)
	if A:
		image.putalpha(A)
	return image

# Generic wrapper around PIL.ImageEnhance (e.g. "Contrast", "Sharpness").
Enhance = lambda image, operation, value: getattr(ImageEnhance, operation)(image).enhance(value)
image.convert(\"RGBA\")\n\t\tif str(image.mode) == \"RGBA\":\n\t\t\tA = image.getchannel(\"A\")\n\t\telse:\n\t\t\tA = None\n\t\tyuv = yuv_split(image, convert=False)\n\t\tnp.multiply(yuv[0], value, out=yuv[0])\n\t\timage = yuv_merge(yuv)\n\t\tif A:\n\t\t\timage.putalpha(A)\n\treturn image\n\n# Hueshift image using HSV channels\ndef hue_shift(image, value):\n\tif value:\n\t\tif str(image.mode) == \"P\":\n\t\t\timage = image.convert(\"RGBA\")\n\t\tif str(image.mode) == \"RGBA\":\n\t\t\tA = image.getchannel(\"A\")\n\t\telse:\n\t\t\tA = None\n\t\tchannels = list(image.convert(\"HSV\").split())\n\t\t# channels = hsv_split(image, convert=False)\n\t\t# channels[0] += round(value * 256)\n\t\t# image = hsv_merge(*channels)\n\t\tvalue *= 256\n\t\tchannels[0] = channels[0].point(lambda x: (x + value) % 256)\n\t\timage = Image.merge(\"HSV\", channels).convert(\"RGB\")\n\t\tif A is not None:\n\t\t\timage.putalpha(A)\n\treturn image\n\n\ndef get_mask(image):\n\tif image.mode != \"LA\":\n\t\timage = image.convert(\"LA\")\n\tif image.size != (512, 512):\n\t\timage = image.resize((512, 512), resample=Resampling.LANCZOS)\n\ta = np.array(image, dtype=np.uint8).T\n\tL, A = a[0].T, a[1].T\n\tanytrans = A != 255\n\tnotblank = A != 0\n\tanyalpha = anytrans & notblank\n\tat = np.sum(anytrans)\n\taa = np.sum(anyalpha)\n\tanywhite = L == 255\n\tanyblack = L == 0\n\taw = np.sum(anywhite)\n\tab = np.sum(anyblack)\n\t# print(np.sum(anytrans), np.sum(notblank), np.sum(anyalpha), aw, ab)\n\tif at and aa < at / 2 and at > max(aw, ab):\n\t\tL[anytrans] = 255\n\t\tL[anytrans == False] = 0\n\telse:\n\t\tif aw and ab:\n\t\t\tavg = np.mean(L[notblank])\n\t\t\tif 255 - avg < 32:\n\t\t\t\taw = 0\n\t\t\telif avg < 32:\n\t\t\t\tab = 0\n\t\t\telif aw > ab:\n\t\t\t\tab = 0\n\t\t\telse:\n\t\t\t\taw = 0\n\t\tif aw and not ab:\n\t\t\tL[(anywhite == False) & notblank] = 0\n\t\telif ab and not aw:\n\t\t\tL[anyblack & notblank] = 255\n\t\t\tL[(anyblack == False) & notblank] = 0\n\t\telse:\n\t\t\traise 
RuntimeError(\"Unable to detect mask. Please use full black, white, or transparent.\")\n\tmask = Image.fromarray(L, mode=\"L\")\n\treturn expand_mask(mask, radius=4)\n\ndef inpaint(image, url):\n\timage2 = get_image(url, nodel=True)\n\tif image2.mode == \"LA\":\n\t\timage2 = image2.getchannel(\"L\")\n\telif \"RGB\" in image2.mode or \"P\" in image2.mode:\n\t\timage2 = image2.convert(\"L\")\n\telif image2.mode != \"L\":\n\t\timage2 = image2.convert(\"L\")\n\tmask = np.asanyarray(image2, dtype=np.uint8) >= 128\n\toutl = np.roll(mask, -1, axis=0)\n\toutu = np.roll(mask, -1, axis=1)\n\toutr = np.roll(mask, 1, axis=0)\n\toutd = np.roll(mask, 1, axis=1)\n\toutline = (outl | outu | outr | outd) & (mask == False)\n\tif image.mode != \"RGB\":\n\t\timage = image.convert(\"RGB\")\n\tif image.size != image2.size:\n\t\timage = image.resize(image2.size, resample=Resampling.LANCZOS)\n\ta = np.array(image, dtype=np.uint8)\n\torients = [None] * 2\n\tfor i in range(2):\n\t\tif i:\n\t\t\tb = a.swapaxes(0, 1)\n\t\t\tm2 = mask.T\n\t\t\to2 = outline.T\n\t\telse:\n\t\t\tb = a\n\t\t\tm2 = mask\n\t\t\to2 = outline\n\t\tpm = np.argwhere(m2)\n\t\tom = np.argwhere(o2)\n\t\tpaint_mask = np.empty(len(pm), dtype=object)\n\t\tpaint_mask[:] = tuple(map(tuple, pm))\n\t\toutliner = np.empty(len(om), dtype=object)\n\t\toutliner[:] = tuple(map(tuple, om))\n\t\tnearr = np.searchsorted(outliner, paint_mask) % len(om)\n\t\tnearl = nearr - 1\n\t\tipl = tuple(om[nearl].T)\n\t\tipr = tuple(om[nearr].T)\n\t\tdist = np.sqrt(np.sum((pm.astype(np.float32) - om[nearl]) ** 2, axis=1))\n\t\tdist /= np.max(dist)\n\t\tgrads = np.tile(dist, (3, 1)).T\n\t\tinterpolated = (b[ipl] * (1 - grads) + b[ipr] * grads).astype(np.uint8) >> 1\n\t\torients[i] = (m2, interpolated)\n\ta[mask] = 0\n\tfor i, (m, o) in enumerate(orients):\n\t\tif i:\n\t\t\ta.swapaxes(0, 1)[m] += o\n\t\telse:\n\t\t\ta[mask] += o\n\tim = Image.fromarray(a, mode=\"RGB\")\n\tfilt = ImageFilter.GaussianBlur(radius=1.5)\n\tim2 = im.filter(filt)\n\ta2 = 
np.asanyarray(im2, dtype=np.uint8)\n\ta[mask] = a2[mask]\n\treturn Image.fromarray(a, mode=\"RGB\")\n\ndef expand_mask(image2, radius=4):\n\tif not radius:\n\t\treturn image2\n\tif radius > image2.width:\n\t\tradius = image2.width\n\tif radius > image2.height:\n\t\tradius = image2.height\n\tif image2.mode == \"LA\":\n\t\timage2 = image2.getchannel(\"L\")\n\telif \"RGB\" in image2.mode or \"P\" in image2.mode:\n\t\timage2 = image2.convert(\"L\")\n\tmask = np.asanyarray(image2, dtype=np.uint8)\n\toutmask = mask.copy()\n\tfor x in range(-radius, radius + 1):\n\t\tfor y in range(-radius, radius + 1):\n\t\t\tif x ** 2 + y ** 2 > (radius + 0.5) ** 2:\n\t\t\t\tcontinue\n\t\t\ttemp = mask.copy()\n\t\t\tif x > 0:\n\t\t\t\tt2 = temp[:-x]\n\t\t\t\ttemp[:x] = temp[-x:]\n\t\t\t\ttemp[x:] = t2\n\t\t\telif x < 0:\n\t\t\t\tt2 = temp[-x:]\n\t\t\t\ttemp[x:] = temp[:-x]\n\t\t\t\ttemp[:x] = t2\n\t\t\tif y > 0:\n\t\t\t\tt2 = temp.T[:-y]\n\t\t\t\ttemp.T[:y] = temp.T[-y:]\n\t\t\t\ttemp.T[y:] = t2\n\t\t\telif y < 0:\n\t\t\t\tt2 = temp.T[-y:]\n\t\t\t\ttemp.T[y:] = temp.T[:-y]\n\t\t\t\ttemp.T[:y] = t2\n\t\t\toutmask |= temp\n\toutim = Image.fromarray(outmask, mode=\"L\")\n\treturn outim","repo_name":"TheSkiRteQPL/DzbanBot","sub_path":"misc/x-image.py","file_name":"x-image.py","file_ext":"py","file_size_in_byte":39598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"4473482091","text":"import torch\nimport torch.nn as nn\nimport string\nimport torch.nn.functional as F\n\ntorch.manual_seed(1)\n\ndef sort_batch(data, length):\n data_id = torch.arange(data.size(0), dtype=torch.int)\n\n _, inx = torch.sort(length, descending=True)\n\n data = data[inx]\n data_id = data_id[inx]\n length = length[inx]\n\n return data, data_id, length\n\n\ndef recover_batch(data, data_id):\n _, inx = torch.sort(data_id)\n data = data[inx]\n\n return data\n\n\nclass Network(nn.Module):\n\n def __init__(self, in_channel, char_dim, hidden_size, height, 
width, channel_size, rel_len, node_len, linear_dim):\n super(Network, self).__init__()\n\n n_letters = len(string.printable)\n self.char_embedding = nn.Embedding(n_letters, char_dim)\n\n num_layers = 1\n biFlag = True\n self.bi_num = 2\n self.rnn = nn.LSTM(\n input_size=char_dim,\n hidden_size=hidden_size,\n num_layers=num_layers,\n bidirectional=biFlag,\n )\n\n self.conv = nn.Conv2d(in_channel, channel_size, (height, width))\n\n self.vec_len = channel_size + hidden_size * self.bi_num\n self.rel_embedding = nn.Embedding(rel_len, self.vec_len)\n self.node_embedding = nn.Embedding(node_len, width)\n \n self.linear = nn.Linear(self.vec_len, linear_dim)\n self.out_linear = nn.Linear(linear_dim, width)\n\n def forward_text(self, data, length):\n data, data_id, length = sort_batch(data, length)\n data = self.char_embedding(data.long())\n\n data = nn.utils.rnn.pack_padded_sequence(data, length, batch_first=True)\n\n out, (h_n, h_c) = self.rnn(data)\n out, length = nn.utils.rnn.pad_packed_sequence(out, batch_first=True)\n\n out = torch.mean(out, 1)\n out = recover_batch(out, data_id)\n\n return out\n\n def forward_once(self, data_text, text_len, data_net, rel_id):\n out_text = self.forward_text(data_text, text_len)\n\n data_net = self.node_embedding(data_net.long())\n out_net = self.conv(data_net)\n out_net = out_net.view(out_net.size(0), -1)\n\n rout = torch.cat([out_text, out_net], 1)\n\n weight = self.rel_embedding(rel_id.long())\n rout = rout * weight\n\n rout = F.relu(self.linear(rout))\n rout = self.out_linear(rout)\n\n return rout\n\n def forward(self, data_text, text_len, data_net, rel_id, pos_id, neg_id):\n out_net = self.forward_once(data_text, text_len, data_net, rel_id)\n \n out_pos = self.node_embedding(pos_id.long())\n out_neg = self.node_embedding(neg_id.long())\n\n return out_net, out_pos, out_neg\n\nclass ContrastiveLoss(torch.nn.Module):\n\n def __init__(self, margin1, margin2):\n super(ContrastiveLoss, self).__init__()\n self.margin1 = margin1\n 
self.margin2 = margin2\n\n def forward(self, out_self, pos_out, neg_out):\n pdist = nn.PairwiseDistance(p=2)\n pos_dist = pdist(out_self, pos_out)\n neg_dist = pdist(out_self, neg_out)\n loss = torch.mean(torch.clamp(pos_dist - self.margin1, min=0.0) + torch.clamp(self.margin2 - neg_dist, min=0.0))\n\n return loss","repo_name":"CrazilyCode/NodeEmbedding","sub_path":"code/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3162,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"8338490357","text":"# Crie um programa onde o usuário possa digitar sete valores numéricos e cadastre-os em uma lista única que mantenha separados os valores pares e ímpares. No final, mostre os valores pares e ímpares em ordem crescente.\n\nvalores = [[], []]\n\nfor loop in range(1,8):\n n = int(input(f'Digite o {loop} valor: '))\n if(n % 2 == 0):\n valores[0].append(n)\n else:\n valores[1].append(n)\n\nvalores[0].sort()\nvalores[1].sort()\n\nprint('-=-' * 15)\nprint(f'Os numeros pares são: {valores[0]}')\nprint(f'Os numeros impares são: {valores[1]}')","repo_name":"henriquekirchheck/Curso-em-Video-Python","sub_path":"desafio/desafio085.py","file_name":"desafio085.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72471754721","text":"# -*- coding: utf-8 -*-\n\nimport argparse\nimport inspect\nimport os\nfrom PIL import Image, ImageDraw\nfrom pprint import pprint\nimport sys\n\n# add parent directory to sys path to import relative modules\ncurrentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nparentdir = os.path.dirname(currentdir)\nsys.path.insert(0,parentdir)\n\nfrom lib.io_utils import *\nfrom lib.math_utils import *\nfrom lib.text_utils import *\n\n# input\nparser = argparse.ArgumentParser()\naddTextArguments(parser)\na = parser.parse_args()\n\ntprops = 
getTextProperties(a)\n_, lineHeight, _ = getLineSize(tprops['h3']['font'], 'A')\n\nbaseImage = Image.open('tmp/nara_poster.png')\n\ndef drawFrame(filename, baseImage, text, textAlpha):\n global lineHeight\n\n lines = addTextMeasurements([{\n \"type\": 'h3',\n \"text\": text\n }], tprops)\n width, height = baseImage.size\n x = roundInt(width * 0.05)\n y = height - x - lineHeight\n c = roundInt(textAlpha * 255.0)\n baseImage = linesToImage(lines, filename, width, height, color=\"#ffffff\", bgColor=\"#000000\", x=x, y=y, bgImage=baseImage, alpha=textAlpha, overwrite=True)\n\nmakeDirectories(['output/textAlphaTest/'])\n\nfor i in range(10):\n drawFrame(\"output/textAlphaTest/frame%s.png\" % zeroPad(i+1, 10), baseImage, 'The National Archives of the United States', (i+1)/10.0)\n\nprint('Done.')\n","repo_name":"beefoo/media-tools","sub_path":"tests/textAlpha.py","file_name":"textAlpha.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"54"} +{"seq_id":"17328015620","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\nfrom .error import PolicyError\n\n\ndef chained_policy(service, factories, store, prefix=\"\"):\n policy = None\n for factory in reversed(factories):\n policy = factory.instance(service, store, policy, prefix)\n return policy\n\n\ndef silent_check(policy, identity):\n success = True\n try:\n policy.check(identity)\n except PolicyError:\n success = False\n return success\n","repo_name":"mapix/bpolicy","sub_path":"bpolicy/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"54"} +{"seq_id":"25431882718","text":"#!/usr/bin/env python3\n\nimport numpy as np\nimport argparse\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport itertools as it\nfrom collections import defaultdict\nfrom matplotlib.ticker import 
FixedLocator\n\n\n# read data and check it is mono, di or tri nucleotide\ndef load_data(data):\n df = pd.read_csv(data, sep='\\t').set_index('Sample')\n features = list(df.columns)\n # check mono, di, or tri\n nbase = len(features[0])\n assert len(features) == 4 ** nbase, f\"The header line of {data.name} is incorrect.\"+\\\n f\"Should be {4**nbase} features. Only found {len(features)} features!\"\n # generate feature labels\n rNMPs = 'ACGU'\n dNMPs = 'ACGT'\n if nbase == 1:\n labels = list(rNMPs)\n elif nbase == 2:\n rNMP_loc = 1 - features.index('CA')//4\n labels = list(it.product(rNMPs, dNMPs))\n if rNMP_loc == 0:\n labels = [x[0] + x[1] for x in labels]\n else:\n labels = [x[1] + x[0] for x in labels]\n elif nbase == 3:\n rNMP_loc = 2 - features.index('GCA')//16\n labels = list(it.product(rNMPs, dNMPs, dNMPs))\n if rNMP_loc == 0:\n labels = [x[0] + x[1] + x[2] for x in labels]\n elif rNMP_loc == 1:\n labels = [x[1] + x[0] + x[2] for x in labels]\n else:\n labels = [x[2] + x[1] + x[0] for x in labels]\n return df, labels\n\n\n# determine the size group from df\ndef determine_group_size(df):\n data = list(df.iloc[0])\n u = len(data) / sum(data)\n if u > 32:\n u = 64\n elif u > 8:\n u = 16\n else:\n u = 4\n return u\n\n\n# add background frequency to label\ndef add_bg_freq(labels, group_size, fr, chrom):\n # get bg freq from background\n freqs = {x.replace('U', 'T'):None for x in labels}\n features = fr.readline().rstrip('\\n').split('\\t')[1:]\n for l in fr:\n ws = l.split('\\t')\n if ws[0] == chrom:\n ws = ws[1:]\n for i in range(len(ws)):\n freqs[features[i]] = float(ws[i])\n break\n assert list(freqs.values())[0] != None, f'Cannot find all background frequencies of {chrom} in background frequency file {fr.name}!'\n labels = [[x, freqs[x.replace('U','T')]] for x in labels]\n # sum to 1\n s = []\n for i in range(len(labels)//group_size):\n unit = sum([x[1] for x in labels[i*group_size:i*group_size+group_size]])\n s += [unit]*group_size\n for j in 
range(len(labels)):\n if not s[j]:\n labels[j][1] = 'N/A'\n else:\n labels[j][1] /= (s[j]/100)\n labels[j][1] = f'{labels[j][1]:.2f}%'\n labels = [f'{x[1]} {x[0]}' for x in labels]\n return labels\n\n# remove empty rows from df and labels\ndef remove_empty(df, labels):\n id_drop = [\n i for i, x in enumerate(labels) if x.startswith('0.00%') or x.find('N/A') != -1\n ]\n dfn = df.drop(columns=[df.columns[x] for x in id_drop])\n labelsn = [x for i, x in enumerate(labels) if i not in id_drop]\n return dfn, labelsn\n\n# draw heatmaps\ndef draw(df, labels, output, no_annot, palette):\n # size parameters\n cell_height = {1:1, 2:0.6, 3:0.3}\n cell_width = {1:1, 2:0.6, 3:0.6}\n font_size = 0.28\n font_sizes_in_cell = {1:20, 2:12, 3:12}\n\n # get information from df\n nbases = len(df.columns[1])\n samples = list(df.index)\n\n # set figure size\n longest = max([len(x) for x in samples])\n label_width = len(labels[0]) * font_size + 0.2\n sample_height = longest * font_size\n title_height = 0.3\n colorbar_width = 2\n width = len(samples) * cell_width[nbases] + label_width + colorbar_width \n height = len(labels) * cell_height[nbases] + sample_height + title_height\n fig, ax = plt.subplots(figsize=(width, height))\n plt.subplots_adjust(left=label_width/width, right=1-colorbar_width/width, \\\n top=1-title_height/height, bottom=sample_height/height)\n \n # color scale\n group_size = determine_group_size(df)\n if group_size == 4:\n cmax = 0.5\n elif group_size == 16:\n cmax = 0.125\n \n # draw heatmap\n sns.heatmap(df.T, vmin=0, vmax=cmax, ax=ax, annot=(not no_annot), cmap=palette, annot_kws={\"size\":font_sizes_in_cell[nbases]})\n\n # title and axis\n ax.set_xticklabels(samples, rotation='vertical', size=font_size*100)\n ax.set_yticklabels(labels, rotation='horizontal', size=font_size*100)\n ax.set_ylabel('')\n ax.set_xlabel('')\n\n # change top of colorbar to '0.5-1'\n cax = plt.gcf().axes[-1]\n color_labels = cax.get_ymajorticklabels()\n color_labels_texts = [i.get_text() for i 
in color_labels]\n color_labels_texts[-1] += ' - 1'\n tick_loc = cax.get_yticks().tolist()\n cax.yaxis.set_major_locator(FixedLocator(tick_loc))\n cax.set_yticklabels(color_labels_texts, fontsize=font_size*100)\n\n # show or save\n fig.savefig(output)\n \n\ndef main():\n # argparse\n parser = argparse.ArgumentParser(description='Draw heatmap for rNMP incorporation mono-, di-, or tri-nucleotide data.')\n parser.add_argument('DATA',type=argparse.FileType('r'), help='Normalized rNMP incorporation frequncy')\n parser.add_argument('-o', default='rNMP_heatmap.png', help='Output figure name, default= rNMP_heatmap.png')\n parser.add_argument('-b', type=argparse.FileType('r'), help='Select background file. If a file is selected, the background percentage is added to labels.')\n parser.add_argument('--background_chrom', default='chrM', help='Chromosome name of background file, default = chrM, use with -b')\n parser.add_argument('--no_annot', action='store_true', help='Hide percentage annotation in each cell')\n parser.add_argument('--palette', default='icefire', help='Define the palette used for the heatmap')\n parser.add_argument('--group_size', default=None, choices={4, 16, 64}, help='Set group size FORCELY to 4, 16, or 64')\n parser.add_argument('--remove_empty_row', action='store_true', help='Remove empty rows in heatmap')\n args = parser.parse_args()\n\n\n # set color settings for graph\n sns.set(style='white')\n\n # get data and information\n df, labels = load_data(args.DATA)\n if not args.group_size:\n group_size = determine_group_size(df)\n else:\n group_size = args.group_size\n \n # read background frequency\n if args.b:\n labels = add_bg_freq(labels, group_size, args.b, args.background_chrom)\n\n # remove empty rows\n if args.remove_empty_row:\n df, labels = remove_empty(df, labels)\n\n # draw heatmaps\n draw(df, labels, args.o, args.no_annot, args.palette)\n print(f'Heatmap is saved to {args.o}!')\n\nif __name__ == '__main__':\n 
main()","repo_name":"xph9876/RibosePreferenceAnalysis","sub_path":"draw_heatmap.py","file_name":"draw_heatmap.py","file_ext":"py","file_size_in_byte":6627,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"8190276673","text":"# 사용자로부터 n개의 영단어를 입력받아 그중 하나의 영단어를 문제로 하여 퀴즈를 푸는 게임\n\nfrom random import *\n\ndef quiz2():\n chance = 10 #기회\n set_question = input(\"영단어를 3개 입력하시오 (,로 구분) : \")\n\n numofquestion = set_question.split(\",\")\n\n question = sample(numofquestion, 1) #인풋중 하나만 고르기\n question = question[0] #원소가 하나뿐인 리스트이지만 자료형을 스트링으로 변환하기 위하여\n # print(question)\n\n # 자리수 만큼 밑줄 표시\n for i in range(len(question)):\n print(\"_ \", end=\"\")\n print()\n\n answer_list = [] #문제와 답을 비교하기 위한 리스트\n for j in range(10):\n answer = input(\"알파벳을 입력하시오 ({}번 남음): \".format(10-j))\n answer = str(answer)\n if answer in question:\n answer_list.append(answer)\n print(\"correct\")\n else:\n print(\"wrong\")\n\n\n # print(answer_list)\n for i in range(len(question)):\n if question[i] in answer_list: #문제의 스펠링이 리스트안에 있으면 스펠링 표시\n print(question[i] + \" \", end=\"\")\n else:\n print(\"_ \", end=\"\")\n print()\n\n # 비교를 위한 set으로 변경\n set_question = set(question)\n set_answe_list = set(answer_list)\n\n if set_answe_list == set_question:\n print(\"도전에 성공하셨습니다.\")\n return question\n\n print(\"도전에 실패하셨습니다.\")\n return question\n\n\na = quiz2()","repo_name":"cruzey/Algorithm","sub_path":"quiz2.py","file_name":"quiz2.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"40452802499","text":"import numpy as np\nimport base64\nimport json\nimport requests\n\n\ndef np_to_dict(array):\n \"\"\"\n Converts a numpy array to a dict that can be converted to json easily\n :param array: The numpy array\n :return: A dict containing the array in the custom dict format\n \"\"\"\n d = {\n \"size\": array.shape,\n \"array\": 
base64.encodebytes(array.astype(dtype=np.float32).tobytes()).decode('UTF-8')\n }\n return d\n\n\ndef weights_to_base64(weights):\n \"\"\"\n Converts a Keras array of weights to an array in the custom dict layout\n :param weights:\n :return:\n \"\"\"\n weight_strings = []\n for weight_m in weights:\n d = np_to_dict(weight_m)\n weight_strings.append(d)\n return weight_strings\n\n\ndef np_from_dict(array_dict):\n \"\"\"\n Creates a numpy array from a custom dict type\n :param array_dict:\n :return:\n \"\"\"\n weights_size = array_dict[\"size\"]\n weight_string = array_dict[\"array\"]\n weights = np.frombuffer(base64.decodebytes(weight_string.encode('UTF-8')), dtype=np.float32)\n return np.resize(weights, weights_size)\n\n\ndef base64_to_weights(weight_dicts):\n \"\"\"\n Converts a custom dict array to an array of weights for a Keras model\n :param weight_dicts: The array of weight dicts\n :return: A weight array\n \"\"\"\n weights = []\n for weight_dict in weight_dicts:\n weights.append(np_from_dict(weight_dict))\n return weights\n\n\ndef report_experience(experiences, server_addr=('localhost', 1337)):\n \"\"\"\n Reports the (s, a, r, sp, term) tuples to the server\n :param experiences: A list of experience tuples\n :param server_addr: the server address: a tuple of ip and port\n \"\"\"\n experiences = [\n (\n np_to_dict(s),\n a,\n r,\n np_to_dict(sp),\n term\n )\n for s, a, r, sp, term in experiences\n ]\n exp_json = json.dumps(experiences)\n r = requests.post(\"http://%s:%d/experience\" % server_addr, data={\"experiences\": exp_json})\n print(r.status_code, r.reason)\n\n\ndef get_model(ks, server_addr=('localhost', 1337)):\n \"\"\"\n Gets the model parameters from the server and instantiates the model with those parameters\n :param ks: Keras. The imported Keras library. 
Keras can't be imported here because that will start a TF session\n :param server_addr: the server address: a tuple of ip and port\n :return: The Keras model\n \"\"\"\n r = requests.get(\"http://%s:%d/model\" % server_addr)\n m_params = r.json()\n return model_from_dict(m_params, ks)\n\n\ndef model_from_dict(m_params, ks):\n \"\"\"\n Loads a model from the custom dict notation.\n :param m_params: Model dict in custom format\n :param ks: Keras (again, due to import problems)\n :return: The Keras model\n \"\"\"\n model = ks.models.model_from_json(json.dumps(m_params['layout']))\n model.set_weights(base64_to_weights(m_params['weights']))\n return model\n\n","repo_name":"Gerryflap/inter_rl","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33137551814","text":"\nimport sys\nfrom collections import defaultdict\nfrom xcore.xscene_schema import XSceneSchema as SCH_DEF\nfrom meta_io.genericxml import GenericXml\nimport json\nPY3 = False\nif sys.version_info.major == 3:\n from base64 import encodebytes as b64encode\n from base64 import decodebytes as b64decode\n PY3 = True\nelse:\n from base64 import encodestring as b64encode\n from base64 import decodestring as b64decode\n \n\nclass xconstant(object):\n \"\"\"\n list of xmltag to map factory to\n \"\"\"\n\n attribute = \"attribute\"\n encoded = \"encoded\"\n decoded = \"decoded\"\n fields = \"%s%sfields\" % (SCH_DEF.SDAT, SCH_DEF.SEP)\n variants = \"%s%svariants\" % (SCH_DEF.SDAT, SCH_DEF.SEP)\n\n########################################################\n# definition class hierachy\n# base\nclass XScene(GenericXml):\n Property = \"XScene\"\n __filter = set()\n _sep = \"/\"\n\n def __init__(self, tag=None):\n super(XScene, self).__init__(tag)\n\n @classmethod\n def set_filter(cls, property_list):\n cls.__filter = set(property_list)\n\n def print_nice(self, max_char=120, tab=\"\"):\n 
\"\"\"overwrite base class\n Args:\n max_char(int): clip the value of attrib\n tab(string): indent\n \"\"\"\n\n if self.Property in self.__filter:\n super(XScene, self).print_nice(max_char, tab)\n else:\n for ch in self.get_children():\n ch.print_nice(max_char=max_char, tab=tab + \" \" * 4)\n\n def get_name(self):\n if hasattr(self, \"name\"):\n return self.name\n return self.Property\n\n def set_geom_instance_data(self, geom):\n # some custom runtime data\n # not serialysed\n self._geom = geom\n\n def get_geom_instance_data(self):\n # not serialized\n if hasattr(self, \"_geom\"):\n return self._geom\n return None\n\n def get_path(self):\n \"\"\"\n\n Returns:\n path from the root\n \"\"\"\n\n parent = self.get_parent()\n if parent is None:\n return self.get_name()\n pp = parent.get_path()\n if pp != \"\":\n return pp + self._sep + self.get_name()\n else:\n return self.get_name()\n\n def delegate(self, args):\n \"\"\"build a dict with _x args\"\"\"\n d = dict()\n for x in args:\n d.update({\"_@x%s\" % x: args.get(x, \"\")})\n self.__dict__.update(d)\n\n def get_delegate(self):\n \"\"\"build a dict with _x args\"\"\"\n d = dict()\n for x in self.__dict__:\n if x.startswith(\"_@x\"):\n d.update({x[3:]: self.__dict__[x]})\n return d\n\n @classmethod\n def factory(cls, default_classes, log=None, mother_class=None):\n class SceneDefault(XScene):\n log = None\n __doc__ = (\"py representation SceneDefault\",)\n\n if mother_class == None:\n mother_class = XScene\n upmethod = dict()\n class_def = list()\n for classRoot in default_classes:\n upmethod.update(\n {\n \"log\": log,\n \"Property\": classRoot,\n \"__doc__\": \"py representation %s\" % classRoot,\n }\n )\n class_def.append(\n type(\n classRoot + \"Xml\",\n (mother_class,),\n upmethod,\n )\n )\n cls.__filter.update(default_classes)\n named = [x.Property for x in class_def]\n result = dict(zip(named, class_def))\n result.update({\"default\": SceneDefault})\n return result\n\nclass XLayerType(XScene):\n Property = 
SCH_DEF.Layer_Type_layer.tag\n def __init__(self, tag=None):\n tag = SCH_DEF.Layer_Type_layer.tag if tag is None else tag\n super(XLayerType, self).__init__(tag)\n self.filetype = \"\"\n self._fields = dict()\n\n def set_fields(self, adict):\n self._fields.update(adict)\n\n def get_sublayers(self):\n return self.get_children()\n\n##########################################################\nclass XStage(XScene):\n Property = SCH_DEF.Stage_Type.tag\n def __init__(self, tag=None):\n tag = SCH_DEF.Stage_Type.tag if tag is None else tag\n super(XStage, self).__init__(tag)\n self.filename = \"\"\n\nclass XStageAsset(XScene):\n Property = SCH_DEF.Stage_Type_asset.tag\n def __init__(self, tag=None):\n tag = SCH_DEF.Stage_Type_asset.tag if tag is None else tag\n super(XStageAsset, self).__init__(tag)\n\nclass XStageShot(XScene):\n Property = SCH_DEF.Stage_Type_shot.tag\n def __init__(self, tag=None):\n tag = SCH_DEF.Stage_Type_shot.tag if tag is None else tag\n super(XStageShot, self).__init__(tag)\n\nclass XStageSeq(XScene):\n Property = SCH_DEF.Stage_Type_seq.tag\n def __init__(self, tag=None):\n tag = SCH_DEF.Stage_Type_seq.tag if tag is None else tag\n super(XStageSeq, self).__init__(tag)\n\nclass XStageLoc(XScene):\n Property = SCH_DEF.Stage_Type_loc.tag\n def __init__(self, tag=None):\n tag = SCH_DEF.Stage_Type_loc.tag if tag is None else tag\n super(XStageLoc, self).__init__(tag)\n\n##########################################################\n# USD sublayer\nclass XPayload(XLayerType):\n Property = SCH_DEF.Layer_Type_payload.tag\n def __init__(self, tag=None):\n tag = SCH_DEF.Layer_Type_payload.tag if tag is None else tag\n super(XPayload, self).__init__(tag)\n\nclass XReference(XLayerType):\n Property = SCH_DEF.Layer_Type_reference.tag\n def __init__(self, tag=None):\n tag = SCH_DEF.Layer_Type_reference.tag if tag is None else tag\n super(XReference, self).__init__(tag)\n\n\nclass XSublayer(XLayerType):\n Property = SCH_DEF.Layer_Type_sublayer.tag\n def __init__(self, 
tag=None):\n tag = SCH_DEF.Layer_Type_sublayer.tag if tag is None else tag\n super(XSublayer, self).__init__(tag)\n\n\nclass XEmpty(XLayerType):\n Property = SCH_DEF.Layer_Type_empty.tag\n def __init__(self, tag=None):\n tag = SCH_DEF.Layer_Type_empty.tag if tag is None else tag\n super(XEmpty, self).__init__(tag)\n\n##########################################################\n# sublayer entry type\nclass XRootEntry(XSublayer):\n Property = SCH_DEF.Entry_Type.tag\n def __init__(self, tag=None):\n tag = SCH_DEF.Entry_Type.tag if tag is None else tag\n super(XRootEntry, self).__init__(tag)\n\nclass XRootEntryAsset(XRootEntry):\n Property = SCH_DEF.Entry_Type_asset.tag\n def __init__(self, tag=None):\n tag = SCH_DEF.Entry_Type_asset.tag if tag is None else tag\n super(XRootEntryAsset, self).__init__(tag)\n self.asset_type = None\n\n def set_asset_type(self, a_type_asset):\n self.asset_type = a_type_asset\n\nclass XRootEntryShot(XRootEntry):\n Property = SCH_DEF.Entry_Type_shot.tag\n def __init__(self, tag=None):\n tag = SCH_DEF.Entry_Type_shot.tag if tag is None else tag\n super(XRootEntryShot, self).__init__(tag)\n\nclass XRootEntrySequence(XRootEntry):\n Property = SCH_DEF.Entry_Type_seq.tag\n def __init__(self, tag=None):\n tag = SCH_DEF.Entry_Type_seq.tag if tag is None else tag\n super(XRootEntrySequence, self).__init__(tag)\n\nclass XRootEntryLocation(XRootEntry):\n Property = SCH_DEF.Entry_Type_loc.tag\n def __init__(self, tag=None):\n tag = SCH_DEF.Entry_Type_loc.tag if tag is None else tag\n super(XRootEntryLocation, self).__init__(tag)\n\n# opinions\nclass XAssetOpinion(XSublayer):\n Property = SCH_DEF.Opinion_asset.tag\n def __init__(self, tag=None):\n tag = SCH_DEF.Opinion_asset.tag if tag is None else tag\n super(XAssetOpinion, self).__init__(tag)\n self.f_step = \"[department]\"\n\nclass XAssetOpinionDesc(XAssetOpinion):\n Property = SCH_DEF.Opinion_asset_desc.tag\n def __init__(self, tag=None):\n tag = SCH_DEF.Opinion_asset_desc.tag if tag is None else 
tag\n super(XAssetOpinionDesc, self).__init__(tag)\n\n\nclass XAssetOpinionGeom(XAssetOpinion):\n Property = SCH_DEF.Opinion_asset_geom.tag\n def __init__(self, tag=None):\n tag = SCH_DEF.Opinion_asset_geom.tag if tag is None else tag\n super(XAssetOpinionGeom, self).__init__(tag)\n\n\nclass XShotOpinion(XSublayer):\n Property = SCH_DEF.Opinion_shot.tag\n def __init__(self, tag=None):\n tag = SCH_DEF.Opinion_shot.tag if tag is None else tag\n super(XShotOpinion, self).__init__(tag)\n\nclass XShotOpinionManifest(XShotOpinion):\n Property = SCH_DEF.Opinion_shot_manifest.tag\n def __init__(self, tag=None):\n tag = SCH_DEF.Opinion_shot_manifest.tag if tag is None else tag\n super(XShotOpinionManifest, self).__init__(tag)\n\nclass XShotOpinionGeom(XShotOpinion):\n Property = SCH_DEF.Opinion_shot_geom.tag\n def __init__(self, tag=None):\n tag = SCH_DEF.Opinion_shot_geom.tag if tag is None else tag\n super(XShotOpinionGeom, self).__init__(tag)\n\n\n### need more work later for seq and loc\nclass XLayerOther(XSublayer):\n Property = SCH_DEF.Layer_Type_layer_other.tag\n def __init__(self, tag=None):\n tag = SCH_DEF.Layer_Type_layer_other.tag if tag is None else tag\n super(XLayerOther, self).__init__(tag)\n\n\nclass InstanceAttrib(XScene):\n \"\"\"Basic stupid Attribute class, value are string with type named\n support only str, float and int for now\n \"\"\"\n\n Property = \"ATTRIBUTE\"\n __maptype = {\n \"int\": \"int\",\n \"str\": \"string\",\n \"unicode\": \"string\",\n \"float\": \"float\",\n \"bool\": \"bool\",\n }\n\n def __init__(self, tag=xconstant.attribute):\n super(InstanceAttrib, self).__init__(tag)\n self.type = \"string\"\n self.value = \"\"\n self.name = \"\"\n\n def get_data():\n return self\n\n def before_to_xml(self):\n # this is a method that can be overwriten to check valid and format for xml\n pass\n\n\n def do_encode_value(self, value):\n # the test for this encoding and xml is not done TODO.\n \n if PY3:\n self.value = 
b64encode(bytes(json.dumps(value), \"UTF-8\")).decode('UTF-8')\n else:\n self.value = b64encode(json.dumps(value))\n\n self.type = xconstant.encoded\n return self.value\n\n def decode_value(self):\n if PY3:\n self.value = json.loads(b64decode(bytes(self.value, \"UTF-8\")).decode('UTF-8'))\n else:\n self.value = json.loads(b64decode(self.value))\n self.type = xconstant.decoded\n return self.value\n\n def set_data_with_type(self, args):\n try:\n name = args.pop(\"name\")\n value = args.pop(\"value\")\n except Exception as e:\n raise Exception(\"FOR NOW: {}\".format(e))\n\n self.name = name\n if isinstance(value, dict):\n self.do_encode_value(value)\n else:\n\n self.type = self.__maptype[value.__class__.__name__]\n self.value = str(value)\n\nclass FieldsInstance(InstanceAttrib):\n Property = \"fields\"\n def __init__(self, tag=None):\n if tag is None:\n tag = xconstant.fields\n super(FieldsInstance, self).__init__(tag)\n self.__lookup = dict()\n self.key = \"\"\n \n def set_key(self, value):\n self.key = value\n\nclass VariantsInstance(InstanceAttrib):\n Property = \"variants\"\n def __init__(self, tag=None):\n if tag is None:\n tag = xconstant.variants\n super(VariantsInstance, self).__init__(tag)\n self.__lookup = dict()\n\n\n# final gather\n_XGenerator = {\n SCH_DEF.Layer_Type_layer.tag : XLayerType,\n SCH_DEF.Stage_Type.tag : XStage,\n SCH_DEF.Stage_Type_asset.tag : XStageAsset,\n SCH_DEF.Stage_Type_shot.tag : XStageShot,\n SCH_DEF.Stage_Type_seq.tag : XStageSeq,\n SCH_DEF.Stage_Type_loc.tag : XStageLoc,\n SCH_DEF.Layer_Type_payload.tag : XPayload,\n SCH_DEF.Layer_Type_reference.tag : XReference,\n SCH_DEF.Layer_Type_sublayer.tag : XSublayer,\n SCH_DEF.Layer_Type_empty.tag : XEmpty,\n SCH_DEF.Entry_Type.tag : XRootEntry,\n SCH_DEF.Entry_Type_asset.tag : XRootEntryAsset,\n SCH_DEF.Entry_Type_shot.tag : XRootEntryShot,\n SCH_DEF.Entry_Type_seq.tag : XRootEntrySequence,\n SCH_DEF.Entry_Type_loc.tag : XRootEntryLocation,\n SCH_DEF.Opinion_asset.tag : XAssetOpinion,\n 
SCH_DEF.Opinion_asset_desc.tag : XAssetOpinionDesc,\n SCH_DEF.Opinion_asset_geom.tag : XAssetOpinionGeom,\n SCH_DEF.Opinion_shot.tag : XShotOpinion,\n SCH_DEF.Opinion_shot_manifest.tag : XShotOpinionManifest,\n SCH_DEF.Opinion_shot_geom.tag : XShotOpinionGeom,\n SCH_DEF.Layer_Type_layer_other.tag : XLayerOther,\n\n xconstant.variants : VariantsInstance,\n xconstant.fields : FieldsInstance,\n xconstant.attribute: InstanceAttrib\n}\n","repo_name":"erictexier/usdsimple","sub_path":"python/xcore/xscene.py","file_name":"xscene.py","file_ext":"py","file_size_in_byte":12746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71580197603","text":"import os\nimport subprocess\n\nimport pytest\n\nfrom app.synspec import wrapper\n\n\ndef test_synspecwrapper_remove_spectrum(mocker):\n syn = wrapper.SynspecWrapper(teff=20000, logg=4, wstart=4400, wend=4600)\n\n mocker.patch(\"os.remove\")\n\n syn._remove_spectrum()\n\n os.remove.assert_called_once()\n\n\ndef test_synspecwrapper_no_spectrum():\n syn = wrapper.SynspecWrapper(teff=20000, logg=4, wstart=4400, wend=4401)\n\n with pytest.raises(wrapper.NoSpectrumError):\n syn.spectrum\n\n\ndef test_synspecwrapper_spectrum(mocker):\n syn = wrapper.SynspecWrapper(teff=20000, logg=4, wstart=4400, wend=4401)\n\n mock_spectrum_file = \" 4400.000 3.508E+07\\n 4400.010 3.507E+07\\n\"\n test_spectrum = [\n {\"wavelength\": 4400, \"flux\": 35080000},\n {\"wavelength\": 4400.01, \"flux\": 35070000},\n ]\n\n mocker.patch(\"builtins.open\", mocker.mock_open(read_data=mock_spectrum_file))\n returned_spectrum = syn.spectrum\n\n assert returned_spectrum == test_spectrum # nosec\n\n\ndef test_synspecwrapper_calculate_spectrum(mocker):\n syn = wrapper.SynspecWrapper(teff=20000, logg=4, wstart=4400, wend=4401)\n\n mocker.patch(\"subprocess.call\")\n syn.calculate_spectrum()\n\n subprocess.call.assert_called_once()\n\n\ndef test_synspec():\n wstart, wend = 4000, 5000\n syn = 
wrapper.SynspecWrapper(teff=20000, logg=4, wstart=wstart, wend=wend)\n\n syn.calculate_spectrum()\n\n assert syn.spectrum[0][\"wavelength\"] == pytest.approx(wstart) # nosec\n assert syn.spectrum[-1][\"wavelength\"] == pytest.approx(wend) # nosec\n","repo_name":"gabraganca/quickstar","sub_path":"src/worker/tests/test_synspec.py","file_name":"test_synspec.py","file_ext":"py","file_size_in_byte":1535,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"22238134486","text":"with open('output.txt', 'w') as file:\n for i in range(312400001, 312401632):\n\n #2021 /pupilstwoyear/21*******\n #url = f''\n\n #2022 /pupilslastyear/22*******\n #url = f''\n\n url = f''\n\n file.write(url + '\\n')\n print(f\"Wrote URL for {i}.jpg\")\n\n","repo_name":"Munkhbadral1/P0rtr4it","sub_path":"EYSH/mm.py","file_name":"mm.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"15766903711","text":"#!/usr/bin/env python\n\nimport unittest\nimport subprocess\n\nimport citadel.nodes.language\n\n\nclass Language(unittest.TestCase):\n\n def test_output_string(self):\n yml = {}\n yml = 'xcode8.1'\n language = citadel.nodes.language.Language(yml, ['build'])\n self.assertTrue(citadel.tools.bash_syntax('\\n'.join(language.output)))\n self.assertEqual(len(language.errors), 0)\n\nif __name__ == '__main__':\n unittest.main(verbosity=2)\n","repo_name":"grilo/citadel","sub_path":"tests/test_language.py","file_name":"test_language.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"54"} +{"seq_id":"3148033075","text":"import numpy as np\nimport scipy as sp\nfrom scipy.sparse import csr_matrix\n\nimport mfem.ser as mfem\nfrom mfem.ser import intArray\n\nclass mfem_adv_diff:\n ''' \n MFEM testbed matrix interface\n \n Attributes\n ----------\n \n\n Methods\n 
-------\n \n '''\n\n \n def __new__(self, prob_refinement, **kwargs):\n '''\n Return a PyMFEM discretization\n\n Input\n -----\n prob_refinement : refinement of problem (0, 1, 2, ...) to generate\n \n kwargs : dictionary of MFEM parameters, must contain the following keys\n \n 'gamma' : float \n diffusion constant\n \n 'meshfile' : string\n filename for mesh\n \n 'order' : int\n finite-element order\n \n 'problem_type' : string, \n default is 'constant_adv', but can be one of\n 'div_free_recirc', 'clockwise_rotation', 'constant_adv', 'diffusion_only'\n \n Output\n ------\n Dictionary with the following keys\n \n A : csr matrix\n sparse discretization matrix\n \n b : array\n right-hand-side\n \n B : array\n near null space modes\n \n vertices : array\n spatial vertices for each dof \n \n docstring : string\n describes discretization and problem parameters\n '''\n \n ref_levels = prob_refinement + 2\n \n ##\n # Retrieve discretization paramters\n try:\n gamma = kwargs ['gamma']\n meshfile = kwargs['meshfile']\n order = kwargs['order']\n except:\n raise ValueError(\"Incorrect kwargs for pymfem generator\") \n \n ##\n # Retrieve problem type\n try:\n problem_discr = kwargs['problem_type']\n except:\n problem_discr = 'constant_adv'\n #\n print(f\"Using problem type {problem_discr}\")\n \n \n ##\n # Generate spatial discretization matrix\n if problem_discr == \"constant_adv\":\n mfem_problem = 0\n docstring = \"constant advection speed over the domain, b(x,y) = [sqrt(2/3), sqrt(1/3)]\"\n \n elif problem_discr == \"clockwise_rotation\":\n mfem_problem = 1\n docstring = \"clockwise advection rotation around the origin, b(x,y) = [pi/2 y, -pi/2 x]\"\n \n elif problem_discr == \"div_free_recirc\":\n mfem_problem = 2\n docstring = \"div-free recirculating advection, b(x,y)= [x(1-x)(2y-1), -(2x-1)(1-y)y]\"\n \n elif problem_discr == \"diffusion_only\":\n mfem_problem = 3\n docstring = \"no advection (diffusion only)\"\n else:\n raise AssertionError(f\"Invalid problem 
description {problem_discr}\")\n \n ##\n # Generate matrices and near nullspace modes\n A, g, vertices, M, h_min, h_max = self.mfem_discretization(self, mfem_problem, gamma, meshfile, ref_levels, order)\n b = sp.rand(A.shape[0],1)\n B = np.ones_like(b)\n \n ##\n # Filter small entries in A\n max_A = np.max(np.abs(A.data))\n A.data[ np.abs(A.data)/max_A < 1e-14 ] = 0.0\n A.eliminate_zeros()\n \n \n docstring = f\"DG advection diffusion using mesh file {meshfile}, FEM order {order}, diffusion constant {gamma}, and problem type {problem_discr} with \" + docstring\n \n return {'A':A, 'b':b, 'B':B, 'vertices':vertices, 'docstring':docstring}\n \n\n def ComputeMeshSize(self, mesh):\n \n '''\n Description\n ------------\n This function computes the maximum and minimum mesh sizes for a mesh\n \n Input\n ------\n mesh: Finite element discretization mesh\n \n Output\n ------\n h_min: Minimum mesh size\n \n h_max: Maximum mesh size\n \n '''\n \n h_max = -1.0\n h_min = -1.0\n NumOfElements = mesh.GetNE()\n \n for i in range(NumOfElements):\n h = mesh.GetElementSize(i)\n if (i == 0):\n h_min = h_max = h\n else:\n if (h < h_min): h_min = h\n if (h > h_max): h_max = h\n \n return (h_min, h_max)\n\n \n def mfem_discretization(self, problem, gamma, meshfile, ref_levels, order):\n \n '''\n Description\n ------------\n This module uses MFEM to define a simple finite element discretization\n and generates a finite element stiffness matrix K as well as mass matrix M\n with a simple uniform mesh refinement.\n \n Input\n -----\n problem : parameter to define problem type for various type orientation in advection\n gamma : User supplied diffusion coefficient\n meshfile : Identifier for finite element mesh file (.mesh format)\n ref_levels : Mesh refinement level \n order : order of the finite element space\n \n \n Output\n ------\n NumOfElements : Number of elements in finite element discretization mesh\n K : Finite element stiffness matrix (explicit)\n M : Finite element mass matrix 
(implicit)\n g : Initial condition\n \n Parameter choices for an example run of this module\n ----------------------------------------------------\n \n problem = 0\n ref_levels = 1\n order = 1\n '''\n \n \n # 1. Read the mesh from the given mesh file. We can handle geometrically\n # periodic meshes in this code.\n mesh = mfem.Mesh(meshfile, 1,1)\n dim = mesh.Dimension()\n sdim = mesh.SpaceDimension()\n \n \n # 2. Refine the mesh to increase the resolution. In this example we do\n # 'ref_levels' of uniform refinement, where 'ref_levels' is a\n # input parameter. If the mesh is of NURBS type, we convert it\n # to a (piecewise-polynomial) high-order mesh.\n for lev in range(ref_levels):\n mesh.UniformRefinement();\n if mesh.NURBSext:\n mesh.SetCurvature(max(order, 1))\n \n bb_min, bb_max = mesh.GetBoundingBox(max(order, 1));\n (h_min, h_max) = self.ComputeMeshSize(self, mesh)\n \n \n # 3. Begin constructing stiffness matrix (K) for advection \n # Define coefficient using VectorPyCoefficient and PyCoefficient\n class velocity_coeff(mfem.VectorPyCoefficient):\n def EvalValue(self, x): \n dim = len(x)\n \n center = (bb_min + bb_max)/2.0\n # map to the reference [-1,1] domain \n X = 2 * (x - center) / (bb_max - bb_min)\n if problem == 0:\n # Constant advection \n if dim == 1: v = [1.0,]\n elif dim == 2: v = [np.sqrt(2./3.), np.sqrt(1./3)]\n elif dim == 3: v = [np.sqrt(3./6.), np.sqrt(2./6), np.sqrt(1./6.)]\n elif problem == 1:\n # Clockwise rotation in 2D around the origin \n w = pi/2\n if dim == 1: v = [1.0,]\n elif dim == 2: v = [w*X[1], - w*X[0]]\n elif dim == 3: v = [w*X[1], - w*X[0], 0]\n elif (problem == 2):\n # Div-Free recirculation\n if dim == 1: v = [1.0,]\n elif dim == 2: v=[ X[0]*(1-X[0])*(2*X[1]-1), -(2*X[0]-1)*(1-X[1])*X[1] ]\n elif dim == 3:\n print(\"div-free recirc does not support 3D, using constant advection instead\")\n v = [np.sqrt(3./6.), np.sqrt(2./6), np.sqrt(1./6.)]\n elif (problem == 3):\n # Pure diffusion\n if dim == 1: v = [0.0,]\n elif dim == 
2: v = [0.0, 0.0]\n elif dim == 3: v = [0.0, 0.0, 0.0]\n \n return v\n \n class rhs_coeff(mfem.PyCoefficient):\n def EvalValue(self, x): \n dim = len(x)\n center = (bb_min + bb_max)/2.0\n # map to the reference [-1,1] domain \n X = 2 * (x - center) / (bb_max - bb_min)\n return np.sin(np.pi * X[0]) * np.sin(np.pi * X[1])\n \n \n # 4. Inflow boundary condition (zero for the problems considered in this code)\n class inflow_coeff(mfem.PyCoefficient):\n def EvalValue(self, x):\n return 0\n \n \n # 5. Define the discontinuous DG finite element space of the given\n # polynomial order on the refined mesh.\n fec = mfem.DG_FECollection(order, dim)\n fes = mfem.FiniteElementSpace(mesh, fec)\n \n \n # 6. Set up and assemble the bilinear and linear forms corresponding to the\n # DG discretization for Advection. The DGTraceIntegrator involves integrals\n # over mesh interior faces.\n velocity = velocity_coeff(dim)\n inflow = inflow_coeff()\n m = mfem.BilinearForm(fes)\n m.AddDomainIntegrator(mfem.MassIntegrator())\n k = mfem.BilinearForm(fes)\n k.AddDomainIntegrator(mfem.ConvectionIntegrator(velocity, -1.0))\n k.AddInteriorFaceIntegrator(\n mfem.TransposeIntegrator(mfem.DGTraceIntegrator(velocity, 1.0, -0.5)))\n k.AddBdrFaceIntegrator(\n mfem.TransposeIntegrator(mfem.DGTraceIntegrator(velocity, 1.0, -0.5)))\n \n m.Assemble()\n m.Finalize()\n skip_zeros = 0\n k.Assemble(skip_zeros)\n k.Finalize(skip_zeros)\n \n # Computes RHS inflow conditions (not used here)\n #b = mfem.LinearForm(fes)\n #b.AddBdrFaceIntegrator(\n # mfem.BoundaryFlowIntegrator(inflow, velocity, -1.0, -0.5)) \n #b.Assemble()\n \n \n # 7. 
Generate Scipy Sparse CSR stiffness matrix for advection and RHS vector\n Kmfem = k.SpMat()\n Mmfem = m.SpMat()\n K = csr_matrix( (Kmfem.GetDataArray().copy(), Kmfem.GetJArray().copy(), Kmfem.GetIArray().copy()) ) \n M = csr_matrix( (Mmfem.GetDataArray().copy(), Mmfem.GetJArray().copy(), Mmfem.GetIArray().copy()) ) \n rhs_fcn = rhs_coeff()\n g = mfem.GridFunction(fes)\n g.ProjectCoefficient(rhs_fcn)\n g = g.GetDataArray().copy()\n \n \n # 8. IP method for Diffusion operator\n #\n # Set up the bilinear form a(.,.) on the finite element space\n # corresponding to the Laplacian operator -Delta, by adding the Diffusion\n # domain integrator and the interior and boundary DG face integrators.\n # Note that boundary conditions are imposed weakly in the form, so there\n # is no need for dof elimination. After assembly and finalizing we\n # extract the corresponding sparse matrix Dmfem.\n sigma = -1.0\n kappa = (order+1)**2.\n \n if gamma == 0:\n D = csr_matrix( ([], ([],[])), (K.shape[0],K.shape[1]) )\n else:\n \n # Not currently used\n # Could try and set a nonzero Dirichlet boundary (to be added to RHS) with dbcCoef\n # and ProjectCoefficient, like is done above for g.\n #dbc_val=0.0\n #dbcCoef = mfem.ConstantCoefficient(dbc_val) \n \n # Define weak Dirichlet boundaries over whole Domain for diffusion\n # Create \"marker arrays\" to define the portions of boundary associated\n # with each type of boundary condition. These arrays have an entry\n # corresponding to each boundary attribute. 
Placing a '1' in entry i\n # marks attribute i+1 as being active, '0' is inactive.\n dbc_bdr = mfem.intArray(mesh.bdr_attributes.Max()) \n dbc_bdr.Assign(0)\n dbc_bdr[1] = 1\n \n gamma_coeff = mfem.ConstantCoefficient(gamma)\n a = mfem.BilinearForm(fes)\n a.AddDomainIntegrator(mfem.DiffusionIntegrator(gamma_coeff))\n a.AddInteriorFaceIntegrator(mfem.DGDiffusionIntegrator(gamma_coeff, sigma, kappa))\n a.AddBdrFaceIntegrator(mfem.DGDiffusionIntegrator(gamma_coeff, sigma, kappa),dbc_bdr)\n a.Assemble()\n a.Finalize()\n Dmfem = a.SpMat()\n D = csr_matrix( (Dmfem.GetDataArray().copy(), Dmfem.GetJArray().copy(), Dmfem.GetIArray().copy()) )\n \n \n # 9. Eliminate zeros from all the matrices\n M.eliminate_zeros()\n K.eliminate_zeros()\n D.eliminate_zeros()\n \n \n # 10. Compile vector of vertices\n vertices = np.zeros((fes.GetTrueVSize(), sdim))\n R = fes.GetConformingRestriction()\n if R is not None: \n VDof2TDof = np.zeros(fes.GetNDofs(), dtype=int)\n for i, j in enumerate(R.GetJArray()):\n VDof2TDof[j] = i\n TDof2Vdof = R.GetJArray().copy()\n else:\n VDof2TDof = None\n TDof2VDof = None\n \n for j in range(fes.GetNE()):\n el = fes.GetFE(j)\n tr = fes.GetElementTransformation(j)\n vdofs = fes.GetElementVDofs(j)\n \n tdofs= vdofs if VDof2TDof is None else [VDof2TDof[k] for k in vdofs]\n \n ir = el.GetNodes()\n for k, tdof in enumerate(tdofs):\n vertices[tdof] = tr.Transform(ir.IntPoint(k))\n ##\n NumOfElements = mesh.GetNE()\n \n \n ##\n # This will print the mesh to file\n #mesh.Print('refined.mesh', 8)\n \n return (D+K, g, vertices, M, h_min, h_max)\n","repo_name":"pyamg/pyamg-testbed","sub_path":"pyamg/testbed/mfem_adv_diff/mfem_adv_diff.py","file_name":"mfem_adv_diff.py","file_ext":"py","file_size_in_byte":13902,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"13832551736","text":"# file: main.py\n# description: Render simple text using different saturation levels\nimport badger2040\nimport machine\nimport 
time\ndisplay = badger2040.Badger2040()\n# normal update speed is used to fully clear and show a fully saturated part of the phrase\ndisplay.update_speed(badger2040.UPDATE_NORMAL)\ndisplay.pen(15)\ndisplay.clear()\ndisplay.pen(0)\nHALF_H = int(badger2040.HEIGHT/2)\ndisplay.text(\"Hello\", 0, HALF_H, rotation=0)\ndisplay.update()\n# turbo update speed is used to refill the phrase quickly...each press shows the next level (16?) of saturation\ndisplay.update_speed(badger2040.UPDATE_TURBO)\nrunning = True\nwhile running:\n # handle button functionality\n if display.pressed(badger2040.BUTTON_A):\n running = False\n if display.pressed(badger2040.BUTTON_B):\n # render the full phrase \n display.text(\"Hello World\", 0, HALF_H, rotation=0)\n display.update()\n # limit the sampling rate to ten per second\n time.sleep(.1)\n\nmachine.reset()\n# \n\n","repo_name":"jasoneplumb/droids","sub_path":"droid00d/hello-world/image/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"43790519527","text":"from flask import render_template, redirect, url_for, request, flash\nfrom . 
import bp as app\nfrom app.blueprints.main.models import Pokemon\nfrom flask_login import login_required, current_user\nimport requests\nfrom app import db\n\nmy_request = requests.get('https://pokeapi.co/api/v2/pokemon/pikachu')\njson_data = my_request.json()\n\n@app.route(\"/\")\ndef home():\n if not current_user.is_authenticated:\n return redirect(url_for('auth.login'))\n pokemon = Pokemon.query.all()\n\n pokemon.sort(key=lambda post: post.date_created, reverse=True)\n\n print(pokemon)\n\n context = {\n \"pokemon\": pokemon,\n }\n\n\n@app.route(\"/usercollection\")\ndef usercollection():\n if not current_user.is_authenticated:\n return redirect(url_for('auth.login'))\n return render_template('usercollection.html')","repo_name":"MaddyBoudreau/pokemon-api-flask-reupload","sub_path":"app/blueprints/main/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"27752174496","text":"# datastore transactions of CartsView\n\nfrom datetime import datetime, date\nimport logging\nimport os\nimport sys\n\nfrom pandas import read_sql\n\nfrom data.datastore import (\n session_scope,\n Audn,\n Branch,\n Cart,\n Fund,\n Order,\n Lang,\n Library,\n MatType,\n Resource,\n ShelfCode,\n User,\n Vendor,\n)\nfrom data.datastore_worker import (\n count_records,\n get_cart_data_view_records,\n insert,\n retrieve_record,\n retrieve_records,\n retrieve_cart_details_view_stmn,\n update_record,\n retrieve_first_record,\n retrieve_last_record_filtered,\n)\nfrom errors import BabelError\nfrom logging_settings import format_traceback\nfrom gui.utils import get_id_from_index\nfrom ingest.sierra_exports import get_sierra_ids\nfrom marc.marc21 import make_bib\n\n\nmlogger = logging.getLogger(\"babel\")\n\n\ndef get_carts_data(system_id, user=\"All users\", status=\"\"):\n data = []\n\n try:\n with session_scope() as session:\n recs = get_cart_data_view_records(session, 
system_id, user, status)\n for r in recs:\n data.append(\n [\n r.cart_id,\n r.cart_name,\n f\"{r.cart_date:%y-%m-%d %H:%M}\",\n r.cart_status,\n r.cart_owner,\n r.linked,\n ]\n )\n return data\n\n except Exception as exc:\n _, _, exc_traceback = sys.exc_info()\n tb = format_traceback(exc, exc_traceback)\n mlogger.error(\"Unhandled error on cart data retrieval.\" f\"Traceback: {tb}\")\n raise BabelError(exc)\n\n\ndef export_orders_to_marc_file(fh, cart_rec, progbar):\n # this has to be rewritten to make it more transparent\n # and easier to maintain\n\n try:\n progbar[\"value\"] = 0\n\n # overwrite existing files\n if os.path.isfile(fh):\n try:\n os.remove(fh)\n except WindowsError as e:\n raise BabelError(f\"File in use. Error: {e}\")\n\n with session_scope() as session:\n rec_count = count_records(session, Order, cart_id=cart_rec.did)\n progbar[\"maximum\"] = rec_count\n\n selector = retrieve_record(session, User, did=cart_rec.user_id)\n blanketPO = cart_rec.blanketPO\n # determine some global values\n if cart_rec.system_id == 1:\n oclc_code = \"BKL\"\n selector_code = selector.bpl_code\n\n elif cart_rec.system_id == 2:\n oclc_code = \"NYP\"\n selector_code = selector.nyp_code\n\n lib_rec = retrieve_record(session, Library, did=cart_rec.library_id)\n library_code = lib_rec.code\n\n ord_recs = retrieve_records(session, Order, cart_id=cart_rec.did)\n\n for order in ord_recs:\n mat_rec = retrieve_record(session, MatType, did=order.matType_id)\n ven_rec = retrieve_record(session, Vendor, did=order.vendor_id)\n\n if cart_rec.system_id == 1:\n order.mat_bib = mat_rec.bpl_bib_code\n order.mat_ord = mat_rec.bpl_ord_code\n order.vendor = ven_rec.bpl_code\n elif cart_rec.system_id == 2:\n order.mat_bib = mat_rec.nyp_bib_code\n order.mat_ord = mat_rec.nyp_ord_code\n order.vendor = ven_rec.nyp_code\n\n # retrieve joined values\n rec = retrieve_record(session, Audn, did=order.audn_id)\n order.audn = rec.code\n rec = retrieve_record(session, Lang, did=order.lang_id)\n 
order.lang = rec.code\n\n copies = 0\n locs = []\n funds = []\n for loc in order.locations:\n rec = retrieve_record(session, Branch, did=loc.branch_id)\n branch = rec.code\n try:\n rec = retrieve_record(session, ShelfCode, did=loc.shelfcode_id)\n shelfcode = rec.code\n shelf_with_audn = rec.includes_audn\n except AttributeError:\n shelfcode = \"\"\n shelf_with_audn = False\n try:\n rec = retrieve_record(session, Fund, did=loc.fund_id)\n fund = rec.code\n except AttributeError:\n fund = \"\"\n copies += loc.qty\n\n if shelf_with_audn:\n loc_str = f\"{branch}{order.audn}{shelfcode}/{loc.qty}\"\n else:\n if shelfcode is None:\n loc_str = f\"{branch}/{loc.qty}\"\n else:\n loc_str = f\"{branch}{shelfcode}/{loc.qty}\"\n locs.append(loc_str)\n\n fund_str = f\"{fund}/{loc.qty}\"\n funds.append(fund_str)\n\n order.copies = str(copies)\n order.locs = \",\".join(locs)\n order.funds = \",\".join(funds)\n order.order_date = datetime.strftime(date.today(), \"%m-%d-%Y\")\n\n make_bib(fh, oclc_code, library_code, blanketPO, selector_code, order)\n progbar[\"value\"] += 1\n progbar.update()\n\n except Exception as exc:\n _, _, exc_traceback = sys.exc_info()\n tb = format_traceback(exc, exc_traceback)\n mlogger.error(\"Unhandled error on saving to MARC.\" f\"Traceback: {tb}\")\n raise BabelError(exc)\n\n\ndef get_cart_details_as_dataframe(cart_id):\n with session_scope() as session:\n stmn = retrieve_cart_details_view_stmn(cart_id)\n df = read_sql(stmn, session.bind)\n return df\n\n\ndef get_cart_data_for_order_sheet(cart_id):\n try:\n data_set = []\n with session_scope() as session:\n cart_rec = retrieve_record(session, Cart, did=cart_id)\n order_recs = retrieve_records(session, Order, cart_id=cart_id)\n for rec in order_recs:\n data = []\n data.append(rec.resource.other_no)\n data.append(rec.resource.isbn)\n data.append(rec.resource.title)\n data.append(rec.resource.author)\n total_cost = 0\n total_qty = 0\n for loc in rec.locations:\n total_cost += loc.qty * 
rec.resource.price_disc\n total_qty += loc.qty\n data.append(f\"{rec.resource.price_disc:.2f}\")\n data.append(total_qty)\n data.append(total_cost)\n data.append(rec.oid)\n data.append(cart_rec.blanketPO)\n data_set.append(data)\n session.expunge_all()\n\n return data_set\n\n except Exception as exc:\n _, _, exc_traceback = sys.exc_info()\n tb = format_traceback(exc, exc_traceback)\n mlogger.error(\"Unhandled error cart data retrieval.\" f\"Traceback: {tb}\")\n raise BabelError(exc)\n\n\ndef create_cart_copy(cart_id, system, user, profile_idx, cart_name, status):\n \"\"\"\n Creates a copy of a cart\n args:\n cart_id: int, datastore cart did\n system: str, NYPL or BPL\n user: str, profile/user name\n profile_idx: dict, dictionary of user_id (key) and names\n cart_name: str, new cart name\n status: tkinter StringVar\n \"\"\"\n valid = True\n if not cart_id:\n valid = False\n status.set(\"Invalid cart id\")\n elif not system:\n valid = False\n status.set(\"Failed. Missing system parameter.\")\n elif not user:\n valid = False\n status.set(\"Failed. Missing profile prameter.\")\n elif not cart_name:\n valid = False\n status.set(\"Failed. Missing new cart name.\")\n\n try:\n with session_scope() as session:\n if cart_id and system and user and cart_name:\n # verify name/user not used:\n if system == \"BPL\":\n system_id = 1\n elif system == \"NYPL\":\n system_id = 2\n\n rec = retrieve_record(\n session,\n Cart,\n system_id=system_id,\n user_id=get_id_from_index(user, profile_idx),\n name=cart_name,\n )\n if rec:\n valid = False\n status.set(\n \"Failed. 
A cart with the same name\"\n \"already exists.\\nPlease change the name.\"\n )\n if valid:\n # create copy of the original cart\n old_orders = retrieve_records(session, Order, cart_id=cart_id)\n\n new_orders = []\n for order in old_orders:\n\n resource = Resource(\n title=order.resource.title,\n add_title=order.resource.add_title,\n author=order.resource.author,\n series=order.resource.series,\n publisher=order.resource.publisher,\n pub_place=order.resource.pub_place,\n summary=order.resource.summary,\n isbn=order.resource.isbn,\n upc=order.resource.upc,\n other_no=order.resource.other_no,\n price_list=order.resource.price_list,\n price_disc=order.resource.price_disc,\n desc_url=order.resource.desc_url,\n misc=order.resource.misc,\n )\n\n new_orders.append(\n Order(\n lang_id=order.lang_id,\n audn_id=order.audn_id,\n vendor_id=order.vendor_id,\n matType_id=order.matType_id,\n poPerLine=order.poPerLine,\n note=order.note,\n comment=order.comment,\n resource=resource,\n )\n )\n\n insert(\n session,\n Cart,\n name=cart_name,\n user_id=get_id_from_index(user, profile_idx),\n system_id=system_id,\n orders=new_orders,\n )\n\n status.set(\"Cart copied successfully.\")\n\n except Exception as exc:\n _, _, exc_traceback = sys.exc_info()\n tb = format_traceback(exc, exc_traceback)\n mlogger.error(\"Unhandled error on cart copy.\" f\"Traceback: {tb}\")\n raise BabelError(exc)\n\n\ndef determine_carts_linked(session, cart_ids):\n \"\"\"\n Determines if all orders in relevant cart have corresponding\n Sierra order and bib number and updates cart linked status\n args:\n cart_ids: list of cart_ids that had their orders updated with oid\n \"\"\"\n\n mlogger.debug(\"Updating carts linked status.\")\n\n # determine time period when wlos were assigned\n for cart_id in cart_ids:\n cart_rec = retrieve_record(session, Cart, did=cart_id)\n # check if all orders have oid\n if cart_rec:\n linked = True\n for o in cart_rec.orders:\n if o.oid is None:\n mlogger.debug(f\"Order did={o.did} 
missing oid.\")\n linked = False\n\n if linked:\n mlogger.debug(f\"Cart {cart_rec.name} (did={cart_rec.did}) linked.\")\n update_record(session, Cart, cart_rec.did, linked=True)\n else:\n mlogger.debug(f\"Cart {cart_rec.name} (did={cart_rec.did}) not linked.\")\n else:\n mlogger.debug(f\"Cart with did={cart_id} not linked (missing record).\")\n\n\ndef add_sierra_ids_to_orders(source_fh, system_id):\n mlogger.debug(f\"Linking IDs initiated system_id-{system_id}.\")\n sids = get_sierra_ids(source_fh, system_id)\n try:\n unique_carts = set()\n with session_scope() as session:\n for sid in sids:\n wlo, oid, bid = sid\n ord_rec = retrieve_record(session, Order, wlo=wlo)\n if ord_rec:\n # record affected cart_id\n unique_carts.add(ord_rec.cart_id)\n # update ord rec\n update_record(session, Order, ord_rec.did, oid=oid, bid=bid)\n mlogger.debug(\n f\"Record updated: order_id={ord_rec.did}, \"\n f\"wlo={wlo}, oid={oid}, bid={bid}\"\n )\n\n session.flush()\n # check which carts are linked\n determine_carts_linked(session, unique_carts)\n\n mlogger.debug(\"Linking completed.\")\n\n except Exception as exc:\n _, _, exc_traceback = sys.exc_info()\n tb = format_traceback(exc, exc_traceback)\n mlogger.error(\"Unhandled error on linking IDs.\" f\"Traceback: {tb}\")\n raise BabelError(exc)\n\n\ndef get_cart_id_ranges(cart_id):\n with session_scope() as session:\n first_ord = retrieve_first_record(session, Order, cart_id=cart_id)\n last_ord = retrieve_last_record_filtered(session, Order, cart_id=cart_id)\n\n return ((first_ord.wlo, last_ord.wlo), (first_ord.oid, last_ord.oid))\n","repo_name":"BookOps-CAT/babel","sub_path":"babel/data/transactions_carts.py","file_name":"transactions_carts.py","file_ext":"py","file_size_in_byte":13518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"21449380820","text":"\"\"\"\nBruce Pucci\nData Scientist, Progressive Insurance\n\nPython 3.5.1 :: Anaconda 2.4.0 
(64-bit)\n\n\n\n############## Quick Explanation ####################\n\nLets simplify the problem to find the largest sub-array of length k \n (where k > 1) in a larger array (m).\n\nSuppose m = [0 1 2 3 4 5 6 7 8 9]\n\nIf we cumsum along m we get [0 1 3 6 10 15 21 28 36 45].\n\nWe can find the largest sub-array by taking the differences of the values k indices apart.\n\nSuppose k = 2\n\nm_rght_offset = [0 1 3 6 10 15 21 28 36 45 0 0]\nm_left_offset = [0 0 0 1 3 6 10 15 21 28 36 45]\n\nargmax(m_rght_offset - m_left_offset) = 9\nSo largest sub array is m[8:10]\n\nThis solution is similar but in 2 dimensions.\n\n####################################################\n\n\n\n\n\n\nBoilerplate code for your convenience. Feel free to modify as you want.\n\nTesting your implemention:\n cat inputs/input0.txt | python solution.py\n cat inputs/input1.txt | python solution.py\n cat inputs/input2.txt | python solution.py\n cat inputs/input3.txt | python solution.py\n\n Check if your output matches the corresponding outputs in 'outputs' folder.\n\"\"\"\n\nfrom sys import stdin\nimport numpy as np\n\n\ndef _cumsum_matrix(m):\n return m.cumsum(axis=0).cumsum(axis=1)\n\n\ndef _shift_top(M):\n M = np.concatenate([np.array([[0]*M.shape[1]]), M], axis=0)\n return M\n\n\ndef _shift_left(M):\n M = np.concatenate([np.array([[0]*M.shape[0]]).T, M], axis=1)\n return M\n\n\ndef find_densest(big, small_ncols, small_nrows):\n big_nrows, big_ncols = big.shape\n\n if small_nrows > big_nrows or small_ncols > big_ncols:\n return\n\n cumsum_big = _cumsum_matrix(big)\n\n bottom_right = cumsum_big[small_nrows-1:, small_ncols-1:]\n bottom_left = _shift_left(cumsum_big[small_nrows-1:, :-small_ncols:])\n top_left = _shift_top(cumsum_big[:-small_nrows, :-small_ncols])\n top_left = _shift_left(top_left)\n top_right = _shift_top(cumsum_big[:-small_nrows:, small_ncols-1:])\n\n diff_matrix = bottom_right - bottom_left - top_right + top_left # adding top left reverts the double counting of top left.\n return 
diff_matrix.max()\n\n\ndef read_problem_instances(ifile):\n \"\"\"Reads problem instances from given file.\n\n See readme.md for file format.\n\n Returns:\n A generator that gives a tuple of (2D array, RECTANGLE_ROWS, RECTANGLE_COLUMNS)\n\n This function has been changed for performance reasons.\n \"\"\"\n lines = ifile.read().splitlines()\n lines = (np.fromstring(x, dtype='int', sep=' ') for x in lines)\n N = next(lines)[0]\n for i in range(N):\n small_nrows, small_ncols = tuple(next(lines))\n big_nrows, big_ncols = tuple(next(lines))\n big = np.concatenate([next(lines) for x in range(big_nrows)]).reshape(big_nrows, big_ncols)\n yield big, small_nrows, small_ncols\n\nif __name__ == '__main__':\n for big, small_nrows, small_ncols in read_problem_instances(stdin):\n print(find_densest(big, small_nrows=small_nrows, small_ncols=small_ncols))\n","repo_name":"brucepucci/HouseCanaryPyCon2016","sub_path":"solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":2974,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"7142927801","text":"import openai\n\nfrom apps.open.base.codes import CodeCreate\nfrom apps.open.base.images import ImageCreate, ImageVariation\nfrom apps.open.base.texts import TextCreate\n\nengines = openai.Engine.list()\nprint(\"\\n\".join(sorted(engine.id for engine in engines.data)), \"\\n\")\n\nprint(\n TextCreate(\n prompt=\"Quais os melhores sites sobre Django e Python?\",\n )\n .execute()\n .result,\n)\n\n\nimage = (\n ImageCreate(\n prompt=\"age of empires hdr 8k ultra realistic futuristic\",\n size=\"1024x1024\",\n )\n .execute()\n .file_name\n)\n\nprint(\"Image Created:\", image, \"\\n\")\n\nfile_name = (\n TextCreate(prompt=f\"Extraia apenas o nome do arquivo, sem a extensão: {image}\")\n .execute()\n .result.strip()\n)\nprint(\n \"Image Variation:\",\n ImageVariation(\n prompt=f\"variation_{file_name}\",\n size=\"1024x1024\",\n image_filename=image,\n )\n .execute()\n 
.file_name,\n \"\\n\",\n)\n\nprint(\n CodeCreate(\n prompt=\"Imprima 'Olá, Mundo!' na tela.\",\n )\n .execute()\n .result,\n)\n","repo_name":"marvinbraga/hand_tracking","sub_path":"apps/open/test_open_ai.py","file_name":"test_open_ai.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"4649674399","text":"\"\"\"Cog for Greetings\"\"\"\n\nimport disnake\nfrom disnake.ext import commands\n\nclass Greetings(commands.Cog, name='Greetings'):\n \"\"\"Cog for handling Greetings\"\"\"\n def __init__(self, bot):\n self.bot = bot\n self._last_member = None\n\n @commands.Cog.listener()\n async def on_member_join(self, member):\n \"\"\"Greet on member join\"\"\"\n channel = member.guild.system_channel\n if channel is not None:\n await channel.send(f'Welcome {member.mention}.')\n\n @commands.command()\n async def hello(self, ctx, *, member: disnake.Member = None):\n \"\"\"Says hello\"\"\"\n member = member or ctx.author\n if self._last_member is None or self._last_member.id != member.id:\n await ctx.send(f'Hello {member.name}~')\n else:\n await ctx.send(f'Hello {member.name}... 
This feels familiar.')\n self._last_member = member\n","repo_name":"h3mmy/BloopyBoi-python","sub_path":"app/cogs/greetings.py","file_name":"greetings.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"41685306700","text":"from ariadne import (\n MutationType\n)\nfrom crud.user import (\n find_user_by_email\n)\nfrom core.security import (\n verify_password\n)\nfrom core.jwt import (\n create_access_token\n)\nfrom crud.token import (\n add_token_in_db,\n remove_token_in_db\n)\n\n\nauth_mutation = MutationType()\n\n\n@auth_mutation.field(\"login\")\nasync def resolve_login(_, info, **kwargs):\n user = await find_user_by_email(kwargs[\"email\"])\n\n if not user:\n return {\n \"error\": \"User with that email not found\"\n }\n\n password_matched = verify_password(kwargs[\"password\"], user[\"password\"])\n if not password_matched:\n return {\n \"error\": \"Incorrect credentials combination.\"\n }\n\n encoded_jwt, expire = create_access_token(\n data={\n \"user_id\": str(user[\"id\"]),\n \"group\": user[\"group\"]\n }\n )\n added_token_in_db = await add_token_in_db(\n user_id=str(user[\"id\"]),\n token=encoded_jwt.decode(\"utf-8\")\n )\n\n return {\n \"token\": {\n \"access_token\": encoded_jwt.decode(\"utf-8\"),\n \"token_type\": \"bearer\"\n }\n }\n\n\n@auth_mutation.field(\"logout\")\nasync def resolve_logout(\n _,\n info,\n current_user=None,\n **kwargs\n):\n resp = {\n \"error\": \"User was not logged out\",\n \"logged_out\": False\n }\n\n logged_out = await remove_token_in_db(current_user[\"id\"])\n if logged_out:\n resp.update(logged_out=True, error=\"\")\n return resp\n","repo_name":"zero-shubham/permissions_system_example","sub_path":"ra_graphql/resolvers/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":1488,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} 
+{"seq_id":"74229564642","text":"import tarfile\nimport os\n\n\ndef list_tar(archive, verbosity):\n \"\"\"List a TAR archive with the tarfile Python module.\"\"\"\n try:\n with tarfile.open(archive) as tfile:\n tfile.list(verbose=verbosity > 1)\n except Exception as err:\n msg = \"error listing %s: %s\" % (archive, err)\n print(msg)\n return None\n\n\ntest_tar = list_tar\n\n\ndef extract_tar(archive, outdir):\n \"\"\"Extract a TAR archive with the tarfile Python module.\"\"\"\n try:\n with tarfile.open(archive) as tfile:\n tfile.extractall(path=outdir)\n except Exception as err:\n msg = \"error extracting %s: %s\" % (archive, err)\n print(msg)\n return None\n\n\ndef create_tar(archive, folder_name, compression=None):\n \"\"\"Create a TAR archive with the tarfile Python module.\"\"\"\n mode = \"w:\"\n if compression is not None:\n mode = get_tar_mode(compression)\n try:\n with tarfile.open(archive, mode) as tfile:\n for filename in os.listdir(folder_name):\n tfile.add(folder_name + filename, arcname=filename)\n except Exception as err:\n msg = \"error creating %s: %s\" % (archive, err)\n print(msg)\n return None\n\n\ndef get_tar_mode(compression):\n \"\"\"Determine tarfile open mode according to the given compression.\"\"\"\n if compression == 'gzip':\n return 'w:gz'\n if compression == 'bzip2':\n return 'w:bz2'\n if compression == 'lzma':\n return 'w:xz'\n if compression:\n msg = 'pytarfile does not support %s for tar compression'\n print(msg)\n # no compression\n return 'w'\n","repo_name":"python-geeks/Automation-scripts","sub_path":"tar_py/tar_py.py","file_name":"tar_py.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"en","doc_type":"code","stars":719,"dataset":"github-code","pt":"54"} +{"seq_id":"34987209256","text":"#!/usr/bin/env python3\n\nfrom dotenv.main import load_dotenv\nfrom flask import Flask, render_template, flash, request, redirect, url_for, send_from_directory, session, abort, Blueprint\nimport database_conn as db\n\n\n# 
App context\norientador = Blueprint('orientador', __name__)\n\n\n@orientador.route('/orientador', methods=['GET', 'POST'])\ndef orientador_page():\n\n return render_template(\"orientador.html\")\n\n\n@orientador.route('/orientador/projects', methods=['GET', 'POST'])\ndef new_projects():\n # Submit new project proposals\n if(request.method == 'POST'):\n\n suggestions = [\n {\n \"sigla\": request.form['sigla'],\n \"nome_projeto\": request.form['name'],\n \"description\": request.form['description']\n }\n ]\n\n db.connection_db(data=suggestions, query=\"insert\", tablename=\"orientador_suggestions\")\n\n return render_template(\"project_suggestion.html\")\n\n\n@orientador.route('/orientador/projects/available', methods=['GET', 'POST'])\ndef available_projects():\n\n projects = db.connection_db(query=\"select\", tablename=\"orientador_suggestions\")\n return render_template(\"available_projects.html\", data=projects)\n\n\n@orientador.route('/orientador/submit_grade', methods=['GET', 'POST'])\ndef submit_grade():\n\n if(request.method == 'POST'):\n\n # Submit final grade\n grade = {\n \"student\": request.form['student'],\n \"project_name\": request.form['project'],\n \"grade\": request.form['note']\n }\n\n # Insert data into DB\n db.connection_db(data=grade, query=\"insert\", tablename=\"grades\")\n\n return render_template(\"final_grade.html\")\n","repo_name":"diogolopes18-cyber/MODSI","sub_path":"user/orientador_page.py","file_name":"orientador_page.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"18376518506","text":"#!/usr/bin/python\nimport urllib.request\nimport json\nimport time\nimport serial\nimport sys\nimport glob\nimport os\n\nkodi_ip = \"127.0.0.1\"\nplayer_id = \"1\" # Video = 1, Audio = 0\narduino_serial_speed = 57600\n\nser = \"\"\n\ndef main():\n global ser\n\n ser = get_serial_port()\n\n if len(sys.argv) > 1 and sys.argv[1] == \"--test\":\n 
run_test(ser)\n sys.exit(1)\n else:\n loop()\n\ndef loop():\n global player_id\n global kodi_ip\n global ser\n\n while 1:\n ser = get_serial_port()\n\n if (ser is False):\n time.sleep(0.5)\n continue\n\n start = time.time()\n\n # Where to connect for the Kodi API\n url = 'http://' + kodi_ip + '/jsonrpc?request={\"jsonrpc\":\"2.0\",\"method\":\"Player.GetProperties\",\"params\":{\"playerid\":' + player_id + ',\"properties\":[\"time\",\"totaltime\",\"percentage\",\"speed\"]},\"id\":\"1\"}'\n\n # This is raw bytes\n resp = urllib.request.urlopen(url).read()\n # Convert bytes to a string\n resp = resp.decode(\"utf-8\")\n\n # String to hash\n x = json.loads(resp)\n\n error_code = x.get(\"error\", {}).get(\"code\", 0)\n\n if error_code != 0:\n new_player_id = get_active_player();\n print(\"Error playerid \" + player_id + \" is not valid anymore. Switching to \" + new_player_id)\n player_id = new_player_id\n\n time.sleep(2)\n continue\n\n timez = x.get('result', {}).get('time', {})\n if (len(timez) == 0):\n continue\n\n hours = timez.get('hours', 0)\n mins = timez.get('minutes', 0)\n secs = timez.get('seconds', 0)\n\n cur_time = (hours * 3600) + (mins * 60) + secs\n total = (x[\"result\"][\"totaltime\"][\"hours\"] * 3600) + (x[\"result\"][\"totaltime\"][\"minutes\"] * 60) + x[\"result\"][\"totaltime\"][\"seconds\"]\n\n speed = x['result']['speed'];\n if (speed == 1):\n speed_str = \"Play\"\n elif (speed == 0 and total > 0):\n speed_str = \"Pause\"\n elif (speed == 0):\n speed_str = \"Stop\"\n\n # Build the string to send to the Arduino via serial\n # Format : \n # Example: <1042:2820:Play>\n line = \"<\" + str(cur_time) + \":\" + str(total) + \":\" + speed_str + \">\"\n print(line)\n\n line = line + \"\\n\"\n line = line.encode('ascii')\n\n # Write the line to the serial port\n ser.write(line)\n\n end = time.time()\n total = end - start\n sleep_time = 0.4995 - total\n\n end = time.time()\n total = end - start\n sleep_time = 0.4995 - total\n\n # Sleep X seconds\n 
time.sleep(sleep_time)\n\ndef run_test(ser):\n i = 0\n total = 6000;\n\n while i < total:\n line = \"<\" + str(i) + \":\" + str(total) + \":Play\" + \">\"\n\n # Write the line to the serial port\n ser.write(line + \"\\n\")\n #print(line)\n\n time.sleep(0.05)\n i += 1\n\n return i\n\ndef get_active_player():\n global kodi_ip\n\n url = 'http://' + kodi_ip + '/jsonrpc?request={\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"Player.GetActivePlayers\"}'\n\n # This is raw bytes\n resp = urllib.request.urlopen(url).read()\n # Convert bytes to a string\n resp = resp.decode(\"utf-8\")\n\n # String to hash\n x = json.loads(resp)\n\n try:\n active_id = x[\"result\"][0][\"playerid\"]\n except:\n active_id = -1\n\n active_id = str(active_id)\n\n print(\"Active playerid: \" + active_id)\n\n return active_id\n\nPREV_SERIAL_PORT = \"\"\ndef get_serial_port():\n global PREV_SERIAL_PORT\n global ser\n x = []\n\n # Find the all the USB serial ports\n x.extend(glob.glob(\"/dev/ttyUSB*\"))\n #x.extend(glob.glob(\"/dev/ttyACM*\"))\n\n # We're going to assume the FIRST one\n x.sort()\n\n # We didn't find anything\n if (len(x) == 0):\n print(\"No serial ports found\")\n PREV_SERIAL_PORT = \"\"\n return False\n\n found_port = x[0]\n\n # It's a new port if it's not the previous value we used\n is_new = False\n if (PREV_SERIAL_PORT != found_port):\n print(\"Found a new serial port %s (Prev: %s)\" % (found_port, PREV_SERIAL_PORT))\n is_new = True\n\n #print (found_port)\n #print (os.path.isfile(found_port))\n\n # It's a newly found port, and it's openable\n if (is_new and os.access(found_port, os.R_OK)):\n print(\"Opening %s for serial access\" % found_port)\n ret = serial.Serial(found_port, arduino_serial_speed, timeout=1)\n\n PREV_SERIAL_PORT = found_port\n elif (not is_new):\n return ser\n else:\n ret = False\n\n return 
ret\n\nmain()\n","repo_name":"scottchiefbaker/Arduino-Kodi-Elapsed","sub_path":"python/kodi-api-interface.py","file_name":"kodi-api-interface.py","file_ext":"py","file_size_in_byte":4650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"30323074816","text":"# -*- coding: utf-8 -*-\n\n# Calculating transmittance with an absorbing material\n\nimport math\nimport meep as mp\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as pltcolors\n\ndef main():\n duration = 100\n resolution = 5\n cell_x = 20\n cell_y = 30\n cell_z = 30\n pml = 1\n src_buffer = 1\n mosi_buffer = 1\n mosi_length = cell_x - 2 * pml - src_buffer - 2 * mosi_buffer\n mosi_center_x = src_buffer / 2\n wavelength = 1.55\n cladding_thickness = 125\n core_thickness = 8\n core_radius = core_thickness / 2\n cladding_min_thickness = 1\n cladding_min_radius = cladding_min_thickness + core_radius\n mosi_thickness = 1\n mosi_width = 2\n bottom_min = core_radius + mosi_thickness + 2\n axis_y = 3 * cell_y / 8 - pml - bottom_min\n cell = mp.Vector3(cell_x, cell_y, cell_z)\n freq = 1/wavelength\n src_pt = mp.Vector3(-cell_x/2 + pml + src_buffer, 0, 0)\n\n default_material=mp.Medium(epsilon=1.444)\n\n geometry = [mp.Cylinder(center=mp.Vector3(y=axis_y), height=mp.inf, radius=core_radius,\n material=mp.Medium(epsilon=1.4475),\n axis=mp.Vector3(1,0,0)),\n mp.Block(mp.Vector3(mp.inf, cell_y, mp.inf),\n center=mp.Vector3(0, cell_y / 2 + axis_y + cladding_min_radius, 0),\n material=mp.Medium(epsilon=1))\n ]\n\n absorber = mp.Block(mp.Vector3(mosi_length, mosi_thickness, mosi_width),\n center=mp.Vector3(mosi_center_x, axis_y + cladding_min_radius + mosi_thickness / 2, 0),\n material=mp.Medium(epsilon=1.61, D_conductivity=2*math.pi*wavelength*7.55/1.61))\n\n sources = [mp.EigenModeSource(src=mp.ContinuousSource(frequency=freq),\n center=src_pt,\n size=mp.Vector3(0, cell_y - 2 * pml, cell_z - 2 * pml),\n eig_match_freq=True,\n 
eig_parity=mp.ODD_Z)]\n\n pml_layers = [mp.PML(pml)]\n\n sim = mp.Simulation(cell_size=cell,\n boundary_layers=pml_layers,\n geometry=geometry,\n sources=sources,\n resolution=resolution,\n eps_averaging=False,\n default_material=default_material,\n symmetries=[mp.Mirror(mp.Z)],\n force_complex_fields=True)\n\n fr_y = max(min(cladding_thickness, cell_y - 2 * pml), 0)\n fr_z = max(min(cladding_thickness, cell_z - 2 * pml), 0)\n\n refl_fr = mp.FluxRegion(center=mp.Vector3(-0.5 * cell_x + pml + src_buffer + 1, 0, 0),\n size=mp.Vector3(0, fr_y, fr_z))\n refl = sim.add_flux(freq, 0, 1, refl_fr)\n\n tran_fr = mp.FluxRegion(center=mp.Vector3(0.5 * cell_x - pml - 1, 0, 0),\n size=mp.Vector3(0, fr_y, fr_z))\n tran = sim.add_flux(freq, 0, 1, tran_fr)\n\n sim.init_sim()\n sim.solve_cw(L=10)\n\n eps_data = sim.get_array(center=mp.Vector3(), size=mp.Vector3(cell_x, cell_y, 0), component=mp.Dielectric)\n plt.figure()\n plt.imshow(eps_data.transpose(), interpolation='spline36', cmap='binary')\n plt.axis('off')\n plt.show()\n\n # for normalization run, save flux fields data for reflection plane\n no_absorber_refl_data = sim.get_flux_data(refl)\n # save incident power for transmission plane\n no_absorber_tran_flux = mp.get_fluxes(tran)\n\n sim.reset_meep()\n\n geometry.append(absorber)\n\n sim = mp.Simulation(cell_size=cell,\n boundary_layers=pml_layers,\n geometry=geometry,\n sources=sources,\n resolution=resolution,\n eps_averaging=False,\n default_material=default_material,\n symmetries=[mp.Mirror(mp.Z)],\n force_complex_fields=True)\n\n refl = sim.add_flux(freq, 0, 1, refl_fr)\n tran = sim.add_flux(freq, 0, 1, tran_fr)\n\n sim.load_minus_flux_data(refl, no_absorber_refl_data)\n\n sim.init_sim()\n sim.solve_cw(L=10)\n\n absorber_refl_flux = mp.get_fluxes(refl)\n absorber_tran_flux = mp.get_fluxes(tran)\n\n transmittance = absorber_tran_flux[0] / no_absorber_tran_flux[0]\n reflectance = absorber_refl_flux[0] / no_absorber_tran_flux[0]\n absorption = 1 - transmittance\n 
penetration_depth = - mosi_length / math.log(transmittance)\n\n print(\"Transmittance: %f\" % transmittance)\n print(\"Reflectance: %f\" % reflectance)\n print(\"Absorption: {} over {} um\".format(absorption, mosi_length))\n print(\"lambda = {} mm\".format(penetration_depth / 1000))\n\n eps_data = sim.get_array(center=mp.Vector3(), size=mp.Vector3(cell_x, cell_y, 0), component=mp.Dielectric)\n plt.figure()\n plt.imshow(eps_data.transpose(), interpolation='spline36', cmap='binary')\n plt.axis('off')\n plt.show()\n\n cm = pltcolors.LinearSegmentedColormap.from_list(\n 'em', [(0,0,1), (0,0,0), (1,0,0)])\n\n ez_data = sim.get_array(center=mp.Vector3(), size=mp.Vector3(cell_x, cell_y, 0), component=mp.Ez)\n plt.figure()\n plt.imshow(eps_data.transpose(), interpolation='spline36', cmap='binary')\n plt.imshow(ez_data.transpose(), interpolation='spline36', cmap='RdBu', alpha=0.9)\n plt.axis('off')\n plt.show()\n","repo_name":"kyledebry/Evan","sub_path":"transmittance_fd.py","file_name":"transmittance_fd.py","file_ext":"py","file_size_in_byte":5318,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"26796842580","text":"import requests\nfrom config import Config\n\n\nclass NSE:\n def __init__(self, dt):\n self.dt = dt\n self.NSE_URL = Config().get_config(\"downloader\")[\"NSE_URL\"]\n self.nse_url = ''\n\n def url(self):\n # https://archives.nseindia.com/content/historical/EQUITIES/2020/MAY/cm17MAY2022bhav.csv.zip\n month = self.dt.strftime('%b').upper()\n self.nse_url = self.NSE_URL.format(month, month)\n self.nse_url = self.dt.strftime(self.nse_url)\n\n def download(self):\n with requests.Session() as session:\n session.headers.update({'User-Agent': 'Mozilla/5.0'})\n response = session.get(self.nse_url)\n response.raise_for_status()\n return response\n return 
None\n","repo_name":"nruharish/stock-quotes","sub_path":"downloader/nse.py","file_name":"nse.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"38426360936","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n# import libraries\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import MaxNLocator\nimport os\nimport cv2\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import Input, Dense\nfrom tensorflow.keras import initializers\nfrom tensorflow.keras.callbacks import EarlyStopping\nfrom tensorflow.keras.utils import to_categorical\nfrom keras import backend as K\n\n\n# In[2]:\n\n\n# Loading MNIST data having images of handwritten digits [1,2,3,6,9]\n\nroot_dir = \"..\\CS671_Assignment3\\Group_13\"\ntrain_dir = os.path.join(root_dir,\"train\")\nval_dir = os.path.join(root_dir,\"val\")\ntest_dir = os.path.join(root_dir,\"test\")\n\n\n# In[3]:\n\n\nclass_map = {1:0,2:1,3:2,6:3,9:4}\ndef load_data(path):\n x,y = [],[]\n class_dirs = [\"1\",\"2\",\"3\",\"6\",\"9\"]\n\n for class_dir in class_dirs:\n class_path = os.path.join(path,class_dir)\n for img_file in os.listdir(class_path):\n img_path = os.path.join(class_path,img_file)\n \n #reading image\n img = cv2.imread(img_path,-1)\n x.append(np.array(img))\n y.append(class_map[int(class_dir)])\n x=np.array(x)\n #y=np.array(list(map(int, y)))\n y=to_categorical(y,num_classes=5)\n return x,y\n\n\n# In[4]:\n\n\ntrain_x,train_y=load_data(train_dir)\nval_x,val_y=load_data(val_dir)\ntest_x,test_y=load_data(test_dir)\n\n\n# In[5]:\n\n\n# normalizing data\nfrom sklearn.preprocessing import 
MinMaxScaler\n\ntrain_x_flat=train_x.reshape(train_x.shape[0],-1)\nval_x_flat=val_x.reshape(val_x.shape[0],-1)\ntest_x_flat=test_x.reshape(test_x.shape[0],-1)\n\nscaler=MinMaxScaler()\nscaler.fit(train_x_flat)\ntrain_x_scaled=scaler.transform(train_x_flat)\nval_x_scaled=scaler.transform(val_x_flat)\ntest_x_scaled=scaler.transform(test_x_flat)\n\n\n# In[6]:\n\n\n# set weight initializer for Keras layers\nweights_ini=initializers.RandomNormal(mean=0,stddev=0.5,seed=123)\nbias_ini=initializers.Zeros()\n\n\n# In[7]:\n\n\n# single hidden layer autoencoder\ninputs = Input(shape=(784,),name='input_layer')\nl1=Dense(256, kernel_initializer=weights_ini, bias_initializer=bias_ini, activation = 'sigmoid',name='middle_layer')(inputs)\noutputs = Dense(784, kernel_initializer=weights_ini, bias_initializer=bias_ini, activation = 'sigmoid', name='output_layer')(l1)\n\nfull_model = Model(inputs=inputs, outputs=outputs)\nencoder = Model(inputs=inputs, outputs=l1)\ndecoder = Model(inputs=l1, outputs=outputs)\n\n\n# In[8]:\n\n\nfull_model.summary()\n\n\n# In[9]:\n\n\n# Setting optimiser\nopt_Adam = keras.optimizers.Adam(learning_rate=0.001,beta_1=0.9,beta_2=0.999,epsilon=1e-08,name=\"Adam\")\n\n\n# In[10]:\n\n\n# compiling model\nfull_model.compile(optimizer=opt_Adam,loss=\"mse\",metrics=[\"mean_squared_error\"])\n\n\n# In[11]:\n\n\n# setting convergence criterion\nmy_callbacks = EarlyStopping(monitor='loss',min_delta=0.0001,patience=10,verbose=1)\n\n\n# In[12]:\n\n\n# fitting model\nmodel_train=full_model.fit(train_x_scaled,train_x_scaled,batch_size=32,epochs=10000,callbacks=my_callbacks,verbose=\"auto\",\n validation_data=(val_x_scaled,val_x_scaled),shuffle=True)\n\n\n# In[13]:\n\n\n# Saving and plotting MSE\nhistory = model_train.history\n\nimport pickle\nwith open('./models/history_256.pkl','wb') as f:\n pickle.dump(history,f)\n\nplt.plot(history['loss'])\nplt.ylabel('Average Error',fontsize=12)\nplt.xlabel('Epoch',fontsize=12)\nplt.savefig('./img/256/loss.png')\nplt.show()\n\n\n# 
In[14]:\n\n\n# saving model\nfull_model.save(\"./models/singleAE_256.h5\")\n\n\n# In[15]:\n\n\n# Average reconstruction error\n\ntrain_loss=full_model.evaluate(train_x_scaled,train_x_scaled)[0]\nval_loss=full_model.evaluate(val_x_scaled,val_x_scaled)[0]\ntest_loss=full_model.evaluate(test_x_scaled,test_x_scaled)[0]\n\n\n# In[16]:\n\n\ntrain_out = full_model.predict(train_x_scaled)\nval_out = full_model.predict(val_x_scaled)\ntest_out = full_model.predict(test_x_scaled)\n\n\n# In[17]:\n\n\nfig,axes = plt.subplots(1,5,figsize=(20,4))\nfor i,ax in enumerate(axes.flatten()):\n j=2277*i\n ax.imshow(train_x[j],cmap='gray') # original image\n ax.axis('off')\nfig.suptitle('Original Image',fontsize=14)\nplt.savefig('./img/256/org.png')\nplt.show()\nplt.close()\nfig,axes = plt.subplots(1,5,figsize=(20,4))\nfor i,ax in enumerate(axes.flatten()):\n j=2277*i\n recon = cv2.normalize(train_out[j].reshape(28,28),None,255,0,cv2.NORM_MINMAX,cv2.CV_8U) # reconstructed image\n ax.imshow(recon,cmap='gray') \n ax.axis('off')\nfig.suptitle('Reconstructed Image',fontsize=14)\nplt.savefig('./img/256/recon.png')\nplt.show()\nplt.close()\n\n\n# In[19]:\n\n\nfig,axes = plt.subplots(1,5,figsize=(20,4))\nfor i,ax in enumerate(axes.flatten()):\n j=759*i\n ax.imshow(val_x[j],cmap='gray') # original image\n ax.axis('off')\nfig.suptitle('Original Image',fontsize=14)\nplt.savefig('./img/256/org_val.png')\nplt.show()\nplt.close()\nfig,axes = plt.subplots(1,5,figsize=(20,4))\nfor i,ax in enumerate(axes.flatten()):\n j=759*i\n recon = cv2.normalize(val_out[j].reshape(28,28),None,255,0,cv2.NORM_MINMAX,cv2.CV_8U) # reconstructed image\n ax.imshow(recon,cmap='gray') \n ax.axis('off')\nfig.suptitle('Reconstructed Image',fontsize=14)\nplt.savefig('./img/256/recon_val.png')\nplt.show()\nplt.close()\n\n\n# In[21]:\n\n\nfig,axes = plt.subplots(1,5,figsize=(20,4))\nfor i,ax in enumerate(axes.flatten()):\n j=759*i\n ax.imshow(test_x[j],cmap='gray') # original image\n ax.axis('off')\nfig.suptitle('Original 
Image',fontsize=14)\nplt.savefig('./img/256/org_test.png')\nplt.show()\nplt.close()\nfig,axes = plt.subplots(1,5,figsize=(20,4))\nfor i,ax in enumerate(axes.flatten()):\n j=759*i\n recon = cv2.normalize(test_out[j].reshape(28,28),None,255,0,cv2.NORM_MINMAX,cv2.CV_8U) # reconstructed image\n ax.imshow(recon,cmap='gray') \n ax.axis('off')\nfig.suptitle('Reconstructed Image',fontsize=14)\nplt.savefig('./img/256/recon_test.png')\nplt.show()\nplt.close()\n\n\n# ## Training a FCNN using compressed representation\n\n# In[23]:\n\n\n# Getting compressed representation of train, validation and test data to further use to train FCNN\n\nencoded_train = encoder.predict([train_x_scaled])\nencoded_val = encoder.predict([val_x_scaled])\nencoded_test = encoder.predict([test_x_scaled])\n\n\n# In[24]:\n\n\n# 3 layer FCNN with sigmoidal activation at hidden layers and softmax at output layer\n\nmodel1 = keras.Sequential([\n Input(shape=(256,),name='input_layer'),\n Dense(512,kernel_initializer=weights_ini,bias_initializer=bias_ini,activation=\"sigmoid\",name=\"hidden_layer_1\"),\n Dense(256,kernel_initializer=weights_ini,bias_initializer=bias_ini,activation=\"sigmoid\",name=\"hidden_layer_2\"),\n Dense(128,kernel_initializer=weights_ini,bias_initializer=bias_ini,activation=\"sigmoid\",name=\"hidden_layer_3\"),\n Dense(5,kernel_initializer=weights_ini,bias_initializer=bias_ini,activation=\"softmax\",name=\"output_layer\")\n ])\n\n\n# In[25]:\n\n\nmodel1.summary()\n\n\n# In[26]:\n\n\n# Setting optimiser\nopt_Adam = keras.optimizers.Adam(learning_rate=0.001,beta_1=0.9,beta_2=0.999,epsilon=1e-08,name=\"Adam\")\n\n# compiling model\nmodel1.compile(optimizer=opt_Adam,loss=\"categorical_crossentropy\",metrics=[\"accuracy\"])\n\n# setting convergence criterion\nmy_callbacks = EarlyStopping(monitor='loss',min_delta=0.0001,patience=10,verbose=1)\n\n\n# In[27]:\n\n\n# fitting model\nFCNN1_train=model1.fit(encoded_train,train_y,batch_size=32,epochs=10000,callbacks=my_callbacks,verbose=\"auto\",\n 
validation_data=(encoded_val,val_y),shuffle=True)\n\n\n# In[28]:\n\n\n# Average error plot on train data\nhistory = FCNN1_train.history\n\nwith open('./models/256/history_FCNN1.pkl','wb') as f:\n pickle.dump(history,f)\n\nplt.plot(history['loss'])\nplt.ylabel('Average Error',fontsize=12)\nplt.xlabel('Epoch',fontsize=12)\nplt.savefig('./models/256/loss1.png')\nplt.show()\n\n\n# In[29]:\n\n\n# saving model\nmodel1.save(\"./models/256/FCNN1.h5\")\n\n\n# In[30]:\n\n\n# Accuracy and loss on validation data\nval_loss,val_acc=model1.evaluate(encoded_val,val_y)\n\nprint('Validation loss: {} \\t Validation accuracy: {}'.format(val_loss,val_acc))\n\n\n# In[31]:\n\n\n# 4 layer FCNN with sigmoidal activation at hidden layers and softmax at output layer\n\nmodel2 = keras.Sequential([\n Input(shape=(256,),name='input_layer'),\n Dense(512,kernel_initializer=weights_ini,bias_initializer=bias_ini,activation=\"sigmoid\",name=\"hidden_layer_1\"),\n Dense(256,kernel_initializer=weights_ini,bias_initializer=bias_ini,activation=\"sigmoid\",name=\"hidden_layer_2\"),\n Dense(128,kernel_initializer=weights_ini,bias_initializer=bias_ini,activation=\"sigmoid\",name=\"hidden_layer_3\"),\n Dense(64,kernel_initializer=weights_ini,bias_initializer=bias_ini,activation=\"sigmoid\",name=\"hidden_layer_4\"),\n Dense(5,kernel_initializer=weights_ini,bias_initializer=bias_ini,activation=\"softmax\",name=\"output_layer\")\n ])\n\n\n# In[32]:\n\n\nmodel2.summary()\n\n\n# In[33]:\n\n\n# Setting optimiser\nopt_Adam = keras.optimizers.Adam(learning_rate=0.001,beta_1=0.9,beta_2=0.999,epsilon=1e-08,name=\"Adam\")\n\n# compiling model\nmodel2.compile(optimizer=opt_Adam,loss=\"categorical_crossentropy\",metrics=[\"accuracy\"])\n\n# setting convergence criterion\nmy_callbacks = EarlyStopping(monitor='loss',min_delta=0.0001,patience=10,verbose=1)\n\n\n# In[34]:\n\n\n# fitting model\nFCNN2_train=model2.fit(encoded_train,train_y,batch_size=32,epochs=10000,callbacks=my_callbacks,verbose=\"auto\",\n 
validation_data=(encoded_val,val_y),shuffle=True)\n\n\n# In[35]:\n\n\n# Average error plot on train data\nhistory = FCNN2_train.history\n\nwith open('./models/256/history_FCNN2.pkl','wb') as f:\n pickle.dump(history,f)\n\nplt.plot(history['loss'])\nplt.ylabel('Average Error',fontsize=12)\nplt.xlabel('Epoch',fontsize=12)\nplt.savefig('./models/256/loss2.png')\nplt.show()\n\n\n# In[36]:\n\n\n# saving model\nmodel2.save(\"./models/256/FCNN2.h5\")\n\n\n# In[37]:\n\n\n# Accuracy and loss on validation data\nval_loss,val_acc=model2.evaluate(encoded_val,val_y)\nprint('Validation loss: {} \\t Validation accuracy: {}'.format(val_loss,val_acc))\n\n\n# In[38]:\n\n\n# 5 layer FCNN with sigmoidal activation at hidden layers and softmax at output layer\n\nmodel3 = keras.Sequential([\n Input(shape=(256,),name='input_layer'),\n Dense(512,kernel_initializer=weights_ini,bias_initializer=bias_ini,activation=\"sigmoid\",name=\"hidden_layer_1\"),\n Dense(256,kernel_initializer=weights_ini,bias_initializer=bias_ini,activation=\"sigmoid\",name=\"hidden_layer_2\"),\n Dense(128,kernel_initializer=weights_ini,bias_initializer=bias_ini,activation=\"sigmoid\",name=\"hidden_layer_3\"),\n Dense(64,kernel_initializer=weights_ini,bias_initializer=bias_ini,activation=\"sigmoid\",name=\"hidden_layer_4\"),\n Dense(32,kernel_initializer=weights_ini,bias_initializer=bias_ini,activation=\"sigmoid\",name=\"hidden_layer_5\"),\n Dense(5,kernel_initializer=weights_ini,bias_initializer=bias_ini,activation=\"softmax\",name=\"output_layer\")\n ])\n\n\n# In[39]:\n\n\nmodel3.summary()\n\n\n# In[40]:\n\n\n# Setting optimiser\nopt_Adam = keras.optimizers.Adam(learning_rate=0.001,beta_1=0.9,beta_2=0.999,epsilon=1e-08,name=\"Adam\")\n\n# compiling model\nmodel3.compile(optimizer=opt_Adam,loss=\"categorical_crossentropy\",metrics=[\"accuracy\"])\n\n# setting convergence criterion\nmy_callbacks = EarlyStopping(monitor='loss',min_delta=0.0001,patience=10,verbose=1)\n\n\n# In[41]:\n\n\n# fitting 
model\nFCNN3_train=model3.fit(encoded_train,train_y,batch_size=32,epochs=10000,callbacks=my_callbacks,verbose=\"auto\",\n validation_data=(encoded_val,val_y),shuffle=True)\n\n\n# In[42]:\n\n\n# Average error plot on train data\nhistory = FCNN3_train.history\n\nwith open('./models/256/history_FCNN3.pkl','wb') as f:\n pickle.dump(history,f)\n\nplt.plot(history['loss'])\nplt.ylabel('Average Error',fontsize=12)\nplt.xlabel('Epoch',fontsize=12)\nplt.savefig('./models/256/loss3.png')\nplt.show()\n\n\n# In[43]:\n\n\n# saving model\nmodel3.save(\"./models/256/FCNN3.h5\")\n\n\n# In[44]:\n\n\n# Accuracy and loss on validation data\nval_loss,val_acc=model3.evaluate(encoded_val,val_y)\nprint('Validation loss: {} \\t Validation accuracy: {}'.format(val_loss,val_acc))\n\n\n# ## Model accuracy and confusion matrix for best selected architecture\n\n# In[50]:\n\n\nfrom keras import models\n\nbest=models.load_model(\"./models/256/FCNN1.h5\")\n\n# get weights of best model\nwts = best.get_weights()\n\n# create a Keras model with same architecture as best model and set its weights equal to the best model weights\n\nbest_model = keras.Sequential([\n Input(shape=(256,),name='input_layer'),\n Dense(512,activation=\"sigmoid\",name=\"hidden_layer_1\"),\n Dense(256,activation=\"sigmoid\",name=\"hidden_layer_2\"),\n Dense(128,activation=\"sigmoid\",name=\"hidden_layer_3\"),\n Dense(5,activation=\"softmax\",name=\"output_layer\")\n ])\n\n# Setting optimiser ( select best optimiser by comparing model complexity and validation accuracy )\nopt_Adam = keras.optimizers.Adam(learning_rate=0.001,beta_1=0.9,beta_2=0.999,epsilon=1e-08,name=\"Adam\")\n\nbest_model.set_weights(wts)\n\n# compile the model\n\nbest_model.compile(optimizer=opt_Adam,loss=\"categorical_crossentropy\",metrics=[\"accuracy\"])\n\n\n# In[51]:\n\n\nfrom tensorflow.math import confusion_matrix\n\n# Testing on train data\ntrain_metric=best_model.evaluate(encoded_train,train_y)\nprint(\"Train loss: {} \\t Train accuracy: 
{}\".format(train_metric[0],train_metric[1]))\n\n# Testing on validation data\nval_metric=best_model.evaluate(encoded_val,val_y)\nprint(\"Validation loss: {} \\t Validation accuracy: {}\".format(val_metric[0],val_metric[1]))\n\n# Testing on test data\ntest_metric=best_model.evaluate(encoded_test,test_y)\nprint(\"Test loss: {} \\t Test accuracy: {}\".format(test_metric[0],test_metric[1]))\n\ntest_pred=best_model.predict(encoded_test,verbose=1)\npred_y=np.argmax(test_pred,axis=1)\n\n# confusion matrix for test data\ntest_y_label = np.argmax(test_y,axis=1)\nconf_mat=confusion_matrix(test_y_label,pred_y)\nprint(conf_mat)\n\n\n# ## Weight visualization\n\n# In[47]:\n\n\nw_ij=full_model.layers[1].get_weights() # weights between input and hidden layer 1 (bottleneck layer)\nw_ij[0].shape\n\n\n# In[48]:\n\n\nfrom numpy.linalg import norm\nwts1=[]\nfor w in w_ij[0].T:\n w=w/norm(w)\n wts1.append(w.reshape(28,28))\n\nwts1 = np.array(wts1)\nwts1.shape\n\n\n# In[54]:\n\n\nfig,axes = plt.subplots(4,4,figsize=(12,12))\nfor i,ax in enumerate(axes.flatten()):\n j = 8*i\n ax.set_title(\"Node no: \"+str(j),fontsize=12)\n ax.imshow(wts1[j],cmap='gray')\n ax.axis('off')\nfig.suptitle('Visualization of inputs that maximally activate each neuron',fontsize=14)\nplt.savefig('./img/256/wts.png')\nplt.show()\nplt.close()\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"prachish820/CS671-Deep-Learning-and-Applications","sub_path":"Autoencoders/Single_layer_AE.py","file_name":"Single_layer_AE.py","file_ext":"py","file_size_in_byte":14507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"22699717098","text":"from __future__ import division, print_function\n# coding=utf-8\nimport sys\nimport os\nimport glob\nimport re\nimport numpy as np\nimport pdfdemo as pdfgen\nimport esdemo as puti\nimport mergingpdf as mpdf\nimport appendingpdf as ap\nfrom flask import send_file\nimport frontfacedetection as fd\nimport os\nimport cv2\n\n# Keras\nfrom 
keras.applications.imagenet_utils import preprocess_input, decode_predictions\nfrom keras.models import load_model\nfrom keras.preprocessing import image\n\n# Flask utils\nfrom flask import Flask, redirect, url_for, request, session,render_template\nfrom werkzeug.utils import secure_filename\nimport sqlite3\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'secret!'\n\nUPLOAD_FOLDER = 'static/uploads/'\n\n# allow files of a specific type\nALLOWED_EXTENSIONS = set(['jpg', 'jpeg'])\n\n# function to check the file extension\ndef allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\nmodel_path2 = 'model.h5' # load .h5 Model\n\nCTS = load_model(model_path2)\nfrom keras.preprocessing.image import load_img, img_to_array\n\ndef model_predict2(image_path,model):\n print(\"Predicted\")\n #image = load_img(image_path,target_size=(224,224))\n img=cv2.imread(image_path)\n harcascadePath = \"haarcascade_frontalface_default.xml\"\n detector=cv2.CascadeClassifier(harcascadePath)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n faces = detector.detectMultiScale(gray, 1.3, 5)\n file_namess = os.path.basename(image_path)\n print(\"Filenames==\",file_namess)\n\n path = 'C:\\\\Users\\\\Vrunda\\\\Desktop\\\\newui\\\\cropped'\n\n for (x,y,w,h) in faces:\n \n crop_img = img[y:(y+h), x:(x+w)]\n #cv2.imwrite(\"cropped/\"+file_namess, crop_img)\n \n cv2.imwrite(os.path.join(path , file_namess), crop_img)\n image_path=\"cropped/\"+file_namess\n print(\"image path==\",image_path)\n\n image = load_img(image_path,target_size=(224,224))\n image = img_to_array(image)\n image = image/255\n image = np.expand_dims(image,axis=0)\n \n result = np.argmax(model.predict(image))\n #prediction = classes2[result] \n \n if result == 0:\n return \"Down Syndrome\",\"result.html\" \n elif result == 1:\n return \"Non Down Syndrome\",\"result.html\"\n \n@app.route('/patreg',methods=['POST'])\ndef 
patreg():\n\tpid=request.form['pid']\n\tpname=request.form['pname']\n\tage=request.form['age']\n\tgender=request.form['g1']\n\tsession['patname'] = request.form['pname']\n\tpdfgen.process(pid,pname,age,gender)\n\treturn render_template(\"uploadpic.html\") \n \n@app.route(\"/\")\ndef index():\n return render_template(\"index.html\")\n\n@app.route('/logon')\ndef logon():\n\treturn render_template('signup.html')\n\n@app.route('/login')\ndef login():\n\treturn render_template('signin.html')\n\n@app.route(\"/signup\")\ndef signup():\n\n username = request.args.get('user','')\n # name = request.args.get('name','')\n email = request.args.get('email','')\n number = request.args.get('mobile','')\n password = request.args.get('password','')\n con = sqlite3.connect('data.db')\n cur = con.cursor()\n cur.execute(\"insert into information (`user`,`email`, `password`,`mobile`) VALUES (?, ?, ?, ?)\",(username,email,password,number))\n con.commit()\n con.close()\n return render_template(\"signin.html\")\n\n@app.route(\"/signin\")\ndef signin():\n\n mail1 = request.args.get('user','')\n password1 = request.args.get('password','')\n con = sqlite3.connect('data.db')\n cur = con.cursor()\n cur.execute(\"select `user`, `password` from information where `user` = ? 
AND `password` = ?\",(mail1,password1))\n data = cur.fetchone()\n\n if data == None:\n return render_template(\"signin.html\") \n\n elif mail1 == str(data[0]) and password1 == str(data[1]):\n return render_template(\"addpatient.html\")\n else:\n return render_template(\"signup.html\")\n\n@app.route('/home')\ndef home():\n\treturn render_template('uploadpic.html')\n\n@app.route('/predict2',methods=['GET','POST'])\ndef predict2():\n print(\"Entered\")\n \n \n print(\"Entered here\")\n file = request.files['files'] # fet input\n print(\"File ==\",file)\n filename = file.filename \n print(\"File name==\",filename) \n print(\"@@ Input posted = \", filename)\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n \n file_path = os.path.join(UPLOAD_FOLDER, filename)\n file.save(file_path)\n fd.process(file_path)\n\n print(\"@@ Predicting class......\")\n \n pred, output_page = model_predict2(file_path,CTS)\n uname= session['patname']\n ap.process(uname,uname,pred)\n #puti.process(uname,UPLOAD_FOLDER+file.filename)\n #mpdf.process(uname)\n return render_template(output_page, pred_output = pred, img_src=UPLOAD_FOLDER + file.filename)\n else:\n msg=\"Unsupported file format\"\n return render_template(\"uploadpic.html\",message=msg)\n \n@app.route('/download', methods=['POST'])\ndef download_file():\n\tpath=\"\"\n\tuname= session['patname']\n\tprint(\"uname==\",uname)\n\tpath=str(uname)+\"final.pdf\"\n\treturn send_file(path, as_attachment=True)\n@app.route(\"/logout\",methods=['POST'])\ndef log_out():\n session.clear()\n return render_template(\"signin.html\")\n\n\n \nif __name__ == '__main__':\n app.run(debug=False)\n","repo_name":"vrundavk/down_syndrome_detection","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5335,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"74495372640","text":"import random\n\nPLUS = ' + '\nMINUS = ' - '\nMUL = ' * '\nDIV = ' 
/ '\nAND = '&&'\nOR = '||'\nLT = '<'\nGT = '>'\nEQ = '='\nsymbol_map = {}\nsymbol_map[AND] = lambda x,y: x and y\nsymbol_map[OR] = lambda x,y: x or y\nsymbol_map[PLUS] = lambda x,y: int(x + y)\nsymbol_map[MINUS] = lambda x,y: int(x - y)\nsymbol_map[MUL] = lambda x,y: int(x * y)\nsymbol_map[DIV] = lambda x,y: int(x // y)\nsymbol_map[LT] = lambda x,y: x < y\nsymbol_map[GT] = lambda x,y: x > y\nsymbol_map[EQ] = lambda x,y: x == y\n\nif_count = 0\nunit_num_count = 0\nif_bool_count = 0\nunit_bool_count = 0\ndef gen_bool_value():\n choice = random.randint(1, 10000)\n mode = choice % 3\n if mode == 0:\n return \"true\", True\n elif mode == 1:\n return \"false\", False\n else :\n return gen_relop_expr()\n\n\ndef gen_unit_num():\n global unit_num_count\n unit_num_count +=1\n choice = random.randint(1, 100)\n module = 5\n if choice % module == 0:\n random.seed(choice)\n num = random.randint(-100, 100)\n return str(num), num\n else:\n random.seed(choice+1)\n num1 = random.randrange(-100, 100)\n num2 = random.randint(-100, 100)\n if choice % module == 1:\n return '(' + str(num1) + PLUS + str(num2) +')', num1 + num2\n elif choice % module == 2:\n return '(' + str(num1) + MINUS + str(num2) +')', num1 - num2\n elif choice % module == 3:\n return '(' + str(num1) + MUL + str(num2) +')', num1 * num2\n else:\n while num2 == 0:\n num2 = random.randint(-100, 100)\n return '(' + str(num1) + DIV + str(num2) +')', num1 // num2\n\ndef get_string(name1, name2, symbol):\n return '(' + name1 + symbol + name2 + ')'\n\ndef gen_expr(symbols, unit_func):\n choice = random.randint(1, 10000)\n sym_len = len(symbols) + 1\n mode = choice % sym_len - 1\n if mode == -1:\n return unit_func()\n else:\n name1, value1 = unit_func()\n name2, value2 = unit_func()\n return get_string(name1, name2, symbols[mode]), symbol_map[symbols[mode]](value1, value2)\n\n\ndef gen_rand_expr(symbols, unit_func, connectors):\n count = random.randint(2, 2)\n expr, value = gen_expr(symbols, unit_func)\n conn_len = 
len(connectors)\n for _ in range(1, count):\n choice = random.randint(1, 10000)\n sub_expr, sub_value = gen_expr(symbols, unit_func)\n mode = choice % conn_len\n expr += connectors[mode] + sub_expr\n value = symbol_map[connectors[mode]](value, sub_value)\n\n return \"(\" + expr + \")\", value\n\ndef gen_multi_expr(symbols, unit_func, connectors, concators):\n count = 2\n expr, value = gen_rand_expr(symbols, unit_func, connectors)\n for _ in range(1,count):\n sub_expr, sub_value = gen_rand_expr(symbols, unit_func, connectors)\n choice = random.randint(1, 10000)\n mode = choice % len(concators)\n expr += concators[mode] + sub_expr\n value = symbol_map[concators[mode]](value, sub_value)\n return expr, value\n\ndef gen_bool_expr():\n n = random.randint(0, 10000)\n global if_bool_count\n global unit_bool_count\n if (n % 2 == 0 and if_bool_count < 10) or (unit_bool_count - if_bool_count > 5 and if_bool_count < 5 ):\n return gen_if_bool_expr()\n unit_bool_count += 1\n return gen_multi_expr([AND,OR], gen_bool_value, [AND, OR],[AND, OR])\n\ndef gen_if_bool_expr():\n global if_bool_count\n if_bool_count += 1\n cond, value = gen_bool_expr()\n then, v_then = gen_bool_expr()\n else_, v_else = gen_bool_expr()\n ret = '(' + 'if' + ' ' + cond + ' then ' + then + ' else ' + else_ + ')'\n if_value = v_then\n if not value:\n if_value = v_else\n return ret, if_value\n\ndef gen_if_num_expr():\n global if_count\n if_count += 1\n cond, c_value = gen_bool_expr()\n then_, v_then = gen_arith_expr()\n else_, v_else = gen_arith_expr()\n ret = '(' + 'if' + ' ' + cond + ' then ' + then_ + ' else ' + else_ + ')'\n value = v_then\n if not c_value:\n value = v_else\n return ret, value \n\ndef gen_random_num():\n global if_count \n global unit_num_count\n\n num = random.randrange(0, 100000)\n if num % 2 == 0 and if_count < 20 or (unit_num_count - if_count > 5 and if_count <= 10):\n return gen_if_num_expr()\n else:\n return gen_unit_num()\n\ndef gen_arith_expr():\n arith_op = [PLUS, MINUS, MUL]\n 
return gen_multi_expr(arith_op, gen_random_num, arith_op, arith_op)\n\ndef gen_relop_expr():\n arith_op = [PLUS, MINUS, MUL]\n relops = [GT, LT, EQ]\n num1, value1 = gen_multi_expr(arith_op, gen_unit_num, arith_op, arith_op)\n num2, value2 = gen_multi_expr(arith_op, gen_unit_num, arith_op, arith_op)\n choice = random.randint(1, 10000)\n mode = choice % len(relops)\n return get_string(num1, num2, relops[mode]), symbol_map[relops[mode]](value1, value2)\n\n\n\nif __name__ == '__main__':\n expr, value = gen_if_num_expr()\n print(expr)\n print(value)\n f = open(\"Correct_programs/if_07.lam\", 'wb')\n f.write(expr.encode('utf-8'))\n f.close()\n f = open(\"Correct_programs/results/if_07.result\", 'wb')\n if value:\n f.write('true'.encode('utf-8'))\n else:\n f.write('false'.encode('utf-8'))\n f.close()\n","repo_name":"cookieli/Lambda_interpreter","sub_path":"tests/gen_test.py","file_name":"gen_test.py","file_ext":"py","file_size_in_byte":5246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"26387534809","text":"\"\"\"\n This module is for functions which perform measurements.\n\"\"\"\nimport numpy as np\nfrom .atom_data import atomic_weights\n\n\ndef calculate_distance(rA, rB):\n # This function calculates the distance between two points given as numpy arrays.\n d=(rA-rB)\n dist=np.linalg.norm(d)\n return dist\n\ndef calculate_angle(rA, rB, rC, degrees=False):\n # Calculate the angle between three points. 
Answer is given in radians by default, but can be given in degrees\n # by setting degrees=True\n AB = rB - rA\n BC = rB - rC\n theta=np.arccos(np.dot(AB, BC)/(np.linalg.norm(AB)*np.linalg.norm(BC)))\n\n if degrees:\n return np.degrees(theta)\n else:\n return theta\n\ndef calculate_molecular_mass(symbols):\n \"\"\"Calculate the mass of a molecule.\n\n Parameters\n ----------\n symbols : list\n A list of elements.\n\n Returns\n -------\n mass : float\n The mass of the molecule\n \"\"\"\n #initialize weight\n weight= 0\n #sum over all symbols\n for i in symbols:\n weight += atomic_weights[i]\n\n return weight\n","repo_name":"noahwamble/molecool","sub_path":"molecool/measure.py","file_name":"measure.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"13188560291","text":"class Product:\r\n def funct1 (self, Product_name, quantity, Price):\r\n self.Product_name = Product_name\r\n self.quantity =quantity\r\n self.Price = Price\r\n\r\nclass subProduct(Product):\r\n def readValues(self):\r\n self.Product_name = input(\"Enter the product name: \")\r\n self.quantity = int(input(\"Enter the quantity of the product: \"))\r\n self.price = float(input(\"Enter the product price: \")) \r\n\r\n def calc_Cost(self):\r\n cost = self.quantity * self.price\r\n Vat = cost * 0.12\r\n total_cost = cost + Vat\r\n print(\"The total cost is: \", total_cost)\r\n#Instantiate the class with one object\r\nsub_Product = subProduct()\r\nsub_Product.readValues()\r\nsub_Product.calc_Cost()\r\n ","repo_name":"Ismael-Njihia/Let-s-learn-Python","sub_path":"python/INHERITANCE.PY","file_name":"INHERITANCE.PY","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"16402053126","text":"# Written by Eric Miller 6-10-17\r\n# Given a list of URLs, connect each website in parallel, and print out the HTML\r\n# Run 
the python script demoParallellScript.py for a demonstration\r\n\r\n# library for website connections. \r\n# References for functions from: https://docs.python.org/3/library/urllib.request.html#urllib.request.Request\r\nimport urllib.request\r\n\r\n# library for multiprocessing\r\n# References for functions from https://docs.python.org/dev/library/multiprocessing.html\r\nfrom multiprocessing import Process\r\n\r\n\r\n# Prints out the HTML code for a list of URLs in parallel\r\ndef print_URL_List(URLs):\r\n\t#loop through URL list, have each URL's HTML extracted with the get_HTML function in a paraell process\r\n\tfor address in URLs:\r\n\t\tparaellProcess = Process(target = get_HTML, args= (address,))\r\n\t\tparaellProcess.start()\r\n\t\r\n\t#join paraell processes\r\n\tparaellProcess.join()\r\n\treturn\r\n\r\n\t\r\n# Capture and print the HTML of a target website 'URL'\r\ndef get_HTML(URL):\r\n\t# Attempt a connection to the target website. \r\n\ttry:\r\n\t\twebConnection = urllib.request.urlopen(URL)\r\n\texcept:\r\n\t\tprint(\"FAILED to connect to URL: \" +URL) \r\n\t\treturn\r\n\t\t\r\n\t# Get website HTML code as a bytes object \r\n\tHTML = webConnection.read()\r\n\t#convert to utf-8, ignoring invalid characters (ignore parameter from https://docs.python.org/3/howto/unicode.html)\r\n\tstringHTML = HTML.decode(\"utf-8\", \"ignore\")\r\n\tprint(\"\\n------------------------------------------------------\\nHTML for URL %s is:\" % URL)\r\n\tprint(\"------------------------------------------------------\\n\\n%s\" % stringHTML)\r\n\tprint(\"\\n\\n------------------------------------------------------\\nEnd of %s HTML\\n------------------------------------------------------\\n\\n\" % URL)\r\n\treturn\r\n\r\n","repo_name":"millerer/Parallel-Processing","sub_path":"printHTML.py","file_name":"printHTML.py","file_ext":"py","file_size_in_byte":1725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} 
+{"seq_id":"71836390242","text":"# ****************************************************************\n# AULA: Visão Computacional\n# Prof: Adriano A. Santos, DSc.\n# ****************************************************************\n\n# Importando a biblioteca OpenCV\nimport cv2\n\n# Imagem\naquivo = \"./imagens/raposa.jpg\"\n\n# Carregando a imagem\nimage = cv2.imread(aquivo)\n\n# Obtendo nova imagem e visualizando\nnova_raposa = image[54:305, 135:343] #y, x\ncv2.imshow(\"Parte\", nova_raposa)\ncv2.waitKey(0) \n","repo_name":"adrianosantospb/unifacisa-visao-computacional","sub_path":"modulo1/2-pre-processamento/4-visualizando-uma-parte-da-imagem.py","file_name":"4-visualizando-uma-parte-da-imagem.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"44813424180","text":"\n\nfrom eos import ValidationError\nfrom webargs import fields\nfrom webargs.flaskparser import use_args\nfrom flask_restful import Resource\n\nfrom ..pyfa_eos import pyfa_eos_service as pes\n\n\nclass FitValidation(Resource):\n module_args = fields.Nested({\n 'id': fields.Int(required=True),\n 'state': fields.Str(required=True),\n 'charge': fields.Int(missing=None),\n })\n\n drone_args = fields.Nested({\n 'id': fields.Int(required=True),\n 'state': fields.Str(required=True),\n })\n\n eos_fit_args = {\n 'ship': fields.Int(required=True),\n 'rigs': fields.List(fields.Int(), missing=[]),\n 'implants': fields.List(fields.Int(), missing=[]),\n 'high_slots': fields.List(module_args, missing=[]),\n 'mid_slots': fields.List(module_args, missing=[]),\n 'low_slots': fields.List(module_args, missing=[]),\n 'drones': fields.List(drone_args, missing=[]),\n }\n\n @staticmethod\n def convert_fit_to_response(fit):\n dps = fit.stats.get_nominal_dps(reload=False)\n dps_reload = fit.stats.get_nominal_dps(reload=True)\n\n return {\n 'cpu': {\n 'used': fit.stats.cpu.used,\n 'output': fit.stats.cpu.output,\n },\n 
'powergrid': {\n 'used': fit.stats.powergrid.used,\n 'output': fit.stats.powergrid.output,\n },\n 'calibration': {\n 'used': fit.stats.calibration.used,\n 'output': fit.stats.calibration.output,\n },\n 'dronebay': {\n 'used': fit.stats.dronebay.used,\n 'output': fit.stats.dronebay.output,\n },\n 'drone_bandwidth': {\n 'used': fit.stats.drone_bandwidth.used,\n 'output': fit.stats.drone_bandwidth.output,\n },\n 'high_slots': {\n 'used': fit.stats.high_slots.used,\n 'total': fit.stats.high_slots.total,\n },\n 'med_slots': {\n 'used': fit.stats.med_slots.used,\n 'total': fit.stats.med_slots.total,\n },\n 'low_slots': {\n 'used': fit.stats.low_slots.used,\n 'total': fit.stats.low_slots.total,\n },\n 'rig_slots': {\n 'used': fit.stats.rig_slots.used,\n 'total': fit.stats.rig_slots.total,\n },\n 'subsystem_slots': {\n 'used': fit.stats.subsystem_slots.used,\n 'total': fit.stats.subsystem_slots.total,\n },\n 'turret_slots': {\n 'used': fit.stats.turret_slots.used,\n 'total': fit.stats.turret_slots.total,\n },\n 'launcher_slots': {\n 'used': fit.stats.launcher_slots.used,\n 'total': fit.stats.launcher_slots.total,\n },\n 'launched_drones': {\n 'used': fit.stats.launched_drones.used,\n 'total': fit.stats.launched_drones.total,\n },\n 'damage': {\n 'reload': {\n 'em': dps_reload.em,\n 'thermal': dps_reload.thermal,\n 'kinetic': dps_reload.kinetic,\n 'explosive': dps_reload.explosive,\n 'total': dps_reload.total,\n },\n 'no_reload': {\n 'em': dps.em,\n 'thermal': dps.thermal,\n 'kinetic': dps.kinetic,\n 'explosive': dps.explosive,\n 'total': dps.total,\n },\n },\n }\n\n @use_args(eos_fit_args)\n def post(self, args):\n ship = pes.build_ship(args['ship'])\n highs = [pes.build_high_module(x['id'], x['state'], x['charge']) for x in args['high_slots']]\n mids = [pes.build_mid_module(x['id'], x['state'], x['charge']) for x in args['mid_slots']]\n lows = [pes.build_low_module(x['id'], x['state'], x['charge']) for x in args['low_slots']]\n rigs = [pes.build_rig(x) for x in 
args['rigs']]\n implants = [pes.build_implant(x) for x in args['implants']]\n drones = [pes.build_drone(x['id'], x['state']) for x in args['drones']]\n\n skills = pes.build_all_v_character()\n\n fit = pes.build_full_fit(ship, skills, highs, mids, lows, rigs, implants, drones)\n\n try:\n fit.validate()\n return self.convert_fit_to_response(fit), 200\n except ValidationError:\n return {}, 200\n","repo_name":"regner/pyfa-ng-backend","sub_path":"pyfa_ng_backend/resources/fit_validation.py","file_name":"fit_validation.py","file_ext":"py","file_size_in_byte":4568,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"71221546081","text":"from django import template\n\nregister = template.Library()\n\nfrom ..models import Mytext\n\n@register.simple_tag(name='my_tag')\ndef total_post():\n return Mytext.name.count()\n\n@register.inclusion_tag('artical.html')\ndef show_latest_post(count = 10):\n latest_posts = Mytext.updated.order_by('updated')[:count]\n return {'latest-posts':latest_posts}","repo_name":"Jonleon/myblog","sub_path":"blog/templatetags/blog_tags.py","file_name":"blog_tags.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"33917955848","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\nimport math\nfrom itertools import combinations\n\n\nwith open('inp.txt') as fin:\n weights = [int(e.strip()) for e in fin.readlines()]\n\n\n# part 1\nthird = sum(weights) // 3\n\nbest = math.inf, math.inf\ndef rec(len_a, sum_a, prod_a, sum_b, weights_a, weights_other):\n global best\n\n if sum_a > third or sum_b > third:\n return\n\n if len_a > best[0] or (len_a == best[0] and prod_a > best[1]):\n return\n\n if sum_a == sum_b == third:\n best = len_a, prod_a\n\n if sum_a < third:\n for i, e in enumerate(weights_a):\n rec(\n len_a + 1, sum_a + e, prod_a * e, sum_b, weights_a[i + 1:],\n weights_other + 
weights_a[:i])\n return\n n_weights_other = weights_a + weights_other\n for i, e in enumerate(n_weights_other):\n rec(len_a, sum_a, prod_a, sum_b + e, [], n_weights_other[i + 1:])\n\nrec(0, 0, 1, 0, weights, [])\nprint(best[1])\n\n\n# part 2\nfourth = sum(weights) // 4\n\n\ndef splittable(sum_b, sum_c, weights_b, weights_other_2):\n if sum_b == sum_c == fourth:\n return True\n\n if sum_b > fourth or sum_c > fourth:\n return False\n\n if sum_b < fourth:\n for i, e in enumerate(weights_b):\n if splittable(sum_b + e, sum_c, weights_b[i + 1:],\n weights_other_2 + weights_b[:i]):\n return True\n return False\n\n n_weights_other = weights_b + weights_other_2\n for i, e in enumerate(n_weights_other):\n if splittable(sum_b, sum_c + e, [], n_weights_other[i + 1:]):\n return True\n return False\n\n\nfor i in range(1, len(weights)):\n combs = [e for e in combinations(weights, i) if sum(e) == fourth]\n best = math.inf\n for comb in combs:\n rem = sorted(list(set(weights) - set(comb)))\n if splittable(0, 0, rem, []):\n best = min(best, math.prod(comb))\n if best != math.inf:\n print(best)\n break\n","repo_name":"zswaff/advent","sub_path":"2015/24/sln.py","file_name":"sln.py","file_ext":"py","file_size_in_byte":1952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"17888119476","text":"from flask import Flask, render_template, Response, redirect, request\nimport subprocess\nimport os\nfrom os import listdir\nfrom os.path import isfile, join\n\nrootPath = \"/home/franklin/Desktop/Projetos/iBikeSafe/CyclingView/BikeWay-view/modules/\"\napp = Flask(__name__)\ncurrentMap = None\n\n@app.route('/')\ndef index():\n mapsFiles = available_maps()\n return render_template('index.html', firstMonth = mapsFiles[0], lastMonth = mapsFiles[len(mapsFiles) - 1], currentMonth = currentMap)\n\n@app.route('/monthChange/', methods=['POST'])\ndef monthChange():\n monthYear = request.form['month_change_picker']\n get_map(monthYear)\n return 
redirect('/')\n\n#Gets the private address\ndef get_ip_address():\n ip = str(subprocess.check_output('hostname -I', shell=True).decode('utf-8'))\n return ip\n\ndef get_map(monthYear):\n global currentMap\n try:\n os.system(\"cp \"+rootPath+\"maps/\"+monthYear+\".html \"+rootPath+\"templates/map.html\")\n currentMap = monthYear\n except:\n None\n\ndef available_maps():\n mapsPath = rootPath+\"maps/\"\n mapsFiles = [f.replace('.html', '') for f in listdir(mapsPath) if isfile(join(mapsPath, f))]\n mapsFiles.sort()\n return mapsFiles\n\nif __name__ == '__main__':\n mapsFiles = available_maps()\n get_map(mapsFiles[len(mapsFiles) - 1])\n\n ip = get_ip_address().replace('\\n', '')\n ip = ip[0:ip.find(' ')]\n\n app.run(host=ip, debug=True)\n","repo_name":"lablara/bikeway","sub_path":"CyclingView/BikeWay-view/modules/visualizationApp.py","file_name":"visualizationApp.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"13511884840","text":"import serial\nfrom serial.serialutil import SerialException\n\nfrom pymodbus.client.sync import ModbusSerialClient\nfrom .robotiq_modbus_rtu import comModbusRtu\n\nfrom math import ceil\n\nimport numpy as np\nimport array\n\nACTION_REQ_IDX = 7\nPOS_INDEX = 10\nSPEED_INDEX = 11\nFORCE_INDEX = 12\n\n\nclass Robotiq2FingerGripper:\n def __init__(self, device_id=0, stroke=0.085, comport=\"/dev/ttyUSB0\", baud=115200):\n\n self.client = comModbusRtu.communication()\n\n connected = self.client.connectToDevice(device=comport)\n if not connected:\n raise Exception(\n \"Communication with gripper %d on serial port: %s and baud rate: %d not achieved\"\n % (device_id, comport, baud)\n )\n\n self.init_success = True\n self.device_id = device_id + 9\n self.stroke = stroke\n self.initialize_communication_variables()\n\n self.message = []\n\n def _update_cmd(self):\n\n # Initiate command as an empty list\n self.message = []\n # Build the command with 
each output variable\n self.message.append(self.rACT + (self.rGTO << 3) + (self.rATR << 4))\n self.message.append(0)\n self.message.append(0)\n self.message.append(self.rPR)\n self.message.append(self.rSP)\n self.message.append(self.rFR)\n\n def sendCommand(self):\n \"\"\"Send the command to the Gripper.\"\"\"\n return self.client.sendCommand(self.message)\n\n def getStatus(self):\n \"\"\"Request the status from the gripper and return it in the Robotiq2FGripper_robot_input msg type.\"\"\"\n\n # Acquire status from the Gripper\n status = self.client.getStatus(6)\n\n # Check if read was successful\n if status is None:\n return False\n\n # Assign the values to their respective variables\n self.gACT = (status[0] >> 0) & 0x01\n self.gGTO = (status[0] >> 3) & 0x01\n self.gSTA = (status[0] >> 4) & 0x03\n self.gOBJ = (status[0] >> 6) & 0x03\n self.gFLT = status[2]\n self.gPR = status[3]\n self.gPO = status[4]\n self.gCU = status[5]\n\n return True\n\n def initialize_communication_variables(self):\n # Out\n self.rPR = 0\n self.rSP = 255\n self.rFR = 150\n self.rARD = 1\n self.rATR = 0\n self.rGTO = 0\n self.rACT = 0\n # In\n self.gSTA = 0\n self.gACT = 0\n self.gGTO = 0\n self.gOBJ = 0\n self.gFLT = 0\n self.gPO = 0\n self.gPR = 0\n self.gCU = 0\n\n self._update_cmd()\n self._max_force = 100.0 # [%]\n\n def shutdown(self):\n self.client.close()\n\n def activate_gripper(self):\n self.rACT = 1\n self.rPR = 0\n self.rSP = 255\n self.rFR = 150\n self._update_cmd()\n\n def deactivate_gripper(self):\n self.rACT = 0\n self._update_cmd()\n\n def activate_emergency_release(self, open_gripper=True):\n self.rATR = 1\n self.rARD = 1\n\n if open_gripper:\n self.rARD = 0\n self._update_cmd()\n\n def deactivate_emergency_release(self):\n self.rATR = 0\n self._update_cmd()\n\n def goto(self, pos, vel, force):\n self.rACT = 1\n self.rGTO = 1\n self.rPR = int(np.clip((3.0 - 230.0) / self.stroke * pos + 230.0, 0, 255))\n self.rSP = int(np.clip(255.0 / (0.1 - 0.013) * vel - 0.013, 0, 255))\n 
self.rFR = int(np.clip(255.0 / (self._max_force) * force, 0, 255))\n self._update_cmd()\n\n def stop(self):\n self.rACT = 1\n self.rGTO = 0\n self._update_cmd()\n\n def is_ready(self):\n return self.gSTA == 3 and self.gACT == 1\n\n def is_reset(self):\n return self.gSTA == 0 or self.gACT == 0\n\n def is_moving(self):\n return self.gGTO == 1 and self.gOBJ == 0\n\n def is_stopped(self):\n return self.gOBJ != 0\n\n def object_detected(self):\n return self.gOBJ == 1 or self.gOBJ == 2\n\n def get_fault_status(self):\n return self.gFLT\n\n def get_pos(self):\n po = float(self.gPO)\n return np.clip(self.stroke / (3.0 - 230.0) * (po - 230.0), 0, self.stroke)\n\n def get_req_pos(self):\n pr = float(self.gPR)\n return np.clip(self.stroke / (3.0 - 230.0) * (pr - 230.0), 0, self.stroke)\n\n def get_current(self):\n return self.gCU * 0.1\n","repo_name":"facebookresearch/fairo","sub_path":"polymetis/polymetis/python/polymetis/robot_client/robotiq_gripper/third_party/robotiq_2finger_grippers/robotiq_2f_gripper.py","file_name":"robotiq_2f_gripper.py","file_ext":"py","file_size_in_byte":4355,"program_lang":"python","lang":"en","doc_type":"code","stars":826,"dataset":"github-code","pt":"54"} +{"seq_id":"17899021987","text":"import discord\nfrom discord.ext import commands\nimport youtube_dl\nimport asyncio\n\n\nclass Music(commands.Cog):\n def __init__(self, client):\n self.client = client\n\n @commands.command()\n async def rickrollvc(self, message):\n if message.author.voice:\n channel = message.author.voice.channel\n voice = await channel.connect()\n source = discord.FFmpegPCMAudio(\"rickroll.mp3\")\n voice.play(source)\n\n @commands.command()\n async def play(self, message, url: str):\n try:\n voice_client = await message.author.voice.channel.connect()\n except:\n pass\n try:\n videoID = url.split(\"watch?v=\")[1].split(\"&\")[0]\n voice_clients[voice_client.guild.id] = voice_client\n loop = asyncio.get_event_loop()\n data = await loop.run_in_executor(None, lambda: 
ytdl.extract_info(url, download=False))\n song = data[\"url\"]\n player = discord.FFmpegPCMAudio(song, **ffmpeg_options)\n voice_clients[message.guild.id].play(player)\n\n embed = discord.Embed(\n title=f\"Now playing:\", color=discord.Colour.green())\n embed.add_field(name=\"url:\", value=f\"[{url}]\", inline=False)\n embed.set_image(url=f\"https://img.youtube.com/vi/{videoID}/0.jpg\")\n await message.send(embed=embed)\n except Exception as err:\n print(err)\n\n @commands.command()\n async def pause(self, message):\n if not voice_clients[message.guild.id].is_paused():\n voice_clients[message.guild.id].pause()\n embed = discord.Embed(title=\"Paused Music\",\n color=discord.Colour.red())\n await message.send(embed=embed)\n else:\n embed = discord.Embed(\n title=\"I am not already paused!\", color=discord.Colour.green())\n\n @commands.command()\n async def resume(self, message):\n if voice_clients[message.guild.id].is_paused():\n voice_clients[message.guild.id].resume()\n embed = discord.Embed(title=\"Resumed Music\")\n await message.send(embed=embed)\n else:\n embed = discord.Embed(\n title=\"I am not currently paused!\", color=discord.Colour.red())\n\n @commands.command()\n async def stop(self, message):\n voice_clients[message.guild.id].stop()\n await voice_clients[message.guild.id].disconnect()\n embed = discord.Embed(title=\"Stopped Music\",\n color=discord.Colour.green())\n await message.send(embed=embed)\n\n\ndef setup(client):\n global voice_clients\n global yt_dl_opts\n global ytdl\n global ffmpeg_options\n voice_clients = {}\n yt_dl_opts = {\"format\": \"bestaudio/best\"}\n ytdl = youtube_dl.YoutubeDL(yt_dl_opts)\n ffmpeg_options = {\"options\": \"-vn\"}\n\n client.add_cog(Music(client))\n","repo_name":"FantasyPvP/Crystal","sub_path":"cogs/music.py","file_name":"music.py","file_ext":"py","file_size_in_byte":2921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"9775751982","text":"import 
unittest\n\nfrom kmk.keys import KC\nfrom kmk.modules.holdtap import HoldTap\nfrom kmk.modules.layers import Layers\nfrom kmk.modules.tapdance import TapDance\nfrom tests.keyboard_test import KeyboardTest\n\n\nclass TestTapDance(unittest.TestCase):\n def setUp(self):\n self.t_within = 2 * KeyboardTest.loop_delay_ms\n self.t_after = 10 * KeyboardTest.loop_delay_ms\n tap_time = (self.t_after + self.t_within) // 4 * 3\n\n TapDance.tap_time = tap_time\n\n self.keyboard = KeyboardTest(\n [Layers(), HoldTap(), TapDance()],\n [\n [\n KC.TD(KC.N0, KC.N1),\n KC.TD(\n KC.HT(KC.N1, KC.A),\n KC.HT(KC.N2, KC.B, tap_time=2 * tap_time),\n ),\n KC.TD(KC.HT(KC.X, KC.Y), KC.X, tap_time=0),\n KC.TD(KC.LT(1, KC.N3), KC.X, tap_time=0),\n KC.N4,\n ],\n [KC.N9, KC.N8, KC.N7, KC.N6, KC.N5],\n ],\n debug_enabled=False,\n )\n\n def test_normal_key(self):\n keyboard = self.keyboard\n t_within = self.t_within\n\n keyboard.test('Tap x1', [(0, True), (0, False)], [{KC.N0}, {}])\n\n keyboard.test(\n 'Tap x2',\n [(0, True), (0, False), t_within, (0, True), (0, False)],\n [{KC.N1}, {}],\n )\n\n keyboard.test(\n 'Tap x3',\n [\n (0, True),\n (0, False),\n (0, True),\n (0, False),\n (0, True),\n (0, False),\n ],\n [{KC.N1}, {}, {KC.N0}, {}],\n )\n\n keyboard.test(\n 'Tap x1 interrupted',\n [(0, True), (4, True), (4, False), (0, False)],\n [{KC.N0}, {KC.N0, KC.N4}, {KC.N0}, {}],\n )\n\n keyboard.test(\n 'Tap x1 interrupted',\n [(0, True), (4, True), (0, False), (4, False)],\n [{KC.N0}, {KC.N0, KC.N4}, {KC.N4}, {}],\n )\n\n keyboard.test(\n 'Tap x1 interrupted',\n [(0, True), (0, False), (4, True), (4, False)],\n [{KC.N0}, {}, {KC.N4}, {}],\n )\n\n keyboard.test(\n 'Tap x2, interrupted',\n [\n (0, True),\n (0, False),\n t_within,\n (0, True),\n (4, True),\n (0, False),\n (4, False),\n ],\n [{KC.N1}, {KC.N1, KC.N4}, {KC.N4}, {}],\n )\n\n def test_holdtap(self):\n keyboard = self.keyboard\n t_within = self.t_within\n t_after = self.t_after\n\n keyboard.test('Tap x1', [(1, True), (1, False)], [{KC.N1}, 
{}])\n\n keyboard.test(\n 'Tap x2',\n [(1, True), (1, False), t_within, (1, True), (1, False)],\n [{KC.N2}, {}],\n )\n\n keyboard.test('Hold', [(1, True), t_after, (1, False)], [{KC.A}, {}])\n\n keyboard.test(\n 'Tap-Hold',\n [(1, True), (1, False), t_within, (1, True), 2 * t_after, (1, False)],\n [{KC.B}, {}],\n )\n\n keyboard.test(\n 'Tap-Hold interrupted',\n [\n (1, True),\n (1, False),\n t_within,\n (1, True),\n t_within,\n (4, True),\n (4, False),\n (1, False),\n ],\n [{KC.B}, {KC.B, KC.N4}, {KC.B}, {}],\n )\n\n def test_multi_tapdance(self):\n keyboard = self.keyboard\n t_within = self.t_within\n t_after = self.t_after\n\n keyboard.test(\n '',\n [(0, True), (0, False), t_within, (1, True), (1, False)],\n [{KC.N0}, {}, {KC.N1}, {}],\n )\n\n keyboard.test(\n '',\n [\n (0, True),\n (0, False),\n (0, True),\n (2, True),\n (2, False),\n t_after,\n (0, False),\n ],\n [{KC.N1}, {KC.N1, KC.X}, {KC.N1}, {}],\n )\n\n keyboard.test(\n '',\n [\n (2, True),\n (2, False),\n (2, True),\n (0, True),\n (0, False),\n t_after,\n (2, False),\n ],\n [{KC.X}, {KC.X, KC.N0}, {KC.X}, {}],\n )\n\n def test_layer(self):\n keyboard = self.keyboard\n t_within = self.t_within\n t_after = self.t_after\n\n keyboard.test(\n '',\n [(3, True), (3, False), t_within, (1, True), (1, False)],\n [{KC.N3}, {}, {KC.N1}, {}],\n )\n\n keyboard.test(\n '', [(3, True), t_after, (1, True), (1, False), (3, False)], [{KC.N8}, {}]\n )\n\n keyboard.test(\n '', [(3, True), t_after, (1, True), (3, False), (1, False)], [{KC.N8}, {}]\n )\n\n keyboard.test(\n '',\n [\n (1, True),\n (3, True),\n t_after,\n (1, False),\n (4, True),\n (4, False),\n (3, False),\n (1, False),\n ],\n [{KC.A}, {}, {KC.N5}, {}],\n )\n\n def test_holdtap_repeat(self):\n keyboard = self.keyboard\n t_after = self.t_after\n\n keyboard.test(\n 'HoldTap repeat',\n [\n (2, True),\n (2, False),\n (2, True),\n t_after,\n (4, True),\n (2, False),\n (4, False),\n ],\n [{KC.X}, {KC.X, KC.N4}, {KC.N4}, {}],\n )\n\n\nif __name__ == '__main__':\n 
unittest.main()\n","repo_name":"KMKfw/kmk_firmware","sub_path":"tests/test_tapdance.py","file_name":"test_tapdance.py","file_ext":"py","file_size_in_byte":5799,"program_lang":"python","lang":"en","doc_type":"code","stars":1113,"dataset":"github-code","pt":"54"} +{"seq_id":"64036906","text":"from collections import deque\n\n\ndef part1(n, search):\n recipies = [3, 7]\n a, b = 0, 1\n\n di = 30\n for i in range(n+10):\n tot = str(recipies[a] + recipies[b])\n recipies += [int(d) for d in tot]\n a = (a + 1 + recipies[a]) % len(recipies)\n b = (b + 1 + recipies[b]) % len(recipies)\n\n if i % (di-6) == 0:\n if search in ''.join(str(x) for x in recipies[-di:]):\n print('found')\n break\n\n # print(recipies)\n s = ''.join(str(x) for x in recipies[max(i-di,0):])\n # print(s,i,di)\n if search in s:\n idx = s.index(search)\n print(idx+max(i-di,0))\n else:\n print('no match')\n\n\n# with open('d14.txt','rt') as f:\n# input = f.read().strip()\ninput = 440231\n\nimport time\nt = time.perf_counter()\n\n# for inp in [\"59414\"]:\n# part1(2330, inp)\n\npart1(int(5e7), str(input))\n\n\nprint(f'{time.perf_counter() - t}s')\n","repo_name":"bj0/aoc","sub_path":"aoc/2018/d14.py","file_name":"d14.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"37308240917","text":"from django.db import models\nfrom django.utils.text import slugify\n\n\nclass Book(models.Model):\n title = models.CharField(max_length=80)\n author = models.CharField(max_length=80)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n category = models.ForeignKey(\n 'Category', on_delete=models.DO_NOTHING, null=True, blank=True)\n\n def __str__(self):\n return f\"Book title: {self.title} author: {self.author}\"\n\n\nclass Category(models.Model):\n name = models.CharField(max_length=40)\n slug = models.SlugField(null=False, unique=True)\n\n class Meta:\n 
verbose_name_plural = \"Categories\"\n\n def __str__(self):\n return f'{self.name}'\n\n def save(self, *args, **kwargs):\n if not self.slug:\n self.slug = slugify(self.name)\n return super().save(*args, **kwargs)\n","repo_name":"amygori/django-freeshelf","sub_path":"core/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"13508091940","text":"import os\nimport sys\nimport cv2\nimport numpy as np\nimport pickle\nimport tempfile\nimport logging\nfrom imantics import Mask\nimport torch\nimport json\n\nfrom detectron2.data import MetadataCatalog\nfrom detectron2.utils.visualizer import ColorMode\nfrom detectron2.config import get_cfg\nfrom detectron2.engine.defaults import DefaultPredictor\nfrom .core import AbstractHandler, WorldObject\nfrom droidlet.shared_data_structs import RGBDepth\nfrom ..detectron.detector.utils import get_predictor\nfrom ..detectron.detector.visualizer import LocobotVisualizer\nfrom droidlet.perception.robot.perception_util import get_color_tag\n\n\nlvis_yaml = \"configs/mask_rcnn_R_101_FPN_1x.yaml\"\ndetector_weights = \"model_999.pth\"\ndefault_json_dir = (\n os.path.abspath(os.path.dirname(__file__)) + \"/../../../../annotation_data/model/v0\"\n)\nprops_filename = \"props.json\"\nthings_filename = \"things.json\"\n\nfile_root = os.path.dirname(os.path.realpath(__file__))\n\n\nclass ObjectDetection(AbstractHandler):\n \"\"\"Class for object detector.\n\n We use a modified Mask R-CNN with an additional head that predicts object properties.\n\n Args:\n model_data_dir (string): path to the model directory\n \"\"\"\n\n def __init__(self, model_data_dir):\n self.detector = DetectorBase(model_data_dir, verbose=self.verbose)\n\n def __call__(self, rgb_depth):\n \"\"\"the inference logic for the handler lives here.\n\n Args:\n rgb_depth (RGBDepth): the current input frame to run inference on.\n\n Returns:\n detections 
(list[Detections]): list of detections found\n \"\"\"\n if self.verbose > 0:\n logging.info(\"In DetectionHandler ... \")\n rgb = rgb_depth.rgb\n p_list, predictions = self.detector(rgb)\n detections = []\n for x in p_list:\n if self.verbose > 0:\n logging.info(\"Detected {} objects\".format(len(p_list)))\n # create a detection object for each instance\n detections.append(\n Detection(\n rgb_depth,\n x[\"class_label\"],\n x[\"prop_label\"],\n x[\"mask\"],\n x[\"bbox\"],\n center=x[\"center\"],\n )\n )\n if os.getenv(\"DEBUG_DRAW\") == \"True\":\n self._debug_draw(rgb_depth, predictions)\n return detections\n\n def _debug_draw(self, rgb_depth, predictions):\n self.detector.draw(rgb_depth.rgb, predictions)\n\n\nclass DetectorBase:\n \"\"\"Class that encapsulates low_level logic for the detector, like loading the model and parsing inference outputs.\"\"\"\n\n def __init__(self, model_data_dir, verbose=1):\n self.verbose = verbose\n files = os.listdir(model_data_dir)\n props_file = (\n os.path.join(model_data_dir, props_filename)\n if props_filename in files\n else os.path.join(default_json_dir, props_filename)\n )\n things_file = (\n os.path.join(model_data_dir, things_filename)\n if things_filename in files\n else os.path.join(default_json_dir, things_filename)\n )\n\n with open(props_file, \"r\") as h:\n self.properties = json.load(h)[\"items\"]\n if self.verbose > 0:\n logging.info(\"{} properties\".format(len(self.properties)))\n with open(things_file, \"r\") as h:\n self.things = json.load(h)[\"items\"]\n if self.verbose > 0:\n logging.info(\"{} things\".format(len(self.things)))\n\n weights = os.path.join(model_data_dir, detector_weights)\n self.dataset_name = \"dummy_dataset\"\n self.predictor = get_predictor(\n lvis_yaml, weights, self.dataset_name, self.properties, self.things\n )\n\n def __call__(self, img):\n predictions = self.predictor(img)\n oi = predictions[\"instances\"].to(\"cpu\").get_fields()\n centers = oi[\n \"pred_boxes\"\n ].get_centers() # N*2 
https://detectron2.readthedocs.io/_modules/detectron2/structures/boxes.html\n p_list = []\n num_instances = len(oi[\"pred_classes\"])\n if self.verbose > 0:\n logging.info(\"{} instances detected.\".format(num_instances))\n for x in range(num_instances):\n p = {}\n # class label\n pred_class = oi[\"pred_classes\"][x]\n pred_label = self.things[pred_class]\n p[\"class_label\"] = pred_label\n\n # properties\n pred_prop = oi[\"pred_props\"][x]\n prop_label = []\n for k in pred_prop:\n prop_label.append(self.properties[k])\n p[\"prop_label\"] = prop_label\n\n # mask\n p[\"mask\"] = oi[\"pred_masks\"][x]\n p[\"bbox\"] = oi[\"pred_boxes\"][x]\n p[\"center\"] = (int(centers[x][0].item()), int(centers[x][1].item()))\n p_list.append(p)\n\n return p_list, predictions\n\n def draw(self, im, predictions, save_to_disk=False):\n v = LocobotVisualizer(\n im[:, :, ::-1],\n metadata=MetadataCatalog.get(self.dataset_name),\n scale=0.8,\n instance_mode=ColorMode.IMAGE_BW, # remove the colors of unsegmented pixels\n )\n v = v.draw_instance_predictions(predictions[\"instances\"].to(\"cpu\"))\n cv2.imshow(\"Insight\", v.get_image()[:, :, ::-1])\n cv2.waitKey(3)\n\n if save_to_disk:\n tmp = os.path.join(file_root, \"tmp\")\n if not os.path.isdir(tmp):\n os.mkdir(tmp)\n tf = tempfile.NamedTemporaryFile(\n prefix=\"detector_\", suffix=\".jpg\", dir=tmp, delete=False\n )\n logging.info(\"Saving to {}\".format(tf.name))\n cv2.imwrite(tf.name, v.get_image()[:, :, ::-1])\n\n\nclass Detection(WorldObject):\n \"\"\"Instantiation of the WorldObject that is used by the detector.\"\"\"\n\n def __init__(\n self,\n rgb_depth: RGBDepth,\n class_label,\n properties,\n mask,\n bbox,\n face_tag=None,\n center=None,\n xyz=None,\n ):\n WorldObject.__init__(\n self, label=class_label, center=center, rgb_depth=rgb_depth, mask=mask, xyz=xyz\n )\n self.bbox = bbox\n self.tracked_features = []\n self.properties = properties\n self.color = get_color_tag(rgb_depth.get_pillow_image(), self.center)\n 
self.facial_rec_tag = face_tag\n self.feature_repr = None\n\n def _maybe_bbox(self, bbox, mask):\n if hasattr(bbox, \"tensor\"):\n bbox = bbox.tensor.tolist()[0]\n if bbox is None:\n nz = mask.nonzero()\n y, x = nz[0], nz[1]\n bbox = [int(x[0]), int(y[0]), int(x[-1]), int(y[-1])]\n return bbox\n\n def to_struct(self):\n bbox = self._maybe_bbox(self.bbox, self.mask)\n mask_arr = []\n if self.mask is not None and isinstance(self.mask, np.ndarray):\n mask_arr = self.mask\n if self.mask is not None and isinstance(self.mask, torch.Tensor):\n mask_arr = self.mask.cpu().detach().numpy()\n mask_points_nd = Mask(mask_arr).polygons().points\n mask_points = list(map(lambda x: x.tolist(), mask_points_nd))\n return {\n \"id\": self.eid,\n \"xyz\": list(self.xyz),\n \"bbox\": bbox,\n \"label\": self.label,\n \"properties\": \"\\n \".join(self.properties if self.properties is not None else \"\"),\n \"mask\": mask_points,\n }\n\n def get_masked_img(self):\n rgb = self.rgb_depth.rgb\n h, w, _ = rgb.shape\n bbox = self._maybe_bbox(self.bbox, self.mask)\n x1, y1, x2, y2 = [int(x) for x in bbox]\n mask = np.zeros((h, w), np.uint8)\n cv2.rectangle(mask, (x1, y1), (x2, y2), 255, -1, 4)\n im = cv2.bitwise_and(rgb, rgb, mask=mask)\n # logging.debug(\"Calculating feature repr for {}\".format(self.label))\n return im\n","repo_name":"facebookresearch/fairo","sub_path":"droidlet/perception/robot/handlers/detector.py","file_name":"detector.py","file_ext":"py","file_size_in_byte":7946,"program_lang":"python","lang":"en","doc_type":"code","stars":826,"dataset":"github-code","pt":"54"} +{"seq_id":"1218907711","text":"import re\nfrom typing import (\n Callable,\n Optional,\n Sequence,\n)\n\nfrom dl_attrs_model_mapper_doc_tools.render_units import DocLink\n\n\nINLINE_LINK_RE = re.compile(r\"\\[([^]]+)]\\(([^)]+)\\)\")\n\n\ndef process_links(\n txt: str,\n link_processor: Optional[Callable[[DocLink], Optional[DocLink]]] = None,\n) -> Sequence[str | DocLink]:\n \"\"\"\n Tokenize `txt` into 2 types of 
tokens: plain text & links.\n Links can be processed by `link_processor`.\n If `link_processor` returns `None`\n - link will be treated as plain text and will not be extracted as dedicated token.\n If `link_processor` is `None` all links will be extracted as-is.\n \"\"\"\n ret: list[str | DocLink] = []\n effective_link_processor: Callable[[DocLink], Optional[DocLink]] = (\n link_processor if link_processor is not None else lambda x: x\n )\n pos = 0\n\n for matcher in INLINE_LINK_RE.finditer(txt):\n span_start = matcher.span()[0]\n span_end = matcher.span()[1]\n\n doc_link = DocLink(text=matcher.group(1), href=matcher.group(2))\n processed_link = effective_link_processor(doc_link)\n\n if processed_link is not None:\n if pos != span_start:\n ret.append(txt[pos:span_start])\n ret.append(processed_link)\n pos = span_end\n\n if pos < len(txt):\n ret.append(txt[pos : len(txt)])\n\n return ret\n","repo_name":"datalens-tech/datalens-backend","sub_path":"lib/dl_attrs_model_mapper_doc_tools/dl_attrs_model_mapper_doc_tools/md_link_extractor.py","file_name":"md_link_extractor.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"en","doc_type":"code","stars":99,"dataset":"github-code","pt":"54"} +{"seq_id":"34817645108","text":"\"\"\"Routines to pad convolution to control the centering of the kernel.\n\n\"\"\"\nimport numpy\ndef pad(x, klength, origin=0, value=0.):\n \"\"\"Pad an array that is going to be convolved.\n\n Set origin to zero to have only positive delays. Set origin to\n klength-1 to have entirely negative delays. Set origin to the\n center of the kernel for centering on zero delay.\n\n Parameters\n ----------\n klength: scalar\n Length of the kernel.\n origin: scalar\n The index of the kernel value you want at the origin (default 0).\n value: scalar \n Value to pad the array with (default 0). 
\n\n \"\"\"\n if (origin > klength-1):\n raise ValueError(\"Origin can't be > klength-1.\")\n elif (origin < 0):\n raise ValueError(\"Origin can't be < 0.\")\n return numpy.hstack((numpy.zeros(klength-1-origin) + value,\n x,\n numpy.zeros(origin) + value))\n\ndef padded_convolve(x, kernel, origin=0, value=0.0):\n \"\"\"Convolve an array with padding.\n \n See docstring for pad for more info.\n \"\"\"\n return numpy.convolve(pad(x, len(kernel), origin, value), \n kernel, \n mode='valid')\n\n","repo_name":"roban/EnrichPy","sub_path":"enrichpy/padconvolve.py","file_name":"padconvolve.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"3823400548","text":"from .lib.nmap_consumer import nmap_import\nimport logging\nlogger = logging.getLogger(__name__)\n\nclass NmapConsumer:\n def __init__(self, nerds):\n self.data = nerds\n\n def process(self):\n try:\n nmap_import(self.data)\n except:\n name = self.data.get(\"host\", {}).get(\"name\")\n logger.exception(\"Unable to process %s\" % name)\n raise\n\n","repo_name":"NORDUnet/ni","sub_path":"src/niweb/apps/nerds/nerds.py","file_name":"nerds.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"24119195132","text":"\"\"\"\nGeneral and reusable exploratory data analysis functionality\n\"\"\"\nimport pandas as pd \n\ndef missing_perc(df):\n \"\"\"\n Return a dataframe with percentage of missing values\n in each column in a sorted order\n\n Args:\n df: dataframe\n Returns:\n dataframe with percentage of missing values in each column\n \"\"\"\n\n missing = df.isnull().sum()\n missing = missing[missing > 0] * 100 / df.shape[0]\n missing.sort_values(inplace=True)\n return pd.DataFrame(missing, columns=['missing_perc'])\n\n\ndef unique_val(df, outputcol='n_unique_vals'):\n \"\"\"\n Count the number of distinct values in each column 
in a sorted order\n\n Args:\n df: dataframe\n col: string, column name of the output dataframe\n Returns:\n dataframe with the number of distinct values in each column\n \"\"\"\n\n columns = df.columns\n undict = {}\n for col in columns:\n undict[col] = df[col].astype(str).nunique()\n undf = pd.DataFrame.from_dict(undict,\n 'index',\n columns=[outputcol])\n undf.sort_values(by=[outputcol], inplace=True)\n return undf\n\n\ndef impute_missing(df, column, method='median', val=0):\n \"\"\"\n Return a dataframe where the target column missing values have been imputed\n\n Args:\n df: dataframe\n colum: string, target column with missing values to be filled\n method: string, imputation method (mean, median or numeric)\n val: integer, value to be set for numeric method imputation\n Returns:\n Dataframe where the target column missing values have been imputed\n \"\"\"\n\n if method == 'median':\n df[column] = df[column].fillna(df[column].median())\n elif method == 'mean':\n df[column] = df[column].fillna(df[column].mean())\n elif method == 'mode':\n df[column] = df[column].fillna(df[column].mode()[0])\n elif method == 'numeric':\n df[column] = df[column].fillna(val)\n else:\n raise RuntimeError(\n 'Enter a valid imputation method,'\n 'median, mean, mode and numeric are supported.'\n ' Missing values were not replaced.')\n return df\n\n\n\n","repo_name":"rdekou/Bike_Sharing","sub_path":"functions/eda.py","file_name":"eda.py","file_ext":"py","file_size_in_byte":2132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"18057059424","text":"#!/usr/bin/env python\r\n\"\"\" \r\nL ist die Anzahl der Ebenen des Labyrinths.\r\nR ist die Länge des Labyrinths, und C die Breite.\r\n\"\"\"\r\n\r\nfrom __future__ import annotations\r\nimport heapq\r\nimport collections\r\nfrom typing import Protocol, Dict, List, Iterator, Tuple, TypeVar, Optional\r\nT = TypeVar('T')\r\n\r\n\r\nGridLocation = Tuple[int, int, int]\r\nLocation = 
Tuple[int, int, int]\r\n\r\nclass SquareGrid: # Struct of the diagram and the relevant functions\r\n def __init__(self, width: int, height: int, depth: int):\r\n self.width = width\r\n self.height = height\r\n self.depth = depth\r\n self.walls: List[GridLocation] = []\r\n\r\n def in_bounds(self, id: GridLocation) -> bool:\r\n (x, y, z) = id\r\n return 0 <= x < self.width and 0 <= y < self.height and 0 <= z < self.depth\r\n\r\n def passable(self, id: GridLocation) -> bool:\r\n if id in self.walls:\r\n return False\r\n else:\r\n return True\r\n\r\n def neighbors(self, id: GridLocation) -> Iterator[GridLocation]:\r\n (x, y, z) = id\r\n neighbors = [(x+1, y, z), (x-1, y, z), (x, y-1, z),\r\n (x, y+1, z), (x, y, z+1), (x, y, z-1)]\r\n # E W N S UP DOWN\r\n\r\n results1 = filter(self.passable, neighbors)\r\n results = filter(self.in_bounds, results1)\r\n return results\r\n\r\nclass PriorityQueue: # The Priority of all elements\r\n def __init__(self):\r\n self.elements: List[Tuple[float, T]] = []\r\n\r\n def empty(self) -> bool:\r\n return not self.elements\r\n\r\n def put(self, item: T, priority: float):\r\n heapq.heappush(self.elements, (priority, item))\r\n\r\n def get(self) -> T:\r\n return heapq.heappop(self.elements)[1]\r\n\r\ndef heuristic(a: GridLocation, b: GridLocation) -> float: #Count the mannhaton distance\r\n (x1, y1, z1) = a\r\n (x2, y2, z2) = b\r\n return abs(x1 - x2) + abs(y1 - y2) + abs(z1 - z2)\r\n\r\ndef a_star_search(graph, start, goal): #A* Search\r\n frontier = PriorityQueue()\r\n frontier.put(start, 0)\r\n came_from: Dict[Location, Optional[Location]] = {}\r\n cost_so_far: Dict[Location, float] = {}\r\n came_from[start] = None\r\n cost_so_far[start] = 0\r\n\r\n while not frontier.empty():\r\n current = frontier.get()\r\n\r\n if current == goal:\r\n break\r\n\r\n for next in graph.neighbors(current):\r\n new_cost = cost_so_far[current] + 1 # graph.cost(current, next) no weight\r\n if next not in cost_so_far or new_cost < cost_so_far[next]:\r\n 
cost_so_far[next] = new_cost\r\n priority = new_cost + heuristic(next, goal)\r\n frontier.put(next, priority)\r\n came_from[next] = current\r\n\r\n return came_from, cost_so_far\r\n\r\ndef split(word): # split an input line into characters\r\n return [char for char in word]\r\n\r\n\r\n# main loop function till three zeros are input\r\nwhile True:\r\n L = int(input(\"Enter number of levels: \")) \r\n R = int(input(\"Enter number of length: \"))\r\n C = int(input(\"Enter number of width: \"))\r\n\r\n if L==R==C==0:\r\n break\r\n\r\n arr = [[[0 for col in range(C)]for row in range(R)] for x in range(L)]\r\n DIAGRAM1_WALLS = []\r\n start = (0, 0, 0)\r\n goal = (0, 0, 0)\r\n\r\n for i in range(L):\r\n print(\"\\nPlease input the \" + str(i) + \"th Level: \")\r\n j = 0\r\n while j < R:\r\n line = input()\r\n if line: \r\n arr[i][j][:] = split(line)\r\n j += 1\r\n\r\n \r\n\r\n\r\n for i in range(L):\r\n for j in range(R):\r\n for k in range(C):\r\n if arr[i][j][k] == 'S':\r\n start = (k, j, i) \r\n elif arr[i][j][k] == 'E':\r\n goal = (k, j, i)\r\n elif arr[i][j][k] == '#':\r\n DIAGRAM1_WALLS.append((k, j, i))\r\n\r\n g = SquareGrid(C, R, L)\r\n g.walls = DIAGRAM1_WALLS\r\n came_from, cost_so_far = a_star_search(g, start, goal)\r\n\r\n try:\r\n print(\"Entkommen in \" + str(cost_so_far[goal]) + \" Minute(n)!\")\r\n except:\r\n print(\"Gefangen :-(\")\r\n\r\n\r\n# TODO: check the existence of S and E; Limit the input of each column;","repo_name":"swang1543/CodingTest","sub_path":"AStar.py","file_name":"AStar.py","file_ext":"py","file_size_in_byte":4207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"20750540278","text":"# CUT 30s to WAV\nimport os\nimport subprocess\nfrom sys import path\nimport time\nimport datetime\nimport json\nfrom Data.MetaData.genre import genre\n\nlistGenre = genre\ncurrentGenre = \"\"\n\npathGenre= r\"../../Data/MetaData/trainGenre/\"\npathSongJSON = 
r\"../../Data/MetaData/song/\"\n\ninput = r\"../../Data/MetaData/streaming/\"\noutFol = r\"../../Data/DataSet/DatasetWav30s/\"\n\nbash_data_relative_path = r\"../\"\nlistSong ={}\ntotalSong = 0\n\ntotal = 0\ncount = 0\nlineBash = \"\"\nbashAll = \"\"\nindex = 100\n\n\n\n\ndef writeBash(data):\n global index\n f = open(\"../../Data/BashCut/BashCUT30s/bash\" +str(index)+\".sh\", 'a+')\n f.write(data)\n f.close()\n\n\n\ndef CUT(songId,file_path, output):\n #global total\n #dst_wav = output + \".wav\"\n #sound = AudioSegment.from_mp3(file_path)\n global count\n global lineBash\n global bashAll\n global total\n global index\n try:\n duration = listSong[songId]\n except:\n duration = 0\n start = 15\n part = 1\n while start+30 < duration:\n begin_cut = str(datetime.timedelta(0, start))\n end_cut = str(datetime.timedelta(0, start+30))\n #print(begin_cut+\" \"+end_cut)\n if not os.path.exists(output+\"_\"+str(part)+\".wav\"):\n # print(output+\"_\"+str(part)+\".wav\")\n lineBash = lineBash + (\"ffmpeg -i \"+ bash_data_relative_path +file_path+\" -ss \"+begin_cut +\n \" -to \"+end_cut+\" \"+ bash_data_relative_path +output+\"_\"+str(part)+\".wav -loglevel error & \")\n start += 30\n part += 1\n total += 1\n count += 1\n if count >= 100:\n print(total)\n bashAll += \"! \"+lineBash + \"\\n\"\n lineBash = \"\"\n count = 0\n if total >= 800:\n bashAll += \"! 
\"+lineBash + \"\\n\"\n writeBash(bashAll)\n bashAll = \"\"\n total = 0\n lineBash = \"\"\n count = 0\n index += 1\n\n\n\ndef dumpSong(songPath):\n global totalSong\n with open(songPath, encoding=\"utf8\") as fsong:\n line = fsong.readline()\n try:\n if totalSong <=3000:\n obj = json.loads(line)\n listSong[obj['encodeId']] = obj['duration']\n totalSong +=1\n except:\n pass\n\ndef dumpGenre():\n\n global currentGenre\n\n for f in os.listdir(pathGenre):\n\n fileName = os.path.join(pathGenre, f)\n currentGenre = f[:-4]\n\n outPath = os.path.join(outFol) + f[:-4]\n\n if not os.path.exists(outPath):\n os.makedirs(outPath)\n\n with open(fileName, encoding=\"utf8\") as fGenre:\n lines = fGenre.readlines()\n\n for line in lines:\n print(line)\n songObj = json.loads(line)\n songPath = pathSongJSON + songObj['encodeId'] + \".txt\"\n dumpSong(songPath)\n\n inFile = input + songObj['encodeId'] + \".mp3\"\n outFile = outPath + \"/\" + songObj['encodeId']\n\n if os.path.exists(inFile):\n CUT(songObj['encodeId'], inFile, outFile)\n\n\n\n\nif __name__ ==\"__main__\":\n dumpGenre()\n #for fol in os.listdir(input):\n # pathFol = os.path.join(input, fol)\n # for filename in os.listdir(os.path.join(input, fol)):\n # fullPath = os.path.join(pathFol, filename)\n # # print(fullPath)\n # CUT(filename[:-4],fullPath, os.path.join(outFol, fol, filename))\n\n #bashAll += \"! 
\"+lineBash + \"\\n\"\n #writeBash(bashAll)","repo_name":"huutri148/MusicGenreClassification","sub_path":"src/Preprocess/GenerateBashCutAudio.py","file_name":"GenerateBashCutAudio.py","file_ext":"py","file_size_in_byte":3418,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"8338363927","text":"#Peça para o usuario um numero inteiro e peça uma base de conversão\n\nnum = int(input('\\nDigite um numero inteiro: '))\nbase = int(input('Escolha entre binario(1), octal(2) e hexadecimal(3): '))\nprint('')\nhexcount = 10\ndone = False\n\nnumalf = {\n 10 : 'A',\n 11 : 'B',\n 12 : 'C',\n 13 : 'D',\n 14 : 'E',\n 15 : 'F',\n}\n\nif(base == 1):\n binary = []\n while(done == False):\n\n rest = num % 2\n num = num // 2\n binary.append(rest)\n\n if(num == 0):\n result = binary[::-1]\n \n done = True\n\nif(base == 2):\n octal = []\n while(done == False):\n\n rest = num % 8\n num = num // 8\n octal.append(rest)\n\n if(num == 0):\n result = octal[::-1]\n \n done = True\n\nif(base == 3):\n hexadecimal = []\n while(done == False):\n\n rest = num % 16\n num = num // 16\n hexadecimal.append(rest)\n\n if(num == 0):\n while(hexcount <= 15):\n count = [i for i, x in enumerate(hexadecimal) if x == hexcount]\n if(count != []):\n for x,y in zip(count, numalf[hexcount]):\n hexadecimal[x] = y\n hexcount = hexcount + 1\n result = hexadecimal[::-1]\n done = True\n \n\nprint(''.join(str(e) for e in result))","repo_name":"henriquekirchheck/Curso-em-Video-Python","sub_path":"desafio/desafio037.py","file_name":"desafio037.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74340702561","text":"# import the necessary packages\nfrom tensorflow.keras.applications.mobilenet_v2 import preprocess_input\nfrom tensorflow.keras.preprocessing.image import img_to_array\nfrom tensorflow.keras.models import load_model\nfrom imutils.video import 
VideoStream\nimport numpy as np\nimport imutils\nimport time\nimport cv2\nimport os\n\n\ndef detect_and_predict_mask(frame, faceNet, maskNet):\n\t\n\t(h, w) = frame.shape[:2]\n\tblob = cv2.dnn.blobFromImage(frame, 1.0, (224, 224), (104.0, 177.0, 123.0))\n\n\t\n\tfaceNet.setInput(blob)\n\tdetections = faceNet.forward()\n\t#print(detections.shape)\n\n\n\tfaces = [] # use to store multiple faces\n\tlocs = [] # use to store locations of each face\n\tpreds = [] # for prediction data of each face\n\n\t\n\tfor i in range(0, detections.shape[2]):\n\t\t# extract the confidence (i.e., probability) associated with\n\t\t# the detection\n\t\tconfidence = detections[0, 0, i, 2]\n\n\t\t# filter out weak detections by ensuring the confidence is\n\t\t# greater than the minimum confidence\n\t\tif confidence > 0.5:\n\t\t\t# compute the (x, y)-coordinates of the bounding box for\n\t\t\t# the object\n\t\t\tbox = detections[0, 0, i, 3:7] * np.array([w, h, w, h])\n\t\t\t(startX, startY, endX, endY) = box.astype(\"int\")\n\n\t\t\t# ensure the bounding boxes fall within the dimensions of\n\t\t\t# the frame\n\t\t\t(startX, startY) = (max(0, startX), max(0, startY))\n\t\t\t(endX, endY) = (min(w - 1, endX), min(h - 1, endY))\n\n\t\t\t\n\t\t\tface = frame[startY:endY, startX:endX]\n\t\t\tface = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)\n\t\t\tface = cv2.resize(face, (224, 224)) # using small image size otherwise its slowing the process\n\t\t\tface = img_to_array(face)\n\t\t\tface = preprocess_input(face)\n\t\t\t#print(face)\n\n\t\t\t# for multiple faces\n\t\t\tfaces.append(face)\n\t\t\tlocs.append((startX, startY, endX, endY))\n\n\t# check if at least one face was detected\n\tif len(faces) > 0:\n\t\t# check all faces in bulk using numpy\n\t\tfaces = np.array(faces, dtype=\"float32\")\n\t\tpreds = maskNet.predict(faces, batch_size=32)\n\n\t# return a 2-tuple of the face locations and their corresponding predection\n\treturn (locs, preds)\n\n# load our serialized face detector model from 
disk\nprototxtPath = r\"face_detector\\deploy.prototxt\"\nweightsPath = r\"face_detector\\res10_300x300_ssd_iter_140000.caffemodel\"\nfaceNet = cv2.dnn.readNet(prototxtPath, weightsPath)\n\n# load the face mask detector model from disk\nmaskNet = load_model(\"mask_detector.model\")\n\n# initialize the video stream\nprint(\"[INFO] starting video stream...\")\nvs = VideoStream(src=0).start()\n\n# loop over the frames from the video stream\nwhile True:\n\tframe = vs.read()\n\tframe = imutils.resize(frame, width=800) \n\n\t# run face dector on each frame of video\n\t(locs, preds) = detect_and_predict_mask(frame, faceNet, maskNet)\n\n\t# loop over the detected face locations and their corresponding predection\n\tfor (box, pred) in zip(locs, preds):\n\t\t\n\t\t(startX, startY, endX, endY) = box\n\t\t(mask, withoutMask) = pred\n\n\t\tlabel = \"Mask\"\n\t\tcolor = (0, 255, 0)\n\t\tif mask < withoutMask:\n\t\t\tlabel = \"No Mask\"\n\t\t\tcolor = (0, 0, 255)\n\t\t\t\n\n\t\t# include the probability in the label\n\t\tlabel = \"{}: {}%\".format(label, max(mask, withoutMask) * 100)\n\n\t\t# display the label and bounding box rectangle on the output frame\n\t\tcv2.putText(frame, label, (startX, startY - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)\n\t\tcv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)\n\n\t# show the output frame\n\tcv2.imshow(\"Frame\", frame)\n\tkey = cv2.waitKey(1) & 0xFF\n\n\t# if the `q` key was pressed, break from the loop\n\tif key == ord(\"q\"):\n\t\tbreak\n\n# do a bit of cleanup\ncv2.destroyAllWindows()\nvs.stop()","repo_name":"imraheel/Mask-Detection-System-MDS","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"11003863411","text":"\nnum_tests = int(input())\n\n\ndef better_player():\n\tA_R, A_W, A_C = list(map(int, input().split()))\n\tB_R, B_W, B_C = list(map(int, 
input().split()))\n\n\tA_points = 0\n\tB_points = 0\n\n\t# for each case compute points of each player\n\n\tif(A_R > B_R):\n\t\tA_points = A_points + 1\n\telse:\n\t\tB_points = B_points + 1\n\n\tif(A_W > B_W):\n\t\tA_points = A_points + 1\n\telse:\n\t\tB_points = B_points + 1\n\n\tif(A_C > B_C):\n\t\tA_points = A_points + 1\n\telse:\n\t\tB_points = B_points + 1\n\n\tif(A_points > B_points):\n\t\tprint(\"A\")\n\telse:\n\t\tprint(\"B\")\n\n\nfor each_test in range(num_tests):\n\tbetter_player()\n","repo_name":"hkbiet/CodeChef","sub_path":"CRICRANK.py","file_name":"CRICRANK.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33164944140","text":"#!/usr/bin/env python3\n\nimport numpy as np\nimport unittest\n\nimport cilt\n\nclass TestTransversalFilter(unittest.TestCase):\n\n def test_general(self):\n size = 5\n filt = cilt.FilterTransversal(np.zeros(size))\n\n for n in filt.getNumerator():\n self.assertEqual(n, 0.)\n\n b = np.zeros(size)\n\n for i in range(size):\n b[i] = i+1\n\n filt.setNumerator(b)\n b_filter = filt.getNumerator()\n\n for i in range(size):\n self.assertEqual(b[i], b_filter[i])\n\n b = np.zeros(size+2)\n filt.setCoeffs(b)\n self.assertEqual(filt.getOrder(), size+2)\n b_filter = filt.getNumerator()\n\n for n in filt.getNumerator():\n self.assertEqual(n, 0.)\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"kornerc/cilt","sub_path":"test/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33004821831","text":"# coding: utf-8\nimport argparse\nimport os\nimport pandas as pd\n#from matplotlib import pyplot as plt\n#import codecs\n#import chardet\n#import io\n#import cvthtml\n#import numpy\nimport influxdb\nimport pymysql\nimport subprocess\nimport re\nimport json\n\n#설정 파일 로드\nwith open(\"../lite_config.json\", 
\"r\") as json_file:\n config = json.load(json_file)\n\ndef dbconn():\n conn = pymysql.connect(user=config['db_uid'], password=config['db_pwd'], host=config['db_server'], port=config['db_port'], database=config['db_database'], connect_timeout=28800, charset='utf8')\n conn.query('SET GLOBAL connect_timeout=28800')\n conn.query('SET GLOBAL max_allowed_packet=167772160')\n return conn\n \ndef analysis(row, userid, run_pid, run_log, cmds):\n try:\n state = \"분석중\"\n updatesql = \"insert tb_algoinfo_history (id, userid, run_pid, state, run_log, run_stime) values (%s, %s, %s, %s, %s, now())\"\n conn_sub = dbconn()\n cur_sub = conn_sub.cursor(pymysql.cursors.DictCursor)\n cur_sub.execute(updatesql,(row['id'], userid, run_pid, state, run_log))\n conn_sub.commit()\n cur_sub.close()\n conn_sub.close()\n\n result = subprocess.run(cmds, stdout=subprocess.PIPE, stderr=subprocess.PIPE, timeout=3600, text=True)\n if result.returncode != 0:\n res = \"error:\"+str(result.returncode)+\":\"+result.stderr\n state = \"분석불가\"\n else:\n res = \"success:\"+result.stdout\n if len(result.stdout) == 0 :\n res += result.stdout\n state = \"분석완료\"\n except Exception as e:\n res = \"error:\"+str(e)\n state = \"분석불가\"\n \n updatesql = \"update tb_algoinfo_history set run_result=%s, state=%s, run_etime=now() where id=%s and run_pid=%s and userid=%s\"\n lines = res.split(\"\\n\")\n print(\"[\",len(res),len(lines),\"]\")\n\n if len(lines)>500:\n lines = lines[:500]\n res = \"\\n\".join(lines)\n \n conn_sub = dbconn()\n cur_sub = conn_sub.cursor(pymysql.cursors.DictCursor)\n cur_sub.execute(updatesql,(res, state, row['id'], run_pid, userid))\n conn_sub.commit()\n\n return res\n\ndef autoanalysis(ftype, userid, q, server, port, uid, pwd, db):\n run_pid = str(os.getpid())\n conn = dbconn()\n cur = conn.cursor(pymysql.cursors.DictCursor)\n rcnt = cur.execute( \"select * from tb_algoinfo order by id\" )\n rows = cur.fetchall()\n run_log = \"start PID=\"+run_pid+\"\\n\"\n \n updatesql = \"update 
tb_algoinfo set run_log='%s' \"%(run_log)\n cur.execute(updatesql)\n conn.commit()\n \n #data 준비 python 실행\n workfile = \"%s/upload/%s/autoanalysis.csv\" % (config['base_path'], userid)\n modelpath = \"%s/model/%s/\" % (config['base_path'], userid)\n cmds = [config['python_exec'], \"%s/fetchdata.py\"%(config['python_path']), \"-ftype\", ftype, \"-output_path\", workfile, \"-q\", q, \"-server\", server, \"-port\", port, \"-uid\", uid, \"-pwd\", pwd, \"-db\", db]\n result = subprocess.run(cmds, stdout=subprocess.PIPE, text=True)\n if result.returncode != 0:\n run_log += \"에러: 데이터 준비중 오류발생\\n\"\n updatesql = \"update tb_algoinfo set run_log='%s' \"%(run_log)\n cur.execute(updatesql)\n conn.commit()\n conn.close()\n return\n print(\"데이터 준비 ok......\")\n \n input_df = pd.read_csv(workfile, encoding=\"UTF-8\")\n columns = input_df.columns\n dep_column = columns[-1] # 종속변수 마지막 컬럼\n ind_columns = columns[0:-1] # 독립변수 마지막을 제외한 컬럼\n last_column_index = str(len(columns)-1)\n \n print(\"변수:%s\"%(\",\".join(columns)))\n print(\"독립변수:%s\"%(\",\".join(ind_columns)))\n print(\"종속변수:%s\"%(dep_column))\n \n # 기초전처리...\n preprocess_cmd = [\"%s/data_process.sh\"%(config['python_path']), workfile, workfile+\".csv\", \"Y\", \",\".join(columns), \",\".join(columns), \"\", \"UTF-8\"]\n \n# print(preprocess_cmd)\n # null제거 전체컬럼 이상치제거, 수치형변환 안함,\n\n result = subprocess.run(preprocess_cmd, stdout=subprocess.PIPE, text=True)\n print(\"기초전처리전처리 ok......\")\n if result.returncode != 0:\n run_log += \"에러: 기초 전처리에서 오류발생\\n\"\n updatesql = \"update tb_algoinfo set run_log='%s' \"%(run_log)\n cur.execute(updatesql)\n conn.commit()\n conn.close()\n return \n \n print(result.stdout)\n \n workfile = workfile+\".csv\"\n \n input_df = pd.read_csv(workfile, encoding=\"UTF-8\")\n columns = input_df.columns\n dep_column = columns[-1] # 종속변수 마지막 컬럼\n ind_columns = columns[0:-1] # 독립변수 마지막을 제외한 컬럼\n last_column_index = str(len(columns)-1)\n \n print(\"변수:%s\"%(\",\".join(columns)))\n 
print(\"독립변수:%s\"%(\",\".join(ind_columns)))\n print(\"종속변수:%s\"%(dep_column))\n params = \"input_file:%s;dep_column:%s;ind_columns:%s;columns:%s;last_column_index:%s;model_path:%s;\"%(workfile,dep_column,\",\".join(ind_columns),\",\".join(columns),last_column_index,modelpath)\n run_log += \"파라미터- %s\\n\"%(params)\n\n# print(preprocess_cmd)\n \n print(\"알고리즘 처리중......\")\n \n updatesql = \"update tb_algoinfo set run_log=%s,state=%s \"\n cur.execute(updatesql,(run_log,'분석대기'))\n conn.commit()\n \n for i,row in enumerate(rows):\n print(\"분석중: %d/%d %s algorithm\"%(i+1, rcnt, row['name']))\n run_log += \"분석중: %d/%d %s algorithm\\n\"%(i+1, rcnt, row['name'])\n cmd = row['cmd']\n updatesql = \"update tb_algoinfo set run_log=%s\"\n cur.execute(updatesql,(run_log))\n conn.commit()\n \n if cmd is not None:\n updatesql = \"update tb_algoinfo set run_stime=now(), state=%s, run_etime=NULL where id=%s\"\n cur.execute(updatesql,('분석중', row['id']))\n conn.commit()\n cur.close()\n conn.close()\n cmds = cmd.split(' ')\n bracket = re.compile(\"\\{(.*)\\}\")\n \n for i, v in enumerate(cmds):\n cmds[i] = re.sub('\\{model_path.*\\}', modelpath, cmds[i])\n cmds[i] = re.sub('\\{input_file.*\\}', workfile, cmds[i])\n cmds[i] = re.sub('\\{dep_column.*\\}', dep_column, cmds[i])\n cmds[i] = re.sub('\\{ind_columns.*\\}', \",\".join(ind_columns), cmds[i])\n cmds[i] = re.sub('\\{columns.*\\}', \",\".join(columns), cmds[i])\n cmds[i] = re.sub('\\{last_column_index.*\\}', last_column_index, cmds[i])\n \n \n # 그외 파라미터는 defval로 처리\n match = bracket.search(cmds[i])\n if not match is None:\n var = match.groups()[0]\n ps = var.split('/')\n vitem = {'vname':'','vtype':'','defval':''}\n if len(ps)>=1:\n vitem['vname'] = ps[0]\n if len(ps)>=2:\n vitem['vtype'] = ps[1]\n if len(ps)>=3:\n vitem['defval'] = ps[2]\n cmds[i] = re.sub('\\{'+vitem['vname']+'.*\\}',vitem['defval'], cmds[i])\n \n# cmds[i] = cmds[i].replace(\"{model_path}\", modelpath).replace(\"{input_file}\", workfile).replace(\"{dep_column}\", 
dep_column).replace(\"{ind_columns}\", \",\".join(ind_columns)).replace(\"{columns}\", \",\".join(columns)).replace(\"{last_column_index}\", last_column_index)\n if cmds[i]=='\"\"' or cmds[i]==\"''\":\n cmds[i] = \"\"\n\n state = \"분석완료\"\n res = analysis(row, userid, run_pid, run_log, cmds)\n \n updatesql = \"update tb_algoinfo set run_result=%s, state=%s, run_etime=now() where id=%s\"\n \n '''\n try:\n result = subprocess.run(cmds, stdout=subprocess.PIPE, stderr=subprocess.PIPE, timeout=3600, text=True)\n if result.returncode != 0:\n res = \"error:\"+str(result.returncode)+\":\"+result.stderr\n state = \"분석불가\"\n else:\n res = \"success:\"+result.stdout\n if len(result.stdout) == 0 :\n res += result.stdout\n except Exception as e:\n res = \"error:\"+str(e)\n state = \"분석불가\"\n updatesql = \"update tb_algoinfo set run_result=%s, state=%s, run_etime=now() where id=%s\"\n \n lines = res.split(\"\\n\")\n print(\"[\",len(res),len(lines),\"]\")\n\n if len(lines)>500:\n lines = lines[:500]\n res = \"\\n\".join(lines)\n '''\n conn = dbconn()\n cur = conn.cursor(pymysql.cursors.DictCursor)\n cur.execute(updatesql,(res, state, row['id']))\n conn.commit()\n # shell 호출 \n \n run_log += \"처리완료:\"+str(rcnt)+\" algorithms\\n\"\n updatesql = \"update tb_algoinfo set run_log='%s' \"%(run_log)\n cur.execute(updatesql)\n conn.commit()\n cur.close()\n conn.close()\n print(\"처리완료:\"+str(rcnt)+\" algorithms\")\n\n'''\npython fetchdata.py -ftype 'influxdb' -q 'select * from TS0001 where time>=now()-600s' -server 'onsite-monitor.xip.kr' -port '8086' -uid 'whaleshark' -pwd 'whaleshark' -db 'facility'\n'''\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(prog='argparser')\n# parser.add_argument('--fun', action='store_true', help='Statistics help')\n# subparsers = parser.add_subparsers(help='sub-command help', dest='fun')\n \n # 명령을 위한 파서를 만듭니다\n parser.add_argument('-ftype', type=str, help='ftype help', required=True)\n parser.add_argument('-userid', type=str, help='userid 
help', required=True)\n parser.add_argument('-q', type=str, help='q help', required=True) # 쿼리 또는 path 정보\n parser.add_argument('-server', type=str, help='server ip help', required=False, default='') # 서버 ip\n parser.add_argument('-port', type=str, help='server port help', required=False, default='') # 서버 port\n parser.add_argument('-uid', type=str, help='uid help', required=False, default='') # 서버 uid\n parser.add_argument('-pwd', type=str, help='pwd help', required=False, default='') # 서버 pwd\n parser.add_argument('-db', type=str, help='pwd help', required=False, default='') # 서버 db\n args = parser.parse_args()\n ftype = args.ftype\n userid = args.userid\n q = args.q\n server = args.server\n port = args.port\n uid = args.uid\n pwd = args.pwd\n db = args.db\n\n autoanalysis(ftype, userid, q, server, port, uid, pwd, db)\n","repo_name":"dataignitelab/WhaleShark_Lite2","sub_path":"lite2/shell/autoanalysis.py","file_name":"autoanalysis.py","file_ext":"py","file_size_in_byte":10698,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"54"} +{"seq_id":"33523257520","text":"import paho.mqtt.client as mqtt\r\nimport time\r\n\r\ndef on_connect(client, userdata, flags, rc):\r\n print(\"Connected with result code \"+str(rc))\r\n client.subscribe(\"test\")\r\n\r\ndef on_message(client, userdata, msg):\r\n print(msg.topic+\" \"+str(msg.payload))\r\n\r\nclient = mqtt.Client()\r\nclient.on_connect = on_connect\r\nclient.on_message = on_message\r\nclient.connect(\"localhost\", 1883, 60)\r\n\r\nflag = 1\r\nwhile(True):\r\n\tif(flag):\r\n\t\tclient.publish(\"test\", \"OFF\")\r\n\t\tflag = 0\r\n\telse:\r\n\t\tclient.publish(\"test\", \"ON\")\r\n\t\tflag = 1\r\n\ttime.sleep(2)\r\n","repo_name":"kyjcoope/MQTT","sub_path":"MQTT_publish.py","file_name":"MQTT_publish.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2430037105","text":"# This 
Python 3 environment comes with many helpful analytics libraries installed\n# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python\n# For example, here's several helpful packages to load in \n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n\n# Input data files are available in the \"../input/\" directory.\n# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory\n\nfrom subprocess import check_output\n#print(check_output([\"ls\", \"../input\"]).decode(\"utf8\"))\n\nfrom collections import defaultdict\n\n\ndef run_solution():\n print('Preparing arrays...')\n f = open(\"../input/train.csv\", \"r\")\n f.readline()\n total = 0\n\n client_product_arr = defaultdict(int)\n client_product_arr_count = defaultdict(int)\n\n # Calc counts\n avg_target = 0.0\n while 1:\n line = f.readline().strip()\n total += 1\n\n if total % 10000000 == 0:\n print('Read {} lines...'.format(total))\n\n if line == '':\n break\n\n arr = line.split(\",\")\n week = int(arr[0])\n agency = arr[1]\n canal_id = arr[2]\n ruta_sak = arr[3]\n cliente_id = int(arr[4])\n producto_id = int(arr[5])\n vuh = arr[6]\n vh = arr[7]\n dup = arr[8]\n dp = arr[9]\n target = int(arr[10])\n \nrun_solution()\n\n# Any results you write to the current directory are saved as output.\n\n","repo_name":"sajedjalil/Data-Science-Pipeline-Detector","sub_path":"dataset/grupo-bimbo-inventory-demand/JungwooChae/dontknowwhat2do.py","file_name":"dontknowwhat2do.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"54"} +{"seq_id":"35540865188","text":"#!/usr/bin/env python\nfrom __future__ import print_function\n\nfrom casacore.tables import taql\nfrom losoto.h5parm import h5parm\nimport numpy as np\nfrom cursesmenu import SelectionMenu\nimport sys\nfrom subprocess import call\nfrom glob import 
glob\n\nfrom matplotlib import pyplot as plt\n\ndef cursechoose(lst, title = \"Make a selection\"):\n nr = SelectionMenu.get_selection(lst, title = title)\n if nr == len(lst):\n \"Exiting\"\n sys.exit(0)\n else:\n return lst[nr], nr\n\nfiles = glob('*.h5')\nfilename, __ = cursechoose(files, title = \"Choose a file\")\n\nH = h5parm(filename)\n\nsolsetnames = H.getSolsets().keys()\n\nif len(solsetnames)>1:\n solsetname, __ = cursechoose(solsetnames, title = \"Select a solset\")\nelse:\n solsetname = solsetnames[0]\n\nparmkeys = H.getSoltabs(solsetname).keys()\nparmkey, __ = cursechoose(parmkeys, title = \"Select a parameter\")\n\nsoltab = H.getSoltabs(solsetname)[parmkey]\n\nantnames = list(soltab.ant)\nantname, antnr = cursechoose(antnames, title = \"Select a station\")\n\nvals = soltab.val[0, antnr, 0, :]\nvals0 = soltab.val[0, 0, 0, :]\n\n#plt.ylim([-0.5,0.5])\nplt.plot(vals-vals0, '.', zorder=10)\nplt.ylim([-0.125,0.125])\n#plt.ylim([-3.15,3.15])\nplt.xlim([0,len(vals)])\nplt.ylabel(parmkey)\nplt.xlabel(\"Time slot\")\nplt.title(\"Antenna \" + antname)\n\n# Make figure transparent, plot white\nax = plt.gca()\nax.set_zorder(2)\nfig= plt.gcf()\nfig.patch.set_alpha(0.0)\n\n# Add flag bars\n#ax2 = ax.twinx()\n#flags=taql(\"select gntrue(FLAG) as nflag from /data/scratch/dijkema/tec/allsb/all_TC00.MS where mscal.ant1name()=='\"+antname+\"' group by TIME order by TIME\").getcol('nflag')\n#ax2.bar(np.arange(len(flags)), flags, 1.,color='r',edgecolor='r')\n#ax2.set_zorder(1)\n#ax2.set_ylim(4500,5000)\n#ax.patch.set_alpha(0.0)\nplt.xlim([0,len(vals)])\n\n# Fit aspect ratio\nfig.set_size_inches((8.7,5.5))\n\nplt.savefig('myplot.pdf')\n\ncall('~/.iterm2/imgcat myplot.pdf', shell=True)\n","repo_name":"tammojan/lofarscripts","sub_path":"ploth5parm.py","file_name":"ploth5parm.py","file_ext":"py","file_size_in_byte":1874,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"31144097785","text":"\"\"\"\nThis script will 
convert your Json data ti a CSV file.\nIt takes .json a file as input and provides .csv the file as outout.\n\nInstall json library: pip install json\n\n\"\"\"\n\nimport json\n\nif __name__ == '__main__':\n try:\n with open('input.json', 'r') as f:\n data = json.load(f.read())\n\n output = ','.join([*data[0]])\n for obj in data:\n output += f'\\n{obj[\"Name\"]}, {obj[\"age\"]},{obj[\"birthyear\"]}'\n\n with open('output.csv', 'w') as f:\n f.write(output)\n except Exception as ex:\n print(f'Error: {str(ex)}')\n ","repo_name":"madmad89/useful_application","sub_path":"50_useful_python_scripts/01_convert_JSON_to_CSV.py","file_name":"01_convert_JSON_to_CSV.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"21003556368","text":"import numpy as np\nimport cv2\nimport glob\nimport os\n\n# STEP A.1 CALIBRATION\n\n# Number of corners\nnx = 9\nny = 6\n\n# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)\nobjp = np.zeros((ny*nx,3), np.float32)\nobjp[:,:2] = np.mgrid[0:nx, 0:ny].T.reshape(-1,2)\n\n# Arrays to store object points and image points from all the images.\nobjpoints = [] # 3d points in real world space\nimgpoints = [] # 2d points in image plane.\n\n# Make a list of calibration images\nimages = glob.glob('./camera_cal/calibration*.jpg')\n\n# Step through the list and search for chessboard corners\nfor fname in images:\n\tprint('Processing image', fname)\n\timg = cv2.imread(fname)\n\tgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n\t# Find the chessboard corners\n\tret, corners = cv2.findChessboardCorners(gray, (nx,ny), None)\n\n\t# If found, add object points, image points\n\tif ret == True:\n\t\tobjpoints.append(objp)\n\t\timgpoints.append(corners)\n\n\t\t# Draw and display the corners\n\t\tcv2.drawChessboardCorners(img, (nx,ny), corners, ret)\n\t\tfilename = os.path.basename(fname)\n\t\twrite_name = './output_images/' + 
filename\n\t\tcv2.imwrite(write_name, img)\n\nimport pickle\n\n# We take the image calibration1 as \nimg = cv2.imread('./camera_cal/calibration3.jpg')\nimg_size = (img.shape[1], img.shape[0])\n\n# Do camera calibration given object points and image points\nret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_size, None, None)\n\n# Save the camera calibration result for later use (we won't worry about rvecs / tvecs)\ndist_pickle = {}\ndist_pickle[\"mtx\"] = mtx\ndist_pickle[\"dist\"] = dist\npickle.dump(dist_pickle, open(\"./camera_cal/wide_dist_pickle.p\", \"wb\" ))\n\n# Undistort all calibration images\nfor fname in images:\n\tprint('Processing undistortion of image', fname)\n\timg = cv2.imread(fname)\n\tdst = cv2.undistort(img, mtx, dist, None, mtx)\n\tfilename = os.path.basename(fname)\n\twrite_name = './output_images/undist_' + filename\n\tcv2.imwrite(write_name,dst)","repo_name":"AnsgarNell/CarND-Advanced-Lane-Lines","sub_path":"camera_calibration.py","file_name":"camera_calibration.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"43006058910","text":"from __future__ import absolute_import, division, print_function, unicode_literals\n\n# TensorFlow and tf.keras\nimport tensorflow as tf\nfrom tensorflow import keras\n\n# Helper libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nclass ClothesRecognizer:\n\n def __init__(self):\n fashion_mnist = keras.datasets.fashion_mnist\n (self.train_images, self.train_labels), (self.test_images, self.test_labels) = fashion_mnist.load_data()\n\n self.class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',\n 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']\n self.train_images = self.train_images / 255.0\n\n self.test_images = self.test_images / 255.0\n\n self.model = keras.Sequential([\n keras.layers.Flatten(input_shape=(28, 28)),\n keras.layers.Dense(128, 
activation='relu'),\n keras.layers.Dense(10, activation='softmax')\n ])\n\n self.model.compile(optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\n self.model.fit(self.train_images, self.train_labels, epochs=10)\n\n test_loss, test_acc = self.model.evaluate(self.test_images, self.test_labels, verbose=2)\n print('\\nTest accuracy:', test_acc)\n\n self.predictions = self.model.predict(self.test_images)\n\n def plot_image(self, i):\n\n predictions_array, true_label, img = self.predictions[i], self.test_labels[i], self.test_images[i]\n plt.grid(False)\n plt.xticks([])\n plt.yticks([])\n\n plt.imshow(img, cmap=plt.cm.binary)\n\n predicted_label = np.argmax(predictions_array)\n if predicted_label == true_label:\n color = 'blue'\n else:\n color = 'red'\n\n plt.xlabel(\"{} {:2.0f}% ({})\".format(self.class_names[predicted_label],\n 100 * np.max(predictions_array),\n self.class_names[true_label]),\n color=color)\n\n def plot_value_array(self, i):\n predictions_array, true_label = self.predictions[i], self.test_labels[i]\n plt.grid(False)\n plt.xticks(range(10))\n plt.yticks([])\n thisplot = plt.bar(range(10), predictions_array, color=\"#777777\")\n plt.ylim([0, 1])\n predicted_label = np.argmax(predictions_array)\n\n thisplot[predicted_label].set_color('red')\n thisplot[true_label].set_color('blue')\n\n\nif __name__ == '__main__':\n cl = ClothesRecognizer()\n","repo_name":"rummens1337/neural-network-tensorflow","sub_path":"src/clothes_recognizer/ClothesRecognizer.py","file_name":"ClothesRecognizer.py","file_ext":"py","file_size_in_byte":2575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"27208175866","text":"import os\n\nimport numpy as np\nimport plotly.graph_objs as go\nfrom multiprocess.pool import Pool\nfrom scipy.stats import wilcoxon\nfrom tqdm import tqdm\n\nfrom libs import compute_lib\nfrom libs.config_lib import CPUS_TO_USE\nfrom libs.simulations import load, 
filtering, config, compute, paths\nfrom plotting import save\n\nOFFSET_X = 0\nOFFSET_Y = 0\nTIME_POINTS = 50\nDERIVATIVES = [0, 1, 2]\nDERIVATIVES_TEXT = ['D', 'D\\'', 'D\\'\\'']\n\n\ndef compute_simulations_fiber_densities(_simulations):\n _arguments = []\n for _simulation in _simulations:\n for _direction in ['left', 'right', 'up', 'down']:\n _arguments.append({\n 'simulation': _simulation,\n 'length_x': config.QUANTIFICATION_WINDOW_HEIGHT_IN_CELL_DIAMETER\n if _direction in ['up', 'down'] else config.QUANTIFICATION_WINDOW_WIDTH_IN_CELL_DIAMETER,\n 'length_y': config.QUANTIFICATION_WINDOW_WIDTH_IN_CELL_DIAMETER\n if _direction in ['up', 'down'] else config.QUANTIFICATION_WINDOW_HEIGHT_IN_CELL_DIAMETER,\n 'offset_x': OFFSET_Y if _direction in ['up', 'down'] else OFFSET_X,\n 'offset_y': OFFSET_X if _direction in ['up', 'down'] else OFFSET_Y,\n 'cell_id': 'cell',\n 'direction': _direction,\n 'time_points': TIME_POINTS\n })\n\n _fiber_densities = {}\n with Pool(CPUS_TO_USE) as _p:\n for _keys, _value in tqdm(\n _p.imap_unordered(compute.window_fiber_density_by_time, _arguments),\n total=len(_arguments), desc='Computing windows & fiber densities'):\n _fiber_densities[(_keys['simulation'], _keys['direction'])] = _value\n _p.close()\n _p.join()\n\n return _fiber_densities\n\n\ndef main():\n _simulations = load.structured()\n _simulations = filtering.by_time_points_amount(_simulations, _time_points=TIME_POINTS)\n _simulations = filtering.by_categories(\n _simulations,\n _is_single_cell=True,\n _is_heterogeneity=False,\n _is_low_connectivity=False,\n _is_causality=False,\n _is_dominant_passive=False,\n _is_fibrin=False\n )\n print('Total simulations:', len(_simulations))\n\n _fiber_densities = compute_simulations_fiber_densities(_simulations)\n\n _y_arrays = [[] for _i in DERIVATIVES]\n for _index_1 in tqdm(range(len(_simulations)), desc='Simulations loop'):\n _simulation_1 = _simulations[_index_1]\n _cell_1_fiber_densities = \\\n [_fiber_densities[(_simulation_1, 
_direction)] for _direction in ['left', 'right', 'up', 'down']]\n _cell_1_fiber_densities = np.mean(_cell_1_fiber_densities, axis=0)\n for _index_2 in range(_index_1 + 1, len(_simulations)):\n _simulation_2 = _simulations[_index_2]\n _cell_2_fiber_densities = \\\n [_fiber_densities[(_simulation_2, _direction)] for _direction in ['left', 'right', 'up', 'down']]\n _cell_2_fiber_densities = np.mean(_cell_2_fiber_densities, axis=0)\n for _derivative_index, _derivative in enumerate(DERIVATIVES):\n _y_arrays[_derivative_index].append(compute_lib.correlation(\n compute_lib.derivative(_cell_1_fiber_densities, _n=_derivative),\n compute_lib.derivative(_cell_2_fiber_densities, _n=_derivative)\n ))\n\n print('Total points:', len(_y_arrays[0]))\n print('Wilcoxon around the zero')\n for _y_array, _derivative in zip(_y_arrays, DERIVATIVES):\n print('Derivative:', _derivative, wilcoxon(_y_array))\n\n # plot\n _colors_array = config.colors(3)\n _fig = go.Figure(\n data=[\n go.Box(\n y=_y,\n name=_derivative,\n boxpoints='all',\n jitter=1,\n pointpos=0,\n line={\n 'width': 1\n },\n fillcolor='white',\n marker={\n 'size': 10,\n 'color': _color\n },\n opacity=0.7,\n showlegend=False\n ) for _y, _derivative, _color in zip(_y_arrays, DERIVATIVES_TEXT, _colors_array)\n ],\n layout={\n 'xaxis': {\n 'title': 'Fiber density derivative',\n 'zeroline': False\n },\n 'yaxis': {\n 'title': 'Correlation',\n 'range': [-1, 1],\n 'zeroline': False,\n 'tickmode': 'array',\n 'tickvals': [-1, -0.5, 0, 0.5, 1]\n }\n }\n )\n\n save.to_html(\n _fig=_fig,\n _path=os.path.join(paths.PLOTS, save.get_module_name()),\n _filename='plot'\n )\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"assafna/cell-ecm-project","sub_path":"fiber_density/simulations/inner_correlation_by_derivatives_single_cells.py","file_name":"inner_correlation_by_derivatives_single_cells.py","file_ext":"py","file_size_in_byte":4774,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} 
+{"seq_id":"41914408486","text":"from django.utils.safestring import mark_safe\nimport threading\nfrom django.conf import settings\nfrom django.http import HttpResponse\nfrom django.core.mail import send_mail, BadHeaderError, EmailMessage\n\nclass EmailThread(threading.Thread):\n def __init__(self, subject, body, to_email, content_subtype=None, *args, **kwargs):\n self.subject = subject\n self.to_email = [to_email,]\n self.body = body\n self.content_subtype = content_subtype\n threading.Thread.__init__(self)\n\n def run(self, *args, **kwargs):\n msg = EmailMessage(\n self.subject,\n self.body,\n settings.DEFAULT_FROM_EMAIL,\n self.to_email,\n *args,\n **kwargs\n )\n if self.content_subtype is not None:\n msg.content_subtype = self.content_subtype\n try:\n msg.send()\n print(\"Email send: \" + self.to_email[0])\n except BadHeaderError:\n print(\"Invalid header found\")\n\n\nclass EmailService:\n @classmethod\n def send_otp_to_user(cls,user):\n EmailThread(\n subject=\"Please verify your email\",\n body=f'Hello your OTP is {user.otp}.\\n\\n You OTP is valid for {settings.OTP_VALIDITY_MINS} mins',\n to_email=user.email\n ).start()\n ","repo_name":"kiransbaliga/Glome-Django","sub_path":"accounts/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2326622808","text":"\"\"\"Auto Install python package.\"\"\"\nfrom os import system\nimport sys\n\nsystem(\"echo > auto_install_python_pack.log\")\nPACKAGE = \"pip\"\n\nwith open('requirements.txt', 'r', encoding='utf8') as file:\n print(sys.version)\n for line in file.readlines():\n if line != \"\\n\" and line[:1] != \"#\":\n if sys.version[:3] == \"3.8\" and line[-13:-1] == \" # python3.8\":\n PACKAGE = line[:-13]\n elif line[-13:-4] != \" # python\":\n PACKAGE = line[:-1]\n else:\n PACKAGE = \"pip\"\n\n print(PACKAGE)\n system(\n \"python3 -m pip install \" +\n PACKAGE + \" --upgrade 
--no-warn-script-location\" +\n \" >> auto_install_python_pack.log\")\n","repo_name":"TSVS-Special-Topic-Group/Development-Environment-Build","sub_path":"src/auto_install_python_pack.py","file_name":"auto_install_python_pack.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"13702691925","text":"#coding:utf-8\nimport struct\nfrom collections import namedtuple\nfrom StringIO import StringIO\n\nfrom errors import ReachStreamEnd\nfrom core.data import DataTypeMixIn, PrettyReaderMixIn, StringIOProxy\nfrom core.readers import StreamReader\n\nimport logging\n\nlogger = logging.getLogger('binder.chips')\n\n\nHead = namedtuple('FLVHead', 'prefix version flag audio_flag video_flag data_offset')\n#Tag = namedtuple('Tag', 'seq type size timestamp ext stream_id data pre_size')\n\nAUDIO_TAG = 8 \nVIDEO_TAG = 9\nMETA_DATA = 18\n\n\nclass Tag(object):\n \n TYPE_MAP = {\n 9: 'video tag',\n 8: 'audio tag',\n 18: 'meta data',\n }\n \n def __init__(self, seq, t, size, timestamp, ext, stream_id, data, pre_size):\n self._seq = seq\n self._type = t\n self._size = size\n self._timestamp = timestamp\n self._ext = ext\n self._stream_id = stream_id\n self._data = data\n self._pre_size = pre_size\n\n def __repr__(self):\n return \"\" % (self.TYPE_MAP[self._type], self._size, self._ext, self._timestamp)\n\n def pack(self):\n fmt = '>B3B3BB3B'\n data = (self._type,) + conv24(self._size) + conv24(self._timestamp) + (self._ext,) + conv24(self._stream_id)\n ret = struct.pack(fmt, *data) \n if self.data:\n ret += self.data\n ret += struct.pack('>I', self._pre_size)\n return ret\n\n @property\n def size(self):\n return self._size\n\n @property\n def type(self):\n return self._type\n\n @property\n def seq(self):\n return self._seq\n\n @property\n def timestamp(self):\n return (self._timestamp | self._ext << 24)\n\n @timestamp.setter\n def timestamp(self, value):\n self._ext = (value >> 24 & 
0xff)\n self._timestamp = (value & 0xffffff)\n\n @property\n def stream_id(self):\n return self._stream_id\n\n @property\n def data(self):\n return self._data\n\n @property\n def pre_size(self):\n return self._pre_size\n\n @property\n def is_keyframe(self):\n if self._type != 9: return False\n r, = struct.unpack_from('>B', self._data)\n return (r >> 4) == 1\n \n\ndef conv24(int24):\n high = (int24 >> 16)\n medium = (int24 >> 8) & 0xff\n low = int24 & 0xff\n return (high, medium, low)\n\ndef pack_head(a):\n \"\"\"\n Head: prefix version flag data_offset empty\n \"\"\"\n ret = struct.pack('>3sBBII', a.prefix, a.version, a.flag, a.data_offset, 0)\n return ret\n\ndef pack_tag(a):\n \"\"\"\n Tag: seq type size timestamp ext stream_id data pre_size\n \"\"\"\n return a.pack()\n\ndef flv_pack(a):\n return pack_head(a) if isinstance(a, Head) else pack_tag(a)\n\n\nclass RtmpStreamReader(StreamReader):\n\n FILE_FORMAT_FLV = 0 #flv file format\n FILE_FORMAT_F4V = 1 #f4v file format\n \n def __init__(self, stream):\n self.seq = 0\n self.file_format = RtmpStreamReader.FILE_FORMAT_FLV\n super(RtmpStreamReader, self).__init__(stream)\n\n def readHead(self):\n prefix = 'F' + self.readRaw(2)\n version = self.read8()\n flag = self.read8()\n audio_flag = flag & 0b100\n video_flag = flag & 0b001\n data_offset = self.read32()\n self.read32() #ignore first 0 \n return Head(prefix, version, flag, audio_flag, video_flag, data_offset)\n \n def readTag(self, seq, tag_type, flag):\n tagType = tag_type\n dataSize = self.read24()\n timestamp = self.read24()\n timestampExtended = self.read8()\n streamID = self.read24()\n data = None\n if flag:\n data = self.readRaw(dataSize)\n else:\n self.readRaw(dataSize)\n size = self.read32()\n return Tag(seq, tagType, dataSize, timestamp, timestampExtended, streamID, data, size)\n \n def next(self, flag=True):\n try:\n c = self.readRaw(1)\n if c == 'F':\n return self.readHead()\n self.seq += 1\n return self.readTag(self.seq, ord(c), flag)\n except:\n raise 
StopIteration\n\n def __iter__(self):\n return self\n\n\n__all__ = ['Head', 'Tag', 'conv24', 'pack_head', 'pack_tag', 'flv_pack', 'RtmpStreamReader', 'AUDIO_TAG', 'VIDEO_TAG', 'META_DATA']\n","repo_name":"pingansdaddy/newtempo","sub_path":"src/readers.py","file_name":"readers.py","file_ext":"py","file_size_in_byte":4345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"70170466402","text":"'''\r\nCreated on 15.02.2017\r\n\r\n@author: Yingxiong\r\n'''\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom cbfe.scratch.fe_nls_solver_incre1 import MATSEval, FETS1D52ULRH, TStepper, TLoop\r\n\r\n\r\nfrom ibvpy.api import BCDof\r\n\r\nts = TStepper(n_e_x=100.)\r\nn_dofs = ts.domain.n_dofs\r\ntl = TLoop(ts=ts)\r\n\r\n# print n_dofs\r\n#\r\n# tl.ts.mats_eval.slip = [0., 100.]\r\n# tl.ts.mats_eval.bond = [0., 100.]\r\n\r\n\r\n# bs = ts.mats_eval.b_s_law\r\n#\r\n# bs1 = ts.mats_eval.G\r\n#\r\n# print bs(-4.)\r\n# print bs1([-4., -5.])\r\n#\r\n#\r\n# print dfsdfsdfsdf\r\n\r\n\r\ndef predict(L_x, u, slip, bond):\r\n\r\n tl.ts.bc_list = [BCDof(var='u', dof=n_dofs - 2, value=0.0),\r\n BCDof(var='u', dof=n_dofs - 1, value=u)]\r\n tl.ts.L_x = L_x\r\n tl.ts.mats_eval.slip = slip.tolist()\r\n tl.ts.mats_eval.bond = bond.tolist()\r\n U_record, F_record, sig_record = tl.eval()\r\n return U_record, F_record, sig_record\r\n\r\n# slip = np.array([0., 0.5, 1., 1.5, 2.0, 2.5, 3.0])\r\n# bond = np.array([0., 25., 40., 53, 65, 50, 115])\r\n\r\n# slip = np.array([0., 1e-8, 100.])\r\n# bond = np.array([0., 40., 40.])\r\n\r\n# slip = np.array([0., 0.4, 4.5])\r\n# bond = np.array([0., 50., 50])\r\n\r\n\r\nslip = np.array([0.0, 0.15000000000000002, 0.40000000000000002, 2.0, 4.0])\r\nbond = np.array([0.0, 18.000000000170711, 47.937059019961012,\r\n 50.021030274276697, 50.652307876262164])\r\n\r\n\r\nslip2 = np.array([0., 0.5, 1.5, 2.5, 3.5, 4.5])\r\nbond2 = np.array([0., 60., 40, 60, 40, 60])\r\n\r\n\r\ndef 
nl_bond(slip):\r\n x = slip\r\n y = np.zeros_like(x)\r\n y[x < 1.05] = 0.1 * x[x < 1.05] - 0.05 * x[x < 1.05] ** 2\r\n y[x > 1.05] = 0.1 * 1.05 - 0.05 * \\\r\n 1.05 ** 2 - 0.005 * (x[x > 1.05] - 1.05)\r\n return 1200. * y\r\n\r\n# slip2 = np.linspace(0, 3, 100)\r\n# bond2 = nl_bond(slip2)\r\n# plt.plot(x, nl_bond(x))\r\n# plt.ylim(0, 80)\r\n# plt.show()\r\n\r\n\r\nplt.subplot(221)\r\n# plt.plot(slip, bond)\r\nplt.plot(slip2, bond2)\r\nplt.xlabel('slip')\r\nplt.ylabel('bond')\r\n\r\nU1, F1, sig1 = predict(700, 4, slip, bond)\r\nU2, F2, sig2 = predict(700, 4, slip2, bond2)\r\n# U2, F2, sig2 = predict(700, 4.5, slip2, bond2)\r\n\r\n# U3, F3, sig3 = predict(1000, 3., slip2, bond2)\r\n\r\nplt.subplot(222)\r\n# plt.plot(U1[:, n_dofs - 1], F1[:, n_dofs - 1], lw=2)\r\nplt.plot(U2[:, n_dofs - 1], F2[:, n_dofs - 1])\r\nnp.savetxt('D:\\\\1.txt', np.vstack((U2[:, n_dofs - 1], F2[:, n_dofs - 1])))\r\n# plt.plot(U3[:, n_dofs - 1], F3[:, n_dofs - 1])\r\nplt.xlabel('displacement')\r\nplt.ylabel('pull-out force')\r\n\r\nplt.subplot(223)\r\nX = np.linspace(0, ts.L_x, ts.n_e_x + 1)\r\nu1_node = np.reshape(U1[-1, :], (-1, 2)).T\r\nu2_node = np.reshape(U2[-1, :], (-1, 2)).T\r\n# u3_node = np.reshape(U3[-1, :], (-1, 2)).T\r\n\r\n# plt.plot(X, u1_node[1] - u1_node[0])\r\n# plt.plot(X, u2_node[1])\r\n# plt.plot(X, u2_node[0])\r\nplt.plot(X, u2_node[1] - u2_node[0])\r\n# plt.plot(X, u3_node[1] - u3_node[0])\r\nplt.ylim(0, 6)\r\n\r\nplt.xlabel('x')\r\nplt.ylabel('slip')\r\n\r\nplt.subplot(224)\r\nX_ip = np.repeat(X, 2)[1:-1]\r\n# plt.plot(X_ip, sig1[-1, :])\r\nplt.plot(X_ip, sig2[-1, :])\r\n# plt.plot(X_ip, sig3[-1, :])\r\nplt.xlabel('x')\r\nplt.ylabel('shear flow')\r\n\r\n\r\nplt.show()\r\n\r\n\r\n# slip2 = u2_node[1] - u2_node[0]\r\n#\r\n# print slip2\r\n#\r\n# print np.argmin(np.abs(slip2 - 3.5))\r\n# x2_slip2 = X[np.argmin(np.abs(slip2 - 3.5))]\r\n# print x2_slip2\r\n# #\r\n# print X_ip\r\n# #\r\n# print len(X_ip)\r\n# print len(slip2)\r\n# #\r\n# plt.plot(X, slip2)\r\n# 
plt.show()\r\n#\r\n#\r\n# x = np.linspace(0, 700, 2000)\r\n# y = np.interp(x, X_ip, sig1[-1, :])\r\n#\r\n#\r\n# print 'F_integral', np.trapz(y, x)\r\n# print 'F', F1[-1][-1]\r\n#\r\n# print 'slip2', slip2[np.argmin(np.abs(slip2 - 3.5))]\r\n#\r\n# x2 = np.linspace(0, x2_slip2, 2000)\r\n# y2 = np.interp(x2, X_ip, sig2[-1, :])\r\n#\r\n# print 'F2_integral', np.trapz(y2, x2)\r\n","repo_name":"liyingxiong/cbfe","sub_path":"cbfe/plot/parametric_study_bilinear_bond_slip.py","file_name":"parametric_study_bilinear_bond_slip.py","file_ext":"py","file_size_in_byte":3658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2238893773","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# author:ShidongDu time:2020/2/13\n'''\n给你一根长度为 n 的绳子,请把绳子剪成整数长度的 m 段(m、n 都是整数,n>1 并且 m≥1),\n每段绳子的长度记为 k[0],k[1]...k[m-1] 。请问 k[0]*k[1]*...*k[m-1] 可能的最大乘积是多少?\n例如,当绳子的长度是 8 时,我们把它剪成长度分别为 2、3、3 的三段,此时得到的最大乘积是 18。\n\n答案需要取模 1e9+7(1000000007),如计算初始结果为:1000000008,请返回 1。\n\n \n\n示例 1:\n\n输入: 2\n输出: 1\n解释: 2 = 1 + 1, 1 × 1 = 1\n示例 2:\n\n输入: 10\n输出: 36\n解释: 10 = 3 + 3 + 4, 3 × 3 × 4 = 36\n \n\n提���:\n\n2 <= n <= 1000\n注意:本题与主站 343 题相同:https://leetcode-cn.com/problems/integer-break/\n'''\n# 思路:n < 5,用[1, 1, 2, 4]\n# n >= 5: res *= 3, n-= 3\nclass Solution:\n def cuttingRope(self, n: int) -> int:\n a = [0, 1, 1, 2, 4]\n if n < 0: return False\n if n <= 4: return a[n]\n res = 1\n while(n>=5):\n n -= 3\n res *= 3\n res *= n\n return res % (pow(10, 9)+7)\n\nsolution = Solution()\nres = solution.cuttingRope(-120)\nprint(res)","repo_name":"weiyuyan/LeetCode","sub_path":"剑指offer/面试题14- II. 剪绳子 II.py","file_name":"面试题14- II. 
剪绳子 II.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"5300471485","text":"# -*- coding:utf-8 -*-\n# @Time :2020-03-10 21:52\n# @Email :876417305@qq.com\n# @Author :yanxia\n# @File :study_request.PY\nimport requests\n'''\n1、构造请求:请求方式、请求地址、请求参数\n2、发起请求\n3、返回响应\n4、判断响应码,响应体'''\n#注册接口\nparams={\"mobilephone\":\"15810447833\",\"pwd\":123456}\nresp=requests.get(\"http://test.lemonban.com/futureloan/mvc/api/member/register\",params=params)\nprint(resp.text)\n# 登录接口\nparams={\"mobilephone\":\"15810447878\",\"pwd\":123456}\nresp=requests.post(\"http://test.lemonban.com/futureloan/mvc/api/member/login\",data=params)\nprint(resp.text)\nprint(resp.cookies)\n#充值接口\nparams={\"mobilephone\":\"15810447878\",\"amount\":111}\nresp=requests.post(\"http://test.lemonban.com/futureloan/mvc/api/member/recharge\",\n data=params,cookies=resp.cookies)\nprint(resp.text)\nprint(resp.cookies)\n","repo_name":"wangyanxia-626/api-test","sub_path":"class_0310shizhan/study_request.py","file_name":"study_request.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"41503142456","text":"from ovs.dal.datalist import DataList\nfrom ovs.dal.dataobject import DataObjectList\nfrom ovs.dal.hybrids.backend import Backend\n\n\nclass BackendList(object):\n \"\"\"\n This BackendList class contains various lists regarding to the Backend class\n \"\"\"\n\n @staticmethod\n def get_backends():\n \"\"\"\n Returns a list of all Backends\n \"\"\"\n backends = DataList({'object': Backend,\n 'data': DataList.select.GUIDS,\n 'query': {'type': DataList.where_operator.AND,\n 'items': []}}).data\n return DataObjectList(backends, 
Backend)\n","repo_name":"rootfs-analytics/openvstorage","sub_path":"ovs/dal/lists/backendlist.py","file_name":"backendlist.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"9852815097","text":"from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n path(\"\", views.index, name=\"index\"),\n path(\"account\", views.account, name=\"account\"),\n path(\"add\", views.add, name=\"add\"),\n path(\"login\", views.login_view, name=\"login\"),\n path(\"logout\", views.logout_view, name=\"logout\"),\n path(\"register\", views.register, name=\"register\"),\n path(\"update/\", views.update, name=\"update\"),\n path(\"tag=\", views.view_tasks, name=\"view_tasks\"),\n path(\"bother_us\", views.bother_us, name=\"bother_us\"),\n path(\"date_picker\", views.date_picker, name=\"date_picker\"),\n path(\"logbook\", views.logbook, name=\"logbook\"),\n path(\"\", views.open_page, name=\"open_page\"),\n path(\"confirm\", views.confirm, name=\"confirm\"),\n path(\"decline\", views.decline, name=\"decline\"),\n path(\"check_off/\", views.check_off, name=\"check_off\"),\n path(\"quick_add\", views.quick_add, name=\"quick_add\"),\n path(\"edit_tag\", views.edit_tag, name=\"edit_tag\"),\n path(\"delete_tag\", views.delete_tag, name=\"delete_tag\"),\n path(\"delete_friend\", views.delete_friend, name=\"delete_friend\"),\n path(\"add_friend\", views.add_friend, name=\"add_friend\"),\n path(\"add_tag\", views.add_tag, name=\"add_tag\"),\n path(\"delete_account\", views.delete_account, name=\"delete_account\"),\n path(\"change_password\", views.change_password, name=\"change_password\"),\n path(\"not_today/\", views.not_today, name=\"not_today\"),\n path(\"change_theme\", views.change_theme, name=\"change_theme\"),\n path(\"default_theme\", views.default_theme, name=\"default_theme\"),\n path(\"privacy_policy\", views.privacy_policy, 
name=\"privacy_policy\")\n]","repo_name":"bgaudino/halfempty","sub_path":"todo/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1691,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"73648586082","text":"# Clase 32. Curso Píldoras Informáticas.\r\n\r\n# Control de Flujo. POO9.\r\n\r\n# Polimorfismo\r\n\r\nclass Car():\r\n\r\n\tdef desplaza(self):\r\n\r\n\t\tprint(\"El coche se desplaza sobre sus cuatro ruedas.\")\r\n\r\n\r\nclass Moto():\r\n\r\n\tdef desplaza(self):\r\n\r\n\t\tprint(\"La moto se desplaza sobre sus dos ruedas.\")\r\n\r\n\r\nclass Furgo():\r\n\r\n\tdef desplaza(self):\r\n\r\n\t\tprint(\"La furgo se desplaza sobre sus seis ruedas.\")\r\n\r\n\r\nprint(\"----Sin Polimorismo----\")\r\n\r\nVehiculoUno = Moto()\r\n\r\nVehiculoUno.desplaza()\r\n\r\nVehiculoDos = Furgo()\r\n\r\nVehiculoDos.desplaza()\r\n\r\nVehiculoTres = Car()\r\n\r\nVehiculoTres.desplaza()\r\n\r\n\r\nprint(\"----Con Polimorismo----\")\r\n\r\n\r\ndef desplazaPolif(k):\r\n\r\n\tk.desplaza()\r\n\r\nVehiculoCuatro = Furgo()\r\n\r\ndesplazaPolif(VehiculoCuatro)\r\n\r\nVehiculoCinco = Moto()\r\n\r\ndesplazaPolif(VehiculoCinco)\r\n","repo_name":"Angnar1997/PythonPI","sub_path":"Python_PI/Clase32.py","file_name":"Clase32.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"25340882713","text":"# a linguagem python segue o padrão ANSI de cores\n#print('\\033[1;33;44mola\\033[m mundo')\na = 3\nb = 5\nprint('os valores são \\033[32m{}\\033[m e \\033[31m{}\\033[m'.format(a,b))\nnome = 'bruno'\n# usando o format para indicar a cor\nprint('prazer em te conhecer {}{}{}!!!'.format('\\033[4;34m', nome, '\\033[m'))\n# usando array de objetos para fazer cores\ncores = {\n 'limpa' : '\\033[m',\n 'amarelo' : '\\033[33m',\n 'azul' : '\\033[34m',\n 'pretobranco' : '\\033[7;30m'\n}\nprint('ola novamente 
{}{}{}!!!'.format(cores['amarelo'], nome, cores['limpa']))\nprint('ola novamente {}{}{}!!!'.format(cores['pretobranco'], nome, cores['limpa']))\nprint('ola novamente {}{}{}!!!'.format(cores['azul'], nome, cores['limpa']))","repo_name":"brunoalves2105/curso-de-python-basico---curso-em-video","sub_path":"testes/aula11.py","file_name":"aula11.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"26173682564","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function\nimport common\nimport sys\nimport os\nfrom time import sleep\n\nclass FakeChef:\n\tdef __init__(self, name):\n\t\tself.name = name\n\t\tself.reporthost = \"testhost\"\n\t\tclass FakeSock:\n\t\t\tdef shutdown(self, opt):\n\t\t\t\treturn None\n\t\t\tdef close(self):\n\t\t\t\treturn None\n\t\tself.socket = FakeSock()\n\tdef send(self, message):\n\t\tprint(\"INFO;FAKELARM: %s\"%(message,))\n\ntry:\n\timport pysvt.herrchef\n\tpysvt.herrchef.HerrChef = FakeChef\nexcept:\n\tprint(\"No herrchef\")\n\nbasepath = os.path.realpath(\".\") + \"/\"\n\nif not \"reload\" in dir(__builtins__):\n\tfrom importlib import reload\n\ndef test(args):\n\tsys.argv = args\n\timport src\n\treload(src)\n\timport src.main\n\treload(src.main)\n\tfrom src.main import main\n\tdef fakeModules():\n\t\tmypath = basepath\n\t\treturn dict(\n\t\t\t\tfakedyn = dict(\n\t\t\t\t\tname = \"%(id)s\",\n\t\t\t\t\ttype = \"dynamic\",\n\t\t\t\t\tloop = \"True\",\n\t\t\t\t\tlogpath = basepath,\n\t\t\t\t\tpidpath = basepath,\n\t\t\t\t\tloopinterval = \"1\",\n\t\t\t\t\tlistcmd = \"echo 'd1\\nd2\\nd3 {\\\"logrestarts\\\":\\\"false\\\"}'\",\n\t\t\t\t\texeccmd = \"echo Output from %(id)s\",\n\t\t\t\t\t),\n\t\t\t\tprint_trace = dict(\n\t\t\t\t\tname = \"%(id)s\",\n\t\t\t\t\ttype = \"dynamic\",\n\t\t\t\t\tloop = \"True\",\n\t\t\t\t\tlogpath = basepath,\n\t\t\t\t\tpidpath = basepath,\n\t\t\t\t\tloopinterval = \"1\",\n\t\t\t\t\tlistcmd = \"echo 'st 
{\\\"sendtrace\\\":\\\"herrchef\\\"}\\nnotworking'\",\n\t\t\t\t\texeccmd = \"python %(mypath)s/print_trace.py\"%locals(),\n\t\t\t\t\t),\n\t\t\t\tfakesingle = dict(\n\t\t\t\t\tname = \"single1\",\n\t\t\t\t\ttype = \"single\",\n\t\t\t\t\tloop = \"True\",\n\t\t\t\t\tloopinterval = \"1.5\",\n\t\t\t\t\tlogpath = basepath,\n\t\t\t\t\tpidpath = basepath,\n\t\t\t\t\texeccmd = \"echo Command output: single\"\n\t\t\t\t\t),\n\t\t\t\t)\n\tsrc.main.getModules = fakeModules\n\tmain()\n\ndef runmany(args,names):\n\tfor name in names:\n\t\ttestargs = args+[name]\n\t\ttest(testargs)\ndef startstop(name, wait=0):\n\tif not isinstance(name, list):\n\t\tnames = [name]\n\telse:\n\t\tnames = name\n\trunmany([\"main\",\"start\"],names)\n\ttest([\"main\",\"status\"])\n\tsleep(wait)\n\trunmany([\"main\",\"stop\"],names)\n\ntest(sys.argv)\n#test([\"main\",\"status\"])\n#startstop(\"d1\")\n#startstop([\"d2\",\"d3\",\"single1\"],2)\n#startstop(\"st\",1)\n\n","repo_name":"svt/daemonctl","sub_path":"test/testmain.py","file_name":"testmain.py","file_ext":"py","file_size_in_byte":2031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2479455095","text":"# This Python 3 environment comes with many helpful analytics libraries installed\n# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python\n# For example, here's several helpful packages to load in \n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv)\nimport math\nimport csv\nimport random\nfrom sklearn import linear_model, model_selection\nfrom sklearn.metrics import make_scorer, accuracy_score\nfrom sklearn.model_selection import cross_validate, train_test_split, GridSearchCV\n\n# Input data files are available in the \"../input/\" directory.\n# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory\n\nimport os\n# print(os.listdir(\"../input/datafiles\"))\n\ndf = pd.read_csv(\"../input/datafiles/RegularSeasonCompactResults.csv\")\nprint(df)\n\n# Load the data\nfolder = '../input'\nseason_data = pd.read_csv(folder + '/datafiles/RegularSeasonDetailedResults.csv')\ntourney_data = pd.read_csv(folder + '/datafiles/NCAATourneyDetailedResults.csv')\nseeds = pd.read_csv(folder + '/datafiles/NCAATourneySeeds.csv')\nframes = [season_data, tourney_data]\nall_data = pd.concat(frames)\nstat_fields = ['score', 'fga', 'fgp', 'fga3', '3pp', 'ftp', 'or', 'dr',\n 'ast', 'to', 'stl', 'blk', 'pf']\nprediction_year = 2018\nbase_elo = 1600\nteam_elos = {}\nteam_stats = {}\nX = []\ny = []\nsubmission_data = []\ndef initialize_data():\n for i in range(1985, prediction_year+1):\n team_elos[i] = {}\n team_stats[i] = {}\ninitialize_data()\n\nall_data.head(10) # Gets the top 10 data\n\n#hello aman","repo_name":"sajedjalil/Data-Science-Pipeline-Detector","sub_path":"dataset/mens-machine-learning-competition-2019/Aman Kishore/ncaa-drafter-ml.py","file_name":"ncaa-drafter-ml.py","file_ext":"py","file_size_in_byte":1630,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"54"} +{"seq_id":"38579015747","text":"import re\nfrom jamo import h2j, j2hcj\nimport ko_pron\nfrom g2pk2 import G2p\n\ng2pk = G2p()\n\n\n# This is a list of Korean classifiers preceded by pure Korean numerals.\n_korean_classifiers = '군데 권 개 그루 닢 대 두 마리 모 모금 뭇 발 발짝 방 번 벌 보루 살 수 술 시 쌈 움큼 정 짝 채 척 첩 축 켤레 톨 통'\n\n# List of (hangul, hangul divided) 
pairs:\n_hangul_divided = [(re.compile('%s' % x[0]), x[1]) for x in [\n ('ㄳ', 'ㄱㅅ'),\n ('ㄵ', 'ㄴㅈ'),\n ('ㄶ', 'ㄴㅎ'),\n ('ㄺ', 'ㄹㄱ'),\n ('ㄻ', 'ㄹㅁ'),\n ('ㄼ', 'ㄹㅂ'),\n ('ㄽ', 'ㄹㅅ'),\n ('ㄾ', 'ㄹㅌ'),\n ('ㄿ', 'ㄹㅍ'),\n ('ㅀ', 'ㄹㅎ'),\n ('ㅄ', 'ㅂㅅ'),\n ('ㅘ', 'ㅗㅏ'),\n ('ㅙ', 'ㅗㅐ'),\n ('ㅚ', 'ㅗㅣ'),\n ('ㅝ', 'ㅜㅓ'),\n ('ㅞ', 'ㅜㅔ'),\n ('ㅟ', 'ㅜㅣ'),\n ('ㅢ', 'ㅡㅣ'),\n ('ㅑ', 'ㅣㅏ'),\n ('ㅒ', 'ㅣㅐ'),\n ('ㅕ', 'ㅣㅓ'),\n ('ㅖ', 'ㅣㅔ'),\n ('ㅛ', 'ㅣㅗ'),\n ('ㅠ', 'ㅣㅜ')\n]]\n\n# List of (Latin alphabet, hangul) pairs:\n_latin_to_hangul = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [\n ('a', '에이'),\n ('b', '비'),\n ('c', '시'),\n ('d', '디'),\n ('e', '이'),\n ('f', '에프'),\n ('g', '지'),\n ('h', '에이치'),\n ('i', '아이'),\n ('j', '제이'),\n ('k', '케이'),\n ('l', '엘'),\n ('m', '엠'),\n ('n', '엔'),\n ('o', '오'),\n ('p', '피'),\n ('q', '큐'),\n ('r', '아르'),\n ('s', '에스'),\n ('t', '티'),\n ('u', '유'),\n ('v', '브이'),\n ('w', '더블유'),\n ('x', '엑스'),\n ('y', '와이'),\n ('z', '제트')\n]]\n\n# List of (ipa, lazy ipa) pairs:\n_ipa_to_lazy_ipa = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [\n ('t͡ɕ','ʧ'),\n ('d͡ʑ','ʥ'),\n ('ɲ','n^'),\n ('ɕ','ʃ'),\n ('ʷ','w'),\n ('ɭ','l`'),\n ('ʎ','ɾ'),\n ('ɣ','ŋ'),\n ('ɰ','ɯ'),\n ('ʝ','j'),\n ('ʌ','ə'),\n ('ɡ','g'),\n ('\\u031a','#'),\n ('\\u0348','='),\n ('\\u031e',''),\n ('\\u0320',''),\n ('\\u0339','')\n]]\n\n\ndef latin_to_hangul(text):\n for regex, replacement in _latin_to_hangul:\n text = re.sub(regex, replacement, text)\n return text\n\n\ndef divide_hangul(text):\n text = j2hcj(h2j(text))\n for regex, replacement in _hangul_divided:\n text = re.sub(regex, replacement, text)\n return text\n\n\ndef hangul_number(num, sino=True):\n '''Reference https://github.com/Kyubyong/g2pK'''\n num = re.sub(',', '', num)\n\n if num == '0':\n return '영'\n if not sino and num == '20':\n return '스무'\n\n digits = '123456789'\n names = '일이삼사오육칠팔구'\n digit2name = {d: n for d, n in zip(digits, names)}\n\n modifiers = '한 두 세 네 다섯 여섯 일곱 여덟 아홉'\n decimals = '열 스물 서른 마흔 쉰 예순 일흔 여든 아흔'\n digit2mod = {d: 
mod for d, mod in zip(digits, modifiers.split())}\n digit2dec = {d: dec for d, dec in zip(digits, decimals.split())}\n\n spelledout = []\n for i, digit in enumerate(num):\n i = len(num) - i - 1\n if sino:\n if i == 0:\n name = digit2name.get(digit, '')\n elif i == 1:\n name = digit2name.get(digit, '') + '십'\n name = name.replace('일십', '십')\n else:\n if i == 0:\n name = digit2mod.get(digit, '')\n elif i == 1:\n name = digit2dec.get(digit, '')\n if digit == '0':\n if i % 4 == 0:\n last_three = spelledout[-min(3, len(spelledout)):]\n if ''.join(last_three) == '':\n spelledout.append('')\n continue\n else:\n spelledout.append('')\n continue\n if i == 2:\n name = digit2name.get(digit, '') + '백'\n name = name.replace('일백', '백')\n elif i == 3:\n name = digit2name.get(digit, '') + '천'\n name = name.replace('일천', '천')\n elif i == 4:\n name = digit2name.get(digit, '') + '만'\n name = name.replace('일만', '만')\n elif i == 5:\n name = digit2name.get(digit, '') + '십'\n name = name.replace('일십', '십')\n elif i == 6:\n name = digit2name.get(digit, '') + '백'\n name = name.replace('일백', '백')\n elif i == 7:\n name = digit2name.get(digit, '') + '천'\n name = name.replace('일천', '천')\n elif i == 8:\n name = digit2name.get(digit, '') + '억'\n elif i == 9:\n name = digit2name.get(digit, '') + '십'\n elif i == 10:\n name = digit2name.get(digit, '') + '백'\n elif i == 11:\n name = digit2name.get(digit, '') + '천'\n elif i == 12:\n name = digit2name.get(digit, '') + '조'\n elif i == 13:\n name = digit2name.get(digit, '') + '십'\n elif i == 14:\n name = digit2name.get(digit, '') + '백'\n elif i == 15:\n name = digit2name.get(digit, '') + '천'\n spelledout.append(name)\n return ''.join(elem for elem in spelledout)\n\n\ndef number_to_hangul(text):\n '''Reference https://github.com/Kyubyong/g2pK'''\n tokens = set(re.findall(r'(\\d[\\d,]*)([\\uac00-\\ud71f]+)', text))\n for token in tokens:\n num, classifier = token\n if classifier[:2] in _korean_classifiers or classifier[0] in _korean_classifiers:\n 
spelledout = hangul_number(num, sino=False)\n else:\n spelledout = hangul_number(num, sino=True)\n text = text.replace(f'{num}{classifier}', f'{spelledout}{classifier}')\n # digit by digit for remaining digits\n digits = '0123456789'\n names = '영일이삼사오육칠팔구'\n for d, n in zip(digits, names):\n text = text.replace(d, n)\n return text\n\n\ndef korean_to_lazy_ipa(text):\n text = latin_to_hangul(text)\n text = number_to_hangul(text)\n text=re.sub('[\\uac00-\\ud7af]+',lambda x:ko_pron.romanise(x.group(0),'ipa').split('] ~ [')[0],text)\n for regex, replacement in _ipa_to_lazy_ipa:\n text = re.sub(regex, replacement, text)\n return text\n\n\ndef korean_to_ipa(text):\n text = korean_to_lazy_ipa(text)\n return text.replace('ʧ','tʃ').replace('ʥ','dʑ')\n\ndef korean_to_ipa2(text):\n text = latin_to_hangul(text)\n text = number_to_hangul(text)\n text = g2pk(text)\n text=re.sub('[\\uac00-\\ud7af]+',lambda x:ko_pron.romanise(x.group(0),'ipa').split('] ~ [')[0],text)\n for regex, replacement in _ipa_to_lazy_ipa:\n text = re.sub(regex, replacement, text)\n text = text.replace('ʧ','tʃ').replace('ʥ','dʑ')\n return text\n\n\n\n\n\n\nimport itertools\n\nINITIAL = 0x001\nMEDIAL = 0x010\nFINAL = 0x100\nCHAR_LISTS = {\n INITIAL: list(map(chr, [\n 0x3131, 0x3132, 0x3134, 0x3137, 0x3138, 0x3139,\n 0x3141, 0x3142, 0x3143, 0x3145, 0x3146, 0x3147,\n 0x3148, 0x3149, 0x314a, 0x314b, 0x314c, 0x314d,\n 0x314e\n ])),\n MEDIAL: list(map(chr, [\n 0x314f, 0x3150, 0x3151, 0x3152, 0x3153, 0x3154,\n 0x3155, 0x3156, 0x3157, 0x3158, 0x3159, 0x315a,\n 0x315b, 0x315c, 0x315d, 0x315e, 0x315f, 0x3160,\n 0x3161, 0x3162, 0x3163\n ])),\n FINAL: list(map(chr, [\n 0x3131, 0x3132, 0x3133, 0x3134, 0x3135, 0x3136,\n 0x3137, 0x3139, 0x313a, 0x313b, 0x313c, 0x313d,\n 0x313e, 0x313f, 0x3140, 0x3141, 0x3142, 0x3144,\n 0x3145, 0x3146, 0x3147, 0x3148, 0x314a, 0x314b,\n 0x314c, 0x314d, 0x314e\n ]))\n}\nCHAR_INITIALS = CHAR_LISTS[INITIAL]\nCHAR_MEDIALS = CHAR_LISTS[MEDIAL]\nCHAR_FINALS = CHAR_LISTS[FINAL]\nCHAR_SETS = {k: 
set(v) for k, v in CHAR_LISTS.items()}\nCHARSET = set(itertools.chain(*CHAR_SETS.values()))\nCHAR_INDICES = {k: {c: i for i, c in enumerate(v)}\n for k, v in CHAR_LISTS.items()}\n\n\ndef is_hangul_syllable(c):\n return 0xac00 <= ord(c) <= 0xd7a3 # Hangul Syllables\n\n\ndef is_hangul_jamo(c):\n return 0x1100 <= ord(c) <= 0x11ff # Hangul Jamo\n\n\ndef is_hangul_compat_jamo(c):\n return 0x3130 <= ord(c) <= 0x318f # Hangul Compatibility Jamo\n\n\ndef is_hangul_jamo_exta(c):\n return 0xa960 <= ord(c) <= 0xa97f # Hangul Jamo Extended-A\n\n\ndef is_hangul_jamo_extb(c):\n return 0xd7b0 <= ord(c) <= 0xd7ff # Hangul Jamo Extended-B\n\n\ndef is_hangul(c):\n return (is_hangul_syllable(c) or\n is_hangul_jamo(c) or\n is_hangul_compat_jamo(c) or\n is_hangul_jamo_exta(c) or\n is_hangul_jamo_extb(c))\n\n\ndef is_supported_hangul(c):\n return is_hangul_syllable(c) or is_hangul_compat_jamo(c)\n\n\ndef check_hangul(c, jamo_only=False):\n if not ((jamo_only or is_hangul_compat_jamo(c)) or is_supported_hangul(c)):\n raise ValueError(f\"'{c}' is not a supported hangul character. \"\n f\"'Hangul Syllables' (0xac00 ~ 0xd7a3) and \"\n f\"'Hangul Compatibility Jamos' (0x3130 ~ 0x318f) are \"\n f\"supported at the moment.\")\n\n\ndef get_jamo_type(c):\n check_hangul(c)\n assert is_hangul_compat_jamo(c), f\"not a jamo: {ord(c):x}\"\n return sum(t for t, s in CHAR_SETS.items() if c in s)\n\n\ndef split_syllable_char(c):\n \"\"\"\n Splits a given korean syllable into its components. 
Each component is\n represented by Unicode in 'Hangul Compatibility Jamo' range.\n\n Arguments:\n c: A Korean character.\n\n Returns:\n A triple (initial, medial, final) of Hangul Compatibility Jamos.\n If no jamo corresponds to a position, `None` is returned there.\n\n Example:\n >>> split_syllable_char(\"안\")\n (\"ㅇ\", \"ㅏ\", \"ㄴ\")\n >>> split_syllable_char(\"고\")\n (\"ㄱ\", \"ㅗ\", None)\n >>> split_syllable_char(\"ㅗ\")\n (None, \"ㅗ\", None)\n >>> split_syllable_char(\"ㅇ\")\n (\"ㅇ\", None, None)\n \"\"\"\n check_hangul(c)\n if len(c) != 1:\n raise ValueError(\"Input string must have exactly one character.\")\n\n init, med, final = None, None, None\n if is_hangul_syllable(c):\n offset = ord(c) - 0xac00\n x = (offset - offset % 28) // 28\n init, med, final = x // 21, x % 21, offset % 28\n if not final:\n final = None\n else:\n final -= 1\n else:\n pos = get_jamo_type(c)\n if pos & INITIAL == INITIAL:\n pos = INITIAL\n elif pos & MEDIAL == MEDIAL:\n pos = MEDIAL\n elif pos & FINAL == FINAL:\n pos = FINAL\n idx = CHAR_INDICES[pos][c]\n if pos == INITIAL:\n init = idx\n elif pos == MEDIAL:\n med = idx\n else:\n final = idx\n return tuple(CHAR_LISTS[pos][idx] if idx is not None else None\n for pos, idx in\n zip([INITIAL, MEDIAL, FINAL], [init, med, final]))\n\n\ndef split_syllables(s, ignore_err=True, pad=None):\n \"\"\"\n Performs syllable-split on a string.\n\n Arguments:\n s (str): A string (possibly mixed with non-Hangul characters).\n ignore_err (bool): If set False, it ensures that all characters in\n the string are Hangul-splittable and throws a ValueError otherwise.\n (default: True)\n pad (str): Pad empty jamo positions (initial, medial, or final) with\n `pad` character. This is useful for cases where fixed-length\n strings are needed. 
(default: None)\n\n Returns:\n Hangul-split string\n\n Example:\n >>> split_syllables(\"안녕하세요\")\n \"ㅇㅏㄴㄴㅕㅇㅎㅏㅅㅔㅇㅛ\"\n >>> split_syllables(\"안녕하세요~~\", ignore_err=False)\n ValueError: encountered an unsupported character: ~ (0x7e)\n >>> split_syllables(\"안녕하세요ㅛ\", pad=\"x\")\n 'ㅇㅏㄴㄴㅕㅇㅎㅏxㅅㅔxㅇㅛxxㅛx'\n \"\"\"\n\n def try_split(c):\n try:\n return split_syllable_char(c)\n except ValueError:\n if ignore_err:\n return (c,)\n raise ValueError(f\"encountered an unsupported character: \"\n f\"{c} (0x{ord(c):x})\")\n\n s = map(try_split, s)\n if pad is not None:\n tuples = map(lambda x: tuple(pad if y is None else y for y in x), s)\n else:\n tuples = map(lambda x: filter(None, x), s)\n return \"\".join(itertools.chain(*tuples))\n\n\ndef join_jamos_char(init, med, final=None):\n \"\"\"\n Combines jamos into a single syllable.\n\n Arguments:\n init (str): Initial jao.\n med (str): Medial jamo.\n final (str): Final jamo. If not supplied, the final syllable is made\n without the final. (default: None)\n\n Returns:\n A Korean syllable.\n \"\"\"\n chars = (init, med, final)\n for c in filter(None, chars):\n check_hangul(c, jamo_only=True)\n\n idx = tuple(CHAR_INDICES[pos][c] if c is not None else c\n for pos, c in zip((INITIAL, MEDIAL, FINAL), chars))\n init_idx, med_idx, final_idx = idx\n # final index must be shifted once as\n # final index with 0 points to syllables without final\n final_idx = 0 if final_idx is None else final_idx + 1\n return chr(0xac00 + 28 * 21 * init_idx + 28 * med_idx + final_idx)\n\n\ndef join_jamos(s, ignore_err=True):\n \"\"\"\n Combines a sequence of jamos to produce a sequence of syllables.\n\n Arguments:\n s (str): A string (possible mixed with non-jamo characters).\n ignore_err (bool): If set False, it will ensure that all characters\n will be consumed for the making of syllables. It will throw a\n ValueError when it fails to do so. 
(default: True)\n\n Returns:\n A string\n\n Example:\n >>> join_jamos(\"ㅇㅏㄴㄴㅕㅇㅎㅏㅅㅔㅇㅛ\")\n \"안녕하세요\"\n >>> join_jamos(\"ㅇㅏㄴㄴㄴㅕㅇㅎㅏㅅㅔㅇㅛ\")\n \"안ㄴ녕하세요\"\n >>> join_jamos()\n \"\"\"\n last_t = 0\n queue = []\n new_string = \"\"\n\n def flush(n=0):\n new_queue = []\n while len(queue) > n:\n new_queue.append(queue.pop())\n if len(new_queue) == 1:\n if not ignore_err:\n raise ValueError(f\"invalid jamo character: {new_queue[0]}\")\n result = new_queue[0]\n elif len(new_queue) >= 2:\n try:\n result = join_jamos_char(*new_queue)\n except (ValueError, KeyError):\n # Invalid jamo combination\n if not ignore_err:\n raise ValueError(f\"invalid jamo characters: {new_queue}\")\n result = \"\".join(new_queue)\n else:\n result = None\n return result\n\n for c in s:\n if c not in CHARSET:\n if queue:\n new_c = flush() + c\n else:\n new_c = c\n last_t = 0\n else:\n t = get_jamo_type(c)\n new_c = None\n if t & FINAL == FINAL:\n if not (last_t == MEDIAL):\n new_c = flush()\n elif t == INITIAL:\n new_c = flush()\n elif t == MEDIAL:\n if last_t & INITIAL == INITIAL:\n new_c = flush(1)\n else:\n new_c = flush()\n last_t = t\n queue.insert(0, c)\n if new_c:\n new_string += new_c\n if queue:\n new_string += flush()\n return new_string","repo_name":"kdrkdrkdr/JK-VITS","sub_path":"text/korean.py","file_name":"korean.py","file_ext":"py","file_size_in_byte":15328,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"54"} +{"seq_id":"74644958562","text":"def fetch_fish(data_train,data_test,target):\n\n X_train=data_train.drop(target,axis=1)\n X_test=data_test.drop(target,axis=1)\n\n Y_train=data_train[[target]]\n Y_test=data_test[[target]]\n\n X_train = X_train.to_numpy()\n Y_train = Y_train.to_numpy().reshape(-1,)\n\n X_test = X_test.to_numpy()\n Y_test = Y_test.to_numpy().reshape(-1,)\n\n return X_train, Y_train, X_test, Y_test\n\ndef train_xgb(n_trees,learning_rate, max_depth,subsample,dev,oot,target):\n\n x_train, y_train, x_test, y_test = 
fetch_fish(data_train=dev,data_test=oot,target=target)\n import xgboost as xgb\n from sklearn.metrics import accuracy_score\n\n xgb_cl = xgb.XGBClassifier(x_train=x_train, y_train=y_train, x_test=x_test, y_test=y_test,subsample=subsample,\n n_trees=n_trees,learning_rate=learning_rate, max_depth=max_depth)\n model=xgb_cl.fit(x_train,y_train)\n y_pred=model.predict(x_test)\n loss = 1-(accuracy_score(y_test, y_pred))\n return loss\n\n\n","repo_name":"KamalenduPy/BOHB_XGB_Implementation","sub_path":"bohb/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"18290002787","text":"import os\n\nfrom datetime import datetime as dt\n\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk.corpus import wordnet\n\nfrom ... import helpers\nfrom . import postagger, VERBS_PATH\n\nVERBS = helpers.get_verbs(VERBS_PATH)\nWORDNET_POS = {\n 'N': wordnet.NOUN, 'V': wordnet.VERB, 'J': wordnet.ADJ, 'R': wordnet.ADV\n}\nMAP = {\n \"'m\": 'am', \"'ll\": 'will', \"n't\": 'not', \"'ve\": 'have', \"'re\": 'are'\n}\n\nlemmatizer = WordNetLemmatizer()\n\n\nclass Lemmatizer(object):\n \"\"\" Interface. \"\"\"\n def __init__(self, tokens):\n \"\"\" Constructor. \"\"\"\n self.tokens = tokens\n\n def execute(self):\n \"\"\" Raises NotImplementedError. \"\"\"\n raise NotImplementedError(\"Lemmatizer is an abstract class. In must \"\n \"be implemented by another class. 
Try using \"\n \"the NLTKLemmatizer.\")\n\n\ndef fix(token, lemma, prev=None, next=None):\n \"\"\"\n Attempts to fix lemmatization errors with hardcoded rules.\n \"\"\"\n if not token and not lemma and not prev and not next:\n raise ValueError(\"Recieved invalid input to lemmatizer.fix()\")\n elif token.lower() == \"ca\":\n if next and next[0] and next[0].lower() == \"n't\":\n return \"can\"\n else:\n return lemma.lower()\n elif token.lower() == \"as\":\n return \"as\"\n elif token.lower() == \"left\":\n if prev and prev[1] == wordnet.VERB:\n return \"leave\"\n else:\n return lemma.lower()\n elif token in MAP:\n return MAP[token]\n elif lemma in VERBS:\n return VERBS[lemma]\n elif token in VERBS:\n return VERBS[token]\n else:\n return lemma.lower()\n\n\nclass NLTKLemmatizer(Lemmatizer):\n \"\"\" Implements Lemmatizer. \"\"\"\n def __init__(self, tokens):\n \"\"\" Constructor. \"\"\"\n super().__init__(tokens)\n\n def execute(self):\n \"\"\" Return a list of all tokens within the specified string. 
\"\"\"\n lemmas = []\n\n tokens = [\n (t, WORDNET_POS.get(p[0], wordnet.NOUN))\n for (t, p) in postagger.PosTagger(self.tokens).execute()\n ]\n\n for (i, (token, pos)) in enumerate(tokens):\n lemma = lemmatizer.lemmatize(token, pos)\n prev = None if i == 0 else tokens[i - 1]\n next = None if i == len(tokens) - 1 else tokens[i + 1]\n lemmas.append(fix(token, lemma, prev, next))\n\n return lemmas\n","repo_name":"meyersbs/uncertainty","sub_path":"uncertainty/lib/nlp/lemmatizer.py","file_name":"lemmatizer.py","file_ext":"py","file_size_in_byte":2424,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"54"} +{"seq_id":"71877508640","text":"from sys import * \r\nimport math\r\ninput = stdin.readline\r\nradius = 10 \r\nwhile radius != 0:\r\n radius = int(input())\r\n if radius!= 0:\r\n _sum = 0\r\n for y in range(1, radius):\r\n x = math.sqrt(radius * radius - y * y)\r\n _sum += math.floor(x) * 2 + 1 \r\n print(2 * _sum + 2 * radius + 3)","repo_name":"jasonl0209/CCC-Solutions-Senior","sub_path":"CCC '08 S2.py","file_name":"CCC '08 S2.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"15725227605","text":"from dolfin import *\nimport matplotlib.pyplot as plt\nimport os\n\n\ndef solve_elasticity(facet_function, E, nu, dt, T_end, output_dir):\n \"\"\"Solves elasticity problem with Young modulus E, Poisson ration nu,\n timestep dt, until T_end and with output data going to output_dir.\n Geometry is defined by facet_function which also defines rest boundary\n by marker 1 and traction boundary by marker 2.\"\"\"\n\n # Get mesh and prepare boundary measure\n mesh = facet_function.mesh()\n gdim = mesh.geometry().dim()\n dx = Measure(\"dx\")\n ds = Measure(\"ds\", subdomain_data=facet_function, subdomain_id=2)\n\n # Limit quadrature degree\n dx = dx(degree=4)\n ds = ds(degree=4)\n\n # Build function space\n element_v = 
VectorElement(\"P\", mesh.ufl_cell(), 1)\n element_s = FiniteElement(\"P\", mesh.ufl_cell(), 1)\n mixed_element = MixedElement([element_v, element_v, element_s])\n W = FunctionSpace(mesh, mixed_element)\n info(\"Num DOFs {}\".format(W.dim()))\n\n # Prepare BCs\n bc0 = DirichletBC(W.sub(0), gdim*(0,), facet_function, 1)\n bc1 = DirichletBC(W.sub(1), gdim*(0,), facet_function, 1)\n bcs = [bc0, bc1]\n\n # Define constitutive law\n def stress(u, p):\n \"\"\"Returns 1st Piola-Kirchhoff stress and (local) mass balance\n for given u, p.\"\"\"\n mu = Constant(E/(2.0*(1.0 + nu)))\n F = I + grad(u)\n J = det(F)\n B = F * F.T\n T = -p*I + mu*(B-I) # Cauchy stress\n S = J*T*inv(F).T # 1st Piola-Kirchhoff stress\n if nu == 0.5:\n # Incompressible\n pp = J-1.0\n else:\n # Compressible\n lmbd = Constant(E*nu/((1.0 + nu)*(1.0 - 2.0*nu)))\n pp = 1.0/lmbd*p + (J*J-1.0)\n return S, pp\n\n # Timestepping theta-method parameters\n q = Constant(0.5)\n dt = Constant(dt)\n\n # Unknowns, values at previous step and test functions\n w = Function(W)\n u, v, p = split(w)\n w0 = Function(W)\n u0, v0, p0 = split(w0)\n _u, _v, _p = TestFunctions(W)\n\n I = Identity(W.mesh().geometry().dim())\n\n # Balance of momentum\n S, pp = stress(u, p)\n S0, pp0 = stress(u0, p0)\n F1 = (1.0/dt)*inner(u-u0, _u)*dx \\\n - ( q*inner(v, _u)*dx + (1.0-q)*inner(v0, _u)*dx )\n F2a = inner(S, grad(_v))*dx + pp*_p*dx\n F2b = inner(S0, grad(_v))*dx + pp0*_p*dx\n F2 = (1.0/dt)*inner(v-v0, _v)*dx + q*F2a + (1.0-q)*F2b\n\n # Traction at boundary\n F = I + grad(u)\n bF_magnitude = Constant(0.0)\n bF_direction = {2: Constant((0.0, 1.0)), 3: Constant((0.0, 0.0, 1.0))}[gdim]\n bF = det(F)*dot(inv(F).T, bF_magnitude*bF_direction)\n FF = inner(bF, _v)*ds\n\n # Whole system and its Jacobian\n F = F1 + F2 + FF\n J = derivative(F, w)\n\n # Initialize solver\n problem = NonlinearVariationalProblem(F, w, bcs=bcs, J=J)\n solver = NonlinearVariationalSolver(problem)\n solver.parameters['newton_solver']['relative_tolerance'] = 1e-6\n 
solver.parameters['newton_solver']['linear_solver'] = 'mumps'\n\n # Extract solution components\n u, v, p = w.split()\n u.rename(\"u\", \"displacement\")\n v.rename(\"v\", \"velocity\")\n p.rename(\"p\", \"pressure\")\n\n # Create files for storing solution\n vfile = XDMFFile(os.path.join(output_dir, \"velo.xdmf\"))\n ufile = XDMFFile(os.path.join(output_dir, \"disp.xdmf\"))\n pfile = XDMFFile(os.path.join(output_dir, \"pres.xdmf\"))\n\n # Prepare plot window\n fig = plt.figure()\n fig.show()\n\n # Time-stepping loop\n t = 0\n while t <= T_end:\n t += float(dt)\n info(\"Time: {}\".format(t))\n\n # Increase traction\n bF_magnitude.assign(100.0*t)\n\n # Prepare to solve and solve\n w0.assign(w)\n solver.solve()\n\n # Store solution to files and plot\n ufile.write(u, t)\n vfile.write(v, t)\n pfile.write(p, t)\n fig.clear()\n plot(u, mode=\"displacement\")\n fig.canvas.draw()\n\n # Close files\n vfile.close()\n ufile.close()\n pfile.close()\n\n\ndef geometry_2d(length):\n \"\"\"Prepares 2D geometry. Returns facet function with 1, 2 on parts of\n the boundary.\"\"\"\n n = 5\n x0 = 0.0\n x1 = x0 + length\n y0 = 0.0\n y1 = 1.0\n mesh = RectangleMesh(Point(x0, y0), Point(x1, y1), int((x1-x0)*n), int((y1-y0)*n), 'crossed')\n boundary_parts = MeshFunction('size_t', mesh, mesh.topology().dim()-1)\n left = AutoSubDomain(lambda x: near(x[0], x0))\n right = AutoSubDomain(lambda x: near(x[0], x1))\n left .mark(boundary_parts, 1)\n right.mark(boundary_parts, 2)\n return boundary_parts\n\n\ndef geometry_3d():\n \"\"\"Prepares 3D geometry. 
Returns facet function with 1, 2 on parts of\n the boundary.\"\"\"\n mesh = Mesh('lego_beam.xml')\n gdim = mesh.geometry().dim()\n x0 = mesh.coordinates()[:, 0].min()\n x1 = mesh.coordinates()[:, 0].max()\n boundary_parts = MeshFunction('size_t', mesh, mesh.topology().dim()-1)\n left = AutoSubDomain(lambda x: near(x[0], x0))\n right = AutoSubDomain(lambda x: near(x[0], x1))\n left .mark(boundary_parts, 1)\n right.mark(boundary_parts, 2)\n return boundary_parts\n\n\nif __name__ == '__main__':\n parameters['std_out_all_processes'] = False\n\n solve_elasticity(geometry_2d(20.0), 1e5, 0.3, 0.25, 5.0, 'results_2d_comp')\n solve_elasticity(geometry_2d(20.0), 1e5, 0.5, 0.25, 5.0, 'results_2d_incomp')\n solve_elasticity(geometry_2d(80.0), 1e5, 0.3, 0.25, 5.0, 'results_2d_long_comp')\n solve_elasticity(geometry_3d(), 1e5, 0.3, 0.50, 5.0, 'results_3d_comp')\n","repo_name":"blechta/fenics-handson","sub_path":"elasticity/elast.py","file_name":"elast.py","file_ext":"py","file_size_in_byte":5456,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"29481153229","text":"import sys\nimport itertools\nsys.path.append('../day_02')\nimport puzzle_02\n\n# https://adventofcode.com/2019/day/7\n# Day 7: Amplification Circuit\n# PART 1\n\nli = puzzle_02.process_input('input_07')\n\n# I decided to add another input to the original intcode processor to be able\n# to handle both the phase setting and the input signal. 
I used the built-in\n# permutations function to get every possible combination of the phase settings\n# and iterate through the combinations, then take the highest result.\n\ndef run_amp(li, phase_setting, input_signal):\n li_copy = li.copy()\n output = puzzle_02.process_intcode(li_copy, phase_setting, input_signal)[1][-1]\n return output\n\ndef run_program(li):\n possible_phase_settings = [0, 1, 2, 3, 4]\n perm = itertools.permutations(possible_phase_settings, 5)\n\n outputs = []\n for p in perm:\n settings = list(p)\n input = 0\n for n in range(0, 5):\n input = run_amp(li, settings[n], input)\n outputs.append(input)\n\n return max(outputs)\n\n# print(run_program(li))\n# 567045\n\n# PART 2\n","repo_name":"oliviagardiner/advent-of-code-2019","sub_path":"day_07/puzzle_07.py","file_name":"puzzle_07.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"35925914477","text":"\"\"\"\r\nCreated on Tue Apr 12 04:53:21 2022\r\n\r\n@author: Kuldeep\r\n\"\"\"\r\n\r\nimport functools\r\nclass Solution:\r\n def equalPartition(self, N, arr):\r\n if sum(arr)%2 != 0:\r\n return False\r\n self.arr = arr\r\n return self.solve(N, sum(arr)//2)\r\n \r\n @functools.lru_cache(maxsize=None)\r\n def solve(self, n, S):\r\n if S==0:\r\n return True\r\n if n==0:\r\n return False\r\n \r\n if arr[n-1]<=S:\r\n return self.solve(n-1, S-arr[n-1]) or\\\r\n self.solve(n-1, S)\r\n else:\r\n return self.solve(n-1, S)\r\n\r\nif __name__=='__main__':\r\n sol = Solution()\r\n arr = [1, 5, 11, 5]\r\n print(sol.equalPartition(len(arr), arr))\r\n","repo_name":"kuldeepbishnoi/Competitive-Programming","sub_path":"DP/01_01 Knapsack/03_equalPartition_recursive.py","file_name":"03_equalPartition_recursive.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"2541069545","text":"# This Python 3 environment comes 
with many helpful analytics libraries installed\n# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python\n# For example, here's several helpful packages to load in \n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n\nimport matplotlib\nimport sklearn \nimport datetime\nimport matplotlib.pyplot as plt\nfrom sklearn import model_selection\nfrom numpy import array\nfrom sklearn import preprocessing\nfrom sklearn.naive_bayes import BernoulliNB\nfrom sklearn.cross_validation import train_test_split\n# Input data files are available in the \"../input/\" directory.\n# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory\n\nfrom subprocess import check_output\nprint(check_output([\"ls\", \"../input\"]).decode(\"utf8\"))\n\n\n# In[2]:\n\n\ncrime_test = pd.read_csv(\"../input/test.csv\", sep=',',error_bad_lines=False)\ncrime_train = pd.read_csv(\"../input/train.csv\", sep=',',error_bad_lines=False)\n\n\n# ## Address preprocessing\n\n# In[3]:\n\n\ndict = {}\nfor index, row in crime_train.iterrows():\n obj = row[\"Address\"].replace(\" \",\"\")\n in0 = obj.find('/')\n len0 = len(obj)\n ele = []\n ele.append(obj[len0-2: len0])\n if in0!=-1:\n ele.append(obj[in0-2:in0])\n for ss in ele:\n if dict.__contains__(ss):\n dict[ss] = dict[ss] + 1\n else:\n dict[ss] = 1\n\n\n# In[4]:\n\n\ndef getClass0(number):\n if number < 1000:\n return 0\n if number < 10000:\n return 1\n return 2\ndef extractRegion(row):\n ss = row.replace(\" \",\"\")\n index = ss.find('/')\n len0 = len(ss)\n str1 = dict[ss[len0-2: len0]]\n if index != -1:\n str0 = dict[ss[index-2:index]]\n if str0 > str1:\n t = str0\n str0 = str1\n str1 = t\n return str(getClass0(str0)) + \"/\" + str(getClass0(str1))\n else:\n return \"Block \"+ str(getClass0(str1))\n\n\n# In[5]:\n\n\ncrime_train['Region'] = crime_train.apply(lambda row: extractRegion(row['Address']), 
axis=1)\ncrime_test['Region'] = crime_test.apply(lambda row: extractRegion(row['Address']), axis=1)\n\n\n# ## Date preprocessing(including Hour and Month)\n\n# In[6]:\n\n\ndef getHourClass(hour):\n if hour>=6 and hour <=12:\n return \"morning\"\n elif hour >12 and hour <=18:\n return \"afternoon\"\n elif hour > 18 and hour <=23:\n return \"evening\"\n else:\n return \"dawn\"\n \n# Extract hour from date colum\ndef extractHours(row):\n date, time = row.split()\n h = int(time[0:2])\n return getHourClass(h)\n\ncrime_train['Hour'] = crime_train.apply(lambda row: extractHours(row['Dates']), axis=1)\ncrime_test['Hour'] = crime_test.apply(lambda row: extractHours(row['Dates']), axis=1)\n\n\n# In[7]:\n\n\ndef getMonthClass(month):\n if month>=2 and month <=5:\n return \"spring\"\n elif month >=6 and month <=8:\n return \"summer\"\n elif month >=9 and month <=11:\n return \"fall\"\n else:\n return \"winter\"\n \n# Extract hour from date colum\ndef extractMonths(row):\n date, time = row.split()\n h = int(date[5:7])\n return getMonthClass(h)\n\ncrime_train['Month'] = crime_train.apply(lambda row: extractMonths(row['Dates']), axis=1)\ncrime_test['Month'] = crime_test.apply(lambda row: extractMonths(row['Dates']), axis=1)\n\n\n# ## generate the vectors for training set by dummy\n\n# In[9]:\n\n\n\n\n#用LabelEncoder对不同的犯罪类型编号\nleCrime = preprocessing.LabelEncoder()\ncrime = leCrime.fit_transform(crime_train.Category)\n\n#因子化星期几,街区,小时等特征\ndays = pd.get_dummies(crime_train.DayOfWeek)\ndistrict = pd.get_dummies(crime_train.PdDistrict)\nhour = pd.get_dummies(crime_train.Hour) \nmonth = pd.get_dummies(crime_train.Month)\nregion = pd.get_dummies(crime_train.Region)\n#组合特征\ntrainData = pd.concat([hour, days, district, region, month], axis=1)\ntrainData['crime']=crime\n\n\n# ## generate the vectors for test set by dummy\n\n# In[11]:\n\n\n#对于测试数据做同样的处理\ndays = pd.get_dummies(crime_test.DayOfWeek)\ndistrict = pd.get_dummies(crime_test.PdDistrict)\n\nhour = pd.get_dummies(crime_test.Hour) 
\nregion = pd.get_dummies(crime_test.Region)\nmonth = pd.get_dummies(crime_test.Month)\n\ntestData = pd.concat([crime_test.Id, hour, days, district, region, month], axis=1)\n\n\ntraining, validation = train_test_split(trainData, train_size=.60)\n# ## Navie Bayes model\n\n# In[15]:\n\n\nfeatures = ['morning','afternoon','evening','dawn','Sunday','Monday','Tuesday','Wednesday','Thursday','Friday','Saturday','BAYVIEW','CENTRAL','INGLESIDE','MISSION', 'NORTHERN','PARK','RICHMOND','SOUTHERN','TARAVAL','TENDERLOIN','0/0','0/1','0/2','1/1','1/2','2/2','Block 0','Block 1','Block 2','spring','summer','fall','winter']\n\n# 朴素贝叶斯建模,计算log_loss\nmodel = BernoulliNB()\nf = model.fit(training[features], training['crime'])\npredicted = np.array(model.predict_proba(testData[features]))\n\n\n# ## generate the result of predict\n\n# In[16]:\n\n\n#np.round_(predicted, decimals=2, out=predicted)\ncolmn = [\"ARSON\",\"ASSAULT\",\"BAD CHECKS\",\"BRIBERY\",\"BURGLARY\",\"DISORDERLY CONDUCT\",\"DRIVING UNDER THE INFLUENCE\",\"DRUG/NARCOTIC\",\"DRUNKENNESS\",\"EMBEZZLEMENT\",\"EXTORTION\",\"FAMILY OFFENSES\",\"FORGERY/COUNTERFEITING\",\"FRAUD\",\"GAMBLING\",\"KIDNAPPING\",\"LARCENY/THEFT\",\"LIQUOR LAWS\",\"LOITERING\",\"MISSING PERSON\",\"NON-CRIMINAL\",\"OTHER OFFENSES\",\"PORNOGRAPHY/OBSCENE MAT\",\"PROSTITUTION\",\"RECOVERED VEHICLE\",\"ROBBERY\",\"RUNAWAY\",\"SECONDARY CODES\",\"SEX OFFENSES FORCIBLE\",\"SEX OFFENSES NON FORCIBLE\",\"STOLEN PROPERTY\",\"SUICIDE\",\"SUSPICIOUS OCC\",\"TREA\",\"TRESPASS\",\"VANDALISM\",\"VEHICLE THEFT\",\"WARRANTS\",\"WEAPON LAWS\"]\n\nresult = pd.DataFrame(predicted, columns=colmn)\n\nresult.to_csv(path_or_buf=\"resultbaye.csv\", index=True, index_label = 'Id')\n\n# Any results you write to the current directory are saved as output.","repo_name":"sajedjalil/Data-Science-Pipeline-Detector","sub_path":"dataset/sf-crime/Bo 
Zhang/naive-bayes-methods.py","file_name":"naive-bayes-methods.py","file_ext":"py","file_size_in_byte":5860,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"54"} +{"seq_id":"71118932322","text":"import torch.nn as nn\nimport torch\nimport math\n\nfrom Aggregator_ct import MeanAggregator,RAW\nfrom utils_ct import *\nfrom settings import settings\n\nclass GELU(nn.Module):\n def forward(self, x):\n return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))\n\nclass ScaledDotProductAttention(nn.Module):\n \"\"\" Scaled Dot-Product Attention \"\"\"\n\n def __init__(self, temperature, attn_dropout=0.2):\n super().__init__()\n\n self.temperature = temperature\n self.dropout = nn.Dropout(attn_dropout)\n\n def forward(self, q, k, v, mask=None):\n attn = torch.matmul(q / self.temperature, k.transpose(2, 3))\n\n if mask is not None:\n attn = attn.masked_fill(mask, -1e9)\n\n attn = self.dropout(nn.functional.softmax(attn, dim=-1))\n output = torch.matmul(attn, v)\n\n return output, attn\n\nclass MultiHeadAttention(nn.Module):\n \"\"\" Multi-Head Attention module \"\"\"\n\n def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1, normalize_before=True):\n super().__init__()\n\n self.normalize_before = normalize_before\n self.n_head = n_head\n self.d_k = d_k\n self.d_v = d_v\n\n self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False)\n self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False)\n self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False)\n nn.init.xavier_uniform_(self.w_qs.weight)\n nn.init.xavier_uniform_(self.w_ks.weight)\n nn.init.xavier_uniform_(self.w_vs.weight)\n\n self.fc = nn.Linear(d_v * n_head, d_model)\n nn.init.xavier_uniform_(self.fc.weight)\n\n self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5, attn_dropout=dropout)\n\n self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, q, k, v, mask=None):\n d_k, 
d_v, n_head = self.d_k, self.d_v, self.n_head\n sz_b, len_q, len_k, len_v = q.size(0), q.size(1), k.size(1), v.size(1)\n\n residual = q\n if self.normalize_before:\n q = self.layer_norm(q)\n\n # Pass through the pre-attention projection: b x lq x (n*dv)\n # Separate different heads: b x lq x n x dv\n q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)\n k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)\n v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)\n\n # Transpose for attention dot product: b x n x lq x dv\n q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2)\n\n if mask is not None:\n mask = mask.unsqueeze(1) # For head axis broadcasting.\n\n output, attn = self.attention(q, k, v, mask=mask)\n\n # Transpose to move the head dimension back: b x lq x n x dv\n # Combine the last two dimensions to concatenate all the heads together: b x lq x (n*dv)\n output = output.transpose(1, 2).contiguous().view(sz_b, len_q, -1)\n output = self.dropout(self.fc(output))\n output += residual\n\n if not self.normalize_before:\n output = self.layer_norm(output)\n return output, attn\n\nclass PositionwiseFeedForward(nn.Module):\n \"\"\" Two-layer position-wise feed-forward neural network. 
\"\"\"\n\n def __init__(self, d_in, d_hid, dropout=0.1, normalize_before=True):\n super().__init__()\n\n self.normalize_before = normalize_before\n\n self.w_1 = nn.Linear(d_in, d_hid)\n self.w_2 = nn.Linear(d_hid, d_in)\n\n self.layer_norm = nn.LayerNorm(d_in, eps=1e-6)\n self.dropout = nn.Dropout(dropout)\n\n nn.init.xavier_uniform_(self.w_1.weight)\n nn.init.xavier_uniform_(self.w_2.weight)\n\n def forward(self, x):\n residual = x\n if self.normalize_before:\n x = self.layer_norm(x)\n\n x = nn.functional.gelu(self.w_1(x))\n x = self.dropout(x)\n x = self.w_2(x)\n x = self.dropout(x)\n x = x + residual\n\n if not self.normalize_before:\n x = self.layer_norm(x)\n return x\n\nclass EncoderLayer(nn.Module):\n \"\"\" Compose with two layers \"\"\"\n\n def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1, normalize_before=True):\n super(EncoderLayer, self).__init__()\n self.slf_attn = MultiHeadAttention(\n n_head, d_model, d_k, d_v, dropout=dropout, normalize_before=normalize_before)\n self.pos_ffn = PositionwiseFeedForward(\n d_model, d_inner, dropout=dropout, normalize_before=normalize_before)\n\n def forward(self, enc_input, non_pad_mask=None, slf_attn_mask=None):\n enc_output, enc_slf_attn = self.slf_attn(\n enc_input, enc_input, enc_input, mask=slf_attn_mask)\n enc_output *= non_pad_mask\n\n enc_output = self.pos_ffn(enc_output)\n enc_output *= non_pad_mask\n\n return enc_output, enc_slf_attn\n\nclass Encoder(nn.Module):\n \"\"\" A encoder model with self attention mechanism. 
\"\"\"\n def __init__(\n self,\n num_types, d_model, d_rnn, d_inner,\n n_layers, n_head, d_k, d_v, dropout):\n super().__init__()\n\n self.d_model = d_model\n\n # position vector, used for temporal encoding\n self.position_vec = torch.tensor(\n [math.pow(10000.0, 2.0 * (i // 2) / d_model*3) for i in range(d_model*3)],\n device=torch.device('cuda'))\n\n self.layer_stack = nn.ModuleList([\n EncoderLayer(d_model*3, d_inner, n_head, d_k, d_v, dropout=dropout, normalize_before=False)\n for _ in range(n_layers)])\n\n self.output_g = nn.Linear(d_model*3, d_model)\n\n # OPTIONAL recurrent layer, this sometimes helps\n self.rnn = RNN_layers(self.d_model*3, d_rnn)\n\n def temporal_enc(self, time, non_pad_mask):\n \"\"\"\n temporal encoding\n return real_batch * his_len * d_model\n \"\"\"\n\n result = time.unsqueeze(-1) / self.position_vec\n result[:, :, 0::2] = torch.sin(result[:, :, 0::2])\n result[:, :, 1::2] = torch.cos(result[:, :, 1::2])\n return result * non_pad_mask\n\n def get_attn_key_pad_mask(self, seq_k, seq_q):\n \"\"\"\n For masking out the padding part of key sequence.\n return real_batch*his_len*his_len\n \"\"\"\n\n # expand to fit the shape of key query attention matrix\n len_q = seq_q.size(1)\n padding_mask = seq_k.eq(0)\n padding_mask = padding_mask.unsqueeze(1).expand(-1, len_q, -1) # b x lq x lk\n return padding_mask\n\n def get_subsequent_mask(self, seq):\n \"\"\"\n For masking out the subsequent info, i.e., masked self-attention.\n return triu: real_batch*his_len*his_len\n \"\"\"\n\n sz_b, len_s = seq.size()\n subsequent_mask = torch.triu(\n torch.ones((len_s, len_s), device=seq.device, dtype=torch.uint8), diagonal=1)\n subsequent_mask = subsequent_mask.unsqueeze(0).expand(sz_b, -1, -1) # b x ls x ls\n return subsequent_mask\n\n def forward(self, event_type, event_time, non_pad_mask, sort_idx):\n \"\"\" Encode event sequences via masked self-attention. 
\"\"\"\n\n non_pad_mask_temp = non_pad_mask.reshape(non_pad_mask.shape[0], -1)\n event_time = event_time.cumsum(1)\n event_time = event_time*non_pad_mask_temp\n\n # prepare attention masks\n # slf_attn_mask is where we cannot look, i.e., the future and the padding\n slf_attn_mask_subseq = self.get_subsequent_mask(event_time)\n slf_attn_mask_keypad = self.get_attn_key_pad_mask(seq_k=event_time, seq_q=event_time)\n slf_attn_mask_keypad = slf_attn_mask_keypad.type_as(slf_attn_mask_subseq)\n slf_attn_mask = (slf_attn_mask_keypad + slf_attn_mask_subseq).gt(0)\n\n tem_enc = self.temporal_enc(event_time, non_pad_mask)\n\n enc_output = event_type\n for enc_layer in self.layer_stack:\n\n enc_output += tem_enc\n enc_output, _ = enc_layer(\n enc_output,\n non_pad_mask=non_pad_mask,\n slf_attn_mask=slf_attn_mask)\n\n #optional\n enc_output = self.rnn(enc_output, non_pad_mask)\n\n enc_output = enc_output[:, 0, :]\n enc_output = self.output_g(enc_output)\n\n _, ori_idx = sort_idx.sort()\n\n enc_output = torch.cat((enc_output, to_device(torch.zeros(len(sort_idx) - enc_output.shape[0],\n self.d_model))), dim=0)[ori_idx]\n return enc_output\n\nclass RNN_layers(nn.Module):\n \"\"\"\n Optional recurrent layers. This is inspired by the fact that adding\n recurrent layers on top of the Transformer helps language modeling.\n \"\"\"\n\n def __init__(self, d_model, d_rnn):\n super().__init__()\n\n self.rnn = nn.LSTM(d_model, d_rnn, num_layers=1, batch_first=True)\n self.projection = nn.Linear(d_rnn, d_model)\n\n def forward(self, data, non_pad_mask):\n lengths = non_pad_mask.squeeze(2).long().sum(1).cpu()\n pack_enc_output = nn.utils.rnn.pack_padded_sequence(\n data, lengths, batch_first=True, enforce_sorted=False)\n temp = self.rnn(pack_enc_output)[0]\n out = nn.utils.rnn.pad_packed_sequence(temp, batch_first=True)[0]\n\n out = self.projection(out)\n return out\n\nclass TemT(nn.Module):\n \"\"\" A sequence to sequence model with attention mechanism. 
\"\"\"\n\n def __init__(\n self, args,\n num_e, num_r):\n super().__init__()\n\n self.h_dim = args.n_hidden\n\n self.d_rnn = args.d_rnn\n\n self.d_inner = args.d_inner\n\n self.n_layers = args.n_layers\n\n self.n_head = args.n_head\n\n self.d_k = args.d_k\n\n self.d_v = args.d_v\n\n self.args = args\n\n self.encoder_s = Encoder(\n num_types=num_e,\n d_model=self.h_dim,\n d_rnn=self.d_rnn,\n d_inner=self.d_inner,\n n_layers=self.n_layers,\n n_head=self.n_head,\n d_k=self.d_k,\n d_v=self.d_v,\n dropout=args.dropout\n )\n\n self.encoder_o = Encoder(\n num_types=num_e,\n d_model=self.h_dim,\n d_rnn=self.d_rnn,\n d_inner=self.d_inner,\n n_layers=self.n_layers,\n n_head=self.n_head,\n d_k=self.d_k,\n d_v=self.d_v,\n dropout=args.dropout\n )\n\n self.gelu = GELU()\n\n self.num_e = num_e\n\n self.num_rels = num_r\n\n # convert hidden vectors into a scalar\n self.linear = nn.Linear(self.h_dim, num_e)\n\n # parameter for the weight of time difference\n self.alpha = nn.Parameter(torch.tensor(-0.1))\n\n # parameter for the softplus function\n self.beta = nn.Parameter(torch.tensor(1.0))\n\n # event type embedding\n self.ent_embeds = nn.Embedding(num_e + 1, args.embd_rank, padding_idx=0)\n\n # relation type embedding\n self.rel_embeds = nn.Embedding(num_r * 2, args.embd_rank, padding_idx=0)\n\n self.aggregator_s = MeanAggregator(self.h_dim, args.embd_rank, args.dropout, args.max_hist_len, args,gcn=False)\n self.aggregator_o = self.aggregator_s\n\n self.alpha_t = nn.Parameter(torch.zeros(self.num_e + 1, 1))\n\n nn.init.xavier_uniform_(self.alpha_t)\n\n self.base_t = nn.Parameter(torch.zeros(self.num_e + 1, 1))\n nn.init.xavier_uniform_(self.base_t)\n\n self.linear_h = nn.Linear(self.h_dim, args.embd_rank, bias=False)\n\n self.linear_inten_layer = nn.Linear(self.h_dim + 2 * args.embd_rank, args.embd_rank, bias=False)\n\n self.Softplus = nn.Softplus(beta=args.softrelu_scale)\n\n self.criterion_time = nn.CrossEntropyLoss()\n self.criterion_link = nn.CrossEntropyLoss()\n\n 
self.dropout = nn.Dropout(args.dropout)\n\n self.graph_dict = None\n\n self.raw_s_encoder = None\n self.raw_o_encoder = None\n\n self.start_layer = nn.Sequential(\n nn.Linear(self.h_dim + 3 * args.embd_rank, args.embd_rank, bias=True),\n self.gelu\n )\n\n self.converge_layer = nn.Sequential(\n nn.Linear(self.h_dim + 3 * args.embd_rank, args.embd_rank, bias=True),\n self.gelu\n )\n\n self.decay_layer = nn.Sequential(\n nn.Linear(self.h_dim + 3 * args.embd_rank, args.embd_rank, bias=True)\n , nn.Softplus(beta=10.0)\n )\n\n self.intensity_layer = nn.Sequential(\n nn.Linear(self.h_dim + 3 * args.embd_rank, args.embd_rank, bias=True)\n , nn.Softplus(beta=1.)\n )\n\n self.t_start_layer = nn.Sequential(\n nn.Linear(self.h_dim + 2 * args.embd_rank, args.embd_rank, bias=True),\n self.gelu\n )\n\n self.t_converge_layer = nn.Sequential(\n nn.Linear(self.h_dim + 2 * args.embd_rank, args.embd_rank, bias=True),\n self.gelu\n )\n\n self.t_decay_layer = nn.Sequential(\n nn.Linear(self.h_dim + 2 * args.embd_rank, args.embd_rank, bias=True)\n , nn.Softplus(beta=10.0)\n )\n\n self.t_intensity_layer = nn.Sequential(\n nn.Linear(self.h_dim + 2 * args.embd_rank, args.embd_rank, bias=True)\n , nn.Softplus(beta=1.)\n )\n\n\n def get_non_pad_mask(self, seq):\n \"\"\"\n Get the non-padding positions.\n return real_batch*his_len*1\n \"\"\"\n\n assert seq.dim() == 2\n return seq.ne(0).type(torch.float).unsqueeze(-1)\n\n def forward(self, input, mode_tp, mode_lk, graph_dict):\n\n if mode_lk == 'Training':\n quadruples, s_history_event_tp, s_history_event_lk, o_history_event_tp, o_history_event_lk, \\\n s_history_dt_tp, s_history_dt_lk, o_history_dt_tp, o_history_dt_lk, dur_last_tp, sub_synchro_dt_tp, obj_synchro_dt_tp = input\n self.graph_dict = graph_dict\n elif mode_lk in ['Valid', 'Test']:\n quadruples, s_history_event_tp, s_history_event_lk, o_history_event_tp, o_history_event_lk, \\\n s_history_dt_tp, s_history_dt_lk, o_history_dt_tp, o_history_dt_lk, dur_last_tp, sub_synchro_dt_tp, 
obj_synchro_dt_tp,\\\n val_subcentric_fils_lk, val_objcentric_fils_lk= input\n self.graph_dict = graph_dict\n else:\n raise ValueError('Not implemented')\n\n #prepare model input\n s = quadruples[:, 0]\n r = quadruples[:, 1]\n o = quadruples[:, 2]\n t = quadruples[:, 3]\n\n self.raw_s_encoder = RAW(self.args, graph_dict).to('cuda')\n s_raw = self.raw_s_encoder(s, t, self.ent_embeds)\n\n self.raw_o_encoder = RAW(self.args, graph_dict).to('cuda')\n o_raw = self.raw_o_encoder(o, t, self.ent_embeds)\n\n if isListEmpty(s_history_event_tp) or isListEmpty(o_history_event_tp):\n error_tp, density_tp, dt_tp, mae_tp, dur_last_nonzero_tp, den1_tp, den2_tp, tpred, abs_error = [None] * 9\n else:\n # Aggregating concurrent events\n s_packed_input_tp, s_packed_dt_tp, s_idx_tp, s_nonzero_tp = \\\n self.aggregator_s(s_history_event_tp, s, r, o, t, s_raw,\n self.ent_embeds, self.rel_embeds(to_device(torch.arange(0, self.num_rels, 1))),\n s_history_dt_tp)\n\n o_packed_input_tp, o_packed_dt_tp, o_idx_tp, o_nonzero_tp = \\\n self.aggregator_o(o_history_event_tp, o, r, s, t, o_raw, self.ent_embeds,\n self.rel_embeds(to_device(torch.arange(self.num_rels, 2 * self.num_rels, 1))),\n o_history_dt_tp)\n\n # compute hidden state\n sub_non_pad_mask = self.get_non_pad_mask(s_packed_dt_tp)\n obj_non_pad_mask = self.get_non_pad_mask(o_packed_dt_tp)\n\n sub_hidden_tp = self.encoder_s(s_packed_input_tp, s_packed_dt_tp, sub_non_pad_mask, s_idx_tp)\n obj_hidden_tp = self.encoder_o(o_packed_input_tp, o_packed_dt_tp, obj_non_pad_mask, o_idx_tp)\n\n dur_last_tp = to_device(torch.tensor(dur_last_tp))\n\n dur_non_zero_idx_tp = (dur_last_tp > 0).nonzero().squeeze()\n\n dur_last_nonzero_tp = dur_last_tp[dur_non_zero_idx_tp]\n\n # add synchro_dt_tp to synchronize the concatenated intensity from subject centric and object centeric\n sub_synchro_dt_tp = to_device(torch.tensor(sub_synchro_dt_tp, dtype=torch.float))\n sub_synchro_non_zero_idx_tp = (sub_synchro_dt_tp >= 0).nonzero().squeeze()\n\n 
sub_synchro_dt_nonzero_tp = sub_synchro_dt_tp[sub_synchro_non_zero_idx_tp]\n assert (torch.all(torch.eq(sub_synchro_non_zero_idx_tp, dur_non_zero_idx_tp)))\n\n obj_synchro_dt_tp = to_device(torch.tensor(obj_synchro_dt_tp, dtype=torch.float))\n obj_synchro_non_zero_idx_tp = (obj_synchro_dt_tp >= 0).nonzero().squeeze()\n obj_synchro_dt_nonzero_tp = obj_synchro_dt_tp[obj_synchro_non_zero_idx_tp]\n assert (torch.all(torch.eq(obj_synchro_non_zero_idx_tp, dur_non_zero_idx_tp)))\n\n if mode_tp == 'MSE':\n dur_last_nonzero_tp = dur_last_nonzero_tp.type(torch.float)\n sub_inten_tp = self.compute_inten_t(dur_non_zero_idx_tp, sub_synchro_dt_nonzero_tp,\n t, sub_hidden_tp, s, o, r, self.rel_embeds.weight[:self.num_rels], s_raw)\n obj_inten_tp = self.compute_inten_t(dur_non_zero_idx_tp, obj_synchro_dt_nonzero_tp,\n t, obj_hidden_tp, s, o, r, self.rel_embeds.weight[self.num_rels:], o_raw)\n dt_tp, error_tp, density_tp, mae_tp, den1_tp, den2_tp, tpred, abs_error = self.predict_t(sub_inten_tp,\n obj_inten_tp,\n dur_last_nonzero_tp)\n\n else:\n raise ValueError('Not implemented')\n\n if isListEmpty(s_history_event_lk) or isListEmpty(o_history_event_lk):\n sub_rank, obj_rank, cro_entr_lk = [None] * 3\n if mode_lk == 'Training':\n return cro_entr_lk, error_tp, density_tp, dt_tp, mae_tp, dur_last_nonzero_tp, den1_tp, den2_tp, tpred, abs_error\n elif mode_lk in ['Valid', 'Test']:\n return sub_rank, obj_rank, cro_entr_lk, error_tp, density_tp, dt_tp, mae_tp, dur_last_nonzero_tp, den1_tp, den2_tp, tpred, abs_error\n else:\n raise ValueError('Not implemented')\n else:\n #Aggregating concurrent events\n s_packed_input_lk, s_packed_dt_lk, s_idx_lk, s_nonzero_lk = \\\n self.aggregator_s(s_history_event_lk, s, r, o, t, s_raw,\n self.ent_embeds, self.rel_embeds(to_device(torch.arange(0, self.num_rels, 1))),\n s_history_dt_lk)\n\n o_packed_input_lk, o_packed_dt_lk, o_idx_lk, o_nonzero_lk = \\\n self.aggregator_o(o_history_event_lk, o, r, s, t, o_raw, self.ent_embeds,\n 
self.rel_embeds(to_device(torch.arange(self.num_rels, 2 * self.num_rels, 1))),\n o_history_dt_lk)\n\n # compute hidden state\n sub_non_pad_mask = self.get_non_pad_mask(s_packed_dt_lk)\n obj_non_pad_mask = self.get_non_pad_mask(o_packed_dt_lk)\n\n sub_hidden_lk = self.encoder_s(s_packed_input_lk, s_packed_dt_lk, sub_non_pad_mask, s_idx_lk)\n obj_hidden_lk = self.encoder_o(o_packed_input_lk, o_packed_dt_lk, obj_non_pad_mask, o_idx_lk)\n\n # compute intensity\n if mode_lk == 'Training':\n sub_cro_entr_loss = self.predict_link(sub_hidden_lk, s, o, r, self.rel_embeds(to_device(torch.arange(0,self.num_rels, 1))), mode_lk, s_raw)\n obj_cro_entr_loss = self.predict_link(obj_hidden_lk, o, s, r, self.rel_embeds(to_device(torch.arange(self.num_rels, 2*self.num_rels, 1))), mode_lk, o_raw)\n cro_entr_lk = (sub_cro_entr_loss + obj_cro_entr_loss) / 2\n return cro_entr_lk, error_tp, density_tp, dt_tp, mae_tp, dur_last_nonzero_tp, den1_tp, den2_tp, tpred, abs_error\n\n elif mode_lk in ['Valid', 'Test']:\n sub_cro_entr_loss, sub_rank = self.predict_link(sub_hidden_lk, s, o, r, self.rel_embeds(to_device(torch.arange(0,self.num_rels, 1))), mode_lk, s_raw,\n val_fils = val_subcentric_fils_lk)\n obj_cro_entr_loss, obj_rank = self.predict_link(obj_hidden_lk, o, s, r, self.rel_embeds(to_device(torch.arange(self.num_rels, 2*self.num_rels, 1))), mode_lk, o_raw,\n val_fils = val_objcentric_fils_lk)\n cro_entr_lk = (sub_cro_entr_loss + obj_cro_entr_loss) / 2\n return sub_rank, obj_rank, cro_entr_lk, error_tp, density_tp, dt_tp, mae_tp, dur_last_nonzero_tp, den1_tp, den2_tp, tpred, abs_error\n\n else:\n raise ValueError('Not implemented')\n\n def predict_link(self, hiddens_ti, actor1, actor2, r, rel_embeds, mode_lk, actor_raw, val_fils=None):\n start_point = self.start_layer(torch.cat((self.ent_embeds.weight[actor1.long()], actor_raw,\n self.linear_h(hiddens_ti),\n rel_embeds[r.long()]), dim=1))\n converge_point = self.converge_layer(torch.cat((self.ent_embeds.weight[actor1.long()], 
actor_raw,\n self.linear_h(hiddens_ti),\n rel_embeds[r.long()]), dim=1))\n omega = self.decay_layer(torch.cat((self.ent_embeds.weight[actor1.long()], actor_raw,\n self.linear_h(hiddens_ti),\n rel_embeds[r.long()]), dim=1))\n\n inten_raw = torch.tanh(converge_point + (start_point - converge_point) * torch.exp(- omega))\n intens = self.Softplus(inten_raw.mm(self.ent_embeds.weight.transpose(0, 1))) # shape of pred_intens: num_batch*num_e\n\n cro_entr_loss = self.criterion_link(intens, actor2.to(torch.int64))\n ranks = []\n if mode_lk == 'Training':\n return cro_entr_loss\n elif mode_lk in ['Valid', 'Test']:\n ground = intens.gather(1, actor2.to(torch.int64).view(-1, 1))\n assert (len(val_fils) == intens.shape[0])\n for i in range(len(val_fils)):\n if self.args.filtering:\n intens[i, :][val_fils[i]] = 0\n intens[i, actor2[i]] = ground[i]\n pred_comp1 = (intens[i, 1:] > ground[i]).sum().item() + 1\n ranks.append(pred_comp1)\n return cro_entr_loss, ranks\n else:\n raise ValueError('Not implemented')\n\n def compute_inten_t(self, non_zero_idx, synchro_dt_nonzero_tp, t, hidden_tp, actors, another_actors, r, rel_embeds, actor_raw):\n hmax = settings['time_horizon']\n timestep = settings['CI']\n n_samples = int(hmax / timestep) + 1\n dt = to_device(torch.linspace(0, hmax, n_samples).repeat(non_zero_idx.shape[0], 1)\n .transpose(0, 1)) + synchro_dt_nonzero_tp[None, :]\n\n start_point = self.t_start_layer(torch.cat((self.ent_embeds.weight[actors[non_zero_idx].long()],\n hidden_tp[non_zero_idx],\n rel_embeds[r[non_zero_idx].long()]),dim=1))\n converge_point = self.t_converge_layer(torch.cat((self.ent_embeds.weight[actors[non_zero_idx].long()],\n hidden_tp[non_zero_idx],\n rel_embeds[r[non_zero_idx].long()]),dim=1))\n omega = self.t_decay_layer(torch.cat((self.ent_embeds.weight[actors[non_zero_idx].long()],\n hidden_tp[non_zero_idx],\n rel_embeds[r[non_zero_idx].long()]),dim=1))\n\n inten_raw = torch.tanh(converge_point + (start_point - converge_point) * torch.exp(- omega[None, 
:, :] * dt[:, :, None]))\n o = self.ent_embeds.weight[another_actors[non_zero_idx].long()].repeat(n_samples, 1, 1)\n\n intens = self.Softplus((inten_raw * o).sum(dim=2))\n\n return intens\n\n def predict_t(self, sub_inten_t, obj_inten_t, gt_t):\n timestep = settings['CI']\n hmax = settings['time_horizon']\n n_samples = int(hmax / timestep) + 1\n dt = to_device(torch.linspace(0, hmax, n_samples).repeat(gt_t.shape[0], 1).transpose(0, 1))\n intens = (sub_inten_t + obj_inten_t) / 2\n integral_ = torch.cumsum(timestep * intens, dim=0)\n density = (intens * torch.exp(-integral_))\n t_pit = dt * density\n estimate_dt = (timestep * 0.5 * (t_pit[1:] + t_pit[:-1])).sum(dim=0)\n mse = nn.MSELoss()\n error_dt = mse(estimate_dt, gt_t)\n\n with torch.no_grad():\n abs_error = (estimate_dt - gt_t).abs()\n mae = abs_error.mean()\n return dt, error_dt, density, mae, intens, torch.exp(-integral_), estimate_dt.detach(), abs_error\n\n\n\n\n\n\n\n\n","repo_name":"wanwano/TKGR-RHETNE","sub_path":"model_ct.py","file_name":"model_ct.py","file_ext":"py","file_size_in_byte":25415,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"2363690945","text":"try:\n import cv2\nexcept ImportError:\n print('Couldn\\'t find opencv so trying to use the fallback')\n from _cv2_fallback import cv2\n\nimport pandas as pd\nimport numpy as np\n\nIMG_WIDTH = 208\nIMG_HEIGHT = 156 \nNUM_IMAGES = 9\n\ndef show_new_set(set1, set2, isdup, gen_method):\n\n combo_w = 0\n combo_image = np.zeros((2 * IMG_HEIGHT, NUM_IMAGES * IMG_WIDTH, 3), np.uint8)\n\n #if not isinstance(set1, unicode) and not isinstance(set1, str):\n if not isinstance(set1, str):\n print( \"str1 has no images\" )\n return\n\n #if not isinstance(set2, unicode) and not isinstance(set2, str):\n if not isinstance(set2, str):\n print( \"str2 has no images\" )\n return\n\n if len(set1) == 0 or len(set2) == 0:\n print( \"one has no images\" )\n return\n\n for s1 in set1.split(\",\"):\n s1 = 
s1.strip()\n idx = s1[-2:]\n path = '../input/images/Images_%s/%s/%s.jpg' % ( idx[0], idx, s1 )\n if idx[0] == '0':\n path = '../input/images/Images_%s/%s/%s.jpg' % ( idx[0], idx[1], s1 )\n\n print(\"Attempt load %s\"%(path))\n img = cv2.imread(path, cv2.IMREAD_COLOR)\n h = img.shape[0]\n w = img.shape[1]\n print(w,h)\n if h<1 or w<1 or img is None:\n print(\"Corrupt file %s\"%(path))\n continue\n\n combo_image[0:h, combo_w:combo_w+w] = img # copy the obj into the combo image\n combo_w += IMG_WIDTH\n\n combo_w = 0\n for s2 in set2.split(\",\"):\n s2 = s2.strip()\n idx = s2[-2:]\n path = '../input/images/Images_%s/%s/%s.jpg' % ( idx[0], idx, s2 )\n if idx[0] == '0':\n path = '../input/images/Images_%s/%s/%s.jpg' % ( idx[0], idx[1], s2 )\n print(\"Attempt load %s\"%(path))\n img = cv2.imread(path, cv2.IMREAD_COLOR)\n h = img.shape[0]\n w = img.shape[1]\n print(w,h)\n if h<1 or w<1 or img is None:\n print(\"Corrupt file %s\"%(path))\n continue\n combo_image[IMG_HEIGHT:IMG_HEIGHT+h, combo_w:combo_w+w] = img # copy the obj into the combo image\n combo_w += IMG_WIDTH\n\n dupped = 'NotDup'\n if isdup:\n dupped = 'Dup'\n\n cv2.imshow('%s:%d, %s vs %s' % (dupped, gen_method, set1, set2), combo_image)\n\n print(\"Showing images\")\n cv2.waitKey()\n cv2.destroyAllWindows()\n\ndef show_datasets( infofilename, pairfilename ):\n info = pd.read_csv(infofilename, encoding=\"utf-8\")\n df = pd.read_csv(pairfilename)\n info = info.drop(['title','description','attrsJSON'], axis = 1)\n df = pd.merge(pd.merge(df, info, how = 'inner', left_on = 'itemID_1', right_on = 'itemID'), info, how = 'inner', left_on = 'itemID_2', right_on = 'itemID')\n\n df[['images_array_x', 'images_array_y', 'isDuplicate', 'generationMethod']].apply(lambda x:show_new_set(x[0],x[1], x[2], x[3]), axis=1)\n\nshow_datasets(\"../input/ItemInfo_train.csv\", \"../input/ItemPairs_train.csv\")\n","repo_name":"sajedjalil/Data-Science-Pipeline-Detector","sub_path":"dataset/avito-duplicate-ads-detection/Gerard 
Toonstra/visualize-images.py","file_name":"visualize-images.py","file_ext":"py","file_size_in_byte":2906,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"54"} +{"seq_id":"18497662720","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"Symbolic derivation of Hermite shape functions using SymPy.\n\nCreated on Thu Nov 2 13:38:34 2017\n\n@author: Juha Jeronen \n\"\"\"\n\nimport sympy as sy\n\ndef construct(k):\n \"\"\"Construct Hermite shape functions.\n\n The result can be used for interpolating a function and its first k derivatives on [0,1].\"\"\"\n order = 2*k + 1 # minimum polynomial order that has enough degrees of freedom\n *A,x = sy.symbols(\"a0:%d,x\" % (order+1))\n\n # Create the polynomial and its derivatives\n #\n w = sum(a*x**i for i,a in enumerate(A))\n λw = lambda x0: w.subs({x: x0})\n\n wp = [sy.diff(w, x, i) for i in range(1,1+k)]\n # see my lecture notes, sec. 5.8\n λwp = [(lambda expr: lambda x0: expr.subs({x: x0}))(expr) for expr in wp]\n\n # Formulate the interpolation conditions\n #\n zero,one = sy.S.Zero, sy.S.One\n w0,w1 = sy.symbols(\"w0, w1\")\n eqs = [λw(zero) - w0, # see sy.solve()\n λw(one) - w1]\n dofs = [w0, w1]\n\n for i,f in enumerate(λwp):\n d0_name = \"w%s0\" % ((i+1) * \"p\")\n d1_name = \"w%s1\" % ((i+1) * \"p\")\n d0,d1 = sy.symbols(\"%s, %s\" % (d0_name, d1_name))\n eqs.extend([f(zero) - d0,\n f(one) - d1])\n dofs.extend([d0, d1])\n\n # Solve the interpolation conditions for the polynomial coefficients aj\n #\n coeffs = sy.solve(eqs, A)\n\n # Substitute solution into w, collect w.r.t. 
the DOFs\n #\n solution = w.subs(coeffs)\n solution = sy.collect(sy.expand(solution), dofs)\n\n # Extract the shape functions\n #\n N = [solution.coeff(dof) for dof in dofs]\n\n return (dofs, N)\n\ndef main():\n k = 1\n syms,N = construct(k)\n\n for dof,expr in zip(syms,N):\n print(\"Shape function for %s:\" % (dof))\n print(expr)\n\n # Plot them\n #\n import numpy as np\n import matplotlib.pyplot as plt\n plt.figure(1)\n plt.clf()\n x = sy.symbols(\"x\")\n λN = [sy.lambdify(x, expr, modules=\"numpy\") for expr in N]\n xx = np.linspace(0,1, 1001)\n for f in λN:\n plt.plot(xx, f(xx))\n plt.grid(b=True, which=\"both\")\n plt.xlabel(r\"$x$\")\n plt.ylabel(r\"$N_i(x)$\")\n plt.title(r\"$C^%d$ Hermite shape functions\" % (k))\n plt.show()\n\nif __name__ == '__main__':\n main()\n","repo_name":"TUTElectromechanics/mm-codegen","sub_path":"extras/hermite_element.py","file_name":"hermite_element.py","file_ext":"py","file_size_in_byte":2292,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"34776679160","text":"import numpy as np\n\nfrom glue.core.subset import SliceSubsetState\nfrom glue.core.exceptions import IncompatibleAttribute\nfrom glue.core.link_manager import pixel_cid_to_pixel_cid_matrix\n\n__all__ = ['PixelSubsetState']\n\n\nclass PixelSubsetState(SliceSubsetState):\n\n def copy(self):\n return PixelSubsetState(self.reference_data, self.slices)\n\n def to_array(self, data, att):\n\n try:\n\n return super(PixelSubsetState, self).to_array(data, att)\n\n except IncompatibleAttribute:\n\n if data is not self.reference_data:\n pix_coord_out = self._to_linked_pixel_coords(data)\n pix_coord_out = tuple([slice(None) if p is None else slice(p, p + 1) for p in pix_coord_out])\n return data[att, pix_coord_out]\n\n raise IncompatibleAttribute()\n\n def _to_linked_pixel_coords(self, data):\n\n # Determine which pixel dimensions are being sliced over\n dimensions = [idim for idim, slc in enumerate(self.slices) if 
slc.start is not None]\n\n # Determine pixel to pixel correlation matrix\n matrix = pixel_cid_to_pixel_cid_matrix(self.reference_data, data)\n\n # Find pixel dimensions in 'data' that are correlated\n correlated_dims = np.nonzero(np.any(matrix[dimensions], axis=0))[0]\n\n # Check that if we do the operation backwards we just get the\n # original dimensions back\n check_dimensions = np.nonzero(np.any(matrix[:, correlated_dims], axis=1))[0]\n\n if np.array_equal(dimensions, check_dimensions):\n pix_coord_in = tuple([slice(0, 1) if slc.start is None else slc for slc in self.slices])\n pix_coord_out = []\n for idim, pix_cid in enumerate(data.pixel_component_ids):\n if idim in correlated_dims:\n coord = int(np.round(self.reference_data[pix_cid, pix_coord_in].ravel()[0]))\n else:\n coord = None\n pix_coord_out.append(coord)\n\n return pix_coord_out\n\n raise IncompatibleAttribute()\n\n def get_xy(self, data, dim1, dim2):\n pix_coord_out = self._to_linked_pixel_coords(data)\n if pix_coord_out[dim1] is None or pix_coord_out[dim2] is None:\n raise IncompatibleAttribute\n else:\n return pix_coord_out[dim1], pix_coord_out[dim2]\n","repo_name":"glue-viz/glue","sub_path":"glue/viewers/image/pixel_selection_subset_state.py","file_name":"pixel_selection_subset_state.py","file_ext":"py","file_size_in_byte":2343,"program_lang":"python","lang":"en","doc_type":"code","stars":699,"dataset":"github-code","pt":"54"} +{"seq_id":"74565218721","text":"from tkinter import *\nfrom tkinter import ttk\nfrom tkinter.ttk import Progressbar\nfrom tkmacosx import Button\nfrom PIL import Image, ImageTk\nfrom sys import platform\nimport tkmacosx\n\n# Main declaration: folder and background color app\n\nbg_main = \"#BFBDC1\"\nbg_bar = \"#37323E\"\nbg_item = \"#6D6A75\"\nbg_item_border = \"#BFBDC1\"\nbg_button = \"#DE9E36\"\nbg_button_onclick = \"#e4ae58\"\nbg_button_resume = \"#5fa769\"\nbg_button_resume_onclick = \"#78b581\"\nbg_delete = \"#f34958\"\nbg_delete_onclick = \"#f56e7a\"\n\n\nclass Gui:\n 
def __init__(self, root, downloader):\n self.folder_path = \"\"\n self.downloader = downloader\n\n # Create a frame element to put inside the root window\n self.frame = Frame(root, bg=bg_main)\n self.frame.pack(fill=\"both\", expand=True)\n\n # Create a input_frame that will contain the buttons\n self.input_frame = Frame(self.frame, bg=bg_bar, padx=5, pady=5)\n self.input_frame.grid_columnconfigure(0, weight=1)\n\n # Define and place the 2 buttons inside the input_frame element\n self.label_url = Label(self.input_frame, text=\"Insert URL\", bg=bg_bar, fg=\"white\", padx=5, pady=5, anchor=\"w\")\n self.label_url.grid(row=0, column=0, sticky=\"nsew\")\n\n if platform == 'darwin':\n self.entry_url = Entry(self.input_frame, bg=\"white\", fg=\"black\", width=50)\n else:\n self.entry_url = Entry(self.input_frame, bg=\"white\", fg=\"black\", width=80)\n\n self.entry_url.grid(row=0, column=1)\n self.button_download = Button(self.input_frame, text=\"Download\", bg=bg_button, fg=\"black\",\n activebackground=bg_button_onclick,\n activeforeground=\"black\", borderless=True, focusthickness=0,\n command=lambda: self.downloader.pass_url_data(self),\n padx=10, pady=10)\n self.button_download.grid(row=0, column=2, sticky=\"nsew\", padx=10, pady=10)\n self.label_directory = Label(self.input_frame, text=\"SAVE directory\", bg=bg_bar, fg=\"white\", padx=5, pady=5,\n anchor=\"w\")\n self.label_directory.grid(row=1, column=0, sticky=\"nsew\")\n\n if platform == 'darwin':\n self.entry_directory = Entry(self.input_frame, bg=\"white\", fg=\"black\", width=50)\n else:\n self.entry_directory = Entry(self.input_frame, bg=\"white\", fg=\"black\", width=80)\n\n self.entry_directory.insert(END, self.folder_path)\n self.entry_directory.grid(row=1, column=1)\n self.button_browse = Button(self.input_frame, text=\"Browse\", bg=bg_button, fg=\"black\",\n activebackground=bg_button_onclick,\n activeforeground=\"black\", borderless=True, focusthickness=0,\n command=lambda: 
self.downloader.browse_button(self),\n padx=10, pady=10)\n self.button_browse.grid(row=1, column=2, sticky=\"nsew\", padx=10, pady=10)\n\n self.input_frame.pack(fill=\"x\")\n\n self.download_frame = tkmacosx.SFrame(self.frame, bg=bg_main, autohidescrollbar=True, mousewheel=True,\n scrollbarwidth=14)\n self.download_frame.pack(fill=\"both\", expand=True)\n\n\nclass GuiItem:\n\n def __init__(self, gui, f_name, event_thread):\n # Create a frame_item representing the element in download\n self.frame_item = Frame(gui.download_frame, bg=bg_item, padx=5, pady=5, highlightbackground=bg_item_border,\n highlightcolor=bg_item_border, highlightthickness=1)\n\n self.img = Image.open(\"./icons/download_icon_win.png\")\n if platform == 'darwin':\n self.img = Image.open(\"./icons/download_icon_mac.png\").convert(\"RGB\")\n self.render = ImageTk.PhotoImage(self.img.resize((54, 54), Image.ANTIALIAS))\n self.label = Label(self.frame_item, bg=bg_item, image=self.render)\n self.label.image = self.render\n self.label.grid(row=0, column=0, rowspan=2)\n\n # Title of the element in download\n self.title = Label(self.frame_item, bg=bg_item, fg=\"white\", text=f_name, padx=5, pady=5, anchor=\"w\")\n self.title.config(font=(\"Arial\", \"15\"))\n self.title.grid(row=0, column=1, sticky=\"nsew\")\n\n # Setting a theme for the progress bar (on OSX)\n self.s = ttk.Style()\n self.s.theme_use('default')\n self.s.configure(\"bar.Horizontal.TProgressbar\", troughcolor='#BFBDC1', bordercolor='#BFBDC1',\n background='#5fa769', lightcolor='#5fa769', darkcolor='#5fa769', borderless=True)\n\n # Define a progress bar\n self.progress = Progressbar(self.frame_item, orient=HORIZONTAL, mode=\"determinate\",\n style=\"bar.Horizontal.TProgressbar\")\n self.progress['value'] = 0\n self.progress.grid(row=1, column=1, sticky=\"nsew\")\n\n # Labels for percentage and size\n self.percent = StringVar()\n self.percent.set(\"0 %\")\n self.label_percentage = Label(self.frame_item, textvariable=self.percent, padx=5, 
anchor=\"w\", bg=bg_item,\n fg=\"white\")\n self.label_percentage.config(font=(\"Arial\", \"13\"))\n self.label_percentage.grid(row=0, column=2)\n self.size = StringVar()\n self.size.set(\"0 KB\")\n self.label_size = Label(self.frame_item, textvariable=self.size, padx=5, anchor=\"w\", bg=bg_item, fg=\"white\")\n self.label_size.config(font=(\"Arial\", \"11\"))\n self.label_size.grid(row=1, column=2)\n\n # Buttons for pause/resume and delete downloads\n self.btn_text = StringVar()\n self.btn_text.set(\"Pause\")\n self.button_pause_resume = Button(self.frame_item, textvariable=self.btn_text, bg=bg_button_resume, fg=\"white\",\n activebackground=bg_button_resume_onclick, borderless=True, focusthickness=0,\n command=lambda: gui.downloader.paused(event_thread), padx=5, pady=5)\n self.button_pause_resume.grid(row=0, column=3)\n self.button_delete = Button(self.frame_item, text=\"Delete\", bg=bg_delete, fg=\"white\",\n activebackground=bg_delete_onclick, borderless=True, focusthickness=0,\n command=lambda: gui.downloader.delete(self.frame_item), padx=5, pady=5)\n self.button_delete.grid(row=1, column=3)\n\n self.frame_item.pack(fill=\"x\", expand=True)\n self.frame_item.columnconfigure(1, weight=1)\n","repo_name":"giuliocalamai14/HCI-Download-Manager","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":6639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"26183845383","text":"import sensor, image, time, lcd\n\nlcd.init(freq=20000000)\n\nsensor.reset(dual_buff=True)\nsensor.set_pixformat(sensor.RGB565)\nsensor.set_framesize(sensor.QVGA)\nsensor.run(1)\nsensor.skip_frames(40)\n\n\nwhile(True):\n img = sensor.snapshot()\n lcd.display(img)\n","repo_name":"jcubuntu/sdvaix","sub_path":"03_CamDisplay.py","file_name":"03_CamDisplay.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} 
+{"seq_id":"16506634940","text":"try:\n from setuptools import setup, find_packages\nexcept ImportError:\n from distutils.core import setup\n\nconfig = {\n 'description': 'Trajectory generation for TORQ Quadrotors',\n 'author': 'Gene Merewether',\n 'url': 'https://github.com/genemerewether/traj',\n 'download_url': 'https://github.com/genemerewether/traj',\n 'author_email': 'genemerewether@gmail.com',\n 'version': '0.1',\n 'setup_requires': ['numpy>=1.12.0'],\n 'install_requires': ['setuptools', 'numpy>=1.12.0', 'scipy>=0.19.0',\n 'pyyaml', 'future', 'rdp', 'lxml','transforms3d', 'cvxopt'],\n 'packages': find_packages(exclude=[\"*.tests\", \"*.tests.*\", \"tests.*\", \"tests\"]),\n 'name': 'traj'\n}\n\nsetup(**config)\n","repo_name":"genemerewether/torq","sub_path":"traj/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"54"} +{"seq_id":"2075511305","text":"import os\nimport random\n\nclass Player:\n # Initialize player values\n prizeDict = {1 : 1000, 2 : 2000, 3: 5000, 4 : 10000, 5: 25000, 6 : 50000, 7 : 100000, 8: 250000, 9 : 500000, 10 : 1000000}\n def __init__(self, lives = 3, ff = True, hint = True, double = True, level = 1, winnings = 0):\n self.level = level\n self.winnings = winnings\n self.lives = lives\n self.ff = ff\n self.hint = hint\n self.double = double\n \n # Update player status (Win/Loss)\n def update(self, result):\n if result == \"Win\":\n self.winnings = Player.prizeDict[self.level]\n self.level += 1\n elif result == \"Loss\":\n self.lives -= 1\n\n # Use powerup\n def powerUp(self, type):\n if type == \"ff\":\n if self.ff == True:\n self.ff = False\n return \"ff\"\n else:\n return print(\"Fifty-Fifty Unavailable.\")\n elif type == \"hint\":\n if self.hint == True:\n self.hint = False\n return \"hint\"\n else:\n return print(\"Hint Unavailable.\")\n elif type == \"double\":\n if self.double == True:\n self.double = False\n return 
\"double\"\n else:\n return print(\"Double Unavailable\")\n\n\n\ndef main():\n diff = \"\"\n inputList = [\"easy\", \"medium\", \"hard\", \"test\"]\n # Interface & Explanation for players\n while diff.lower() not in inputList:\n print(\"Welcome to Who Wants To Be A Millionaire!\\nPlease choose a difficulty\\nEasy | Medium | Hard\")\n print(\"Enter Hint for an explanation\")\n diff = input()\n\n if diff.lower() == \"hint\":\n print(\"Easy Difficulty : 3 Lives, Full Powerups | Fifty-Fifty: Delete 2 Wrong Answers | Hint: Self explanatory to be honest | Double: Double the wins, double the risk\")\n print(\"Medium Difficulty: 3 Lives, no powerups\")\n print(\"Hard Difficulty : 1 Life, no powerups\\n\\n\\n\")\n \n if diff.lower() == \"easy\":\n player = Player()\n\n elif diff.lower() == \"medium\":\n player = Player(3, False, False, False)\n\n elif diff.lower() == \"hard\":\n player = Player(1, False, False, False)\n #Lists and Dicts filled with Answer Keys and their associated questions\n qnaDict = {1 : \"a\", 2 : \"c\", 3 : \"b\", 4 : \"d\", 5 : \"b\", 6 : \"a\", 7 : \"d\", 8 : \"c\", 9 : \"a\", 10 : \"b\"}\n questionsList = [\"A wildlife management military operation occurred in Australia in 1932, this event is also known as\", \"Vietnam's national currency is\", \"Who founded the popular sportswear brand Adidas?\", \"Who is the current CEO of Honda Motor Company\", \"In the UK, the abbreviation NHS stands for National what Service?\", \"What name is given to the revolving belt machinery in an airport that delivers checked luggage from the plane to baggage reclaim?\", \"The hammer and sickle is one of the most recognisable symbols of which political ideology?\", \"What does the word loquacious mean?\", \"Which of these religious observances lasts for the shortest period of time during the calendar year?\", \"In 1718, which pirate died in a battle off the coast of what is now North Carolina?\"]\n answersDict = {1 : [\"The Great Emu War\", \"Operation Zebra\", 
\"Integrated National Resources Management\", \"The Washburn Fire\"], 2 : [\"Ding\", \"RMB\", \"Dong\", \"Baht\"], 3 : [\"Adi Dassler\", \"Adolf Dassler\", \"Adira Dast\", \"Bill Bowerman\"], 4 : [\"Hiroshi Honda\", \"Suzune Miru\", \"Kiichiro Toyonda\", \"Toshihiro Mibe\"], 5 : [\"Humanity\", \"Health\", \"Household\", \"Honour\"], 6 : [\"Carousel\", \"Revolver\", \"Belt Track\", \"Baggage Claim\"], 7 : [\"Socialism\", \"Capitalism\", \"Neo-Nazism\", \"Communism\"], 8 : [\"Extremely Hungry\", \"Irritable\", \"Chatty\", \"Impatient\"], 9 : [\"Diwali\", \"Lent\", \"Ramadhan\", \"Easter\"], 10 : [\"Jack Sparrow\", \"Blackbeard\", \"Calico Jack\", \"William Kidd\"]}\n ffLst = [\"The Great Emu War\", \"Dong\", \"Adolf Dassler\", \"Toshihiro Mibe\", \"Health\", \"Carousel\", \"Communism\", \"Chatty\", \"Diwali\", \"Blackbeard\"]\n hintList = [\"It involved machine guns and birds\", \"Definitely not Ding\", \"He was born in Germany during the 1900s\", \"It's not the obvious answer\", \"This service played a crucial role during the Covid-19 Pandemic\", \"Has the same exact name as a carnival attraction\", \"An ideology that the Americans wholeheartedly hate\", \"Synonymous with Verbose\", \"This festival is not celebrated by any of the Abrahamic Religions\", \"Some say he had a dark coloured beard\"]\n\n # Game Begins\n os.system(\"CLS\")\n print(\"Press Enter to start the game.\")\n startGame = input()\n while player.lives >= 1 and player.level < 11:\n ifDouble = False\n print(\"Welcome to level {}, you have {} lives left.\".format(player.level, player.lives))\n print(\"For ${}, {}\".format(Player.prizeDict[player.level], questionsList[player.level - 1]))\n print(\"A. {}\\nB. {}\\nC. {}\\nD. 
{}\".format(answersDict[player.level][0], answersDict[player.level][1], answersDict[player.level][2], answersDict[player.level][3]))\n if diff.lower() == \"easy\":\n print(\"Enter FF for Fifty-Fifty, Hint for Hint, Double for Double\")\n playerAnswer = input()\n if playerAnswer.lower() == \"ff\" or playerAnswer.lower() == \"hint\" or playerAnswer.lower() == \"double\":\n pUp = player.powerUp(playerAnswer.lower())\n if pUp == \"ff\":\n # Reveals 2 wrong answers\n decoyLst = []\n while len(decoyLst) < 2:\n decoy = random.choice(answersDict[player.level])\n if decoy != ffLst[player.level - 1] and decoy not in decoyLst:\n decoyLst.append(decoy)\n print(\"{} and {} are Incorrect.\".format(decoyLst[0], decoyLst[1]))\n playerAnswer = input()\n\n elif pUp == \"hint\":\n # Provides a Hint\n print(hintList[player.level - 1])\n playerAnswer = input()\n\n elif pUp == \"double\":\n # Doubles wins and losses\n ifDouble = True\n\n playerAnswer = input()\n if playerAnswer.lower() == qnaDict[player.level]:\n player.winnings += Player.prizeDict[player.level]\n player.level += 1\n player.winnings += Player.prizeDict[player.level]\n player.level += 1\n else:\n player.level += 2\n player.lives -= 2\n else:\n playerAnswer = input()\n else:\n playerAnswer = input()\n if(ifDouble == False):\n if playerAnswer.lower() == qnaDict[player.level]:\n player.winnings += Player.prizeDict[player.level]\n player.level += 1\n else:\n player.level += 1\n player.lives -= 1\n\n if player.lives < 1:\n player.level -= 1\n if player.winnings > 1000000:\n print(\"Congratulations Player, You have successfully completed all levels, you are\")\n elif player.level > 5:\n print(\"Congratulations Player, You have finished {} levels and will be taking home ${}\".format(player.level - 1, player.winnings))\n else:\n print(\"Unforunately, you have only cleared {} levels and will be going home with ${}. 
Better luck next time!\".format(player.level - 1, player.winnings))\n\ndef new_func():\n ifDouble = True\n return ifDouble\n \n\"\"\"\n #Testing\n elif diff.lower() == \"test\":\n playerEasy = Player()\n playerMedium = Player(3, False, False, False)\n playerHard = Player(1, False, False, False)\n\n print(\"Player Easy Lives = {} ff = {}, hint = {}, double = {}\".format(playerEasy.lives, playerEasy.ff, playerEasy.hint, playerEasy.double))\n print(\"Player Medium Lives = {} ff = {}, hint = {}, double = {}\".format(playerMedium.lives, playerMedium.ff, playerMedium.hint, playerMedium.double))\n print(\"Player Hard Lives = {} ff = {}, hint = {}, double = {}\".format(playerHard.lives, playerHard.ff, playerHard.hint, playerHard.double))\n\"\"\"\nif __name__==\"__main__\":\n main()","repo_name":"Vincentsasmito/WhoWantsToBeAMillionaire","sub_path":"name.py","file_name":"name.py","file_ext":"py","file_size_in_byte":8269,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"13073667702","text":"import cv2\nimport numpy as np\n\ndef get_output_layers(net):\n \n layer_names = net.getLayerNames()\n \n output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n\n return output_layers\n\nclasses = \"C:/Users/RamyaV/Downloads/Mini project/MiniProject/Mini/object_detection/yolov3.txt\"\nweights = \"C:/Users/RamyaV/Downloads/Mini project/MiniProject/Mini/object_detection/yolov3.weights\"\nconfig = \"C:/Users/RamyaV/Downloads/Mini project/MiniProject/Mini/object_detection/yolov3.cfg\"\nwith open(classes, 'r') as f:\n classes = [line.strip() for line in f.readlines()]\nnet = cv2.dnn.readNet(weights, config)\n\ndef object_detect_function():\n raw_image = \"C:/Users/RamyaV/Downloads/Mini project/MiniProject/Mini/object_detection/Motion Pictures.jpg\" ##Change the code here to Motion_Picture.jpg\n image = cv2.imread(raw_image)\n Width = image.shape[1]\n Height = image.shape[0]\n scale = 0.00392\n blob = 
cv2.dnn.blobFromImage(image, scale, (416,416), (0,0,0), True, crop=False)\n net.setInput(blob)\n outs = net.forward(get_output_layers(net))\n class_ids = []\n confidences = []\n boxes = []\n\n for out in outs:\n for detection in out:\n scores = detection[5:]\n class_id = np.argmax(scores)\n confidence = scores[class_id]\n if confidence > 0.5:\n center_x = int(detection[0] * Width)\n center_y = int(detection[1] * Height)\n w = int(detection[2] * Width)\n h = int(detection[3] * Height)\n x = center_x - w / 2\n y = center_y - h / 2\n class_ids.append(class_id)\n confidences.append(float(confidence))\n boxes.append([x, y, w, h])\n\n detected_objects=[]\n detected_objects=[classes[index] for index in class_ids]\n return detected_objects\n\n\n\n","repo_name":"ramyasree0299/Infant-Monitoring-System","sub_path":"MiniProject/Mini/object_detection/yolo_opencv.py","file_name":"yolo_opencv.py","file_ext":"py","file_size_in_byte":1873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"70758796002","text":"'''\r\nCreated on Mar 25, 2016\r\n\r\n@author: Yi-Hsuan Su\r\n'''\r\nfrom __future__ import division\r\nfrom pyechonest import config\r\nfrom pyechonest import song, artist, track\r\nfrom pyechonest.util import EchoNestAPIError, EchoNestIOError\r\nimport json, time\r\nfrom socket import timeout\r\nfrom datetime import date\r\n\r\n\"\"\"\r\nTODO: Use Echo Nest API Key\r\n\"\"\"\r\nconfig.ECHO_NEST_API_KEY = None\r\n\r\n\r\ndef calculate_elapsed_time(begin):\r\n spent = time.time() - begin\r\n hour = int(spent // 3600)\r\n remain = int(spent % 3600)\r\n minute = int(remain // 60)\r\n second = int(remain % 60)\r\n\r\n return hour, minute, second\r\n\r\n\r\ndef create_artist_list(n=10000):\r\n \"\"\"\r\n n is desired number of artists in the list\r\n \"\"\"\r\n begin_t = time.time()\r\n\r\n artist_list = {}\r\n searched = {}\r\n\r\n with open('create_artist_list.log', 'w+') as f:\r\n f.write('=' * 20 + 'Date: ' + 
str(date.today()) + '=' * 20)\r\n f.close()\r\n\r\n while len(artist_list) <= n:\r\n try:\r\n if len(artist_list) == 0:\r\n # get the 100 hottest artist\r\n for i in artist.top_hottt(results=100):\r\n artist_list[i.name] = 1\r\n\r\n elif len(searched) == len(artist_list):\r\n with open('create_artist_list.log', 'a') as f:\r\n f.write('\\nEnd of expanding the artist list')\r\n else:\r\n for i in [a for a in artist_list.keys() if a not in searched]:\r\n searched[i] = 1\r\n a = artist.Artist(i)\r\n for j in a.get_similar(results=100):\r\n if j not in artist_list:\r\n artist_list[j.name] = 2\r\n\r\n except EchoNestAPIError:\r\n\r\n hrs, mins, secs = calculate_elapsed_time(begin_t)\r\n\r\n with open('create_artist_list.log', 'a') as f:\r\n f.write('\\nStatus:')\r\n f.write('\\nElapsed time: ' + str(hrs) + ':' + str(mins) + ':' + str(secs))\r\n f.write('\\n# of searched artists: ' + str(len(searched)))\r\n f.write('\\n# of artist in the list: ' + str(len(artist_list)))\r\n f.write('\\n------')\r\n f.close()\r\n time.sleep(60)\r\n\r\n hrs, mins, secs = calculate_elapsed_time(begin_t)\r\n\r\n with open('create_artist_list.log', 'a') as f:\r\n f.write('\\nTotal elapsed time: ' + str(hrs) + ':' + str(mins) + ':' + str(secs))\r\n f.write('\\nFinal # of artists: ' + str(len(artist_list)))\r\n\r\n with open('artist_list.txt', mode='w') as f:\r\n f.write('\\n'.join(artist_list.keys()).encode('utf8'))\r\n\r\n\r\ndef create_song_list():\r\n begin_t = time.time()\r\n\r\n with open('artist_list(10k).txt', 'r') as f:\r\n artist_list = f.read().split('\\n')\r\n \r\n with open('create_song_list.log', 'w+'):\r\n f.write('=' * 20 + 'Date: ' + str(date.today()) + '=' * 20)\r\n f.write('\\nNow running on' + str(len(artist_list)) + 'artists...')\r\n f.close()\r\n\r\n artist_done = {}\r\n song_list = []\r\n result_num = 1\r\n start_index = 0\r\n\r\n while len(artist_done) != len(artist_list):\r\n try:\r\n for i in [aa for aa in artist_list if aa not in artist_done]:\r\n with 
open('create_song_list.log', 'a') as f:\r\n f.write('\\nNow collecting songs of' + i)\r\n\r\n a = artist.Artist(i)\r\n\r\n search_song = True\r\n\r\n while search_song:\r\n ls = a.get_songs(results=result_num, start=start_index)\r\n if len(ls) >= 1:\r\n song_song = ls[0]\r\n title = song_song.title \r\n idd = song_song.id\r\n s_hottness = song_song.song_hotttnesss\r\n a_hottness = song_song.artist_hotttnesss\r\n a_familiarity = song_song.artist_familiarity\r\n summary = song_song.audio_summary\r\n #analysis = urllib2.urlopen(summary['analysis_url'])\r\n #analysis = eval(analysis.read())\r\n \r\n song_list.append({'artist': i,\r\n 'title': title,\r\n 'song_id': idd,\r\n 'song_hottness': s_hottness,\r\n 'artist_hottness': a_hottness,\r\n 'artist_familiarity': a_familiarity,\r\n 'danceability': summary['danceability'],\r\n 'duration': summary['duration'],\r\n 'energy': summary['energy'],\r\n 'key': summary['key'],\r\n 'liveness': summary['liveness'],\r\n 'loudness': summary['loudness'],\r\n 'speechiness': summary['speechiness'],\r\n 'acousticness': summary['acousticness'],\r\n 'instrumentalness': summary['instrumentalness'],\r\n 'mode': summary['mode'],\r\n 'time_signature': summary['time_signature'],\r\n 'tempo': summary['tempo']\r\n })\r\n\r\n if len(ls) < result_num:\r\n start_index = 0\r\n search_song = False\r\n else:\r\n start_index += result_num\r\n \r\n artist_done[i] = 1\r\n \r\n except EchoNestAPIError:\r\n hrs, mins, secs = calculate_elapsed_time(begin_t)\r\n\r\n with open('create_song_list.log', 'a') as f:\r\n f.write('\\nStatus:')\r\n f.write('\\nElapsed time: ' + str(hrs) + ':' + str(mins) + ':' + str(secs))\r\n f.write('\\n# of searched artist: ' + str(len(artist_done)))\r\n f.write('\\n# of songs: ' + str(len(song_list)))\r\n f.write('\\n------')\r\n with open('songs.json', 'w') as f:\r\n json.dump(song_list, f)\r\n \r\n time.sleep(60)\r\n\r\n except EchoNestIOError as e:\r\n with open('create_song_list.log', 'a') as f:\r\n f.write('\\n***caught a 
IOError***\\n' + str(e))\r\n with open('songs.json', 'w') as f:\r\n json.dump(song_list, f)\r\n\r\n except timeout:\r\n with open('create_song_list.log', 'a') as f:\r\n f.write('\\n***caught a timeout***')\r\n with open('songs.json', 'w') as f:\r\n json.dump(song_list, f)\r\n\r\n hrs, mins, secs = calculate_elapsed_time(begin_t)\r\n\r\n with open('create_song_list.log', 'a') as f:\r\n f.write('\\nTotal elapsed time: ' + str(hrs) + ':' + str(mins) + ':' + str(secs))\r\n f.write('\\nTotal # of songs in file' + str(len(song_list)))\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n create_artist_list()\r\n create_song_list()\r\n","repo_name":"lisa-su/MSA8150-ML-Project-Hit-Songs-Prediction","sub_path":"CollectRandomSongs.py","file_name":"CollectRandomSongs.py","file_ext":"py","file_size_in_byte":7229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"8634952441","text":"import os # For the operating system function ti control the automatic directory reader \nfrom xml.dom import minidom \nfrom xml.dom.minidom import parse \nimport xml.etree.ElementTree as ET \n # Reading the poxition of the catesian file to collect the shape and angle of the plane with accurate position \n#Modelloader = open('Catbot3.stp','r') # Reading raw data from step file input \ntree = ET.parse('stm32F103c8t6_1.lbr')\nroot = tree.getroot()\n#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n #Child root for the xml reader \nfor child in root: \n print(child.tag,child.attrib)\nprint(root[0][0].text)\n\n#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n # Searching reading grid \nfor grid in root.iter('grid'):\n print(grid.attrib)\n#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n # Package reading the layer \nfor layers in root.iter('layer'):\n print(layers.attrib)\n 
print(len(layers.attrib))\n number = layers.get('number')\n\n print(\"Number:\"+number)\n#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n # Package reading part \nfor package in root.iter('package'):\n print(package.attrib)\n desc = package.find('description').text\n print(\"Packages of MCU\"+\"\\t\"+desc.split(\" \")[0])\n #Showing the list to of the all mcu to classify the right type of the mcu catigory\n listPackages = ['QFP','BGA','UFBGA','WLCSP','VQFPN','UFQFPN'] # List category of the packages to show the right data of each mcu package\n #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n # Package size for estimation function\n Packagedetails = desc.split(\" \")\n print(Packagedetails)\n Packageseeker = desc.split(\" \")[0].split(\"-\")[1]\n GPIOs = desc.split(\" \")[0].split(\"-\")[0] # Amout of pins on the microcontroller \n e = Packagedetails[1] # Get the pitch of the part \n span = Packagedetails[7] #Get the span of the square rounded chip \n \n #Searching for package of the mcus \n for i in range(0,int(len(listPackages))): \n if str(listPackages[i]) in Packageseeker:\n print(\"Found:\"+listPackages[i],GPIOs,e,span) #Package check function \n #Get the position back from the step file data \ndef ThreedCatesian(x,y,z): \n print(x,y,z) # Getting the position of the 3d data from the step file model \n #Catesian data from the step file function \n \n\n#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n \n'''\nprint('\\n All Item data:')\nprint(root)\nfor elem in root:\n #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n # Get the darawing setting on the eagle xml code \n Settingdata = str(elem[0]).split('at')\n Settingsplitdata = Settingdata[0].split('Element')\n print(Settingsplitdata[1]) \n #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n Data = 
str(elem[1]).split('at')\n Getelem = Data[0].split('Element')\n print(Getelem[1]) # Get the element in the item tree\n''' \n \n ","repo_name":"KornbotDevUltimatorKraton/JsonrequestJavascript","sub_path":"Eaglelibraryalgo.py","file_name":"Eaglelibraryalgo.py","file_ext":"py","file_size_in_byte":3397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"40213368158","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat May 9 14:18:11 2020\r\n\r\n@author: prads\r\n\"\"\"\r\n\r\nfrom flask import Flask,render_template,request\r\n\r\nimport pickle\r\nimport numpy as np\r\n\r\nmodel = pickle.load(open('profit.pkl','rb'))\r\n\r\napp = Flask(__name__)\r\n\r\n@app.route('/')\r\ndef home():\r\n return render_template(\"index.html\")\r\n\r\n\r\n@app.route('/login',methods =['POST'])\r\ndef login():\r\n ms = request.form['ms']\r\n ad = request.form['as']\r\n rd = request.form['rd']\r\n s = request.form['s']\r\n if(s == \"Newyork\"):\r\n s1,s2,s3 = 0,0,1\r\n if(s == \"California\"):\r\n s1,s2,s3 = 1,0,0\r\n if(s == \"Florida\"):\r\n s1,s2,s3 = 0,1,0\r\n \r\n total = [[s1,s2,s3,int(rd),int(ad),int(ms)]]\r\n print(total)\r\n y_pred = model.predict(total)\r\n \r\n print(y_pred)\r\n \r\n \r\n return render_template(\"index.html\",showcase = \"the profit that you would get is \"+ str(y_pred[0][0]))\r\n \r\n\r\n\r\nif __name__ == '__main__':\r\n app.run(debug = True)\r\n\r\n\r\n\r\n","repo_name":"SmartPracticeschool/llSPS-INT-15-Predictive-Maintenance-for-Airlines","sub_path":"Flask-april15/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"4836422332","text":"from ast import literal_eval as make_tuple\nfrom functools import lru_cache\n\nimport networkx as nx\nfrom cvrplib.Instance import VRPTW\n\n\ndef instance_graph(instance: VRPTW):\n graph = nx.DiGraph()\n\n # edges between customers\n for 
ci in instance.customers:\n for cj in instance.customers:\n if ci != cj:\n graph.add_edge(ci, cj, distance=instance.distances[ci][cj])\n\n # edges from start depot\n for c in instance.customers:\n graph.add_edge(instance.depot, c, distance=instance.distances[instance.depot][c])\n\n # edges to end depot\n end_depot = instance.n_customers + 1\n for c in instance.customers:\n graph.add_edge(c, end_depot, distance=instance.distances[c][instance.depot])\n graph.add_edge(instance.depot, end_depot, distance=0)\n\n return graph\n\n\ndef minify_instance(instance, only_first):\n distances = [[0] * (only_first + 1) for _ in range(only_first + 1)]\n for i in range(only_first + 1):\n for j in range(only_first + 1):\n distances[i][j] = instance.distances[i][j]\n return VRPTW(\n n_vehicles=instance.n_vehicles,\n earliest=instance.earliest[:only_first + 1],\n latest=instance.latest[:only_first + 1],\n name=instance.name,\n dimension=instance.dimension,\n n_customers=only_first,\n depot= instance.depot,\n customers=instance.customers[:only_first],\n capacity=instance.capacity,\n distances=distances,\n demands=instance.demands[:only_first+1],\n service_times=instance.service_times[:only_first+1],\n coordinates=instance.coordinates[:only_first+1]\n )\n\n\ndef var_to_edges(var):\n return var_name_to_edges(str(var))\n\n\n@lru_cache(maxsize=None)\ndef var_name_to_edges(var_name):\n var_name = var_name if var_name[0] != \"t\" else var_name[2:]\n nodes = list(make_tuple(var_name))\n nodes[-1] = nodes[0]\n return set(zip(nodes[:-1], nodes[1:]))\n","repo_name":"mmghannam/scip-routing","sub_path":"scip_routing/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1935,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"64257486","text":"from aocd import data\nfrom aocd.models import Puzzle\n\n\ndef mandist(s, t):\n return sum(abs(x - y) for x, y in zip(s, t))\n\n\n_dx = dict(U=0, R=1, L=-1, D=0)\n_dy = dict(U=1, R=0, 
L=0, D=-1)\n\n\ndef get_pts(wire):\n pts = {}\n l = 1\n x, y = 0, 0\n for op in wire.split(','):\n d, n = op[0], int(op[1:])\n for i in range(n):\n x, y = x + _dx[d], y + _dy[d]\n pts.setdefault((x, y), l + i)\n l += n\n return pts\n\n\n# input = \"\"\"\n# R8,U5,L5,D3\n# U7,R6,D4,L4\n# \"\"\".strip().split()\n# input = \"\"\"\n# R75,D30,R83,U83,L12,D49,R71,U7,L72\n# U62,R66,U55,R34,D71,R55,D58,R83\"\"\".strip().split()\n\ndef main(*_):\n wire0, wire1 = data.strip().split()\n pts0 = get_pts(wire0)\n pts1 = get_pts(wire1)\n cross = pts0.keys() & pts1.keys()\n part_a = min(mandist((0, 0), c) for c in cross)\n print(f'part 1: {part_a}')\n\n part_b = min(pts0[c] + pts1[c] for c in cross)\n print(f'part 2: {part_b}')\n\n return part_a, part_b\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"bj0/aoc","sub_path":"aoc/2019/d3.py","file_name":"d3.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2353482395","text":"\"\"\"\nPLAsTiCC_in_a_kernel_meta_and_data\n----------------------------------\n@website https://www.kaggle.com/ogrellier/plasticc-in-a-kernel-meta-and-data\n\n@author Olivier https://www.kaggle.com/ogrellier\n\nGoal :\n------\nTrain 5 lightgbms on the meta_data + aggregated data\n\nThen go through test data in chunks and generate predictions\n\nNew in this version :\n---------------------\n1. This versions adds some of the Flux calculations made available by MichaelApers https://www.kaggle.com/michaelapers\n here https://www.kaggle.com/michaelapers/the-plasticc-astronomy-starter-kit\n2. 
class 99 mean adjustment\n\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import StratifiedKFold\nimport gc\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport lightgbm as lgb\nimport logging\n\n\ndef create_logger():\n logger_ = logging.getLogger('main')\n logger_.setLevel(logging.DEBUG)\n fh = logging.FileHandler('simple_lightgbm.log')\n fh.setLevel(logging.DEBUG)\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n formatter = logging.Formatter('[%(levelname)s]%(asctime)s:%(name)s:%(message)s')\n fh.setFormatter(formatter)\n ch.setFormatter(formatter)\n # add the handlers to the logger\n logger_.addHandler(fh)\n logger_.addHandler(ch)\n\n\ndef get_logger():\n return logging.getLogger('main')\n\n\ndef lgb_multi_weighted_logloss(y_true, y_preds):\n \"\"\"\n @author olivier https://www.kaggle.com/ogrellier\n multi logloss for PLAsTiCC challenge\n \"\"\"\n # class_weights taken from Giba's topic : https://www.kaggle.com/titericz\n # https://www.kaggle.com/c/PLAsTiCC-2018/discussion/67194\n # with Kyle Boone's post https://www.kaggle.com/kyleboone\n classes = [6, 15, 16, 42, 52, 53, 62, 64, 65, 67, 88, 90, 92, 95]\n class_weight = {6: 1, 15: 2, 16: 1, 42: 1, 52: 1, 53: 1, 62: 1, 64: 2, 65: 1, 67: 1, 88: 1, 90: 1, 92: 1, 95: 1}\n if len(np.unique(y_true)) > 14:\n classes.append(99)\n class_weight[99] = 2\n y_p = y_preds.reshape(y_true.shape[0], len(classes), order='F')\n\n # Trasform y_true in dummies\n y_ohe = pd.get_dummies(y_true)\n # Normalize rows and limit y_preds to 1e-15, 1-1e-15\n y_p = np.clip(a=y_p, a_min=1e-15, a_max=1 - 1e-15)\n # Transform to log\n y_p_log = np.log(y_p)\n # Get the log for ones, .values is used to drop the index of DataFrames\n # Exclude class 99 for now, since there is no class99 in the training set\n # we gave a special process for that class\n y_log_ones = np.sum(y_ohe.values * y_p_log, axis=0)\n # Get the number of positives for each class\n nb_pos = 
y_ohe.sum(axis=0).values.astype(float)\n # Weight average and divide by the number of positives\n class_arr = np.array([class_weight[k] for k in sorted(class_weight.keys())])\n y_w = y_log_ones * class_arr / nb_pos\n\n loss = - np.sum(y_w) / np.sum(class_arr)\n return 'wloss', loss, False\n\n\ndef multi_weighted_logloss(y_true, y_preds):\n \"\"\"\n @author olivier https://www.kaggle.com/ogrellier\n multi logloss for PLAsTiCC challenge\n \"\"\"\n # class_weights taken from Giba's topic : https://www.kaggle.com/titericz\n # https://www.kaggle.com/c/PLAsTiCC-2018/discussion/67194\n # with Kyle Boone's post https://www.kaggle.com/kyleboone\n classes = [6, 15, 16, 42, 52, 53, 62, 64, 65, 67, 88, 90, 92, 95]\n class_weight = {6: 1, 15: 2, 16: 1, 42: 1, 52: 1, 53: 1, 62: 1, 64: 2, 65: 1, 67: 1, 88: 1, 90: 1, 92: 1, 95: 1}\n if len(np.unique(y_true)) > 14:\n classes.append(99)\n class_weight[99] = 2\n y_p = y_preds\n # Trasform y_true in dummies\n y_ohe = pd.get_dummies(y_true)\n # Normalize rows and limit y_preds to 1e-15, 1-1e-15\n y_p = np.clip(a=y_p, a_min=1e-15, a_max=1 - 1e-15)\n # Transform to log\n y_p_log = np.log(y_p)\n # Get the log for ones, .values is used to drop the index of DataFrames\n # Exclude class 99 for now, since there is no class99 in the training set\n # we gave a special process for that class\n y_log_ones = np.sum(y_ohe.values * y_p_log, axis=0)\n # Get the number of positives for each class\n nb_pos = y_ohe.sum(axis=0).values.astype(float)\n # Weight average and divide by the number of positives\n class_arr = np.array([class_weight[k] for k in sorted(class_weight.keys())])\n y_w = y_log_ones * class_arr / nb_pos\n\n loss = - np.sum(y_w) / np.sum(class_arr)\n return loss\n\n\ndef predict_chunk(df_, clfs_, meta_, features, train_mean):\n\n df_['flux_ratio_sq'] = np.power(df_['flux'] / df_['flux_err'], 2.0)\n df_['flux_by_flux_ratio_sq'] = df_['flux'] * df_['flux_ratio_sq']\n\n # Group by object id\n aggs = get_aggregations()\n\n aggs = 
get_aggregations()\n aggs['flux_ratio_sq'] = ['sum']\n aggs['flux_by_flux_ratio_sq'] = ['sum']\n\n new_columns = get_new_columns(aggs)\n\n agg_ = df_.groupby('object_id').agg(aggs)\n agg_.columns = new_columns\n\n agg_ = add_features_to_agg(df=agg_)\n\n # Merge with meta data\n full_test = agg_.reset_index().merge(\n right=meta_,\n how='left',\n on='object_id'\n )\n\n full_test = full_test.fillna(train_mean)\n # Make predictions\n preds_ = None\n for clf in clfs_:\n if preds_ is None:\n preds_ = clf.predict_proba(full_test[features]) / len(clfs_)\n else:\n preds_ += clf.predict_proba(full_test[features]) / len(clfs_)\n\n # Compute preds_99 as the proba of class not being any of the others\n # preds_99 = 0.1 gives 1.769\n preds_99 = np.ones(preds_.shape[0])\n for i in range(preds_.shape[1]):\n preds_99 *= (1 - preds_[:, i])\n\n # Create DataFrame from predictions\n preds_df_ = pd.DataFrame(preds_, columns=['class_' + str(s) for s in clfs_[0].classes_])\n preds_df_['object_id'] = full_test['object_id']\n preds_df_['class_99'] = 0.14 * preds_99 / np.mean(preds_99) \n\n print(preds_df_['class_99'].mean())\n\n del agg_, full_test, preds_\n gc.collect()\n\n return preds_df_\n\n\ndef save_importances(importances_):\n mean_gain = importances_[['gain', 'feature']].groupby('feature').mean()\n importances_['mean_gain'] = importances_['feature'].map(mean_gain['gain'])\n plt.figure(figsize=(8, 12))\n sns.barplot(x='gain', y='feature', data=importances_.sort_values('mean_gain', ascending=False))\n plt.tight_layout()\n plt.savefig('importances.png')\n\n\ndef train_classifiers(full_train=None, y=None):\n\n folds = StratifiedKFold(n_splits=5, shuffle=True, random_state=1)\n clfs = []\n importances = pd.DataFrame()\n lgb_params = {\n 'boosting_type': 'gbdt',\n 'objective': 'multiclass',\n 'num_class': 14,\n 'metric': 'multi_logloss',\n 'learning_rate': 0.03,\n 'subsample': .9,\n 'colsample_bytree': .7,\n 'reg_alpha': .01,\n 'reg_lambda': .01,\n 'min_split_gain': 0.01,\n 
'min_child_weight': 10,\n 'n_estimators': 1000,\n 'silent': -1,\n 'verbose': -1,\n 'max_depth': 3\n }\n \n # Compute weights\n w = y.value_counts()\n weights = {i : np.sum(w) / w[i] for i in w.index}\n \n oof_preds = np.zeros((len(full_train), np.unique(y).shape[0]))\n for fold_, (trn_, val_) in enumerate(folds.split(y, y)):\n trn_x, trn_y = full_train.iloc[trn_], y.iloc[trn_]\n val_x, val_y = full_train.iloc[val_], y.iloc[val_]\n\n clf = lgb.LGBMClassifier(**lgb_params)\n clf.fit(\n trn_x, trn_y,\n eval_set=[(trn_x, trn_y), (val_x, val_y)],\n eval_metric=lgb_multi_weighted_logloss,\n verbose=100,\n early_stopping_rounds=50,\n sample_weight=trn_y.map(weights)\n )\n oof_preds[val_, :] = clf.predict_proba(val_x, num_iteration=clf.best_iteration_)\n get_logger().info(multi_weighted_logloss(val_y, clf.predict_proba(val_x, num_iteration=clf.best_iteration_)))\n\n imp_df = pd.DataFrame()\n imp_df['feature'] = full_train.columns\n imp_df['gain'] = clf.feature_importances_\n imp_df['fold'] = fold_ + 1\n importances = pd.concat([importances, imp_df], axis=0, sort=False)\n\n clfs.append(clf)\n\n get_logger().info('MULTI WEIGHTED LOG LOSS : %.5f ' % multi_weighted_logloss(y_true=y, y_preds=oof_preds))\n\n return clfs, importances\n\n\ndef get_aggregations():\n return {\n # Dropped mjd aggregations on CPMP advice\n # see https://www.kaggle.com/c/PLAsTiCC-2018/discussion/69696\n # 'mjd': ['min', 'max', 'size'],\n 'passband': ['mean', 'std', 'var'], # ''min', 'max', 'mean', 'median', 'std'],\n 'flux': ['min', 'max', 'mean', 'median', 'std'],\n 'flux_err': ['min', 'max', 'mean', 'median', 'std'],\n 'detected': ['mean'], # ''min', 'max', 'mean', 'median', 'std'],\n }\n\n\ndef get_new_columns(aggs):\n return [k + '_' + agg for k in aggs.keys() for agg in aggs[k]]\n\n\ndef add_features_to_agg(df):\n # CPMP using the following feature was really silliy :)\n # df['mjd_diff'] = df['mjd_max'] - df['mjd_min']\n # see https://www.kaggle.com/c/PLAsTiCC-2018/discussion/69696\n \n # The 
others may be useful\n df['flux_diff'] = df['flux_max'] - df['flux_min']\n df['flux_dif2'] = (df['flux_max'] - df['flux_min']) / df['flux_mean']\n df['flux_w_mean'] = df['flux_by_flux_ratio_sq_sum'] / df['flux_ratio_sq_sum']\n df['flux_dif3'] = (df['flux_max'] - df['flux_min']) / df['flux_w_mean']\n\n # del df['mjd_max'], df['mjd_min']\n\n return df\n\ndef main():\n train = pd.read_csv('../input/training_set.csv')\n train['flux_ratio_sq'] = np.power(train['flux'] / train['flux_err'], 2.0)\n train['flux_by_flux_ratio_sq'] = train['flux'] * train['flux_ratio_sq']\n\n # train = pd.concat([train, pd.get_dummies(train['passband'], prefix='passband')], axis=1, sort=False)\n\n aggs = get_aggregations()\n aggs['flux_ratio_sq'] = ['sum']\n aggs['flux_by_flux_ratio_sq'] = ['sum']\n\n # passbands = [f for f in train if 'passband_' in f]\n # get_logger().info('Passband features : {}'.format(passbands))\n # for pb in passbands:\n # aggs[pb] = ['mean']\n\n agg_train = train.groupby('object_id').agg(aggs)\n new_columns = get_new_columns(aggs)\n agg_train.columns = new_columns\n\n agg_train = add_features_to_agg(df=agg_train)\n \n agg_train.head()\n\n del train\n gc.collect()\n\n meta_train = pd.read_csv('../input/training_set_metadata.csv')\n meta_train.head()\n\n full_train = agg_train.reset_index().merge(\n right=meta_train,\n how='outer',\n on='object_id'\n )\n\n y = full_train['target']\n del full_train['target']\n del full_train['object_id'], full_train['hostgal_specz'] # , full_train['distmod']\n\n train_mean = full_train.mean(axis=0)\n full_train.fillna(train_mean, inplace=True)\n get_logger().info(full_train.columns)\n clfs, importances = train_classifiers(full_train, y)\n\n save_importances(importances_=importances)\n\n meta_test = pd.read_csv('../input/test_set_metadata.csv')\n\n import time\n\n start = time.time()\n chunks = 5000000\n remain_df = None\n \n def the_unique(x):\n return [x[i] for i in range(len(x)) if x[i] != x[i-1]]\n\n for i_c, df in 
enumerate(pd.read_csv('../input/test_set.csv', chunksize=chunks, iterator=True)):\n # Check object_ids\n # I believe np.unique keeps the order of group_ids as they appear in the file\n # My belief is wrong (I should have read the doc !)\n # A big thank you to https://www.kaggle.com/filby89\n # Use .tolist() is almost 3 times faster than the_unique(df['object_id'].values)\n unique_ids = the_unique(df['object_id'].tolist())\n new_remain_df = df.loc[df['object_id'] == unique_ids[-1]].copy()\n\n if remain_df is None:\n df = df.loc[df['object_id'].isin(unique_ids[:-1])].copy()\n else:\n df = pd.concat([remain_df, df.loc[df['object_id'].isin(unique_ids[:-1])]], axis=0)\n\n # Create remaining samples df\n remain_df = new_remain_df\n\n preds_df = predict_chunk(df_=df,\n clfs_=clfs,\n meta_=meta_test,\n features=full_train.columns,\n train_mean=train_mean)\n\n if i_c == 0:\n preds_df.to_csv('predictions_v3.csv', header=True, index=False, float_format='%.6f')\n else:\n preds_df.to_csv('predictions_v3.csv', header=False, mode='a', index=False, float_format='%.6f')\n\n del preds_df\n gc.collect()\n\n if (i_c + 1) % 10 == 0:\n get_logger().info('%15d done in %5.1f' % (chunks * (i_c + 1), (time.time() - start) / 60))\n print('%15d done in %5.1f' % (chunks * (i_c + 1), (time.time() - start) / 60))\n\n # Compute last object in remain_df\n\n preds_df = predict_chunk(df_=remain_df,\n clfs_=clfs,\n meta_=meta_test,\n features=full_train.columns,\n train_mean=train_mean)\n\n preds_df.to_csv('predictions_v3.csv', header=False, mode='a', index=False, float_format='%.6f')\n\n z = pd.read_csv('predictions_v3.csv')\n\n z = z.groupby('object_id').mean()\n\n z.to_csv('single_predictions_v3.csv', index=True, float_format='%.6f')\n\n\nif __name__ == '__main__':\n gc.enable()\n create_logger()\n try:\n main()\n except Exception:\n get_logger().exception('Unexpected Exception Occured')\n 
raise\n","repo_name":"sajedjalil/Data-Science-Pipeline-Detector","sub_path":"dataset/PLAsTiCC-2018/olivier/plasticc-in-a-kernel-meta-and-data.py","file_name":"plasticc-in-a-kernel-meta-and-data.py","file_ext":"py","file_size_in_byte":13443,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"54"} +{"seq_id":"32784648025","text":"\"\"\n#!/usr/bin/env python\n\n# -*- coding: utf-8 -*-\n\n# Ejercicio 10\n\nprint(\"Ingrese los datos necesarios\")\n\n# Ingreso de datos\n\n# Operación\n\ncTer = 5\n\nc = 1\n\ns = 0\n\nwhile c <= cTer:\n\n es = float(input(str(c) + \".- Ingrese la estatura: \"))\n\n s += es\n\n c += 1\n\n\n# Resultados\n\nprint(\"La suma de las estaturas es: \", s)\n\nprint(\"El promedio de las estaturas es: \", round(s / cTer, 2))\n\nprint(\"Muchas Gracias, programa terminado.\")\n","repo_name":"harrymanga/Clases_De_Programacion_Idat","sub_path":"Ciclo_1/Python_Basico/Tema.7/Ejercicios/T7.E10.py","file_name":"T7.E10.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"41204095224","text":"from typing import Dict\nmemo: Dict[int, int] = {0: 0, 1: 1}\n\ndef fib3(n: int) -> int:\n if n not in memo:\n memo[n] = fib3(n - 1) + fib3(n - 2)\n return memo[n]\n\nif __name__ == \"__main__\":\n print(fib3(5))\n print(fib3(50))\n","repo_name":"tuantran1810/ClassicCSProblemWithPython","sub_path":"Chapter1/fib3.py","file_name":"fib3.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"70289924643","text":"from PIL import Image\nimport random\n\ndef glass_filter(pixel_map, width, height):\n for i in range(width):\n for j in range(height):\n\n rgbp = lambda x,y: input_image.getpixel((x, y))\n\n possible_values = [rgbp(i, j)]\n\n # add pixel above if possible\n if j != 0:\n possible_values.append(rgbp(i, j - 1))\n # add pixel below 
if possible\n if j != height - 1:\n possible_values.append(rgbp(i, j + 1))\n # pixel left\n if i != 0:\n possible_values.append(rgbp(i - 1, j))\n # pixel right\n if i != width - 1:\n possible_values.append(rgbp(i + 1, j))\n \n # choose random rgb val\n rand_index = random.randrange(0, len(possible_values))\n\n # set pixel value\n pixel_map[i, j] = possible_values[rand_index]\n \nif __name__ == \"__main__\":\n fpath = input(\"enter path for image relative to ~/Pictures: \")\n BASE_DIR = \"/home/harishb/Pictures\"\n input_image = Image.open(f\"{BASE_DIR}/{fpath}\")\n before_ext = fpath[0: fpath.rfind(\".\")]\n after_ext = fpath[fpath.rfind(\".\") + 1: ]\n\n pixel_map = input_image.load()\n\n width, height = input_image.size\n\n glass_filter(pixel_map, width, height)\n\n # save as final output\n new_ext = \"png\"\n new_fpath = f\"{BASE_DIR}/{before_ext}_glass.{new_ext}\"\n input_image.save(new_fpath, format=new_ext)\n\n print(f\"Picture succesfully saved at {new_fpath}\")","repo_name":"harishbommakanti/image_processor","sub_path":"python_version/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"11816227265","text":"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\nUniwalk\n- An explainable and accurate recommender system\n for rating and network data\n\nAuthors\n- Haekyu Park (hkpark627@snu.ac.kr)\n- Hyunsik Jeon (jeon185@gmail.com)\n- Junghwan Kim (kjh900809@snu.ac.kr)\n- Beunguk Ahn (elaborate@snu.ac.kr)\n- U Kang (ukang@snu.ac.kr)\n\nFile\n- main.py\n : runs Uniwalk algorithm\n\nThis software is free of charge under research purposes.\nFor commercial purposes, please contact the 
authors.\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\n\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\nImport packages\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\nimport os\nimport sys\nfrom time import localtime\nfrom par import parse_args, set_paras, set_files, set_basic_info, print_args\nfrom embedding import embedding\nfrom prediction import prediction\nfrom build_graph import build_graph\nfrom split_5_folds import split_5_folds\nfrom xval import xval\nfrom holdout import holdout\nimport numpy as np\nsys.path.insert(0, os.path.dirname(os.path.abspath('.')))\n\n\n\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\nAdditional functions\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n# Print the present time\ndef now_string():\n\tnow = localtime()\n\tnow_day = (now.tm_year, now.tm_mon, now.tm_mday)\n\tnow_time = (now.tm_hour, now.tm_min)\n\ts = \"%04d-%02d-%02d_\" % now_day + \"%02d-%02d\" % now_time\n\treturn s\n\n\n\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\nMain method\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\nif __name__ == '__main__':\n\t# 1. Get arguments\n\targs = parse_args()\n\tset_paras(args)\n\tset_files(args)\n\tset_basic_info(args)\n\tprint_args(args)\n \n\n\t# 2. 
Split input ratings into 5 folds\n\t#split_5_folds(args.dataset, 5)\n\t#xval(args.dataset, 5)\n\t#holdout(args.dataset, 30)\n\n\t# 3. Build graph\n\t#build_graph(args, 30)\n\n\t# 4. Learn\n\tnumfold = 30\n\tsampling_type = ['positive', 'negative', 'unweighted']\n\tTrain_RMSEs = list()\n\tmin_TestRMSEs = list()\n\tmin_TestMAEs = list()\n\t\n\tfor fold in range(numfold):\n\t\trmse, min_TestRMSE, min_TestMAE = embedding(args, fold, sampling_type)\n\t\tprtstr = \"Fold = %d, rmse = %.3f\" % (fold, rmse)\n\t\tprint(prtstr)\n\t\tprtstr = \"Fold = %d, min rmse = %.3f\" % (fold, min_TestRMSE)\n\t\tprint(prtstr)\n\t\tprtstr = \"Fold = %d, min mae = %.3f\" % (fold, min_TestMAE)\n\t\tprint(prtstr)\n\t\tTrain_RMSEs.append(rmse)\n\t\tmin_TestRMSEs.append(min_TestRMSE)\n\t\tmin_TestMAEs.append(min_TestMAE)\n\t\n\t#prtstr = \"Train RMSEs = %s\" % Train_RMSEs\n\t#print(prtstr)\n\t\n\t# 5. Prediction\n\tTest_RMSEs, Test_MAEs = prediction(args, numfold)\n\terr_filename = \"../data/%s/prediction/err.txt\" % (args.dataset)\n\twith open(err_filename, mode='w') as f:\n\t\tfor i in range(numfold):\n\t\t f.write(\"%.5f\\t\" %Test_RMSEs[i])\n\t\tf.write(\"\\n\")\n\t\tfor i in range(numfold):\n\t\t f.write(\"%.5f\\t\" %Test_MAEs[i])\n\t\tf.write(\"\\n%.5f\\n%.5f\" %(np.mean(Test_RMSEs), np.mean(Test_MAEs)))\n\tprint(\"Mean RMSE: %.5f\\nMean MAE: %.5f\" %(np.mean(Test_RMSEs), np.mean(Test_MAEs)))\n \n\n\t\n","repo_name":"mikele700/ExplainableRecommendation","sub_path":"UniWalk-1.0/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31378184206","text":"import gzip\nimport shutil\n\ndef compress(infile, tofile):\n with open(infile, 'rb') as f_in:\n with gzip.open(tofile, 'wb') as f_out:\n shutil.copyfileobj(f_in, f_out)\n\ndef decompress(infile, tofile):\n with gzip.open(infile, 'rb') as f_in:\n with open(tofile, 'wb') as f_out:\n shutil.copyfileobj(f_in, 
f_out)\n","repo_name":"JamesMHarmon/optima","sub_path":"model_py/compress.py","file_name":"compress.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31189252122","text":"\r\nclass Schedules(dict):\r\n\r\n testingCounter = 0\r\n contentCounter = -1\r\n yearCounter = -1\r\n monthIntervals = [1]\r\n monthNames = []\r\n years = []\r\n\r\n classes=[{'start': '2018-12-12',\r\n 'end': '2018-12-13',\r\n 'length': '6 weeks',\r\n 'course': 'Beg. Obedience',\r\n 'price': '$140',\r\n 'city': 'Glendora',\r\n 'preregister': 'Pre-Registration Required! Call (999) 222-3333',\r\n 'time': '6PM - 7PM',\r\n 'address': '555 A St, 98821',\r\n 'note': 'No dogs at first meeting!'},\r\n {'start': '2019-01-16',\r\n 'end': '2019-02-13',\r\n 'length': '4 weeks',\r\n 'course': 'Beg. Obedience',\r\n 'price': '$180',\r\n 'city': 'Upland',\r\n 'preregister': 'Pre-Registration Required! Call (999) 555-3333',\r\n 'time': '6PM - 7PM',\r\n 'address': '555 A St, 98821',\r\n 'note': 'No dogs at first meeting!'},\r\n {'start': '2019-01-16',\r\n 'end': '2018-12-13',\r\n 'length': '6 weeks',\r\n 'course': 'Beg. Obedience',\r\n 'price': '$140',\r\n 'city': 'Glendora',\r\n 'preregister': 'Pre-Registration Required! Call (999) 222-3333',\r\n 'time': '6PM - 7PM',\r\n 'address': '555 A St, 98821',\r\n 'note': 'No dogs at first meeting!'},\r\n {'start': '2019-01-18',\r\n 'end': '2018-12-13',\r\n 'length': '6 weeks',\r\n 'course': 'Beg. Obedience',\r\n 'price': '$140',\r\n 'city': 'Glendora',\r\n 'preregister': 'Pre-Registration Required! Call (999) 222-3333',\r\n 'time': '6PM - 7PM',\r\n 'address': '555 A St, 98821',\r\n 'note': 'No dogs at first meeting!'},\r\n {'start': '2019-02-16',\r\n 'end': '2018-12-13',\r\n 'length': '6 weeks',\r\n 'course': 'Beg. Obedience',\r\n 'price': '$140',\r\n 'city': 'Glendora',\r\n 'preregister': 'Pre-Registration Required! 
Call (999) 222-3333',\r\n 'time': '6PM - 7PM',\r\n 'address': '555 A St, 98821',\r\n 'note': 'No dogs at first meeting!'}]\r\n \r\n def monthNumberToName(monthNumber):\r\n if monthNumber == '01':\r\n return 'January'\r\n elif monthNumber == '02':\r\n return 'February'\r\n elif monthNumber == '03':\r\n return 'March'\r\n elif monthNumber == '04':\r\n return 'April'\r\n elif monthNumber == '05':\r\n return 'May'\r\n elif monthNumber == '06':\r\n return 'June'\r\n elif monthNumber == '07':\r\n return 'July'\r\n elif monthNumber == '08':\r\n return 'August'\r\n elif monthNumber == '09':\r\n return 'September'\r\n elif monthNumber == '10':\r\n return 'October'\r\n elif monthNumber == '11':\r\n return 'November'\r\n elif monthNumber == '12':\r\n return 'December'\r\n\r\n\r\n monthNames.append(monthNumberToName(classes[0]['start'][5:7]))\r\n years.append(classes[0]['start'][0:4])\r\n\r\n monthIntervalsCounter = 1\r\n for index, item in enumerate(classes):\r\n monthIntervalsCounter += 1\r\n try:\r\n if item['start'][5:7] != classes[index + 1]['start'][5:7]:\r\n monthIntervals.append(monthIntervalsCounter)\r\n monthNames.append(monthNumberToName(classes[index + 1]['start'][5:7]))\r\n # if item['start'][0:4] != classes[index + 1]['start'][0:4]:\r\n years.append(classes[index + 1]['start'][0:4])\r\n except:\r\n continue\r\n\r\n @property\r\n def testing(self):\r\n Schedules.testingCounter += 1\r\n for i in Schedules.monthIntervals:\r\n if Schedules.testingCounter == i:\r\n return True\r\n return False\r\n\r\n @property\r\n def content(self):\r\n Schedules.contentCounter += 1\r\n return Schedules.monthNames[Schedules.contentCounter]\r\n \r\n @property\r\n def year(self):\r\n Schedules.yearCounter += 1\r\n return Schedules.years[Schedules.yearCounter]\r\n\r\n\r\nsched = Schedules(customer={'name': 'poop'})\r\n\r\n# class Schedules(set):\r\n# pass\r\n\r\n# sched = Schedules(classes={{'January',{'start': '2018-1-12',\r\n# 'end': '2018-1-13',\r\n# 'length': '6 weeks',\r\n# 
'course': 'Beg. Obedience',\r\n# 'price': '$140',\r\n# 'city': 'Glendora',\r\n# 'preregister': 'Pre-Registration Required! Call (999) 222-3333',\r\n# 'time': '6PM - 7PM',\r\n# 'address': '555 A St, 98821',\r\n# 'note': 'No dogs at first meeting!'},\r\n# {'start': '2018-1-12',\r\n# 'end': '2018-1-13',\r\n# 'length': '7 weeks',\r\n# 'course': 'Beg. Obedience',\r\n# 'price': '$140',\r\n# 'city': 'Glendora',\r\n# 'preregister': 'Pre-Registration Required! Call (999) 222-3333',\r\n# 'time': '6PM - 7PM',\r\n# 'address': '555 A St, 98821',\r\n# 'note': 'No dogs at first meeting!'}},\r\n# {'February',{'start': '2018-2-12',\r\n# 'end': '2018-2-13',\r\n# 'length': '6 weeks',\r\n# 'course': 'Beg. Obedience',\r\n# 'price': '$140',\r\n# 'city': 'Glendora',\r\n# 'preregister': 'Pre-Registration Required! Call (999) 222-3333',\r\n# 'time': '6PM - 7PM',\r\n# 'address': '555 A St, 98821',\r\n# 'note': 'No dogs at first meeting!'},\r\n# {'start': '2018-2-12',\r\n# 'end': '2018-2-13',\r\n# 'length': '7 weeks',\r\n# 'course': 'Beg. Obedience',\r\n# 'price': '$140',\r\n# 'city': 'Glendora',\r\n# 'preregister': 'Pre-Registration Required! Call (999) 222-3333',\r\n# 'time': '6PM - 7PM',\r\n# 'address': '555 A St, 98821',\r\n# 'note': 'No dogs at first meeting!'}}})\r\n\r\n# MONTH SECTION HEADERS\r\n# =====================\r\n# Template will have an IF (a function in Data that returns boolean) at the beginning of the for loop\r\n# with content (a function in Data that returns month) that is displayed on True\r\n\r\n# Data will have: \r\n# a function that counts the number of classes in each month\r\n# a function called by Template's IF:\r\n# initially (counter = 0) it returns true and the content function returns first month\r\n# the counter increments each time the function is called. 
when it hits a value corresponding to the start of a new month, it returns true\r\n# a content function called by Template when IF function returns True\r\n# each time it is called, a counter is incremented and the next month is returned ","repo_name":"tunacanman/lodo-schedule-system","sub_path":"relatorio/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":7512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"37769675450","text":"#!/usr/bin/env python\nimport rosbag\nimport rospy\nimport numpy as np\nimport yaml\nimport tf\n\nif __name__ == '__main__':\n bagfile = rospy.myargv()[1]\n bag = rosbag.Bag(bagfile, 'r')\n # info = yaml.load(bag._get_yaml_info())\n # tft = tf.Transformer(True, rospy.Duration(info['duration']))\n\n bag_out = rosbag.Bag('bag_out.bag', 'w')\n\n odoms = []\n for topic, msg, t in bag.read_messages():#topics=['/tf']): #, '/odom_temp']):\n # if msg.child_frame_id\n # for msg in msg.transforms:\n # tft.setTransform(msg)\n # elif topic == '/odom_temp':\n # odoms.append(msg)\n if topic == '/tf':\n new_msg = tf.msg.tfMessage()\n for transform in msg.transforms:\n if transform.child_frame_id != 'base_footprint':\n new_msg.transforms.append(transform)\n msg = new_msg\n\n bag_out.write(topic, msg, t)","repo_name":"OSUrobotics/map_analysis","sub_path":"scripts/extract_ground_truth_path.py","file_name":"extract_ground_truth_path.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"32468099708","text":"\"\"\"\nthis module converts the state of vermont voter file into a pandas dataframe.\nin order to get the vermont voter file, i emailed a form found here --\nhttps://outside.vermont.gov/dept/sos/Elections%20Division/voters/2019-sworn-affidavit-checklist-request.pdf\n-- to the vermont secretary of state and they added me to monthly distributions of the voter file.\nThe file is 
saved to state_voter_files/vermont/9.7.2022Statewidevoters (38).txt.\nn.b. i left the filename unchanged from how it was sent\n\nFunctions\n---------\nvt_voter_file\n get the voter file in pandas dataframe form\n\"\"\"\nimport os\nimport pandas as pd\n\n\ndef main():\n vt_voter_file(load=False, save=True)\n\n\ndef vt_voter_file(load=True, save=True):\n \"\"\"\n pull the state of vermont voter file\n\n Parameters\n ----------\n load : bool, optional\n load the dataframe if it already exists\n save : bool, optional\n save the dataframe after creating it\n\n Returns\n -------\n pandas dataframe\n voter file\n \"\"\"\n file_out = f\"generated_data/vt2020_vf.feather\"\n if os.path.exists(file_out) and load:\n print(f\"*loading dataframe from {file_out}\")\n return pd.read_feather(file_out)\n\n filename = \"state_voter_files/vermont/9.7.2022Statewidevoters (38).txt\"\n usecols = [\"Last Name\", \"County\"]\n df = pd.read_csv(filename, sep=\"|\", usecols=usecols)\n df[\"name\"] = df[\"Last Name\"].str.lower()\n df[\"county\"] = df[\"County\"].str.lower()\n\n # filter out any names with numbers or any names with no letters\n # does name contain a digit\n mask1 = df[\"name\"].map(lambda x: any(char.isdigit() for char in str(x)))\n # does name contain zero letters\n mask2 = ~df[\"name\"].map(lambda x: any(char.isalpha() for char in str(x)))\n mask = ~mask1 & ~mask2\n df = df[mask]\n\n # write dataframe to feather, but first reset index as required by feather\n if save:\n df.reset_index(drop=True).to_feather(file_out)\n\n return df\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"pgree/raking_bisg","sub_path":"vermont_vf.py","file_name":"vermont_vf.py","file_ext":"py","file_size_in_byte":2001,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"71626678561","text":"from jax.numpy import float32, isclose\r\nfrom jax.random import PRNGKey\r\nfrom sarx import synapse\r\n\r\n\r\ndef test():\r\n key = 
PRNGKey(0)\r\n weights = synapse(key, shape=(1000,))\r\n assert weights.dtype == float32\r\n assert not weights.weak_type\r\n assert isclose(weights.mean(), 1.5, atol=0.01)\r\n assert isclose(weights.std(), 0.1, atol=0.01)\r\n","repo_name":"ivandustin/sarx","sub_path":"tests/test_synapse.py","file_name":"test_synapse.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"27286288024","text":"a=\"hello,world!\" #single line string\nprint(a)\n\n##multiline string-->\nb=\"\"\"hello i am jay \ni live in village siwai\nmy father is farmer\"\"\"\nprint(b)\n\n#slicing string(You can return a range of characters by using the slice syntax.)-->\na=\"hello,world!\"\nprint(a[2:8])\n\n\n##modify strings-->\n#1)upper case(The upper() method returns the string in upper case:)-->\na=\"this is demo\"\nprint(a.upper())\n\n#2)lower case(The lower() method returns the string in lower case:)-->\na=\"this is DEMO\"\nprint(a.lower())\n\n#3)remove whitespac(The strip() method removes any whitespace from the beginning or the end:)-->\na=\" hello , world ! 
\"\nprint(a.strip())\n\n#4)replace string(The replace() method replaces a string with another string:)-->\na=\"hwllo , world!\"\nprint(a.replace(\"w\",\"e\"))\n\n##concatenation of string(To concatenate, or combine, two strings you can use the + operator.)-->\na='hello,'\nb=\"world\"\nc=a+b\nprint(c)\n\n\n\n\n\n","repo_name":"JayMuleva/python_learnings","sub_path":"basic_learning/python_string.py","file_name":"python_string.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"70477688801","text":"from __future__ import annotations\nfrom typing import TYPE_CHECKING\nif TYPE_CHECKING:\n from nextcord import Interaction\n\nfrom os import urandom\n\nfrom nextcord import ButtonStyle\n\nfrom ..Components.view_base import ViewBase\nfrom ..Components.custom_button import CustomButton\n\n\nclass VerificationView(ViewBase):\n def __init__(self, author_id: int, timeout: float | None = 30.0) -> None:\n super().__init__(0, author_id, timeout)\n self.add_item(CustomButton(\n style=ButtonStyle.green,\n custom_id=f\"0_{author_id}_\" + urandom(4).hex(),\n emoji=\"✅\"\n ))\n self.add_item(CustomButton(\n style=ButtonStyle.red,\n custom_id=f\"1_{author_id}_\" + urandom(4).hex(),\n emoji=\"❌\"\n ))\n self.approved: bool | None = None\n \n async def click_button(self, interaction: Interaction, custom_id: str) -> None:\n assert custom_id[0] in {\"0\", \"1\"}\n await interaction.response.defer()\n self.approved = custom_id[0] == \"0\"\n self.stop()\n \n async def click_select_menu(self, interaction: Interaction, custom_id: str, values: list[str]) -> None:\n return\n","repo_name":"i80287/studpid-bot","sub_path":"storebot/Components/verification_view.py","file_name":"verification_view.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"36433689574","text":"\"\"\"\nA simple plotting 
example\n==========================\n\nA plotting example with a few simple tweaks\n\"\"\"\n\nimport numpy as np\nimport matplotlib\n\nmatplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt\n\nfig = plt.figure(figsize=(5, 4), dpi=72)\naxes = fig.add_axes([0.01, 0.01, 0.98, 0.98])\nx = np.linspace(0, 2, 200)\ny = np.sin(2 * np.pi * x)\nplt.plot(x, y, lw=0.25, c=\"k\")\nplt.xticks(np.arange(0.0, 2.0, 0.1))\nplt.yticks(np.arange(-1.0, 1.0, 0.1))\nplt.grid()\nplt.show()\n","repo_name":"scipy-lectures/scipy-lecture-notes","sub_path":"intro/matplotlib/examples/plot_bad.py","file_name":"plot_bad.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":3000,"dataset":"github-code","pt":"54"} +{"seq_id":"17888100646","text":"import folium\nfrom folium.plugins import Search\nimport os\nfrom os import listdir\nfrom os.path import isfile, join\nimport json\nfrom collections import namedtuple\nimport numpy as np\nimport math\nfrom datetime import datetime\n\nfrom util.city import City\n\nrootPath = \"/home/franklin/Desktop/Projetos/iBikeSafe/\"\n\ndef main():\n map = folium.Map(location=[-12.280265953993627, -38.96356796067759], height = '91.5%', zoom_start=3, min_zoom = 2, max_bounds=True)\n cities = list()\n\n print(\"\\n######################################################## MAP GENERATOR ##############################################\\n\")\n #Load cities paths------------------------------------------------------------------------------\n print(\"[INFO] Importing cities data from BikeWay system\")\n classifierPath = rootPath+\"BikeWay/outputFiles/CyclingView/\"\n classifierFiles = [f for f in listdir(classifierPath) if isfile(join(classifierPath, f))]\n today = datetime.today()\n month = today.month\n if month < 10:\n month = \"0\"+str(month)\n monthYear = str(today.year)+\"-\"+month\n\n #Gets each city file\n nCities = 1\n for classifierFile in classifierFiles:\n print(\" \"+str(nCities)+\". 
\"+classifierFile.replace(\".json\",\"\"))\n cities.append(importCity(classifierFile))\n nCities+=1\n\n print(\"\\n---------------------------------------------------------------------------------------------------------------------\\n\")\n\n print(\"[INFO] Importing variables information\")\n variablesInfo = importVariables()\n print(\" Group 1: Environment\")\n for variable in variablesInfo['monitoring']:\n print(\" > \"+variable[0]+\" in \"+variable[1])\n print(\" Group 2: Infrastructure\")\n for variable in variablesInfo['statistic']:\n print(\" > \"+variable[0]+\" in \"+variable[1])\n\n print(\"\\n---------------------------------------------------------------------------------------------------------------------\\n\")\n\n print(\"[INFO] Plotting cities paths stretches on map\")\n #Get new city----------------------------------------------------------------------------------\n nCities = 1\n for city in cities:\n print(\" \"+str(nCities)+\". \"+city.ID)\n for path in city.paths:\n for stretch in path.stretches:\n print(\" > Plotting \"+stretch.ID)\n insertBikePath(map, [stretch.P1, stretch.P2], stretch.type, stretch.direction, stretch.bikeWayQuality, getPopupLegend(stretch, variablesInfo))\n nCities+=1\n\n folium.plugins.LocateControl().add_to(map)\n folium.plugins.Geocoder().add_to(map)\n\n if not os.path.exists(rootPath+\"CyclingView/BikeWay-view/modules/maps/\"):\n os.makedirs(rootPath+\"CyclingView/BikeWay-view/modules/maps/\")\n\n map.save(rootPath+\"CyclingView/BikeWay-view/modules/maps/\"+monthYear+\".html\")\n\ndef importCity(cityFileName):\n #City file name\n fileName = rootPath+\"BikeWay/outputFiles/CyclingView/\"+cityFileName\n #City object\n city = None\n\n with open(fileName) as infile:\n #Loads city JSON file\n cityData = json.load(infile)\n\n #Loads city info by JSON data\n city = City(cityFileName.replace(\".json\", ''), [], [])\n\n pathCount = 1\n for pathData in cityData['paths']:\n print(\" > Path \"+str(pathCount)+\": \"+pathData['ID'])\n 
print(\" - Construction date: \"+pathData['constructionDate'])\n print(\" - Maintenance date: \"+pathData['maintenanceDate'])\n print(\" - Inspection date: \"+pathData['inspectionDate'])\n print(\" - Creator: \"+str(pathData['creator']))\n\n #Loads path info by JSON dataself.location\n path = city.insertPath(pathData['ID'], pathData['constructionDate'], pathData['maintenanceDate'], pathData['inspectionDate'], pathData['creator'])\n stretchCount = 1\n #Loads stretch info by JSON data\n for stretchData in pathData['stretches']:\n print(\" - Stretch \"+str(stretchCount)+\": \"+stretchData['ID'])\n print(\" . Signage: \"+str(stretchData['signage']))\n directionSymbol = \"<-->\"\n if stretchData['direction'] == 1:\n directionSymbol = \"-->\"\n elif stretchData['direction'] == 2:\n directionSymbol = \"<--\"\n print(\" . Points connection: \"+str(stretchData['P0'])+\" \"+directionSymbol+\" \"+str(stretchData['P1']))\n print(\" . Statistic data: \"+str(stretchData['statisticData']))\n print(\" . Bike passage number: \"+str(stretchData['bikePassageNumber']))\n print(\" . Average monitoring variables: \"+str(stretchData['averageMonitoringData']))\n print(\" . Peak monitoring variables: \"+str(stretchData['peakMonitoringData']))\n print(\" . Valley monitoring variables: \"+str(stretchData['valleyMonitoringData']))\n print(\" . 
BikeWay Quality: \"+str(stretchData['bikeWayQuality']))\n\n stretch = path.insertStretch(stretchData['ID'], stretchData['P0'], stretchData['P1'], stretchData['statisticData'][2], stretchData['direction'], stretchData['signage'])\n stretch.statisticData = stretchData['statisticData']\n stretch.bikePassageNumber = stretchData['bikePassageNumber']\n stretch.averageMonitoringData = stretchData['averageMonitoringData']\n stretch.peakMonitoringData = stretchData['peakMonitoringData']\n stretch.valleyMonitoringData = stretchData['valleyMonitoringData']\n stretch.bikeWayQuality = stretchData['bikeWayQuality']\n stretchCount+=1\n pathCount+=1\n\n #os.system(\"rm \"+fileName)\n\n return city\n\ndef importVariables():\n #Variables file name\n fileName = rootPath+\"CyclingView/BikeWay-view/modules/util/variables.json\"\n #Variables object\n variablesInfo = None\n\n with open(fileName) as infile:\n #Loads city JSON file\n variablesInfo = json.load(infile)\n\n return variablesInfo\n\ndef getPopupLegend(stretch, variablesInfo):\n signage = \"high\"\n if stretch.signage == 0:\n signage = \"none\"\n elif stretch.signage == 1:\n signage = \"low\"\n\n popupLegend=\"\"\"\n {id}
\n Size
\n {size}m
\n Signage
\n {signage}
\n Monitoring samples
\n {bikePassageNumber}
\n \"\"\".format(id=stretch.ID, size=stretch.getDistance(), signage=signage, bikePassageNumber=stretch.bikePassageNumber)\n\n for index in range(len(variablesInfo[\"monitoring\"])):\n average = \"--\"\n peak = \"--\"\n valley = \"--\"\n try:\n average = int(stretch.averageMonitoringData[index])\n peak = int(stretch.peakMonitoringData[index])\n valley = int(stretch.valleyMonitoringData[index])\n except:\n None\n\n popupLegend += \"\"\"\n {name}
\n {average}{unit}  (Min: {valley}   Max: {peak})
\n \"\"\".format(name = variablesInfo[\"monitoring\"][index][0], average = average, peak = peak\n , valley = valley, unit = variablesInfo[\"monitoring\"][index][1])\n\n for index in range(len(variablesInfo[\"statistic\"]) - 1):\n statistic = \"--\"\n try:\n statistic = int(stretch.statisticData[index])\n except:\n None\n\n popupLegend += \"\"\"{name}
{value}{unit}

\n \"\"\".format(name = variablesInfo[\"statistic\"][index][0], value = statistic, unit = variablesInfo[\"statistic\"][index][1])\n\n return popupLegend\n\ndef insertBikePath(map, path, type, direction, quality , popupLegend):\n color = None\n if quality == 4:\n color = 'blue'\n elif quality == 3:\n color = 'green'\n elif quality == 2:\n color = 'yellow'\n elif quality == 1:\n color = 'orange'\n elif quality == 0:\n color = 'red'\n else:\n color = 'grey'\n\n dash_array = 0\n if type == 0:\n dash_array = 20\n elif type == 1:\n dash_array = 10\n\n popup = folium.Popup(popupLegend, max_width=180,min_width=180)\n folium.PolyLine(path, color=color, weight=8, opacity=0.5, popup=popup, dash_array=dash_array).add_to(map)\n\n arrows = list()\n if direction == 0:\n arrows = getArrows([path[0],path[1]], color)\n arrows = arrows + getArrows([path[1],path[0]], color)\n elif direction == 1:\n arrows = getArrows([path[0],path[1]], color)\n elif direction == 2:\n arrows = getArrows([path[1],path[0]], color)\n\n for arrow in arrows:\n arrow.add_to(map)\n\ndef getArrows(locations, color):\n size = 3\n n_arrows = 5\n\n Point = namedtuple('Point', field_names=['lat', 'lon'])\n\n # creating point from Point named tuple\n point1 = Point(locations[0][0], locations[0][1])\n point2 = Point(locations[1][0], locations[1][1])\n\n # calculate the rotation required for the marker.\n #Reducing 90 to account for the orientation of marker\n # Get the degree of rotation\n angle = get_angle(point1, point2) - 90\n\n # get the evenly space list of latitudes and longitudes for the required arrows\n\n arrow_latitude = np.linspace(point1.lat, point2.lat, n_arrows + 2)[1:n_arrows+1]\n arrow_longitude = np.linspace(point1.lon, point2.lon, n_arrows + 2)[1:n_arrows+1]\n\n final_arrows = []\n\n #creating each \"arrow\" and appending them to our arrows list\n for points in zip(arrow_latitude, arrow_longitude):\n final_arrows.append(folium.RegularPolygonMarker(location=points, color = color, fill_color=color, 
number_of_sides=3, radius=size, rotation=angle))\n return final_arrows\n\ndef get_angle(p1, p2):\n longitude_diff = np.radians(p2.lon - p1.lon)\n\n latitude1 = np.radians(p1.lat)\n latitude2 = np.radians(p2.lat)\n\n x_vector = np.sin(longitude_diff) * np.cos(latitude2)\n y_vector = (np.cos(latitude1) * np.sin(latitude2)\n - (np.sin(latitude1) * np.cos(latitude2)\n * np.cos(longitude_diff)))\n angle = np.degrees(np.arctan2(x_vector, y_vector))\n\n # Checking and adjustring angle value on the scale of 360\n if angle < 0:\n return angle + 360\n return angle\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"lablara/bikeway","sub_path":"CyclingView/BikeWay-view/modules/mapgenerator.py","file_name":"mapgenerator.py","file_ext":"py","file_size_in_byte":10501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"1210770462","text":"def solution(record):\n answer = []\n\n list_r = [r.split(' ') for r in record]\n dict_uid = {} # key: 유저 아이디 value : 닉 네임\n\n for i in range(len(list_r)):\n type = list_r[i][0]\n uid = list_r[i][1]\n if type == 'Enter':\n nick = list_r[i][2]\n dict_uid[uid] = nick\n\n elif type == 'Change':\n nick = list_r[i][2]\n dict_uid[uid] = nick\n\n for i in range(len(list_r)):\n type = list_r[i][0]\n uid = list_r[i][1]\n if type == 'Enter':\n answer.append(dict_uid[uid] + '님이 들어왔습니다.')\n if type == 'Leave':\n answer.append(dict_uid[uid] + '님이 나갔습니다.')\n\n\n return answer\n\n\nprint(solution([\"Enter uid1234 Muzi\", \"Enter uid4567 Prodo\",\"Leave uid1234\",\"Enter uid1234 Prodo\",\"Change uid4567 Ryan\"]))\n","repo_name":"angelatto/Algorithm","sub_path":"programmers/0903.py","file_name":"0903.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"22448479108","text":"from django.shortcuts import render\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.core.paginator 
import Paginator, EmptyPage, PageNotAnInteger\nfrom django.urls import reverse\nimport jieba\nfrom functools import reduce\nfrom .models import Article,Word\nimport datetime,time\n# Create your views here.\n\ndef redtitle(text, wordlist):\n newtext = text\n for word in wordlist:\n if newtext.find(word) != -1:\n newtext = newtext.replace(word, ''+word+'')\n return newtext\n\ndef redcontent(text, wordlist):\n newtext = text\n length = len(text)\n minpos = length+10\n for word in wordlist:\n tmp = newtext.find(word)\n if tmp != -1:\n if tmp < minpos:\n minpos = tmp\n if (minpos > 20) & (minpos < length+1):\n newtext = '...'+newtext[minpos-20:minpos+180]+'...'\n else:\n newtext = newtext[0:200]+'...'\n for word in wordlist:\n if newtext.find(word) != -1:\n newtext = newtext.replace(word, ''+word+'')\n #print(newtext)\n return newtext\n\n\ndef result(request, text, starttime, endtime):\n # return render(request, 'index.html')\n print('in result:',text,starttime,endtime)\n wordList = text.strip().split(' ')\n searchList = []\n articleList = set()\n for x in wordList:\n searchList.extend(jieba.lcut(x))\n searchList = list(set(searchList))\n #单词的列表\n for x in searchList:\n try:\n curWord = Word.objects.get(pk=x)\n except:\n continue\n if articleList:\n articleList = articleList & set(curWord.m_article.filter(m_time__range=(starttime,endtime)))\n else:\n articleList = set(curWord.m_article.filter(m_time__range=(starttime,endtime)))\n articleList = list(articleList)\n #生成文章结果的列表\n articleList = sorted(articleList, key=lambda x: x.m_time, reverse=True)\n lengthArticle = len(articleList)\n if lengthArticle:\n dicts = [{\n 'url': each.m_url,\n 'title': redtitle(each.m_title, wordList),\n 'content': redcontent(each.m_content, wordList),\n } for each in articleList]\n # 显示红色文字\n print(dicts)\n paginator = Paginator(dicts, 20)\n page = request.GET.get('page')\n try:\n contacts = paginator.page(page)\n except PageNotAnInteger:\n contacts = paginator.page(1)\n except EmptyPage:\n 
contacts = paginator.page(paginator.num_pages)\n\n return render(request, 'result.html', {\n 'num': lengthArticle,\n 'text': text,\n 'starttime': starttime,\n 'endtime': endtime,\n 'contacts': contacts\n })\n else:\n return render(request,'notfound.html')\n\n\n\n\ndef search(request):\n if request.method=='GET':\n return render(request,'index.html')\n else:\n text = request.POST.get('text', None)\n label = request.POST.get('select', None)\n starttime = request.POST.get('starttime', None)\n endtime = request.POST.get('endtime', None)\n print(starttime,endtime)\n if starttime=='':\n starttime = '2010-01-01'\n if endtime=='':\n endtime = time.strftime('%Y-%m-%d')\n print('starttime=',starttime,'endtime=',endtime)\n if text:\n return HttpResponseRedirect(reverse('result',args=(text,starttime,endtime)))\n else:\n return render(request, 'index.html')\n\ndef index(request):\n return render(request, 'index.html')","repo_name":"SlongLiu/THUnewsSearchEngine","sub_path":"searchEngine/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"32711711356","text":"import os\n\nclass PostNlFileParser(object):\n def __init__(self, inputfile):\n self._f = open(inputfile, encoding=\"iso-8859-1\")\n self._lines = []\n self._lines = self._f.readlines()\n self.raw_header, self.raw_documents, self.raw_footer = self.split_file(self._lines)\n self.doctype = self.determine_doctype(self.raw_header)\n self.documents = self.split_documents(self.raw_documents)\n broken_documents = self.documents\n fixed_documents = []\n for d in broken_documents:\n fixed_documents.append(self.fix_document(d))\n self.documents = fixed_documents\n\n \n\n @staticmethod\n def split_file(lines):\n header = []\n documents = []\n footer = []\n for l in lines:\n if l.startswith(\"A\"):\n header.append(l)\n elif l.startswith(\"V\"):\n documents.append(l)\n elif l.startswith(\"Z\"):\n 
footer.append(l)\n else:\n raise ValueError(\"Invallid linestart: {}\".format(l[0:1]))\n\n return header, documents, footer\n\n @staticmethod\n def determine_doctype(header):\n a040_found = False\n for l in header:\n if l.startswith(\"A040\"):\n a040_found = True\n break\n if a040_found:\n return \"16R\"\n else:\n return \"25R\"\n\n @staticmethod\n def split_documents(raw_documents):\n documents = []\n new_document = []\n for l in raw_documents:\n if l.startswith(\"V010\"):\n if len(new_document) > 0:\n raise ValueError(\"New Document array was not empty! Is the inputfile syntactical correct?\")\n new_document.append(l)\n elif l.startswith(\"V999\"):\n new_document.append(l)\n documents.append(new_document)\n new_document = []\n else:\n new_document.append(l)\n return documents\n\n def fix_document(self, document):\n if self.doctype == \"25R\":\n required_lines = [\"V010\", \"V020\", \"V035\", \"V040\", \"V050\", \"V060\", \"V070\", \"V080\", \"V081\", \"V090\", \"V091\",\n \"V100\", \"V110\", \"V140\", \"V150\", \"V151\", \"V160\", \"V170\", \"V180\", \"V200\", \"V210\", \"V211\",\n \"V212\", \"V213\", \"V999\"]\n elif self.doctype == \"16R\":\n required_lines = [\"V010\", \"V020\", \"V035\", \"V040\", \"V050\", \"V060\", \"V070\", \"V080\", \"V081\", \"V090\", \"V091\",\n \"V100\", \"V110\", \"V120\", \"V140\", \"V999\"]\n else:\n raise ValueError(\"Unknown Documenttype {}\".format(self.doctype))\n\n keys_in_documents = [l[0:4] for l in document]\n for req in required_lines:\n if req not in keys_in_documents:\n document.append(req + ' \\n')\n return sorted(document)\n\n def save_as(self, filename):\n with open(filename, \"w\", encoding=\"iso-8859-1\") as outfile:\n outfile.writelines(self.raw_header)\n for document in self.documents:\n outfile.writelines(document)\n outfile.writelines(self.raw_footer)\n\n def delete(self):\n self._f.close()\n f_path = self._f.name\n 
os.remove(f_path)\n\n","repo_name":"aacoba/PostNLInterfaceFixer","sub_path":"filerepair.py","file_name":"filerepair.py","file_ext":"py","file_size_in_byte":3293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73039348641","text":"import sys\nn, m = map(int, input().split())\ngraph = [list(map(int, list(input().rstrip()))) for _ in range(n)]\n\n\"\"\"\n1. 사각형을 세 직사각형으로 나눈다.\n2. 세 직사각형 범위 내의 값들을 더해 모두 곱한다.\n3. answer 값을 더 큰 값으로 갱신한다. \n\"\"\"\n\ndef cal(sy, ey, sx, ex):\n ret = 0\n for i in range(sy, ey):\n for j in range(sx, ex):\n ret += graph[i][j]\n return ret\n\nanswer = 0\n# 1번 모양\nfor i in range(1, n):\n for j in range(1, m):\n first = cal(0, i, 0, m)\n second = cal(i, n, 0, j)\n third = cal(i, n, j, m)\n answer = max(answer, first * second * third)\n\n# 2번 모양\nfor i in range(1, n-1):\n for j in range(i+1, n):\n first = cal(0, i, 0, m)\n second = cal(i, j, 0, m)\n third = cal(j, n, 0, m)\n #print(i, j, first * second * third)\n answer = max(answer, first * second * third)\n\n# 3번모양\nfor i in range(1, m-1):\n for j in range(i+1, m):\n first = cal(0, n, 0, i)\n second = cal(0, n, i, j)\n third = cal(0, n, j, m)\n answer = max(answer, first * second * third)\n\n# 4번 모양\n\nfor i in range(1, n):\n for j in range(1, m):\n first = cal(0, i, 0, j)\n second = cal(i, n, 0, j)\n third = cal(0, n, j, m)\n answer = max(answer, first * second * third)\n\n# 5번 모양\n\nfor i in range(1, n):\n for j in range(1, m):\n first = cal(0, n, 0, j)\n second = cal(0, i, j, m)\n third = cal(i, n, j, m)\n answer = max(answer, first * second * third)\n\n\n# 6번모양\nfor i in range(1, n):\n for j in range(1, m):\n first = cal(0, i, 0, j)\n second = cal(0, i, j, m)\n third = cal(i, n, 0, m)\n answer = max(answer, first * second * third)\n\n \nprint(answer)\n 
","repo_name":"jeean0668/algorithm","sub_path":"bruteforce/1451.py","file_name":"1451.py","file_ext":"py","file_size_in_byte":1808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"6853755554","text":"import subprocess\n\nfrom RouterConfig.common import schema as schemautils\nfrom RouterConfig.priority_strategy import schemas\nfrom RouterConfig.priority_strategy import logger\nfrom RouterConfig.common.shell.api import API\n\n\nclass PriorityStrategyParams(object):\n\n fields = [\n 'source_mac',\n 'ip_address',\n 'port',\n 'protocol',\n 'interface',\n 'priority'\n ]\n\n execute_cmd_api = API(logger=logger)\n\n def as_dict(self):\n \"\"\"transform the object to dict\"\"\"\n return {\n k: getattr(self, k)\n for k in self.fields\n if self.obj_attr_is_set(k)\n }\n\n @schemautils.validate_schema(schemas.priority_strategy_schema, logger=logger)\n def _from_json(self, body):\n \"\"\"\n transform from the json data to object\n :param body: the json data\n :type dict\n :return:\n \"\"\"\n for field in self.fields:\n body_field_val = body.get(field, None)\n if body_field_val is None or len(body_field_val) == 0:\n continue\n setattr(self, field, body_field_val)\n return self\n\n def obj_attr_is_set(self, attr):\n \"\"\"check if the object has the attribute\"\"\"\n try:\n getattr(self, attr)\n except AttributeError:\n return False\n else:\n return True\n\n def _to_iptables_and_tc_cmds(self, mark_number):\n \"\"\"\n transform from the PriorityStrategyParams object to iptables and tc commands\n :param mark_number: number which should be used to mark the data\n :type int\n :return: command of tc and iptables\n :type: list\n \"\"\"\n cmds = []\n cmds.append(self._to_iptables_cmd(mark_number))\n cmds.extend(self._to_tc_cmds(mark_number))\n return cmds\n\n def _to_iptables_cmd(self, mark_number):\n \"\"\"\n transform from the PriorityStrategyParams object to iptables command.\n :param mark_number: number which should be 
used to mark the data\n :return: command of iptables\n :type str\n \"\"\"\n cmd = 'iptables '\n cmd_table = '-t raw -A PREROUTING '\n cmd_params = ''\n cmd_act = '-j MARK --set-mark ' + str(mark_number)\n\n if self.obj_attr_is_set('source_mac'):\n mac_params = getattr(self, 'source_mac')\n cmd_params += '-m mac --mac-source ' + mac_params + ' '\n\n if self.obj_attr_is_set('port'):\n if self.obj_attr_is_set('protocol'):\n if getattr(self, 'protocol') == 'all':\n logger.error('\\'port\\' can not be specified while \\'protocol\\' is set to \\'all\\'.', exc_info=True)\n return ''\n else:\n logger.error('\\'port\\' can not be specified while \\'protocol\\' is not set.', exc_info=True)\n return ''\n pro = getattr(self, 'protocol')\n port_params = getattr(self, 'port')\n if port_params.get('src', None) is not None and len(port_params.get('src')) > 0:\n cmd_params += '-m ' + pro + ' --sport ' + port_params.get('src') + ' '\n if port_params.get('dst', None) is not None and len(port_params.get('dst')) > 0:\n cmd_params += '-m ' + pro + ' --dport ' + port_params.get('dst') + ' '\n\n if self.obj_attr_is_set('protocol'):\n protocol_params = getattr(self, 'protocol')\n cmd_params += '-p ' + protocol_params + ' '\n\n if self.obj_attr_is_set('ip_address'):\n address_params = getattr(self, 'ip_address')\n if address_params.get('src', None) is not None and len(address_params.get('src')) > 0:\n cmd_params += '-s '\n for src_address in address_params.get('src'):\n cmd_params += src_address + ','\n cmd_params = cmd_params.rsplit(',', 1)[0] + ' '\n if address_params.get('dst', None) is not None and len(address_params.get('dst')) > 0:\n cmd_params += '-d '\n for dst_address in address_params.get('dst'):\n cmd_params += dst_address + ','\n cmd_params = cmd_params.rsplit(',', 1)[0] + ' '\n\n cmd = cmd + cmd_table + cmd_params + cmd_act\n return cmd\n\n def _to_tc_cmds(self, mark_number):\n \"\"\"\n transform from the PriorityStrategyParams object to tc commands\n :param mark_number: number 
which should be used to mark the data\n :type int\n :return: commands of tc\n :type list\n \"\"\"\n cmds = []\n\n prio_handle = self._get_prio_info()\n if prio_handle is not None:\n cmds.append(self._generate_prio_filter(prio_handle, mark_number))\n else:\n htb_info = self._get_htb_info()\n if htb_info is not None:\n htb_handle, htb_classid = htb_info\n prio_handle = str(int(htb_handle) + 1)\n\n # create a qdisc prio first before generate filters\n self.apply_qdisc_prio(htb_classid, prio_handle)\n cmds.append(self._generate_prio_filter(prio_handle, mark_number))\n else:\n self.init_qdisc()\n # create a qdisc prio first before generate filters\n self.apply_qdisc_prio('root', '1')\n cmds.append(self._generate_prio_filter('1', mark_number))\n\n return cmds\n\n def init_qdisc(self):\n \"\"\"\n Delete all qdisc on root.\n :return:\n \"\"\"\n cmd = 'tc qdisc del dev {} root'.format(self.interface)\n self.execute_cmd_api.execute(cmd)\n\n def apply_qdisc_prio(self, parent_handle, handle):\n \"\"\"\n Configure qdisc for the priority queue.\n :param parent_handle: parent's handle (such as 1:1)\n :param handle: handle of self (such as 2)\n :return:\n \"\"\"\n cmd = 'tc qdisc add dev {} parent {} handle {}: prio'\\\n .format(self.interface, parent_handle, handle)\n self.execute_cmd_api.execute(cmd)\n\n def _generate_prio_filter(self, parent_handle, handle):\n \"\"\"\n Generate filters for the priority queue.\n :param parent_handle: parent's handle (such as 1)\n :param handle: handle of self (such as 2)\n :return:\n \"\"\"\n priority = self._get_filter_priority()\n cmd = 'tc filter add dev {} prio 8 parent {}: handle {} fw flowid {}:{}'\\\n .format(self.interface, parent_handle, handle, parent_handle, priority)\n return cmd\n\n def _get_filter_priority(self):\n \"\"\"\n Get priority based on the configuration file.\n :return:\n \"\"\"\n prio_str = getattr(self, 'priority')\n if prio_str == 'low':\n return '3'\n elif prio_str == 'high':\n return '1'\n else:\n return '2'\n\n def 
_get_prio_info(self):\n \"\"\"\n Get the handle of the existing prio qdisc.\n :return: handle of prio qdisc (such as 2)\n :type str\n \"\"\"\n ret = subprocess.Popen('tc -s qdisc show dev %s | grep \" prio \" | cut -d \" \" -f 3' % self.interface,\n shell=True, stdout=subprocess.PIPE).communicate()[0].strip(':\\n')\n if ret == '':\n return None\n else:\n prio_handle = ret\n return prio_handle\n\n def _get_htb_info(self):\n \"\"\"\n Get the handle and classid of the existing htb qdisc.\n :return: handle(such as 1) and classid(such as 1:1)\n :type str\n \"\"\"\n ret = subprocess.Popen('tc -s qdisc show dev %s | grep \" htb \" | cut -d \" \" -f 3' % self.interface,\n shell=True, stdout=subprocess.PIPE).communicate()[0].strip(':\\n')\n if ret == '':\n return None\n else:\n htb_handle = ret\n ret = subprocess.Popen('tc -s class show dev %s | grep \" htb \" | cut -d \" \" -f 3' % self.interface,\n shell=True, stdout=subprocess.PIPE).communicate()[0].strip('\\n')\n if ret == '':\n return None\n else:\n htb_classid = ret\n return (htb_handle, htb_classid)\n","repo_name":"yangsijie666/RouterConfig","sub_path":"RouterConfig/priority_strategy/object.py","file_name":"object.py","file_ext":"py","file_size_in_byte":8227,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"42702126629","text":"import copy\nimport math\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport seaborn as sns\nimport time\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom collections import Counter\nfrom langconv import Converter\nfrom nltk import word_tokenize\nfrom torch.autograd import Variable\n\n\nPAD = 0 # padding占位符的索引\nUNK = 1 # 未登录词标识符的索引\nBATCH_SIZE = 128 # 批次大小\nEPOCHS = 20 # 训练轮数\nLAYERS = 6 # transformer中encoder、decoder层数\nH_NUM = 8 # 多头注意力个数\nD_MODEL = 256 # 输入、输出词向量维数\nD_FF = 1024 # feed forward全连接层维数\nDROPOUT = 0.1 # dropout比例\nMAX_LENGTH = 60 # 语句最大长度\n\nTRAIN_FILE = 
'nmt/en-cn/train.txt' # 训练集\nDEV_FILE = \"nmt/en-cn/dev.txt\" # 验证集\nSAVE_FILE = 'save/model.pt' # 模型保存路径\n#DEVICE = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nDEVICE = 'cpu'\n\ndef seq_padding(X, padding=PAD):\n \"\"\"\n 按批次(batch)对数据填充、长度对齐\n \"\"\"\n # 计算该批次各条样本语句长度\n L = [len(x) for x in X]\n # 获取该批次样本中语句长度最大值\n ML = max(L)\n # 遍历该批次样本,如果语句长度小于最大长度,则用padding填充\n return np.array([\n np.concatenate([x, [padding] * (ML - len(x))]) if len(x) < ML else x for x in X\n ])\n\ndef cht_to_chs(sent):\n sent = Converter(\"zh-hans\").convert(sent)\n sent.encode(\"utf-8\")\n return sent\n\n\nclass PrepareData:\n def __init__(self, train_file, dev_file):\n # 读取数据、分词\n self.train_en, self.train_cn = self.load_data(train_file)\n self.dev_en, self.dev_cn = self.load_data(dev_file)\n # 构建词表\n self.en_word_dict, self.en_total_words, self.en_index_dict = \\\n self.build_dict(self.train_en)\n self.cn_word_dict, self.cn_total_words, self.cn_index_dict = \\\n self.build_dict(self.train_cn)\n # 单词映射为索引\n self.train_en, self.train_cn = self.word2id(self.train_en, self.train_cn, self.en_word_dict, self.cn_word_dict)\n self.dev_en, self.dev_cn = self.word2id(self.dev_en, self.dev_cn, self.en_word_dict, self.cn_word_dict)\n # 划分批次、填充、掩码\n self.train_data = self.split_batch(self.train_en, self.train_cn, BATCH_SIZE)\n self.dev_data = self.split_batch(self.dev_en, self.dev_cn, BATCH_SIZE)\n\n def load_data(self, path):\n \"\"\"\n 读取英文、中文数据\n 对每条样本分词并构建包含起始符和终止符的单词列表\n 形式如:en = [['BOS', 'i', 'love', 'you', 'EOS'], ['BOS', 'me', 'too', 'EOS'], ...]\n cn = [['BOS', '我', '爱', '你', 'EOS'], ['BOS', '我', '也', '是', 'EOS'], ...]\n \"\"\"\n en = []\n cn = []\n with open(path, mode=\"r\", encoding=\"utf-8\") as f:\n for line in f.readlines():\n sent_en, sent_cn = line.strip().split(\"\\t\")\n sent_en = sent_en.lower()\n sent_cn = cht_to_chs(sent_cn)\n sent_en = [\"BOS\"] + word_tokenize(sent_en) + [\"EOS\"]\n # 中文按字符切分\n sent_cn = [\"BOS\"] + [char for char in sent_cn] + 
[\"EOS\"]\n en.append(sent_en)\n cn.append(sent_cn)\n return en, cn\n\n def build_dict(self, sentences, max_words=5e4):\n \"\"\"\n 构造分词后的列表数据\n 构建单词-索引映射(key为单词,value为id值)\n \"\"\"\n # 统计数据集中单词词频\n word_count = Counter([word for sent in sentences for word in sent])\n # 按词频保留前max_words个单词构建词典\n # 添加UNK和PAD两个单词\n ls = word_count.most_common(int(max_words))\n total_words = len(ls) + 2\n word_dict = {w[0]: index + 2 for index, w in enumerate(ls)}\n word_dict['UNK'] = UNK\n word_dict['PAD'] = PAD\n # 构建id2word映射\n index_dict = {v: k for k, v in word_dict.items()}\n return word_dict, total_words, index_dict\n\n def word2id(self, en, cn, en_dict, cn_dict, sort=True):\n \"\"\"\n 将英文、中文单词列表转为单词索引列表\n `sort=True`表示以英文语句长度排序,以便按批次填充时,同批次语句填充尽量少\n \"\"\"\n length = len(en)\n # 单词映射为索引\n out_en_ids = [[en_dict.get(word, UNK) for word in sent] for sent in en]\n out_cn_ids = [[cn_dict.get(word, UNK) for word in sent] for sent in cn]\n\n # 按照语句长度排序\n def len_argsort(seq):\n \"\"\"\n 传入一系列语句数据(分好词的列表形式),\n 按照语句长度排序后,返回排序后原来各语句在数据中的索引下标\n \"\"\"\n return sorted(range(len(seq)), key=lambda x: len(seq[x]))\n\n # 按相同顺序对中文、英文样本排序\n if sort:\n # 以英文语句长度排序\n sorted_index = len_argsort(out_en_ids)\n out_en_ids = [out_en_ids[idx] for idx in sorted_index]\n out_cn_ids = [out_cn_ids[idx] for idx in sorted_index]\n return out_en_ids, out_cn_ids\n\n def split_batch(self, en, cn, batch_size, shuffle=True):\n \"\"\"\n 划分批次\n `shuffle=True`表示对各批次顺序随机打乱\n \"\"\"\n # 每隔batch_size取一个索引作为后续batch的起始索引\n idx_list = np.arange(0, len(en), batch_size)\n # 起始索引随机打乱\n if shuffle:\n np.random.shuffle(idx_list)\n # 存放所有批次的语句索引\n batch_indexs = []\n for idx in idx_list:\n \"\"\"\n 形如[array([4, 5, 6, 7]), \n array([0, 1, 2, 3]), \n array([8, 9, 10, 11]),\n ...]\n \"\"\"\n # 起始索引最大的批次可能发生越界,要限定其索引\n batch_indexs.append(np.arange(idx, min(idx + batch_size, len(en))))\n # 构建批次列表\n batches = []\n for batch_index in batch_indexs:\n # 按当前批次的样本索引采样\n batch_en = [en[index] for index in batch_index]\n batch_cn = 
[cn[index] for index in batch_index]\n # 对当前批次中所有语句填充、对齐长度\n # 维度为:batch_size * 当前批次中语句的最大长度\n batch_cn = seq_padding(batch_cn)\n batch_en = seq_padding(batch_en)\n # 将当前批次添加到批次列表\n # Batch类用于实现注意力掩码\n batches.append(Batch(batch_en, batch_cn))\n return batches\n\n# 示例\n# data_mini = PrepareData(\"./nmt/en-cn/train_mini.txt\", \"./nmt/en-cn/dev_mini.txt\")\n# print(\"*** batch 0, en ***\")\n# print(data_mini.train_data[0][0])\n# print(\"*** batch 0, chs ***\")\n# print(data_mini.train_data[0][1])\n# print(\"*** en word to index ***\")\n# print(data_mini.en_word_dict)\n# print(\"*** chs word to index ***\")\n# print(data_mini.cn_word_dict)\nclass Embeddings(nn.Module):\n def __init__(self, d_model, vocab):\n super(Embeddings, self).__init__()\n # Embedding层\n self.lut = nn.Embedding(vocab, d_model)\n # Embedding维数\n self.d_model = d_model\n\n def forward(self, x):\n # 返回x的词向量(需要乘以math.sqrt(d_model))\n return self.lut(x) * math.sqrt(self.d_model)\n\nclass PositionalEncoding(nn.Module):\n def __init__(self, d_model, dropout, max_len=5000):\n super(PositionalEncoding, self).__init__()\n self.dropout = nn.Dropout(p=dropout)\n # 位置编码矩阵,维度[max_len, embedding_dim]\n pe = torch.zeros(max_len, d_model, device=DEVICE)\n # 单词位置\n position = torch.arange(0.0, max_len, device=DEVICE)\n position.unsqueeze_(1)\n # 使用exp和log实现幂运算\n div_term = torch.exp(torch.arange(0.0, d_model, 2, device=DEVICE) * (- math.log(1e4) / d_model))\n div_term.unsqueeze_(0)\n # 计算单词位置沿词向量维度的纹理值\n pe[:, 0 : : 2] = torch.sin(torch.mm(position, div_term))\n pe[:, 1 : : 2] = torch.cos(torch.mm(position, div_term))\n # 增加批次维度,[1, max_len, embedding_dim]\n pe.unsqueeze_(0)\n # 将位置编码矩阵注册为buffer(不参加训练)\n self.register_buffer('pe', pe)\n\n def forward(self, x):\n # 将一个批次中语句所有词向量与位置编码相加\n # 注意,位置编码不参与训练,因此设置requires_grad=False\n x += Variable(self.pe[:, : x.size(1), :], requires_grad=False)\n return self.dropout(x)\n\n\n# emb_dim = 64\n# max_seq_len = 100\n# seq_len = 20\n\n# pe = PositionalEncoding(emb_dim, 0, 
max_seq_len)\n# positional_encoding = pe(torch.zeros(1, seq_len, emb_dim, device=DEVICE))\n# plt.figure()\n# sns.heatmap(positional_encoding.squeeze().to(\"cpu\"))\n# plt.xlabel(\"i\")\n# plt.ylabel(\"pos\")\n# plt.show()\n#\n# plt.figure()\n# y = positional_encoding.to(\"cpu\").numpy()\n# plt.plot(np.arange(seq_len), y[0, :, 0 : 64 : 8], \".\")\n# plt.legend([\"dim %d\" % p for p in [0, 7, 15, 31, 63]])\n# plt.show()\n\ndef clones(module, N):\n \"\"\"\n 克隆基本单元,克隆的单元之间参数不共享\n \"\"\"\n return nn.ModuleList([\n copy.deepcopy(module) for _ in range(N)\n ])\n\ndef attention(query, key, value, mask=None, dropout=None):\n \"\"\"\n Scaled Dot-Product Attention(方程(4))\n \"\"\"\n # q、k、v向量长度为d_k\n d_k = query.size(-1)\n # 矩阵乘法实现q、k点积注意力,sqrt(d_k)归一化\n scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)\n # 注意力掩码机制\n if mask is not None:\n scores = scores.masked_fill(mask==0, -1e9)\n # 注意力矩阵softmax归一化\n p_attn = F.softmax(scores, dim=-1)\n # dropout\n if dropout is not None:\n p_attn = dropout(p_attn)\n # 注意力对v加权\n return torch.matmul(p_attn, value), p_attn\n\nclass MultiHeadedAttention(nn.Module):\n \"\"\"\n Multi-Head Attention(编码器(2))\n \"\"\"\n def __init__(self, h, d_model, dropout=0.1):\n super(MultiHeadedAttention, self).__init__()\n \"\"\"\n `h`:注意力头的数量\n `d_model`:词向量维数\n \"\"\"\n # 确保整除\n assert d_model % h == 0\n # q、k、v向量维数\n self.d_k = d_model // h\n # 头的数量\n self.h = h\n # WQ、WK、WV矩阵及多头注意力拼接变换矩阵WO\n self.linears = clones(nn.Linear(d_model, d_model), 4)\n self.attn = None\n self.dropout = nn.Dropout(p=dropout)\n\n def forward(self, query, key, value, mask=None):\n if mask is not None:\n mask = mask.unsqueeze(1)\n # 批次大小\n nbatches = query.size(0)\n # WQ、WK、WV分别对词向量线性变换,并将结果拆成h块\n query, key, value = [\n l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2)\n for l, x in zip(self.linears, (query, key, value))\n ]\n # 注意力加权\n x, self.attn = attention(query, key, value, mask=mask, dropout=self.dropout)\n # 多头注意力加权拼接\n x = x.transpose(1, 
2).contiguous().view(nbatches, -1, self.h * self.d_k)\n # 对多头注意力加权拼接结果线性变换\n return self.linears[-1](x)\n\nclass LayerNorm(nn.Module):\n def __init__(self, features, eps=1e-6):\n super(LayerNorm, self).__init__()\n # α、β分别初始化为1、0\n self.a_2 = nn.Parameter(torch.ones(features))\n self.b_2 = nn.Parameter(torch.zeros(features))\n # 平滑项\n self.eps = eps\n\n def forward(self, x):\n # 沿词向量方向计算均值和方差\n mean = x.mean(dim=-1, keepdim=True)\n std = x.std(dim=-1, keepdim=True)\n # 沿词向量和语句序列方向计算均值和方差\n # mean = x.mean(dim=[-2, -1], keepdim=True)\n # std = x.std(dim=[-2, -1], keepdim=True)\n # 归一化\n x = (x - mean) / torch.sqrt(std ** 2 + self.eps)\n return self.a_2 * x + self.b_2\n\nclass SublayerConnection(nn.Module):\n \"\"\"\n 通过层归一化和残差连接,连接Multi-Head Attention和Feed Forward\n \"\"\"\n def __init__(self, size, dropout):\n super(SublayerConnection, self).__init__()\n self.norm = LayerNorm(size)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, x, sublayer):\n # 层归一化\n x_ = self.norm(x)\n x_ = sublayer(x_)\n x_ = self.dropout(x_)\n # 残差连接\n return x + x_\n\nclass PositionwiseFeedForward(nn.Module):\n def __init__(self, d_model, d_ff, dropout=0.1):\n super(PositionwiseFeedForward, self).__init__()\n self.w_1 = nn.Linear(d_model, d_ff)\n self.w_2 = nn.Linear(d_ff, d_model)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, x):\n x = self.w_1(x)\n x = F.relu(x)\n x = self.dropout(x)\n x = self.w_2(x)\n return x\n\n\n\nemb_dim = 64\nmax_seq_len = 100\nseq_len = 20\n\npe = PositionalEncoding(emb_dim, 0, max_seq_len)\npositional_encoding = pe(torch.zeros(1, seq_len, emb_dim, device=DEVICE))\nplt.figure()\nsns.heatmap(positional_encoding.squeeze().to(\"cpu\"))\nplt.xlabel(\"i\")\nplt.ylabel(\"pos\")\nplt.show()\n\nplt.figure()\ny = positional_encoding.to(\"cpu\").numpy()\nplt.plot(np.arange(seq_len), y[0, :, 0 : 64 : 8], \".\")\nplt.legend([\"dim %d\" % p for p in [0, 7, 15, 31, 63]])\nplt.show()\n\n\n\nclass EncoderLayer(nn.Module):\n def __init__(self, size, 
self_attn, feed_forward, dropout):\n super(EncoderLayer, self).__init__()\n self.self_attn = self_attn\n self.feed_forward = feed_forward\n # SublayerConnection作用连接multi和ffn\n self.sublayer = clones(SublayerConnection(size, dropout), 2)\n # d_model\n self.size = size\n\n def forward(self, x, mask):\n # 将embedding层进行Multi head Attention\n x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, mask))\n # attn的结果直接作为下一层输入\n return self.sublayer[1](x, self.feed_forward)\n\n\nclass Encoder(nn.Module):\n def __init__(self, layer, N):\n \"\"\"\n layer = EncoderLayer\n \"\"\"\n super(Encoder, self).__init__()\n # 复制N个编码器基本单元\n self.layers = clones(layer, N)\n # 层归一化\n self.norm = LayerNorm(layer.size)\n\n def forward(self, x, mask):\n \"\"\"\n 循环编码器基本单元N次\n \"\"\"\n for layer in self.layers:\n x = layer(x, mask)\n return self.norm(x)\n\n\nclass Decoder(nn.Module):\n def __init__(self, layer, N):\n super(Decoder, self).__init__()\n self.layers = clones(layer, N)\n self.norm = LayerNorm(layer.size)\n\n def forward(self, x, memory, src_mask, tgt_mask):\n \"\"\"\n 循环解码器基本单元N次\n \"\"\"\n for layer in self.layers:\n x = layer(x, memory, src_mask, tgt_mask)\n return self.norm(x)\n\nclass DecoderLayer(nn.Module):\n def __init__(self, size, self_attn, src_attn, feed_forward, dropout):\n super(DecoderLayer, self).__init__()\n self.size = size\n # 自注意力机制\n self.self_attn = self_attn\n # 上下文注意力机制\n self.src_attn = src_attn\n self.feed_forward = feed_forward\n self.sublayer = clones(SublayerConnection(size, dropout), 3)\n\n def forward(self, x, memory, src_mask, tgt_mask):\n # memory为编码器输出隐表示\n m = memory\n # 自注意力机制,q、k、v均来自解码器隐表示\n x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, tgt_mask))\n # 上下文注意力机制:q为来自解码器隐表示,而k、v为编码器隐表示\n x = self.sublayer[1](x, lambda x: self.self_attn(x, m, m, src_mask))\n return self.sublayer[2](x, self.feed_forward)\n\nclass Generator(nn.Module):\n \"\"\"\n 解码器输出经线性变换和softmax函数映射为下一时刻预测单词的概率分布\n \"\"\"\n def __init__(self, d_model, vocab):\n 
super(Generator, self).__init__()\n # decode后的结果,先进入一个全连接层变为词典大小的向量\n self.proj = nn.Linear(d_model, vocab)\n\n def forward(self, x):\n # 然后再进行log_softmax操作(在softmax结果上再做多一次log运算)\n return F.log_softmax(self.proj(x), dim=-1)\n\ndef subsequent_mask(size):\n \"Mask out subsequent positions.\"\n # 设定subsequent_mask矩阵的shape\n attn_shape = (1, size, size)\n # 生成一个右上角(不含主对角线)为全1,左下角(含主对角线)为全0的subsequent_mask矩阵\n subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')\n # 返回一个右上角(不含主对角线)为全False,左下角(含主对角线)为全True的subsequent_mask矩阵\n return torch.from_numpy(subsequent_mask) == 0\n\nplt.figure(figsize=(5, 5))\nplt.imshow(subsequent_mask(20)[0])\nplt.show()\n\n\nclass Batch:\n \"\"\"\n 批次类\n 1. 输入序列(源)\n 2. 输出序列(目标)\n 3. 构造掩码\n \"\"\"\n\n def __init__(self, src, trg=None, pad=PAD):\n # 将输入、输出单词id表示的数据规范成整数类型\n src = torch.from_numpy(src).to(DEVICE).long()\n trg = torch.from_numpy(trg).to(DEVICE).long()\n self.src = src\n # 对于当前输入的语句非空部分进行判断,bool序列\n # 并在seq length前面增加一维,形成维度为 1×seq length 的矩阵\n self.src_mask = (src != pad).unsqueeze(-2)\n # 如果输出目标不为空,则需要对解码器使用的目标语句进行掩码\n if trg is not None:\n # 解码器使用的目标输入部分\n self.trg = trg[:, : -1] # 去除最后一列\n # 解码器训练时应预测输出的目标结果\n self.trg_y = trg[:, 1:] #去除第一列的SOS\n # 将目标输入部分进行注意力掩码\n self.trg_mask = self.make_std_mask(self.trg, pad)\n # 将应输出的目标结果中实际的词数进行统计\n self.ntokens = (self.trg_y != pad).data.sum()\n\n # 掩码操作\n @staticmethod\n def make_std_mask(tgt, pad):\n \"Create a mask to hide padding and future words.\"\n tgt_mask = (tgt != pad).unsqueeze(-2)\n tgt_mask = tgt_mask & Variable(subsequent_mask(tgt.size(-1)).type_as(tgt_mask.data))\n return tgt_mask\n\nclass Transformer(nn.Module):\n def __init__(self, encoder, decoder, src_embed, tgt_embed, generator):\n super(Transformer, self).__init__()\n self.encoder = encoder\n self.decoder = decoder\n self.src_embed = src_embed\n self.tgt_embed = tgt_embed\n self.generator = generator\n\n def encode(self, src, src_mask):\n return self.encoder(self.src_embed(src), src_mask)\n\n def 
decode(self, memory, src_mask, tgt, tgt_mask):\n return self.decoder(self.tgt_embed(tgt), memory, src_mask, tgt_mask)\n\n def forward(self, src, tgt, src_mask, tgt_mask):\n # encoder的结果作为decoder的memory参数传入,进行decode\n return self.decode(self.encode(src, src_mask), src_mask, tgt, tgt_mask)\n\n\ndef make_model(src_vocab, tgt_vocab, N=6, d_model=512, d_ff=2048, h=8, dropout=0.1):\n c = copy.deepcopy\n # 实例化Attention对象\n attn = MultiHeadedAttention(h, d_model).to(DEVICE)\n # 实例化FeedForward对象\n ff = PositionwiseFeedForward(d_model, d_ff, dropout).to(DEVICE)\n # 实例化PositionalEncoding对象\n position = PositionalEncoding(d_model, dropout).to(DEVICE)\n # 实例化Transformer模型对象\n model = Transformer(\n Encoder(EncoderLayer(d_model, c(attn), c(ff), dropout).to(DEVICE), N).to(DEVICE),\n Decoder(DecoderLayer(d_model, c(attn), c(attn), c(ff), dropout).to(DEVICE), N).to(DEVICE),\n nn.Sequential(Embeddings(d_model, src_vocab).to(DEVICE), c(position)),\n nn.Sequential(Embeddings(d_model, tgt_vocab).to(DEVICE), c(position)),\n Generator(d_model, tgt_vocab)).to(DEVICE)\n\n # This was important from their code.\n # Initialize parameters with Glorot / fan_avg.\n for p in model.parameters():\n if p.dim() > 1:\n # 这里初始化采用的是nn.init.xavier_uniform\n nn.init.xavier_uniform_(p)\n return model.to(DEVICE)\n\n\nclass LabelSmoothing(nn.Module):\n \"\"\"\n 标签平滑\n \"\"\"\n\n def __init__(self, size, padding_idx, smoothing=0.0):\n super(LabelSmoothing, self).__init__()\n self.criterion = nn.KLDivLoss(reduction='sum')\n self.padding_idx = padding_idx\n self.confidence = 1.0 - smoothing\n self.smoothing = smoothing\n self.size = size\n self.true_dist = None\n\n def forward(self, x, target):\n assert x.size(1) == self.size\n true_dist = x.data.clone()\n true_dist.fill_(self.smoothing / (self.size - 2))\n true_dist.scatter_(1, target.data.unsqueeze(1), self.confidence)\n true_dist[:, self.padding_idx] = 0\n mask = torch.nonzero(target.data == self.padding_idx)\n if mask.dim() > 0:\n true_dist.index_fill_(0, 
mask.squeeze(), 0.0)\n self.true_dist = true_dist\n return self.criterion(x, Variable(true_dist, requires_grad=False))\n\n# Label smoothing的例子\ncrit = LabelSmoothing(5, 0, 0.4) # 设定一个ϵ=0.4\npredict = torch.FloatTensor([[0, 0.2, 0.7, 0.1, 0],\n [0, 0.2, 0.7, 0.1, 0],\n [0, 0.2, 0.7, 0.1, 0]])\nv = crit(Variable(predict.log()),\n Variable(torch.LongTensor([2, 1, 0])))\n\n# Show the target distributions expected by the system.\nprint(crit.true_dist)\nplt.imshow(crit.true_dist)\n\n\nclass SimpleLossCompute:\n \"\"\"\n 简单的计算损失和进行参数反向传播更新训练的函数\n \"\"\"\n\n def __init__(self, generator, criterion, opt=None):\n self.generator = generator\n self.criterion = criterion\n self.opt = opt\n\n def __call__(self, x, y, norm):\n x = self.generator(x)\n loss = self.criterion(x.contiguous().view(-1, x.size(-1)),\n y.contiguous().view(-1)) / norm\n loss.backward()\n if self.opt is not None:\n self.opt.step()\n self.opt.optimizer.zero_grad()\n return loss.data.item() * norm.float()\n\n\nclass NoamOpt:\n \"Optim wrapper that implements rate.\"\n\n def __init__(self, model_size, factor, warmup, optimizer):\n self.optimizer = optimizer\n self._step = 0\n self.warmup = warmup\n self.factor = factor\n self.model_size = model_size\n self._rate = 0\n\n def step(self):\n \"Update parameters and rate\"\n self._step += 1\n rate = self.rate()\n for p in self.optimizer.param_groups:\n p['lr'] = rate\n self._rate = rate\n self.optimizer.step()\n\n def rate(self, step=None):\n \"Implement `lrate` above\"\n if step is None:\n step = self._step\n return self.factor * (self.model_size ** (-0.5) * min(step ** (-0.5), step * self.warmup ** (-1.5)))\n\n\ndef get_std_opt(model):\n return NoamOpt(model.src_embed[0].d_model, 2, 4000,\n torch.optim.Adam(model.parameters(), lr=0, betas=(0.9, 0.98), eps=1e-9))\n\n# Three settings of the lrate hyperparameters.\nopts = [NoamOpt(512, 1, 4000, None),\n NoamOpt(512, 1, 8000, None),\n NoamOpt(256, 1, 4000, None)]\nplt.plot(np.arange(1, 20000), [[opt.rate(i) for opt 
in opts] for i in range(1, 20000)])\nplt.legend([\"512:4000\", \"512:8000\", \"256:4000\"])\n\n\ndef run_epoch(data, model, loss_compute, epoch):\n start = time.time()\n total_tokens = 0.\n total_loss = 0.\n tokens = 0.\n\n for i, batch in enumerate(data):\n out = model(batch.src, batch.trg, batch.src_mask, batch.trg_mask)\n loss = loss_compute(out, batch.trg_y, batch.ntokens)\n\n total_loss += loss\n total_tokens += batch.ntokens\n tokens += batch.ntokens\n\n if i % 50 == 1:\n elapsed = time.time() - start\n print(\"Epoch %d Batch: %d Loss: %f Tokens per Sec: %fs\" % (\n epoch, i - 1, loss / batch.ntokens, (tokens.float() / elapsed / 1000.)))\n start = time.time()\n tokens = 0\n\n return total_loss / total_tokens\n\n\ndef train(data, model, criterion, optimizer):\n \"\"\"\n 训练并保存模型\n \"\"\"\n # 初始化模型在dev集上的最优Loss为一个较大值\n best_dev_loss = 1e5\n\n for epoch in range(EPOCHS):\n # 模型训练\n model.train()\n run_epoch(data.train_data, model, SimpleLossCompute(model.generator, criterion, optimizer), epoch)\n model.eval()\n\n # 在dev集上进行loss评估\n print('>>>>> Evaluate')\n dev_loss = run_epoch(data.dev_data, model, SimpleLossCompute(model.generator, criterion, None), epoch)\n print('<<<<< Evaluate loss: %f' % dev_loss)\n\n # 如果当前epoch的模型在dev集上的loss优于之前记录的最优loss则保存当前模型,并更新最优loss值\n if dev_loss < best_dev_loss:\n torch.save(model.state_dict(), SAVE_FILE)\n best_dev_loss = dev_loss\n print('****** Save model done... 
******')\n\n print()\n\n\n# 数据预处理\ndata = PrepareData(TRAIN_FILE, DEV_FILE)\nsrc_vocab = len(data.en_word_dict)\ntgt_vocab = len(data.cn_word_dict)\nprint(\"src_vocab %d\" % src_vocab)\nprint(\"tgt_vocab %d\" % tgt_vocab)\n\n# 初始化模型\nmodel = make_model(\n src_vocab,\n tgt_vocab,\n LAYERS,\n D_MODEL,\n D_FF,\n H_NUM,\n DROPOUT\n )\n\n# 训练\nprint(\">>>>>>> start train\")\ntrain_start = time.time()\ncriterion = LabelSmoothing(tgt_vocab, padding_idx = 0, smoothing= 0.0)\noptimizer = NoamOpt(D_MODEL, 1, 2000, torch.optim.Adam(model.parameters(), lr=0, betas=(0.9,0.98), eps=1e-9))\n\ntrain(data, model, criterion, optimizer)\nprint(f\"<<<<<<< finished train, cost {time.time()-train_start:.4f} seconds\")\n\n\ndef greedy_decode(model, src, src_mask, max_len, start_symbol):\n \"\"\"\n 传入一个训练好的模型,对指定数据进行预测\n \"\"\"\n # 先用encoder进行encode\n memory = model.encode(src, src_mask)\n # 初始化预测内容为1×1的tensor,填入开始符('BOS')的id,并将type设置为输入数据类型(LongTensor)\n ys = torch.ones(1, 1).fill_(start_symbol).type_as(src.data)\n # 遍历输出的长度下标\n for i in range(max_len - 1):\n # decode得到隐层表示\n out = model.decode(memory,\n src_mask,\n Variable(ys),\n Variable(subsequent_mask(ys.size(1)).type_as(src.data)))\n # 将隐藏表示转为对词典各词的log_softmax概率分布表示\n prob = model.generator(out[:, -1])\n # 获取当前位置最大概率的预测词id\n _, next_word = torch.max(prob, dim=1)\n next_word = next_word.data[0]\n # 将当前位置预测的字符id与之前的预测内容拼接起来\n ys = torch.cat([ys,\n torch.ones(1, 1).type_as(src.data).fill_(next_word)], dim=1)\n return ys\n\n\ndef evaluate(data, model):\n \"\"\"\n 在data上用训练好的模型进行预测,打印模型翻译结果\n \"\"\"\n # 梯度清零\n with torch.no_grad():\n # 在data的英文数据长度上遍历下标\n for i in range(len(data.dev_en)):\n # 打印待翻译的英文语句\n en_sent = \" \".join([data.en_index_dict[w] for w in data.dev_en[i]])\n print(\"\\n\" + en_sent)\n\n # 打印对应的中文语句答案\n cn_sent = \" \".join([data.cn_index_dict[w] for w in data.dev_cn[i]])\n print(\"\".join(cn_sent))\n\n # 将当前以单词id表示的英文语句数据转为tensor,并放如DEVICE中\n src = torch.from_numpy(np.array(data.dev_en[i])).long().to(DEVICE)\n # 增加一维\n 
src = src.unsqueeze(0)\n # 设置attention mask\n src_mask = (src != 0).unsqueeze(-2)\n # 用训练好的模型进行decode预测\n out = greedy_decode(model, src, src_mask, max_len=MAX_LENGTH, start_symbol=data.cn_word_dict[\"BOS\"])\n # 初始化一个用于存放模型翻译结果语句单词的列表\n translation = []\n # 遍历翻译输出字符的下标(注意:开始符\"BOS\"的索引0不遍历)\n for j in range(1, out.size(1)):\n # 获取当前下标的输出字符\n sym = data.cn_index_dict[out[0, j].item()]\n # 如果输出字符不为'EOS'终止符,则添加到当前语句的翻译结果列表\n if sym != 'EOS':\n translation.append(sym)\n # 否则终止遍历\n else:\n break\n # 打印模型翻译输出的中文语句结果\n print(\"translation: %s\" % \" \".join(translation))\n\n# 预测\n# 加载模型\nmodel.load_state_dict(torch.load(SAVE_FILE))\n# 开始预测\nprint(\">>>>>>> start evaluate\")\nevaluate_start = time.time()\nevaluate(data, model)\nprint(f\"<<<<<<< finished evaluate, cost {time.time()-evaluate_start:.4f} seconds\")","repo_name":"seanzhang-zhichen/-transformer-english2chinese-","sub_path":"transformer_nmt.py","file_name":"transformer_nmt.py","file_ext":"py","file_size_in_byte":29829,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"54"} +{"seq_id":"13747095755","text":"import subprocess\nimport re\nfrom app import app\nfrom Common.constants import PYTHON_INTERPRETER_NAME, PDFID_LOCATION, PDFID_FEATURES_COUNT\n\n\nclass FeatureExtractor:\n def __init__(self, filepath):\n self.filepath = filepath\n self.features = []\n app.logger.info(\"FeatureExtractor initialized.\")\n\n def extract(self):\n app.logger.info(\"Extracting features for file: '{}'\".format(self.filepath))\n\n output = self.__run_pdfid()\n if len(output) == PDFID_FEATURES_COUNT:\n self.__featurize(output)\n return self.features\n else:\n app.logger.info(\"[ERROR] Can't extract features for file: '{}'. 
Additional Info: '{}'\".format(self.filepath, output))\n return None\n\n def __run_pdfid(self):\n app.logger.info(\"Running PDFiD for file: '{}'\".format(self.filepath))\n\n proc = subprocess.Popen([PYTHON_INTERPRETER_NAME, PDFID_LOCATION, self.filepath], stdout=subprocess.PIPE)\n output = str(proc.communicate()[0], 'utf-8').strip()\n\n output = output.split(\"\\n\")[2:] # skip PDFiD Header\n return output\n\n def __featurize(self, output_lines):\n \"\"\"\n Extract values from output of PDFiD and featurize the pdf\n \"\"\"\n\n app.logger.info(\"Featurizing file: '{}'\".format(self.filepath))\n\n obfuscations = 0\n \n for line in output_lines:\n val = line.split()[-1]\n # Parse number of obfuscations if it exists\n m = re.match(r\"(\\d*)\\((\\d*)\\)\", val)\n if m:\n x = int(m.group(1))\n obfuscations += int(m.group(2))\n else:\n x = int(val)\n \n self.features.append(x)\n\n self.features.append(obfuscations)\n","repo_name":"viorelyo/ExtWatcher","sub_path":"AnalyzeServer/API/Core/PDFAnalyzer/DataMiner/feature_extractor.py","file_name":"feature_extractor.py","file_ext":"py","file_size_in_byte":1776,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"73745010721","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 24 05:42:21 2017\n\n@author: Young\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# df for dataframe, s for series\ndf = pd.read_csv('Tianchi_power.csv')\ndf['record_date'] = pd.to_datetime(df['record_date'])\n\n# total power consumption\ns_power_consumption = df.groupby('record_date')['power_consumption'].sum()\ns_power_consumption.index = pd.to_datetime(s_power_consumption.index).sort_values()\n\n# create day types\n# 2015-1-1 is wendsday so ..\n#day_type = ['wen','thu','fri','sat','sun','mon','tue']\nday_type = [3,4,5,6,7,1,2] # for sklearn\nrest_days = []\nif s_power_consumption.size % 7 == 0:\n num_weeks = s_power_consumption.size / 7\nelse:\n 
num_rest_days = s_power_consumption.size % 7\n rest_days = day_type[0:num_rest_days]\n \ns_day_type = pd.Series(data = day_type * num_weeks + rest_days, index = s_power_consumption.index,name='day_type')\n#s_day_type.rename('day_type')\n\n#dataset = pd.concat([s_power_consumption,s_day_type],axis=1)\n\n# 剔除趋势因子,移动平均\ndef auto_corr(x, l):\n return [x.autocorr(i) for i in l]\n\nfor window_size in range(36,41):\n# window_size = 7\n avg_power = s_power_consumption.rolling(window=window_size,center=False).mean()\n s_power = s_power_consumption - avg_power\n corr = auto_corr(s_power,range(1,60))\n plt.plot(corr,label='windowsize%d'%window_size)\nplt.legend()\nplt.show()\n\ncorr = auto_corr(s_power_consumption,range(1,60))\nplt.plot(corr,label='normal')\nplt.legend()\nplt.show()\n\n# 序列平稳化MA\navg_power = s_power_consumption.rolling(window=30,center=False).mean()\ns_power = s_power_consumption - avg_power\ns_values = s_power.values[30:]\n\n# 序列平稳化,一阶差分\ns1 = pd.Series(data = s_power_consumption.values[0:-1])\ns2 = pd.Series(data = s_power_consumption.values[1:])\n\ndelta_power = s1-s2\ncorr = auto_corr(delta_power,range(1,60))\nplt.plot(corr,label='normal')\nplt.legend()\nplt.show()\n\nfrom statsmodels.graphics.tsaplots import plot_acf\nplot_acf(avg_power.values[30:]).show()\nplot_acf(s_power_consumption).show()\nplot_acf(s_values).show()\nplot_acf(delta_power).show()\nplt.show()\n## 平稳性检验\n#from statsmodels.tsa.stattools import adfuller as ADF\n#ADF(s_power_consumption)\n#ADF(avg_power.values[30:])\n#ADF(s_values)\n#ADF(delta_power)\n#\n## 白噪声检验\n#from statsmodels.stats.diagnostic import acorr_ljungbox\n#acorr_ljungbox(s_power_consumption, lags=1)\n#acorr_ljungbox(avg_power.values[30:], lags=1)\n#acorr_ljungbox(s_values, lags=1)\n#acorr_ljungbox(delta_power, lags=1)\n#\n#\n## 序列平稳化MA\n#avg_power = s_power_consumption.rolling(window=5,center=False).mean()\n#s_power = s_power_consumption - avg_power\n#s_values = s_power.values[6:]\n#ADF(s_values)\n#acorr_ljungbox(s_values, 
lags=1)\n\nfrom statsmodels.tsa.arima_model import ARIMA\ns = s_power_consumption.values[:]\ns = s.astype(np.float64)\nmodel = ARIMA(s, (6,1,2)).fit()\npred = model.forecast(30)[0]\nplt.plot(pred,label='predict')\nplt.plot(s[-30:],label='real')\nplt.legend()\nplt.show()\n\n#def auto_corr(x, lags=1):\n# n = len(x)\n# x = np.array(x)\n# variance = x.var()\n# x = x - x.mean()\n# result = np.correlate(x, x, mode = 'full')[-n+1:-n+lags+1]/\\\n# (variance*np.arange(n-1, n-1-lags,-1))\n# return result\n\n\n# \npivoted = df.pivot('record_date','user_id','power_consumption')\nuser = pivoted.mean()\nindex = user.index\nindex1 = index[np.where(user.values==1)]\n\n\n","repo_name":"YoungGod/Power-Consumption-Prediction","sub_path":"Explore_Analysis/explore_analysis.py","file_name":"explore_analysis.py","file_ext":"py","file_size_in_byte":3429,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"54"} +{"seq_id":"29203545297","text":"# Bấm BT2 quay lại video vào lưu tại thư mục video, khi không bấm nữa thì dừng quay; Bấm\n# BT3 lần 1 thì bắt đầu quay video và lưu, bấm lần 2 thì dừng quay \nimport cv2\nimport RPi.GPIO as GPIO \nimport time\ndef main():\n BT2 = 26\n BT3 = 20\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(BT2, GPIO.IN, pull_up_down= GPIO.PUD_UP) \n GPIO.setup(BT3, GPIO.IN, pull_up_down =GPIO. 
PUD_UP) \n global namewindow\n namewindow= \"Camera User\"\n capture=cv2.VideoCapture(0) # khởi động camera\n print(\"Capture da ok\")\n fourcc = cv2.VideoWriter_fourcc(*'DIVX') # định dạng cho việc quay video\n out = cv2.VideoWriter('output.avi', cv2.VideoWriter_fourcc(*'MJPG'), 20.0,(640, 480)) \n cap_video= False\n while True: # nếu camera được mở\n ret, frame = capture.read() # đọc video từ camera\n if GPIO.input(BT2)== GPIO.LOW: \n print(\"press BT2\")\n cv2.imshow(namewindow, frame) \n out.write(frame) \n print(\"video luu\")\n if cv2.waitKey(1) & 0XFF == ord('q'): # bẩm q để thoát\n GPIO.cleanup()\n cv2.destroyWindow(namewindow)\n break\n continue\n if GPIO.input(BT3) ==GPIO.LOW:\n print(\"press BT3\") \n if cap_video: \n cap_video=False\n cv2.destroyWindow(namewindow)\n continue\n time.sleep(0.5)\n if not cap_video:\n cap_video= True\n continue\n time.sleep(0.5)\n if cap_video:\n cv2.imshow(namewindow, frame) \n out.write(frame)\n if cv2.waitKey(1) & 0xFF== ord('q'): #bắm q để thoát\n GPIO.cleanup() \n cv2.destroyWindow(namewindow)\n break\ntry:\n main()\nexcept KeyboardInterrupt:\n GPIO.cleanup()\n cv2.destroywindow(namewindow)\n","repo_name":"manhhus/ComputerArchitecture","sub_path":"buoi10/bai10_2.py","file_name":"bai10_2.py","file_ext":"py","file_size_in_byte":1918,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"21337495657","text":"import mock\nimport time\nimport unittest\nfrom collections import defaultdict\n\nimport koji\nimport kojihub\n\nDP = kojihub.DeleteProcessor\nQP = kojihub.QueryProcessor\nUP = kojihub.UpdateProcessor\n\n\nclass TestDeleteBuild(unittest.TestCase):\n\n def getDelete(self, *args, **kwargs):\n delete = DP(*args, **kwargs)\n delete.execute = mock.MagicMock()\n self.deletes.append(delete)\n return delete\n\n def getQuery(self, *args, **kwargs):\n query = QP(*args, **kwargs)\n query.execute = self.query_execute\n self.queries.append(query)\n return query\n\n def 
getUpdate(self, *args, **kwargs):\n update = UP(*args, **kwargs)\n update.execute = mock.MagicMock()\n self.updates.append(update)\n return update\n\n def setUp(self):\n self.DeleteProcessor = mock.patch('kojihub.kojihub.DeleteProcessor',\n side_effect=self.getDelete).start()\n self.deletes = []\n self.QueryProcessor = mock.patch('kojihub.kojihub.QueryProcessor',\n side_effect=self.getQuery).start()\n self.queries = []\n self.query_execute = mock.MagicMock()\n self.UpdateProcessor = mock.patch('kojihub.kojihub.UpdateProcessor',\n side_effect=self.getUpdate).start()\n self.updates = []\n self.context_db = mock.patch('kojihub.db.context').start()\n self.context_db.session.assertLogin = mock.MagicMock()\n self.context_db.event_id = 42\n self.context_db.session.user_id = 24\n self.get_build = mock.patch('kojihub.kojihub.get_build').start()\n self._delete_build = mock.patch('kojihub.kojihub._delete_build').start()\n self.get_user = mock.patch('kojihub.kojihub.get_user').start()\n self.context = mock.patch('kojihub.kojihub.context').start()\n self.context.session.assertPerm = mock.MagicMock()\n self.binfo = {'id': 'BUILD ID', 'state': koji.BUILD_STATES['COMPLETE'], 'name': 'test_nvr',\n 'nvr': 'test_nvr-3.3-20.el8', 'version': '3.3', 'release': '20'}\n\n def tearDown(self):\n mock.patch.stopall()\n\n def test_delete_build_raise_error(self):\n references = ['tags', 'rpms', 'archives', 'component_of']\n for ref in references:\n context = mock.MagicMock()\n context.session.return_value = context\n\n with mock.patch('kojihub.kojihub.build_references') as refs:\n retval = defaultdict(dict)\n retval[ref] = True\n refs.return_value = retval\n with self.assertRaises(koji.GenericError):\n kojihub.delete_build(build='', strict=True)\n\n def test_delete_build_return_false(self):\n references = ['tags', 'rpms', 'archives', 'component_of']\n for ref in references:\n context = mock.MagicMock()\n context.session.return_value = context\n\n with 
mock.patch('kojihub.kojihub.build_references') as refs:\n retval = defaultdict(dict)\n retval[ref] = True\n refs.return_value = retval\n assert kojihub.delete_build(build='', strict=False) is False\n\n def test_delete_build_check_last_used_raise_error(self):\n references = ['tags', 'rpms', 'archives', 'component_of', 'last_used']\n for ref in references:\n context = mock.MagicMock()\n context.session.return_value = context\n\n with mock.patch('kojihub.kojihub.build_references') as refs:\n retval = defaultdict(dict)\n if ref == 'last_used':\n retval[ref] = time.time() + 100\n refs.return_value = retval\n self.assertFalse(kojihub.delete_build(build='', strict=False))\n\n @mock.patch('kojihub.kojihub.build_references')\n def test_delete_build_lazy_refs(self, buildrefs):\n '''Test that we can handle lazy return from build_references'''\n self.get_user.return_value = {'authtype': 2, 'id': 1, 'krb_principal': None,\n 'krb_principals': [], 'name': 'kojiadmin', 'status': 0,\n 'usertype': 0}\n buildrefs.return_value = {'tags': []}\n self.get_build.return_value = self.binfo\n kojihub.delete_build(build=self.binfo, strict=True)\n\n # no build refs, so we should have called _delete_build\n self._delete_build.assert_called_with(self.binfo)\n\n def test_delete_build_queries(self):\n self.query_execute.return_value = [(123, )]\n\n kojihub._delete_build(self.binfo)\n\n self.assertEqual(len(self.queries), 1)\n query = self.queries[0]\n self.assertEqual(query.tables, ['rpminfo'])\n self.assertEqual(query.joins, None)\n self.assertEqual(query.clauses, ['build_id=%(build_id)i'])\n self.assertEqual(query.columns, ['id'])\n\n self.assertEqual(len(self.deletes), 2)\n delete = self.deletes[0]\n self.assertEqual(delete.table, 'rpmsigs')\n self.assertEqual(delete.clauses, [\"rpm_id=%(rpm_id)i\"])\n\n delete = self.deletes[1]\n self.assertEqual(delete.table, 'rpm_checksum')\n self.assertEqual(delete.clauses, [\"rpm_id=%(rpm_id)i\"])\n\n self.assertEqual(len(self.updates), 2)\n update = 
self.updates[0]\n self.assertEqual(update.table, 'tag_listing')\n self.assertEqual(update.values, {'build_id': self.binfo['id']})\n self.assertEqual(update.data, {'revoke_event': 42, 'revoker_id': 24})\n self.assertEqual(update.rawdata, {'active': 'NULL'})\n self.assertEqual(update.clauses, [\"build_id=%(build_id)i\", 'active = TRUE'])\n\n update = self.updates[1]\n self.assertEqual(update.table, 'build')\n self.assertEqual(update.values, {'build_id': self.binfo['id']})\n self.assertEqual(update.data, {'state': 2})\n self.assertEqual(update.rawdata, {})\n self.assertEqual(update.clauses, ['id=%(build_id)i'])\n\n","repo_name":"yifengyou/koji","sub_path":"BUILD/koji-1.33.0/tests/test_hub/test_delete_build.py","file_name":"test_delete_build.py","file_ext":"py","file_size_in_byte":6043,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"54"} +{"seq_id":"35751389900","text":"import asyncio\r\n\r\n\r\nasync def func(fut):\r\n await asyncio.sleep(2)\r\n fut.set_result(666)\r\n\r\n\r\nasync def main():\r\n # 获取当前事件循环\r\n loop = asyncio.get_running_loop()\r\n\r\n # 创建一个Future对象,���个对象什么都不干\r\n fut = loop.create_future()\r\n\r\n # 创建一个任务\r\n loop.create_task(func(fut))\r\n\r\n # 等待任务最终结果(future对象),没有结果会一直等下去。\r\n await fut\r\n\r\n\r\nif __name__ == '__main__':\r\n asyncio.run(main())\r\n","repo_name":"nenusoulgithub/LearnPython","sub_path":"杂七杂八/Python协程&asyncio&异步编程/06-Future示例2.py","file_name":"06-Future示例2.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"35922855884","text":"def main():\r\n man = menorF20 = maior_18 = 0\r\n op = 'S'\r\n while op not in 'nN':\r\n print(\"-\" * 40)\r\n print(\"Cadastre uma pessoa\")\r\n print(\"-\" * 40)\r\n idade = int(input(\"Idade: \"))\r\n while True:\r\n sex = input(\"sexo [F/M]: \").upper().strip()[0]\r\n if sex in 'FM':break \r\n print(\"opção invalida \\n\")\r\n if idade > 18: maior_18 
+= 1\r\n if sex == 'M': man += 1\r\n if sex == 'F' and idade < 20: menorF20\r\n op = input(\"Gostaria de continuar? [S/N]: \")\r\n print(f\"\\nForam registrados {maior_18} pessoas com mais de 18 anos\")\r\n print(f\"Foram registrados {man} homens\")\r\n print(f\"E foram registrados {menorF20} mulheres com menos de 20 anos \\n\")\r\n\r\nif __name__ == \"__main__\":\r\n main()","repo_name":"f3pe/Estudos","sub_path":"python/exercicios/ex_curso-em-video/ex069.py","file_name":"ex069.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"5477360502","text":"import os\nfrom tqdm import tqdm\n\n\nDATA_DIR = \"./data\"\nTRAIN_PATH = os.path.join(DATA_DIR, \"eng.testa\")\nTEST_PATH = os.path.join(DATA_DIR, \"eng.train\")\n\n\ndef read_data(file):\n lines = open(file, \"r\").readlines()\n data = {\"sentences\": [], \"labels_per_sent\": []}\n sentence, labels = [], []\n\n for line in tqdm(lines):\n line = line.strip()\n\n if not line:\n if sentence and labels:\n assert len(sentence) == len(labels)\n data[\"sentences\"].append(\" \".join(sentence))\n data[\"labels_per_sent\"].append(\" \".join(labels))\n sentence, labels = [], []\n continue\n\n if line.startswith(\"-DOCSTART-\"):\n continue\n else:\n values = line.split(\" \")\n try:\n token, _, _, label = values\n sentence.append(token)\n if label != \"O\":\n labels.append(label.split(\"-\")[-1])\n else:\n labels.append(label)\n\n except Exception as e:\n print(f\"Error has occur: {e}\")\n continue\n\n data[\"sentences\"].append(\"\")\n data[\"labels_per_sent\"].append(\"\")\n\n return \"\\n\".join(data[\"sentences\"]), \"\\n\".join(data[\"labels_per_sent\"])\n\n\nx_train, y_train = read_data(TRAIN_PATH)\nx_test, y_test = read_data(TEST_PATH)\n\n\nwith open(os.path.join(DATA_DIR, \"text_train.txt\"), \"w\") as f:\n f.write(x_train)\n\nwith open(os.path.join(DATA_DIR, \"labels_train.txt\"), \"w\") as f:\n f.write(y_train)\n\nwith 
open(os.path.join(DATA_DIR, \"text_dev.txt\"), \"w\") as f:\n f.write(x_test)\n\nwith open(os.path.join(DATA_DIR, \"labels_dev.txt\"), \"w\") as f:\n f.write(y_test)\n","repo_name":"ArtyomZemlyak/NeMo_NER_CoNLL_2003","sub_path":"convert_for_nemo.py","file_name":"convert_for_nemo.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"23783544875","text":"import threading\nimport time\n\n\ndef thread1_job():\n print('T1 开始\\n')\n for i in range(10):\n time.sleep(0.5)\n print('T1 结束\\n')\n\n\ndef exampleFuc():\n thread1 = threading.Thread(target=thread1_job, name='T4')\n thread1.start()\n thread1.join()\n print('所有的任务都做完了。\\n')\n\n\nexampleFuc()","repo_name":"yxp0916/test","sub_path":"139/jincheng.py","file_name":"jincheng.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"39458408897","text":"\ndef main():\n n = input()\n cnt = 10\n while cnt > 0:\n cnt = cnt - 1\n if n == n[::-1]:\n print('%s is a palindromic number.' 
% n)\n return\n m = str(int(n) + int(n[::-1]))\n print('%s + %s = %s' % (n, n[::-1], m))\n n = m\n print('Not found in 10 iterations.')\n\n\nmain()\n","repo_name":"rflin/PAT-Advanced-Level","sub_path":"pysolutions/1136 A Delayed Palindrome (20 分).py","file_name":"1136 A Delayed Palindrome (20 分).py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"54"} +{"seq_id":"9831857240","text":"from django.shortcuts import render\n# from second_app.models import AccessRecord,user\nfrom second_app.forms import NewUserForm\n\n# Create your views here.\ndef index(request):\n return render(request,'second_app/index.html')\n\ndef users(request):\n\n form = NewUserForm\n\n if request.method == 'POST':\n form = NewUserForm(request.POST)\n\n if form.is_valid():\n form.save(commit=True)\n return index(request)\n\n else:\n print('Form is Invalid')\n return render(request,'second_app/users.html',{'form':form})\n","repo_name":"Alsaheem/full_django_course","sub_path":"BACKEND/DJANGO/LEVEL3/second_project/second_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"363068003","text":"from flask import Flask\nfrom flask import request\n\n\nimport json\nfrom config import DevelopmentConfig\nfrom handler import received_message\n\napp = Flask(__name__)\napp.config.from_object( DevelopmentConfig )\n\n@app.route('/webhook', methods = ['GET', 'POST'])\ndef webhook():\n\tif request.method == 'GET':\n\t\tverify_token = request.args.get('hub.verify_token', '')\n\t\tif verify_token == app.config['SECRET_KEY']:\n\t\t\treturn request.args.get('hub.challenge', '')\n\t\treturn 'Error al validar el secreto'\n\n\telif request.method == 'POST':\n\n\t\tpayload = request.get_data()\n\t\n\t\tdata = json.loads(payload)\n\t\n\t\tfor page_entry in data['entry']:\n\n\t\t\tfor message_event in 
page_entry['messaging']:\n\t\t\t\n\t\t\t\tif 'message' in message_event:\n\t\t\t\t\tevento = message_event['message']\n\t\t\t\t\treceived_message(message_event, app.config['PAGE_ACCESS_TOKEN'])\n\n\t\treturn \"ok\"\n\n@app.route('/', methods = ['GET'])\ndef index():\n\treturn 'Hola ya sirve el API ...... !'\n\nif __name__ == '__main__':\n\tapp.run(port = 8000)\n\n\n","repo_name":"rafaaban/python_bot","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72572667680","text":"import os\nimport re\nimport json\nimport time\nimport socket\nimport bcrypt\nimport threading\nfrom dbmodels import database\nfrom crypter import AESCrypter\nfrom base64 import b64encode, b64decode\nfrom Crypto.Random import get_random_bytes\n\n# Create database object from dbmodels\ndb = database()\n\n# Establish server host and port via socket object\nhost = \"127.0.0.1\"\nport = 1400\n\nserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\nserver.bind((host, port))\n\n# Start listening\nserver.listen()\n\n# List of connected clients\nclients = []\n\n# List of usernames of connected clients\nusername_list = []\n\n# List of admins\nadmins = []\n\n# List of crypters\ncrypters = []\n\n# Flag which changes if a file is on the server\nfile_flag = 0\n\n# Fucntion to receive connection from client\ndef receive():\n while True:\n clientconn, address = server.accept()\n print(f\"Connection to {address} established...\")\n\n # Ask client for username\n clientconn.send(\"Username\".encode())\n\n # user_pass_json var stores the username and password received from client as json\n user_pass_json = clientconn.recv(1024).decode()\n user_pass_json = json.loads(user_pass_json)\n\n username = user_pass_json[0]\n password = user_pass_json[1]\n\n # Check if user is already in server, by checking username/client ip list\n if (ifuserexists(username)):\n 
clientconn.send(\"Duplicate\".encode())\n clientconn.close()\n continue\n\n # Check for ban database\n db.checkfordb('ban_database.sqlite')\n # Check if client is banned\n if db.checkban(username):\n # If client is on ban list send them ban message\n clientconn.send(\"Banned\".encode())\n # Disconnect client from server\n clientconn.close()\n continue\n\n # init crypter\n crypter = AESCrypter()\n send_iv = get_random_bytes(16)\n recv_iv = get_random_bytes(16)\n crypter.init_cipher(send_iv, recv_iv)\n\n clientconn.send(\"IV\".encode())\n time.sleep(1)\n clientconn.send(b64encode(recv_iv))\n clientconn.send(b64encode(send_iv))\n\n # Check for user database\n db.checkfordb('user_database.sqlite')\n # Check if username exists\n print(\"enters check\")\n if db.checkUsername(username):\n print(\"Username exists\")\n # If username exists then verify login\n if not db.checkloginHash(username, password):\n #if not db.checklogin(username, password):\n print(\"Password does not match\")\n # We want to disconnect client so they retry password\n clientconn.send(\"Wrongpass\".encode())\n clientconn.close()\n continue\n else:\n # user cannot enter hashed password so it will not match database\n hashed = bcrypt.hashpw(password.encode(), bcrypt.gensalt(13))\n # print (f'before stored {hashed}')\n # checking if DB exists before trying to store\n db.checkfordb('user_database.sqlite')\n db.storeuserinfo(username, hashed.decode())\n \n\n # Update client list and username list with new client\n crypters.append(crypter)\n clients.append(clientconn)\n username_list.append(username)\n\n print(f\"{username} is joining the server\")\n\n # Call broadcast func to send a message to all clients\n broadcast(f\"{username} has joined the server\\n\", clientconn)\n\n # Let client know they are now connected to the chat server\n cstr = crypter.encrypt_string(\"You are now connected to the live chat server\\n\")\n clientconn.send(cstr)\n\n # Handle multiple clients\n handlerthread = 
threading.Thread(target=handler, args=(clientconn,))\n handlerthread.start()\n # send_to_clientthread = threading.Thread(target=send_to_client, args=(clientconn,))\n # send_to_clientthread.start()\n\n# Functions handles messages sent to server by clients\ndef handler(client):\n crypter = client_to_crypter(client)\n\n while True:\n try:\n # Get message from client\n message = client.recv(1024)\n\n if message == b\"\":\n raise Exception(\"exception: received empty string\")\n\n dmsg = crypter.decrypt_string(message)\n\n temp = dmsg.decode()\n\n if temp == \"SENDXX\":\n get_from_client(client) \n \n elif temp == \"RECVXX\":\n send_to_client(client)\n \n elif temp == \"CXFXL\":\n check_for_client(client)\n \n\n #print (f\"THIS IS DSMG {dmsg}\")\n elif re.search (r\":\\s/.\",str(dmsg)):\n broadcast_single(dmsg.decode(), client)\n commands(dmsg, client)\n\n # Broadcast message to all clients\n elif re.search (r\"@.\",str(dmsg)):\n broadcast(dmsg.decode(), client)\n ping(dmsg, client)\n\n else:\n # Broadcast message to all clients\n print(\"received {}\".format(dmsg.decode()))\n broadcast(dmsg.decode(), client)\n\n except Exception as ex:\n print(ex)\n # Broadcast the user has disconnected\n index = clients.index(client)\n username = username_list[index]\n broadcast(f'{username} has left the chat\\n', client)\n\n # Disconnect client from server and remove from list\n crypters.remove(crypter)\n username_list.remove(username)\n clients.remove(client)\n client.close()\n break\n\ndef client_to_crypter(client):\n # get the index of the target client\n index = clients.index(client)\n # get the crypter of the target client\n return crypters[index]\n\n# Function sends message to all clients\ndef broadcast(message, client):\n print(message)\n for x in clients:\n try:\n crypter = client_to_crypter(x)\n # encrypt the message to send\n emsg = crypter.encrypt_string(message)\n # ( ( ( hacky sleep cuz no size header ) ) )\n #time.sleep(0.5)\n # send the message to the target clientss\n 
x.send(emsg)\n except Exception as ex:\n print(ex)\n\n\n# Function sends message only to all connected clients\ndef broadcast_disconnected(message, client):\n for x in clients:\n if x == client:\n continue\n else: \n crypter = client_to_crypter(x)\n emsg = crypter.encrypt_string(message)\n x.send(emsg)\n\n# Function sends message to only one client\ndef broadcast_single(message, client):\n for x in clients:\n if x == client:\n try:\n crypter = client_to_crypter(x)\n emsg = crypter.encrypt_string(message)\n x.send(emsg)\n except Exception as ex:\n print(ex)\n\n\n# Returns if user exists in user\ndef ifuserexists(username):\n if username in username_list:\n return True\n else:\n False\n\n\n# Search for ip of given username\ndef transverse(names):\n # transverse the user list one by one\n for num in range(len(username_list)):\n # if they found the user. then return the ip address according to the client list.\n if str(username_list[num]) == names:\n return clients[num]\n # if we can't find anything we return -1\n return -1\n\n\n# use user ip address to find the username\n# this is similar function to the transverse function. 
We use ip address to find the username this time\ndef namelookup(ip_address):\n for g in range(len(clients)):\n if clients[g] == ip_address:\n return username_list[g]\n # if we can't find anything we return -1\n return -1\n\n\n# Returns if user is admin\ndef admincheck(usernamee):\n # check through the admin list to see if a user is inside the admin list\n for good in admins:\n # if we found the user then return true\n if good == usernamee:\n return True\n # if not return false\n return False\n\n\n# Add a user into admin list\ndef adminadd(usernamess):\n admins.append(usernamess)\n\n# Ping function\ndef ping(message1,client1):\n crypter = client_to_crypter(client1)\n if re.search (r\"@.+\",str(message1)):\n #extract the message from the text\n target = re.findall(r\"@.+\",str(message1))\n target[0] = target[0].replace(\"'\",\"\")\n target[0] = target[0].replace(\"\\\\n\",\"\")\n # split them withg space\n lists = target[0].split(' ')\n print(\"message before \",message1)\n print(\"extracted \",target[0])\n print(\"this is list \",lists)\n # for each word between space\n for i in lists:\n #if there's @ inside the word\n if \"@\" in i:\n #extract name\n i = i.replace(\"@\",\"\")\n #see if the name is exist in chatroom\n if str(transverse(i)) != \"-1\":\n found = transverse(i)\n # ping it \n found.send(\"Pinged\".encode())\n time.sleep(0.1)\n broadcast_single(\"Pinged!\\n\", found)\n #found.send(crypter.encrypt_string(\"Pinged!\\n\"))\n break\n else:\n client1.send(crypter.encrypt_string(\"user\"))\n #if we didnt have a \"@user\" in the format then we will return the message.\n else:\n client1.send(crypter.encrypt_string(\"Please check if you have the right format for the command\\n\"))\n\n# Kick function\ndef kick(mess, client1):\n crypter = client_to_crypter(client1)\n # if they found kick inside the message\n print(\"this is mess :\",mess)\n if re.search(r\"kick\\s.+\", str(mess)):\n # extract the message from the text\n target = re.findall(r\"kick\\s.+\", 
str(mess))\n target[0] = target[0].replace(\"kick \", \"\")\n target[0] = target[0].replace(\"'\", \"\")\n #target[0] = target[0].replace(\"n\", \"\")\n target[0] = target[0].replace(\"\\\\n\", \"\")\n print(mess)\n print(target[0])\n print(transverse(target[0]))\n # if they found a client ip address with transverse function\n if str(transverse(target[0])) != \"-1\":\n found = transverse(target[0])\n # close the GUI\n found.send(\"Exit\".encode())\n else:\n client1.send(crypter.encrypt_string(\"User doesn't exist please double check\\n\"))\n else:\n client1.send(crypter.encrypt_string(\"Please check if you have the right format for the command\\n\"))\n\n\n\n# current number of people inside the chat room\ndef chat_member(client1):\n crypter = client_to_crypter(client1)\n #send the list of username to the user\n final = str(username_list)+\"\\n\"\n client1.send(crypter.encrypt_string(final))\n\n\n# help function, to send out the commands\ndef helps(client1, client_name):\n # if they found client name under the admin list\n if admincheck(client_name):\n crypter = client_to_crypter(client1)\n help_message = \"Those are available commands: \\n/chatmember\\n/help\\n/kick\\n/ban\\n/disconnect\\n\"\n help_message = crypter.encrypt_string(help_message)\n client1.send(help_message) \n #client1.send(\"Those are available commands: \\n/chatmember\\n/help\\n/kick\\n/ban\\n/disconnect\\n\".encode())\n # if not then return regular user list\n else:\n crypter = client_to_crypter(client1)\n help_message = \"Those are available commands: \\n/chatmember\\n/help\\n/disconnect\\n\"\n help_message = crypter.encrypt_string(help_message)\n client1.send(help_message) \n #client1.send(\"Those are available commands: \\n/chatmember\\n/help\\n/disconnect\\n\".encode())\n\n\n# ban function, to ban a user from branch\ndef bans(mess, client1):\n crypter = client_to_crypter(client1)\n # if it picks up subject\n if re.search(r\"ban\\s.+\", str(mess)):\n # extract the subject from the message\n 
target = re.findall(r\"ban\\s.+\", str(mess))\n target[0] = target[0].replace(\"ban \", \"\")\n target[0] = target[0].replace(\"'\", \"\")\n target[0] = target[0].replace(\"n\", \"\")\n target[0] = target[0].replace(\"\\\\\", \"\")\n # if subject is found in the user list\n print(mess)\n print(target[0])\n print(transverse(target[0]))\n if str(transverse(target[0])) != \"-1\":\n found = transverse(target[0])\n # ban it and disconnect it\n db.storebaninfo(target[0], found)\n # close the GUI\n found.send(\"Exit\".encode())\n # close client\n #found.close()\n # else return a message if didnt find the subject in the database\n else:\n client1.send(crypter.encrypt_string(\"User doesn't exist please double check\\n\"))\n # if didnt find the subjest in the text\n else:\n client1.send(crypter.encrypt_string(\"Please check if you have the right format for the command\\n\"))\n\n\n# admin function,make a user to become admin\ndef New_admin(client1, name_client):\n crypter = client_to_crypter(client1)\n client1.send(crypter.encrypt_string(\"Please enter the code you get from the Staff\\n\"))\n # receive code from the user\n codes = client1.recv(1024).decode()\n dcode = crypter.decrypt_string(codes)\n # extract the code from the message\n txts = re.findall(r\"\\s.+\", str(dcode))\n #print(\"codes: \",txts)\n txts[0] = txts[0].replace(\" \", \"\")\n txts[0] = txts[0].replace(\"'\", \"\")\n txts[0] = txts[0].replace(\"\\\\n\", \"\")\n # print(\"Acodes: \",txts)\n # if password matches, then add user into admin list\n if txts[0] == \"Passcodes\": # you can change the password here\n adminadd(name_client)\n client1.send(crypter.encrypt_string(\"You are now an admin\\n\"))\n # send a message if the passcode is wrong\n else:\n client1.send(crypter.encrypt_string(\"Wrong passcode, try again\\n\"))\n\ndef filePermission(client1):\n crypter = client_to_crypter(client1)\n client1.send(crypter.encrypt_string(\"Do you want send file to chatroom or a client?\\n\"))\n answer = 
client1.recv(1024).decode()\n answer_decoded = crypter.decrypt_string(answer)\n regexp = re.compile(r'SERVER')\n if regexp.search(answer_decoded):\n return \"SERVER\"\n else:\n return \"CLIENT\"\n\n# Function to send files to server\n# After update handler will call get_from_client when it receives type file\ndef get_from_client(client1):\n # We need to tell client we are ready to recieve a file \n client1.send(\"SendImage\".encode())\n crypter = client_to_crypter(client1)\n # We need to send name of file to client client1.send(\"Imagename.encode\")\n remaining = client1.recv(1024).decode()\n temp_rem = remaining\n remaining = int(remaining)\n client1.send(crypter.encrypt_string(f\"You are sending file of size: {remaining} bytes\\n\"))\n with open('gotit.jpg','wb') as file:\n while remaining:\n image_data = client1.recv(min(4096,remaining))\n remaining -= len(image_data)\n file.write(image_data)\n file.close()\n global file_flag\n file_flag = 1\n help_message = \"Server has received the entire file\\n\" \n help_message = crypter.encrypt_string(help_message)\n client1.send(help_message) \n # Name of client has sent a file, click download to view. \n broadcast_disconnected(f\"{namelookup(client1)} has uploaded a file to the server. 
Click download to view!\\n\", client1)\n\n\n# Function to send files to client\ndef send_to_client(client1):\n \n client1.send(\"RecvImage\".encode())\n fileToSend = open('gotit.jpg', 'rb')\n fileToSend.seek(0, os.SEEK_END)\n file_size = fileToSend.tell()\n data_message = client1.recv(1024).decode()\n if data_message == \"READYTORECV\":\n print(\"Size of file is :\", file_size,\"bytes\")\n client1.send(str(file_size).encode())\n fileToSend.seek(0,0)\n print (\"Sending file...\")\n while True:\n image_data = fileToSend.read(4096)\n while (image_data):\n client1.send(image_data)\n image_data = fileToSend.read(4096)\n if not image_data:\n fileToSend.close() \n break\n\ndef check_for_client(client1):\n global file_flag\n print (f\"File Flag is {file_flag}\")\n if file_flag == 1:\n client1.send(\"GoodF\".encode())\n else:\n client1.send(\"BadF\".encode())\n\n#we add commands here\ndef commands(message1, client):\n name_of_client = namelookup(client)\n crypterX = client_to_crypter(client)\n print(\"message1:\",message1)\n if \"/chatmember\" in str(message1):\n chat_member(client)\n elif \"/disconnect\" in str(message1):\n client.send(\"Exit\".encode())\n #client.close()\n # We need to make sure client is deleted off our lists\n elif \"/help\" in str(message1):\n helps(client, name_of_client)\n elif \"/kick\" in str(message1) and admincheck(name_of_client):\n kick(message1, client)\n elif \"/ban\" in str(message1) and admincheck(name_of_client):\n bans(message1, client)\n elif \"/admin\" in str(message1):\n New_admin(client, name_of_client)\n elif \"/get_from_client\" in str(message1):\n get_from_client(client)\n else:\n client.send(crypterX.encrypt_string(\"Command Not Found, Use /help to Check for Command\\n\"))\n #client.send(\"Command Not Found, Use /help to Check for Command\\n\".encode())\n\n# Ready to receieve connection\nprint(\"Server open for 
connection\")\nreceive()","repo_name":"KingFisherr/Chatroom-Project","sub_path":"testserver.py","file_name":"testserver.py","file_ext":"py","file_size_in_byte":17639,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"7418962214","text":"\"\"\"\nSolution for Reorder List, Time O(n), Space O(1).\n\nIdea:\nMultiple passes.\n\"\"\"\n\nfrom __future__ import annotations\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\n# Solution.\nclass Solution:\n def reorderList(self, head: ListNode) -> None:\n if not head or not head.next:\n return\n \n # Finds the middle of the linked list.\n one_hop = head\n two_hop = head\n while two_hop and two_hop.next:\n one_hop = one_hop.next\n two_hop = two_hop.next.next\n mid = one_hop.next\n one_hop.next = None\n \n # Reverse the last half of the list.\n head2 = None\n while mid:\n temp = mid.next\n mid.next = head2\n head2 = mid\n mid = temp\n \n # Merge two lists.\n while head2:\n temp1 = head.next\n temp2 = head2.next\n head.next = head2\n head2.next = temp1\n head = temp1\n head2 = temp2\n\n# Main.\nif __name__ == \"__main__\":\n head = ListNode(1)\n head.next = ListNode(2)\n head.next.next = ListNode(3)\n head.next.next.next = ListNode(4)\n head.next.next.next.next = ListNode(5)\n head.next.next.next.next.next = ListNode(6)\n print(Solution().reorderList(head))\n","repo_name":"printfoo/leetcode-python","sub_path":"problems/0143/reorder_list.py","file_name":"reorder_list.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71344427361","text":"import os\nimport sys\nimport argparse\nroot_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nsys.path.append(root_dir)\nfrom utils.functions import gen_input, CIFAR100_TEST_MEAN, CIFAR100_TEST_STD, gen_weights_cpp_header, 
save_weights_dat\nimport torchvision\nimport torchvision.transforms as transforms\nfrom torch.utils.data import DataLoader\nfrom resnet_model import nn_resnet18\n\ndef gen_input_dat():\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(CIFAR100_TEST_MEAN, CIFAR100_TEST_STD)\n ])\n cifar100_test = torchvision.datasets.CIFAR100(root='../../dataset', train=False, download=False, transform=transform_test)\n gen_input(cifar100_test, \"../../dat_input/resnet18\", 2)\n\ndef gen_weight_dat():\n dat_dir = \"../../weights/resnet18/batchsize2.pth_dat/\"\n if not os.path.exists(dat_dir):\n save_weights_dat(\"../../weights/resnet18/batchsize2.pth\")\n gen_weights_cpp_header(nn_resnet18, dat_dir, \"./c_resnet/weights.h\")\n\nif __name__ == \"__main__\":\n arg_parser = argparse.ArgumentParser()\n arg_parser.add_argument(\"--input\", help=\"Generate input .dat files\", action=\"store_true\", default=False)\n arg_parser.add_argument(\"--weight\", help=\"Generate weight .dat files\", action=\"store_true\", default=False)\n \n args = arg_parser.parse_args()\n\n if args.input:\n gen_input_dat()\n if args.weight:\n gen_weight_dat()\n","repo_name":"Yang-Qirui/CNN-Acceleration","sub_path":"examples/my_resnet/gen_input_weight.py","file_name":"gen_input_weight.py","file_ext":"py","file_size_in_byte":1444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"16633484646","text":"#%%\nimport pandas as pd\nfrom pycaret.classification import *\nfrom imblearn.over_sampling import *\nfrom sklearn.model_selection import RepeatedKFold\nimport matplotlib.pyplot as plt\n#%%\nn_jobs = 60\nX = pd.read_csv(\"X.csv\")\ny = pd.read_csv(\"y.csv\")\ndataset = pd.concat([X,y], axis=1)\nfeature_table = pd.read_csv(\"features_table.csv\")\n\n# %%\ncompare_df = pd.DataFrame()\nmax_mcc = 0\nfor i in sorted(list(set(feature_table[\"Freq\"])),reverse=True):\n if i < 50:\n continue\n features = 
list(feature_table[feature_table[\"Freq\"] >=i][\"Feature\"]) + [\"group\"]\n if len(features) <= 3:\n continue\n rkf = RepeatedKFold(n_splits=10, n_repeats=5, random_state=2021)\n adasyn = ADASYN(sampling_strategy='minority',random_state=2021)\n clf = setup(dataset[features], target = 'group', session_id=2021,log_experiment=True, experiment_name=str(i),silent=True,fix_imbalance = True,fix_imbalance_method=adasyn,fold_strategy=rkf,n_jobs=n_jobs)\n model= create_model('et')\n tuned_model = tune_model(model,n_iter = 200,search_library=\"scikit-optimize\",optimize = \"MCC\")\n re = pull()\n mean_cv = pd.DataFrame(re.loc[\"Mean\"])\n mean_cv.columns= [i]\n compare_df = mean_cv if compare_df.empty else pd.concat([compare_df, mean_cv],axis=1)\n if mean_cv[i][\"MCC\"] > max_mcc :\n max_mcc = mean_cv[i][\"MCC\"]\n# %%\ncompare_df.to_csv(\"model_cv.csv\")","repo_name":"mohammadmirhakkak/A_fumigatus_GEM","sub_path":"src/fig3d_results_step2.py","file_name":"fig3d_results_step2.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"35819255910","text":"import pandas\r\nfrom file_operations import file_methods\r\nfrom data_preprocessing import preprocessing\r\nfrom data_ingestion import data_loader_prediction\r\nfrom application_logging import logger\r\n\r\nclass prediction:\r\n\r\n def __init__(self,path):\r\n self.file_object = open(\"Prediction_Logs/Prediction_Log.txt\", 'a+')\r\n self.log_writer = logger.App_Logger()\r\n self.pred_data_val = Prediction_Data_validation(path)\r\n\r\n def predictionFromModel(self):\r\n\r\n try:\r\n self.log_writer.log(self.file_object,'Start of Prediction')\r\n data_getter=data_loader_prediction.Data_Getter_Pred(self.file_object,self.log_writer)\r\n data=data_getter.get_data()\r\n\r\n\r\n preprocessor=preprocessing.Preprocessor(self.file_object,self.log_writer)\r\n 
columnNameList=['id','eduInformal','eduOther','vocationCategory','interestedCertProgram','immovablePropLostCOVID','movablePropLostCOVID','injuryCOVID','illnessCOVID','disabledCOVID','liveLostCOVID',\r\n 'wageRecievedCOVID','noGroup','isFPOMember','isCooperativeMember','isSHGMember','isWageEarner','employmentType','reasonLandless','relWithHeadOfFamily','genderHeadOfFamily','eduTransport',\r\n 'noScholarshipReason','typeOfSchool','eduOther','eduInformal','hasEnrolledAdultLiteracy']\r\n data = preprocessor.dropUnnecessaryColumnsAndDuplicates(data,columnNameList)\r\n\r\n is_null_present,cols_with_missing_values=preprocessor.is_null_present(data)\r\n if(is_null_present):\r\n data=preprocessor.impute_missing_values(data,cols_with_missing_values)\r\n\r\n # get encoded values for categorical data\r\n data = preprocessor.encodeCategoricalValuesPrediction(data)\r\n\r\n #data=data.to_numpy()\r\n file_loader=file_methods.File_Operation(self.file_object,self.log_writer)\r\n kmeans=file_loader.load_model('KMeans')\r\n\r\n ##Code changed\r\n\r\n clusters=kmeans.predict(data)\r\n data['clusters']=clusters\r\n clusters=data['clusters'].unique()\r\n result=[] # initialize blank list for storing predicitons\r\n with open('EncoderPickle/enc.pickle', 'rb') as file: #let's load the encoder pickle file to decode the values\r\n encoder = pickle.load(file)\r\n\r\n for i in clusters:\r\n cluster_data= data[data['clusters']==i]\r\n cluster_data = cluster_data.drop(['clusters'],axis=1)\r\n model_name = file_loader.find_correct_model_file(i)\r\n model = file_loader.load_model(model_name)\r\n for val in (model.predict(cluster_data)):\r\n\r\n result.append(val)\r\n result = pandas.DataFrame(result,columns=['Predictions'])\r\n path=\"Prediction_Output_File/Predictions.csv\"\r\n result.to_csv(\"Prediction_Output_File/Predictions.csv\",header=True) #appends result to prediction file\r\n self.log_writer.log(self.file_object,'End of Prediction')\r\n except Exception as ex:\r\n 
self.log_writer.log(self.file_object, 'Error occured while running the prediction!! Error:: %s' % ex)\r\n raise ex\r\n return path\r\n\r\n\r\n","repo_name":"Sumit-Kumar-Dash/indus_pre_work","sub_path":"predictFromModel.py","file_name":"predictFromModel.py","file_ext":"py","file_size_in_byte":3224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74268622242","text":"# Leetcode 278. First Bad Version\n#\n# Link: https://leetcode.com/problems/first-bad-version/\n# Difficulty: Easy\n# Complexity:\n# O(logN) time | where N represent the given number of program version\n# O(1) space\n\n# The isBadVersion API is already defined for you.\n# def isBadVersion(version: int) -> bool:\n\nclass Solution:\n def firstBadVersion(self, n: int) -> int:\n\n left, right = 0, n\n\n while left < right:\n\n pivot = (left + right) // 2\n\n if (isBadVersion(pivot)):\n right = pivot\n else:\n left = pivot + 1\n\n return right\n","repo_name":"edab/LC-Study-Plan","sub_path":"solutions/first-bad-version.py","file_name":"first-bad-version.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"1218307774","text":"from wordcloud import WordCloud\nfrom PIL import Image\nimport numpy as np\n\ntext=''\nwith open(\"16141.txt\", \"r\", encoding=\"utf-8\") as f:\n lines = f.readlines()\n for line in lines:\n if ' : ' in line:\n text+=line.split(' : ')[1].replace('201','').replace('ㅋ','').replace('ㅠ','').replace('ㅜ','').replace('이모티콘','').replace('사진','').replace('동영상','').replace('삭제된 메시지입니다','').replace('ㅎ','').replace('그리고','').replace('근데','').replace('ㅇ','').replace('그냥','').replace('너무','')\nprint(text)\n\nwc = WordCloud(font_path='/System/Library/Fonts/Supplemental/AppleGothic.ttf', background_color=\"black\", width=600, height=400)\nwc.generate(text)\nwc.to_file(\"16141.png\")\n\nimport matplotlib.font_manager as fm\n\nfor font in 
fm.fontManager.ttflist:\n if 'Gothic' in font.name:\n print(font.name, font.fname)\n\n\n\nmask = np.array(Image.open('cloud.png'))\nwc = WordCloud(font_path='/System/Library/Fonts/Supplemental/AppleGothic.ttf', background_color=\"white\", mask=mask)\nwc.generate(text)\nwc.to_file(\"16141.png\")","repo_name":"rinah97/testrepo","sub_path":"family.py","file_name":"family.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"30300177596","text":"import torch\n\nfrom torch.utils.data.dataloader import DataLoader\nfrom torchvision import transforms\nfrom dataset import Event_dataloader\nfrom torch.autograd import Variable\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ntrain_data_dir = '/home/HuSH/data/time_align374/train'\ntest_data_dir = '/home/HuSH/data/time_align374/test'\nmodel = torch.load('/home/HuSH/code/avenger/output/output_0122/avenger_19_Ws8DN3Y1.pt')\n\nmytransform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Resize((800,800))\n ])\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\ntrain_set = Event_dataloader(path=train_data_dir,transform=mytransform)\ntest_set = Event_dataloader(path=test_data_dir,transform=mytransform)\ntrain_loader = DataLoader(train_set,batch_size=1,shuffle=False)\ntest_loader = DataLoader(test_set,batch_size=1,shuffle=False)\n\ndef single_test(data_loader=train_loader,index=0,model=model):\n for i,batch in enumerate(data_loader):\n if i!=index:\n continue\n x,y = batch\n x = x.squeeze()\n y = y.squeeze()\n x = Variable(x.float().to(device))\n with torch.no_grad():\n logits,_ = model(x)\n return logits,y\n\ndef draw(y_hat,y):\n y_hat = y_hat.cpu().detach().numpy()\n y = y.numpy()\n x = range(y.shape[0])\n fig,ax = plt.subplots()\n ax.plot(x,y_hat)\n ax.plot(x,y)\n plt.show()\n\nif __name__ == \"__main__\":\n y_hat,y = single_test(train_loader,0,model)\n draw(y_hat,y)\n 
","repo_name":"oashua/avenger","sub_path":"single_test.py","file_name":"single_test.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"40297723868","text":"\"\"\"Provides a command line utility to run key experiments described in the paper.\"\"\"\nimport argparse\nimport os\nimport sys\nimport subprocess\nimport shutil\n\nfrom src.model_training.model_training_code import train_evaluate_models, train_evaluate_trastuzumab\nfrom src.model_training.model_training_code import plot_all_scores, score_trastuzumab\nfrom src.raw_data_processing.process_raw_reads import process_all_raw_reads\nfrom src.sequence_encoding.encode_sequences import sequence_encoding_wrapper\nfrom src.sequence_encoding.alternate_encoding import alternate_encoding_wrapper\nfrom src.sequence_encoding.Unirep_encoding import unirep_encoding_wrapper\nfrom src.simulated_annealing.run_markov_chains import run_annealing_chains\nfrom src.simulated_annealing.run_markov_chains import analyze_annealing_results\nfrom src.simulated_annealing.run_markov_chains import score_mutations\nfrom src.sequence_encoding.seqs_to_fasta import convert_to_fasta\nfrom src.sequence_encoding.fair_esm_wrapper import fair_esm_wrapper\nfrom src.model_training.cv_scoring import run_all_5x_cvs\nfrom src.trastuzumab_exp.process_raw_seqs import encode_trastuzumab_seqs\n\n\ndef gen_arg_parser():\n parser = argparse.ArgumentParser(description=\"Use this command line app \"\n \"to run / reproduce all of the key steps in the pipeline: \"\n \"processing raw sequence data, training and evaluating models, \"\n \"running simulated annealing and analyzing the simulated annealing \"\n \"results.\\n\\nSpecify which steps you would like to execute. Note that \"\n \"some steps must be executed before other steps can be executed. \"\n \"For example, setup is a necessary first step in the pipeline. 
You \"\n \"can execute one step at a time, or specify all to perform all \"\n \"of them in sequence.\")\n parser.add_argument(\"--setup\", action=\"store_true\", help=\n \"Provide instructions on how to retrieve the raw data and set up the \"\n \"pipeline.\")\n parser.add_argument(\"--processraw\", action=\"store_true\", help=\n \"Process the raw sequences.\")\n parser.add_argument(\"--downloadencodings\", action=\"store_true\", help=\n \"Some of the embeddings described in the paper are expensive \"\n \"to generate (primarily fair-esm, which is computationally \"\n \"quite expensive). We offer the ability to download them instead \"\n \"using this option. If following the instructions under --setup, \"\n \"you can skip this step.\")\n parser.add_argument(\"--encode\", action=\"store_true\", help=\n \"Encode the processed raw data using the various representations \"\n \"described in the paper (e.g. Unirep, one-hot etc.) Be aware that for \"\n \"some encodings, primarily fair-esm, this may take considerable \"\n \"time -- downloading them is usually preferable. If following the \"\n \"instructions under --setup, you can skip this step.\")\n parser.add_argument(\"--altprocess\", action=\"store_true\", help=\n \"Process the compiled sequences into fasta files for use by other \"\n \"routines.\")\n parser.add_argument(\"--traintest\", action=\"store_true\", help=\n \"Train models on the test set for each encoding and model type; \"\n \"reproduce the test set evaluation performed for the paper.\")\n parser.add_argument(\"--finalmodel\", action=\"store_true\", help=\n \"Train a final model on the combined training and test sets \"\n \"(encoded using the task-adapted autoencoder)\")\n parser.add_argument(\"--runcvs\", action=\"store_true\", help=\n \"Reproduce the 5x CV evaluations from the paper. 
Note that \"\n \"this step is likely to take several hours to run.\")\n parser.add_argument(\"--gettopseqs\", action=\"store_true\", help=\n \"Get the top scoring sequences from the orgiinal dataset.\")\n parser.add_argument(\"--simulatedanneal\", action=\"store_true\", help=\n \"Reproduce the 10 simulated annealing experiments run for \"\n \"the paper\")\n parser.add_argument(\"--analyzeanneal\", action=\"store_true\", help=\n \"Analyze the results of the simulated annealing experiment \"\n \"and store the final selected sequences.\")\n parser.add_argument(\"--mutcheck\", action=\"store_true\", help=\n \"Check which mutations in the selected sequences are predicted \"\n \"to contribute most highly to affinity.\")\n parser.add_argument(\"--plotscores\", action=\"store_true\", help=\n \"Plot the score distribution vs experimentally determined \"\n \"binding category.\")\n parser.add_argument(\"--trastsetup\", action=\"store_true\", help=\n \"Set up the trastuzumab sequence data.\")\n parser.add_argument(\"--trasteval\", action=\"store_true\", help=\n \"Evaluate on train-test for the trastuzumab data.\")\n parser.add_argument(\"--trastscore\", action=\"store_true\", help=\n \"Score trastuzumab sequences selected for experimental eval.\")\n return parser\n\n\ndef main():\n \"\"\"This is the entry point for all of the steps in the pipeline;\n it uses argparse to parse command line arguments, then calls\n the relevant routines from specific subdirectories in src as\n appropriate. The easiest way to reproduce the experiments\n described in the paper is to use\n the command line arguments specified here. 
You can of course\n extract specific chunks of the pipeline code from src as \n desired, but if so you will have to reconfigure them to work\n with your alternative pipeline.\n \"\"\"\n #Many of the actions taken by routines in the pipeline involve\n #directories and files that have specific locations relative\n #to the start directory.\n start_dir = os.getcwd()\n parser = gen_arg_parser()\n args = parser.parse_args()\n if len(sys.argv) == 1:\n parser.print_help(sys.stderr)\n sys.exit(1)\n\n if args.setup:\n print(\"The raw data used in these experiments is available from SRA and \"\n \"also from Dropbox. To download from Dropbox, use the following \"\n \"link: https://www.dropbox.com/sh/2iyxmsljo551cwy/AACetM27l1CbiIy7NV6a4mNra?dl=0 \\n\\n.\"\n \"You will find a tarball containing encoded data and three folders named \"\n \"rh01, rh02 and rh03.\\nExtract the encoded data file and move the \"\n \"contents of the resulting folder to the encoded data folder in \"\n \"this directory. Move the three folders to the raw_data folder in \"\n \"this directory. 
You should then be able to proceed (and do not need \"\n \"to run downloadencodings).\")\n if args.processraw:\n process_all_raw_reads(start_dir)\n if args.downloadencodings:\n #Data is temporarily stored here (move to permanent location soon):\n procstat = subprocess.run([\"gdown\", \"1kmTs8XumNcUC8R4RQo90fk-8V_Yy7lmm\"])\n tarfile = \"encoded_data.tar.gz\"\n shutil.move(tarfile, \"encoded_data\")\n os.chdir(\"encoded_data\")\n for f in os.listdir():\n if not f.endswith(\".tar.gz\"):\n os.remove(f)\n procstat = subprocess.run([\"tar\", \"-xzf\", tarfile])\n os.chdir(\"encoded_data\")\n for f in os.listdir():\n shutil.move(f, \"..\")\n os.chdir(\"..\")\n os.rmdir(\"encoded_data\")\n os.remove(\"encoded_data.tar.gz\")\n if args.encode:\n sequence_encoding_wrapper(start_dir)\n convert_to_fasta(start_dir)\n unirep_encoding_wrapper(start_dir)\n fair_esm_wrapper(start_dir)\n if args.altprocess:\n alternate_encoding_wrapper(start_dir)\n if args.traintest:\n train_evaluate_models(start_dir, action_to_take=\"traintest_eval\")\n if args.finalmodel:\n train_evaluate_models(start_dir, action_to_take=\"train_final_model\")\n if args.gettopseqs:\n train_evaluate_models(start_dir, action_to_take=\"get_top_seqs\")\n if args.simulatedanneal:\n run_annealing_chains(start_dir)\n if args.analyzeanneal:\n analyze_annealing_results(start_dir)\n if args.mutcheck:\n score_mutations(start_dir)\n if args.plotscores:\n plot_all_scores(start_dir)\n if args.runcvs:\n run_all_5x_cvs(start_dir)\n if args.trastsetup:\n encode_trastuzumab_seqs(start_dir)\n if args.trasteval:\n train_evaluate_trastuzumab(start_dir)\n if args.trastscore:\n score_trastuzumab(start_dir)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Wang-lab-UCSD/RESP","sub_path":"run_experiments.py","file_name":"run_experiments.py","file_ext":"py","file_size_in_byte":8515,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"54"} +{"seq_id":"23262066848","text":"#Live sobre 
estrutura de repetição\n#As estruturas de repetição se repetem até que a condição não atenda mais o requisito\nprint(\"Exemplo de tabuada\")\nresp=\"S\"\nwhile (resp.upper()==\"S\"):\n n=int(input(\"Insira o valor da tabuada: \"))\n i=1\n while (i<=10):\n r=n*i\n print(\"{:2} X {:2} = {:3}\".format(n,i,r))\n i=i+1\n resp=input(\"Digite S para continuar / digite outro caractere para sair: \")\n","repo_name":"leabr1/python","sub_path":"lives-univem/aula4.py","file_name":"aula4.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"41653516150","text":"import argparse\nfrom simulations.afferent_stimulation import AfferentStimulation\nfrom simulations.parameters import StimParameters, FiberParameters, FIBERS_LENGTH_UM, PROPAGATION_DELAY_MS, \\\n FASCICLE_RADIUS_UM, MEAN_DIAMETER_UM, STD_DIAMETER\n\n\ndef main():\n \"\"\" Main script running an AfferentStimulation simulation with cli defined input parameters.\n The results from this simulation are saved in the results folder (see Simulation._results_folder).\n \"\"\"\n parser = argparse.ArgumentParser(description=\"Run a AfferentStimulation simulation.\")\n parser.add_argument(\"-n\", \"--n-fibers\", help=\"number of fibers\", type=int, default=100)\n parser.add_argument(\"-a\", \"--stim-amp\", help=\"Simulation amplitude (mA)\", type=float, default=-0.045)\n parser.add_argument(\"--min-stim-amp\", help=\"Simulation min amplitude (mA)\", type=float, default=-0.01)\n parser.add_argument(\"-f\", \"--stim-freq\", help=\"Stimulation frequency (Hz)\", type=int, default=40)\n parser.add_argument(\"-p\", \"--pulse-width\", help=\"Stimulation pulse width (us)\", type=float, default=50.)\n parser.add_argument(\"-b\", \"--bios\", help=\"flag to use bios burst stimulation\", action=\"store_true\")\n parser.add_argument(\"--burst-frequency\", help=\"Stimulation frequency within a bios burst (Hz)\", type=float,\n default=8000.)\n 
parser.add_argument(\"-d\", \"--burst-duration\", help=\"Bios burst duration (ms)\", type=float, default=20.)\n parser.add_argument(\"-t\", \"--sim-time\", help=\"Simulation time (ms)\", type=int, default=500)\n parser.add_argument(\"--sim-name\", help=\"String to append at the end of the result files\", type=str, default=\"\")\n parser.add_argument(\"--plot-response-stats\", help=\"Flag to plot the stimulation response statistics\",\n action=\"store_true\")\n parser.add_argument(\"-w\", \"--plot-window\", help=\"Flag to plot a specific window of data\", action=\"store_true\")\n parser.add_argument(\"--plot-window-duration\", help=\"Duration in ms of the window to plot\", type=float, default=150.)\n parser.add_argument(\"--non-blocking-plots\", help=\"Flag to use non-blocking plots\", action=\"store_true\")\n parser.add_argument(\"--results-folder\", help=\"Path to folder where the results are saved\", type=str, default=None)\n args = parser.parse_args()\n\n stim_parameters = StimParameters(\n frequency=args.stim_freq,\n pulse_width_ms=args.pulse_width / 1000.,\n amplitude_ma=args.stim_amp,\n bios=args.bios,\n burst_frequency=args.burst_frequency,\n burst_duration_ms=args.burst_duration,\n min_amplitude_ma=args.min_stim_amp,\n )\n\n fiber_parameters = FiberParameters(\n n_fibers=args.n_fibers,\n length_um=FIBERS_LENGTH_UM,\n mean_diameter_um=MEAN_DIAMETER_UM,\n std_diameter=STD_DIAMETER,\n propagation_delay_ms=PROPAGATION_DELAY_MS,\n fascicle_radius_um=FASCICLE_RADIUS_UM\n )\n\n simulation = AfferentStimulation(fiber_parameters, stim_parameters, args.sim_time)\n if args.results_folder is not None:\n simulation.set_results_folder(args.results_folder)\n simulation.run()\n if args.plot_response_stats:\n simulation.plot_stim_response_stats(args.sim_name, block=False)\n block = not args.non_blocking_plots\n simulation.plot(args.sim_name, block)\n if args.plot_window:\n start_from_stim_event_n = 3\n start_ms = AfferentStimulation.START_STIM_TIME_MS + 
start_from_stim_event_n * (1000./args.stim_freq) - 1\n simulation.plot(args.sim_name, block, window_ms=[start_ms, start_ms+args.plot_window_duration])\n simulation.save_results(args.sim_name)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"FormentoEmanuele/BioS","sub_path":"run_afferent_stimulation.py","file_name":"run_afferent_stimulation.py","file_ext":"py","file_size_in_byte":3665,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"17070149531","text":"\"\"\"update db\n\nRevision ID: 369123239fc7\nRevises: 220c46c7811d\nCreate Date: 2023-06-14 10:07:33.246474\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '369123239fc7'\ndown_revision = '220c46c7811d'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('properties', sa.Column('createdAt', sa.DateTime(timezone=True), nullable=True))\n op.add_column('properties', sa.Column('updatedAt', sa.DateTime(timezone=True), nullable=True))\n op.add_column('rental_history', sa.Column('createdAt', sa.DateTime(timezone=True), nullable=True))\n op.add_column('rental_history', sa.Column('updatedAt', sa.DateTime(timezone=True), nullable=True))\n op.add_column('transaction_history', sa.Column('createdAt', sa.DateTime(timezone=True), nullable=True))\n op.add_column('transaction_history', sa.Column('updatedAt', sa.DateTime(timezone=True), nullable=True))\n op.add_column('users', sa.Column('createdAt', sa.DateTime(timezone=True), nullable=True))\n op.add_column('users', sa.Column('updatedAt', sa.DateTime(timezone=True), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('users', 'updatedAt')\n op.drop_column('users', 'createdAt')\n op.drop_column('transaction_history', 'updatedAt')\n op.drop_column('transaction_history', 'createdAt')\n op.drop_column('rental_history', 'updatedAt')\n op.drop_column('rental_history', 'createdAt')\n op.drop_column('properties', 'updatedAt')\n op.drop_column('properties', 'createdAt')\n # ### end Alembic commands ###\n","repo_name":"namhee1997/acmo-BE","sub_path":"migrations/versions/369123239fc7_update_db.py","file_name":"369123239fc7_update_db.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"16661734960","text":"\"\"\"Арифметика с игральными костями,\nИгра с обучающими карточками на сложение, в которой нужно\n➥ суммировать все очки на выброшенных игральных костях\n\"\"\"\n\nimport random, time\n\n# Задаем константы:\nDICE_WIDTH = 9\nDICE_HEIGHT = 5\nCANVAS_WIDTH = 79\nCANVAS_HEIGHT = 24 - 3 # -3, чтобы было куда ввести сумму внизу\n\n# Длительность в секундах:\nQUIZ_DURATION = 30 # (!) Попробуйте заменить это значение на 10 или 60.\nMIN_DICE = 2 # (!) Попробуйте заменить это значение на 1 или 5.\nMAX_DICE = 6 # (!) Попробуйте заменить это значение на 14.\n\n# (!) Попробуйте заменить эти значения на различные другие:\nREWARD = 4 # (!) Очки, полученные за правильные ответы.\nPENAltY = 1 # (!) Очки, отнятые за неправильные ответы.\n# (!) 
Попробуй��е задать отрицательное значение PENAltY, чтобы давать\n# очки за неправильные ответы!\n\n# Если все кости не помещаются на экране, программа зависает:\nassert MAX_DICE <= 14\nD1 = (['+-------+',\n '| |',\n '| O |',\n '| |',\n '+-------+'], 1)\n\nD2a = (['+-------+',\n '| O |',\n '| |',\n '| O |',\n '+-------+'], 2)\n\nD2b = (['+-------+',\n '| O |',\n '| |',\n '| O |',\n '+-------+'], 2)\n\nD3a = (['+-------+',\n '| O |',\n '| O |',\n '| O |',\n '+-------+'], 3)\n\nD3b = (['+-------+',\n '| O |',\n '| O |',\n '| O |',\n '+-------+'], 3)\n\nD4 = (['+-------+',\n '| O O |',\n '| |',\n '| O O |',\n '+-------+'], 4)\n\nD5 = (['+-------+',\n '| O O |',\n '| O |',\n '| O O |',\n '+-------+'], 5)\n\nD6a = (['+-------+',\n '| O O |',\n '| O O |',\n '| O O |',\n '+-------+'], 6)\n\nD6b = (['+-------+',\n '| O O O |',\n '| |',\n '| O O O |',\n '+-------+'], 6)\n\nALL_DICE = [D1, D2a, D2b, D3a, D3b, D4, D5, D6a, D6b]\n\nprint('''Dice Math, by Ruslan Sayfullin\n\nAdd up the sides of all the dice displayed on the screen. You have\n{} seconds to answer as many as possible. 
You get {} points for each\ncorrect answer and lose {} point for each incorrect answer.\n'''.format(QUIZ_DURATION, REWARD, PENAltY))\ninput('Press Enter to begin...')\n\n# Отслеживаем количество правильных и неправильных ответов:\ncorrectAnswers = 0\nincorrectAnswers = 0\nstartTime = time.time()\nwhile time.time() < startTime + QUIZ_DURATION: # Основной цикл игры.\n # Выбираем кость для отображения:\n sumAnswer = 0\n diceFaces = []\n for i in range(random.randint(MIN_DICE, MAX_DICE)):\n die = random.choice(ALL_DICE)\n # die[0] содержит список лицевых сторон костей в виде строк:\n diceFaces.append(die[0])\n # die[1] содержит количество точек на лицевой стороне в виде чисел:\n sumAnswer += die[1]\n\n # Содержит кортежи (x, y) с местоположением верхнего левого угла кости.\n topLeftDiceCorners = []\n\n # Определяем, где должна быть размещена кость:\n for i in range(len(diceFaces)):\n while True:\n # Находим случайное место на холсте для размещения кости:\n left = random.randint(0, CANVAS_WIDTH - 1 - DICE_WIDTH)\n top = random.randint(0, CANVAS_HEIGHT - 1 - DICE_HEIGHT)\n # Получаем координаты x, y всех четырех углов:\n # left\n # v\n #top > +-------+ ^\n # | O | |\n # | O | DICE_HEIGHT (5)\n # | O | |\n # +-------+ v\n # <------->\n # DICE_WIDTH (9)\n topLeftX = left\n topLeftY = top\n topRightX = left + DICE_WIDTH\n topRightY = top\n bottomLeftX = left\n bottomLeftY = top + DICE_HEIGHT\n bottomRightX = left + DICE_WIDTH\n bottomRightY = top + DICE_HEIGHT\n\n # Проверяем, не пересекается ли эта игральная кость с предыдущей.\n overlaps = False\n for prevDieLeft, prevDieTop in topLeftDiceCorners:\n prevDieRight = prevDieLeft + DICE_WIDTH\n prevDieBottom = prevDieTop + DICE_HEIGHT\n # Проверяем все углы этой кости, не входят ли они\n # в область, занимаемую предыдущей костью:\n for cornerX, cornerY in ((topLeftX, topLeftY),\n (topRightX, topRightY),\n (bottomLeftX, bottomLeftY),\n (bottomRightX, bottomRightY)):\n if (prevDieLeft <= cornerX < prevDieRight\n and prevDieTop 
<= cornerY < prevDieBottom):\n overlaps = True\n if not overlaps:\n # Если не пересекается, можем ее тут разместить:\n topLeftDiceCorners.append((left, top))\n break\n\n # Отрисовываем кость на холсте:\n\n # Ключи представляют собой кортежи (x, y) целочисленных значений,\n # значения �� символы на соответствующем месте холста:\n canvas = {}\n # Проходим в цикле по всем костям:\n for i, (dieLeft, dieTop) in enumerate(topLeftDiceCorners):\n # Проходим в цикле по всем символам лицевой стороны кости:\n dieFace = diceFaces[i]\n for dx in range(DICE_WIDTH):\n for dy in range(DICE_HEIGHT):\n # Копируем символ в соответствующее место холста:\n canvasX = dieLeft + dx\n canvasY = dieTop + dy\n # Обратите внимание, что в dieFace, списке строковых\n # значений, x и y поменяны местами:\n canvas[(canvasX, canvasY)] = dieFace[dy][dx]\n\n # Выводим холст на экран:\n for cy in range(CANVAS_HEIGHT):\n for cx in range(CANVAS_WIDTH):\n print(canvas.get((cx, cy), ' '), end='')\n print() # Выводим символ новой строки.\n\n # Даем игроку возможность ввести свой ответ:\n response = input('Enter the sum: ').strip()\n if response.isdecimal() and int(response) == sumAnswer:\n correctAnswers += 1\n else:\n print('Incorrect, the answer is', sumAnswer)\n time.sleep(2)\n incorrectAnswers += 1\n\n# Отображаем итоговый счет:\nscore = (correctAnswers * REWARD) - (incorrectAnswers * PENAltY)\nprint('Correct: ', correctAnswers)\nprint('Incorrect:', incorrectAnswers)\nprint('Score:', score)","repo_name":"RuslanSayfullin/small_python_projects","sub_path":"dicemath.py","file_name":"dicemath.py","file_ext":"py","file_size_in_byte":7780,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"15126213026","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 30 20:48:45 2020\n\n@author: katewujciak\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\n\nattractions = pd.read_csv(\"attractions.csv\")\natt = 
np.array(attractions)\nlocations = att[:,0]\natt_list = att[:,1]\nprint(locations)\n\n\nlocation = input(\"What would you like to travel? \")\n\ndef attract():\n index = np.where(locations == location)\n print(index)\n# if location in locations:\n# value_index = locations.index(location)\n# print(value_index)\nattract()\n\n \n\n\n#def readcsv(filename):\t\n# att = open(filename, \"rU\")\n# reader = csv.reader(att, delimiter=\";\")\n#\n# rownum = 0\t\n# a = []\n#\n# for row in reader:\n# a.append (row)\n# rownum += 1\n# \n# att.close()\n# print(a)\n# return a\n#readcsv(\"attractions.csv\")\n\n#attractions = np.loadtxt('attractions.csv', delimiter=',', skiprows=1, unpack=True)\n#\n#print(attractions)\n\n#import csv\n#\n#with open('attractions.csv', newline='') as attractions:\n# att = csv.reader(attractions, delimiter=' ', quotechar='|')\n# for row in att:\n# print(row['London'])","repo_name":"kwujciak/Final-Project-ES2","sub_path":"FP_attractions.py","file_name":"FP_attractions.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"9493769914","text":"import unittest\nimport Obj\nimport os\n\n\nclass TestSelectCourseList(unittest.TestCase):\n \"\"\"测试选课类\"\"\"\n\n\n def setUp(self):\n pass\n\n def tearDown(self):\n \"\"\"清理文件\"\"\"\n path = Obj.SelectCourseList().path\n if os.path.exists(path):\n os.remove(path)\n\n def test_add(self):\n \"\"\"测试添加选课信息\"\"\"\n sc = Obj.SelectCourseList()\n sc.add('1', '2')\n self.assertEqual(len(sc.select), 1)\n self.assertEqual(sc.select[0]['studentID'], '1')\n self.assertEqual(sc.select[0]['courseID'], '2')\n\n def test_remove(self):\n \"\"\"测试删除选课信息\"\"\"\n sc = Obj.SelectCourseList()\n sc.add('1', '2')\n sc.remove('1', '2')\n self.assertEqual(len(sc.select), 0)\n\n def test_query(self):\n \"\"\"测试查询选课信息\"\"\"\n sc = Obj.SelectCourseList()\n sc.add('1', '2')\n sc.add('1', '3')\n sc.add('2', '2')\n 
self.assertEqual(len(sc.query('1')), 2)\n self.assertEqual(len(sc.query('2')), 1)\n self.assertEqual(len(sc.query('')), 3)\n\n def test_removeByStudentID(self):\n \"\"\"测试删除选课信息\"\"\"\n sc = Obj.SelectCourseList()\n sc.add('1', '2')\n sc.add('1', '3')\n sc.add('2', '2')\n sc.removeByStudentID('1')\n self.assertEqual(len(sc.select), 1)\n self.assertEqual(sc.select[0]['studentID'], '2')\n self.assertEqual(sc.select[0]['courseID'], '2')\n\n def test_removeByCourseID(self):\n \"\"\"测试删除选课信息\"\"\"\n sc = Obj.SelectCourseList()\n sc.add('1', '2')\n sc.add('1', '3')\n sc.add('2', '2')\n sc.removeByCourseID('2')\n self.assertEqual(len(sc.select), 1)\n self.assertEqual(sc.select[0]['studentID'], '1')\n self.assertEqual(sc.select[0]['courseID'], '3')\n\n def test_write(self):\n \"\"\"测试写入文件\"\"\"\n sc = Obj.SelectCourseList()\n sc.add('1', '2')\n sc.write()\n self.assertTrue(os.path.exists(sc.path))\n\n\n# 运行测试\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"Karenina-na/Basic-Cognition-and-Practice-of-Information-Technology","sub_path":"lab3/test/TestSCList.py","file_name":"TestSCList.py","file_ext":"py","file_size_in_byte":2117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"10862417830","text":"from __future__ import print_function\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport argparse\nimport cv2\n\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--image\", required = True, help = \"Path to the image\")\nargs = vars(ap.parse_args())\n\nimage = cv2.imread(args[\"image\"])\nimage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n# get blurred, thresh, and threshInv\n# for the 3ird argument, by supplying cv2.ADAPTIVE_THRESH_MEAN_C, we indicate that we want to compute the mean of the neighborhood of pixels and threat it as our T value\n# for the 5th argument, 1& means that we examine 11x11 pixel regions of the image, instead of trying to threshold the image globally\n# the 
6th argument is for fine-tune our thresholding\nblurred = cv2.GaussianBlur(image, (5,5), 0)\nthresh_mean1 = cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 11,4)\nthresh_mean2 = cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 11,3)\nthresh_mean3 = cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 15,4)\nthresh_mean4 = cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 15,3)\nthresh_gaussian1 = cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 11,4)\nthresh_gaussian2 = cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 11,3)\nthresh_gaussian3 = cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 15,4)\nthresh_gaussian4 = cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 15,3)\n\nthresh_mean = np.hstack([blurred, thresh_mean1, thresh_mean2, thresh_mean3, thresh_mean4])\nthresh_gaussian = np.hstack([blurred, thresh_gaussian1, thresh_gaussian2, thresh_gaussian3, thresh_gaussian4])\ncv2.imshow(\"adaptivetThreshold_mean\", thresh_mean)\ncv2.imshow(\"adaptivetThreshold_gaussian\", thresh_gaussian)\n\ncv2.waitKey(0)\n\n# #show histogram\n# hist = cv2.calcHist([image], [0], None, [256], [0, 256])\n# plt.figure()\n# plt.title(\"Color Histogram\")\n# plt.xlabel(\"Bins\")\n# plt.ylabel(\"# of Pixels\")\n# plt.plot(hist)\n# plt.xlim([0,256])\n# plt.show()","repo_name":"fangyansun/PracticalPython_OpenCV","sub_path":"adaptive_thresholding.py","file_name":"adaptive_thresholding.py","file_ext":"py","file_size_in_byte":2228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"1646722492","text":"from veroviz._common import *\nfrom veroviz._internal import distributeTimeDist\nfrom veroviz._internal import loc2Dict\nfrom 
veroviz._internal import locs2Dict\n\ndef osrmGetSnapToRoadLatLon(loc):\n\t\"\"\"\n\tA function to get snapped latlng for one coordinate using OSRM\n\n\tParameters\n\t----------\n\tloc: list\n\t\tThe location to be snapped to road\n\n\tReturns\n\t-------\n\tlist\n\t\tA snapped locations in the format of [lat, lon], notice that this function will lost the info of altitude of the location.\n\t\"\"\"\n\n\tdicLoc = loc2Dict(loc)\n\tsnapToRoadUrl = ('http://router.project-osrm.org/nearest/v1/driving/%s,%s') % (dicLoc['lon'], dicLoc['lat']) # OSRM use lon/lat\n\tdata = []\n\n\ttry:\n\t\thttp = urllib3.PoolManager()\n\t\tresponse = http.request('GET', snapToRoadUrl)\n\t\tdata = json.loads(response.data.decode('utf-8'))\n\n\t\tsnapLoc = [data['waypoints'][0]['location'][1], data['waypoints'][0]['location'][0]] # OSRM use lon/lat\n\texcept:\n\t\tprint (\"Message: OSRM is currently not available, please try again later.\")\n\n\treturn snapLoc\n\ndef osrmGetShapepointsTimeDist(startLoc, endLoc):\n\t\"\"\"\n\tA function to get a list of shapepoints from start coordinate to end coordinate, the result of this function is not as detailed as mpqGetShapepointTimeDist, however, it is faster.\n\n\tParameters\n\t----------\n\tstartLoc: list\n\t\tStart location, the format is [lat, lon] (altitude, above sea level, set to be 0) or [lat, lon, alt]\n\tendLoc: list\n\t\tEnd location, the format is [lat, lon] (altitude, above sea level, set to be 0) or [lat, lon, alt]\n\n\tReturns\n\t-------\n\tpath: list of lists\n\t\tA list of coordinates in sequence that shape the route from startLoc to endLoc\n\ttimeInSeconds: double\n\t\ttime between current shapepoint and previous shapepoint, the first element should be 0\n\tdistInMeters: double\n\t\tdistance between current shapepoint and previous shapepoint, the first element should be 0\n\t\"\"\"\n\n\tdicStartLoc = loc2Dict(startLoc)\n\tdicEndLoc = loc2Dict(endLoc)\n\tshapepointsUrl = 
('http://router.project-osrm.org/route/v1/driving/%s,%s;%s,%s?steps=true') % (dicStartLoc['lon'], dicStartLoc['lat'], dicEndLoc['lon'], dicEndLoc['lat']) # OSRM use lon/lat\n\tdata = []\n\n\ttry:\n\t\thttp = urllib3.PoolManager()\n\t\tresponse = http.request('GET', shapepointsUrl)\n\t\tdata = json.loads(response.data.decode('utf-8'))\n\n\t\tpath = []\n\t\tfor i in range(len(data['routes'])):\n\t\t\tfor j in range(len(data['routes'][i]['legs'])):\n\t\t\t\tfor k in range(len(data['routes'][i]['legs'][j]['steps'])):\n\t\t\t\t\tfor m in range(len(data['routes'][i]['legs'][j]['steps'][k]['intersections'])):\n\t\t\t\t\t\tpath.append([data['routes'][i]['legs'][j]['steps'][k]['intersections'][m]['location'][1], data['routes'][i]['legs'][j]['steps'][k]['intersections'][m]['location'][0]])\n\t\ttotalTimeInSecond = data['routes'][0]['duration']\n\t\t[timeInSeconds, distInMeters] = distributeTimeDist(path, totalTimeInSecond)\n\texcept:\n\t\tprint (\"Message: OSRM is currently not available, please try again later.\")\n\n\treturn [path, timeInSeconds, distInMeters]\n\ndef osrmGetTimeDistOnePair(startLoc, endLoc):\n\t\"\"\"\n\tA function to get a total time and total distance between two given coordinates\n\n\tParameters\n\t----------\n\tstartLoc: list\n\t\tStart location, the format is [lat, lon] (altitude, above sea level, set to be 0) or [lat, lon, alt]\n\tendLoc: list\n\t\tEnd location, the format is [lat, lon] (altitude, above sea level, set to be 0) or [lat, lon, alt]\n\n\tReturns\n\t-------\n\ttimeSeconds: double\n\t\ttime between current shapepoint and previous shapepoint, the first element should be 0\n\tdistMeters: double\n\t\tdistance between current shapepoint and previous shapepoint, the first element should be 0\n\t\"\"\"\n\n\tdicStartLoc = loc2Dict(startLoc)\n\tdicEndLoc = loc2Dict(endLoc)\n\ttimeDistUrl = ('http://router.project-osrm.org/route/v1/driving/%s,%s;%s,%s') % (dicStartLoc['lon'], dicStartLoc['lat'], dicEndLoc['lon'], dicEndLoc['lat']) # OSRM use 
lon/lat\n\tdata = []\n\n\ttry:\n\t\thttp = urllib3.PoolManager()\n\t\tresponse = http.request('GET', timeDistUrl)\n\t\tdata = json.loads(response.data.decode('utf-8'))\n\n\t\ttimeSeconds = data['routes'][0]['duration']\n\t\tdistMeters = data['routes'][0]['distance']\n\texcept:\n\t\tprint (\"Message: OSRM is currently not available, please try again later.\")\n\n\treturn [timeSeconds, distMeters]\n\ndef osrmGetTimeDist(fromLocs, toLocs):\n\t\"\"\"\n\tA function to get distance and time matrices between a list of starting coordinates and a list of ending coordinates\n\n\tParameters\n\t----------\n\tfromLocs: list of lists\n\t\tA list of starting coordinates, the format is [[lat1, lon1], [lat2, lon2], ...]\n\ttoLocs: list of lists\n\t\tA list of ending coordinates, the format is [[lat1, lon1], [lat2, lon2], ...]\n\n\tReturns\n\t-------\n\ttimeInSeconds: double\n\t\ttime between current shapepoint and previous shapepoint, the first element should be 0\n\tdistInMeters: double\n\t\tdistance between current shapepoint and previous shapepoint, the first element should be 0\n\t\"\"\"\n\ttimeSeconds = {}\n\tdistMeters = {}\n\tfor i in range(len(fromLocs)):\n\t\tfor j in range(len(toLocs)):\n\t\t\t[time, dist] = osrmGetTimeDistOnePair(fromLocs[i], toLocs[j])\n\t\t\ttimeSeconds[i, j] = time\n\t\t\tdistMeters[i, j] = dist\n\n\treturn [timeSeconds, distMeters]","repo_name":"optimatorlab/veroviz","sub_path":"veroviz/_queryOSRM.py","file_name":"_queryOSRM.py","file_ext":"py","file_size_in_byte":5087,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"54"} +{"seq_id":"29975677395","text":"import abc\r\nimport os\r\nimport pygame\r\n\r\npygame.init()\r\npygame.font.init()\r\n\r\nRUNNING = True\r\n\r\n\r\ndef play(**kwargs):\r\n try:\r\n os.system('python3 main.py')\r\n except:\r\n ('хз что произошло')\r\n\r\n\r\nclass MenuElement(abc.ABC):\r\n def __init__(self, x, y, size, content, func_handler):\r\n self.w, self.h = size\r\n self._x, 
self._y = x, y\r\n self.content = content\r\n self.rect = pygame.Rect(self._x, self._y, self.w, self.h)\r\n self.func_handler = func_handler\r\n\r\n @property\r\n @abc.abstractmethod\r\n def loc(self):\r\n pass\r\n\r\n @abc.abstractmethod\r\n def draw(self, screen):\r\n pass\r\n\r\n def update(self, **kwargs):\r\n if self.func_handler is not None:\r\n self.func_handler(**kwargs)\r\n\r\n\r\nclass Button(MenuElement):\r\n def __init__(self, size, content, func_handler, font_weight):\r\n super().__init__(0, 0, size, content, func_handler)\r\n\r\n self.highlight = False\r\n self.font_weight = font_weight\r\n self.font = pygame.font.SysFont('Comic Sans MS', font_weight)\r\n\r\n @property\r\n def loc(self):\r\n return (self._x, self._y)\r\n\r\n @loc.setter\r\n def loc(self, value):\r\n self._x = value[0]\r\n self._y = value[1]\r\n\r\n self.rect.center = (self._x, self._y)\r\n\r\n def draw(self, screen):\r\n content = self.font.render(self.content, False, (255, 255, 255))\r\n content_rect = content.get_rect(center=(self._x, self._y))\r\n\r\n pygame.draw.rect(screen, (100, 100, 100), self.rect, 1 if not self.highlight else 0, 4)\r\n screen.blit(content, content_rect)\r\n\r\n\r\nclass Menu:\r\n def __init__(self, wnd_size, elem_size):\r\n self.wnd_width, self.wnd_height = wnd_size\r\n self.elem_width, self.elem_height = elem_size\r\n self.elements = []\r\n self.selected = -1\r\n self.hover = -1\r\n self.checked = False\r\n\r\n def __update_elements(self):\r\n step = (self.wnd_height - self.elem_height * len(self.elements)) // (len(self.elements) + 1)\r\n top = step\r\n\r\n for elem in self.elements:\r\n elem.loc = (self.wnd_width // 2, top + self.elem_height // 2)\r\n top += (step + self.elem_height)\r\n\r\n def add_elem(self, elem):\r\n self.elements.append(elem)\r\n self.__update_elements()\r\n\r\n def draw(self, screen):\r\n for elem in self.elements:\r\n elem.draw(screen)\r\n\r\n def update(self):\r\n for i, elem in enumerate(self.elements):\r\n elem.highlight = 
False\r\n if elem.rect.collidepoint(pygame.mouse.get_pos()):\r\n elem.highlight = True\r\n self.hover = i\r\n\r\n for event in pygame.event.get():\r\n if event.type == pygame.MOUSEBUTTONDOWN:\r\n self.selected = self.hover\r\n\r\n if self.selected != -1:\r\n self.elements[self.selected].update()\r\n self.selected = -1\r\n\r\n self.hover = -1\r\n\r\n\r\ndef close(**kwargs):\r\n global RUNNING\r\n RUNNING = False\r\n\r\n\r\nif __name__ == '__main__':\r\n screen = pygame.display.set_mode((800, 600))\r\n wnd_size = (800, 600)\r\n elem_size = (200, 50)\r\n\r\n menu = Menu(wnd_size, elem_size)\r\n button = Button(elem_size, \"Play\", play, 32)\r\n button1 = Button(elem_size, \"Liderboard\", None, 32)\r\n button2 = Button(elem_size, \"Exit\", close, 32)\r\n menu.add_elem(button)\r\n menu.add_elem(button1)\r\n menu.add_elem(button2)\r\n\r\n clock = pygame.time.Clock()\r\n while RUNNING:\r\n screen.fill((0, 0, 0))\r\n menu.update()\r\n menu.draw(screen)\r\n pygame.display.flip()\r\n clock.tick(30)\r\n","repo_name":"forgooo/the_best_pacman","sub_path":"test_menu2.py","file_name":"test_menu2.py","file_ext":"py","file_size_in_byte":3639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"22215777900","text":"while True:\n n = int(input())\n if n == 0: break\n n -= 1\n values = list(bin(n)[2:])\n values.reverse()\n \n nums = []\n powerOfThree = 1\n \n for i,value in enumerate(values):\n if value == \"1\": nums.append(str(3 ** i))\n \n print(\"{ \" + \", \".join(nums) + \" }\")","repo_name":"hexaquarks/programming-challenges","sub_path":"Kattis/threepowers.py","file_name":"threepowers.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"37624358833","text":"class StreamChecker(object):\n\n def __init__(self, words):\n \"\"\"\n :type words: List[str]\n \"\"\"\n self.root = {}\n for word in words:\n 
self.insert(word[::-1])\n self.history = []\n\n def insert(self, word):\n \"\"\"\n Inserts a word into the trie.\n :type word: str\n :rtype: None\n \"\"\"\n node = self.root\n for c in word:\n if c not in node.keys():\n node[c] = {}\n node = node[c]\n node['is_word'] = True\n\n def query(self, letter):\n \"\"\"\n :type letter: str\n :rtype: bool\n \"\"\"\n node = self.root\n self.history.append(letter)\n i = len(self.history)-1\n while i >= 0:\n c = self.history[i]\n if c in node.keys():\n node = node[c]\n if 'is_word' in node.keys():\n return True\n i -= 1\n else:\n return False\n return False\n\n# Your StreamChecker object will be instantiated and called as such:\n# obj = StreamChecker(words)\n# param_1 = obj.query(letter)\n","repo_name":"qianlongzju/Leet_Code","sub_path":"Algorithms/py/1032.StreamOfCharacters.py","file_name":"1032.StreamOfCharacters.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"18579772216","text":"result = 0\nwith open(\"input\", \"r\") as input:\n for line in input:\n half = int(len(line) / 2)\n left = sorted(line[:half])\n right = sorted(line[half:])\n currentLeft = left.pop(0)\n currentRight = right.pop(0)\n while True:\n if currentLeft < currentRight:\n currentLeft = left.pop(0)\n elif currentLeft > currentRight:\n currentRight = right.pop(0)\n else:\n break\n prio = ord(currentLeft)\n if prio > 96:\n prio -= 96\n else:\n prio -= 38\n result += prio\nprint(result)","repo_name":"Meidimax99/adventofcodecollection","sub_path":"3/CdrSonan/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"35481018003","text":"import dimidium.lib.singleton as dosa_singleton\nfrom dimidium.backend.codeGen.IlaDebugCore import IlaDebugCore\nfrom dimidium.backend.codeGen.WrapperInterfaces import WrapperInterface, 
InterfaceAxisFifo\n\n\nclass VhdlEntity:\n\n def __init__(self, template_file=None, use_debug=False):\n self.template_file = template_file\n # self.signal_decls = []\n self.comp_decls = {}\n self.network_adapter_inst = None\n # self.network_output_adapter_inst = None\n self.processing_comp_insts = {}\n self.next_proc_comp_cnt = 0\n self.add_tcl_valid = False\n self.add_tcl_lines = []\n self.lib_includes = {}\n self.debug_core = IlaDebugCore()\n self.inst_debug = use_debug\n\n def set_template(self, template_file):\n self.template_file = template_file\n\n def set_network_adapter(self, decl_lines, inst_template, if_types):\n self.comp_decls['network_adapter'] = decl_lines\n self.network_adapter_inst = {'inst_tmpl': inst_template, 'if_types': if_types}\n\n # def set_network_output_adapter(self, decl_lines, inst_template, if_types):\n # self.comp_decls['network_output_adapter'] = decl_lines\n # self.network_output_adapter_inst = {'inst_tmpl': inst_template, 'if_types': if_types}\n\n def add_comp_decls(self, name, decl_lines):\n self.comp_decls[name] = decl_lines\n\n # def add_signal_decls(self, decl_lines):\n # self.signal_decls.append(decl_lines)\n\n def add_lib_include(self, lib_name, use_lines: list):\n if lib_name in self.lib_includes:\n self.lib_includes[lib_name].extend(use_lines)\n else:\n self.lib_includes[lib_name] = use_lines\n\n def add_proc_comp_inst(self, arch_block, decl_lines, inst_template, input_if: WrapperInterface, output_if=None):\n if output_if is not None:\n assert isinstance(output_if, WrapperInterface)\n\n self.comp_decls[self.next_proc_comp_cnt] = decl_lines\n inst_name = 'proc_comp_{}'.format(self.next_proc_comp_cnt)\n self.processing_comp_insts[self.next_proc_comp_cnt] = {'inst_tmpl': inst_template, 'input_if': input_if,\n 'output_if': output_if, 'name': inst_name,\n 'arch_block': arch_block}\n self.next_proc_comp_cnt += 1\n\n def write_file(self, target_path, target_device):\n # check compatibility of input layer\n is_compatible = False\n 
for at in self.network_adapter_inst['if_types']:\n if isinstance(self.processing_comp_insts[0]['input_if'], at):\n is_compatible = True\n if not is_compatible:\n print(\"[DOSA:VhdlGen:ERROR] Can't connect first processing component to network input adapter (\" +\n \"wrong interface type). STOP.\")\n exit(-1)\n # 1. generate what still needs to be generated\n output_if = None\n if self.processing_comp_insts[self.next_proc_comp_cnt - 1]['output_if'] is None:\n last_brick = self.processing_comp_insts[self.next_proc_comp_cnt - 1]['arch_block'].brick_list[-1]\n out_type = self.network_adapter_inst['if_types'][0] # just take the first one?\n output_if = out_type('output_node_end', last_brick.output_bw_Bs, target_device)\n else:\n output_if = self.processing_comp_insts[self.next_proc_comp_cnt - 1]['output_if']\n tcl_tmp, decl_tmp, inst_tmp = output_if.get_debug_lines()\n self.debug_core.add_new_probes(tcl_tmp, decl_tmp, inst_tmp)\n # get tcl from all interfaces --> no, is done by OSGs\n # for pci in self.processing_comp_insts.keys():\n # pc = self.processing_comp_insts[pci]\n # tcl_l = pc['input_if'].get_tcl_lines()\n # generate debug\n add_decl_lines = ''\n add_inst_lines = ''\n if self.inst_debug:\n add_decl_lines = self.debug_core.get_vhdl_decl()\n map_dict = {\n 'clk': 'piSHL_156_25Clk',\n # 'rst': 'piMMIO_Ly7_Rst',\n 'rst': 'sResetApps',\n 'rst_n': 'sResetApps_n',\n # 'enable': 'piMMIO_Ly7_En'\n 'enable': 'sEnableApps'\n }\n add_inst_lines = self.debug_core.get_vhdl_inst_tmpl().format_map(map_dict)\n # 2. add output_if tcl\n tcl_lines = output_if.get_tcl_lines()\n if self.inst_debug:\n tcl_lines += self.debug_core.get_tcl_lines()\n self.add_tcl_lines.append(tcl_lines)\n self.add_tcl_valid = True\n # 3. 
write vhdl, from top to bottom\n with open(self.template_file, 'r') as in_file, \\\n open(target_path, 'w') as out_file:\n for line in in_file.readlines():\n if 'DOSA_ADD_library_includes' in line:\n if len(self.lib_includes) > 0:\n outline = '-- DOSA generated library includes\\n'\n for lk in self.lib_includes:\n outline += 'library {};\\n'.format(lk)\n for ll in self.lib_includes[lk]:\n outline += ' use {}.{};\\n'.format(lk, ll)\n outline += '\\n'\n else:\n outline = '-- no further DOSA libraries\\n'\n elif 'DOSA_ADD_version_string' in line:\n outline = '--** This VHDL file was generated by DOSA version {} **\\n'\\\n .format(dosa_singleton.config.git_version)\n elif 'DOSA_ADD_decl_lines' in line:\n outline = ' -- DOSA generated interface declarations\\n'\n for pci in self.processing_comp_insts.keys():\n pc = self.processing_comp_insts[pci]\n sig_decl = pc['input_if'].get_vhdl_signal_declaration()\n outline += '\\n' + sig_decl\n if_decl = pc['input_if'].get_vhdl_entity_declaration()\n outline += '\\n' + if_decl\n outline += '\\n' + output_if.get_vhdl_signal_declaration()\n outline += '\\n' + output_if.get_vhdl_entity_declaration()\n outline += '\\n\\n -- DOSA generated processing components declarations\\n'\n for dk in self.comp_decls.keys():\n comp_decl = self.comp_decls[dk]\n outline += '\\n' + comp_decl\n outline += '\\n'\n outline += add_decl_lines\n elif 'DOSA_ADD_inst_lines' in line:\n outline = ' -- DOSA generated instantiations\\n'\n outline += '\\n -- Instantiate network adapter\\n'\n inst_tmpl = self.network_adapter_inst['inst_tmpl']\n next_signals = self.processing_comp_insts[0]['input_if'].get_vhdl_signal_dict()\n assert isinstance(self.processing_comp_insts[0]['input_if'],\n InterfaceAxisFifo) # TODO: make dynamic\n last_signals = output_if.get_vhdl_signal_dict()\n assert isinstance(output_if, InterfaceAxisFifo) # TODO: make also dynamic\n in_map_dict = {'in_sig_0': last_signals['from_signals']['0'],\n 'in_sig_1_n': 
last_signals['from_signals']['1_n'],\n 'in_sig_1': last_signals['from_signals']['1'],\n 'in_sig_2': last_signals['from_signals']['2'],\n 'in_sig_3': last_signals['from_signals']['3'],\n 'in_sig_4_n': last_signals['from_signals']['4_n'],\n 'in_sig_4': last_signals['from_signals']['4'],\n 'in_sig_5': last_signals['from_signals']['5'],\n 'in_sig_6': last_signals['from_signals']['6'],\n 'in_sig_7_n': last_signals['from_signals']['7_n'],\n 'in_sig_7': last_signals['from_signals']['7'],\n 'in_sig_8': last_signals['from_signals']['8'],\n 'out_sig_0': next_signals['to_signals']['0'],\n 'out_sig_1_n': next_signals['to_signals']['1_n'],\n 'out_sig_1': next_signals['to_signals']['1'],\n 'out_sig_2': next_signals['to_signals']['2'],\n 'out_sig_3': next_signals['to_signals']['3'],\n 'out_sig_4_n': next_signals['to_signals']['4_n'],\n 'out_sig_4': next_signals['to_signals']['4'],\n 'out_sig_5': next_signals['to_signals']['5'],\n 'out_sig_6': next_signals['to_signals']['6'],\n 'out_sig_7_n': next_signals['to_signals']['7_n'],\n 'out_sig_7': next_signals['to_signals']['7'],\n 'out_sig_8': next_signals['to_signals']['8'],\n 'inst_name': 'DosaNetworkAdapter',\n 'clk': 'piSHL_156_25Clk',\n # 'rst': 'piMMIO_Ly7_Rst',\n 'rst': 'sResetApps',\n 'rst_n': 'sResetApps_n',\n # 'enable': 'piMMIO_Ly7_En'\n 'enable': 'sEnableApps'\n }\n # other signals are static for network adapter\n new_inst = inst_tmpl.format_map(in_map_dict)\n outline += '\\n' + new_inst\n for pci in self.processing_comp_insts.keys():\n pc = self.processing_comp_insts[pci]\n assert isinstance(pc['input_if'], InterfaceAxisFifo) # TODO: make dynamic\n if pci + 1 >= self.next_proc_comp_cnt:\n next_if = output_if\n else:\n next_if = self.processing_comp_insts[pci + 1]['input_if']\n outline += '\\n -- Instantiate processing component {}\\n'.format(pci)\n our_signals = pc['input_if'].get_vhdl_signal_dict()\n # first, instantiate interface (if necessary)\n inst_tmpl = pc['input_if'].get_vhdl_entity_inst_tmpl()\n map_dict = 
{'in_sig_0': our_signals['to_signals']['0'],\n 'in_sig_1_n': our_signals['to_signals']['1_n'],\n 'in_sig_1': our_signals['to_signals']['1'],\n 'in_sig_2': our_signals['to_signals']['2'],\n 'out_sig_0': our_signals['from_signals']['0'],\n 'out_sig_1_n': our_signals['from_signals']['1_n'],\n 'out_sig_1': our_signals['from_signals']['1'],\n 'out_sig_2': our_signals['from_signals']['2'],\n 'in_sig_3': our_signals['to_signals']['3'],\n 'in_sig_4_n': our_signals['to_signals']['4_n'],\n 'in_sig_4': our_signals['to_signals']['4'],\n 'in_sig_5': our_signals['to_signals']['5'],\n 'out_sig_3': our_signals['from_signals']['3'],\n 'out_sig_4_n': our_signals['from_signals']['4_n'],\n 'out_sig_4': our_signals['from_signals']['4'],\n 'out_sig_5': our_signals['from_signals']['5'],\n 'in_sig_6': our_signals['to_signals']['6'],\n 'in_sig_7_n': our_signals['to_signals']['7_n'],\n 'in_sig_7': our_signals['to_signals']['7'],\n 'in_sig_8': our_signals['to_signals']['8'],\n 'out_sig_6': our_signals['from_signals']['6'],\n 'out_sig_7_n': our_signals['from_signals']['7_n'],\n 'out_sig_7': our_signals['from_signals']['7'],\n 'out_sig_8': our_signals['from_signals']['8'],\n 'inst_name': pc['name'] + '_input_if',\n 'clk': 'piSHL_156_25Clk',\n # 'rst': 'piMMIO_Ly7_Rst',\n 'rst': 'sResetApps',\n 'rst_n': 'sResetApps_n',\n # 'enable': 'piMMIO_Ly7_En'\n 'enable': 'sEnableApps'\n }\n new_inst = inst_tmpl.format_map(map_dict)\n outline += '\\n' + new_inst\n # next, instantiate processing component\n next_signals = next_if.get_vhdl_signal_dict()\n map_dict = {'in_sig_0': our_signals['from_signals']['0'],\n 'in_sig_1_n': our_signals['from_signals']['1_n'],\n 'in_sig_1': our_signals['from_signals']['1'],\n 'in_sig_2': our_signals['from_signals']['2'],\n 'out_sig_0': next_signals['to_signals']['0'],\n 'out_sig_1_n': next_signals['to_signals']['1_n'],\n 'out_sig_1': next_signals['to_signals']['1'],\n 'out_sig_2': next_signals['to_signals']['2'],\n 'in_sig_3': our_signals['from_signals']['3'],\n 
'in_sig_4_n': our_signals['from_signals']['4_n'],\n 'in_sig_4': our_signals['from_signals']['4'],\n 'in_sig_5': our_signals['from_signals']['5'],\n 'out_sig_3': next_signals['to_signals']['3'],\n 'out_sig_4_n': next_signals['to_signals']['4_n'],\n 'out_sig_4': next_signals['to_signals']['4'],\n 'out_sig_5': next_signals['to_signals']['5'],\n 'in_sig_6': our_signals['from_signals']['6'],\n 'in_sig_7_n': our_signals['from_signals']['7_n'],\n 'in_sig_7': our_signals['from_signals']['7'],\n 'in_sig_8': our_signals['from_signals']['8'],\n 'out_sig_6': next_signals['to_signals']['6'],\n 'out_sig_7_n': next_signals['to_signals']['7_n'],\n 'out_sig_7': next_signals['to_signals']['7'],\n 'out_sig_8': next_signals['to_signals']['8'],\n 'inst_name': pc['name'],\n 'clk': 'piSHL_156_25Clk',\n # 'rst': 'piMMIO_Ly7_Rst',\n 'rst': 'sResetApps',\n 'rst_n': 'sResetApps_n',\n # 'enable': 'piMMIO_Ly7_En'\n 'enable': 'sEnableApps'\n }\n inst_tmpl = pc['inst_tmpl']\n new_inst = inst_tmpl.format_map(map_dict)\n outline += '\\n' + new_inst\n # instantiate output interface (if necessary)\n our_signals = output_if.get_vhdl_signal_dict()\n inst_tmpl = output_if.get_vhdl_entity_inst_tmpl()\n map_dict = {'in_sig_0': our_signals['to_signals']['0'],\n 'in_sig_1_n': our_signals['to_signals']['1_n'],\n 'in_sig_1': our_signals['to_signals']['1'],\n 'in_sig_2': our_signals['to_signals']['2'],\n 'out_sig_0': our_signals['from_signals']['0'],\n 'out_sig_1_n': our_signals['from_signals']['1_n'],\n 'out_sig_1': our_signals['from_signals']['1'],\n 'out_sig_2': our_signals['from_signals']['2'],\n 'in_sig_3': our_signals['to_signals']['3'],\n 'in_sig_4_n': our_signals['to_signals']['4_n'],\n 'in_sig_4': our_signals['to_signals']['4'],\n 'in_sig_5': our_signals['to_signals']['5'],\n 'out_sig_3': our_signals['from_signals']['3'],\n 'out_sig_4_n': our_signals['from_signals']['4_n'],\n 'out_sig_4': our_signals['from_signals']['4'],\n 'out_sig_5': our_signals['from_signals']['5'],\n 'in_sig_6': 
our_signals['to_signals']['6'],\n 'in_sig_7_n': our_signals['to_signals']['7_n'],\n 'in_sig_7': our_signals['to_signals']['7'],\n 'in_sig_8': our_signals['to_signals']['8'],\n 'out_sig_6': our_signals['from_signals']['6'],\n 'out_sig_7_n': our_signals['from_signals']['7_n'],\n 'out_sig_7': our_signals['from_signals']['7'],\n 'out_sig_8': our_signals['from_signals']['8'],\n 'inst_name': pc['name'] + '_output_if',\n 'clk': 'piSHL_156_25Clk',\n # 'rst': 'piMMIO_Ly7_Rst',\n 'rst': 'sResetApps',\n 'rst_n': 'sResetApps_n',\n # 'enable': 'piMMIO_Ly7_En'\n 'enable': 'sEnableApps'\n }\n new_inst = inst_tmpl.format_map(map_dict)\n outline += '\\n' + new_inst\n outline += '\\n'\n outline += add_inst_lines\n else:\n outline = line\n out_file.write(outline)\n return 0\n\n def get_add_tcl_lines(self):\n if not self.add_tcl_valid:\n print(\"[DOSA:VhdlGen:ERROR] The data from this call is not yet valid (add_tcl_lines of VhdlEntity). STOP.\")\n exit(1)\n return self.add_tcl_lines\n","repo_name":"cloudFPGA/DOSA","sub_path":"dimidium/backend/codeGen/VhdlEntity.py","file_name":"VhdlEntity.py","file_ext":"py","file_size_in_byte":19151,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"17642229145","text":"from tastypie.resources import ModelResource, ALL\nfrom tastypie.authorization import Authorization, DjangoAuthorization\nfrom tastypie.authentication import SessionAuthentication\nfrom tastypie import fields\n\nfrom avatar.templatetags.avatar_tags import avatar\n\nfrom .Auth import UserObjectsOnlyAuthorization, UserDataOnlyAuthorization\n\nfrom twitter.models import Tweet, Tweeter\n\n\nclass TweetResource(ModelResource):\n #tweeter_id = fields.IntegerField(readonly=True)\n class Meta:\n queryset = Tweet.objects.order_by('-id')\n resource_name = 'tweet'\n always_return_data = True\n\n authentication = SessionAuthentication()\n #authorization = Authorization()\n authorization = UserObjectsOnlyAuthorization()\n\n def 
hydrate(self, bundle):\n \t#bundle.obj.id = max(Tweet.objects.all(),key = lambda x : x.id).id+1\n \tbundle.obj.tweeter_id = bundle.request.user.id\n \treturn bundle\n\n def dehydrate(self, bundle):\n # Include the request IP in the bundle.\n bundle.data['editable'] = (bundle.obj.tweeter_id == bundle.request.user.id)\n bundle.data['tweeter_id'] = bundle.obj.tweeter_id\n if bundle.obj.tweeter.get_full_name() :\n bundle.data['name'] = bundle.obj.tweeter.get_full_name()\n bundle.data['ident'] = \"full_name\"\n else :\n bundle.data['name'] = bundle.obj.tweeter.username\n bundle.data['ident'] = \"username\"\n bundle.data['avatar_url'] = avatar(bundle.obj.tweeter,50)\n return bundle\n\nclass TweeterResource(ModelResource):\n class Meta:\n queryset = Tweeter.objects.all()\n resource_name = 'tweeter'\n always_return_data = True\n \n authentication = SessionAuthentication()\n authorization = UserDataOnlyAuthorization()\n","repo_name":"emadshaaban92/Fake-Twitter","sub_path":"apirest/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"11413133393","text":"from time import time, sleep\n\nfrom zmqmw.implementations.notifier.publisher.PublisherNotifierStrategy import PublisherNotifierStrategy\nfrom zmqmw.implementations.proxy.publisher.PublisherProxyStrategy import PublisherProxyStrategy\nfrom zmqmw.middleware.BrokerInfo import BrokerInfo\nfrom zmqmw.middleware.PublisherInfo import PublisherInfo\nfrom zmqmw.middleware.adapter.PublisherClient import PublisherClient\n\n\n# we create an OPTIONAL logger, for demonstration purposes\nclass LocalLogger:\n def log(self, val: str):\n print(val)\n\n\n# notify the code where our broker lives...\nbroker = BrokerInfo(broker_address=\"127.0.0.1\", broker_sub_port=6000)\n\npub_info = PublisherInfo(publisher_address='127.0.0.1', publisher_port=7000)\n\n# select the strategy under which our broker is running (e.g. 
proxy or notifier)\nstrategy = PublisherNotifierStrategy(broker_info=broker, publisher_info=pub_info, logger=LocalLogger())\n\n# create a publisher for the broker...\npublisher = PublisherClient(strategy=strategy)\n\n# register topics we want to publish for...\npublisher.register(topics=['timer'])\n\ntry:\n for i in range(100):\n # publish!\n publisher.publish(topic='timer', val=str(time()))\n sleep(1)\nexcept KeyboardInterrupt:\n publisher.close()\n","repo_name":"cmelende/CS6381_Project","sub_path":"testing_api/basic_publisher_notifier.py","file_name":"basic_publisher_notifier.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"13674226405","text":"from flask import Flask, request, jsonify\n\napp = Flask(__name__)\n\n#sample inventory data (for testing purposes)\nproducts = [\n {'id': 'W1000',\n 'title': 'lip balm',\n 'price': 3.99,\n 'inventory_count': 100},\n {'id': 'W1001',\n 'title': 'mittens',\n 'price': 8.99,\n 'inventory_count': 50},\n {'id': 'W1002',\n 'title': 'scarf',\n 'price': 30.00,\n 'inventory_count': 20},\n {'id': 'F1000',\n 'title': 'ultraboost sneakers',\n 'price': 170.00,\n 'inventory_count': 0}\n]\n\n@app.route('/') #route() binds a URL to a function\ndef home():\n return \"Welcome to Christina's Marketplace!\"\n\n#query all products\n@app.route('/inventory/all', methods=['GET'])\ndef all_products():\n return jsonify(products)\n\n#query available products\n@app.route('/inventory/available', methods=['GET'])\ndef available_products():\n result = []\n for prod in products:\n if prod['inventory_count'] > 0:\n result.append(prod)\n return jsonify(result)\n\n@app.route('/inventory', methods=['GET'])\ndef purchase():\n if 'id' in request.args:\n id = request.args['id']\n else:\n return \"Error: No id field provided. 
Please specify a product id.\"\n \n if 'quantity' in request.args:\n quantity = int(request.args['quantity'])\n else:\n return \"Error: No quantity field provided. Please specify a product purchase quantity.\"\n \n for prod in products:\n if prod['id'] == id and prod['inventory_count'] > 0:\n quantity_purchased = min(prod['inventory_count'],quantity)\n prod['inventory_count'] -= quantity_purchased\n return \"PURCHASED Item: {}, Quantity: {}\".format(prod['title'],quantity_purchased)\n return \"Sorry, item id: {} is currently sold out.\".format(id)\n\nif __name__ == '__main__':\n app.debug = False\n app.run()","repo_name":"liuchristin4/flask_marketplace_api","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"13924557740","text":"# Beautiful_soup doc : https://www.crummy.com/software/BeautifulSoup/bs4/doc/\r\n\r\nfrom bs4 import BeautifulSoup\r\nimport lxml\r\n\r\nwith open('beautiful_soup/website.html', 'rt', encoding='UTF8') as file:\r\n contents = file.read()\r\n\r\nsoup = BeautifulSoup(contents, 'lxml')\r\n\r\nall_anchor_tags = soup.find_all(name='a')\r\nfor tag in all_anchor_tags:\r\n print(tag.getText())\r\n print(tag.get('href'))\r\n print()\r\n\r\nheading = soup.find(name='h1', id='name')\r\nprint(heading.getText())\r\n\r\nsection_heading = soup.find(name='h3', class_='heading')\r\nprint(section_heading.getText())\r\n\r\nname = soup.select_one(selector='#name')\r\nprint(name)\r\n\r\nheadings = soup.select('.heading')\r\nprint(headings)","repo_name":"pokavv/beautiful_soup","sub_path":"beautiful_soup/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"41282269377","text":"\"\"\"EducationPlatform URL Configuration\n\nThe `urlpatterns` list routes URLs to views. 
For more information please see:\n https://docs.djangoproject.com/en/1.9/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url\nimport xadmin, students, homework, teachers\nfrom students.views import StuRegisterView, StuLoginView, StuRegisterInfoView, logout_stu\nfrom teachers.views import TchRegisterView, TchLoginView, TchRegisterInfoView, logout_tch\nfrom homework.views import PublishView, SubmitView, ViewTheDegreeOfCompletionView\n\nurlpatterns = [\n url(r'^xadmin/', xadmin.site.urls),\n\n url(r'^homepage_stu/$' ,students.views.homepage_stu, name='homepage_stu'),\n url(r'^register_stu/$', StuRegisterView.as_view(), name=\"register_stu\"),\n url(r'^register_info_stu/$', StuRegisterView.as_view(), name=\"register_info_stu\"),\n url(r'^submit_register_info_stu/$', StuRegisterInfoView.as_view(), name=\"submit_register_info_stu\"),\n url(r'^login_stu/$', StuLoginView.as_view(), name='login_stu'),\n url(r'^submit_login_info_stu/$', StuLoginView.as_view(), name='submit_login_info_stu'),\n url(r'^view_all_tasks/$', teachers.views.logout_tch, name='view_all_tasks'),\n url(r'^logout_stu/$', students.views.logout_stu),\n\n url(r'^homepage_tch/$', teachers.views.homepage_tch, name='homepage_tch'),\n url(r'^register_tch/$', TchRegisterView.as_view(), name=\"register_tch\"),\n url(r'^register_info_tch/$', TchRegisterInfoView.as_view(), name=\"register_info_tch\"),\n url(r'^submit_register_info_tch/$', TchRegisterInfoView.as_view(), name=\"submit_register_info_tch\"),\n url(r'^login_tch/$', TchLoginView.as_view(), 
name='login_tch'),\n url(r'^submit_login_info_tch/$', TchLoginView.as_view(), name='submit_login_info_tch'),\n url(r'^publish_tasks/$', PublishView.as_view(), name='publish_tasks'),\n url(r'^submit_course_info/$', PublishView.as_view(), name='submit_course_info'),\n url(r'^submit_module_info/$', homework.views.submit_module_info, name='submit_module_info'),\n url(r'^submit_knowledgebase_info/$', homework.views.submit_knowledgebase_info, name='submit_knowledgebase_info'),\n url(r'^view_the_degree_of_completion_tch/$', ViewTheDegreeOfCompletionView.as_view(), name='view_the_degree_of_completion_tch'),\n url(r'^view_detail_of_completion_tch/$', ViewTheDegreeOfCompletionView.as_view(), name='view_detail_of_completion_tch'),\n url(r'^logout_tch/$', teachers.views.logout_tch, name='logout_tch'),\n]\n\n","repo_name":"kechunlinbot/EducationPlatform","sub_path":"EducationPlatform/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"9535282121","text":"from pervane import serve\r\n# from pervane import run as serve\r\nimport multiprocessing\r\nimport gunicorn.app.base\r\n\r\n\r\ndef number_of_workers():\r\n return (multiprocessing.cpu_count() * 2) + 1\r\n\r\n\r\nclass StandaloneApplication(gunicorn.app.base.BaseApplication):\r\n\r\n def __init__(self, app, options=None):\r\n self.options = options or {}\r\n self.application = app\r\n super().__init__()\r\n\r\n def load_config(self):\r\n config = {key: value for key, value in self.options.items()\r\n if key in self.cfg.settings and value is not None}\r\n for key, value in config.items():\r\n self.cfg.set(key.lower(), value)\r\n\r\n def load(self):\r\n return self.application\r\n\r\n\r\ndef main(as_module=False):\r\n options = {\r\n 'bind': '%s:%s' % (serve.args.host, serve.args.port),\r\n 'workers': number_of_workers(),\r\n }\r\n StandaloneApplication(serve.app, options).run()\r\n\r\n\r\nif 
__name__ == '__main__':\r\n cli_main()\r\n","repo_name":"hakanu/pervane","sub_path":"pervane/prod.py","file_name":"prod.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","stars":201,"dataset":"github-code","pt":"54"} +{"seq_id":"44844832622","text":"# import json\n# import numpy as np\n# import matplotlib as mpl\n# import matplotlib.pyplot as plt\n\n# fr = open('data/align/FLIR_val.json')\n# f = json.load(fr)\n\n# ann = f['annotations']\n# area = [a['area'] for a in ann]\n\n# plt.hist(area, bins=10, range=(0,10000))\n\n# plt.savefig('tmp.jpg')\na = [3, 0, 0, 2, 1, 0, 3, 1, 2, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1, 0, 3, 2, 0, 0, 1, 2, 0, 0, 1, 3, 1, 3, 0, 2, 1, 2, 0, 2, 0, 2, 0, 1, 1, 3, 0, 1]\nl = 4+4+8+4\n\narch = dict(\n backbone_rgb=[\n a[:l]\n ],\n backbone_thermal=[\n a[l:2*l]\n ],\n head_rgb=[\n a[2*l:2*l+4]\n ],\n head_thermal=[\n a[2*l+4:]\n ]\n)\nprint(arch)","repo_name":"Eureka-JTX/rgbt_detection","sub_path":"size.py","file_name":"size.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"54527760","text":"from aiogram import types\nfrom aiogram.dispatcher.filters import Text\nfrom anabot.keyboards.inline.callback_datas import continue_callback, reaction_callback\nfrom anabot.keyboards.inline.reaction_buttons import create_reaction_keyboard\nfrom anabot.loader import dp, bot\nfrom anabot.loader import api\nfrom anabot.utils.db_api.models import News\n\n\n@dp.message_handler(Text(equals=[\"Дневной дайджест\"]), state=None)\nasync def send_news(message: types.Message):\n news = api.digest(message.from_user.id, 1)\n await message.answer(text=news[0].short_text,\n reply_markup=create_reaction_keyboard(news[0].url, 1, str(news[0].id)[:5]))\n await message.answer(text=news[1].short_text,\n reply_markup=create_reaction_keyboard(news[1].url, 2, str(news[1].id)[:5]))\n await message.answer(text=news[2].short_text,\n 
reply_markup=create_reaction_keyboard(news[2].url, 3, str(news[2].id)[:5], continue_button=True))\n print(\"Запрос дневного дайджеста\")\n\n\n@dp.callback_query_handler(reaction_callback.filter(reaction=[\"like\", \"dislike\"]))\nasync def save_reaction(call: types.CallbackQuery, callback_data: dict):\n await call.answer(cache_time=15)\n answer_reaction = 1 if callback_data[\"reaction\"] == \"like\" else 2\n # api.add_reaction(callback_data[\"news_id\"], call.from_user.id, answer_reaction)\n reaction_bool = True if callback_data[\"reaction\"] == \"like\" else False\n await call.message.edit_reply_markup(\n create_reaction_keyboard(\n callback_data[\"link\"],\n int(callback_data[\"article_number\"]),\n callback_data[\"news_id\"],\n reaction=reaction_bool,\n continue_button=bool(int(callback_data[\"continue_button\"])),\n )\n )\n print(callback_data)\n # save like/dislike\n\n\n@dp.callback_query_handler(continue_callback.filter(article_number=[\"1\", \"2\", \"3\"]))\nasync def continue_news(call: types.CallbackQuery, callback_data: dict):\n await call.answer(cache_time=15)\n if callback_data[\"reaction\"] == \"None\":\n reaction = None\n elif callback_data[\"reaction\"] == \"True\":\n reaction = True\n else:\n reaction = False\n await call.message.edit_reply_markup(\n create_reaction_keyboard(\n callback_data[\"link\"],\n int(callback_data[\"article_number\"]),\n int(callback_data[\"news_id\"]),\n reaction=reaction,\n )\n )\n news = api.digest(call.from_user.id, 1)\n await bot.send_message(call.from_user.id, text=news[0].short_text,\n reply_markup=create_reaction_keyboard(news[0].url, 1, str(news[0].id)[:5]))\n await bot.send_message(call.from_user.id, text=news[1].short_text,\n reply_markup=create_reaction_keyboard(news[1].url, 2, str(news[1].id)[:5]))\n await bot.send_message(call.from_user.id, text=news[2].short_text,\n reply_markup=create_reaction_keyboard(news[2].url, 3, str(news[2].id)[:5], continue_button=True))\n 
print(callback_data)\n","repo_name":"itatmisis/more-tech-4-ananas-anabot","sub_path":"src/anabot/handlers/users/news.py","file_name":"news.py","file_ext":"py","file_size_in_byte":3089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"339248052","text":"#!/usr/bin/env python\n\n\"\"\" Web app specific utilities.\n\nIn particular, it handles tasks related to deployment and minimization which are not relevant\nto other Overwatch packages.\n\n.. codeauthor:: Raymond Ehlers , Yale University\n\"\"\"\n\nimport os\nimport subprocess\nimport logging\nlogger = logging.getLogger(__name__)\n# Webassets\nimport webassets.filter\n\n# Configuration\nfrom ..base import config\n(serverParameters, filesRead) = config.readConfig(config.configurationType.webApp)\n\nclass PolymerBundler(webassets.filter.ExternalTool):\n \"\"\" Filter to bundle Polymer html imports into a single file for deployment.\n\n Best practices dictate that the Polymer html imports should be combined into a single file\n to reduce the number of individual http requests. Polymer provides a tool to do so, called\n ``polymer-bundler``. By taking advantage of ``webassets``, we can automatically combine and\n minimize these files when starting a web app deployment.\n\n To successfully define the filter, the following details must be addressed:\n\n - polymer-bundler must only be executed with relative paths, so we cannot use\n ``ExternalTool.subprocess``, since that gives absolute paths.\n - To ensure that the polymer components also work when not bundled, the filter must be\n executed in a directory above the static dir.\n\n These issues causes quite some complications! See the ``input(...)`` function for how to deal\n with these issues.\n\n When ``webassets`` is run in debug mode, this filter will not be run! Instead, the standard\n (un-minified) version will be included. 
For information on forcing this filter to be run,\n see the :doc:`web app README `.\n \"\"\"\n # Define the name of the bundle so it can be referenced.\n name = \"PolymerBundler\"\n\n def input(self, _in, out, **kwargs):\n \"\"\" Plugin function for adding an external filter to ``webassets``.\n\n As of August 2018, the ``kwargs`` options available include:\n\n .. code-block:: python\n\n kwargs = {'output': 'gen/polymerBundle.html',\n 'output_path': '/pathToOverwatch/overwatch/webApp/static/gen/polymerBundle.html',\n 'source_path': '/pathToOverwatch/overwatch/webApp/static/polymerComponents.html',\n 'source': 'polymerComponents.html'}\n\n Note:\n ``polymer-bundler`` parses arguments a bit strangely - values such as paths still need\n to be in a separate argument. Thus, the arguments looks more split than would usually\n be expected.\n\n Args:\n _in (StringIO): Input for the filter. Not used here.\n out (StringIO): Output for the filter. The output for ``polymer-bundler`` is written here.\n This will eventually be written out to a file.\n **kwargs (dict): Additional options required to run the filter properly. See the function\n description for the available options.\n Returns:\n None\n \"\"\"\n # Printed because otherwise we won't be able to see the output.\n logger.debug(\"polymer-bundler filter arguments. _in: {}, out: {}, kwargs: {}\".format(_in, out, kwargs))\n\n # Cannot just use the naive current path since this could be executed from anywhere. 
Instead,\n # look for the static folder - it must be included somewhere.\n output_path = \"{output_path}\".format(**kwargs)\n executionPath = output_path[:output_path.find(serverParameters[\"staticFolder\"])]\n # Stream the result to stdout since writing the file seems to cause trouble with\n # the \"out\" string, which will overwrite the desired output\n args = [\n \"polymer-bundler\",\n \"--inline-scripts\",\n \"--strip-comments\",\n #\"--out-html\",\n #os.path.join(serverParameters[\"staticFolder\"], \"{output}.tmp\".format(**kwargs)),\n # NOTE: It appears that ``--in-html`` is not a valid option anyonre. The input file should just be the last argument.\n os.path.join(serverParameters[\"staticFolder\"], \"{source}\".format(**kwargs))\n ]\n\n logger.debug(\"Executing polymer filter with execution path \\\"{executionPath}\\\" and args {args}\".format(executionPath = executionPath, args = args))\n output = subprocess.check_output(args, cwd = executionPath)\n if len(output) > 0:\n logger.debug(\"Received non-zero output string! 
This means the polymer-bundler filter likely worked!\")\n # Write the output to the out string, which will then eventually automatically be written to file\n # Without explicit decoding here, it will fail\n out.write(output.decode('utf-8'))\n\n# Register filter so it can be run in the web app\nwebassets.filter.register_filter(PolymerBundler)\n","repo_name":"raymondEhlers/OVERWATCH","sub_path":"overwatch/webApp/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":4835,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"54"} +{"seq_id":"20932260669","text":"from os.path import join\nimport os.path\nimport shutil\nimport os\nimport re\nimport datetime\n\n__all__ = [\"get_dependencies\", \"get_app_version_only\"]\n\nclass VersionException(Exception):\n \"\"\"Report problems updating app version\"\"\"\n\ndef update_app_version(old_version, scheme, new_version, build_revision):\n version_number = old_version\n if new_version is not None:\n version_number = new_version\n\n if scheme == \"nightly\" or scheme == \"unofficial\":\n #strip off any suffix from the version #\n version_parts = version_number.split(\".\")\n if len(version_parts) >= 2:\n #Check for Nightly.BuildRev where BuildRev is just a number\n #If so, strip off the BuildRev portion so the rest off the\n #suffix stripping will work.\n if version_parts[-2].find(\"Nightly\") != -1:\n version_parts = version_parts[0:-1] #Trim off the BuildRev part\n\n for count in range(0, len(version_parts) - 1):\n if not(version_parts[count].isdigit()):\n raise VersionException(\"The current app version # \"\\\n \"is improperly formatted.\")\n last_part = version_parts[-1]\n match_obj = re.match(\"^\\d+(\\D*)\", last_part)\n if match_obj is None:\n raise VersionException(\"The current app version # is \"\\\n \"improperly formatted.\")\n version_parts[-1] = last_part[:match_obj.start(1)]\n version_number = \".\".join(version_parts)\n\n #append on the appropriate 
suffix to the version #\n if scheme == \"unofficial\":\n version_number = version_number + \"Unofficial\"\n elif scheme == \"nightly\":\n todays_date = datetime.date.today()\n today_str = todays_date.strftime(\"%Y%m%d\")\n if len(today_str) != 8:\n raise VersionException(\"This platform does not properly \"\\\n \"pad month and days to 2 digits when using \"\\\n \"strftime. Please update this script to address \"\\\n \"this problem\")\n if not(str(build_revision).isdigit()):\n raise VersionException(\"The Build Revision when using \"\\\n \"--update-version=nightly must indicate a \"\\\n \"subversion working copy that has not been modified.\")\n version_number = version_number + \\\n \"Nightly%s.%s\" % (today_str, build_revision)\n elif new_version is None:\n print(\"You need to use --new-version to provide the version \"\\\n \"# when using the production, rc, or milestone scheme\")\n\n return version_number\n\ndef get_app_version_only(opticks_code_folder):\n app_version_path = join(opticks_code_folder, \"application\",\n \"PlugInUtilities\", \"AppVersion.h\")\n if not(os.path.exists(app_version_path)):\n return None\n app_version = open(app_version_path, \"rt\", encoding='latin_1')\n version_info = app_version.read()\n app_version.close()\n\n version_number_match = re.search(r'APP_VERSION_NUMBER +?\"(.*?)\"',\n version_info)\n if version_number_match is not None:\n version_number = version_number_match.group(1)\n return version_number\n\n return None\n\ndef is_subversion_soft_link(srcname):\n the_file = None\n file_size = os.path.getsize(srcname)\n if file_size < 500:\n #open this file and determine if it was a soft link\n #when it was checked into Subversion\n the_file_contents = open(srcname, mode=\"rb\")\n first_line = the_file_contents.readline()\n the_file_contents.close()\n if first_line.startswith(b\"link\"):\n the_linked_file = first_line.decode().split(\" \", 2)[1]\n the_dir = os.path.split(srcname)[0]\n the_file = os.path.abspath(join(the_dir, 
the_linked_file))\n return the_file\n\ndef copy_dependencies(dp_list, dest_dir):\n if not(os.path.exists(dest_dir)):\n os.makedirs(dest_dir)\n create_qt_conf(dest_dir, 1)\n for the_file, the_dest_dir in dp_list:\n the_file = os.path.abspath(the_file)\n file_name = os.path.split(the_file)[1]\n full_dest_dir = join(dest_dir, the_dest_dir)\n if not(os.path.exists(full_dest_dir)):\n os.makedirs(full_dest_dir)\n the_linked_file = is_subversion_soft_link(the_file)\n if the_linked_file is not None:\n the_file = the_linked_file\n shutil.copy2(the_file, os.path.abspath(join(full_dest_dir, file_name)))\n\ndef get_dependencies(dependencies_path, platform, is_debug, arch):\n def la(dp_file, dest_dir=\".\"):\n dp_list.append([join(dp, dp_file), dest_dir])\n def bla(dp_file, dest_dir=\".\"):\n dp_list.append([join(dp, \"bin\", dp_file), dest_dir])\n def lla(dp_file, dest_dir=\".\"):\n dp_list.append([join(dp, \"lib\", dp_file), dest_dir])\n #this will return a list of dependencies as file paths\n #depending on the platform, mode and arch being passed in\n dp = join(dependencies_path, arch)\n\n dp_list = list()\n if platform == \"Solaris\" or platform == \"Linux\":\n if platform == \"Solaris\":\n plat_dir = \"solaris-sparc\"\n lla(\"libshp.so\")\n lla(\"libgdal.so.1\")\n else:\n plat_dir = \"linux-x86_64\"\n lla(\"libboost_program_options.so.1.55.0\")\n lla(\"libboost_thread.so.1.55.0\")\n lla(\"libCg.so\")\n lla(\"libCgGL.so\")\n lla(\"libGLEW.so\")\n lla(\"liblas.so.2.2.0\")\n lla(\"libshp.so.1\")\n lla(\"libgdal.so\")\n lla(\"libGLEW.so.1.10\")\n lla(\"libopenjp2.so.6\")\n lla(\"libavcodec.so.51\")\n lla(\"libavformat.so.50\")\n lla(\"libavutil.so.49\")\n lla(\"libboost_regex.so.1.55.0\")\n lla(\"libboost_system.so.1.55.0\")\n lla(\"libxqilla.so.5\")\n lla(\"libqwt.so.5\")\n lla(\"libxerces-c-3.1.so\")\n lla(\"libQt3Support.so.4\")\n lla(\"libQtCore.so.4\")\n lla(\"libQtGui.so.4\")\n lla(\"libQtNetwork.so.4\")\n lla(\"libQtOpenGL.so.4\")\n lla(\"libQtScript.so.4\")\n 
lla(\"libQtSql.so.4\")\n lla(\"libQtSvg.so.4\")\n lla(\"libQtXml.so.4\")\n lla(\"libQtXmlPatterns.so.4\")\n lla(\"libhdf5.so.8\")\n lla(\"libsz.so.2\")\n la(join(\"plugins\", \"imageformats\", \"qgifd.dll\"), \"imageformats\")\n la(join(\"plugins\", \"imageformats\", \"qjpegd.dll\"), \"imageformats\")\n #VS2017 la(join(\"plugins\", \"imageformats\", \"qmng4.dll\"), \"imageformats\")\n la(join(\"plugins\", \"imageformats\", \"qsvgd.dll\"), \"imageformats\")\n la(join(\"plugins\", \"imageformats\", \"qtiff.dll\"), \"imageformats\")\n la(join(\"plugins\", \"platforms\", \"qdirect2dd.dll\"), \"platforms\")\n la(join(\"plugins\", \"platforms\", \"qminimald.dll\"), \"platforms\")\n la(join(\"plugins\", \"platforms\", \"qoffscreend.dll\"), \"platforms\")\n la(join(\"plugins\", \"platforms\", \"qwindowsd.dll\"), \"platforms\")\n lla(\"libossim.so.1\")\n lla(\"libcurl.so.4\")\n lla(\"libopencv_core.so.2.2\")\n lla(\"libopencv_imgproc.so.2.2\")\n lla(\"libopencv_ml.so.2.2\")\n lla(\"libopencv_features2d.so.2.2\")\n lla(\"libopencv_video.so.2.2\")\n lla(\"libopencv_objdetect.so.2.2\")\n lla(\"libopencv_calib3d.so.2.2\")\n lla(\"libopencv_flann.so.2.2\")\n lla(\"libopencv_highgui.so.2.2\")\n bla(\"opencv_haartraining\")\n bla(\"opencv_traincascade\")\n lla(\"libyaml-cpp.so.0.2\")\n elif platform == \"Windows\":\n bla(\"pthreadVC2.dll\")\n bla(\"zlib.dll\") #gdal\n bla(\"zlib1.dll\")\n bla(\"raptor.dll\")\n bla(\"libexpat.dll\")\n bla(\"shapelib.dll\")\n bla(\"hd425m.dll\")\n bla(\"hm425m.dll\")\n bla(\"szlibdll.dll\") #hdf4\n #### Begin gdal workaround - TODO: need to possibly upgrade gdal\n #### this is based on the GISInternals distro\n ###bla(\"gdal241.dll\")\n bla(\"gdal204.dll\")\n bla(\"expat.dll\")\n bla(\"libcurl.dll\")\n bla(\"libmysql.dll\")\n bla(\"libpq.dll\")\n bla(\"spatialite.dll\")\n bla(\"sqlite3.dll\")\n bla(\"freexl.dll\")\n bla(\"iconv.dll\")\n bla(\"libcrypto-1_1-x64.dll\")\n bla(\"libssl-1_1-x64.dll\")\n ##### END\n bla(\"glut32.dll\")\n 
bla(\"xerces-c_3_2.dll\") #needed for GDAL\n bla(\"hdf5.dll\") #needed for GDAL\n bla(\"proj.dll\") #needed for OGR\n if is_debug:\n bla(\"opencv_core220d.dll\")\n bla(\"opencv_imgproc220d.dll\")\n bla(\"opencv_ml220d.dll\")\n bla(\"opencv_features2d220d.dll\")\n bla(\"opencv_video220d.dll\")\n bla(\"opencv_objdetect220d.dll\")\n bla(\"opencv_calib3d220d.dll\")\n bla(\"opencv_flann220d.dll\")\n bla(\"tbb_debug.dll\")\n bla(\"tbbmalloc_debug.dll\")\n bla(\"tbbmalloc_proxy_debug.dll\")\n bla(\"xerces-c_3_2D.dll\")\n bla(\"xqilla23.dll\")\n bla(\"glew32d.dll\")\n bla(join(\"debug\", \"liblas.dll\"))\n bla(join(\"debug\", \"openjp2.dll\"))\n bla(\"qwtd.dll\")\n bla(\"avcodecd.dll\")\n bla(\"avformatd.dll\")\n bla(\"avutild.dll\")\n bla(\"ossimd.dll\")\n bla(\"geosd.dll\")\n bla(\"geos_cd.dll\")\n bla(\"libtiff.dll\")\n bla(\"hdf5.dll\")\n la(join(\"lib\", \"Qt5Cored.dll\"))\n la(join(\"lib\", \"Qt5Guid.dll\"))\n la(join(\"lib\", \"Qt5Networkd.dll\"))\n la(join(\"lib\", \"Qt5OpenGLd.dll\"))\n la(join(\"lib\", \"Qt5Scriptd.dll\"))\n la(join(\"lib\", \"Qt5Sqld.dll\"))\n la(join(\"lib\", \"Qt5Svgd.dll\"))\n la(join(\"lib\", \"Qt5Xmld.dll\"))\n la(join(\"lib\", \"Qt5Widgetsd.dll\"))\n la(join(\"lib\", \"Qt5PrintSupportd.dll\"))\n else:\n bla(\"opencv_haartraining.exe\")\n bla(\"opencv_traincascade.exe\")\n bla(\"opencv_core220.dll\")\n bla(\"opencv_imgproc220.dll\")\n bla(\"opencv_ml220.dll\")\n bla(\"opencv_features2d220.dll\")\n bla(\"opencv_video220.dll\")\n bla(\"opencv_objdetect220.dll\")\n bla(\"opencv_calib3d220.dll\")\n bla(\"opencv_flann220.dll\")\n bla(\"tbb.dll\")\n bla(\"tbbmalloc.dll\")\n bla(\"tbbmalloc_proxy.dll\")\n bla(\"xqilla23.dll\")\n bla(\"glew32.dll\")\n bla(\"liblas.dll\")\n bla(\"openjp2.dll\")\n bla(\"qwt.dll\")\n bla(\"avcodec.dll\")\n bla(\"avformat.dll\")\n bla(\"avutil.dll\")\n bla(\"ossim.dll\")\n bla(\"geos.dll\")\n bla(\"geos_c.dll\")\n bla(\"libtiff.dll\")\n bla(\"hdf5.dll\")\n la(join(\"plugins\", \"imageformats\", \"qgif.dll\"), 
\"imageformats\")\n la(join(\"plugins\", \"imageformats\", \"qjpeg.dll\"), \"imageformats\")\n #VS2017 la(join(\"plugins\", \"imageformats\", \"qmng4.dll\"), \"imageformats\")\n la(join(\"plugins\", \"imageformats\", \"qsvg.dll\"), \"imageformats\")\n la(join(\"plugins\", \"imageformats\", \"qtiff.dll\"), \"imageformats\")\n la(join(\"plugins\", \"platforms\", \"qdirect2d.dll\"), \"platforms\")\n la(join(\"plugins\", \"platforms\", \"qminimal.dll\"), \"platforms\")\n la(join(\"plugins\", \"platforms\", \"qoffscreen.dll\"), \"platforms\")\n la(join(\"plugins\", \"platforms\", \"qwindows.dll\"), \"platforms\")\n #VS2017 la(join(\"lib\", \"Qt3Support4.dll\"))\n la(join(\"lib\", \"Qt5Core.dll\"))\n la(join(\"lib\", \"Qt5Gui.dll\"))\n la(join(\"lib\", \"Qt5Network.dll\"))\n la(join(\"lib\", \"Qt5OpenGL.dll\"))\n la(join(\"lib\", \"Qt5PrintSupport.dll\"))\n #VS2017 la(join(\"lib\", \"QtScript4.dll\"))\n la(join(\"lib\", \"Qt5Sql.dll\"))\n la(join(\"lib\", \"Qt5Svg.dll\"))\n la(join(\"lib\", \"Qt5Widgets.dll\"))\n la(join(\"lib\", \"Qt5Xml.dll\"))\n #VS2017 la(join(\"lib\", \"QtXmlPatterns4.dll\"))\n return dp_list\n\ndef create_qt_conf(path, verbosity):\n qt_conf_file_path = join(path, \"qt.conf\")\n if not(os.path.exists(qt_conf_file_path)):\n if not(os.path.exists(path)):\n os.makedirs(path)\n if verbosity > 1:\n print(\"Creating qt.conf file...\")\n qt_conf_file = open(qt_conf_file_path, \"w\")\n qt_conf_file.write(\"[Paths]\\n\"\\\n \"Plugins = .\")\n qt_conf_file.close()\n if verbosity > 1:\n print(\"Done creating qt.conf file\")\n","repo_name":"opticks-org/opticks","sub_path":"Code/commonutils.py","file_name":"commonutils.py","file_ext":"py","file_size_in_byte":12385,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"54"} +{"seq_id":"3558965482","text":"import datetime\nimport sys\nfrom typing import Any, Dict, List, Optional, TYPE_CHECKING, Union\n\nfrom .. 
import _serialization\n\nif sys.version_info >= (3, 9):\n from collections.abc import MutableMapping\nelse:\n from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports\nif sys.version_info >= (3, 8):\n from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports\nelse:\n from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports\n\nif TYPE_CHECKING:\n # pylint: disable=unused-import,ungrouped-imports\n from .. import models as _models\nJSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object\n\n\nclass AgentConfiguration(_serialization.Model):\n \"\"\"Configurable properties that the user can set locally via the azcmagent config command, or\n remotely via ARM.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n :ivar proxy_url: Specifies the URL of the proxy to be used.\n :vartype proxy_url: str\n :ivar incoming_connections_ports: Specifies the list of ports that the agent will be able to\n listen on.\n :vartype incoming_connections_ports: list[str]\n :ivar extensions_allow_list: Array of extensions that are allowed to be installed or updated.\n :vartype extensions_allow_list: list[~azure.mgmt.hybridcompute.models.ConfigurationExtension]\n :ivar extensions_block_list: Array of extensions that are blocked (cannot be installed or\n updated).\n :vartype extensions_block_list: list[~azure.mgmt.hybridcompute.models.ConfigurationExtension]\n :ivar proxy_bypass: List of service names which should not use the specified proxy server.\n :vartype proxy_bypass: list[str]\n :ivar extensions_enabled: Specifies whether the extension service is enabled or disabled.\n :vartype extensions_enabled: str\n :ivar guest_configuration_enabled: Specified whether the guest configuration service is enabled\n or disabled.\n :vartype guest_configuration_enabled: str\n \"\"\"\n\n _validation = {\n \"proxy_url\": {\"readonly\": True},\n 
\"incoming_connections_ports\": {\"readonly\": True},\n \"extensions_allow_list\": {\"readonly\": True},\n \"extensions_block_list\": {\"readonly\": True},\n \"proxy_bypass\": {\"readonly\": True},\n \"extensions_enabled\": {\"readonly\": True},\n \"guest_configuration_enabled\": {\"readonly\": True},\n }\n\n _attribute_map = {\n \"proxy_url\": {\"key\": \"proxyUrl\", \"type\": \"str\"},\n \"incoming_connections_ports\": {\"key\": \"incomingConnectionsPorts\", \"type\": \"[str]\"},\n \"extensions_allow_list\": {\"key\": \"extensionsAllowList\", \"type\": \"[ConfigurationExtension]\"},\n \"extensions_block_list\": {\"key\": \"extensionsBlockList\", \"type\": \"[ConfigurationExtension]\"},\n \"proxy_bypass\": {\"key\": \"proxyBypass\", \"type\": \"[str]\"},\n \"extensions_enabled\": {\"key\": \"extensionsEnabled\", \"type\": \"str\"},\n \"guest_configuration_enabled\": {\"key\": \"guestConfigurationEnabled\", \"type\": \"str\"},\n }\n\n def __init__(self, **kwargs: Any) -> None:\n \"\"\" \"\"\"\n super().__init__(**kwargs)\n self.proxy_url = None\n self.incoming_connections_ports = None\n self.extensions_allow_list = None\n self.extensions_block_list = None\n self.proxy_bypass = None\n self.extensions_enabled = None\n self.guest_configuration_enabled = None\n\n\nclass CloudMetadata(_serialization.Model):\n \"\"\"The metadata of the cloud environment (Azure/GCP/AWS/OCI...).\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n :ivar provider: Specifies the cloud provider (Azure/AWS/GCP...).\n :vartype provider: str\n \"\"\"\n\n _validation = {\n \"provider\": {\"readonly\": True},\n }\n\n _attribute_map = {\n \"provider\": {\"key\": \"provider\", \"type\": \"str\"},\n }\n\n def __init__(self, **kwargs: Any) -> None:\n \"\"\" \"\"\"\n super().__init__(**kwargs)\n self.provider = None\n\n\nclass ConfigurationExtension(_serialization.Model):\n \"\"\"Describes properties that can identify extensions.\n\n Variables are only 
populated by the server, and will be ignored when sending a request.\n\n :ivar publisher: Publisher of the extension.\n :vartype publisher: str\n :ivar type: Type of the extension.\n :vartype type: str\n \"\"\"\n\n _validation = {\n \"publisher\": {\"readonly\": True},\n \"type\": {\"readonly\": True},\n }\n\n _attribute_map = {\n \"publisher\": {\"key\": \"publisher\", \"type\": \"str\"},\n \"type\": {\"key\": \"type\", \"type\": \"str\"},\n }\n\n def __init__(self, **kwargs: Any) -> None:\n \"\"\" \"\"\"\n super().__init__(**kwargs)\n self.publisher = None\n self.type = None\n\n\nclass ConnectionDetail(_serialization.Model):\n \"\"\"ConnectionDetail.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n :ivar id: Azure resource Id.\n :vartype id: str\n :ivar private_ip_address: The private endpoint connection private ip address.\n :vartype private_ip_address: str\n :ivar link_identifier: The private endpoint connection link identifier.\n :vartype link_identifier: str\n :ivar group_id: The private endpoint connection group id.\n :vartype group_id: str\n :ivar member_name: The private endpoint connection member name.\n :vartype member_name: str\n \"\"\"\n\n _validation = {\n \"id\": {\"readonly\": True},\n \"private_ip_address\": {\"readonly\": True},\n \"link_identifier\": {\"readonly\": True},\n \"group_id\": {\"readonly\": True},\n \"member_name\": {\"readonly\": True},\n }\n\n _attribute_map = {\n \"id\": {\"key\": \"id\", \"type\": \"str\"},\n \"private_ip_address\": {\"key\": \"privateIpAddress\", \"type\": \"str\"},\n \"link_identifier\": {\"key\": \"linkIdentifier\", \"type\": \"str\"},\n \"group_id\": {\"key\": \"groupId\", \"type\": \"str\"},\n \"member_name\": {\"key\": \"memberName\", \"type\": \"str\"},\n }\n\n def __init__(self, **kwargs: Any) -> None:\n \"\"\" \"\"\"\n super().__init__(**kwargs)\n self.id = None\n self.private_ip_address = None\n self.link_identifier = None\n self.group_id = None\n 
self.member_name = None\n\n\nclass ErrorAdditionalInfo(_serialization.Model):\n \"\"\"The resource management error additional info.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n :ivar type: The additional info type.\n :vartype type: str\n :ivar info: The additional info.\n :vartype info: JSON\n \"\"\"\n\n _validation = {\n \"type\": {\"readonly\": True},\n \"info\": {\"readonly\": True},\n }\n\n _attribute_map = {\n \"type\": {\"key\": \"type\", \"type\": \"str\"},\n \"info\": {\"key\": \"info\", \"type\": \"object\"},\n }\n\n def __init__(self, **kwargs: Any) -> None:\n \"\"\" \"\"\"\n super().__init__(**kwargs)\n self.type = None\n self.info = None\n\n\nclass ErrorDetail(_serialization.Model):\n \"\"\"The error detail.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n :ivar code: The error code.\n :vartype code: str\n :ivar message: The error message.\n :vartype message: str\n :ivar target: The error target.\n :vartype target: str\n :ivar details: The error details.\n :vartype details: list[~azure.mgmt.hybridcompute.models.ErrorDetail]\n :ivar additional_info: The error additional info.\n :vartype additional_info: list[~azure.mgmt.hybridcompute.models.ErrorAdditionalInfo]\n \"\"\"\n\n _validation = {\n \"code\": {\"readonly\": True},\n \"message\": {\"readonly\": True},\n \"target\": {\"readonly\": True},\n \"details\": {\"readonly\": True},\n \"additional_info\": {\"readonly\": True},\n }\n\n _attribute_map = {\n \"code\": {\"key\": \"code\", \"type\": \"str\"},\n \"message\": {\"key\": \"message\", \"type\": \"str\"},\n \"target\": {\"key\": \"target\", \"type\": \"str\"},\n \"details\": {\"key\": \"details\", \"type\": \"[ErrorDetail]\"},\n \"additional_info\": {\"key\": \"additionalInfo\", \"type\": \"[ErrorAdditionalInfo]\"},\n }\n\n def __init__(self, **kwargs: Any) -> None:\n \"\"\" \"\"\"\n super().__init__(**kwargs)\n self.code = None\n self.message = None\n 
self.target = None\n self.details = None\n self.additional_info = None\n\n\nclass ErrorResponse(_serialization.Model):\n \"\"\"Common error response for all Azure Resource Manager APIs to return error details for failed\n operations. (This also follows the OData error response format.).\n\n :ivar error: The error object.\n :vartype error: ~azure.mgmt.hybridcompute.models.ErrorDetail\n \"\"\"\n\n _attribute_map = {\n \"error\": {\"key\": \"error\", \"type\": \"ErrorDetail\"},\n }\n\n def __init__(self, *, error: Optional[\"_models.ErrorDetail\"] = None, **kwargs: Any) -> None:\n \"\"\"\n :keyword error: The error object.\n :paramtype error: ~azure.mgmt.hybridcompute.models.ErrorDetail\n \"\"\"\n super().__init__(**kwargs)\n self.error = error\n\n\nclass ExtensionTargetProperties(_serialization.Model):\n \"\"\"Describes the Machine Extension Target Version Properties.\n\n :ivar target_version: Properties for the specified Extension to Upgrade.\n :vartype target_version: str\n \"\"\"\n\n _attribute_map = {\n \"target_version\": {\"key\": \"targetVersion\", \"type\": \"str\"},\n }\n\n def __init__(self, *, target_version: Optional[str] = None, **kwargs: Any) -> None:\n \"\"\"\n :keyword target_version: Properties for the specified Extension to Upgrade.\n :paramtype target_version: str\n \"\"\"\n super().__init__(**kwargs)\n self.target_version = target_version\n\n\nclass PrivateLinkScopesResource(_serialization.Model):\n \"\"\"An azure resource object.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n All required parameters must be populated in order to send to Azure.\n\n :ivar id: Azure resource Id.\n :vartype id: str\n :ivar name: Azure resource name.\n :vartype name: str\n :ivar type: Azure resource type.\n :vartype type: str\n :ivar location: Resource location. 
Required.\n :vartype location: str\n :ivar tags: Resource tags.\n :vartype tags: dict[str, str]\n \"\"\"\n\n _validation = {\n \"id\": {\"readonly\": True},\n \"name\": {\"readonly\": True},\n \"type\": {\"readonly\": True},\n \"location\": {\"required\": True},\n }\n\n _attribute_map = {\n \"id\": {\"key\": \"id\", \"type\": \"str\"},\n \"name\": {\"key\": \"name\", \"type\": \"str\"},\n \"type\": {\"key\": \"type\", \"type\": \"str\"},\n \"location\": {\"key\": \"location\", \"type\": \"str\"},\n \"tags\": {\"key\": \"tags\", \"type\": \"{str}\"},\n }\n\n def __init__(self, *, location: str, tags: Optional[Dict[str, str]] = None, **kwargs: Any) -> None:\n \"\"\"\n :keyword location: Resource location. Required.\n :paramtype location: str\n :keyword tags: Resource tags.\n :paramtype tags: dict[str, str]\n \"\"\"\n super().__init__(**kwargs)\n self.id = None\n self.name = None\n self.type = None\n self.location = location\n self.tags = tags\n\n\nclass HybridComputePrivateLinkScope(PrivateLinkScopesResource):\n \"\"\"An Azure Arc PrivateLinkScope definition.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n All required parameters must be populated in order to send to Azure.\n\n :ivar id: Azure resource Id.\n :vartype id: str\n :ivar name: Azure resource name.\n :vartype name: str\n :ivar type: Azure resource type.\n :vartype type: str\n :ivar location: Resource location. 
Required.\n :vartype location: str\n :ivar tags: Resource tags.\n :vartype tags: dict[str, str]\n :ivar properties: Properties that define a Azure Arc PrivateLinkScope resource.\n :vartype properties: ~azure.mgmt.hybridcompute.models.HybridComputePrivateLinkScopeProperties\n :ivar system_data: The system meta data relating to this resource.\n :vartype system_data: ~azure.mgmt.hybridcompute.models.SystemData\n \"\"\"\n\n _validation = {\n \"id\": {\"readonly\": True},\n \"name\": {\"readonly\": True},\n \"type\": {\"readonly\": True},\n \"location\": {\"required\": True},\n \"system_data\": {\"readonly\": True},\n }\n\n _attribute_map = {\n \"id\": {\"key\": \"id\", \"type\": \"str\"},\n \"name\": {\"key\": \"name\", \"type\": \"str\"},\n \"type\": {\"key\": \"type\", \"type\": \"str\"},\n \"location\": {\"key\": \"location\", \"type\": \"str\"},\n \"tags\": {\"key\": \"tags\", \"type\": \"{str}\"},\n \"properties\": {\"key\": \"properties\", \"type\": \"HybridComputePrivateLinkScopeProperties\"},\n \"system_data\": {\"key\": \"systemData\", \"type\": \"SystemData\"},\n }\n\n def __init__(\n self,\n *,\n location: str,\n tags: Optional[Dict[str, str]] = None,\n properties: Optional[\"_models.HybridComputePrivateLinkScopeProperties\"] = None,\n **kwargs: Any\n ) -> None:\n \"\"\"\n :keyword location: Resource location. 
Required.\n :paramtype location: str\n :keyword tags: Resource tags.\n :paramtype tags: dict[str, str]\n :keyword properties: Properties that define a Azure Arc PrivateLinkScope resource.\n :paramtype properties: ~azure.mgmt.hybridcompute.models.HybridComputePrivateLinkScopeProperties\n \"\"\"\n super().__init__(location=location, tags=tags, **kwargs)\n self.properties = properties\n self.system_data = None\n\n\nclass HybridComputePrivateLinkScopeListResult(_serialization.Model):\n \"\"\"Describes the list of Azure Arc PrivateLinkScope resources.\n\n All required parameters must be populated in order to send to Azure.\n\n :ivar value: List of Azure Arc PrivateLinkScope definitions. Required.\n :vartype value: list[~azure.mgmt.hybridcompute.models.HybridComputePrivateLinkScope]\n :ivar next_link: The URI to get the next set of Azure Arc PrivateLinkScope definitions if too\n many PrivateLinkScopes where returned in the result set.\n :vartype next_link: str\n \"\"\"\n\n _validation = {\n \"value\": {\"required\": True},\n }\n\n _attribute_map = {\n \"value\": {\"key\": \"value\", \"type\": \"[HybridComputePrivateLinkScope]\"},\n \"next_link\": {\"key\": \"nextLink\", \"type\": \"str\"},\n }\n\n def __init__(\n self, *, value: List[\"_models.HybridComputePrivateLinkScope\"], next_link: Optional[str] = None, **kwargs: Any\n ) -> None:\n \"\"\"\n :keyword value: List of Azure Arc PrivateLinkScope definitions. 
Required.\n :paramtype value: list[~azure.mgmt.hybridcompute.models.HybridComputePrivateLinkScope]\n :keyword next_link: The URI to get the next set of Azure Arc PrivateLinkScope definitions if\n too many PrivateLinkScopes where returned in the result set.\n :paramtype next_link: str\n \"\"\"\n super().__init__(**kwargs)\n self.value = value\n self.next_link = next_link\n\n\nclass HybridComputePrivateLinkScopeProperties(_serialization.Model):\n \"\"\"Properties that define a Azure Arc PrivateLinkScope resource.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n :ivar public_network_access: Indicates whether machines associated with the private link scope\n can also use public Azure Arc service endpoints. Known values are: \"Enabled\" and \"Disabled\".\n :vartype public_network_access: str or ~azure.mgmt.hybridcompute.models.PublicNetworkAccessType\n :ivar provisioning_state: Current state of this PrivateLinkScope: whether or not is has been\n provisioned within the resource group it is defined. Users cannot change this value but are\n able to read from it. 
Values will include Provisioning ,Succeeded, Canceled and Failed.\n :vartype provisioning_state: str\n :ivar private_link_scope_id: The Guid id of the private link scope.\n :vartype private_link_scope_id: str\n :ivar private_endpoint_connections: The collection of associated Private Endpoint Connections.\n :vartype private_endpoint_connections:\n list[~azure.mgmt.hybridcompute.models.PrivateEndpointConnectionDataModel]\n \"\"\"\n\n _validation = {\n \"provisioning_state\": {\"readonly\": True},\n \"private_link_scope_id\": {\"readonly\": True},\n \"private_endpoint_connections\": {\"readonly\": True},\n }\n\n _attribute_map = {\n \"public_network_access\": {\"key\": \"publicNetworkAccess\", \"type\": \"str\"},\n \"provisioning_state\": {\"key\": \"provisioningState\", \"type\": \"str\"},\n \"private_link_scope_id\": {\"key\": \"privateLinkScopeId\", \"type\": \"str\"},\n \"private_endpoint_connections\": {\n \"key\": \"privateEndpointConnections\",\n \"type\": \"[PrivateEndpointConnectionDataModel]\",\n },\n }\n\n def __init__(\n self, *, public_network_access: Union[str, \"_models.PublicNetworkAccessType\"] = \"Disabled\", **kwargs: Any\n ) -> None:\n \"\"\"\n :keyword public_network_access: Indicates whether machines associated with the private link\n scope can also use public Azure Arc service endpoints. 
Known values are: \"Enabled\" and\n \"Disabled\".\n :paramtype public_network_access: str or\n ~azure.mgmt.hybridcompute.models.PublicNetworkAccessType\n \"\"\"\n super().__init__(**kwargs)\n self.public_network_access = public_network_access\n self.provisioning_state = None\n self.private_link_scope_id = None\n self.private_endpoint_connections = None\n\n\nclass Identity(_serialization.Model):\n \"\"\"Identity for the resource.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n :ivar principal_id: The principal ID of resource identity.\n :vartype principal_id: str\n :ivar tenant_id: The tenant ID of resource.\n :vartype tenant_id: str\n :ivar type: The identity type. Default value is \"SystemAssigned\".\n :vartype type: str\n \"\"\"\n\n _validation = {\n \"principal_id\": {\"readonly\": True},\n \"tenant_id\": {\"readonly\": True},\n }\n\n _attribute_map = {\n \"principal_id\": {\"key\": \"principalId\", \"type\": \"str\"},\n \"tenant_id\": {\"key\": \"tenantId\", \"type\": \"str\"},\n \"type\": {\"key\": \"type\", \"type\": \"str\"},\n }\n\n def __init__(self, *, type: Optional[Literal[\"SystemAssigned\"]] = None, **kwargs: Any) -> None:\n \"\"\"\n :keyword type: The identity type. Default value is \"SystemAssigned\".\n :paramtype type: str\n \"\"\"\n super().__init__(**kwargs)\n self.principal_id = None\n self.tenant_id = None\n self.type = type\n\n\nclass LocationData(_serialization.Model):\n \"\"\"Metadata pertaining to the geographic location of the resource.\n\n All required parameters must be populated in order to send to Azure.\n\n :ivar name: A canonical name for the geographic or physical location. 
Required.\n :vartype name: str\n :ivar city: The city or locality where the resource is located.\n :vartype city: str\n :ivar district: The district, state, or province where the resource is located.\n :vartype district: str\n :ivar country_or_region: The country or region where the resource is located.\n :vartype country_or_region: str\n \"\"\"\n\n _validation = {\n \"name\": {\"required\": True, \"max_length\": 256},\n }\n\n _attribute_map = {\n \"name\": {\"key\": \"name\", \"type\": \"str\"},\n \"city\": {\"key\": \"city\", \"type\": \"str\"},\n \"district\": {\"key\": \"district\", \"type\": \"str\"},\n \"country_or_region\": {\"key\": \"countryOrRegion\", \"type\": \"str\"},\n }\n\n def __init__(\n self,\n *,\n name: str,\n city: Optional[str] = None,\n district: Optional[str] = None,\n country_or_region: Optional[str] = None,\n **kwargs: Any\n ) -> None:\n \"\"\"\n :keyword name: A canonical name for the geographic or physical location. Required.\n :paramtype name: str\n :keyword city: The city or locality where the resource is located.\n :paramtype city: str\n :keyword district: The district, state, or province where the resource is located.\n :paramtype district: str\n :keyword country_or_region: The country or region where the resource is located.\n :paramtype country_or_region: str\n \"\"\"\n super().__init__(**kwargs)\n self.name = name\n self.city = city\n self.district = district\n self.country_or_region = country_or_region\n\n\nclass Resource(_serialization.Model):\n \"\"\"Common fields that are returned in the response for all Azure Resource Manager resources.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n :ivar id: Fully qualified resource ID for the resource. 
Ex -\n /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.\n :vartype id: str\n :ivar name: The name of the resource.\n :vartype name: str\n :ivar type: The type of the resource. E.g. \"Microsoft.Compute/virtualMachines\" or\n \"Microsoft.Storage/storageAccounts\".\n :vartype type: str\n \"\"\"\n\n _validation = {\n \"id\": {\"readonly\": True},\n \"name\": {\"readonly\": True},\n \"type\": {\"readonly\": True},\n }\n\n _attribute_map = {\n \"id\": {\"key\": \"id\", \"type\": \"str\"},\n \"name\": {\"key\": \"name\", \"type\": \"str\"},\n \"type\": {\"key\": \"type\", \"type\": \"str\"},\n }\n\n def __init__(self, **kwargs: Any) -> None:\n \"\"\" \"\"\"\n super().__init__(**kwargs)\n self.id = None\n self.name = None\n self.type = None\n\n\nclass TrackedResource(Resource):\n \"\"\"The resource model definition for an Azure Resource Manager tracked top level resource which\n has 'tags' and a 'location'.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n All required parameters must be populated in order to send to Azure.\n\n :ivar id: Fully qualified resource ID for the resource. Ex -\n /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.\n :vartype id: str\n :ivar name: The name of the resource.\n :vartype name: str\n :ivar type: The type of the resource. E.g. \"Microsoft.Compute/virtualMachines\" or\n \"Microsoft.Storage/storageAccounts\".\n :vartype type: str\n :ivar tags: Resource tags.\n :vartype tags: dict[str, str]\n :ivar location: The geo-location where the resource lives. 
Required.\n :vartype location: str\n \"\"\"\n\n _validation = {\n \"id\": {\"readonly\": True},\n \"name\": {\"readonly\": True},\n \"type\": {\"readonly\": True},\n \"location\": {\"required\": True},\n }\n\n _attribute_map = {\n \"id\": {\"key\": \"id\", \"type\": \"str\"},\n \"name\": {\"key\": \"name\", \"type\": \"str\"},\n \"type\": {\"key\": \"type\", \"type\": \"str\"},\n \"tags\": {\"key\": \"tags\", \"type\": \"{str}\"},\n \"location\": {\"key\": \"location\", \"type\": \"str\"},\n }\n\n def __init__(self, *, location: str, tags: Optional[Dict[str, str]] = None, **kwargs: Any) -> None:\n \"\"\"\n :keyword tags: Resource tags.\n :paramtype tags: dict[str, str]\n :keyword location: The geo-location where the resource lives. Required.\n :paramtype location: str\n \"\"\"\n super().__init__(**kwargs)\n self.tags = tags\n self.location = location\n\n\nclass Machine(TrackedResource):\n \"\"\"Describes a hybrid machine.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n All required parameters must be populated in order to send to Azure.\n\n :ivar id: Fully qualified resource ID for the resource. Ex -\n /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.\n :vartype id: str\n :ivar name: The name of the resource.\n :vartype name: str\n :ivar type: The type of the resource. E.g. \"Microsoft.Compute/virtualMachines\" or\n \"Microsoft.Storage/storageAccounts\".\n :vartype type: str\n :ivar tags: Resource tags.\n :vartype tags: dict[str, str]\n :ivar location: The geo-location where the resource lives. 
Required.\n :vartype location: str\n :ivar properties: Hybrid Compute Machine properties.\n :vartype properties: ~azure.mgmt.hybridcompute.models.MachineProperties\n :ivar identity: Identity for the resource.\n :vartype identity: ~azure.mgmt.hybridcompute.models.Identity\n :ivar system_data: The system meta data relating to this resource.\n :vartype system_data: ~azure.mgmt.hybridcompute.models.SystemData\n \"\"\"\n\n _validation = {\n \"id\": {\"readonly\": True},\n \"name\": {\"readonly\": True},\n \"type\": {\"readonly\": True},\n \"location\": {\"required\": True},\n \"system_data\": {\"readonly\": True},\n }\n\n _attribute_map = {\n \"id\": {\"key\": \"id\", \"type\": \"str\"},\n \"name\": {\"key\": \"name\", \"type\": \"str\"},\n \"type\": {\"key\": \"type\", \"type\": \"str\"},\n \"tags\": {\"key\": \"tags\", \"type\": \"{str}\"},\n \"location\": {\"key\": \"location\", \"type\": \"str\"},\n \"properties\": {\"key\": \"properties\", \"type\": \"MachineProperties\"},\n \"identity\": {\"key\": \"identity\", \"type\": \"Identity\"},\n \"system_data\": {\"key\": \"systemData\", \"type\": \"SystemData\"},\n }\n\n def __init__(\n self,\n *,\n location: str,\n tags: Optional[Dict[str, str]] = None,\n properties: Optional[\"_models.MachineProperties\"] = None,\n identity: Optional[\"_models.Identity\"] = None,\n **kwargs: Any\n ) -> None:\n \"\"\"\n :keyword tags: Resource tags.\n :paramtype tags: dict[str, str]\n :keyword location: The geo-location where the resource lives. 
Required.\n :paramtype location: str\n :keyword properties: Hybrid Compute Machine properties.\n :paramtype properties: ~azure.mgmt.hybridcompute.models.MachineProperties\n :keyword identity: Identity for the resource.\n :paramtype identity: ~azure.mgmt.hybridcompute.models.Identity\n \"\"\"\n super().__init__(tags=tags, location=location, **kwargs)\n self.properties = properties\n self.identity = identity\n self.system_data = None\n\n\nclass MachineExtension(TrackedResource):\n \"\"\"Describes a Machine Extension.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n All required parameters must be populated in order to send to Azure.\n\n :ivar id: Fully qualified resource ID for the resource. Ex -\n /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.\n :vartype id: str\n :ivar name: The name of the resource.\n :vartype name: str\n :ivar type: The type of the resource. E.g. \"Microsoft.Compute/virtualMachines\" or\n \"Microsoft.Storage/storageAccounts\".\n :vartype type: str\n :ivar tags: Resource tags.\n :vartype tags: dict[str, str]\n :ivar location: The geo-location where the resource lives. 
Required.\n :vartype location: str\n :ivar properties: Describes Machine Extension Properties.\n :vartype properties: ~azure.mgmt.hybridcompute.models.MachineExtensionProperties\n :ivar system_data: The system meta data relating to this resource.\n :vartype system_data: ~azure.mgmt.hybridcompute.models.SystemData\n \"\"\"\n\n _validation = {\n \"id\": {\"readonly\": True},\n \"name\": {\"readonly\": True},\n \"type\": {\"readonly\": True},\n \"location\": {\"required\": True},\n \"system_data\": {\"readonly\": True},\n }\n\n _attribute_map = {\n \"id\": {\"key\": \"id\", \"type\": \"str\"},\n \"name\": {\"key\": \"name\", \"type\": \"str\"},\n \"type\": {\"key\": \"type\", \"type\": \"str\"},\n \"tags\": {\"key\": \"tags\", \"type\": \"{str}\"},\n \"location\": {\"key\": \"location\", \"type\": \"str\"},\n \"properties\": {\"key\": \"properties\", \"type\": \"MachineExtensionProperties\"},\n \"system_data\": {\"key\": \"systemData\", \"type\": \"SystemData\"},\n }\n\n def __init__(\n self,\n *,\n location: str,\n tags: Optional[Dict[str, str]] = None,\n properties: Optional[\"_models.MachineExtensionProperties\"] = None,\n **kwargs: Any\n ) -> None:\n \"\"\"\n :keyword tags: Resource tags.\n :paramtype tags: dict[str, str]\n :keyword location: The geo-location where the resource lives. 
Required.\n :paramtype location: str\n :keyword properties: Describes Machine Extension Properties.\n :paramtype properties: ~azure.mgmt.hybridcompute.models.MachineExtensionProperties\n \"\"\"\n super().__init__(tags=tags, location=location, **kwargs)\n self.properties = properties\n self.system_data = None\n\n\nclass MachineExtensionInstanceView(_serialization.Model):\n \"\"\"Describes the Machine Extension Instance View.\n\n :ivar name: The machine extension name.\n :vartype name: str\n :ivar type: Specifies the type of the extension; an example is \"CustomScriptExtension\".\n :vartype type: str\n :ivar type_handler_version: Specifies the version of the script handler.\n :vartype type_handler_version: str\n :ivar status: Instance view status.\n :vartype status: ~azure.mgmt.hybridcompute.models.MachineExtensionInstanceViewStatus\n \"\"\"\n\n _attribute_map = {\n \"name\": {\"key\": \"name\", \"type\": \"str\"},\n \"type\": {\"key\": \"type\", \"type\": \"str\"},\n \"type_handler_version\": {\"key\": \"typeHandlerVersion\", \"type\": \"str\"},\n \"status\": {\"key\": \"status\", \"type\": \"MachineExtensionInstanceViewStatus\"},\n }\n\n def __init__(\n self,\n *,\n name: Optional[str] = None,\n type: Optional[str] = None,\n type_handler_version: Optional[str] = None,\n status: Optional[\"_models.MachineExtensionInstanceViewStatus\"] = None,\n **kwargs: Any\n ) -> None:\n \"\"\"\n :keyword name: The machine extension name.\n :paramtype name: str\n :keyword type: Specifies the type of the extension; an example is \"CustomScriptExtension\".\n :paramtype type: str\n :keyword type_handler_version: Specifies the version of the script handler.\n :paramtype type_handler_version: str\n :keyword status: Instance view status.\n :paramtype status: ~azure.mgmt.hybridcompute.models.MachineExtensionInstanceViewStatus\n \"\"\"\n super().__init__(**kwargs)\n self.name = name\n self.type = type\n self.type_handler_version = type_handler_version\n self.status = status\n\n\nclass 
MachineExtensionInstanceViewStatus(_serialization.Model):\n \"\"\"Instance view status.\n\n :ivar code: The status code.\n :vartype code: str\n :ivar level: The level code. Known values are: \"Info\", \"Warning\", and \"Error\".\n :vartype level: str or ~azure.mgmt.hybridcompute.models.StatusLevelTypes\n :ivar display_status: The short localizable label for the status.\n :vartype display_status: str\n :ivar message: The detailed status message, including for alerts and error messages.\n :vartype message: str\n :ivar time: The time of the status.\n :vartype time: ~datetime.datetime\n \"\"\"\n\n _attribute_map = {\n \"code\": {\"key\": \"code\", \"type\": \"str\"},\n \"level\": {\"key\": \"level\", \"type\": \"str\"},\n \"display_status\": {\"key\": \"displayStatus\", \"type\": \"str\"},\n \"message\": {\"key\": \"message\", \"type\": \"str\"},\n \"time\": {\"key\": \"time\", \"type\": \"iso-8601\"},\n }\n\n def __init__(\n self,\n *,\n code: Optional[str] = None,\n level: Optional[Union[str, \"_models.StatusLevelTypes\"]] = None,\n display_status: Optional[str] = None,\n message: Optional[str] = None,\n time: Optional[datetime.datetime] = None,\n **kwargs: Any\n ) -> None:\n \"\"\"\n :keyword code: The status code.\n :paramtype code: str\n :keyword level: The level code. 
Known values are: \"Info\", \"Warning\", and \"Error\".\n :paramtype level: str or ~azure.mgmt.hybridcompute.models.StatusLevelTypes\n :keyword display_status: The short localizable label for the status.\n :paramtype display_status: str\n :keyword message: The detailed status message, including for alerts and error messages.\n :paramtype message: str\n :keyword time: The time of the status.\n :paramtype time: ~datetime.datetime\n \"\"\"\n super().__init__(**kwargs)\n self.code = code\n self.level = level\n self.display_status = display_status\n self.message = message\n self.time = time\n\n\nclass MachineExtensionProperties(_serialization.Model):\n \"\"\"Describes the properties of a Machine Extension.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n :ivar force_update_tag: How the extension handler should be forced to update even if the\n extension configuration has not changed.\n :vartype force_update_tag: str\n :ivar publisher: The name of the extension handler publisher.\n :vartype publisher: str\n :ivar type: Specifies the type of the extension; an example is \"CustomScriptExtension\".\n :vartype type: str\n :ivar type_handler_version: Specifies the version of the script handler.\n :vartype type_handler_version: str\n :ivar enable_automatic_upgrade: Indicates whether the extension should be automatically\n upgraded by the platform if there is a newer version available.\n :vartype enable_automatic_upgrade: bool\n :ivar auto_upgrade_minor_version: Indicates whether the extension should use a newer minor\n version if one is available at deployment time. 
Once deployed, however, the extension will not\n upgrade minor versions unless redeployed, even with this property set to true.\n :vartype auto_upgrade_minor_version: bool\n :ivar settings: Json formatted public settings for the extension.\n :vartype settings: JSON\n :ivar protected_settings: The extension can contain either protectedSettings or\n protectedSettingsFromKeyVault or no protected settings at all.\n :vartype protected_settings: JSON\n :ivar provisioning_state: The provisioning state, which only appears in the response.\n :vartype provisioning_state: str\n :ivar instance_view: The machine extension instance view.\n :vartype instance_view: ~azure.mgmt.hybridcompute.models.MachineExtensionInstanceView\n \"\"\"\n\n _validation = {\n \"provisioning_state\": {\"readonly\": True},\n }\n\n _attribute_map = {\n \"force_update_tag\": {\"key\": \"forceUpdateTag\", \"type\": \"str\"},\n \"publisher\": {\"key\": \"publisher\", \"type\": \"str\"},\n \"type\": {\"key\": \"type\", \"type\": \"str\"},\n \"type_handler_version\": {\"key\": \"typeHandlerVersion\", \"type\": \"str\"},\n \"enable_automatic_upgrade\": {\"key\": \"enableAutomaticUpgrade\", \"type\": \"bool\"},\n \"auto_upgrade_minor_version\": {\"key\": \"autoUpgradeMinorVersion\", \"type\": \"bool\"},\n \"settings\": {\"key\": \"settings\", \"type\": \"object\"},\n \"protected_settings\": {\"key\": \"protectedSettings\", \"type\": \"object\"},\n \"provisioning_state\": {\"key\": \"provisioningState\", \"type\": \"str\"},\n \"instance_view\": {\"key\": \"instanceView\", \"type\": \"MachineExtensionInstanceView\"},\n }\n\n def __init__(\n self,\n *,\n force_update_tag: Optional[str] = None,\n publisher: Optional[str] = None,\n type: Optional[str] = None,\n type_handler_version: Optional[str] = None,\n enable_automatic_upgrade: Optional[bool] = None,\n auto_upgrade_minor_version: Optional[bool] = None,\n settings: Optional[JSON] = None,\n protected_settings: Optional[JSON] = None,\n instance_view: 
Optional[\"_models.MachineExtensionInstanceView\"] = None,\n **kwargs: Any\n ) -> None:\n \"\"\"\n :keyword force_update_tag: How the extension handler should be forced to update even if the\n extension configuration has not changed.\n :paramtype force_update_tag: str\n :keyword publisher: The name of the extension handler publisher.\n :paramtype publisher: str\n :keyword type: Specifies the type of the extension; an example is \"CustomScriptExtension\".\n :paramtype type: str\n :keyword type_handler_version: Specifies the version of the script handler.\n :paramtype type_handler_version: str\n :keyword enable_automatic_upgrade: Indicates whether the extension should be automatically\n upgraded by the platform if there is a newer version available.\n :paramtype enable_automatic_upgrade: bool\n :keyword auto_upgrade_minor_version: Indicates whether the extension should use a newer minor\n version if one is available at deployment time. Once deployed, however, the extension will not\n upgrade minor versions unless redeployed, even with this property set to true.\n :paramtype auto_upgrade_minor_version: bool\n :keyword settings: Json formatted public settings for the extension.\n :paramtype settings: JSON\n :keyword protected_settings: The extension can contain either protectedSettings or\n protectedSettingsFromKeyVault or no protected settings at all.\n :paramtype protected_settings: JSON\n :keyword instance_view: The machine extension instance view.\n :paramtype instance_view: ~azure.mgmt.hybridcompute.models.MachineExtensionInstanceView\n \"\"\"\n super().__init__(**kwargs)\n self.force_update_tag = force_update_tag\n self.publisher = publisher\n self.type = type\n self.type_handler_version = type_handler_version\n self.enable_automatic_upgrade = enable_automatic_upgrade\n self.auto_upgrade_minor_version = auto_upgrade_minor_version\n self.settings = settings\n self.protected_settings = protected_settings\n self.provisioning_state = None\n self.instance_view = 
instance_view\n\n\nclass MachineExtensionsListResult(_serialization.Model):\n \"\"\"Describes the Machine Extensions List Result.\n\n :ivar value: The list of extensions.\n :vartype value: list[~azure.mgmt.hybridcompute.models.MachineExtension]\n :ivar next_link: The uri to fetch the next page of machine extensions. Call ListNext() with\n this to fetch the next page of extensions.\n :vartype next_link: str\n \"\"\"\n\n _attribute_map = {\n \"value\": {\"key\": \"value\", \"type\": \"[MachineExtension]\"},\n \"next_link\": {\"key\": \"nextLink\", \"type\": \"str\"},\n }\n\n def __init__(\n self,\n *,\n value: Optional[List[\"_models.MachineExtension\"]] = None,\n next_link: Optional[str] = None,\n **kwargs: Any\n ) -> None:\n \"\"\"\n :keyword value: The list of extensions.\n :paramtype value: list[~azure.mgmt.hybridcompute.models.MachineExtension]\n :keyword next_link: The uri to fetch the next page of machine extensions. Call ListNext() with\n this to fetch the next page of extensions.\n :paramtype next_link: str\n \"\"\"\n super().__init__(**kwargs)\n self.value = value\n self.next_link = next_link\n\n\nclass ResourceUpdate(_serialization.Model):\n \"\"\"The Update Resource model definition.\n\n :ivar tags: Resource tags.\n :vartype tags: dict[str, str]\n \"\"\"\n\n _attribute_map = {\n \"tags\": {\"key\": \"tags\", \"type\": \"{str}\"},\n }\n\n def __init__(self, *, tags: Optional[Dict[str, str]] = None, **kwargs: Any) -> None:\n \"\"\"\n :keyword tags: Resource tags.\n :paramtype tags: dict[str, str]\n \"\"\"\n super().__init__(**kwargs)\n self.tags = tags\n\n\nclass MachineExtensionUpdate(ResourceUpdate):\n \"\"\"Describes a Machine Extension Update.\n\n :ivar tags: Resource tags.\n :vartype tags: dict[str, str]\n :ivar properties: Describes Machine Extension Update Properties.\n :vartype properties: ~azure.mgmt.hybridcompute.models.MachineExtensionUpdateProperties\n \"\"\"\n\n _attribute_map = {\n \"tags\": {\"key\": \"tags\", \"type\": \"{str}\"},\n 
\"properties\": {\"key\": \"properties\", \"type\": \"MachineExtensionUpdateProperties\"},\n }\n\n def __init__(\n self,\n *,\n tags: Optional[Dict[str, str]] = None,\n properties: Optional[\"_models.MachineExtensionUpdateProperties\"] = None,\n **kwargs: Any\n ) -> None:\n \"\"\"\n :keyword tags: Resource tags.\n :paramtype tags: dict[str, str]\n :keyword properties: Describes Machine Extension Update Properties.\n :paramtype properties: ~azure.mgmt.hybridcompute.models.MachineExtensionUpdateProperties\n \"\"\"\n super().__init__(tags=tags, **kwargs)\n self.properties = properties\n\n\nclass MachineExtensionUpdateProperties(_serialization.Model):\n \"\"\"Describes the properties of a Machine Extension.\n\n :ivar force_update_tag: How the extension handler should be forced to update even if the\n extension configuration has not changed.\n :vartype force_update_tag: str\n :ivar publisher: The name of the extension handler publisher.\n :vartype publisher: str\n :ivar type: Specifies the type of the extension; an example is \"CustomScriptExtension\".\n :vartype type: str\n :ivar type_handler_version: Specifies the version of the script handler.\n :vartype type_handler_version: str\n :ivar auto_upgrade_minor_version: Indicates whether the extension should use a newer minor\n version if one is available at deployment time. 
Once deployed, however, the extension will not\n upgrade minor versions unless redeployed, even with this property set to true.\n :vartype auto_upgrade_minor_version: bool\n :ivar settings: Json formatted public settings for the extension.\n :vartype settings: JSON\n :ivar protected_settings: The extension can contain either protectedSettings or\n protectedSettingsFromKeyVault or no protected settings at all.\n :vartype protected_settings: JSON\n \"\"\"\n\n _attribute_map = {\n \"force_update_tag\": {\"key\": \"forceUpdateTag\", \"type\": \"str\"},\n \"publisher\": {\"key\": \"publisher\", \"type\": \"str\"},\n \"type\": {\"key\": \"type\", \"type\": \"str\"},\n \"type_handler_version\": {\"key\": \"typeHandlerVersion\", \"type\": \"str\"},\n \"auto_upgrade_minor_version\": {\"key\": \"autoUpgradeMinorVersion\", \"type\": \"bool\"},\n \"settings\": {\"key\": \"settings\", \"type\": \"object\"},\n \"protected_settings\": {\"key\": \"protectedSettings\", \"type\": \"object\"},\n }\n\n def __init__(\n self,\n *,\n force_update_tag: Optional[str] = None,\n publisher: Optional[str] = None,\n type: Optional[str] = None,\n type_handler_version: Optional[str] = None,\n auto_upgrade_minor_version: Optional[bool] = None,\n settings: Optional[JSON] = None,\n protected_settings: Optional[JSON] = None,\n **kwargs: Any\n ) -> None:\n \"\"\"\n :keyword force_update_tag: How the extension handler should be forced to update even if the\n extension configuration has not changed.\n :paramtype force_update_tag: str\n :keyword publisher: The name of the extension handler publisher.\n :paramtype publisher: str\n :keyword type: Specifies the type of the extension; an example is \"CustomScriptExtension\".\n :paramtype type: str\n :keyword type_handler_version: Specifies the version of the script handler.\n :paramtype type_handler_version: str\n :keyword auto_upgrade_minor_version: Indicates whether the extension should use a newer minor\n version if one is available at deployment time. 
Once deployed, however, the extension will not\n upgrade minor versions unless redeployed, even with this property set to true.\n :paramtype auto_upgrade_minor_version: bool\n :keyword settings: Json formatted public settings for the extension.\n :paramtype settings: JSON\n :keyword protected_settings: The extension can contain either protectedSettings or\n protectedSettingsFromKeyVault or no protected settings at all.\n :paramtype protected_settings: JSON\n \"\"\"\n super().__init__(**kwargs)\n self.force_update_tag = force_update_tag\n self.publisher = publisher\n self.type = type\n self.type_handler_version = type_handler_version\n self.auto_upgrade_minor_version = auto_upgrade_minor_version\n self.settings = settings\n self.protected_settings = protected_settings\n\n\nclass MachineExtensionUpgrade(_serialization.Model):\n \"\"\"Describes the Machine Extension Upgrade Properties.\n\n :ivar extension_targets: Describes the Extension Target Properties.\n :vartype extension_targets: dict[str,\n ~azure.mgmt.hybridcompute.models.ExtensionTargetProperties]\n \"\"\"\n\n _attribute_map = {\n \"extension_targets\": {\"key\": \"extensionTargets\", \"type\": \"{ExtensionTargetProperties}\"},\n }\n\n def __init__(\n self, *, extension_targets: Optional[Dict[str, \"_models.ExtensionTargetProperties\"]] = None, **kwargs: Any\n ) -> None:\n \"\"\"\n :keyword extension_targets: Describes the Extension Target Properties.\n :paramtype extension_targets: dict[str,\n ~azure.mgmt.hybridcompute.models.ExtensionTargetProperties]\n \"\"\"\n super().__init__(**kwargs)\n self.extension_targets = extension_targets\n\n\nclass MachineListResult(_serialization.Model):\n \"\"\"The List hybrid machine operation response.\n\n All required parameters must be populated in order to send to Azure.\n\n :ivar value: The list of hybrid machines. Required.\n :vartype value: list[~azure.mgmt.hybridcompute.models.Machine]\n :ivar next_link: The URI to fetch the next page of Machines. 
Call ListNext() with this URI to\n fetch the next page of hybrid machines.\n :vartype next_link: str\n \"\"\"\n\n _validation = {\n \"value\": {\"required\": True},\n }\n\n _attribute_map = {\n \"value\": {\"key\": \"value\", \"type\": \"[Machine]\"},\n \"next_link\": {\"key\": \"nextLink\", \"type\": \"str\"},\n }\n\n def __init__(self, *, value: List[\"_models.Machine\"], next_link: Optional[str] = None, **kwargs: Any) -> None:\n \"\"\"\n :keyword value: The list of hybrid machines. Required.\n :paramtype value: list[~azure.mgmt.hybridcompute.models.Machine]\n :keyword next_link: The URI to fetch the next page of Machines. Call ListNext() with this URI\n to fetch the next page of hybrid machines.\n :paramtype next_link: str\n \"\"\"\n super().__init__(**kwargs)\n self.value = value\n self.next_link = next_link\n\n\nclass MachineProperties(_serialization.Model): # pylint: disable=too-many-instance-attributes\n \"\"\"Describes the properties of a hybrid machine.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n :ivar location_data: Metadata pertaining to the geographic location of the resource.\n :vartype location_data: ~azure.mgmt.hybridcompute.models.LocationData\n :ivar agent_configuration: Configurable properties that the user can set locally via the\n azcmagent config command, or remotely via ARM.\n :vartype agent_configuration: ~azure.mgmt.hybridcompute.models.AgentConfiguration\n :ivar service_statuses: Statuses of dependent services that are reported back to ARM.\n :vartype service_statuses: ~azure.mgmt.hybridcompute.models.ServiceStatuses\n :ivar cloud_metadata: The metadata of the cloud environment (Azure/GCP/AWS/OCI...).\n :vartype cloud_metadata: ~azure.mgmt.hybridcompute.models.CloudMetadata\n :ivar os_profile: Specifies the operating system settings for the hybrid machine.\n :vartype os_profile: ~azure.mgmt.hybridcompute.models.OSProfile\n :ivar provisioning_state: The provisioning state, which only 
appears in the response.\n :vartype provisioning_state: str\n :ivar status: The status of the hybrid machine agent. Known values are: \"Connected\",\n \"Disconnected\", and \"Error\".\n :vartype status: str or ~azure.mgmt.hybridcompute.models.StatusTypes\n :ivar last_status_change: The time of the last status change.\n :vartype last_status_change: ~datetime.datetime\n :ivar error_details: Details about the error state.\n :vartype error_details: list[~azure.mgmt.hybridcompute.models.ErrorDetail]\n :ivar agent_version: The hybrid machine agent full version.\n :vartype agent_version: str\n :ivar vm_id: Specifies the hybrid machine unique ID.\n :vartype vm_id: str\n :ivar display_name: Specifies the hybrid machine display name.\n :vartype display_name: str\n :ivar machine_fqdn: Specifies the hybrid machine FQDN.\n :vartype machine_fqdn: str\n :ivar client_public_key: Public Key that the client provides to be used during initial resource\n onboarding.\n :vartype client_public_key: str\n :ivar os_name: The Operating System running on the hybrid machine.\n :vartype os_name: str\n :ivar os_version: The version of Operating System running on the hybrid machine.\n :vartype os_version: str\n :ivar os_type: The type of Operating System (windows/linux).\n :vartype os_type: str\n :ivar vm_uuid: Specifies the Arc Machine's unique SMBIOS ID.\n :vartype vm_uuid: str\n :ivar extensions: Machine Extensions information.\n :vartype extensions: list[~azure.mgmt.hybridcompute.models.MachineExtensionInstanceView]\n :ivar os_sku: Specifies the Operating System product SKU.\n :vartype os_sku: str\n :ivar domain_name: Specifies the Windows domain name.\n :vartype domain_name: str\n :ivar ad_fqdn: Specifies the AD fully qualified display name.\n :vartype ad_fqdn: str\n :ivar dns_fqdn: Specifies the DNS fully qualified display name.\n :vartype dns_fqdn: str\n :ivar private_link_scope_resource_id: The resource id of the private link scope this machine is\n assigned to, if any.\n :vartype 
private_link_scope_resource_id: str\n :ivar parent_cluster_resource_id: The resource id of the parent cluster (Azure HCI) this\n machine is assigned to, if any.\n :vartype parent_cluster_resource_id: str\n :ivar mssql_discovered: Specifies whether any MS SQL instance is discovered on the machine.\n :vartype mssql_discovered: str\n :ivar detected_properties: Detected properties from the machine.\n :vartype detected_properties: dict[str, str]\n \"\"\"\n\n _validation = {\n \"agent_configuration\": {\"readonly\": True},\n \"provisioning_state\": {\"readonly\": True},\n \"status\": {\"readonly\": True},\n \"last_status_change\": {\"readonly\": True},\n \"error_details\": {\"readonly\": True},\n \"agent_version\": {\"readonly\": True},\n \"display_name\": {\"readonly\": True},\n \"machine_fqdn\": {\"readonly\": True},\n \"os_name\": {\"readonly\": True},\n \"os_version\": {\"readonly\": True},\n \"vm_uuid\": {\"readonly\": True},\n \"os_sku\": {\"readonly\": True},\n \"domain_name\": {\"readonly\": True},\n \"ad_fqdn\": {\"readonly\": True},\n \"dns_fqdn\": {\"readonly\": True},\n \"detected_properties\": {\"readonly\": True},\n }\n\n _attribute_map = {\n \"location_data\": {\"key\": \"locationData\", \"type\": \"LocationData\"},\n \"agent_configuration\": {\"key\": \"agentConfiguration\", \"type\": \"AgentConfiguration\"},\n \"service_statuses\": {\"key\": \"serviceStatuses\", \"type\": \"ServiceStatuses\"},\n \"cloud_metadata\": {\"key\": \"cloudMetadata\", \"type\": \"CloudMetadata\"},\n \"os_profile\": {\"key\": \"osProfile\", \"type\": \"OSProfile\"},\n \"provisioning_state\": {\"key\": \"provisioningState\", \"type\": \"str\"},\n \"status\": {\"key\": \"status\", \"type\": \"str\"},\n \"last_status_change\": {\"key\": \"lastStatusChange\", \"type\": \"iso-8601\"},\n \"error_details\": {\"key\": \"errorDetails\", \"type\": \"[ErrorDetail]\"},\n \"agent_version\": {\"key\": \"agentVersion\", \"type\": \"str\"},\n \"vm_id\": {\"key\": \"vmId\", \"type\": \"str\"},\n 
\"display_name\": {\"key\": \"displayName\", \"type\": \"str\"},\n \"machine_fqdn\": {\"key\": \"machineFqdn\", \"type\": \"str\"},\n \"client_public_key\": {\"key\": \"clientPublicKey\", \"type\": \"str\"},\n \"os_name\": {\"key\": \"osName\", \"type\": \"str\"},\n \"os_version\": {\"key\": \"osVersion\", \"type\": \"str\"},\n \"os_type\": {\"key\": \"osType\", \"type\": \"str\"},\n \"vm_uuid\": {\"key\": \"vmUuid\", \"type\": \"str\"},\n \"extensions\": {\"key\": \"extensions\", \"type\": \"[MachineExtensionInstanceView]\"},\n \"os_sku\": {\"key\": \"osSku\", \"type\": \"str\"},\n \"domain_name\": {\"key\": \"domainName\", \"type\": \"str\"},\n \"ad_fqdn\": {\"key\": \"adFqdn\", \"type\": \"str\"},\n \"dns_fqdn\": {\"key\": \"dnsFqdn\", \"type\": \"str\"},\n \"private_link_scope_resource_id\": {\"key\": \"privateLinkScopeResourceId\", \"type\": \"str\"},\n \"parent_cluster_resource_id\": {\"key\": \"parentClusterResourceId\", \"type\": \"str\"},\n \"mssql_discovered\": {\"key\": \"mssqlDiscovered\", \"type\": \"str\"},\n \"detected_properties\": {\"key\": \"detectedProperties\", \"type\": \"{str}\"},\n }\n\n def __init__( # pylint: disable=too-many-locals\n self,\n *,\n location_data: Optional[\"_models.LocationData\"] = None,\n service_statuses: Optional[\"_models.ServiceStatuses\"] = None,\n cloud_metadata: Optional[\"_models.CloudMetadata\"] = None,\n os_profile: Optional[\"_models.OSProfile\"] = None,\n vm_id: Optional[str] = None,\n client_public_key: Optional[str] = None,\n os_type: Optional[str] = None,\n extensions: Optional[List[\"_models.MachineExtensionInstanceView\"]] = None,\n private_link_scope_resource_id: Optional[str] = None,\n parent_cluster_resource_id: Optional[str] = None,\n mssql_discovered: Optional[str] = None,\n **kwargs: Any\n ) -> None:\n \"\"\"\n :keyword location_data: Metadata pertaining to the geographic location of the resource.\n :paramtype location_data: ~azure.mgmt.hybridcompute.models.LocationData\n :keyword service_statuses: 
Statuses of dependent services that are reported back to ARM.\n :paramtype service_statuses: ~azure.mgmt.hybridcompute.models.ServiceStatuses\n :keyword cloud_metadata: The metadata of the cloud environment (Azure/GCP/AWS/OCI...).\n :paramtype cloud_metadata: ~azure.mgmt.hybridcompute.models.CloudMetadata\n :keyword os_profile: Specifies the operating system settings for the hybrid machine.\n :paramtype os_profile: ~azure.mgmt.hybridcompute.models.OSProfile\n :keyword vm_id: Specifies the hybrid machine unique ID.\n :paramtype vm_id: str\n :keyword client_public_key: Public Key that the client provides to be used during initial\n resource onboarding.\n :paramtype client_public_key: str\n :keyword os_type: The type of Operating System (windows/linux).\n :paramtype os_type: str\n :keyword extensions: Machine Extensions information.\n :paramtype extensions: list[~azure.mgmt.hybridcompute.models.MachineExtensionInstanceView]\n :keyword private_link_scope_resource_id: The resource id of the private link scope this machine\n is assigned to, if any.\n :paramtype private_link_scope_resource_id: str\n :keyword parent_cluster_resource_id: The resource id of the parent cluster (Azure HCI) this\n machine is assigned to, if any.\n :paramtype parent_cluster_resource_id: str\n :keyword mssql_discovered: Specifies whether any MS SQL instance is discovered on the machine.\n :paramtype mssql_discovered: str\n \"\"\"\n super().__init__(**kwargs)\n self.location_data = location_data\n self.agent_configuration = None\n self.service_statuses = service_statuses\n self.cloud_metadata = cloud_metadata\n self.os_profile = os_profile\n self.provisioning_state = None\n self.status = None\n self.last_status_change = None\n self.error_details = None\n self.agent_version = None\n self.vm_id = vm_id\n self.display_name = None\n self.machine_fqdn = None\n self.client_public_key = client_public_key\n self.os_name = None\n self.os_version = None\n self.os_type = os_type\n self.vm_uuid = None\n 
self.extensions = extensions\n self.os_sku = None\n self.domain_name = None\n self.ad_fqdn = None\n self.dns_fqdn = None\n self.private_link_scope_resource_id = private_link_scope_resource_id\n self.parent_cluster_resource_id = parent_cluster_resource_id\n self.mssql_discovered = mssql_discovered\n self.detected_properties = None\n\n\nclass MachineUpdate(ResourceUpdate):\n \"\"\"Describes a hybrid machine Update.\n\n :ivar tags: Resource tags.\n :vartype tags: dict[str, str]\n :ivar identity: Identity for the resource.\n :vartype identity: ~azure.mgmt.hybridcompute.models.Identity\n :ivar properties: Hybrid Compute Machine properties.\n :vartype properties: ~azure.mgmt.hybridcompute.models.MachineUpdateProperties\n \"\"\"\n\n _attribute_map = {\n \"tags\": {\"key\": \"tags\", \"type\": \"{str}\"},\n \"identity\": {\"key\": \"identity\", \"type\": \"Identity\"},\n \"properties\": {\"key\": \"properties\", \"type\": \"MachineUpdateProperties\"},\n }\n\n def __init__(\n self,\n *,\n tags: Optional[Dict[str, str]] = None,\n identity: Optional[\"_models.Identity\"] = None,\n properties: Optional[\"_models.MachineUpdateProperties\"] = None,\n **kwargs: Any\n ) -> None:\n \"\"\"\n :keyword tags: Resource tags.\n :paramtype tags: dict[str, str]\n :keyword identity: Identity for the resource.\n :paramtype identity: ~azure.mgmt.hybridcompute.models.Identity\n :keyword properties: Hybrid Compute Machine properties.\n :paramtype properties: ~azure.mgmt.hybridcompute.models.MachineUpdateProperties\n \"\"\"\n super().__init__(tags=tags, **kwargs)\n self.identity = identity\n self.properties = properties\n\n\nclass MachineUpdateProperties(_serialization.Model):\n \"\"\"Describes the ARM updatable properties of a hybrid machine.\n\n :ivar location_data: Metadata pertaining to the geographic location of the resource.\n :vartype location_data: ~azure.mgmt.hybridcompute.models.LocationData\n :ivar os_profile: Specifies the operating system settings for the hybrid machine.\n :vartype 
os_profile: ~azure.mgmt.hybridcompute.models.OSProfile\n :ivar cloud_metadata: The metadata of the cloud environment (Azure/GCP/AWS/OCI...).\n :vartype cloud_metadata: ~azure.mgmt.hybridcompute.models.CloudMetadata\n :ivar parent_cluster_resource_id: The resource id of the parent cluster (Azure HCI) this\n machine is assigned to, if any.\n :vartype parent_cluster_resource_id: str\n :ivar private_link_scope_resource_id: The resource id of the private link scope this machine is\n assigned to, if any.\n :vartype private_link_scope_resource_id: str\n \"\"\"\n\n _attribute_map = {\n \"location_data\": {\"key\": \"locationData\", \"type\": \"LocationData\"},\n \"os_profile\": {\"key\": \"osProfile\", \"type\": \"OSProfile\"},\n \"cloud_metadata\": {\"key\": \"cloudMetadata\", \"type\": \"CloudMetadata\"},\n \"parent_cluster_resource_id\": {\"key\": \"parentClusterResourceId\", \"type\": \"str\"},\n \"private_link_scope_resource_id\": {\"key\": \"privateLinkScopeResourceId\", \"type\": \"str\"},\n }\n\n def __init__(\n self,\n *,\n location_data: Optional[\"_models.LocationData\"] = None,\n os_profile: Optional[\"_models.OSProfile\"] = None,\n cloud_metadata: Optional[\"_models.CloudMetadata\"] = None,\n parent_cluster_resource_id: Optional[str] = None,\n private_link_scope_resource_id: Optional[str] = None,\n **kwargs: Any\n ) -> None:\n \"\"\"\n :keyword location_data: Metadata pertaining to the geographic location of the resource.\n :paramtype location_data: ~azure.mgmt.hybridcompute.models.LocationData\n :keyword os_profile: Specifies the operating system settings for the hybrid machine.\n :paramtype os_profile: ~azure.mgmt.hybridcompute.models.OSProfile\n :keyword cloud_metadata: The metadata of the cloud environment (Azure/GCP/AWS/OCI...).\n :paramtype cloud_metadata: ~azure.mgmt.hybridcompute.models.CloudMetadata\n :keyword parent_cluster_resource_id: The resource id of the parent cluster (Azure HCI) this\n machine is assigned to, if any.\n :paramtype 
parent_cluster_resource_id: str\n :keyword private_link_scope_resource_id: The resource id of the private link scope this machine\n is assigned to, if any.\n :paramtype private_link_scope_resource_id: str\n \"\"\"\n super().__init__(**kwargs)\n self.location_data = location_data\n self.os_profile = os_profile\n self.cloud_metadata = cloud_metadata\n self.parent_cluster_resource_id = parent_cluster_resource_id\n self.private_link_scope_resource_id = private_link_scope_resource_id\n\n\nclass OperationListResult(_serialization.Model):\n \"\"\"The List Compute Operation operation response.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n :ivar value: The list of compute operations.\n :vartype value: list[~azure.mgmt.hybridcompute.models.OperationValue]\n \"\"\"\n\n _validation = {\n \"value\": {\"readonly\": True},\n }\n\n _attribute_map = {\n \"value\": {\"key\": \"value\", \"type\": \"[OperationValue]\"},\n }\n\n def __init__(self, **kwargs: Any) -> None:\n \"\"\" \"\"\"\n super().__init__(**kwargs)\n self.value = None\n\n\nclass OperationValue(_serialization.Model):\n \"\"\"Describes the properties of a Compute Operation value.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n :ivar origin: The origin of the compute operation.\n :vartype origin: str\n :ivar name: The name of the compute operation.\n :vartype name: str\n :ivar display: Display properties.\n :vartype display: ~azure.mgmt.hybridcompute.models.OperationValueDisplay\n :ivar is_data_action: This property indicates if the operation is an action or a data action.\n :vartype is_data_action: bool\n \"\"\"\n\n _validation = {\n \"origin\": {\"readonly\": True},\n \"name\": {\"readonly\": True},\n \"is_data_action\": {\"readonly\": True},\n }\n\n _attribute_map = {\n \"origin\": {\"key\": \"origin\", \"type\": \"str\"},\n \"name\": {\"key\": \"name\", \"type\": \"str\"},\n \"display\": {\"key\": \"display\", \"type\": 
\"OperationValueDisplay\"},\n \"is_data_action\": {\"key\": \"isDataAction\", \"type\": \"bool\"},\n }\n\n def __init__(self, *, display: Optional[\"_models.OperationValueDisplay\"] = None, **kwargs: Any) -> None:\n \"\"\"\n :keyword display: Display properties.\n :paramtype display: ~azure.mgmt.hybridcompute.models.OperationValueDisplay\n \"\"\"\n super().__init__(**kwargs)\n self.origin = None\n self.name = None\n self.display = display\n self.is_data_action = None\n\n\nclass OperationValueDisplay(_serialization.Model):\n \"\"\"Describes the properties of a Hybrid Compute Operation Value Display.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n :ivar operation: The display name of the compute operation.\n :vartype operation: str\n :ivar resource: The display name of the resource the operation applies to.\n :vartype resource: str\n :ivar description: The description of the operation.\n :vartype description: str\n :ivar provider: The resource provider for the operation.\n :vartype provider: str\n \"\"\"\n\n _validation = {\n \"operation\": {\"readonly\": True},\n \"resource\": {\"readonly\": True},\n \"description\": {\"readonly\": True},\n \"provider\": {\"readonly\": True},\n }\n\n _attribute_map = {\n \"operation\": {\"key\": \"operation\", \"type\": \"str\"},\n \"resource\": {\"key\": \"resource\", \"type\": \"str\"},\n \"description\": {\"key\": \"description\", \"type\": \"str\"},\n \"provider\": {\"key\": \"provider\", \"type\": \"str\"},\n }\n\n def __init__(self, **kwargs: Any) -> None:\n \"\"\" \"\"\"\n super().__init__(**kwargs)\n self.operation = None\n self.resource = None\n self.description = None\n self.provider = None\n\n\nclass OSProfile(_serialization.Model):\n \"\"\"Specifies the operating system settings for the hybrid machine.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n :ivar computer_name: Specifies the host OS name of the hybrid machine.\n 
:vartype computer_name: str\n :ivar windows_configuration: Specifies the windows configuration for update management.\n :vartype windows_configuration: ~azure.mgmt.hybridcompute.models.OSProfileWindowsConfiguration\n :ivar linux_configuration: Specifies the linux configuration for update management.\n :vartype linux_configuration: ~azure.mgmt.hybridcompute.models.OSProfileLinuxConfiguration\n \"\"\"\n\n _validation = {\n \"computer_name\": {\"readonly\": True},\n }\n\n _attribute_map = {\n \"computer_name\": {\"key\": \"computerName\", \"type\": \"str\"},\n \"windows_configuration\": {\"key\": \"windowsConfiguration\", \"type\": \"OSProfileWindowsConfiguration\"},\n \"linux_configuration\": {\"key\": \"linuxConfiguration\", \"type\": \"OSProfileLinuxConfiguration\"},\n }\n\n def __init__(\n self,\n *,\n windows_configuration: Optional[\"_models.OSProfileWindowsConfiguration\"] = None,\n linux_configuration: Optional[\"_models.OSProfileLinuxConfiguration\"] = None,\n **kwargs: Any\n ) -> None:\n \"\"\"\n :keyword windows_configuration: Specifies the windows configuration for update management.\n :paramtype windows_configuration:\n ~azure.mgmt.hybridcompute.models.OSProfileWindowsConfiguration\n :keyword linux_configuration: Specifies the linux configuration for update management.\n :paramtype linux_configuration: ~azure.mgmt.hybridcompute.models.OSProfileLinuxConfiguration\n \"\"\"\n super().__init__(**kwargs)\n self.computer_name = None\n self.windows_configuration = windows_configuration\n self.linux_configuration = linux_configuration\n\n\nclass OSProfileLinuxConfiguration(_serialization.Model):\n \"\"\"Specifies the linux configuration for update management.\n\n :ivar assessment_mode: Specifies the assessment mode. Known values are: \"ImageDefault\" and\n \"AutomaticByPlatform\".\n :vartype assessment_mode: str or ~azure.mgmt.hybridcompute.models.AssessmentModeTypes\n :ivar patch_mode: Specifies the patch mode. 
Known values are: \"ImageDefault\",\n \"AutomaticByPlatform\", \"AutomaticByOS\", and \"Manual\".\n :vartype patch_mode: str or ~azure.mgmt.hybridcompute.models.PatchModeTypes\n \"\"\"\n\n _attribute_map = {\n \"assessment_mode\": {\"key\": \"patchSettings.assessmentMode\", \"type\": \"str\"},\n \"patch_mode\": {\"key\": \"patchSettings.patchMode\", \"type\": \"str\"},\n }\n\n def __init__(\n self,\n *,\n assessment_mode: Optional[Union[str, \"_models.AssessmentModeTypes\"]] = None,\n patch_mode: Optional[Union[str, \"_models.PatchModeTypes\"]] = None,\n **kwargs: Any\n ) -> None:\n \"\"\"\n :keyword assessment_mode: Specifies the assessment mode. Known values are: \"ImageDefault\" and\n \"AutomaticByPlatform\".\n :paramtype assessment_mode: str or ~azure.mgmt.hybridcompute.models.AssessmentModeTypes\n :keyword patch_mode: Specifies the patch mode. Known values are: \"ImageDefault\",\n \"AutomaticByPlatform\", \"AutomaticByOS\", and \"Manual\".\n :paramtype patch_mode: str or ~azure.mgmt.hybridcompute.models.PatchModeTypes\n \"\"\"\n super().__init__(**kwargs)\n self.assessment_mode = assessment_mode\n self.patch_mode = patch_mode\n\n\nclass OSProfileWindowsConfiguration(_serialization.Model):\n \"\"\"Specifies the windows configuration for update management.\n\n :ivar assessment_mode: Specifies the assessment mode. Known values are: \"ImageDefault\" and\n \"AutomaticByPlatform\".\n :vartype assessment_mode: str or ~azure.mgmt.hybridcompute.models.AssessmentModeTypes\n :ivar patch_mode: Specifies the patch mode. 
Known values are: \"ImageDefault\",\n \"AutomaticByPlatform\", \"AutomaticByOS\", and \"Manual\".\n :vartype patch_mode: str or ~azure.mgmt.hybridcompute.models.PatchModeTypes\n \"\"\"\n\n _attribute_map = {\n \"assessment_mode\": {\"key\": \"patchSettings.assessmentMode\", \"type\": \"str\"},\n \"patch_mode\": {\"key\": \"patchSettings.patchMode\", \"type\": \"str\"},\n }\n\n def __init__(\n self,\n *,\n assessment_mode: Optional[Union[str, \"_models.AssessmentModeTypes\"]] = None,\n patch_mode: Optional[Union[str, \"_models.PatchModeTypes\"]] = None,\n **kwargs: Any\n ) -> None:\n \"\"\"\n :keyword assessment_mode: Specifies the assessment mode. Known values are: \"ImageDefault\" and\n \"AutomaticByPlatform\".\n :paramtype assessment_mode: str or ~azure.mgmt.hybridcompute.models.AssessmentModeTypes\n :keyword patch_mode: Specifies the patch mode. Known values are: \"ImageDefault\",\n \"AutomaticByPlatform\", \"AutomaticByOS\", and \"Manual\".\n :paramtype patch_mode: str or ~azure.mgmt.hybridcompute.models.PatchModeTypes\n \"\"\"\n super().__init__(**kwargs)\n self.assessment_mode = assessment_mode\n self.patch_mode = patch_mode\n\n\nclass ProxyResource(Resource):\n \"\"\"The resource model definition for a Azure Resource Manager proxy resource. It will not have\n tags and a location.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n :ivar id: Fully qualified resource ID for the resource. Ex -\n /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.\n :vartype id: str\n :ivar name: The name of the resource.\n :vartype name: str\n :ivar type: The type of the resource. E.g. 
\"Microsoft.Compute/virtualMachines\" or\n \"Microsoft.Storage/storageAccounts\".\n :vartype type: str\n \"\"\"\n\n _validation = {\n \"id\": {\"readonly\": True},\n \"name\": {\"readonly\": True},\n \"type\": {\"readonly\": True},\n }\n\n _attribute_map = {\n \"id\": {\"key\": \"id\", \"type\": \"str\"},\n \"name\": {\"key\": \"name\", \"type\": \"str\"},\n \"type\": {\"key\": \"type\", \"type\": \"str\"},\n }\n\n def __init__(self, **kwargs: Any) -> None:\n \"\"\" \"\"\"\n super().__init__(**kwargs)\n\n\nclass PrivateEndpointConnection(ProxyResource):\n \"\"\"A private endpoint connection.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n :ivar id: Fully qualified resource ID for the resource. Ex -\n /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.\n :vartype id: str\n :ivar name: The name of the resource.\n :vartype name: str\n :ivar type: The type of the resource. E.g. 
\"Microsoft.Compute/virtualMachines\" or\n \"Microsoft.Storage/storageAccounts\".\n :vartype type: str\n :ivar properties: Resource properties.\n :vartype properties: ~azure.mgmt.hybridcompute.models.PrivateEndpointConnectionProperties\n :ivar system_data: The system meta data relating to this resource.\n :vartype system_data: ~azure.mgmt.hybridcompute.models.SystemData\n \"\"\"\n\n _validation = {\n \"id\": {\"readonly\": True},\n \"name\": {\"readonly\": True},\n \"type\": {\"readonly\": True},\n \"system_data\": {\"readonly\": True},\n }\n\n _attribute_map = {\n \"id\": {\"key\": \"id\", \"type\": \"str\"},\n \"name\": {\"key\": \"name\", \"type\": \"str\"},\n \"type\": {\"key\": \"type\", \"type\": \"str\"},\n \"properties\": {\"key\": \"properties\", \"type\": \"PrivateEndpointConnectionProperties\"},\n \"system_data\": {\"key\": \"systemData\", \"type\": \"SystemData\"},\n }\n\n def __init__(\n self, *, properties: Optional[\"_models.PrivateEndpointConnectionProperties\"] = None, **kwargs: Any\n ) -> None:\n \"\"\"\n :keyword properties: Resource properties.\n :paramtype properties: ~azure.mgmt.hybridcompute.models.PrivateEndpointConnectionProperties\n \"\"\"\n super().__init__(**kwargs)\n self.properties = properties\n self.system_data = None\n\n\nclass PrivateEndpointConnectionDataModel(_serialization.Model):\n \"\"\"The Data Model for a Private Endpoint Connection associated with a Private Link Scope.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n :ivar id: The ARM Resource Id of the Private Endpoint.\n :vartype id: str\n :ivar name: The Name of the Private Endpoint.\n :vartype name: str\n :ivar type: Azure resource type.\n :vartype type: str\n :ivar properties: The Private Endpoint Connection properties.\n :vartype properties: ~azure.mgmt.hybridcompute.models.PrivateEndpointConnectionProperties\n \"\"\"\n\n _validation = {\n \"id\": {\"readonly\": True},\n \"name\": {\"readonly\": True},\n \"type\": 
{\"readonly\": True},\n }\n\n _attribute_map = {\n \"id\": {\"key\": \"id\", \"type\": \"str\"},\n \"name\": {\"key\": \"name\", \"type\": \"str\"},\n \"type\": {\"key\": \"type\", \"type\": \"str\"},\n \"properties\": {\"key\": \"properties\", \"type\": \"PrivateEndpointConnectionProperties\"},\n }\n\n def __init__(\n self, *, properties: Optional[\"_models.PrivateEndpointConnectionProperties\"] = None, **kwargs: Any\n ) -> None:\n \"\"\"\n :keyword properties: The Private Endpoint Connection properties.\n :paramtype properties: ~azure.mgmt.hybridcompute.models.PrivateEndpointConnectionProperties\n \"\"\"\n super().__init__(**kwargs)\n self.id = None\n self.name = None\n self.type = None\n self.properties = properties\n\n\nclass PrivateEndpointConnectionListResult(_serialization.Model):\n \"\"\"A list of private endpoint connections.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n :ivar value: Array of results.\n :vartype value: list[~azure.mgmt.hybridcompute.models.PrivateEndpointConnection]\n :ivar next_link: Link to retrieve next page of results.\n :vartype next_link: str\n \"\"\"\n\n _validation = {\n \"value\": {\"readonly\": True},\n \"next_link\": {\"readonly\": True},\n }\n\n _attribute_map = {\n \"value\": {\"key\": \"value\", \"type\": \"[PrivateEndpointConnection]\"},\n \"next_link\": {\"key\": \"nextLink\", \"type\": \"str\"},\n }\n\n def __init__(self, **kwargs: Any) -> None:\n \"\"\" \"\"\"\n super().__init__(**kwargs)\n self.value = None\n self.next_link = None\n\n\nclass PrivateEndpointConnectionProperties(_serialization.Model):\n \"\"\"Properties of a private endpoint connection.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n :ivar private_endpoint: Private endpoint which the connection belongs to.\n :vartype private_endpoint: ~azure.mgmt.hybridcompute.models.PrivateEndpointProperty\n :ivar private_link_service_connection_state: Connection state of the 
private endpoint\n connection.\n :vartype private_link_service_connection_state:\n ~azure.mgmt.hybridcompute.models.PrivateLinkServiceConnectionStateProperty\n :ivar provisioning_state: State of the private endpoint connection.\n :vartype provisioning_state: str\n :ivar group_ids: List of group IDs.\n :vartype group_ids: list[str]\n \"\"\"\n\n _validation = {\n \"provisioning_state\": {\"readonly\": True},\n \"group_ids\": {\"readonly\": True},\n }\n\n _attribute_map = {\n \"private_endpoint\": {\"key\": \"privateEndpoint\", \"type\": \"PrivateEndpointProperty\"},\n \"private_link_service_connection_state\": {\n \"key\": \"privateLinkServiceConnectionState\",\n \"type\": \"PrivateLinkServiceConnectionStateProperty\",\n },\n \"provisioning_state\": {\"key\": \"provisioningState\", \"type\": \"str\"},\n \"group_ids\": {\"key\": \"groupIds\", \"type\": \"[str]\"},\n }\n\n def __init__(\n self,\n *,\n private_endpoint: Optional[\"_models.PrivateEndpointProperty\"] = None,\n private_link_service_connection_state: Optional[\"_models.PrivateLinkServiceConnectionStateProperty\"] = None,\n **kwargs: Any\n ) -> None:\n \"\"\"\n :keyword private_endpoint: Private endpoint which the connection belongs to.\n :paramtype private_endpoint: ~azure.mgmt.hybridcompute.models.PrivateEndpointProperty\n :keyword private_link_service_connection_state: Connection state of the private endpoint\n connection.\n :paramtype private_link_service_connection_state:\n ~azure.mgmt.hybridcompute.models.PrivateLinkServiceConnectionStateProperty\n \"\"\"\n super().__init__(**kwargs)\n self.private_endpoint = private_endpoint\n self.private_link_service_connection_state = private_link_service_connection_state\n self.provisioning_state = None\n self.group_ids = None\n\n\nclass PrivateEndpointProperty(_serialization.Model):\n \"\"\"Private endpoint which the connection belongs to.\n\n :ivar id: Resource id of the private endpoint.\n :vartype id: str\n \"\"\"\n\n _attribute_map = {\n \"id\": {\"key\": 
\"id\", \"type\": \"str\"},\n }\n\n def __init__(self, *, id: Optional[str] = None, **kwargs: Any) -> None: # pylint: disable=redefined-builtin\n \"\"\"\n :keyword id: Resource id of the private endpoint.\n :paramtype id: str\n \"\"\"\n super().__init__(**kwargs)\n self.id = id\n\n\nclass PrivateLinkResource(ProxyResource):\n \"\"\"A private link resource.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n :ivar id: Fully qualified resource ID for the resource. Ex -\n /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.\n :vartype id: str\n :ivar name: The name of the resource.\n :vartype name: str\n :ivar type: The type of the resource. E.g. \"Microsoft.Compute/virtualMachines\" or\n \"Microsoft.Storage/storageAccounts\".\n :vartype type: str\n :ivar properties: Resource properties.\n :vartype properties: ~azure.mgmt.hybridcompute.models.PrivateLinkResourceProperties\n :ivar system_data: The system meta data relating to this resource.\n :vartype system_data: ~azure.mgmt.hybridcompute.models.SystemData\n \"\"\"\n\n _validation = {\n \"id\": {\"readonly\": True},\n \"name\": {\"readonly\": True},\n \"type\": {\"readonly\": True},\n \"system_data\": {\"readonly\": True},\n }\n\n _attribute_map = {\n \"id\": {\"key\": \"id\", \"type\": \"str\"},\n \"name\": {\"key\": \"name\", \"type\": \"str\"},\n \"type\": {\"key\": \"type\", \"type\": \"str\"},\n \"properties\": {\"key\": \"properties\", \"type\": \"PrivateLinkResourceProperties\"},\n \"system_data\": {\"key\": \"systemData\", \"type\": \"SystemData\"},\n }\n\n def __init__(self, *, properties: Optional[\"_models.PrivateLinkResourceProperties\"] = None, **kwargs: Any) -> None:\n \"\"\"\n :keyword properties: Resource properties.\n :paramtype properties: ~azure.mgmt.hybridcompute.models.PrivateLinkResourceProperties\n \"\"\"\n super().__init__(**kwargs)\n self.properties = properties\n 
self.system_data = None\n\n\nclass PrivateLinkResourceListResult(_serialization.Model):\n \"\"\"A list of private link resources.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n :ivar value: Array of results.\n :vartype value: list[~azure.mgmt.hybridcompute.models.PrivateLinkResource]\n :ivar next_link: Link to retrieve next page of results.\n :vartype next_link: str\n \"\"\"\n\n _validation = {\n \"value\": {\"readonly\": True},\n \"next_link\": {\"readonly\": True},\n }\n\n _attribute_map = {\n \"value\": {\"key\": \"value\", \"type\": \"[PrivateLinkResource]\"},\n \"next_link\": {\"key\": \"nextLink\", \"type\": \"str\"},\n }\n\n def __init__(self, **kwargs: Any) -> None:\n \"\"\" \"\"\"\n super().__init__(**kwargs)\n self.value = None\n self.next_link = None\n\n\nclass PrivateLinkResourceProperties(_serialization.Model):\n \"\"\"Properties of a private link resource.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n :ivar group_id: The private link resource group id.\n :vartype group_id: str\n :ivar required_members: The private link resource required member names.\n :vartype required_members: list[str]\n :ivar required_zone_names: Required DNS zone names of the the private link resource.\n :vartype required_zone_names: list[str]\n \"\"\"\n\n _validation = {\n \"group_id\": {\"readonly\": True},\n \"required_members\": {\"readonly\": True},\n \"required_zone_names\": {\"readonly\": True},\n }\n\n _attribute_map = {\n \"group_id\": {\"key\": \"groupId\", \"type\": \"str\"},\n \"required_members\": {\"key\": \"requiredMembers\", \"type\": \"[str]\"},\n \"required_zone_names\": {\"key\": \"requiredZoneNames\", \"type\": \"[str]\"},\n }\n\n def __init__(self, **kwargs: Any) -> None:\n \"\"\" \"\"\"\n super().__init__(**kwargs)\n self.group_id = None\n self.required_members = None\n self.required_zone_names = None\n\n\nclass 
PrivateLinkScopeValidationDetails(_serialization.Model):\n \"\"\"PrivateLinkScopeValidationDetails.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n :ivar id: Azure resource Id.\n :vartype id: str\n :ivar public_network_access: Indicates whether machines associated with the private link scope\n can also use public Azure Arc service endpoints. Known values are: \"Enabled\" and \"Disabled\".\n :vartype public_network_access: str or ~azure.mgmt.hybridcompute.models.PublicNetworkAccessType\n :ivar connection_details: List of Private Endpoint Connection details.\n :vartype connection_details: list[~azure.mgmt.hybridcompute.models.ConnectionDetail]\n \"\"\"\n\n _validation = {\n \"id\": {\"readonly\": True},\n }\n\n _attribute_map = {\n \"id\": {\"key\": \"id\", \"type\": \"str\"},\n \"public_network_access\": {\"key\": \"publicNetworkAccess\", \"type\": \"str\"},\n \"connection_details\": {\"key\": \"connectionDetails\", \"type\": \"[ConnectionDetail]\"},\n }\n\n def __init__(\n self,\n *,\n public_network_access: Union[str, \"_models.PublicNetworkAccessType\"] = \"Disabled\",\n connection_details: Optional[List[\"_models.ConnectionDetail\"]] = None,\n **kwargs: Any\n ) -> None:\n \"\"\"\n :keyword public_network_access: Indicates whether machines associated with the private link\n scope can also use public Azure Arc service endpoints. 
Known values are: \"Enabled\" and\n \"Disabled\".\n :paramtype public_network_access: str or\n ~azure.mgmt.hybridcompute.models.PublicNetworkAccessType\n :keyword connection_details: List of Private Endpoint Connection details.\n :paramtype connection_details: list[~azure.mgmt.hybridcompute.models.ConnectionDetail]\n \"\"\"\n super().__init__(**kwargs)\n self.id = None\n self.public_network_access = public_network_access\n self.connection_details = connection_details\n\n\nclass PrivateLinkServiceConnectionStateProperty(_serialization.Model):\n \"\"\"State of the private endpoint connection.\n\n Variables are only populated by the server, and will be ignored when sending a request.\n\n All required parameters must be populated in order to send to Azure.\n\n :ivar status: The private link service connection status. Required.\n :vartype status: str\n :ivar description: The private link service connection description. Required.\n :vartype description: str\n :ivar actions_required: The actions required for private link service connection.\n :vartype actions_required: str\n \"\"\"\n\n _validation = {\n \"status\": {\"required\": True},\n \"description\": {\"required\": True},\n \"actions_required\": {\"readonly\": True},\n }\n\n _attribute_map = {\n \"status\": {\"key\": \"status\", \"type\": \"str\"},\n \"description\": {\"key\": \"description\", \"type\": \"str\"},\n \"actions_required\": {\"key\": \"actionsRequired\", \"type\": \"str\"},\n }\n\n def __init__(self, *, status: str, description: str, **kwargs: Any) -> None:\n \"\"\"\n :keyword status: The private link service connection status. Required.\n :paramtype status: str\n :keyword description: The private link service connection description. 
Required.\n :paramtype description: str\n \"\"\"\n super().__init__(**kwargs)\n self.status = status\n self.description = description\n self.actions_required = None\n\n\nclass ServiceStatus(_serialization.Model):\n \"\"\"Describes the status and behavior of a service.\n\n :ivar status: The current status of the service.\n :vartype status: str\n :ivar startup_type: The behavior of the service when the Arc-enabled machine starts up.\n :vartype startup_type: str\n \"\"\"\n\n _attribute_map = {\n \"status\": {\"key\": \"status\", \"type\": \"str\"},\n \"startup_type\": {\"key\": \"startupType\", \"type\": \"str\"},\n }\n\n def __init__(self, *, status: Optional[str] = None, startup_type: Optional[str] = None, **kwargs: Any) -> None:\n \"\"\"\n :keyword status: The current status of the service.\n :paramtype status: str\n :keyword startup_type: The behavior of the service when the Arc-enabled machine starts up.\n :paramtype startup_type: str\n \"\"\"\n super().__init__(**kwargs)\n self.status = status\n self.startup_type = startup_type\n\n\nclass ServiceStatuses(_serialization.Model):\n \"\"\"Reports the state and behavior of dependent services.\n\n :ivar extension_service: The state of the extension service on the Arc-enabled machine.\n :vartype extension_service: ~azure.mgmt.hybridcompute.models.ServiceStatus\n :ivar guest_configuration_service: The state of the guest configuration service on the\n Arc-enabled machine.\n :vartype guest_configuration_service: ~azure.mgmt.hybridcompute.models.ServiceStatus\n \"\"\"\n\n _attribute_map = {\n \"extension_service\": {\"key\": \"extensionService\", \"type\": \"ServiceStatus\"},\n \"guest_configuration_service\": {\"key\": \"guestConfigurationService\", \"type\": \"ServiceStatus\"},\n }\n\n def __init__(\n self,\n *,\n extension_service: Optional[\"_models.ServiceStatus\"] = None,\n guest_configuration_service: Optional[\"_models.ServiceStatus\"] = None,\n **kwargs: Any\n ) -> None:\n \"\"\"\n :keyword extension_service: The 
state of the extension service on the Arc-enabled machine.\n :paramtype extension_service: ~azure.mgmt.hybridcompute.models.ServiceStatus\n :keyword guest_configuration_service: The state of the guest configuration service on the\n Arc-enabled machine.\n :paramtype guest_configuration_service: ~azure.mgmt.hybridcompute.models.ServiceStatus\n \"\"\"\n super().__init__(**kwargs)\n self.extension_service = extension_service\n self.guest_configuration_service = guest_configuration_service\n\n\nclass SystemData(_serialization.Model):\n \"\"\"Metadata pertaining to creation and last modification of the resource.\n\n :ivar created_by: The identity that created the resource.\n :vartype created_by: str\n :ivar created_by_type: The type of identity that created the resource. Known values are:\n \"User\", \"Application\", \"ManagedIdentity\", and \"Key\".\n :vartype created_by_type: str or ~azure.mgmt.hybridcompute.models.CreatedByType\n :ivar created_at: The timestamp of resource creation (UTC).\n :vartype created_at: ~datetime.datetime\n :ivar last_modified_by: The identity that last modified the resource.\n :vartype last_modified_by: str\n :ivar last_modified_by_type: The type of identity that last modified the resource. 
Known values\n are: \"User\", \"Application\", \"ManagedIdentity\", and \"Key\".\n :vartype last_modified_by_type: str or ~azure.mgmt.hybridcompute.models.CreatedByType\n :ivar last_modified_at: The timestamp of resource last modification (UTC).\n :vartype last_modified_at: ~datetime.datetime\n \"\"\"\n\n _attribute_map = {\n \"created_by\": {\"key\": \"createdBy\", \"type\": \"str\"},\n \"created_by_type\": {\"key\": \"createdByType\", \"type\": \"str\"},\n \"created_at\": {\"key\": \"createdAt\", \"type\": \"iso-8601\"},\n \"last_modified_by\": {\"key\": \"lastModifiedBy\", \"type\": \"str\"},\n \"last_modified_by_type\": {\"key\": \"lastModifiedByType\", \"type\": \"str\"},\n \"last_modified_at\": {\"key\": \"lastModifiedAt\", \"type\": \"iso-8601\"},\n }\n\n def __init__(\n self,\n *,\n created_by: Optional[str] = None,\n created_by_type: Optional[Union[str, \"_models.CreatedByType\"]] = None,\n created_at: Optional[datetime.datetime] = None,\n last_modified_by: Optional[str] = None,\n last_modified_by_type: Optional[Union[str, \"_models.CreatedByType\"]] = None,\n last_modified_at: Optional[datetime.datetime] = None,\n **kwargs: Any\n ) -> None:\n \"\"\"\n :keyword created_by: The identity that created the resource.\n :paramtype created_by: str\n :keyword created_by_type: The type of identity that created the resource. Known values are:\n \"User\", \"Application\", \"ManagedIdentity\", and \"Key\".\n :paramtype created_by_type: str or ~azure.mgmt.hybridcompute.models.CreatedByType\n :keyword created_at: The timestamp of resource creation (UTC).\n :paramtype created_at: ~datetime.datetime\n :keyword last_modified_by: The identity that last modified the resource.\n :paramtype last_modified_by: str\n :keyword last_modified_by_type: The type of identity that last modified the resource. 
Known\n values are: \"User\", \"Application\", \"ManagedIdentity\", and \"Key\".\n :paramtype last_modified_by_type: str or ~azure.mgmt.hybridcompute.models.CreatedByType\n :keyword last_modified_at: The timestamp of resource last modification (UTC).\n :paramtype last_modified_at: ~datetime.datetime\n \"\"\"\n super().__init__(**kwargs)\n self.created_by = created_by\n self.created_by_type = created_by_type\n self.created_at = created_at\n self.last_modified_by = last_modified_by\n self.last_modified_by_type = last_modified_by_type\n self.last_modified_at = last_modified_at\n\n\nclass TagsResource(_serialization.Model):\n \"\"\"A container holding only the Tags for a resource, allowing the user to update the tags on a\n PrivateLinkScope instance.\n\n :ivar tags: Resource tags.\n :vartype tags: dict[str, str]\n \"\"\"\n\n _attribute_map = {\n \"tags\": {\"key\": \"tags\", \"type\": \"{str}\"},\n }\n\n def __init__(self, *, tags: Optional[Dict[str, str]] = None, **kwargs: Any) -> None:\n \"\"\"\n :keyword tags: Resource tags.\n :paramtype tags: dict[str, str]\n \"\"\"\n super().__init__(**kwargs)\n self.tags = tags\n","repo_name":"Azure/azure-sdk-for-python","sub_path":"sdk/hybridcompute/azure-mgmt-hybridcompute/azure/mgmt/hybridcompute/models/_models_py3.py","file_name":"_models_py3.py","file_ext":"py","file_size_in_byte":91248,"program_lang":"python","lang":"en","doc_type":"code","stars":3916,"dataset":"github-code","pt":"54"} +{"seq_id":"5515617683","text":"import os\nimport sys\nimport unittest\nimport ConfigParser\n\nsys.path.append('.')\nimport bleachbit.Options\n\n\nclass OptionsTestCase(unittest.TestCase):\n\n \"\"\"Test case for class Options\"\"\"\n\n def test_Options(self):\n \"\"\"Unit test for class Options\"\"\"\n o = bleachbit.Options.options\n value = o.get(\"check_online_updates\")\n\n # toggle a boolean\n o.toggle('check_online_updates')\n self.assertEqual(not value, o.get(\"check_online_updates\"))\n\n # restore original boolean\n 
o.set(\"check_online_updates\", value)\n self.assertEqual(value, o.get(\"check_online_updates\"))\n\n # test auto commit\n shred = o.get(\"shred\")\n o.set(\"shred\", False)\n self.assertFalse(o.get(\"shred\"))\n o.set(\"shred\", True, commit=False)\n self.assertTrue(o.get(\"shred\"))\n o.restore()\n self.assertFalse(o.get(\"shred\"))\n o.set(\"shred\", shred)\n self.assertEqual(o.get(\"shred\"), shred)\n\n # try a list\n list_values = ['a', 'b', 'c']\n o.set_list(\"list_test\", list_values)\n self.assertEqual(list_values, o.get_list(\"list_test\"))\n\n # whitelist\n self.assert_(type(o.get_whitelist_paths() is list))\n whitelist = [('file', '/home/foo'), ('folder', '/home')]\n old_whitelist = o.get_whitelist_paths()\n o.config.remove_section('whitelist/paths')\n self.assert_(type(o.get_whitelist_paths() is list))\n self.assertEqual(o.get_whitelist_paths(), [])\n o.set_whitelist_paths(whitelist)\n self.assert_(type(o.get_whitelist_paths() is list))\n self.assertEqual(set(whitelist), set(o.get_whitelist_paths()))\n o.set_whitelist_paths(old_whitelist)\n self.assertEqual(set(old_whitelist), set(o.get_whitelist_paths()))\n\n # these should always be set\n for bkey in bleachbit.Options.boolean_keys:\n self.assert_(isinstance(o.get(bkey), bool))\n\n # language\n value = o.get_language('en')\n self.assert_(isinstance(value, bool))\n o.set_language('en', True)\n self.assertTrue(o.get_language('en'))\n o.set_language('en', False)\n self.assertFalse(o.get_language('en'))\n o.set_language('en', value)\n\n # tree\n o.set_tree(\"parent\", \"child\", True)\n self.assertTrue(o.get_tree(\"parent\", \"child\"))\n o.set_tree(\"parent\", \"child\", False)\n self.assertFalse(o.get_tree(\"parent\", \"child\"))\n o.config.remove_option(\"tree\", \"parent.child\")\n self.assertFalse(o.get_tree(\"parent\", \"child\"))\n\n def test_purge(self):\n \"\"\"Test purging\"\"\"\n # By default ConfigParser stores keys (the filenames) as lowercase.\n # This needs special consideration when 
combined with purging.\n o1 = bleachbit.Options.Options()\n import tempfile\n dirname = tempfile.mkdtemp('bleachbit_test_options')\n pathname = os.path.join(dirname, 'foo.xml')\n file(pathname, 'w').write('') # make an empty file\n self.assertTrue(os.path.exists(pathname))\n myhash = '0ABCD'\n o1.set_hashpath(pathname, myhash)\n self.assertEqual(myhash, o1.get_hashpath(pathname))\n if 'nt' == os.name:\n # check case sensitivity\n self.assertEqual(myhash, o1.get_hashpath(pathname.upper()))\n del o1\n\n # reopen\n o2 = bleachbit.Options.Options()\n # write something, which triggers the purge\n o2.set('dummypath', 'dummyvalue', 'hashpath')\n # verify the path was not purged\n self.assertTrue(os.path.exists(pathname))\n self.assertEqual(myhash, o2.get_hashpath(pathname))\n\n # delete the path\n os.remove(pathname)\n # close and reopen\n del o2\n o3 = bleachbit.Options.Options()\n # write something, which triggers the purge\n o3.set('dummypath', 'dummyvalue', 'hashpath')\n # verify the path was purged\n self.assertRaises(\n ConfigParser.NoOptionError, lambda: o3.get_hashpath(pathname))\n\n # clean up\n os.rmdir(dirname)\n\n\ndef suite():\n return unittest.makeSuite(OptionsTestCase)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"theatre-x/bleachbit","sub_path":"tests/TestOptions.py","file_name":"TestOptions.py","file_ext":"py","file_size_in_byte":4193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"54"} +{"seq_id":"11926952982","text":"from sys import stdin\r\nfrom collections import deque\r\ndef dfs(u,num):\r\n\tglobal vis, scc,G\r\n\tvis[u] = 1\r\n\tscc[u] = num\r\n\tfor v in G[u]:\r\n\t\tif(vis[v] == 0):\r\n\t\t\tdfs(v,num)\r\n\treturn\r\n\r\ndef dfs_list(u):\r\n\tglobal L, vis, I,G\r\n\tvis[u] = 1\r\n\tfor v in I[u]:\r\n\t\tif(vis[v] == 0):\r\n\t\t\tdfs_list(v)\r\n\tL.append(u)\r\n\treturn\r\n\r\n\r\ndef compute():\r\n\tglobal L,I, scc, vis,G\r\n\tn = len(G)\r\n\tscc = [-1 for i in range(n)]\r\n\r\n\tI = [[] for 
i in range(n)]\r\n\tfor i in range(n):\r\n\t\tfor j in G[i]:\r\n\t\t\tI[j].append(i)\r\n\tvis = [0 for i in range(n)]\r\n\tL = []\r\n\tfor i in range(n):\r\n\t\tif(vis[i] == 0):\r\n\t\t\tdfs_list(i)\r\n\tvis = [0 for i in range(n)]\r\n\tcont = 0\r\n\twhile(len(L)):\r\n\t\ti = L.pop()\r\n\t\tif(vis[i] == 0):\r\n\t\t\tdfs(i,cont)\r\n\t\t\tcont +=\t1\r\n\tprint(cont)\r\n\treturn \r\n\r\n\r\ndef main():\r\n\tglobal G\t\r\n\tline = stdin.readline().strip().split()\r\n\twhile int(line[0])!=0 or int(line[1])!= 0:\t\t\t\r\n\t\tp = int(line[0])\r\n\t\tt = int(line[1])\r\n\t\tG = [[] for _ in range(p)]\t\t\r\n\t\taux={}\r\n\t\tfor i in range(p):\r\n\t\t\tperson = stdin.readline().strip(\"\\n\")\r\n\t\t\taux[person] = i\t\t\t\r\n\t\tfor i in range(t):\t\t\t\r\n\t\t\tq = stdin.readline().strip(\"\\n\")\t\t\t\r\n\t\t\tG[aux[q]].append(aux[stdin.readline().strip(\"\\n\")])\t\t\t\t\t\t\r\n\t\tcompute()\r\n\t\tline = stdin.readline().strip().split()\r\n\treturn\r\nmain()\r\n\r\n","repo_name":"Juanma1909/AGRA","sub_path":"trust.py","file_name":"trust.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"3794741419","text":"import os\nos.system(\"clear\")\n# # You have a list of Disney characters and you want to find out if each of them contain i, o, or u in\n# # their names. Loop through each character in the list and print out the following:\n\n# If the name contains a \"u,\" print out the name plus \"U are so Uniquely U!\"\n# Otherwise if the name contains an \"i,\" print out the name plus \"I bet you're\n# Impressively Intelligent!\"\n# Otherwise if the name contains an \"o,\" print out the name plus \"O My! 
How\n# Original!\"\n# Otherwise, print the name plus \"Ehh, a's and e's are so ordinary.\"\n\ndisney_characters = [\"simba\", \"ariel\", \"pumba\", \"flounder\", \"nala\", \"ursula\",\n\"scar\", \"flotsam\", \"timon\"]\n\nfor Name in disney_characters:\n print(\"\\n\")\n if \"u\" in Name.lower():\n print(\"U are so Uniquely U!\")\n elif \"i\" in Name.lower():\n print(Name,\"\\nI bet you're Impressively Intelligent!\")\n elif \"o\" in Name.lower():\n print(\"O My! How Original!\")\n else:\n print(Name,\"\\nEhh, a's and e's are so ordinary.\")\n\n\n\n","repo_name":"mikestrain/PythonGA","sub_path":"CLASS6 - Dictionaries and More/HW2/challenge_problem1.py","file_name":"challenge_problem1.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"16022303132","text":"import math\nfrom numpy import array\n\ndef calculate_angle(point1, point2, invert=-1):\n if point2[0] - point1[0] == 0: # Prevent didivison by zero\n if point2[1] - point1[1] > 0:\n return 90\n else:\n return -90\n else:\n # Add a minus to the y difference as the y coordinates are inverted in pygame\n return math.degrees(math.atan2(invert*(point2[1] - point1[1]), (point2[0] - point1[0])))\n\ndef calculate_angle_diff(org_angle, food_angle):\n food_angle %= 360\n angle_difference = org_angle - food_angle\n \n if angle_difference > 180: \n angle_difference -= 360\n if angle_difference < -180:\n angle_difference += 360\n \n return angle_difference\n\ndef calculate_distance(point1, point2):\n return math.sqrt((point2[0] - point1[0])**2 + (point2[1]-point1[1])**2)\n\ndef inbox(p, box):\n return all(box[:,0] <= p) and all(p<= box[:,1])\n\n# KdTree for optimizations\nclass KdTree:\n def __init__(self, P, d=0):\n n = len(P)\n m = n // 2\n P.sort(key = lambda x: x[d])\n self.point = P[m]\n self.d = d\n d = (d + 1) % len(P[0])-1 # -1 because then the last element will not be a dimension (wanted since last ele is info 
obj)\n self.left = self.right = None\n if m > 0 :\n self.left = KdTree(P[:m], d)\n if n - (m+1) > 0:\n self.right = KdTree(P[m+1:], d)\n\n def range_search(self, box):\n p = self.point\n if inbox(p[:2], box): # Only first two elements of p (x, y) 3rd is info object\n yield p\n min, max = box[self.d]\n split = p[self.d]\n if self.left is not None and split >= min:\n yield from self.left.range_search(box)\n if self.right is not None and split <= max:\n yield from self.right.range_search(box)","repo_name":"johanvandongen/evolution_simulation","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"54"} +{"seq_id":"40702843253","text":"\nfrom time import sleep, time\nimport os\nimport sys\n# sys.path.append(\"../\")\nsys.path.append(\"./\")\nprint(sys.path)\nfrom tqdm import tqdm\nfrom shutil import copyfile\nfrom setting import config\nimport shutil\n\n\n\n# 生成文件夹CRC-HE-FID\n# 用于FID值评价\n# 使用的两个文件夹CRC-HE/TRAIN,CRC-HE—TRAIN\n\n\nCRC_HE=os.path.join(config['dataset_path'],'train')\nCRC_HE_TRAIN=os.path.join(config['server_path'],'CRC-HE-TRAIN')\n\n# 要复制到的目标文件夹\ntarget=os.path.join(config['server_path'],'CRC-HE-FID')\n\n\n# CRC-HE/TRAIN 复制\n\ntarget_CRC_HE=os.path.join(target,'CRC-HE')\n\nif not os.path.exists(target_CRC_HE):\n os.makedirs(target_CRC_HE)\n\nfolders=os.listdir(CRC_HE) # 目录下的各种文件夹类\nfor fold in folders:\n imgs=os.listdir(os.path.join(CRC_HE,fold))\n for img in tqdm(imgs):\n copyfile(os.path.join(CRC_HE,fold,img),os.path.join(target_CRC_HE,img))\n \n\n# CRC-HE-TRAIN \n\ntarget_CRC_HE_TRAIN=os.path.join(target,'CRC-HE-TRAIN')\n\nif not os.path.exists(target_CRC_HE_TRAIN):\n os.makedirs(target_CRC_HE_TRAIN)\n\nfolders=os.listdir(CRC_HE_TRAIN) # 目录下的各种文件夹类\nfor fold in folders:\n imgs=os.listdir(os.path.join(CRC_HE_TRAIN,fold))\n for img in tqdm(imgs):\n 
copyfile(os.path.join(CRC_HE_TRAIN,fold,img),os.path.join(target_CRC_HE_TRAIN,img))","repo_name":"mrzhu666/classification-models-pytorch","sub_path":"dataprocessing/generateFID.py","file_name":"generateFID.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"26782227876","text":"from collections import deque\nimport random\n\n\nclass ListNode:\n def __init__(self, val):\n self.val = val\n self.next = None\n\n\nclass TreeNode:\n def __init__(self, val):\n self.val = val\n self.left = None\n self.right = None\n\n\nclass Solution:\n def minimal_tree(self, lst):\n if len(lst) == 0:\n return None\n elif len(lst) == 1:\n return TreeNode(lst[0])\n else:\n midpt = len(lst)//2\n node = TreeNode(lst[midpt])\n node.left = self.minimal_tree(lst[:midpt])\n node.right = self.minimal_tree(lst[midpt+1:])\n return node\n\n def inorder_traversal(self, node):\n if node:\n self.inorder_traversal(node.left)\n print(node.val)\n self.inorder_traversal(node.right)\n\n def list_of_depths(self, node):\n q = deque()\n q.append(node)\n final_list = []\n while q:\n ptr = ListNode(-1)\n head = ptr\n for i in range(len(q)):\n node = q.popleft()\n list_node = ListNode(node.val)\n ptr.next = list_node\n ptr = ptr.next\n if node.left:\n q.append(node.left)\n if node.right:\n q.append(node.right)\n final_list.append(head.next)\n return final_list\n\n def successor(self, root, val):\n node = self.find_in_bst(root, val)\n if node.right:\n temp = node.right\n while temp.left:\n temp = temp.left\n return temp\n else:\n ancestor = root\n successor = None\n while ancestor.val != node.val:\n if ancestor.val > node.val:\n successor = ancestor\n ancestor = ancestor.left\n else:\n ancestor = ancestor.right\n return successor\n\n def find_in_bst(self, root, val):\n if root.val == val:\n return root\n elif root.val > val:\n return self.find_in_bst(root.left, val)\n else:\n return self.find_in_bst(root.right, 
val)\n\n def weave_lists(self, first, second, prefix, results):\n if not first or not second:\n results.append(prefix + first + second)\n return\n first_head = first[1:]\n first_prefix = first[0]\n self.weave_lists(first_head, second, prefix + [first_prefix], results)\n second_head = second[1:]\n second_prefix = second[0]\n self.weave_lists(first, second_head, prefix + [second_prefix], results)\n\n def bst_sequences(self, root):\n if not root:\n return [[]]\n sol = []\n prefix = [root.val]\n left = self.bst_sequences(root.left)\n right = self.bst_sequences(root.right)\n for i in range(len(left)):\n for j in range(len(right)):\n weaved = []\n self.weave_lists(left[i], right[j], prefix, weaved)\n sol += weaved\n return sol\n\n def insert(self, root, x):\n if not root:\n root = TreeNode(x)\n else:\n if root.val < x:\n if not root.right:\n root.right = TreeNode(x)\n else:\n self.insert(root.right, x)\n else:\n if not root.left:\n root.left = TreeNode(x)\n else:\n self.insert(root.left, x)\n\n def find(self, root, x):\n if not root:\n return None\n else:\n if root.val == x:\n return root\n elif root.val > x:\n return self.find(root.left, x)\n else:\n return self.find(root.right, x)\n\n def delete_deepest(self, root):\n temp = root\n while temp.right.right:\n temp = temp.right\n temp.right = None\n\n def delete(self, root, x):\n del_node = self.find(root, x)\n temp = root\n while temp.right:\n temp = temp.right\n value = temp.val\n del_node.val = value\n self.delete_deepest(root)\n return root\n\n def build_inorder_traversal(self, root):\n if not root:\n return []\n elif not root.right and not root.left:\n return [root.val]\n else:\n return self.build_inorder_traversal(root.left) + [root.val] + self.build_inorder_traversal(root.right)\n\n def get_random_node(self, root):\n lst = self.build_inorder_traversal(root)\n index = random.randrange(0, len(lst)-1)\n return lst[index]\n\n\nclass Graph:\n def __init__(self, length):\n self.length = length\n self.adj = []\n for i in 
range(length):\n self.adj.append([])\n\n def add_edge(self, n1, n2):\n self.adj[n1].append(n2)\n\n def remove_edge(self, n1, n2):\n for i in range(len(self.adj[n1])):\n if self.adj[i] == n2:\n break\n del self.adj[n1][i]\n\n def route_between_nodes(self, n1, n2):\n n1_visited = [False] * self.length\n n2_visited = [False] * self.length\n d1, d2 = deque(), deque()\n d1.append(n1)\n d2.append(n2)\n n1_visited[n1] = True\n n2_visited[n2] = True\n while d1 and d2:\n node1 = d1.popleft()\n node2 = d2.popleft()\n if node1 == n2 or node2 == n1:\n return True\n for n in self.adj[node1]:\n if not n1_visited[n]:\n n1_visited[n] = True\n d1.append(n)\n for n in self.adj[node2]:\n if not n2_visited[n]:\n n2_visited[n] = True\n d2.append(n)\n for i in range(self.length):\n if n1_visited[i] and n2_visited[i]:\n return True\n return False\n\n def build_order(self, dependencies):\n self.add_dependencies(dependencies)\n indegree = self.get_indegree()\n build_list = []\n built = [False] * self.length\n while len(build_list) < self.length:\n poss = False\n added = 0\n i = 0\n while i < len(indegree) and not poss:\n if indegree[i] == 0 and not built[i]:\n added = i\n poss = True\n i += 1\n if not poss:\n return []\n build_list.append(added)\n built[added] = True\n for n in self.adj[added]:\n indegree[n] -= 1\n self.adj[added].clear()\n return build_list\n\n def add_dependencies(self, dependencies):\n for dep in dependencies:\n self.add_edge(dep[0], dep[1])\n\n def get_indegree(self):\n indegree = [0] * self.length\n for i in range(self.length):\n for n in self.adj[i]:\n indegree[n] += 1\n return indegree\n\n def bfs(self):\n visited = [False] * self.length\n q = deque()\n q.append(0)\n while q:\n node = q.popleft()\n print(node)\n for n in self.adj[node]:\n if not visited[n]:\n visited[n] = True\n 
q.append(n)\n\n\n\n\n\n\n","repo_name":"ksingh23/CTCIPractice","sub_path":"treesgraphs.py","file_name":"treesgraphs.py","file_ext":"py","file_size_in_byte":7309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"22735020594","text":"\"\"\"\nExport a series of tagged images to a specified directory, from a timed video capture\n\n:return: list of tagged filenames\n\n>>> e = RPS()\n>>> e.start_capture()\nStarting capture...\n\"\"\"\n\nimport cv2\n\nclass RPS:\n # Area to which images will be cropped\n CROP = ((355,100), (600,400), (300, 0, 0)) \n\n def __init__(self, webcam=0):\n self.images = []\n self.webcam = webcam\n self.dest_folder = './testdata/'\n\n def start_training(self):\n cap = cv2.VideoCapture(self.webcam)\n print(\"Starting capture...\")\n \n while True:\n ret, frame = cap.read()\n frame = cv2.flip(frame, 1)\n frame = cv2.rectangle(frame, *self.CROP, 2)\n\n cropped = frame[\n self.CROP[0][1] + 2 : self.CROP[1][1] - 1,\n self.CROP[0][0] + 2 : self.CROP[1][0] - 1\n ]\n\n cv2.imshow(\"img\", frame)\n\n if cv2.waitKey(1) & 0xFF == ord(\"q\"):\n break\n\n cv2.destroyAllWindows()\n\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod()","repo_name":"Perrogramador/rps-ai","sub_path":"training_generator.py","file_name":"training_generator.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"14994604839","text":"from utils_tree_sitter import Parser, JAVA_LANGUAGE\n\ndef extract_functions(file_path):\n # Setup Parse-c\n parser = Parser()\n parser.set_language(JAVA_LANGUAGE)\n \n with open(file_path, \"r\") as file:\n source_code = file.read()\n \n tree = parser.parse(bytes(source_code, \"utf-8\"))\n root_node = tree.root_node\n \n source_code = source_code.split(\"\\n\")\n \n \n function_bodies = {}\n function_names = []\n \n for child_node in root_node.children:\n \n \n \n if 
child_node.type == \"interface_declaration\":\n class_node = child_node\n \n # class_declaration = \n \n # Class \n for index in range(class_node.child_count):\n node = class_node.children[index]\n \n \n\n\n \n # return function_bodies, function_names\n\n\ndef test():\n test_file = \"DataSet/CommitsCollection/Java/hyperledger_besu/a9bdd29128eca70d1922b282aa2b479ab492b563/ethereum_api_src_main_java_org_hyperledger_besu_ethereum_api_jsonrpc_internal_methods_JsonRpcMethod.java\"\n function_bodies, function_names = extract_functions(test_file)\n \n print(function_bodies, function_names)\n \ntest()","repo_name":"fe1w0/VulnInfoSearch","sub_path":"Programs/utils/handle_file_factory/parse_java_file_new.py","file_name":"parse_java_file_new.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"20830376790","text":"# encoding: utf-8\n\nfrom unittest import TestCase\n\nfrom marrow.util.patterns import Borg\n\n\n\nclass TestBorg(TestCase):\n def test_borg(self):\n a = Borg()\n b = Borg()\n \n assert a._dict is b._dict, \"I am Hue.\"\n assert a is not b, \"too alike\"\n \n a.foo = 1\n \n self.assertEqual(a.foo, b.foo)\n","repo_name":"marrow/util","sub_path":"tests/test_patterns.py","file_name":"test_patterns.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"54"} +{"seq_id":"39348447082","text":"from icalendar import Calendar, Event\n\nfrom bs4 import BeautifulSoup\nimport urllib.request\nimport re\nimport datetime\n\nimport sys\n\nif not sys.warnoptions:\n import warnings\n\n# very hacky regex to recognise the date or timerange fields\n# feel free to improve\ndatereg = re.compile(r\"[\\w\\s]+[\\d]+\\.[\\d]+\\.[\\d]+\")\ntimereges = re.compile(r\"[\\d]+:[\\d\\s]+-[\\d\\s]+:[\\d\\s]+\")\n\n\nclass course():\n def __init__(self, url, f):\n # fetching HTML\n fp = urllib.request.urlopen(url)\n 
htmldoc = fp.read().decode(\"utf8\")\n fp.close()\n # parsing the HTML\n s = BeautifulSoup(htmldoc, 'html.parser')\n tables = self.findTable(s)\n self.cal = Calendar()\n self.events = []\n\n for table in tables:\n self.parseWeek(table)\n\n self.write(f)\n\n def findTable(self, s):\n return(s.find_all(\"table\", class_=\"tab-cco-program\"))\n\n def asUNIX(self, date, time=None, start=True, defaultStart=\"09:00\",\n defaulEnd=\"17:00\"):\n '''function to transform our text into unix timestamps'''\n d = date.split(\" \")[1]\n\n # if a time was given\n if time is None and start:\n d = \"{} {}\".format(d, defaultStart)\n elif time is None and start is False:\n d = \"{} {}\".format(d, defaulEnd)\n else:\n d = \"{} {}\".format(d, time)\n\n # check if the date is 2018 or 18 only:\n Yreg = re.compile(r\"20[0-9][0-9]\")\n if Yreg.search(d) is not None:\n us = datetime.datetime.strptime(d, \"%d.%m.%Y %H:%M\")\n else:\n us = datetime.datetime.strptime(d, \"%d.%m.%y %H:%M\")\n\n return(us)\n\n def parseWeek(self, table):\n '''this function parses a table of a week'''\n\n # basically scan the trs and try to make sense of them\n rows = table.find_all(\"tr\")\n prevdate = False # do we have a date yet?\n for row in rows:\n # first field (td) gives time range\n # if its a date we submit a whole day event\n # if second field is empty\n fields = row.find_all(\"td\")\n f1 = fields[0].get_text(strip=True)\n f2 = fields[1].get_text(strip=True)\n\n # match our time and date regex\n dm = datereg.match(f1)\n tm = timereges.match(f1)\n if dm:\n prevdate = f1\n # this is a date field\n if f2 == \"\":\n continue\n else:\n # whole day event, presumingly:\n eventName = f2\n eventStart = self.asUNIX(date=prevdate)\n eventEnd = self.asUNIX(date=prevdate, start=False)\n elif tm:\n # figuring the date ranges\n startend = f1.split(\" - \")\n eventStart = self.asUNIX(date=prevdate, time=startend[0])\n eventEnd = self.asUNIX(date=prevdate, time=startend[1])\n eventName = f2\n else:\n # here we do 
custom handlers for dates\n # examples are:\n # Afternoon\n # 13:30 onwards\n # From 17:00\n eventName = f2\n # defining afternoon:\n if f1.lower() == \"afternoon\":\n eventStart = self.asUNIX(date=prevdate, time=\"12:00\")\n eventEnd = self.asUNIX(date=prevdate, time=\"17:00\")\n else:\n singleTimeReg = re.compile(r\"[\\d]+:[\\d]+\")\n m = singleTimeReg.search(f1)\n if m:\n # we have a date:\n tm = m.group(0)\n directionalityReg = re.compile(r\"onwards|From\")\n if directionalityReg.search(f1):\n eventStart = self.asUNIX(date=prevdate, time=tm)\n eventEnd = self.asUNIX(date=prevdate, start=False)\n else:\n warnings.warn(\"We dont know what time this\\\n is: {}\".format(f1))\n\n # save the event\n event = Event()\n event.add(\"dtstart\", eventStart)\n event.add(\"dtend\", eventEnd)\n event.add(\"summary\", eventName)\n self.cal.add_component(event)\n\n def ical(self):\n a = self.cal.to_ical()\n b = a.decode('utf-8').replace(\"\\r\\n\", \"\\n\")\n return b\n\n def write(self, f):\n with open(f, \"w\") as file:\n file.write(self.ical())\n\n\n# starting the class\nurl = \"https://www.embl.de/predoccourse/2018/schedule/index.html\"\n\nif len(sys.argv) > 1:\n lc = sys.argv[1]\nelse:\n lc = \"Predoc_Course_2018.ics\"\ncs = course(url,\n lc)\n","repo_name":"openpaul/emblphdcal","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33867494072","text":"from django.http import HttpResponse\nfrom django.template.loader import get_template\nfrom django.template import Context\nimport json\n\ndef mainpage(request):\n if request.method == 'GET':\n t = get_template('test.html')\n html=t.render(Context({}))\n return HttpResponse(html)\n else:\n colours = range(9)\n data = request.POST.get('data')\n adjancencyList = json.loads(data)\n colouredVertex = [-1]*len(adjancencyList)\n for i in range(len(adjancencyList)):\n vertex = 
adjancencyList[i]\n choosedColours = []\n for adjacent in vertex:\n #a = 10\n choosedColours.append(colouredVertex[adjacent])\n remainingColours = []\n for colour in colours:\n if colour not in choosedColours:\n remainingColours.append(colour)\n colouredVertex[i]=remainingColours[0]\n\t\t\n return HttpResponse(json.dumps(colouredVertex))\n","repo_name":"prabeesh/GraphColouring-Javascript-Canvas-Django","sub_path":"graph/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"5539117646","text":"# -*- coding: utf-8 -*-\n#\n# monasca-api documentation build configuration file, created by\n# sphinx-quickstart on Wed Nov 18 12:02:03 2015.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport os\nimport sys\n\nsys.path = [\n os.path.abspath('../..'),\n os.path.abspath('../../bin')\n] + sys.path\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\nneeds_sphinx = '1.6'\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.coverage',\n 'sphinx.ext.ifconfig',\n 'sphinx.ext.graphviz',\n 'sphinx.ext.autodoc',\n 'sphinx.ext.viewcode',\n 'oslo_config.sphinxconfiggen',\n 'oslo_config.sphinxext',\n 'openstackdocstheme',\n]\n\n# geeneral information about project\nopenstackdocs_repo_name = u'openstack/monasca-api'\nopenstackdocs_pdf_link = True\nopenstackdocs_use_storyboard = True\ncopyright = u'2014-present, OpenStack Foundation'\nauthor = u'OpenStack Foundation'\n\n# sample config\nconfig_generator_config_file = [\n ('config-generator/monasca-api.conf', '_static/monasca-api')\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\nsource_encoding = 'utf-8'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = [\n 'common',\n 'doc',\n 'documentation',\n 'etc',\n 'java'\n]\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\nadd_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\nadd_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\nshow_authors = True\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'native'\n\n# A list of ignored prefixes for module index sorting.\nmodindex_common_prefix = ['monasca_api.', 'monasca']\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. 
See the documentation for\n# a list of builtin themes.\nhtml_theme = 'openstackdocs'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n# html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n# html_theme_path = []\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \" v documentation\".\n# html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n# html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n# html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# doc. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n# html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. 
These files are copied\n# directly to the root of the documentation.\n# html_extra_path = []\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\nhtml_use_index = True\n\n# If false, no module index is generated.\nhtml_use_modindex = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = None\n\n# Language to be used for generating the HTML full-text search index.\n# Sphinx supports the following languages:\n# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'\n# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'\n#html_search_language = 'en'\n\n# A dictionary with options for the search language support, empty by default.\n# Now only 'ja' uses this config value\n#html_search_options = {'type': 'default'}\n\n# The name of a javascript file (relative to the configuration directory) that\n# implements a search results scorer. 
If empty, the default will be used.\n#html_search_scorer = 'scorer.js'\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'MonitoringApiDoc'\n\n# -- Options for LaTeX output ---------------------------------------------\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'doc-monasca-api.tex', u'Monasca Documentation',\n [author], 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\nlatex_domain_indices = False\n\nlatex_elements = {\n 'makeindex': '',\n 'printindex': '',\n 'preamble': r'\\setcounter{tocdepth}{3}',\n}\n\n# Disable usage of xindy https://bugzilla.redhat.com/show_bug.cgi?id=1643664\nlatex_use_xindy = False\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. 
List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'monitoringapi', u'Monasca Documentation',\n [author], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {'https://doc.python.org/': None}\n","repo_name":"openstack/monasca-api","sub_path":"doc/source/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":7591,"program_lang":"python","lang":"en","doc_type":"code","stars":82,"dataset":"github-code","pt":"54"} +{"seq_id":"28124521610","text":"#!/usr/bin/env python\nimport random\nimport re\nimport argparse\nimport math\nimport os\nimport signal\nimport readline\nseed_bytes = 2048\nrandom.seed(os.urandom(seed_bytes))\n\n# Add CLI flag parser\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-v\", \"--debug\", help=\"Enable debug mode\", action=\"store_true\")\nparser.add_argument(\"-i\", \"--interactive\", help=\"Enable interactive mode, return to the DiceCode entry on finish\", action=\"store_true\")\nparser.add_argument(\"-sr\", \"--statroll\", help=\"Do a stat roll (4d6r1k3)\", action=\"store_true\")\nparser.add_argument(\"-d\", \"--dicecode\", help=\"Specify dicecode at launch (Example=dice.py --dicecode 4d20+69\")\nparser.add_argument(\"-q\", \"--quiet\", help=\"Don't display the induvidual dice rolls, only the total.\", action=\"store_true\")\nargs = parser.parse_args()\n# STAT ROLL 4d6r1k3\nif args.statroll == True:\n\tfor x in [\"Strength\", \"Dexterity\", \"Constitution\", \"Intelligence\", \"Wisdom\", \"Charisma\"]:\n\t\td = []\n\t\td.append(random.randint(2, 6)) \n\t\td.append(random.randint(2, 6)) \n\t\td.append(random.randint(2, 6)) \n\t\td.append(random.randint(2, 6)) \n\t\tif args.debug == True:\n\t\t\tprint('Rolls: \\n' + str(d))\n\t\td.remove(min(d))\n\t\tdsum = int(sum(d)) \n\t\tmod = math.floor((dsum - 10) / 2)\n\t\tprint(x + ': ' 
+ str(sum(d)) + '\\nMod: ' + str(mod) + '\\n')\n\t\t\n\texit()\ndef RollTheDice():\n\tif args.dicecode:\n\t\tdicecode = args.dicecode\n\telse:\n\t\tdicecode = input('Enter dice code: ')\n\t\n\t# DEBUG STUFFz\n\tif args.debug == True:\n\t\tprint('DiceCode before alteration: ' + dicecode)\n\t\n\t# Check if first character is a number, if not, prefix dicecode with \"1\" to only roll one dice\n\tif dicecode[0].isdigit() == False : \n\t\tdicecode = '1' + dicecode\n\t\n\t# DEBUG STUFFz\n\tif args.debug == True:\n\t\tprint('DiceCode after alteration: ' + dicecode)\n\t\n\tsnumdice = re.match(r'^\\d+', dicecode)\n\t\n\t# DEBUG STUFFz\n\tif args.debug == True:\n\t\tprint('Number of dice to roll: '+snumdice.group(0))\n\t\n\t# Get dice value to roll\n\tsdiceval = re.search('(?<=d)\\d+', dicecode)\n\t\n\t# Covert to int\n\tdiceval = int(sdiceval.group())\n\tnumdice = int(snumdice.group())\n\t\n\t# DEBUG STUFFz\n\tif args.debug == True:\n\t\tprint('Dice value to roll: '+sdiceval.group())\n\t\n\t# Reset dice array\n\tdicearray = []\n\tfor x in range(0, numdice):\n\t\tdicearray.append(random.randint(1, diceval))\n\t\n\tif 'r' in dicecode:\n\t\tsdicereroll = re.search('(?<=r)\\d+', dicecode)\n\t\tdicereroll = int(sdicereroll.group())\n\t\tif dicereroll >= diceval:\n\t\t\tprint('r cant >= d value, wtf dude?')\n\t\t\treturn;\n\t\n\t\tif args.debug == True:\n\t\t\tprint('DiceArray before rerolls: ' + str(dicearray))\n\t\t\tprint('Reroll all rolls that equal/is lower than: ' + sdicereroll.group())\n\t\n\t\twhile min(dicearray) <= dicereroll:\n\t\t\tdicearray.remove(min(dicearray))\n\t\t\tdicearray.append(random.randint(dicereroll+1, diceval))\n\t\tif args.debug == True:\n\t\t\tprint('DiceArray after rerolling: ' + str(dicearray))\t\n\telse:\t\n\t\tif args.debug == True:\n\t\t\tprint('r not specified, skipping...')\n\tif 'k' in dicecode:\n\t\tsdicekeep = re.search('(?<=k)\\d+', dicecode)\n\t\tdicekeep = int(sdicekeep.group())\n\t\tif dicekeep >= numdice:\n\t\t\tprint('k cant >= ammount 
of dice to roll, wtf dude?')\n\t\t\treturn;\n\t\n\t\tif args.debug == True:\n\t\t\tprint('Keep this many dice after roll: ' + sdicekeep.group())\n\t\twhile len(dicearray) > dicekeep:\n\t\t\tif args.debug == True:\n\t\t\t\tprint('DiceArray before removing all but the '+str(dicekeep)+' highest dice: '+str(dicearray))\n\t\t\t\tprint('Array length: ' + str(len(dicearray)))\n\t\t\tdicearray.remove(min(dicearray))\n\t\n\t\tif args.debug == True:\n\t\t\tprint('After keeping highest ' + str(dicekeep) + ': ' + str(dicearray))\n\telse:\t\n\t\tif args.debug == True:\n\t\t\tprint('k not specified, skipping...')\n\t\t\n\tif '+' in dicecode:\n\t\tsdiceplus = re.search('(?<=\\+)\\d+', dicecode)\n\t\tdiceplus = int(sdiceplus.group())\n\t\tprint('Modifier: +' + sdiceplus.group())\t\n\t\tif args.quiet != True:\n\t\t\tprint('Dice Rolls: '+ str(dicearray))\t\n\t\tprint('Total: ' + str(sum(dicearray) + diceplus) + '\\n')\n\telif '-' in dicecode:\n\t\tsdiceminus = re.search('(?<=\\-)\\d+', dicecode)\n\t\tdiceminus = int(sdiceminus.group())\n\t\tprint('Modifier: -' + sdiceminus.group())\t\n\t\tif args.quiet != True:\n\t\t\tprint('Dice Rolls: '+ str(dicearray))\t\n\t\tprint('Total: ' + str(sum(dicearray) - diceminus) + '\\n')\n\telse:\n\t\tif args.quiet != True:\n\t\t\tprint('Dice Rolls: '+ str(dicearray))\t\n\t\tprint('Total: ' + str(sum(dicearray)) + '\\n')\n\treturn;\ntry:\n\tif args.interactive:\n\t\twhile (True):\n\t\t\tRollTheDice()\n\telse:\n\t\tRollTheDice()\nexcept KeyboardInterrupt:\n\tprint(\"\\nBye!\")\nfinally:\n\texit()\n","repo_name":"Total-Ecl1ps3/DiceRoller","sub_path":"dice.py","file_name":"dice.py","file_ext":"py","file_size_in_byte":4443,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"74270100000","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as 
patches\n\nfrom mppi_train.trainer import Trainer\nfrom rosbot_model import RosbotModel\n\n# model_3kre9wom\n\ndef save_control_to_file(control, name):\n \"\"\"\n Args:\n control (tensor shape [1,N,2]): selected control\n name (string): output file name (or path + name)\n \"\"\"\n with open(name, 'w') as f:\n f.write(\"t x yaw \\n\")\n t = 0.0\n for item in control.detach().cpu().numpy()[0]:\n f.write(str(t) + \" \" + str(item[0]) + \" \" + str(item[1]) + \"\\n\")\n t = t + 0.03\n\ndef save_state_to_file(state, name):\n \"\"\"\n Args:\n state (tensor shape [1,N,4]): selected control\n name (string): output file name (or path + name)\n \"\"\"\n with open(name, 'w') as f:\n f.write(\"t x y yaw v \\n\")\n t = 0.0\n for item in state.detach().cpu().numpy()[0]:\n f.write(\n str(t) + \" \" +\n str(item[0]) + \" \" +\n str(item[1]) + \" \" +\n str(item[2]) + \" \" + \n str(item[3]) + \"\\n\"\n )\n t = t + 0.03\n\n\ndef predict_by_control(control, model, trainer):\n \"\"\"\n Args:\n control: torch.tensor of shape [1, time, 2]\n [1, time, U_v, U_w]\n model: torch.nn.Module \n Return:\n result_xya_v: torch.tensor of shape [batch, time, 4]\n [batch, time, x, y, yaw, lin_velocity]\n\n \"\"\"\n # (torch.tensor of shape [batch sizes, robot_state])\n init_state = torch.zeros([1,5])\n # Integrate control\n batch_dt = control = torch.ones([1, 999, 1]) * 0.033 # shape [batch_size, time, 2]\n rollout_size = 10\n print(\"init_state shape = {}\".format(init_state.shape))\n print(\"control shape = {}\".format(control.shape))\n print(\"batch_dt shape = {}\".format(batch_dt.shape))\n result_xya_vw = trainer.predict_multi_step(\n model,\n init_state, \n control[:, 0:-1, :], \n batch_dt, \n rollout_size\n )\n return result_xya_vw\n\n\ndef loss_for_square(result_xya_v):\n \"\"\"\n Args:\n result_xya_v: torch.tensor of shape [batch, time, 4]\n [batch, time, x, y, yaw, lin_velocity]\n Return:\n loss: torch.tensor of shape [1]\n \"\"\"\n x = result_xya_v[0, :, 0]\n y = result_xya_v[0, :, 1]\n v 
= result_xya_v[0, :, 3]\n # First goal: [1, 0]\n loss_x_1 = (x[250] - 1) ** 2\n loss_y_1 = y[250] ** 2\n # Second goal: [1, 1]\n loss_x_2 = (x[500] - 1) ** 2\n loss_y_2 = (y[500] - 1) ** 2\n # Third goal: [0, 1]\n loss_x_3 = x[750] ** 2\n loss_y_3 = (y[750] - 1) ** 2\n # Last goal\n loss_x_4 = x[999] ** 2\n loss_y_4 = y[999] ** 2\n # Summary loss\n traj_loss = (torch.sqrt(loss_x_1 + loss_y_1) + torch.sqrt(loss_x_2 + loss_y_2) +\n torch.sqrt(loss_x_3 + loss_y_3) + torch.sqrt(loss_x_4 + loss_y_4))\n loss = traj_loss + torch.abs(v).sum() * 0.03 * 0.1 # 0.1 default\n return loss\n\n\ndef loss_for_obstacle(result_xya_v, obstacle_boundaries=None):\n \"\"\"\n Args:\n result_xya_v: torch.tensor of shape [batch, time, 4]\n obstacle_boundaries (list of lists): obstacle boundaries\n Return:\n loss (torch.tensor shape=[1]): loss for current run\n Note:\n obstacle_boundaries example \n [[lower left corner coords], [upper right corner coords]] = [[0,1], [2,2]] \n \"\"\"\n x = result_xya_v[0, :, 0]\n y = result_xya_v[0, :, 1]\n\n left_cor = obstacle_boundaries[0]\n right_cor = obstacle_boundaries[1]\n\n # hinge loss \n residual = torch.cat([\n (x - left_cor[0])[None],\n (right_cor[0] - x)[None],\n (y - left_cor[0])[None],\n (right_cor[1] - y)[None]\n ], 0)\n\n loss = torch.min(residual, 0).values\n loss = torch.clamp(loss, 0).sum()\n return loss\n\n\ndef loss_for_goal(result_xya_v, goal):\n \"\"\"\n Args:\n result_xya_v: torch.tensor of shape [batch, time, 4]\n goal (list of 2 elements): coord of main goal \n Return:\n goal_loss: torch.tensor of shape [1]\n \"\"\"\n x = result_xya_v[0, :, 0]\n y = result_xya_v[0, :, 1]\n loss_x_goal = (x[999] - goal[0]) ** 2\n loss_y_goal = (y[999] - goal[1]) ** 2\n goal_loss = (torch.sqrt(loss_x_goal + loss_y_goal))\n return goal_loss\n\n\ndef calc_complex_loss(result_xya_v, goal, control, obstacle_boundaries=None):\n \"\"\"\n Args:\n result_xya_v: torch.tensor of shape [batch, time, 4]\n goal (list of 2 elements): coord of main goal \n control: 
torch.tensor of shape [1, time, 2]\n obstacle_boundaries (list of lists): obstacle boundaries\n Return:\n loss (torch.tensor): complex loss for goal, obstales and velocities\n \"\"\"\n v = result_xya_v[0, :, 3]\n loss = torch.tensor(0.0)\n loss += (loss_for_goal(result_xya_v, goal) * 5)\n loss += (torch.abs(v).sum() * 0.03 * 0.1) # loss for speed\n if obstacle_boundaries is not None:\n for obstacle in obstacle_boundaries:\n loss += (loss_for_obstacle(result_xya_v, obstacle) * 20)\n return loss\n\ndef draw_plots(result_xya_v, obsctacles, i):\n \"\"\"\n\n \"\"\"\n # Plot x, y\n fig, ax = plt.subplots()\n ax.plot(\n result_xya_v[0, :, 0].cpu().detach(),\n result_xya_v[0, :, 1].cpu().detach(),\n label='Predict'\n )\n ax.annotate(\n 'Control', xy=(0, 1), xytext=(12, -12),\n va='top', xycoords='axes fraction',\n textcoords='offset points',\n bbox=dict(facecolor='none', edgecolor='black')\n )\n ax.legend(loc=\"lower right\")\n ax.set_xlabel('X (m)')\n ax.set_ylabel('Y (m)')\n ax.set_aspect(\"equal\")\n if obsctacles is not None:\n for obs in obsctacles:\n # Create a Rectangle patch\n left_cor = obs[0]\n right_cor = obs[1]\n w = right_cor[0] - left_cor[0]\n h = right_cor[1] - left_cor[1]\n rect = patches.Rectangle(\n left_cor, w, h,\n linewidth=1,\n edgecolor='r',\n facecolor='none'\n )\n # Add the patch to the Axes\n ax.add_patch(rect)\n plt.savefig(f'plots/optimized_{i}.png')\n\n\ndef main():\n \"\"\"\n Create goa, obstacles, initial control and state\n try to optimize control for obstacles avoidance\n \"\"\"\n trainer = Trainer()\n\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n model = RosbotModel( \n n_layers=4,\n hidden_size=128,\n activation_function='relu',\n learning_rate=0.005,\n model_type='semilinear'\n )\n\n state_dict = torch.load('model_3kre9wom.pt')\n model.requires_grad_(False)\n model.load_state_dict(state_dict)\n # model = model.to(device)\n\n # Declare goal\n goal = [2.5, 5.0]\n # Declare obsctacle\n # for each obstacle declare: 
[[lower left corner coords], [upper right corner coords]]\n obsctacles = [[[0,1], [2,2]], [[3,1],[4,4]]] \n obsctacles = None\n # Create initial control\n control = torch.zeros([1, 1000, 2]) # shape [batch_size, time, 2]\n # Create initial state\n # result_xya_v = torch.zeros([1, 1000, 5])\n # result_xya_v = predict_by_control(control, model, trainer)\n # Optimize control for moving by set goal, and obstacles\n\n\n \"\"\"\n\n TEST\n\n batch_x.shape = torch.Size([1, 1809, 5])\n batch_u.shape = torch.Size([1, 1809, 2])\n batch_dt.shape = torch.Size([1, 1808, 1])\n \"\"\"\n\n init_state = torch.zeros([1, 1000, 5])\n control = torch.zeros([1, 1000, 2])\n batch_dt = torch.ones([1, 1000, 1]) * 0.033\n rollout = 1000\n\n print(\"init_state.shape = {}\".format(init_state.shape))\n print(\"control.shape = {}\".format(control.shape))\n print(\"batch_dt.shape = {}\".format(batch_dt.shape))\n\n\n predicted_traj = trainer.predict_multi_step(\n model,\n init_state[:, 0, :],\n control[:, 0:-1, :],\n batch_dt,\n rollout\n )\n\n control = control.clone().detach().requires_grad_(True)\n opt = torch.optim.Adam([control], lr=0.003)\n for i in range(1001):\n # result_xya_v = predict_by_control(control, model, trainer)\n # loss = calc_complex_loss(result_xya_v, goal, control, obsctacles)\n result_xya_v = trainer.predict_multi_step(\n model,\n init_state[:, 0, :],\n control[:, 0:-1, :],\n batch_dt,\n rollout\n )\n loss = loss_for_square(result_xya_v)\n loss.backward()\n opt.step()\n opt.zero_grad()\n\n if i % 10 == 0:\n print(i, loss.item())\n if i % 100 == 0:\n draw_plots(result_xya_v, obsctacles, i)\n\n save_control_to_file(control, \"cotrol_square.txt\")\n # save_state_to_file(result_xya_v, \"result_xya_v.txt\")\n\n\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"FastSense/robot_model_training","sub_path":"examples/gz-rosbot/gradient_descent_control.py","file_name":"gradient_descent_control.py","file_ext":"py","file_size_in_byte":8893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"32269531918","text":"# %%\nimport torch \nfrom torch import nn\nfrom d2l import torch as d2l\n\nimport sys\nsys.path.append('..')\nfrom utils.load_fshnmnist import load_data\nfrom utils.device import device, try_gpu\n\n\n# %%\n# model definition\nalexnet = nn.Sequential(\n\n nn.Conv2d(1, 96, kernel_size=11, stride=4, padding=1), nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2),\n\n nn.Conv2d(96, 256, kernel_size=5, padding=2), nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2),\n\n nn.Conv2d(256, 384, kernel_size=3, padding=1), nn.ReLU(),\n nn.Conv2d(384, 384, kernel_size=3, padding=1), nn.ReLU(),\n nn.Conv2d(384, 256, kernel_size=3, padding=1), nn.ReLU(),\n\n nn.MaxPool2d(kernel_size=3, stride=2), \n nn.Flatten(),\n\n nn.Linear(6400, 4096), nn.ReLU(), \n nn.Dropout(p=0.5),\n\n nn.Linear(4096, 4096), nn.ReLU(),\n nn.Dropout(p=0.5),\n\n nn.Linear(4096, 10)\n)\n\n\n# %%\n\nx = torch.randn(size=(1, 1, 224, 224))\nfor layer in alexnet:\n x = layer(x)\n print(layer.__class__.__name__,'output shape:\\t', x.shape)\n\n \n# %%\ntotal_params = 0\nfor p in alexnet.parameters():\n total_params += p.numel()\n print(total_params, p.shape)\n\n# %%\nbatch_size = 128\ntrain_iter, test_iter = load_data(batch_size, resize=224)\nlen(train_iter), next(iter(train_iter))[0].shape\n\n\n# %%\n# training\n\nlr = 0.09\nnum_epochs = 10\n\n\n# %%\nd2l.train_ch6(alexnet, train_iter, test_iter, num_epochs, lr, d2l.try_gpu())\n\n\n# %%\nweights = [(name, param.shape) for name, param in alexnet.named_parameters()]\nfor w in weights:\n print(w)\n# %%\nprint(alexnet)\n\n# 
%%\n","repo_name":"lihansen/dl.py","sub_path":"cnn/AlexNet.py","file_name":"AlexNet.py","file_ext":"py","file_size_in_byte":1531,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"1353671266","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.base import TransformerMixin, BaseEstimator\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.impute import SimpleImputer\nimport statsmodels.api as sm\nimport joblib\nfrom sklearn import set_config\nfrom config import *\n\nset_config(transform_output=\"pandas\")\n\n\nclass FeatureTransformer(BaseEstimator, TransformerMixin):\n \"\"\"Class for creating custom fit and transform method for feature transformation.\n Inherits from the BaseEstimator and TransformerMixin classes of scikit-learn.\n\n Attributes\n ----------\n\n numeric_imputer: The imputer to use for missing numeric values.\n\n Methods\n ---------\n\n fit: Get params of training data to use on the test dataset.\n\n Transform: perform the necessary transformations on the data.\"\"\"\n\n def __init__(self):\n \"\"\"Initialization method for the class.\"\"\"\n\n self.numeric_imputer = SimpleImputer(strategy=\"mean\")\n\n def fit(self, X: pd.DataFrame) -> \"FeatureTransformer\":\n \"\"\"Method to learn the parameters of the training dataset.\n\n Parameters:\n\n X: Pandas dataframe of the data to fit.\n\n Returns: Self.\"\"\"\n\n for i in model_numeric_columns:\n X[i] = (\n X[i]\n .str.replace(\",\", \"\", regex=True)\n .str.extract(r\"(\\d+)\")\n .astype(float)\n )\n\n self.numeric_imputer.fit(X[model_numeric_columns])\n return self\n\n def transform(self, X: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Method to transform the dataset based on learned parameters and additional transformation.\n\n Parameters:\n\n X: Pandas dataframe of data to transform.\n\n Returns: Transformed Pandas Dataframe.\"\"\"\n\n if pd.api.types.is_numeric_dtype(X[runtime_column]) == False:\n X[runtime_column] = (\n 
X[runtime_column]\n .str.replace(\",\", \"\", regex=True)\n .str.extract(r\"(\\d+)\")\n .astype(float)\n )\n else:\n pass\n\n if pd.api.types.is_numeric_dtype(X[imdb_votes_column]) == False:\n X[imdb_votes_column] = (\n X[imdb_votes_column]\n .str.replace(\",\", \"\", regex=True)\n .str.extract(r\"(\\d+)\")\n .astype(float)\n )\n else:\n pass\n\n if pd.api.types.is_numeric_dtype(X[metascore_column]) == False:\n X[metascore_column] = (\n X[metascore_column]\n .str.replace(\",\", \"\", regex=True)\n .str.extract(r\"(\\d+)\")\n .astype(float)\n )\n else:\n pass\n\n X[model_numeric_columns] = self.numeric_imputer.transform(\n X[model_numeric_columns]\n )\n\n X[awards_column] = X[awards_column].str.lower()\n X[awards_column] = X[awards_column].fillna(award_status_reference_column)\n X[top_genre_column] = X[genre_column].str.split(\",\").str[0].str.lower()\n\n return X\n\n\nclass CustomPreprocessor:\n \"\"\"Class for creating a model preprocessing pipeline and fitting and transforming data.\n\n Attributes\n ----------\n\n oscar_win: value for indicating oscar win.\n\n oscar_nom: Value for indicating oscar nomination.\n\n award_true: Value if the movie did win an award.\n\n nom_true: Value if the movie was nominated for an award.\n\n no_award: Value if the movie did not win an award.\n\n preprocessor: Use column transformer to create execution plan of preprocessing steps.\n\n Methods\n -------\n\n fit: Get params of training data to use on the test dataset.\n\n Transform: perform the necessary transformations on the data.\"\"\"\n\n def __init__(\n self,\n preprocessor=None,\n ):\n \"\"\"Method to initialize the class.\"\"\"\n\n self.preprocessor = preprocessor\n self.oscar_win = oscar_win_value\n self.oscar_nom = oscar_nom_value\n self.award_true = award_win_value\n self.nom_true = award_nom_value\n self.no_award = award_no_award_value\n self.action_adventure = action_adv_value\n self.drama_thriller = drama_thriller_value\n self.comedy = comedy_value\n self.horror = 
horror_value\n self.animation = animation_value\n self.other_genre = other_genre_value\n\n def fit(self, initial_df: pd.DataFrame) -> None:\n \"\"\"Method that creates the transformation pipeline and leverages the\n FeatureTransformer class.\"\"\"\n\n self.preprocessor = ColumnTransformer(\n transformers=[\n (\n \"preprocessing\",\n FeatureTransformer(),\n model_all_columns,\n ),\n ],\n verbose_feature_names_out=False,\n )\n\n self.preprocessor.fit(initial_df)\n\n return self\n\n def transform(self, new_data: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Method to transform the dataset based on learned parameters and additional\n transformation.\n\n Parameters:\n\n X: Pandas dataframe of data to transform.\n\n Returns: Transformed Pandas Dataframe.\"\"\"\n\n X_new = new_data.copy()\n\n X = self.preprocessor.transform(X_new)\n X[created_award_status_column] = np.where(\n X[awards_column].str.contains(oscar_win_regex, regex=True),\n self.oscar_win,\n np.where(\n X[awards_column].str.contains(oscar_nom_regex, regex=True),\n self.oscar_nom,\n np.where(\n X[awards_column].str.contains(win_text_identifier) == True,\n self.award_true,\n np.where(\n X[awards_column].str.contains(nom_text_identifier) == True,\n self.nom_true,\n self.no_award,\n ),\n ),\n ),\n ) # Create award status column\n\n X[created_genre_column] = np.where(\n X[top_genre_column].str.contains(\n f\"{action_value}|{adventure_value}\", regex=True\n ),\n self.action_adventure,\n np.where(\n X[top_genre_column].str.contains(\n f\"{drama_value}|{thriller_value}\", regex=True\n ),\n self.drama_thriller,\n np.where(\n X[top_genre_column].str.contains(comedy_value),\n self.comedy,\n np.where(\n X[top_genre_column].str.contains(horror_value) == True,\n self.horror,\n np.where(\n X[top_genre_column].str.contains(animation_value) == True,\n self.animation,\n self.other_genre,\n ),\n ),\n ),\n ),\n ) # Create Grouped Genre column\n\n X = sm.add_constant(X)\n X = pd.get_dummies(\n X, columns=[created_award_status_column, 
created_genre_column]\n )\n\n X.columns = [col.title() for col in X.columns]\n\n missing_columns = list(set(all_columns) - set(X.columns))\n\n if len(missing_columns) == 0:\n pass\n else:\n X.loc[:, missing_columns] = 0\n\n X.drop(\n [\n award_status_reference_column,\n awards_column,\n genre_reference_column,\n genre_column,\n top_genre_column,\n ],\n axis=1,\n inplace=True,\n )\n\n return X\n\n def save(self, filepath: str) -> None:\n \"\"\"Method to save down the preprocessor attribute.\n\n Parameters:\n\n filepath: string of the filepath to save down.\n\n Returns: None, saves down object.\"\"\"\n\n joblib.dump(self.preprocessor, filepath)\n\n def load(self, filepath: str) -> None:\n \"\"\"Method to read the saved preprocessor attribute.\n\n Parameters:\n\n filepath: string of the filepath to save down.\n\n Returns: None, Saves down as preprocessor attribute\"\"\"\n self.preprocessor = joblib.load(filepath)\n","repo_name":"nick12221/DashApp","sub_path":"model_preprocessing.py","file_name":"model_preprocessing.py","file_ext":"py","file_size_in_byte":8218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31408833859","text":"import requests\nfrom twilio.rest import Client\n\nOWM_Endpoint = \"https://api.openweathermap.org/data/2.5/onecall\"\napi_key = \"天氣的api key\"\n\naccount_sid = \"\"\nauth_token = \"\"\n\nparameters = {\n \"lat\": 35.689487,\n \"lon\": 139.691711,\n \"appid\": api_key,\n \"exclude\": \"current,minutely,daily\" # 排除某些資料\n}\n\nresponse = requests.get(OWM_Endpoint, params=parameters)\nresponse.raise_for_status()\nweather_data = response.json()\n# print(weather_data[\"hourly\"][0]['weather'][0]['id']) # ['weather']是list所以要用[0]取值\nweather_data_12h = weather_data[\"hourly\"][:12]\n\nwill_rain = False\n\nfor h in weather_data_12h:\n # print(h['weather'][0]['id'])\n condition_code = h['weather'][0]['id']\n if condition_code < 700:\n # print(\"Bring an umbrella.\")\n will_rain = True\n 
break\n\nif will_rain:\n # print(\"Bring an umbrella.\")\n\n client = Client(account_sid, auth_token)\n message = client.messages \\\n .create(\n body=\"It's going to rain today. Remember to bring an ☂️\",\n from_='+13802071751',\n to='+817043277580'\n )\n print(message.sid)","repo_name":"eclairsameal/Udemy_python100day","sub_path":"Day-35/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"5259123462","text":"from django.shortcuts import render\n\n# Create your views here.\nfrom django.views.generic import DetailView\n\nfrom cases.models import Case\nfrom services.models import ServiceArticle\n\n\nclass ServicesDetailView(DetailView):\n model = ServiceArticle\n\n def get(self, request, slug):\n article = ServiceArticle.objects.get(slug=slug)\n cases = Case.objects.all()\n return render(\n request=request,\n template_name='services/service.html',\n context={\n 'article': article,\n 'cases': cases,\n }\n )\n\n","repo_name":"BondarenkoOleksandr/ArtAds","sub_path":"src/services/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"20689813508","text":"\n\"\"\"Soy un profe, y me llega una lista con las notas de mis alumnos de este curso.\n\nQuiero imprimir una lista con el equivalente de cada nota, estas siendo:\n\nMenos de 4 (4 incluido), suspenso; 5 aprobado; 6 bien; 7 y 8 notable; 9 y 10 sobresaliente. 
(Asume que la lista siempre contiene números del 0 al 10).\n\nEjemplo:\n\nEntrada: [8, 4, 2, 9, 6, 7, 5, 0]\n\nSalida: [notable, suspenso, suspenso, sobresaliente, bien, notable, aprobado, suspenso]\"\"\"\n\n\nfrom tabulate import tabulate\n\n\nclass Curso:\n\n def __init__(self):\n\n self.curso = []\n\n self.otra = \"Y\"\n\n self.alumno = {\"ID\": None,\n \"Nombre\": None,\n \"Nota\": None,\n \"Calificación\": None}\n\n pass\n\n def introducir_datos(self):\n\n self.alumno = {\"ID\": int(input(\"Introduce el identificador del alumno: \")),\n \"Nombre\": str(input(\"Introduce un alumno: \")),\n \"Nota\": int(input(\"Introduce una nota: \")),\n \"Calificación\": calcular_nota()}\n\n self.curso.append(self.alumno)\n\n otra = input(\n \"¿Quieres introducir otra entrada en el registro? (Y/N): \").upper()\n\n while otra != \"N\" and otra != \"Y\":\n otra = input(\"Respuesta incorrecta. Introduce Y o N: \").upper()\n\n def calcular_nota(self):\n\n for nota in self.alumno:\n\n if nota <= 4:\n nota: \"Suspenso\"\n\n elif nota == 5:\n nota: \"Aprobado\"\n\n elif nota == 6:\n nota: \"Bien\"\n\n elif nota == 7 or nota == 8:\n nota: \"Notable\"\n\n elif nota == 9 or nota == 10:\n nota: \"Sobresaliente\"\n\n\nprint(tabulate(self.curso, headers=[\"Identificador\",\n \"Nombre\", \"Nota\", \"Calificación\"]))\n\n\npregunta = input(\"¿Quieres modificar algún elemento de la tabla?(Y/N \")\n\nwhile pregunta == \"Y\":\n\n \"\"\"Planteamiento para buscar los datos que queremos modificar\"\"\"\n\n while pregunta != \"N\" and pregunta != \"Y\":\n pregunta = input(\"Respuesta incorrecta. 
Introduce Y o N: \").upper()\n","repo_name":"LaraRabano/Gestor-de-notas","sub_path":"anteriores/gestor_4.py","file_name":"gestor_4.py","file_ext":"py","file_size_in_byte":2108,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"73839457753","text":"from pywr_editor.form import (\n BooleanWidget,\n FieldConfig,\n FormSection,\n StoragePickerWidget,\n)\n\nfrom ..parameter_dialog_form import ParameterDialogForm\n\n\nclass StorageParameterSection(FormSection):\n def __init__(self, form: ParameterDialogForm, section_data: dict):\n \"\"\"\n Initialises the form section for a StorageParameter.\n :param form: The parent form.\n :param section_data: A dictionary containing data to pass to the widget.\n \"\"\"\n super().__init__(form, section_data)\n self.form: ParameterDialogForm\n\n self.add_fields(\n {\n \"Configuration\": [\n FieldConfig(\n name=\"storage_node\",\n field_type=StoragePickerWidget,\n value=self.form.field_value(\"storage_node\"),\n help_text=\"This parameter returns the storage from the \"\n \"node specified above\",\n ),\n FieldConfig(\n name=\"use_proportional_volume\",\n field_type=BooleanWidget,\n default_value=False,\n value=self.form.field_value(\"use_proportional_volume\"),\n help_text=\"If Yes the storage is returned as proportional \"\n \"volume (between 0 and 1)\",\n ),\n ],\n \"Miscellaneous\": [self.form.comment],\n }\n )\n","repo_name":"pywr-editor/editor","sub_path":"pywr_editor/dialogs/parameters/sections/storage_parameter_section.py","file_name":"storage_parameter_section.py","file_ext":"py","file_size_in_byte":1523,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"5"} +{"seq_id":"7083336207","text":"from __future__ import unicode_literals\nimport frappe\nfrom frappe import _\nfrom frappe.utils import flt\n\ndef execute(filters=None):\n columns = get_columns()\n \n data = get_data(filters)\n \n message = \"Based on Sales Orders\"\n chart = 
get_chart(filters, data)\n \n return columns, data, message, chart\n\n \ndef get_columns():\n columns = [\n {\"label\": _(\"Year\"), \"fieldname\": \"year\", \"fieldtype\": \"Int\", \"width\": 60},\n {\"label\": _(\"Net amount\"), \"fieldname\": \"net_amount\", \"fieldtype\": \"Currency\", \"width\": 160},\n {\"label\": _(\"Customer\"), \"fieldname\": \"customer\", \"fieldtype\": \"Link\", \"options\": \"Customer\", \"width\": 80},\n {\"label\": _(\"Customer Name\"), \"fieldname\": \"customer_name\", \"fieldtype\": \"Data\", \"width\": 100},\n {\"label\": _(\"Comparison\"), \"fieldname\": \"comparison\", \"fieldtype\": \"Percent\", \"width\": 160}\n ]\n return columns\n \ndef get_data(filters):\n sql_query = \"\"\"\n SELECT\n YEAR(`transaction_date`) AS `year`,\n SUM(`base_net_total`) AS `net_amount`,\n `customer` AS `customer`,\n `customer_name` AS `customer_name`\n FROM `tabSales Order`\n WHERE `customer` = '{customer}'\n AND `docstatus` = 1\n AND `company` = \"{company}\"\n GROUP BY `year`\n ORDER BY `year` DESC\n \"\"\".format(customer=filters.customer, company=filters.company)\n \n data = frappe.db.sql(sql_query, as_dict=True)\n \n for i in range(0, (len(data) - 1)):\n data[i]['comparison'] = 100 * flt(data[i]['net_amount']) / flt(data[i+1]['net_amount'])\n \n return data\n\ndef get_chart(filters, data):\n datasets = []\n values = []\n labels = []\n for i in range(len(data), 0, -1):\n labels.append(\"{0}\".format(data[i-1]['year']))\n values.append(data[i-1]['net_amount'])\n \n datasets = [{\n 'name': [frappe.get_value(\"Customer\", filters.customer, \"customer_name\")],\n 'values': values\n }]\n \n chart = {\n 'data': {\n 'labels': labels,\n 'datasets': datasets,\n },\n 'type': \"line\"\n }\n return 
chart\n","repo_name":"libracore/hb","sub_path":"heimbohrtechnik/heim_bohrtechnik/report/customer_sales_trend/customer_sales_trend.py","file_name":"customer_sales_trend.py","file_ext":"py","file_size_in_byte":2139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"9667758994","text":"from __future__ import absolute_import\n\ntry:\n import httplib\nexcept ImportError:\n # for python3\n import http.client as httplib\n\nimport logging\n\nfrom six import iteritems\n\n\ndef singleton(cls, *args, **kw):\n instances = {}\n\n def _singleton():\n if cls not in instances:\n instances[cls] = cls(*args, **kw)\n return instances[cls]\n\n return _singleton\n\n\nclass Settings(object):\n def __init__(self):\n self.__logger_file = None\n self.__debug = False\n self.__logger_format = None\n\n # Default Base url\n self.host = \"https://api-west1.attune.co\"\n # Default api client\n self.api_client = None\n # Temp file folder for downloading files\n self.temp_folder_path = None\n\n # Authentication Settings\n # access token for OAuth\n self.access_token = \"\"\n\n # Logging Settings\n self.logger = {\n \"package_logger\": logging.getLogger(\"swagger_client\"),\n \"urllib3_logger\": logging.getLogger(\"requests.packages.urllib3\"),\n \"attune_logger\": logging.getLogger(\"attune\"),\n }\n # Log format\n self.logger_format = '%(asctime)s %(levelname)s %(message)s'\n # Log stream handler\n self.logger_stream_handler = None\n # Log file handler\n self.logger_file_handler = None\n # Debug file location\n self.logger_file = None\n # Debug switch\n self.debug = False\n\n # SSL/TLS verification\n # Set this to false to skip verifying SSL certificate when calling API from https server.\n self.verify_ssl = True\n\n # HTTP Pool settings\n self.http_pool_connections = 200\n self.http_pool_size = 200\n self.http_max_retries = 3\n self.http_timeout_read = 5\n self.http_timeout_connect = 5\n\n self.commands_fallback = False\n\n # Thread pool 
executor workers\n self.threadpool_workers_default = 50\n self.threadpool_workers = {\n 'getauthtoken': 100,\n 'bind': 100,\n 'createanonymous': 200,\n 'boundcustomer': 200,\n 'getrankingsget': 200,\n 'getrankingspost': 200\n }\n\n self.circuit_breaker_default = (5, 60)\n self.circuit_breaker = {\n 'getauthtoken': (5, 15),\n 'bind': (5, 15),\n 'createanonymous': (5, 15),\n 'boundcustomer': (5, 15),\n 'getrankingsget': (5, 30),\n 'getrankingspost': (5, 30)\n }\n\n @property\n def logger_file(self):\n \"\"\"\n Gets the logger_file.\n \"\"\"\n return self.__logger_file\n\n @logger_file.setter\n def logger_file(self, value):\n \"\"\"\n Sets the logger_file.\n\n If the logger_file is None, then add stream handler and remove file handler.\n Otherwise, add file handler and remove stream handler.\n\n :param value: The logger_file path.\n :type: str\n \"\"\"\n self.__logger_file = value\n if self.__logger_file:\n # If set logging file,\n # then add file handler and remove stream handler.\n self.logger_file_handler = logging.FileHandler(self.__logger_file)\n self.logger_file_handler.setFormatter(self.logger_formatter)\n for _, logger in iteritems(self.logger):\n logger.addHandler(self.logger_file_handler)\n if self.logger_stream_handler:\n logger.removeHandler(self.logger_stream_handler)\n else:\n # If not set logging file,\n # then add stream handler and remove file handler.\n self.logger_stream_handler = logging.StreamHandler()\n self.logger_stream_handler.setFormatter(self.logger_formatter)\n for _, logger in iteritems(self.logger):\n logger.addHandler(self.logger_stream_handler)\n if self.logger_file_handler:\n logger.removeHandler(self.logger_file_handler)\n\n @property\n def debug(self):\n \"\"\"\n Gets the debug status.\n \"\"\"\n return self.__debug\n\n @debug.setter\n def debug(self, value):\n \"\"\"\n Sets the debug status.\n\n :param value: The debug status, True or False.\n :type: bool\n \"\"\"\n self.__debug = value\n if self.__debug:\n # if debug status is 
True, turn on debug logging\n for _, logger in iteritems(self.logger):\n logger.setLevel(logging.DEBUG)\n # turn on httplib debug\n httplib.HTTPConnection.debuglevel = 1\n else:\n # if debug status is False, turn off debug logging,\n # setting log level to default `logging.WARNING`\n for _, logger in iteritems(self.logger):\n logger.setLevel(logging.ERROR)\n\n # self.logger['hystrix_logger'].setLevel(logging.CRITICAL)\n # turn off httplib debug\n httplib.HTTPConnection.debuglevel = 0\n\n @property\n def logger_format(self):\n \"\"\"\n Gets the logger_format.\n \"\"\"\n return self.__logger_format\n\n @logger_format.setter\n def logger_format(self, value):\n \"\"\"\n Sets the logger_format.\n\n The logger_formatter will be updated when sets logger_format.\n\n :param value: The format string.\n :type: str\n \"\"\"\n self.__logger_format = value\n self.logger_formatter = logging.Formatter(self.__logger_format)\n\n\n@singleton\ndef Configuration():\n return Settings()\n","repo_name":"attune-api/attune-python","sub_path":"attune/client/configuration.py","file_name":"configuration.py","file_ext":"py","file_size_in_byte":5552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"43503186930","text":"import pandas as pd\nimport numpy as np\nimport datetime\n\n\"\"\"\nAuthor: Jeff Chin\n\"\"\"\n\n\ndef save_csv(prob, sim, file_name='output.csv',\n traj='traj', phase='phase',\n x_name='time', x_units='s', y_name='', y_units=''):\n ''' Save Time History to CSV file for convenient portabilitiy\n\n prob is the problem\n sim is the explicit simulation \n\n optionally take in case recorder prob/sim\n\n y_name should be an array of variables\n y_units should be an array of corresponding units (not yet implemented)\n\n\n Usage:\n\n from boring.util.save_csv import save_csv\n save_csv(prob, sim, 'output.csv', \n y_name = ['h', 'r', 'CAS', 'SOC', 'T_motor'], \n y_units= ['m', 'km','m/s', None, 'degC' ])\n\n '''\n print(f'Saving 
CSV {file_name}...')\n\n varDict = {}\n\n try:\n t = prob.get_val(f'{traj}.{phase}.timeseries.time')\n print('Implicit Save')\n except:\n print('Fail Implicit Save')\n print(f'{traj}.{phase}.timeseries.time')\n\n try:\n t_s = sim.get_val(f'{traj}.{phase}.timeseries.time')\n print('Explicit Save')\n except:\n print('Fail Explicit Save')\n\n varDict.update({'t': np.concatenate(t).ravel().tolist()})\n\n for name in y_name:\n d = {}\n d2 = {} # TMS system has a different size\n try:\n y = prob.get_val(f'{traj}.{phase}.timeseries.{name}')\n d[f\"{name}\"] = np.concatenate(y).ravel().tolist()\n print(f'Saving problem: {name} ...')\n varDict.update(d)\n except:\n print(f'Unable to save: {name} ...')\n\n df = pd.DataFrame(varDict)\n df = df.set_index(['t'])\n df.index = pd.to_datetime(df.index, unit='s')\n df = df[~df.index.duplicated()] # remove duplicate timestamps\n df = df.resample('1s').bfill(limit=1).interpolate() # resample every 5 sec\n deltaT = df.index.to_series() - datetime.datetime(1970, 1, 1) # calculate timedelta\n df.index = deltaT.dt.total_seconds() # convert index back to elapsed seconds\n df['t'] = df.index # make explicit column with index\n\n df.to_pickle('./output.pkl')\n df.to_csv(file_name, index=False)\n","repo_name":"jcchin/boring","sub_path":"boring/util/save_csv.py","file_name":"save_csv.py","file_ext":"py","file_size_in_byte":2163,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"10937207747","text":"class TreeStore:\n \"\"\"\n Обработка массива объектов по заданному условию с помощью дерева.\n Для выполнения требования максимального быстродействия и прямого доступа к элементам без поиска,\n представим дерево, как объединенный список родителей и список потомков)\n\n логическое представление дерева исходных данных следующее:\n вершины дерева: 0 1 2 3 4 5 6 7 8\n ссылка на родителя: r 1 1 2 2 2 4 4\n ссылки на потомков: 2 4 7\n 3 5 8\n 6\n\n программно дерево представляет список 
кортежей:\n [(), ('root', [2, 3], []), (1, [4, 5, 6], 'test'), (1, [], 'test'), (2, [7, 8], 'test'),\n (2, [], 'test'), (2, [], 'test'), (4, [], None), (4, [], None)]\n где нулевой элемент не используется, для упрощения восприятия кода ревьювером,\n т.е. id элемента будет равняться порядковому номеру вершины в списке\n\n ! в задании требуется максимальное быстродействие и не обговорена возможнос��ь ввода\n заведомо некорректных данных, поэтому исключена проверка id элемента на IndexError\n\n ! в задании не уточнено могут ли id идти не последовательно и начинаться не с единицы.\n Поэтому конструктор построен несколько усложненным, учитывающим любую точку начала\n и последовательность id.\n \"\"\"\n\n def __init__(self, data: list):\n \"\"\"Конструктор\"\"\"\n self.items = data # сохраняем исходный массив для метода getAll\n\n tmp_d, maximum = {}, 0\n for elem in data: # отдельный цикл для потомков, этим исключаем n^2 обходов в осн цикле\n tmp_d.setdefault(elem['parent'], []).append(elem['id'])\n maximum = max(maximum, elem['id'])\n\n self.tree = [tuple() for _ in range(maximum + 1)]\n for elem in data: # основной цикл формирования дерева\n tmp_childs = tmp_d[elem['id']] if elem['id'] in tmp_d else []\n tmp_type = elem['type'] if elem['id'] != 1 else []\n self.tree[elem['id']] = (elem['parent'], tmp_childs, tmp_type)\n\n def getAll(self) -> list:\n \"\"\"Возвращает изначальный массив элементов.\n В задании требуется максимальное быстродействие, и отсутствуют требования к ресурсам,\n поэтому быстрее всего сохранить и вернуть исходный массив без обработки\"\"\"\n return self.items\n\n def getItem(self, n: int) -> dict:\n \"\"\"Принимает id элемента и возвращает сам объект элемента. 
Прямой доступ к элементу\"\"\"\n if n == 1:\n return {'id': n, 'parent': self.tree[n][0]}\n return {'id': n, 'parent': self.tree[n][0], 'type': self.tree[n][2]}\n\n def getChildren(self, n: int) -> list:\n \"\"\"Принимает id элемента и возвращает массив элементов, являющихся дочерними\"\"\"\n res = []\n for elem in self.tree[n][1]:\n res.append(self.getItem(int(elem)))\n return res\n\n def getAllParents(self, n: int) -> list:\n \"\"\"Принимает id элемента и возвращает массив из цепочки родительских элементов\"\"\"\n res = []\n tmp_parent = self.tree[n][0]\n while tmp_parent != 'root':\n res.append(self.getItem(tmp_parent))\n tmp_parent = self.tree[tmp_parent][0]\n return res\n\n\nitems = [\n {\"id\": 1, \"parent\": \"root\"},\n {\"id\": 2, \"parent\": 1, \"type\": \"test\"},\n {\"id\": 3, \"parent\": 1, \"type\": \"test\"},\n {\"id\": 4, \"parent\": 2, \"type\": \"test\"},\n {\"id\": 5, \"parent\": 2, \"type\": \"test\"},\n {\"id\": 6, \"parent\": 2, \"type\": \"test\"},\n {\"id\": 7, \"parent\": 4, \"type\": None},\n {\"id\": 8, \"parent\": 4, \"type\": None}\n]\nts = TreeStore(items)\n\nprint(ts.getAll()) # items\nprint(ts.getItem(7)) # {\"id\":7,\"parent\":4,\"type\":None}\nprint(ts.getChildren(4)) # [{\"id\":7,\"parent\":4,\"type\":None},{\"id\":8,\"parent\":4,\"type\":None}]\nprint(ts.getChildren(5)) # []\nprint(ts.getAllParents(7)) # [{\"id\":4,\"parent\":2,\"type\":\".\"},{\"id\":2,\"parent\":1,\"type\":\".\"},{\"id\":1,\"parent\":\"root\"}]\n","repo_name":"slavanr45/MyGit","sub_path":"Test_Task/test_task_1.py","file_name":"test_task_1.py","file_ext":"py","file_size_in_byte":5115,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"44505887024","text":"from xml.dom import Node\nimport logging\n\n\ndef get_child(parent_node, tag_name):\n\tif parent_node:\n\t\tfor n in parent_node.childNodes:\n\t\t\tif n.nodeType == Node.ELEMENT_NODE and n.tagName == tag_name:\n\t\t\t\treturn n\n \t# 
logging.warn(\"get_child: no %s node in %s\", tag_name, parent_node)\n\ndef list_children(parent_node, tag_name = None):\n\tif parent_node is None:\n\t\t# logging.warn(\"list_children: no parent_node\")\n\t\treturn []\n\tif not tag_name:\n\t\treturn [ n for n in parent_node.childNodes if n.nodeType == Node.ELEMENT_NODE ]\n\treturn [ n for n in parent_node.childNodes if n.nodeType == Node.ELEMENT_NODE and n.tagName == tag_name ]\n\ndef iter_children(parent_node, tag_name):\n\tif parent_node:\n\t\tfor n in parent_node.childNodes:\n\t\t\tif n.nodeType == Node.ELEMENT_NODE and n.tagName == tag_name:\n\t\t\t\tyield n\n\t# else:\n\t# \tlogging.warn(\"iter_children: no parent_node\")\n\ndef add_child(parent_node, tag_name, text_value = None):\n\tif parent_node:\n\t\tnode = parent_node.ownerDocument.createElement(tag_name)\n\t\tparent_node.appendChild(node)\n\t\tif text_value is not None:\n\t\t\tnode_text = parent_node.ownerDocument.createTextNode(str(text_value))\n\t\t\tnode.appendChild(node_text)\n\t\treturn node\n\ndef filter(parent_node, tag_name, **kwargs):\n\t# logging.debug(\"matching %s -> %s : %s\", parent_node, tag_name, kwargs)\n\tfor n in list_children(parent_node, tag_name):\n\t\tmatches = True\n\t\tfor k, v in kwargs.items():\n\t\t\tif n.getAttribute(k) != v:\n\t\t\t\tmatches = False\n\t\t\t\tbreak\n\t\tif matches:\n\t\t\t# logging.debug(\"matched %s\", n)\n\t\t\tyield n\n\t# logging.debug(\"none matched\")\n\n\ndef set_text(tag_node, text_value = None):\n\tif not tag_node:\n\t\tlogging.warn(\"set_text: no tag_node\")\n\t\treturn False\n\ttnode = tag_node.firstChild\n\tif tnode is None:\n\t\tif text_value is not None:\n\t\t\ttnode = tag_node.ownerDocument.createTextNode(str(text_value))\n\t\t\ttag_node.appendChild(tnode)\n\t\treturn True\n\tif tnode.nodeType == Node.TEXT_NODE:\n\t\tif text_value is None:\n\t\t\ttagName.removeChild(tnode)\n\t\telse:\n\t\t\ttnode.data = str(text_value)\n\t\treturn True\n\tlogging.warn(\"can't handle %s child node %s\", 
tag_node, tnode)\n\treturn False\n\ndef get_text(tag_node):\n\tif tag_node:\n\t\ttnode = tag_node.firstChild\n\t\tif tnode and tnode.nodeType == Node.TEXT_NODE:\n\t\t\treturn tnode.data\n\t\tlogging.warn(\"get_node found no text node in %s\", tag_node)\n\t# else:\n\t# \tlogging.warn(\"get_text: no tag_node\")\n\n\ndef remove_whitespace(tag_node):\n\t\"\"\"removes irrelevant whitespace children\"\"\"\n\tif not tag_node:\n\t\treturn True\n\twas_modified = False\n\tfor n in list(tag_node.childNodes):\n\t\tif n.nodeType == Node.TEXT_NODE and not n.data.strip():\n\t\t\ttag_node.removeChild(n)\n\t\t\twas_modified = True\n\treturn was_modified\n","repo_name":"pwr/KSP","sub_path":"src/qxml.py","file_name":"qxml.py","file_ext":"py","file_size_in_byte":2622,"program_lang":"python","lang":"en","doc_type":"code","stars":49,"dataset":"github-code","pt":"5"} +{"seq_id":"5156459520","text":"import math\r\nfrom itertools import combinations\r\n\r\n\r\n\r\ndef sieve_of_erosthenes(lim):\r\n nums = list(True for i in range(lim + 1))\r\n primes = []\r\n i = 2\r\n while i < len(nums):\r\n if nums[i]:\r\n primes.append(i)\r\n curr = i\r\n jump = i\r\n curr += jump\r\n while curr < len(nums):\r\n nums[curr] = False\r\n curr += jump\r\n i += 1\r\n return primes\r\n\r\ndef get_prime_combs(primes, r):\r\n for p1 in primes:\r\n print(\"p1\", p1)\r\n for p2 in primes:\r\n #print(\"p2\", p1)\r\n if not is_comb_prime(p1, [p2]):\r\n continue\r\n for p3 in primes:\r\n #print(\"p3\", p3)\r\n if not is_comb_prime(p3, [p1, p2]):\r\n continue\r\n for p4 in primes:\r\n #print(\"p4\", p4)\r\n if not is_comb_prime(p4, [p1, p2, p3]):\r\n continue\r\n for p5 in primes:\r\n #print(\"p5\", p5)\r\n if is_comb_prime(p5, [p1, p2, p3, p4]):\r\n return [p1, p2, p3, p4, p5]\r\n\r\n\r\ndef is_prime(num):\r\n if num == 2 or num == 3:\r\n return True\r\n if num % 2 == 0 or num % 3 == 0 or num <= 0:\r\n return False\r\n k = 1\r\n while 6*k-1 < int(math.sqrt(num+1)):\r\n if num % (6*k-1) == 0 or num % (6*k+1) == 
0:\r\n return False\r\n k += 1\r\n return True\r\n\r\n\r\ndef is_comb_prime(p1, primes):\r\n for p2 in primes:\r\n if p1 == p2 or not is_prime(int(str(p1) + str(p2))) or not is_prime(int(str(p2) + str(p1))):\r\n return False\r\n return True\r\n\r\n\r\ndef is_combs_prime(primes):\r\n combs = combinations(primes, 2)\r\n for comb in combs:\r\n if not is_prime(int(str(comb[0]) + str(comb[1]))) or not is_prime(int(str(comb[1]) + str(comb[0]))):\r\n return False\r\n return True\r\n\r\n\r\n#print(is_combs_prime((3, 7, 109, 673)))\r\n\r\nprint(get_prime_combs(sieve_of_erosthenes(10000), 5))","repo_name":"GaiaKoren/ARMY","sub_path":"Euler/p60.py","file_name":"p60.py","file_ext":"py","file_size_in_byte":2041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"6783839486","text":"from logging import getLogger\nfrom typing import Generic, Optional, Type, TypeVar\nfrom sqlalchemy import select\nfrom sqlalchemy.dialects.postgresql import insert\nfrom sqlalchemy.sql.functions import current_timestamp\n\nfrom sqlalchemy.ext.asyncio import AsyncSession\n\nfrom database.schema.base import Base\n\nTableType = TypeVar(\"TableType\", bound=Base)\n\nlogger = getLogger(f\"scrapy.{__name__}\")\n\nclass BasePostgresService(Generic[TableType]):\n \"\"\"\n Base Postgres service.\n \"\"\"\n model: Type[TableType]\n\n @classmethod\n async def get(cls, db: AsyncSession, _id: str) -> Optional[TableType]:\n return await db.get(cls.model, _id)\n\n @classmethod\n async def get_one(cls, db: AsyncSession, _filter: dict):\n query = select(cls.model).filter_by(**_filter)\n result = await db.execute(query)\n return result.first()\n\n @classmethod\n async def get_list(cls, db: AsyncSession, _filter: dict):\n query = select(cls.model).filter_by(**_filter)\n result = await db.execute(query)\n return result.all()\n\n @classmethod\n async def bulk_upsert(cls, db: AsyncSession, obj_list: list[dict]):\n \"\"\"\n Utilize Postgres' ON CONFLICT DO UPDATE 
to bulk upsert items.\n \"\"\"\n logger.debug(\"Upserting %d objects into db...\" % len(obj_list))\n stmt = insert(cls.model).values(obj_list)\n stmt = stmt.on_conflict_do_update(\n index_elements=['url'],\n set_={\n \"score\": stmt.excluded.score,\n \"update_time\": current_timestamp()\n }\n )\n await db.execute(stmt)\n await db.commit()\n\n @classmethod\n async def get_all_article_ranked(cls, db: AsyncSession):\n query = select(cls.model).order_by(cls.model.score.desc())\n result = await db.execute(query)\n await db.commit()\n return result.fetchall()","repo_name":"pmphan/news-crawler","sub_path":"database/services/postgres_service.py","file_name":"postgres_service.py","file_ext":"py","file_size_in_byte":1884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"5725351577","text":"from bs4 import BeautifulSoup\nimport requests\nfrom urllib.request import urlopen\nimport re\n\nprint(\"\\n\")\nprint(\"Let's Find Mobile with suitable range\")\nprint(\"-----------------------------------------------------------------------------------------------------\")\nprint(\"\\n\")\n\nlrange = str(input(\"Enter Lowest Amount of your budget = \"))\nhrange = str(input(\"Enter Highest Amount of your budget = \"))\n\nprint(\"\\n\")\nprint(\"Please wait while computer is doing your work\")\nprint(\"Your results will be displayed here\")\nprint(\"\\n\")\n\n\nurl = urlopen(\"https://www.91mobiles.com/list-of-phones/mobile-phones-in-range-of-\"+lrange+\"-to-\"+hrange)\n\nname = []\nprice = []\n\nsoup = BeautifulSoup(url , 'html.parser')\n\nfor i in soup.findAll('a' , attrs={'class' : (\"hover_blue_link name gaclick\") }):\n\tmname = re.search('>(.+?)<',str(i))\n\tname.append(mname.group(1))\n\nfor j in soup.findAll('span' , attrs={'class' : (\"price price_padding\") }):\n\tmprice = re.search('span>(.+?) 
\"+price[mobile])\n\nprint(\"\\n Thank You\")\n","repo_name":"rahul188/MobilePriceScrapper","sub_path":"ask.py","file_name":"ask.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"10618722322","text":"#-*- coding = utf-8 -*-\n#@Time: 2020/9/13 9:50\n#@Author: Winter\n#@File: spider.py\n#@Software: PyCharm\n\n\n#引入模块\nfrom bs4 import BeautifulSoup #网页解析,获取数据\nimport re #正则表达式,进行文字匹配\nimport urllib.request,urllib.error #制定URL,获取网页数据\nimport xlwt #进行Excel操作\nimport sqlite3 #进行sqlite3数据库操作\n\n\ndef info():\n baseurl = \"https://movie.douban.com/top250?start=\"\n\n #1.爬取网页\n datalist = getData(baseurl)\n\n\n\n #3.保存数据\n savePath = \"豆瓣电影Top250.xls\"\n saveData(datalist,savePath)\n #askURL(\"https://movie.douban.com/top250?start=\")\n\n #3.保存到数据库中去\n # dbPath = \"movie.db\"\n # saveData2DB(datalist,dbPath) #保存到数据库中\n\n\n\n#全局变量 创建正则表达式对象,表示规则(字符串的模式)\n\n#影片详情链接的规则\nfindLink = re.compile(r'')\n#影片图片的链接规则\nfindImaSrc = re.compile(r'(.*)')\n#评分\nfindRating = re.compile(r'(.*)')\n#评价人数\nfindJudge = re.compile(r'(\\d*)人评价')\n#找到概况\nfindInq = re.compile(r'(.*)')\n#找到影片的相关内容\nfindBd = re.compile(r'

(.*?)

',re.S) #re.S让换行符包含在字符中\n\n\n#1爬取网页\ndef getData(baseurl):\n datalist = []\n\n\n for i in range (0,10): #调用获取页面信息的函数10次\n url = baseurl+str(i*25)\n html = askURL(url) #保存获取到的网页原码\n\n # 2.逐一解析数据\n soup = BeautifulSoup(html,\"html.parser\") #解析\n #提取\n for item in soup.find_all(\"div\",class_=\"item\"): #查找符合要求的字符串,形成列表\n #print(item) #测试查看电影item\n data = [] #保存一部电影的所有信息\n item = str(item) #变成字符串\n\n\n\n #影片详情的超链接\n link = re.findall(findLink,item)[0] #re库用来通过正则表达式查找指定的字符串\n data.append(link) #添加链接\n\n imgSrc = re.findall(findImaSrc,item)[0]\n data.append(imgSrc) #添加图片\n\n titles = re.findall(findTitle,item)\n if (len(titles)==2):\n ctitle = titles[0]\n data.append(ctitle) #添加中文名\n otitle = titles[1].replace(\"/\",\"\") #去掉/\n data.append(otitle) #添加外国名\n else:\n data.append(titles[0])\n data.append(\" \") #外国名留空\n\n rating = re.findall(findRating,item)[0]\n data.append(rating) #添加打分\n\n judgeNum = re.findall(findJudge,item)[0]\n data.append(judgeNum) #添加评价人数\n\n inq = re.findall(findInq,item)\n if len(inq)!=0:\n inq = inq[0].replace(\"。\",\"\") #去掉句号\n data.append(inq) #添加概述\n else:\n data.append(\" \") #留空\n\n bd = re.findall(findBd,item)[0]\n bd = re.sub('(\\s+)?',\" \",bd) #去掉
\n bd = re.sub('/',\" \",bd) #替换/\n data.append(bd.strip()) #去掉前后的空格\n\n datalist.append(data) #处理好的一部电影信息放入datalist\n\n return datalist\n\n\n\n#得到指定一个URL的网页内容\ndef askURL(url):\n #用户代理,告诉豆瓣服务器,我们是什么类型的机器,浏览器(本质上是告诉浏览器,我们可以接受什么水平的文件内容)\n\n #头部信息,模拟浏览器头部信息,向豆瓣浏览器发送消息\n head={\"User-Agent\":\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36\"}\n #请求\n request = urllib.request.Request(url,headers=head)\n #存储\n html = \"\"\n try:\n response = urllib.request.urlopen(request)\n html = response.read().decode(\"utf-8\")\n #print(html)\n except urllib.error.URLError as e:\n if hasattr(e,\"code\"):\n print(e.code)\n if hasattr(e,\"reason\"):\n print(e.reason)\n return html\n\n\n\n\n#3保存数据\ndef saveData(datalist,savePath):\n print(\"save......\")\n book = xlwt.Workbook(encoding=\"utf-8\",style_compression=0) # 1创建workbook对象\n sheet = book.add_sheet(\"豆瓣电影Top250\",cell_overwrite_ok=True) # 2.创建worksheet 创建工作表 cell_overwrite_ok=True覆盖以前的内容\n\n col = (\"电影详情链接\",\"图片链接\",\"影片中文名\",\"影片外国名\",\"评分\",\"评价数\",\"概况\",\"相关信息\")\n for i in range(0,8):\n sheet.write(0,i,col[i]) #写入列表\n\n for i in range(0,250):\n print(\"第%d条\"%(i+1))\n data = datalist[i]\n for j in range(0,8):\n sheet.write(i+1,j,data[j])\n\n book.save(savePath)\n\n\n#3保存到数据库中\ndef saveData2DB(datalist,dbPath):\n #1创建数据库\n init_db(dbPath)\n\n #2连接数据库\n conn = sqlite3.connect(dbPath)\n cursor = conn.cursor() #获取一个游标\n\n #3对电影信息进行解析,并插入到数据库中\n for data in datalist:\n for index in range(0,len(data)):\n if index==4 or index==5:\n continue\n #在数据前后加上引号,数值的不需要添加引号\n data[index] = '\"' + data[index] + '\"'\n sql = '''\n insert into movie250(\n info_link,pic_link,cname,ename,score,rated,introduction,info\n )\n values (%s)'''%\",\".join(data) #每一个data的数据用,连接起来\n print(sql)\n cursor.execute(sql) #执行SQL语句\n conn.commit() #执行完就提交一次\n cursor.close()\n conn.close()\n\n print(\".....\")\n\n\n#4.创建数据库\ndef init_db(dbPath):\n\n #建表语句\n sql = '''\n create table 
movie250(\n id integer primary key autoincrement,\n info_link text,\n pic_link text,\n cname varchar,\n ename varchar,\n score numeric,\n rated numeric,\n introduction text,\n info text\n )\n \n ''' #创建数据表\n conn = sqlite3.connect(dbPath) #默认在当前路径创建 打开或者创建数据库文件\n cursor = conn.cursor() #获取一个游标\n cursor.execute(sql) #执行SQL,创建数据表\n\n conn.commit() #提交\n conn.close() #关闭数据库\n\n\n#当程序执行时\nif __name__ ==\"__main__\":\n #调用函数\n info()\n\n #测试建表语句\n #init_db(\"movieTest.db\")\n\n print(\"爬取完毕......\")","repo_name":"StudyWinter/doubanSpider","sub_path":"spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":7409,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"71631211352","text":"import setuptools\n\nversion = '2.13.3'\nsetuptools.setup(\n name='boto',\n version=version,\n license='OSI Approved :: MIT License',\n url='http://pypi.python.org/packages/source/b/boto/boto-%s.tar.gz' % version,\n author='Mitch Garnaat',\n author_email='mitch@garnaat.com'\n)\n","repo_name":"libertyy/packages","sub_path":"pkgs/boto/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"39615402467","text":"#!/usr/bin/env python3\nimport torch\nimport torch.nn\nimport torch.nn.functional\n\n\n\nclass Layer(torch.nn.Module):\n\n def __init__(self):\n super().__init__()\n\n self.shuffle = nn.PixelShuffle()\n self.concat = Concatenate()\n self.instancenorm = torch.nn.InstanceNorm2d()\n self.instancenorm = torch.nn.InstanceNorm2d(affine=True)\n self.maxpool = MaxPool3d()\n self.dropout = DropOut3d()\n x = torch.nn.functional.maxpool1d(...)\n\n def nested(self):\n\n class Nested():\n pass\n\n def nested_function():\n pass\n\nimport numpy as np\n\nx=np.arange(100)\ny=np.arange(100)\nq=x.clone()\n\nnp.nansum(x)\nz = x @ 
y\n\nx=torch.arange(50)\nx=x.reshape(5,-1)\nx=x.view(5,-1)\nx=x.permute(1,0)\nnp.tile(...)\nnp.repeat(...)\ntorch.repeat(...)\nx.einsum(...)\nx.matmul(...)\n\n# losses\ncategorical_crossentropy(x)\nF.categorical_crossentropy(x)\nnn.Linear()\nDense()\nAdam(...)\nSGD(...)\ntorch.nn.CrossEntropyLoss(...)\nnumpy.nansum(...)\nnp.any(np.isnan(x))\nnp.any(np.any(x))\nnp.isnan(np.any(x))\nnp.all(numpy.isnan(x))\nnp.sum(x[np.isnan(x)])\nmask=np.isnan(x)\nx.detach()\nx.cpu()\nnonbase.x.cpu()\n","repo_name":"dvolgyes/flake8_tensors","sub_path":"test/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"42158404820","text":"import json\nimport requests\nimport mimetypes\nfrom PIL import Image\n\nwith open(\"Members_raw.json\", \"r\") as f:\n data = json.load(f)\n\nfor i in range(len(data)):\n link = data[i][\"image\"].replace(\"/file/d/\", \"/uc?id=\").replace(\"/open?id=\", \"/uc?id=\").replace(\"/view?usp=sharing\", \"\")+\"&export=download\"\n name = \"-\".join(data[i][\"fullName\"].split())\n response = requests.get(link)\n content_type = response.headers['content-type']\n extension = mimetypes.guess_extension(content_type)\n with open(f\"{name}{extension}\", \"wb\") as image:\n image.write(response.content)\n compressed_image = Image.open(f\"{name}{extension}\")\n width, height = compressed_image.size\n TARGET_WIDTH = 500\n coefficient = width / 500\n new_height = height / coefficient\n compressed_image.resize((int(TARGET_WIDTH),int(new_height)),Image.ANTIALIAS)\n compressed_image.save(f\"{name}{extension}\", quailty=80, optimize=True)\n data[i][\"image\"] = f\"about-us-images/{name}.jpg\"\n\nwith open(\"Members.json\", \"w\") as f:\n json.dump(data, 
f)","repo_name":"alonr619/MMT-Website","sub_path":"static/archive/about-us-images-22-23/downloadImage.py","file_name":"downloadImage.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"5914030675","text":"import pandas as pd\nimport os\nimport datetime\nimport calendar\n\ndef newest(path):\n files = os.listdir(path)\n paths = [os.path.join(path, basename) for basename in files]\n tester = max(paths, key=os.path.getctime)\n if os.path.isdir(tester) == True:\n return newest(tester)\n else: \n return tester\n\ndef setup():\n # Initialize global scope\n global now\n global today\n global month_dict\n \n # Setup time and date\n now = datetime.datetime.now()\n\n # This if/else is for proper MM/DD/YYYY formatting to ensure that MM is always 2 digits\n if now.month < 10:\n month = \"0\" + str(now.month)\n else:\n month = str(now.month)\n\n if now.day < 10:\n day = \"0\" + str(now.day)\n else:\n day = str(now.day)\n\n today = month + \".\" + day + \".\" + str(now.year)\n\n # This is used mostly for filepaths and also setting up dirs\n month_dict = {\n 1: \"JAN\",\n 2: \"FEB\",\n 3: \"MAR\",\n 4: \"APR\",\n 5: \"MAY\",\n 6: \"JUN\",\n 7: \"JUL\",\n 8: \"AUG\",\n 9: \"SEP\",\n 10: \"OCT\",\n 11: \"NOV\",\n 12: \"DEC\"\n }\n\n # Monday = 0, Friday = 4, if it's less than 5 than means it's a workday and we should run the program\n weekday_count = calendar.weekday(now.year, now.month, now.day)\n\n if weekday_count <= 4:\n # Check to make sure the year dir is setup \n if os.path.isdir(\"U://***//Daily Folder//\" + str(now.year)) == True:\n pass\n else:\n print(\"New year detected. 
Creating a new year directory in [Daily Folder]...\")\n os.mkdir(\"U://***//Daily Folder//\" + str(now.year))\n\n # Check to make sure the month dir is setup within the year dir\n if os.path.isdir(\"U://***//Daily Folder//\" + str(now.year) + \"//\" + month_dict[now.month]) == True:\n pass\n else:\n print(\"New month detected. Creating a new month directory in [Daily Folder]...\")\n os.mkdir(\"U://***//Daily Folder//\" + str(now.year) + \"//\" + month_dict[now.month])\n\n # Check to make sure the year dir is setup (for holdings check)\n if os.path.isdir(\"U://***//Holdings Check//\" + str(now.year)) == True:\n pass\n else:\n print(\"New year detected. Creating a new year directory in [Holdings Check]...\")\n os.mkdir(\"U://***//Holdings Check//\" + str(now.year))\n\n # Check to make sure the month dir is setup within the year dir (for holdings check)\n if os.path.isdir(\"U://***//Holdings Check//\" + str(now.year) + \"//\" + month_dict[now.month]) == True:\n pass\n else:\n print(\"New month detected. Creating a new month directory in [Holdings Check]...\")\n os.mkdir(\"U://***//Holdings Check//\" + str(now.year) + \"//\" + month_dict[now.month])\n \n return True\n else:\n print(\"Setup failure. 
Not a business day.\")\n return False\nsetup()\n\ndef get_different_rows(source_df, new_df):\n \"\"\"Returns just the rows from the new dataframe that differ from the source dataframe\"\"\"\n merged_df = source_df.merge(new_df, indicator=True, how='outer')\n changed_rows_df = merged_df[merged_df['_merge'] == 'right_only']\n return changed_rows_df.drop('_merge', axis=1)\n\nmorning_download = pd.read_csv(\"U://***//GDP//\" + str(now.year) + \"//\" + month_dict[now.month] + \"//\" + today + \" - GDP Download AM.csv\")\nafternoon_download = pd.read_csv(\"U://***//GDP//\" + str(now.year) + \"//\" + month_dict[now.month] + \"//\" + today + \" - GDP Download PM.csv\")\n\n# Remove unneccesary columns we don't need\nmorning_download = morning_download.drop([\"BBG_RETURN_CODE\", \"BBG_NUMBER_OF_FIELDS\", \"ID_BB_GLOBAL\"], axis=1)\nafternoon_download = afternoon_download.drop([\"BBG_RETURN_CODE\", \"BBG_NUMBER_OF_FIELDS\", \"ID_BB_GLOBAL\"], axis=1)\n\n# Filter the data frame for only the halt codes we need\nhalt_codes = [\"ACQU\", \"AHLT\", \"DLST\", \"HALT\", \"SUSP\", \"UNLS\"]\nmorning_download = morning_download.query(\"MARKET_STATUS in @halt_codes\")\nafternoon_download = afternoon_download.query(\"MARKET_STATUS in @halt_codes\")\n\n# This will find out which halt codes have changed from the morning \nwith pd.option_context('display.max_rows', None, 'display.max_columns', None):\n changed_tags = get_different_rows(morning_download, afternoon_download)\n print(changed_tags)\n\n# Load in the daily sheet \ndsdf = pd.read_csv(r\"U:\\***\\prod\\dailysheet.csv\")\n\n# Get rid of columns we don't need \ndsdf = dsdf.drop([\"SECURITY DESCRIPTION\", \"USERBANK & CLIENT\", \"REASON\"], axis=1)\n\n# Print results\nwith open(\"dsdf.csv\", \"w\") as f:\n dsdf.to_csv(f, 
index=False)","repo_name":"michaelobr/halts","sub_path":"testing/gain_reader.py","file_name":"gain_reader.py","file_ext":"py","file_size_in_byte":4611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"31722017111","text":"#!/usr/bin/env python\n# license removed for brevity\nimport rospy\nimport sys\nfrom sensor_msgs.msg import Image\nimport numpy as np\nimport cv2\nimport os\nfrom cv_bridge import CvBridge, CvBridgeError\nimport time\n\nimport torch\nimport torchvision.transforms as transforms\nfrom modules.UNet import UNet\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n\n\nclass UltrasoundSegmentation():\n def __init__(self):\n PATH = os.path.expanduser(\"~/new_arm_w_tarso_data_folder/UNet/unet_usseg_arm_phantom.pth\")\n self.unet = UNet(init_features=64).to(device)\n self.unet.load_state_dict(torch.load(PATH))\n self.unet.eval()\n\n self.bridge = CvBridge()\n self.transform_image = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.5,), (0.5,))\n ])\n self.pub_img = rospy.Publisher(\"segmentedImg\",Image, queue_size=2)\n# while(True):\n# #img_msg = rospy.wait_for_message(\"/imfusion/cephasonics\",Image)\n# self.callback(img_msg)\n self.sub_img = rospy.Subscriber(\"ultrasound_img\",Image,self.callback)\n\n def callback(self, img_msg):\n print(device)\n img_msg.encoding = 'mono8'\n try:\n img = self.bridge.imgmsg_to_cv2(img_msg)\n except CvBridgeError as e:\n print(e)\n\n orig_size = img.shape\n print(orig_size)\n\n tmp = cv2.resize(img, (256, 256), interpolation=cv2.INTER_LANCZOS4)\n img = tmp.astype(np.float) / 255\n\n\n print(\"started segmentation\")\n\n x = self.transform_image(img)\n x = x.view(-1, 1, 256, 256).to(device, dtype=torch.float)\n pred_tensor = self.unet(x)\n\n\n pred = pred_tensor.view(256, 256)\n\n\n start_sending_time = time.time()\n pred = pred.cpu().detach().numpy()\n\n end_sending_time = time.time()\n print(\"sending the image and 
receiving back the mask time : \")\n print(end_sending_time - start_sending_time)\n pred = (pred * 255).astype(np.uint8)\n _, mask = cv2.threshold(pred, thresh=127, maxval=255, type=cv2.THRESH_BINARY)\n\n print(\"finished segmentation\")\n\n\n# mask = cv2.resize(mask,(256, 375), interpolation=cv2.INTER_LANCZOS4)\n mask = cv2.resize(mask, (orig_size[1], orig_size[0]), interpolation=cv2.INTER_LANCZOS4)\n# mask = cv2.resize(mask, (375, 550), interpolation=cv2.INTER_LANCZOS4)\n# cv2.imshow(\"mask\", mask)\n# cv2.waitKey(1)\n\n\n\n # the calculated mask using the segmentation network is published to the mask topic\n msg = Image()\n msg.header.stamp = img_msg.header.stamp\n msg.height = mask.shape[0]\n msg.width = mask.shape[1]\n msg.encoding = \"mono8\"\n msg.is_bigendian = False\n msg.step = 1 * mask.shape[1]\n msg.data = np.array(mask).tobytes()\n self.pub_img.publish(msg)\n\n\ndef main(args):\n rospy.init_node('Ultrasound_Segmentation_Node', anonymous=True)\n UltrasoundSegmentation()\n try:\n rospy.spin()\n except KeyboardInterrupt:\n print(\"Shutting down\")\n\nif __name__ == '__main__':\n main(sys.argv)\n\n##!/usr/bin/env python\n#import rospy\n#import torch\n#import torchvision.transforms as transforms\n\n#import cv2\n#import math\n#from cv_bridge import CvBridge\n#from sensor_msgs.msg import Image\n#from sensor_msgs.msg import PointCloud2\n#from sensor_msgs.msg import PointField\n\n#import numpy as np\n#import os,sys,time\n\n#sys.path.append(os.path.expanduser(\"/home/zhongliang/ros/ws_vessel/Ultrasound_Medical_Robots/network\"))\n\n#from modules.UNet import *\n\n#device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n#class ImageBuffer:\n# def init(self):\n# self.bridge = CvBridge()\n# self.sub_img = rospy.Subscriber(\"/imfusion/cephasonics\",Image,self.update_image)\n# #self.sub_img = rospy.Subscriber(\"/us_image\",Image,self.update_image)\n# self.pub_img = rospy.Publisher(\"/mask\",Image)\n# # self.pub_img_debug = 
rospy.Publisher(\"/us_image\",Image)\n# self.img = None\n# self.stamp = None\n\n# def update_image(self,msg):\n# self.stamp = msg.header.stamp\n# msg.encoding = 'mono8'\n# #tmp = cv2.resize(self.bridge.imgmsg_to_cv2(msg,desired_encoding='mono8'),(256,256),interpolation=cv2.INTER_LANCZOS4)\n# tmp = cv2.resize(self.bridge.imgmsg_to_cv2(msg),(256,256),interpolation=cv2.INTER_LANCZOS4)\n# # us_image = self.bridge.cv2_to_imgmsg(tmp)\n# # us_image.encoding = 'mono8'\n# # self.pub_img_debug.publish(us_image)\n\n# self.img = torch.Tensor((tmp.astype(np.float)/255-0.5)*2).unsqueeze(0).unsqueeze(0) #batch + color\n\n# def get_image(self):\n# if self.img is None:\n# rospy.loginfo(\"Waiting for the first image.\")\n# return -1\n# else:\n# return self.img,self.stamp\n# #TODO: return time stamp to avoid inaccurate tf\n\n# def send_image(self,img):\n# msg = self.bridge.cv2_to_imgmsg(img, encoding=\"bgr8\")\n# self.pub_img.publish(msg)\n\n#if name__ == '__main':\n# rospy.init_node('image_segmentation')\n\n# pub_pc2 = rospy.Publisher(\"us_vessel_pointcloud\", PointCloud2, queue_size=10)\n\n# #init networks\n# rospy.loginfo(\"loading UNet\")\n# PATH = os.path.expanduser(\"~/ros/ws_vessel/Ultrasound_Medical_Robots/network/unet_usseg_phantom.pth\")\n# #PATH = os.path.expanduser(\"~/workspace/us_robot/network/unet_usseg_real.pth\")\n# unet = UNet(init_features=64).to(device)\n# unet.load_state_dict(torch.load(PATH))\n# unet.eval()\n\n# rospy.loginfo(\"Initialization...\")\n# #resize_to=[256,256]\n\n# img_buf = ImageBuffer()\n\n# while img_buf.get_image() is -1 and not rospy.is_shutdown():\n# rospy.sleep(0.2)\n\n# sx = rospy.get_param('/calibration/scaling_x',1.4648e-4)\n# sy = rospy.get_param('/calibration/scaling_y',1.5625e-4)\n# cx = rospy.get_param('/calibration/c_x', -0.01875)\n# cz = rospy.get_param('/calibration/cz', 0)\n\n# calibMtx = np.array([[sx,0,cx],[0,0,0],[0,sy,cz]])\n\n# rospy.loginfo(\"Initialized\")\n\n# run_cntr = 1.0\n# avg_dt = 0.0\n# cx_ = None\n# cy_ = None\n# ti_ 
= 0\n# while not rospy.is_shutdown():\n# img, curr_stamp = img_buf.get_image()\n\n# img_cu = img.to(device)\n\n# ti = time.time()\n# with torch.no_grad():\n# pred_cu = unet(img_cu)\n# dt = time.time()-ti\n# avg_dt = (run_cntr-1)/run_cntr*avg_dt+1.0/run_cntr*dt\n# print(\"avg pred time: \",avg_dt)\n# #print(dt,\", \",run_cntr)\n# run_cntr += 1\n\n# print(\"total time: \",ti-ti_)\n# ti_ = ti\n\n# pred = pred_cu.cpu()\n# pred = np.array(pred[0].permute(1, 2, 0))\n\n# pred = (pred*255).astype(np.uint8)\n# _,pred = cv2.threshold(pred,thresh=127,maxval=255,type=cv2.THRESH_BINARY)\n\n# # print(img.shape)\n# img_rgb = cv2.cvtColor(np.array((np.squeeze(img)/2+0.5)*255),cv2.COLOR_GRAY2RGB)\n# pred_rgb = cv2.cvtColor(pred,cv2.COLOR_GRAY2RGB)\n\n# pred_rgb[:,:,-2] = 0\n# #img_buf.send_image(img_rgb)\n# img_buf.send_image( (pred_rgb*0.2+img_rgb).astype(np.uint8) )\n","repo_name":"NehilDanis/shape_registration","sub_path":"scripts/ultrasound_segmentation.py","file_name":"ultrasound_segmentation.py","file_ext":"py","file_size_in_byte":7151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"19024240823","text":"class Solution:\n def rightSideView(self, root: Optional[TreeNode]) -> List[int]:\n if not root:return []\n \n ans = []\n q = [root]\n level = 0\n while q:\n for i in range(len(q)):\n cur = q.pop(0)\n if len(ans) == level:\n ans.append([])\n ans[level] = cur.val\n if cur.left:\n q.append(cur.left)\n if cur.right:\n q.append(cur.right)\n level+=1\n return ans","repo_name":"yijencheng/Leetcode","sub_path":"bfs(recursive)/199_Binary_tree_Right_Side_View.py","file_name":"199_Binary_tree_Right_Side_View.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"40152310418","text":"\n\n\nclass Solution:\n\t'''\n 判断一个带头结点的链表是否有环,并输出\n\t'''\n\tdef isLoop(self, phead):\n\t\tif not phead or not phead.next:\n\t\t\treturn 
None\n\t\tslow = phead.next\n\t\tfast = phead.next\n\t\twhile not fast.next and not fast:\n\t\t\tslow = slow.next\n\t\t\tfast = fast.next.next\n\t\t\tif slow == fast:\n\t\t\t\treturn slow\n\t\treturn None\n\n\t'''\n 找出环的入口点\n\t'''\n\tdef findLoopNode(self, phead, meetNode):\n\t\tfirst = phead.next\n\t\tsecond = meetNode\n\t\twhile fist != second:\n\t\t\tfist = fist.next\n\t\t\tsecond = second.next\n\t\treturn first\n\n\n","repo_name":"Michaelhuazhang/code_offer","sub_path":"面试笔试真挺解析/IS_loop1.6.py","file_name":"IS_loop1.6.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"72109207191","text":"import unittest\nimport os, glob, pandas, numpy\nimport sqlite3\nfrom matplotlib.figure import Figure\nimport site\nsite.addsitedir(r'..')\nfrom clust_old import *\nfrom scipy.stats import ttest_ind\nfrom sklearn.cluster import AgglomerativeClustering\n\n\n## folder to the microarray clustering\ndire = r'/home/b3053674/Documents/pytseries/Microarray'\n\nclass TestHClust(unittest.TestCase):\n def setUp(self):\n self.data_file = os.path.join(dire, 'MicroarrayDEGAgeravedData.xlsx')\n self.db_file = os.path.join(dire, 'microarray_dwt.db')\n\n self.data = pandas.read_excel(self.data_file, index_col=[0, 1]).transpose()\n self.data = self.data['TGFb'] / self.data['Control']\n self.tsg = TimeSeriesGroup(self.data)\n\n def test_x(self):\n f = FindSimilar(self.tsg, 'CTGF')\n self.assertTrue(isinstance(f.x, TimeSeries))\n\n def test_result(self):\n f = FindSimilar(self.tsg, 'CTGF')\n self.assertTrue(isinstance(f.result, TimeSeriesGroup))\n\n def test_dtw(self):\n fname = os.path.join(dire, 'FN1_find_similar.png')\n tsg = TimeSeriesGroup(self.data)\n tsg.interpolate(inplace=True, num=30)\n # print(tsg.features)\n f = FindSimilar(tsg, 'FN1', thresh=0.2)\n fig = f.tsg.plot(f.result.features, legend=True)\n fig.savefig(fname, dpi=300, bbox_inches='tight')\n\n # [i.plot() for i in f.dtw]\n 
plt.show()\n\n\n # fig = tsg.plot(tsg.features, legend=True)\n # fname = os.path.join(dire, 'plot.png')\n # fig.savefig(fname, bbox_inches='tight', dpi=300)\n\n\nif '__main__' == __name__:\n unittest.main()\n\n","repo_name":"CiaranWelsh/pytseries","sub_path":"Tests/FindSimilarTests.py","file_name":"FindSimilarTests.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"17565504840","text":"import scrapy\nimport pandas as pd\nfrom scrapy_splash import SplashRequest\nimport os\nimport csv\nimport re\ncwd = os.getcwd()\n#docker run -it -p 8050:8050 --rm scrapinghub/splash\n\n\nclass KBSpider(scrapy.Spider):\n date='0125'\n name = \"kb\"\n df = pd.read_excel(cwd+'/Report_'+date+'_3.xlsx')\n start_urls = []\n\n def start_requests(self):\n for index, row in self.df.iterrows():\n country = row['Activity Map Page (revar23)'].split('_')[0]\n language = (row['Activity Map Page (revar23)'].split(':')[0]).split('_')[1]\n ID = row['Activity Map Page (revar23)'].split(':')[-1]\n url = 'https://support.lenovo.com/'+country+'/'+language+'/solutions/'+ID\n mp = row['Activity Map Link (revar24)']\n item = {}\n item['original'] = url\n item['broken'] = ''\n item['language'] = language\n item['mp'] = mp\n item['ID'] = ID\n item['country'] = country\n item['view'] = row['Page Views']\n yield SplashRequest(url, self.parse,args={\n 'wait': 2,\n 'html': 1\n }, meta={'item':item})\n\n\n def parse(self, response):\n item = response.meta['item'].copy()\n mp = item['mp']\n all_href = None\n if \"\\\"\" not in mp and \"\\'\" not in mp:\n all_href_mult = response.xpath('//*[@id=\"detailBody\"]//a[text()=\"'+mp+'\"]/@href').getall()\n\n else:\n all_href_mult = ['NONE']\n\n if len(all_href_mult) > 1:\n for all_href_1 in all_href_mult:\n if '.com' not in all_href_1:\n all_href_1 = response.urljoin(all_href_1)\n elif 'https://' not in all_href_1:\n #all_href = 'https://' + all_href\n all_href_1 = 
response.urljoin(all_href_1)\n item['broken'] = all_href_1\n yield SplashRequest(all_href_1,callback=self.next_p, args={\n 'wait': 2,\n 'html': 1\n },meta={'item':item})\n else:\n if \"\\\"\" not in mp and \"\\'\" not in mp:\n all_href = response.xpath('//*[@id=\"detailBody\"]//a[text()=\"'+mp+'\"]/@href').get()\n else:\n ref = response.xpath('//*[@id=\"detailBody\"]//a').getall()\n for r in ref:\n res = re.search('>(.+?)<', r)\n if res:\n if res.group(1) == mp:\n href = re.search('href=\\\"(.+?)\\\"', r)\n if href:\n all_href = href.group(1)\n\n if all_href is not None:\n if '.com' not in all_href:\n all_href = response.urljoin(all_href)\n elif 'https://' not in all_href:\n #all_href = 'https://' + all_href\n all_href = response.urljoin(all_href)\n item['broken'] = all_href\n yield SplashRequest(all_href,callback=self.next_p, args={\n 'wait': 2,\n 'html': 1\n },meta={'item':item})\n else:\n word = mp.split(' ')\n #ref = response.xpath('//*[@id=\"detailBody\"]//a[contains(text(),\"'+word[0]+') and contains(text(),'+word[-1]+')]').getall()\n ref = response.xpath(\n '//*[@id=\"detailBody\"]//a').getall()\n if ref is not None:\n for r in ref:\n res = re.search('>(.+?)<', r,re.DOTALL)\n if res:\n if word[0].upper() in res.group(1).upper() and word[-1].upper() in res.group(1).upper():\n item['mp'] = res.group(1)\n href = re.search('href=\\\"(.+?)\\\"', r)\n\n if href and 'javascript' not in href.group(1):\n href = href.group(1)\n if '.com' not in href or 'https://' not in href:\n href = response.urljoin(href)\n item['broken'] = href\n yield SplashRequest(href, callback=self.next_p, args={\n 'wait': 2,\n 'html': 1\n }, meta={'item': item})\n\n def next_p(self,response):\n date='0125'\n data = [[response.meta['item']['ID'],response.meta['item']['country'],\n response.meta['item']['language'],\n response.meta['item']['original'],\n response.meta['item']['mp'],\n response.url,response.meta['item']['view']]]\n with open('Result_'+date+'.csv', 'a', 
encoding='utf-8-sig',newline='') as output:\n writer = csv.writer(output)\n if response.xpath('/html/head/title/text()').get() is not None:\n if 'Page Not Found' in response.xpath('/html/head/title/text()').get():\n writer.writerows(data)\n\n\n\n\n","repo_name":"EngineeringIV/broken_link_detection","sub_path":"kb/kb/spiders/kp_spider.py","file_name":"kp_spider.py","file_ext":"py","file_size_in_byte":5371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"32846286463","text":"import socket\n\nhost = socket.gethostname()\nCLASSES = []\nNUM_MFCC = 40\nNUM_FRAMES = 87\nDURATION = 2 # in seconds\nGENDER_CLASSES = ['M', 'F']\nPICKLE_FILE_PREFIX = 'LibriSpeech-mfcc-'\n\nCLASSES = []\nMAX_CLASSES = 10\n\nPROJECT_ROOT = '/Users/DD/Developer/lstm_gender_classifier/'\n# DATASET_STR = 'dev-clean'\nDATASET_STR = 'train-clean-100'\nDATA_ROOT = '/Users/DD/Developer/lstm_gender_classifier/LibriSpeech/'\nDATA_DIR = DATA_ROOT + DATASET_STR + '/'\nSPEAKER_FILE = DATA_ROOT + 'SPEAKERS.TXT'\nSPEAKER_IDX = 7\nCHAPTER_IDX = 8\nFILENAME_IDX = 9\nNUM_CLASSES = 40\n\n","repo_name":"bruceDuand/lstm_gender_classifier","sub_path":"constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"37466227055","text":"\nimport tensorflow as tf\n\n\nclass MyModel(tf.keras.Model):\n\n scope = 'my_model'\n\n def __init__(self):\n super().__init__()\n\n self.layer1 = tf.keras.layers.Dense(10, activation=tf.nn.tanh)\n self.layer2 = tf.keras.layers.Dense(2, activation=tf.nn.tanh)\n\n self.input_name = None\n self.output_name = None\n\n def __call__(self, x):\n \"\"\" return output of this model \"\"\"\n self.input_name = x.name\n with tf.name_scope(self.scope):\n x = self.layer1(x)\n x = self.layer2(x)\n x = tf.identity(x, name=\"output\")\n self.output_name = x.name\n return x\n\n def get_variables(self):\n return 
tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.scope)\n\n def get_io_names(self):\n assert not self.input_name is None and not self.output_name is None, \"input_name or output_name is not set\"\n return self.input_name, self.output_name\n","repo_name":"kzmssk/save-tf-model","sub_path":"models/my_model.py","file_name":"my_model.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"26666143465","text":"class Solution(object):\n def reverseVowels(self, s):\n \"\"\"\n :type s: str\n :rtype: str\n \"\"\"\n # 1. iterative+allocate\n vowels = {\"a\", \"e\", \"i\", \"o\", \"u\", \"A\", \"E\", \"I\", \"O\", \"U\"}\n indexes = []\n chars = []\n for i, char in enumerate(s):\n if char in vowels:\n indexes.append(i)\n chars.append(str(char))\n \n ans = \"\"\n for i in range(len(s)):\n if i not in indexes:\n ans += s[i]\n else:\n ans += chars.pop()\n return ans\n \n # 2. iterative+inplace\n vowels_set = set(['a','e','i','o','u'])\n s = list(s)\n l,r = 0,len(s) - 1\n while l < r:\n if s[l].lower() not in vowels_set:\n l += 1\n elif s[r].lower() not in vowels_set:\n r -= 1\n else:\n s[l],s[r] = s[r],s[l]\n l += 1\n r -= 1\n \n return ''.join(s)","repo_name":"waynewu6250/LeetCode-Solutions","sub_path":"345_reverse_vowels_of_a_string.py","file_name":"345_reverse_vowels_of_a_string.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"43975558908","text":"import os\nfrom fl.client import FLClient\nimport argparse\nimport time\n\n\ndef dataloader(classes=None):\n import torch\n import torchvision as tv\n transform = tv.transforms.Compose([\n tv.transforms.RandomCrop(32, padding=4, padding_mode=\"reflect\"),\n tv.transforms.RandomHorizontalFlip(),\n tv.transforms.ColorJitter(0.2, 0.2, 0.2, 0.1),\n tv.transforms.ToTensor(),\n tv.transforms.Normalize((0.4914, 0.4822, 0.4465),\n 
(0.2470, 0.2435, 0.2616))\n ])\n dataset = tv.datasets.CIFAR10(\n root='./data', train=True, download=True, transform=transform)\n if classes:\n sidx = [idx for idx, t in enumerate(dataset.targets) if t in classes]\n dataset = torch.utils.data.Subset(dataset, sidx)\n\n return torch.utils.data.DataLoader(\n dataset, batch_size=128, shuffle=True, num_workers=os.cpu_count(), pin_memory=True)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--classes', type=int, nargs='+', help='class labels for non iid tests')\n parser.add_argument('--host', help='server host', default='localhost')\n parser.add_argument('--port', help='server port', default='5000')\n parser.add_argument('--sync', help='if sync version', action='store_true')\n args = parser.parse_args()\n client = FLClient(f'http://{args.host}:{args.port}')\n data = dataloader(args.classes)\n for epoch in range(200):\n if args.sync:\n client.pull(round=epoch)\n else:\n client.pull()\n client.run(data, device='cuda')\n\n","repo_name":"Team-EU/fl","sub_path":"tests/cifar10/test_client.py","file_name":"test_client.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"70522796632","text":"from sqlalchemy.dialects.mysql import TINYINT\nfrom sqlalchemy.types import Integer, String\nfrom common import errors as er\nfrom common import helpers\nfrom db import update\n\nimport pandas as pd\nimport argparse\nimport datetime\nimport sys\nimport re\n\n\ndef get_camp_data_list(prefix = \"camps/camp\", suffix = \".csv\"):\n\t\n\tobjects = [obj for obj in helpers.get_matching_s3_keys(prefix = prefix, suffix = suffix)]\n\t\n\treturn(objects)\n\t\n\ndef get_camp_data_file(camp_data_file):\n\t\n\tdf = pd.read_csv(helpers.get_object_s3(camp_data_file))\n\t\n\treturn(df)\n\n\ndef add_s3_file_name(df, camp_data_file):\n\t\n\tdf['file_name_s3'] = camp_data_file\n\t\n\treturn(df)\n\n\ndef 
add_s3_date(df, camp_data_file): \n\t\n\tdate = re.findall('camp_(.*).csv', camp_data_file)[0]\n\tdate = datetime.datetime.strptime(date, '%d%m%Y')\n\tdate = datetime.datetime.strftime(date, '%Y-%m-%d') \n\t\n\tdf['file_upload_to_s3_date'] = date\n\t\n\treturn(df)\n \n \ndef get_camp_data(camp_data_list):\n\t \n\t df = [get_camp_data_file(camp_data_file) for camp_data_file in camp_data_list]\n\t \n\t return(df)\n\n\ndef add_camp_data_columns(df, camp_data_list):\n\t\n\tdf = [add_s3_file_name(data, file_name) for data, file_name in zip(df, camp_data_list)]\n\tdf = [add_s3_date(data, file_name) for data, file_name in zip(df, camp_data_list)]\n\tdf = pd.concat(df, ignore_index = True)\n\t\n\tdf = df[['id', 'phone', 'jcn', 'jc_status', 'time_pref', 'time_pref_label', 'file_name_s3', 'file_upload_to_s3_date', \n\t\t 'breastfeeding', 'pregnant', 'children_under6', 'teenage_girls', 'nocategory']]\n\tdf['insert_date'] = str(datetime.datetime.today())\n\tdf['enrolment_date'] = ''\n\tdf['pilot'] = 0\n\t\n\treturn(df)\n\n\ndef remove_epod_staff(df):\n\t\n\tdf = df.loc[df['id'] >= 1000]\n\t\n\treturn(df)\n\n\ndef check_if_camp_table_exists():\n\t\n\tdb_name = helpers.sql_connect()['db']\n\tengine = helpers.db_engine()\n\ttable_exists = update.check_table_exists(engine, db_name, 'enrolment_record')\n\t\n\treturn(table_exists)\n\n\ndef check_data_from_s3(file_name_s3_source, file_name_s3_target):\n\t\n\tdf_source = pd.read_csv(helpers.get_object_s3(file_name_s3_source))\n\tdf_target = pd.read_csv(helpers.get_object_s3(file_name_s3_target))\n\t\n\tvariables_list = ['phone', 'jc_status', 'time_pref', \n\t\t\t\t 'time_pref_label', 'breastfeeding', \n\t\t\t\t 'pregnant', 'children_under6', 'teenage_girls', 'nocategory']\n\t\n\tdf_check = pd.merge(df_source, df_target, how = 'inner', on = ['id'], indicator = True)\n\t\n\tfor var in variables_list: df_check[var + '_check_failed'] = (df_check[var + '_x'] != df_check[var + '_y']).astype(int)\n\t\n\tcheck_columns = [var + '_check_failed' 
for var in variables_list]\n\tdf_check['check'] = df_check[check_columns].sum(axis = 1)\n\tdf_check = df_check.loc[df_check['check'] != 0]\n\tdf_check['file_name_source'] = file_name_s3_source\n\tdf_check['file_name_target'] = file_name_s3_target\n\t\n\treturn(df_check)\n\ndef run_data_check_from_s3(camp_data_list):\n\t\n\tdf_result = []\n\t\n\tfor source in camp_data_list: \n\t\tfor target in camp_data_list:\n\t\t\ttry: \n\t\t\t\tdf_result.append(check_data_from_s3(source, target))\n\t\t\t\n\t\t\texcept Exception as e:\n\t\t\t\tsubject = 'GMA Error: There are inconsistent columns in the camp data...!'\n\t\t\t\tmessage = 'We are comparing {} with {}. Raipur team, help!'.format(source, target)\n\t\t\t\thelpers.send_email(subject, message)\n\t\t\t\tsys.exit() \n\t\n\tdf_result = pd.concat(df_result)\n\t\n\tif len(df_result) > 0:\n\t\t\n\t\ttoday = str(datetime.datetime.today().date())\n\t\tfile_name_today = datetime.datetime.strptime(today, '%Y-%m-%d').strftime('%d%m%Y')\n\t\t\n\t\tdf_result.to_csv('./output/camp_checks.csv', index = False)\n\t\thelpers.upload_s3('./output/camp_checks.csv', 'tests/camp_checks_{}.csv'.format(file_name_today))\n\t\t\n\t\tsubject = 'GMA Error: There are inconsistent rows in the camp data...!'\n\t\tmessage = 'We are comparing {} with {}. Check /tests/camp_checks_{}.csv. 
Raipur team, help!'.format(source, target, file_name_today)\n\t\thelpers.send_email(subject, message)\n\t\t\t\n\treturn(df_result)\n\n\ndef check_data_from_db(file_name_s3_source, file_name_s3_target, var):\n\t\n\tengine = helpers.db_engine()\n\t\n\tdf = pd.read_csv(helpers.get_object_s3(file_name_s3_source))\n\tdf_db = pd.read_sql(\"SELECT id, {} FROM enrolment_record where file_name_s3 = '{}'\".format(var, file_name_s3_target), con = engine)\n\t\n\tdf_check = pd.merge(df, df_db, how = 'inner', on = ['id'], indicator = True)\n\t\n\treturn(df_check, df_db, df)\n\n\ndef get_camp_table_from_db():\n\t\n\tengine = helpers.db_engine()\n\t\n\tdf_db = pd.read_sql(\"SELECT id FROM enrolment_record;\", con = engine)\n\t\n\treturn(df_db)\n\n\ndef get_new_trainees(df, df_db):\n\t\n\tnew_df = update.anti_join(df, df_db, on = ['id'])\n\t\n\treturn(new_df)\n\t\n\ndef get_health_category(df): \n\t \n\tdf['health_category'] = 0\n\t\n\tdf.loc[(df['breastfeeding'] == 0) & (df['pregnant'] == 0) & (df['children_under6'] == 1) & (df['teenage_girls'] == 0) & (df['nocategory'] == 0), 'health_category'] = 1\n\tdf.loc[(df['breastfeeding'] == 0) & (df['pregnant'] == 0) & (df['children_under6'] == 0) & (df['teenage_girls'] == 1) & (df['nocategory'] == 0), 'health_category'] = 2\n\t\n\tdf.loc[(df['breastfeeding'] == 1) & (df['pregnant'] == 0) & (df['children_under6'] == 0) & (df['teenage_girls'] == 0) & (df['nocategory'] == 0), 'health_category'] = 3\n\tdf.loc[(df['breastfeeding'] == 0) & (df['pregnant'] == 1) & (df['children_under6'] == 0) & (df['teenage_girls'] == 0) & (df['nocategory'] == 0), 'health_category'] = 4\n\t\n\tdf.loc[(df['breastfeeding'] == 0) & (df['pregnant'] == 0) & (df['children_under6'] == 0) & (df['teenage_girls'] == 0) & (df['nocategory'] == 1), 'health_category'] = 5\n\tdf.loc[(df['breastfeeding'] == 0) & (df['pregnant'] == 0) & (df['children_under6'] == 1) & (df['teenage_girls'] == 1) & (df['nocategory'] == 0), 'health_category'] = 6\n\t\n\tdf.loc[(df['breastfeeding'] 
== 1) & (df['pregnant'] == 0) & (df['children_under6'] == 1) & (df['teenage_girls'] == 0) & (df['nocategory'] == 0), 'health_category'] = 7\n\tdf.loc[(df['breastfeeding'] == 0) & (df['pregnant'] == 1) & (df['children_under6'] == 1) & (df['teenage_girls'] == 0) & (df['nocategory'] == 0), 'health_category'] = 8\n\t\n\tdf.loc[(df['breastfeeding'] == 1) & (df['pregnant'] == 0) & (df['children_under6'] == 0) & (df['teenage_girls'] == 1) & (df['nocategory'] == 0), 'health_category'] = 9\n\tdf.loc[(df['breastfeeding'] == 0) & (df['pregnant'] == 1) & (df['children_under6'] == 0) & (df['teenage_girls'] == 1) & (df['nocategory'] == 0), 'health_category'] = 10\n\t \n\tdf.loc[(df['breastfeeding'] == 1) & (df['pregnant'] == 1) & (df['children_under6'] == 0) & (df['teenage_girls'] == 0) & (df['nocategory'] == 0), 'health_category'] = 11\n\tdf.loc[(df['breastfeeding'] == 1) & (df['pregnant'] == 1) & (df['children_under6'] == 1) & (df['teenage_girls'] == 0) & (df['nocategory'] == 0), 'health_category'] = 12\n\n\tdf.loc[(df['breastfeeding'] == 1) & (df['pregnant'] == 0) & (df['children_under6'] == 1) & (df['teenage_girls'] == 1) & (df['nocategory'] == 0), 'health_category'] = 13\n\tdf.loc[(df['breastfeeding'] == 0) & (df['pregnant'] == 1) & (df['children_under6'] == 1) & (df['teenage_girls'] == 1) & (df['nocategory'] == 0), 'health_category'] = 14\n\t\n\tdf.loc[(df['breastfeeding'] == 1) & (df['pregnant'] == 1) & (df['children_under6'] == 0) & (df['teenage_girls'] == 1) & (df['nocategory'] == 0), 'health_category'] = 15\n\tdf.loc[(df['breastfeeding'] == 1) & (df['pregnant'] == 1) & (df['children_under6'] == 1) & (df['teenage_girls'] == 1) & (df['nocategory'] == 0), 'health_category'] = 16\n\t\n\treturn(df)\n\n\ndef put_new_trainees(new_df):\n\n\tengine = helpers.db_engine()\n\tconn = engine.connect()\n\t\n\tif not new_df.empty: \n\t\n\t\ttry: \n\t\t\n\t\t\tnew_df.to_sql('enrolment_record', if_exists = 'append', con = engine, index = False, chunksize = 100, \n\t\t\t\t\t\t dtype = 
{'id': Integer(), \n\t\t\t\t\t\t \t\t'phone': String(50), \n\t\t\t\t\t\t\t\t'jcn': String(50),\n\t\t\t\t\t\t\t\t'jc_status': Integer(), \n\t\t\t\t\t\t\t\t'time_pref': String(50),\n\t\t\t\t\t\t\t\t'time_pref_label': String(50),\n\t\t\t\t\t\t\t\t'file_name_s3': String(50),\n\t\t\t\t\t\t\t\t'file_upload_to_s3_date': String(50),\n\t\t\t\t\t\t\t\t'breastfeeding': String(50),\n\t\t\t\t\t\t\t\t'pregnant': String(50),\n\t\t\t\t\t\t\t\t'children_under6': String(50), \n\t\t\t\t\t\t\t\t'teenage_girls': String(50), \n\t\t\t\t\t\t\t\t'nocategory': String(50),\n\t\t\t\t\t\t\t\t'health_category': String(50), \n\t\t\t\t\t\t\t\t'insert_date': String(50),\n\t\t\t\t\t\t\t\t'enrolment_date': String(50),\n\t\t\t\t\t\t\t\t'pilot': TINYINT(2)})\n\t\t\n\t\texcept Exception as e: \n\t\t\ter.handle_error(error_code ='23', data = {})\n\t\t\tsys.exit()\n\t\t\n\treturn\n\n\ndef make_camp_primary_key():\n\t\n\tengine = helpers.db_engine()\n\tconn = engine.connect()\n\t\n\ttry: has_primary_key = update.check_primary_key(engine, 'enrolment_record')\n\t\n\texcept Exception as e: \n\t\t\n\t\ter.handle_error(error_code ='24', data = {})\n\t\tsys.exit()\n\t\n\ttry: \n\t\tif has_primary_key == 0: update.create_primary_key(engine, \"enrolment_record\", \"id\")\n\t\n\texcept Exception as e: \n\t\t\n\t\ter.handle_error(error_code ='25', data = {})\n\t\tsys.exit()\n\t\n\treturn\n\n\t\ndef main():\n\t\n\t# Create parser for command line arguments\n\tparser = argparse.ArgumentParser(description ='Parse the data for script generation')\n\tparser.add_argument('--prefix', type = str, help ='Prefix for file names to be searched on S3', default = 'camps/camp')\n\tparser.add_argument('--suffix', type = str, help ='Suffix for file names to be searched on S3', default = '.csv')\n\targs = parser.parse_args()\n\t\n\t# Parse arguments\n\tprefix = args.prefix\n\tsuffix = args.suffix\t\n\t\n\tcamp_data_list = get_camp_data_list(prefix = prefix, suffix = suffix)\n\trun_data_check_from_s3(camp_data_list)\n\t\n\tdf = 
get_camp_data(camp_data_list)\n\tdf = add_camp_data_columns(df, camp_data_list)\n\tdf = remove_epod_staff(df)\n\t\n\tif check_if_camp_table_exists() == 1:\n\t\tdf_db = get_camp_table_from_db()\n\t\tdf = get_new_trainees(df, df_db)\n\t\tdf = get_health_category(df)\n\t\n\tput_new_trainees(df)\n\tmake_camp_primary_key()\n\t\n\tsubject = 'GMA Update: The camp data has been refreshed...'\n\tmessage = ''\n\thelpers.send_email(subject, message)\n\nif __name__ == '__main__':\n\tmain()","repo_name":"akshatgoel92/welfare-monitor","sub_path":"script/put_camp.py","file_name":"put_camp.py","file_ext":"py","file_size_in_byte":9795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"42693085262","text":"from pathlib import Path\nimport yaml\n\nPROJECT_ROOT = Path('.')\nCONFIG = PROJECT_ROOT / 'configs.yml'\nCONFIG_SECRET = PROJECT_ROOT.resolve().parent / 'secret_configs.yml'\n\nSECTIONS = ['general', 'production', 'development']\nCONFIGS_GENERAL = ['project_name', 'app_name', 'version']\nCONFIGS_PRODUCTION = ['backend_domain', 'frontend_domain', 'backend_port']\nCONFIGS_DEVELOPMENT = ['frontend_domain', 'backend_port', 'frontend_port', 'server_port']\nCONFIGS_SECRET = ['secret_key', 'email_address', 'email_password',\n 'twilio_sms_from', 'twilio_sid', 'twilio_token', 'twilio_test_sid', 'twilio_test_token']\n\n\ndef load_config(config_path):\n with open(config_path, 'r') as cfg_file:\n cfg = yaml.safe_load(cfg_file)\n return cfg\n\n\ndef test_sections():\n config = load_config(CONFIG)\n assert set(config.keys()) == set(SECTIONS)\n\n\ndef test_configs():\n config = load_config(CONFIG)\n assert set(config['general'].keys()) == set(CONFIGS_GENERAL)\n assert set(config['production'].keys()) == set(CONFIGS_PRODUCTION)\n assert set(config['development'].keys()) == set(CONFIGS_DEVELOPMENT)\n\n\ndef test_secret_configs():\n config = load_config(CONFIG)\n secret_config = load_config(CONFIG_SECRET)\n project_name = 
config['general']['project_name']\n assert project_name in secret_config.keys()\n assert set(secret_config[project_name].keys()) == set(CONFIGS_SECRET)\n","repo_name":"MarkusKiesel93/bierrallye_webpage","sub_path":"tests/test_config.py","file_name":"test_config.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"14606921886","text":"'''5a. \r\nWrite a program to create a singly linked list for the following operations\r\n• Insert a Node at Beginning, at Ending and at a given Position\r\n• Delete a Node at Beginning, at Ending and at a given Position\r\n• Search, Count the Number of Nodes and Display'''\r\n\r\nclass Node:\r\n # create a node for the linked list\r\n def __init__(self, data=None):\r\n self.data = data\r\n self.next = None\r\n\r\n# create a singly linked list class\r\nclass SinglyLinkedList:\r\n def __init__(self):\r\n self.head = None\r\n\r\n # insert an element at the beginning\r\n def AddFirst(self,newdata):\r\n NewNode = Node(newdata)\r\n NewNode.next = self.head\r\n self.head = NewNode\r\n # insert an element at the end\r\n def AddLast(self, newdata):\r\n NewNode = Node(newdata)\r\n if self.head is None:\r\n self.head = NewNode\r\n return\r\n last_element = self.head\r\n while(last_element.next):\r\n last_element = last_element.next\r\n last_element.next=NewNode\r\n # insert an element in the middle of the linked list\r\n def AddBetween(self,middle_node,newdata):\r\n if middle_node is None:\r\n print(\"The mentioned node is absent\")\r\n return\r\n NewNode = Node(newdata)\r\n NewNode.next = middle_node.next\r\n middle_node.next = NewNode\r\n # remove a specified element\r\n def RemoveElement(self, remove_key):\r\n head = self.head\r\n if (head is not None):\r\n if (head.data == remove_key):\r\n self.head = head.next\r\n head = None\r\n return\r\n\r\n while (head is not None):\r\n if head.data == remove_key:\r\n break\r\n prev = head\r\n head = 
head.next\r\n print(\"Removed Element is\", remove_key)\r\n if (head == None):\r\n return\r\n prev.next = head.next\r\n head = None\r\n # display the linked list\r\n def DisplayList(self):\r\n print_element = self.head\r\n while print_element is not None:\r\n print (print_element.data)\r\n print_element = print_element.next\r\n # count the number of nodes\r\n def CountNodes(self, node):\r\n count = 0\r\n while node:\r\n count +=1\r\n node=node.next\r\n return count\r\n # search for an element\r\n def SearchElement(self, search_element):\r\n current_node = self.head\r\n while current_node != None:\r\n if current_node.data == search_element:\r\n print(\"Element {} found.\" .format(search_element))\r\n current_node = current_node.next\r\n\r\n# create an instance of the class SinglyLinkedList\r\nsll = SinglyLinkedList()\r\nsll.head = Node(10)\r\ne2 = Node(20)\r\ne3 = Node(30)\r\nsll.head.next = e2\r\ne2.next = e3\r\nsll.AddFirst(0)\r\nsll.AddLast(50)\r\nsll.AddBetween(sll.head.next, 40)\r\nsll.DisplayList()\r\nsll.RemoveElement(30)\r\nsll.SearchElement(40)\r\n","repo_name":"srinijadharani/DataStructuresLab","sub_path":"05/05_singly_linked_list.py","file_name":"05_singly_linked_list.py","file_ext":"py","file_size_in_byte":2950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"39416380131","text":"import os\n\nfrom .ilcd import grab_flow_name\nfrom ..xml_widgets import *\nfrom .ilcd_lcia import IlcdLcia\n\nELCD = os.path.join('/data', 'LCI', 'ELCD', 'ELCD3.2.zip')\n\n\ndef ilcd_flow_generator(archive=ELCD, **kwargs):\n \"\"\"\n This generates flows from the current reference ELCD archive.\n :param archive:\n :param kwargs:\n :return:\n \"\"\"\n i = IlcdLcia(archive, **kwargs)\n count = 0\n for f in i.list_objects('Flow'):\n o = i.objectify(f, dtype='Flow')\n if o is not None:\n yield o\n count += 1\n if count % 1000 == 0:\n print('%d data sets completed' % count)\n\n\nilcd_bad_synonyms = {\n 
'fe0acd60-3ddc-11dd-a6f8-0050c2490048': \"Crude Oil; 42.3 MJ/kg\",\n '08a91e70-3ddc-11dd-944a-0050c2490048': \"Carbon, resource, in ground\",\n '3e4d9eab-6556-11dd-ad8b-0800200c9a66': \"Wood; 14.7 MJ/kg\"\n}\n\n\nilcd_bad_cas = {\n\n}\n\n\ndef synonyms_from_ilcd_flow(flow, skip_syns=False):\n \"\"\"\n ILCD flow files have long synonym blocks at the top. They also have a CAS number and a basename.\n\n Skips synonym blocks for ILCD flows known to have bad synonyms:\n * \"Crude Oil; 42.3 MJ/kg\" is not a synonym for \"Benzene, pure\", etc.\n * \"Carbon [resource, in ground]\" is not a synonym for the variety of compounds that may be manufactured from it\n * \"Wood; 14.7 MJ/kg\" says synonyms removed but weren't.\n\n Skips 'wood' from any list, which is abused badly in the ILCD synonyms list. Methanol and wood are not synonymous.\n :param flow:\n :param skip_syns: return name, uuid, and cas but skip synonyms\n :return: uuid (str), name (str), syns (set, includes name, excludes uuid)\n \"\"\"\n ns = find_ns(flow.nsmap, 'Flow')\n uid = str(find_common(flow, 'UUID')).strip()\n syns = set()\n name = grab_flow_name(flow, ns=ns)\n syns.add(name)\n cas = str(find_tag(flow, 'CASNumber', ns=ns)).strip()\n if cas != '':\n syns.add(cas)\n if skip_syns:\n print('Skipping synonyms')\n elif uid in ilcd_bad_synonyms:\n print('Skipping Synonyms for %s' % name)\n else:\n for syn in find_tags(flow, 'synonyms', ns='common'):\n for x in str(syn).split(';'):\n if x.strip() != '' and x.strip().lower() != 'wood':\n syns.add(x.strip())\n return uid, name, syns\n","repo_name":"bkuczenski/lca-tools","sub_path":"antelope_catalog/providers/ilcd/ilcd_flowables.py","file_name":"ilcd_flowables.py","file_ext":"py","file_size_in_byte":2314,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"5"} +{"seq_id":"13554031485","text":"from flask import Flask, render_template, request\nimport requests\nimport json\n\napp = Flask(__name__)\n\n@app.route('/weather', 
methods=['GET', 'POST'])\ndef weather():\n if request.method == 'POST':\n location = request.form['location']\n api_key = 'fce1c8ed838180cacc6f2bf5079a3b4d'\n url = f'http://api.openweathermap.org/data/2.5/weather?q={location}&appid={api_key}&units=metric'\n response = requests.get(url)\n data = json.loads(response.text)\n weather = {\n 'description': data['weather'][0]['description'],\n 'temperature': data['main']['temp'],\n 'humidity': data['main']['humidity'],\n 'wind_speed': data['wind']['speed']\n }\n return render_template('weather.html', weather=weather)\n return render_template('index.html')\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"drone-droid/Python-Projects","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"29343392678","text":"# %% [markdown]\n# # Dataset\n\n# %%\nfrom gaitor_function_calling.data.prompting_utils import parse_prompt_back_to_data, function_calling_tokens, INSTRUCTION\nfrom gaitor_function_calling.evaluation.evaluation_utils import FunctionCallingMetric\nfrom datasets import load_dataset\nimport numpy\nrelative_path_to_data = './production_eval_chat-instruction.json'\ndataset = load_dataset('json', data_files={'train': relative_path_to_data}, split=\"train\")\nprint(\"Dataset size: \", len(dataset), end=\"\\n\\n\")\nprint(\"Sample data: \", dataset[0][\"text\"], end=\"\\n\\n\")\ninstruction = INSTRUCTION\n\n# %% [markdown]\n# # Load Models\n\n# %%\nfrom transformers import AutoTokenizer\nfrom peft import AutoPeftModelForCausalLM\nimport torch\n\n# Set the path to the checkpoint directory\nhub_id = \"SebastianS/function_calling-llama_7b-nat-fc_only\"\n\n# load base LLM model and tokenizer\nfc_model = AutoPeftModelForCausalLM.from_pretrained(\n hub_id,\n low_cpu_mem_usage=True,\n torch_dtype=torch.float16,\n load_in_4bit=True,\n)\nfc_tokenizer = 
AutoTokenizer.from_pretrained(hub_id)\n\n\n# %% [markdown]\n# # Metric\n\n# %%\nimport re\nimport json\nfrom transformers import AutoTokenizer, AutoModel\nfrom scipy.spatial.distance import cosine\n\n# Load a pre-trained model for sentence embedding (e.g., SBERT)\nmetric = FunctionCallingMetric()\n\n# %% [markdown]\n# # Iterator\n\n# %%\nmetric_scores = []\nfor i, data in enumerate(dataset):\n if 0 < i < len(dataset):\n try:\n inp, target = data[\"text\"].split(\"[/INST]\")\n prompt = inp + \"[/INST]\"\n input_ids = fc_tokenizer(prompt, return_tensors=\"pt\", truncation=True).input_ids.cuda()\n outputs = fc_model.generate(input_ids=input_ids, do_sample=True, top_p=0.9,temperature=0.9)\n \n expected_str = data[\"text\"]\n generated_str = fc_tokenizer.batch_decode(outputs.detach().cpu().numpy())[0]\n \n \n expected_data = parse_prompt_back_to_data(expected_str, instruction)\n generated_data = parse_prompt_back_to_data(generated_str, instruction)\n parse_prompt_back_to_data(generated_str, instruction)\n \n if \"function_call\" not in generated_data[\"target\"][\"chatgptMessage\"]:\n metric_scores.append(0)\n print(f\"{i} Metric score: {0}\")\n continue\n generated_arguments = json.loads(generated_data[\"target\"][\"chatgptMessage\"][\"function_call\"][\"arguments\"])\n expected_arguments = json.loads(expected_data[\"target\"][\"chatgptMessage\"][\"function_call\"][\"arguments\"])\n \n metric_score = metric.run(generated_arguments, expected_arguments)\n metric_scores.append(metric_score)\n \n print(f\"{i} Metric score: {metric_score:.2f}\")\n except Exception as e:\n print(\"Error: \", e)\n\nprint(f\"Average metric score: {sum(metric_scores) / len(metric_scores):.2f}\")\n\n\n# %%\n\n\n\n","repo_name":"CakeCrusher/gaitor-function-calling","sub_path":"evaluation/evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":2893,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} 
+{"seq_id":"17232655096","text":"'''\nEvaluate the value of an arithmetic expression in Reverse Polish Notation.\n\nValid operators are +, -, *, and /. Each operand may be an integer or another expression.\n\nNote that division between two integers should truncate toward zero.\n\nIt is guaranteed that the given RPN expression is always valid. That means the expression would always evaluate to a result, and there will not be any division by zero operation.\n'''\n\n'''\n注意只有一个的情况\n不用eval,就分四种情况\n'''\n\n'''\npython是向下取整\nc语言是向0取整\n这道题是向0取整\n'''\n\ndef evalRPN(tokens: List[str]) -> int:\n stack = []\n tokens = tokens[::-1]\n operator = '+-*/'\n s = ''\n while tokens:\n cur = tokens.pop()\n while tokens and not cur in operator:\n stack.append(cur)\n cur = tokens.pop()\n if not cur in operator:\n result = int(cur)\n else:\n b = stack.pop()\n a = stack.pop()\n # print(a,b,''.join([a,cur,b]))\n result = int(eval(''.join([a,cur,b])))\n stack.append(str(result))\n result = int(stack[0])\n return result","repo_name":"cenjianxun/leetcode-record","sub_path":"circle1/150-EvaluateReversePolishNotation-M.py","file_name":"150-EvaluateReversePolishNotation-M.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"31429256341","text":"def swapFileData(fileName1, fileName2):\n \"\"\"\n Swap the contents of two files\n \"\"\"\n with open(fileName1, 'r') as file1:\n with open(fileName2, 'r') as file2:\n file1Data = file1.read()\n file2Data = file2.read()\n with open(fileName1, 'w') as file1:\n with open(fileName2, 'w') as file2:\n file1.write(file2Data)\n file2.write(file1Data)\n\ninputFile1 = input(\"Enter the name of the first file: \")\ninputFile2 = input(\"Enter the name of the second file: \")\nswapFileData(inputFile1, inputFile2)\nprint(\"The files have been swapped. 
have fun\")\n","repo_name":"hrichiksite/python-whjr","sub_path":"c98/project/swappingFile.py","file_name":"swappingFile.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"39410911547","text":"#Calcular e imprimir la suma de los números enteros comprendidos entre dos\n#números enteros A y B ingresados por teclado. Tener en cuenta que A puede ser\n#mayor, menor o igual que B.\nprint('Calcular la suma de los numeros enteros comprendidos entre dos numeros enteros')\nA=int(input('Ingrese el valor de A: '))\nB=int(input('Ingrese el valor de B: '))\nif A<=B:\n A=A\n B=B\nelse:\n C=B\n B=A\n A=C\ncont=A+1\nsuma=0\nwhile cont Scope:\n scope: Scope = copy.deepcopy(default_scope)\n if options.headers_only:\n if not options.command:\n options.command = \"HEAD\"\n\n scope[\"method\"] = (options.command or \"GET\").upper()\n\n for line in options.header:\n name, value = line.encode(\"latin-1\").split(b\": \", 1)\n scope[\"headers\"].append((name.lower(), value))\n\n url = urljoin(base_url, options.url)\n scheme, netloc, path, query, _ = urlsplit(url)\n scope[\"scheme\"] = scheme or \"http\"\n if \":\" in netloc:\n host, sport = netloc.split(\":\", 1)\n port = int(sport)\n else:\n host = netloc\n port = default_port[scheme]\n scope[\"server\"] = (host, port)\n scope[\"headers\"].append((b\"host\", netloc.encode(\"latin-1\")))\n scope[\"raw_path\"] = urljoin(path, query).encode()\n scope[\"path\"] = unquote(path)\n scope[\"query_string\"] = query.encode()\n return scope\n\n\nasync def gen_chunks(options: Options) -> typing.AsyncIterator[Message]:\n if not options.multipart:\n yield {\"type\": \"http.request\", \"body\": options.data}\n return\n boundary = b\"--\" + options.boundary\n for multipart in options.multipart:\n name, value = multipart.split(\"=\", 1)\n if value.startswith(\"@\"):\n fn = value[1:]\n mime_type, _ = guess_type(fn)\n if not mime_type:\n mime_type = 
\"application/octet-stream\"\n yield {\n \"type\": \"http.request\",\n \"more_body\": True,\n \"body\": b\"\".join(\n (\n boundary,\n b\"\\r\\n\" b'Content-Disposition: form-data; name=\"',\n name.encode(\"utf-8\"),\n b'\"; filename=\"',\n fn.encode(\"utf-8\"),\n b'\"\\r\\n' b\"Content-Type: \",\n mime_type.encode(\"latin-1\"),\n b\"\\r\\n\\r\\n\",\n )\n ),\n }\n f = open(fn, mode=\"rb\")\n while True:\n data = f.read(1024)\n if not data:\n break\n yield {\n \"type\": \"http.request\",\n \"more_body\": True,\n \"body\": data,\n }\n f.close()\n yield {\n \"type\": \"http.request\",\n \"more_body\": True,\n \"body\": b\"\\r\\n\",\n }\n else:\n yield {\n \"type\": \"http.request\",\n \"more_body\": True,\n \"body\": b\"\".join(\n (\n boundary,\n b'\\r\\nContent-Disposition: form-data; name=\"',\n name.encode(\"utf-8\"),\n b'\"\\r\\n\\r\\n',\n value.encode(\"utf-8\"),\n b\"\\r\\n\",\n )\n ),\n }\n yield {\"type\": \"http.request\", \"body\": boundary + b\"--\"}\n","repo_name":"akornatskyy/asgi-cli","sub_path":"src/asgi_cli/builder.py","file_name":"builder.py","file_ext":"py","file_size_in_byte":3636,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"5"} +{"seq_id":"12265820173","text":"import cv2\nfaceCascade=cv2.CascadeClassifier(\"Resources/haarcascade_frontalface_default.xml\")\ncap=cv2.VideoCapture(0) # 0 for default webcam\n# Set size\ncap.set(3,680) # ID: 3 . 
width\ncap.set(4,480) # ID: 4 , height\n# Set brightness \ncap.set(10,200) # ID 10: brightness\nwhile True:\n # success is boolean value\n success, img = cap.read()\n img=cv2.flip(img, 1) # 0, flip vertical, 1 flip horizontal\n imgGray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n faces=faceCascade.detectMultiScale(imgGray,1.1,4)\n for(x,y,w,h) in faces:\n cv2.rectangle(img,(x,y),(x+w,y+h),(50,255,0),2)\n cv2.putText(img,\"Human\",(x+(w//2),y-10),cv2.FONT_HERSHEY_COMPLEX,0.5,(50,255,0),2)\n cv2.imshow(\"My Webcam\",img)\n if cv2.waitKey(1) & 0xFF == ord('q'): #Press q to quite\n break\n ","repo_name":"HaColab2k/DEEP-LEARNING","sub_path":"Projects/OPENCV/Projects/CameraFaceDetection.py","file_name":"CameraFaceDetection.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"20528039408","text":"# Given a set of distinct integers, nums, return all possible subsets (the power set).\n\n# Note: The solution set must not contain duplicate subsets.\n\n# Example:\n\n# Input: nums = [1,2,3]\n# Output:\n# [\n# [3],\n# [1],\n# [2],\n# [1,2,3],\n# [1,3],\n# [2,3],\n# [1,2],\n# []\n# ]\n\n\nclass Solution:\n def subsets(self, nums: List[int]) -> List[List[int]]:\n subsets = list()\n \n stack = [([], 0)]\n end = len(nums) - 1\n while len(stack):\n subset, i = stack.pop()\n \n subset_copy = subset[:]\n subset_copy.append(nums[i])\n \n if i == end:\n subsets.append(subset)\n subsets.append(subset_copy)\n else:\n stack.append((subset, i+1))\n stack.append((subset_copy, i+1))\n \n return subsets\n ","repo_name":"frvnkly/algorithm-practice","sub_path":"leetcode/july-2020-challenge/day11/subsets.py","file_name":"subsets.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"11497911160","text":"pumps = int(input())\ntank = 0\nindex = 0\n\nfor x in range(0, pumps):\n line = list(map(int, 
input().split(' ')))\n tank += line[0]\n\n if tank < line[1]:\n tank = 0\n index = x + 1\n continue\n\n else:\n tank -= line[1]\n\nprint(index)\n","repo_name":"PetarWho/SoftUni","sub_path":"Python Advanced/Lists as Stacks and Queues - Exercise/05. Truck Tour.py","file_name":"05. Truck Tour.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"70743784793","text":"from rest_framework import serializers\nfrom rest_framework_simplejwt.authentication import JWTAuthentication\nfrom .models import Comments\nimport jwt\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\n\n\nclass CommentSerializer(serializers.ModelSerializer):\n class Meta:\n model = Comments\n fields = ['comentarioId', 'created_at', 'comentario', 'rating', 'user', 'mitos']\n\n def validate_rating(self, value):\n if value < 1 or value > 5:\n raise serializers.ValidationError(\"El rating debe estar entre 1 y 5\")\n return value\n def create(self, validated_data):\n request = self.context['request']\n jwt_auth = JWTAuthentication()\n token = request.META['HTTP_AUTHORIZATION'].split()[1]\n validated_token = jwt_auth.get_validated_token(token)\n \n # Obtain the secret key from your Django settings\n secret_key = settings.SECRET_KEY\n \n # Decode the JWT and access its payload\n decoded_payload = jwt.decode(token, secret_key, algorithms=['HS256'])\n \n # Imprimir el valor de decoded_payload\n print(decoded_payload)\n \n user_id = decoded_payload['user_id']\n User = get_user_model()\n user = User.objects.get(pk=user_id)\n\n # Crear y guardar una instancia del modelo User utilizando los datos validados\n instance = Comments.objects.create(user=user, **validated_data)\n\n # Devolver la instancia creada\n return instance\n 
","repo_name":"GustavoNeiraGonzalez/mitosDjango","sub_path":"Backend/comentario/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1523,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"24963706840","text":"# -*- coding: utf-8 -*-\n# @author: erwei.zheng\n# @file: 后进先出队列.py\n# @datatime: 2023/5/1 14:24\n\n\"\"\"\n目标:掌握后进先出队列(栈)的用法\n\"\"\"\n\nfrom queue import LifoQueue\n\nq = LifoQueue()\nq.put(1)\nq.put(2)\nq.put(3)\nfor i in range(3):\n print(q.get(), end=' ') # 3 2 1 后进来的先出去","repo_name":"zhengew/R2CodingForPython","sub_path":"day35_并发编程_线程第二天_守护线程、线程不安全问题、线程锁、线程安全的单例模式、线程的互斥锁和递归锁、死锁现象、如何解决死锁、线程队列/exam/队列/后进先出队列.py","file_name":"后进先出队列.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"41784266073","text":"from cards import *\n\nclass Game():\n\tdef __init__(self):\n\t\tself.deck = Deck()\n\t\tself.user_hand = Hand()\n\t\tself.computer_hand = Hand()\n\t\tself.user_points = 0\n\t\tself.computer_points = 0\n\n\tdef create_hands(self):\n\t\tself.deck.shuffle()\n\t\tself.draw = input(\"The deck has been shuffled, write 'd' to draw your hand: \")\n\t\tif self.draw == 'd':\n\t\t\tself.user_hand.cards = self.deck.draw(26)\n\t\t\tprint(\"Here's your hand :\\n {}\".format(self.user_hand.cards))\n\t\t\tself.computer_hand.cards = self.deck.draw(26)\n\n\tdef play(self):\n\t\tuser_input = input(\"write 'p' to play a card: \")\n\t\tif user_input == 'p':\n\t\t\tself.user_hand.shuffle()\n\t\t\tself.user_card = self.user_hand.draw(1)\n\t\t\tprint(\"You played the {}\".format(self.user_card[0]))\n\t\t\tself.computer_hand.shuffle()\n\t\t\tself.computer_card = self.computer_hand.draw(1)\n\t\t\tprint(\"The computer plays {}\".format(self.computer_card[0]))\n\n\tdef winner(self):\n\t\t\n\t\tif self.user_card > self.computer_card:\n\t\t\tself.user_points += 1\n\t\t\tprint(\"You win! 
You have now {} points\".format(self.user_points))\n\n\t\telif self.user_card < self.computer_card:\n\t\t\tself.computer_points += 1\n\t\t\tprint(\"The computer wins! He has now {} points\".format(self.computer_points))\n\n\t\telif self.user_card\t== computer_card:\n\t\t\tprint(\"Nobody wins, play again!\")\n\n\t\t\t\ng = Game()\ng.create_hands()\nwhile len(g.user_hand.cards) > 0:\n\tg.play()\n\tg.winner()\nif g.user_points > g.computer_points:\n\tprint(\"Game over. You win with {} points while the computer got only {} points\".format(g.user_points, g.computer_points))\nelif g.user_points < g.computer_points:\n\tprint(\"Game over. The computer wins with {} points while you got only {} points\".format(g.computer_points, g.user_points))\nelse:\n\tprint(\"Its a tie!\")\n\n\n","repo_name":"SophieHau/War-game-exercise","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"27954583427","text":"#!/usr/bin/python3\n\nimport pygame#import pygame for 2d and image\nfrom pygame.locals import *#import constante pygame\n\nimport functions\nimport game\n\npygame.init()#initialize pygame\n\ndef main():\n \"\"\"Game laucher\"\"\"\n main_menu = True#initialise main_menu to true\n\n pygame.key.set_repeat(200, 200)#enabled key repeat\n\n screen = pygame.display.set_mode((500, 400))#initialize screen\n pygame.display.set_caption(\"Labyrinth\")#rename screen\n\n #main loop\n while main_menu:\n if functions.main_menu(screen) == True:#if user wants to play\n game.game(screen)#charged game\n else:#else\n break#quit\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"tetrew88/labyrinth","sub_path":"MacGyver.py","file_name":"MacGyver.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"20721291790","text":"class BankAccount:\n def __init__(self, 
interest_rate = 0.0, balance = 0):\n \n if (interest_rate < 0):\n print(\"Error: Interest rate must be a non-negative value\")\n else:\n self.interest_rate = interest_rate\n \n self.balance = balance\n \n def deposit(self, amount):\n if (amount <= 0):\n print(\"Error: Please deposit an amount greater than $0\")\n else:\n previous_balance = self.balance\n self.balance += amount\n print(f\"Thank you for your deposit of ${amount}\")\n print(f\"Previous balance: ${previous_balance}\")\n print(f\"Final balance: ${self.balance}\")\n print(\"- - - - - - - - - - - - - - - - - - - -\")\n \n def withdraw(self, amount):\n if (amount <= 0):\n print(\"Error: Please withdraw an amount greater than $0\")\n else:\n previous_balance = self.balance\n self.balance -= amount\n if (self.balance < 0):\n print(\"Insufficient funds: Charging a $5 fee.\")\n self.balance -= 5\n print(f\"Thank you for your withdrawal of ${amount}\")\n print(f\"Previous balance: ${previous_balance}\")\n print(f\"Final balance: ${self.balance}\")\n print(\"- - - - - - - - - - - - - - - - - - - -\")\n \n def display_account_info(self):\n print(\"ACCOUNT INFORMATION\")\n print(f\"Account Balance: ${self.balance}\")\n print(f\"Current Interest Rate: {self.interest_rate * 100}%\")\n print(\"- - - - - - - - - - - - - - - - - - - -\")\n \n def yield_interest(self):\n if (self.balance < 0):\n print(\"Error: Must have a non-negative balance to yield interest\")\n print(f\"Current balance: ${self.balance}\")\n print(\"- - - - - - - - - - - - - - - - - - - -\")\n else:\n previous_balance = self.balance\n self.balance += self.balance * self.interest_rate\n print(f\"Previous balance: ${previous_balance}\")\n print(f\"Final balance after interest yield: ${self.balance}\")\n print(\"- - - - - - - - - - - - - - - - - - - -\")\n \n\naccount_1 = BankAccount(0.08, 100)\naccount_2 = BankAccount(0.02, 1000)\n\naccount_1.deposit(200), account_1.deposit(50), 
account_1.deposit(1200),\naccount_1.withdraw(500),\naccount_1.yield_interest(),\naccount_1.display_account_info()\n\naccount_2.deposit(1000), account_2.deposit(15000),\naccount_2.withdraw(500), account_2.withdraw(250), account_2.withdraw(1000), account_2.withdraw(20000)\naccount_2.yield_interest(),\naccount_2.display_account_info()","repo_name":"tajohnson0316/bank-account-assignment","sub_path":"bank-account.py","file_name":"bank-account.py","file_ext":"py","file_size_in_byte":2653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"35857905743","text":"import time\nfrom digitalio import DigitalInOut, Direction, Pull\nfrom adafruit_seesaw.seesaw import Seesaw\nfrom adafruit_seesaw.pwmout import PWMOut\nfrom adafruit_motor import servo, motor\nfrom busio import I2C\nimport board\n\ni2c = I2C(board.SCL, board.SDA)\nss = Seesaw(i2c)\n\nprint(\"Crickit demo!\")\n\n# use the CPX onboard switch to turn on/off (helps calibrate)\nswitch = DigitalInOut(board.SLIDE_SWITCH)\nswitch.direction = Direction.INPUT\nswitch.pull = Pull.UP\n\n#################### 4 Servos\nservos = []\nfor ss_pin in (17, 16, 15, 14):\n pwm = PWMOut(ss, ss_pin)\n pwm.frequency = 50\n _servo = servo.Servo(pwm)\n _servo.angle = 90 # starting angle, middle\n servos.append(_servo)\n\n#################### 2 DC motors\nmotors = []\nfor ss_pin in ((22, 23), (18, 19)):\n pwm0 = PWMOut(ss, ss_pin[0])\n pwm1 = PWMOut(ss, ss_pin[1])\n _motor = motor.DCMotor(pwm0, pwm1)\n motors.append(_motor)\n\nservos[0].angle = 180\n\nwhile True:\n if switch.value:\n # Switch is on, activate MUSIC POWER!\n\n # motor forward slowly\n motors[0].throttle = 0.2\n # mote the head forward slowly, over 0.9 seconds\n for a in range(180, 90, -1):\n servos[0].angle = a\n time.sleep(0.01)\n\n # motor stop\n motors[0].throttle = 0\n time.sleep(1)\n\n # motor backwards slowly\n motors[0].throttle = -0.2\n # move the head back slowly too, over 0.9 seconds\n for a in range(90, 180):\n 
servos[0].angle = a\n time.sleep(0.01)\n # calibration! its a *tiny* bit slower going back so give it a few ms\n time.sleep(0.007)\n\n # motor stop\n motors[0].throttle = 0\n time.sleep(1)\n else:\n # switch is 'off' so dont do anything!\n pass\n","repo_name":"adafruit/Adafruit_Learning_System_Guides","sub_path":"Crickits/metallobot/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":1771,"program_lang":"python","lang":"en","doc_type":"code","stars":913,"dataset":"github-code","pt":"5"} +{"seq_id":"28160852533","text":"# say hello to user and ask him to tell you his name\nprint('Welcome to the game of Guessing the number.')\nplayer_name = input('Please tell me your name: ')\n\n\ndef game():\n\t# import function for random number\n\tfrom random import randint\n\n\t# set highest number\n\thighest = 2\n\n\t# set range\n\tnumber = randint(1, highest)\n\n\t# number of tries\n\ttries = 0\n\n\t# ask him to set highest number\n\thighest = int(input('Please enter highest number {}: '.format(player_name)))\n\n\t# ask him to guess the random number\n\tcurrent_num = int(input(\"Guess the number between 1-{0} {1}: \".format(highest, player_name)))\n\n\t# check the number\n\twhile current_num != number:\n\t\tif current_num > number:\n\t\t\tprint(\"Aim lower {}\".format(player_name))\n\t\t\tcurrent_num = int(input(\"Try smaller number: \"))\n\t\telif current_num < number:\n\t\t\tprint(\"Aim higher {}\".format(player_name))\n\t\t\tcurrent_num = int(input(\"Try higher number: \"))\n\t\ttries += 1 # number of tries\n\n\telse:\n\t\tprint(\"You guessed it {}\".format(player_name))\n\t\tprint(\"You needed {} tries\".format(tries))\n\t\tplay_again = input(\"Would you like to play again {}? 
y/n\".format(player_name))\n\t\tif play_again == 'y':\n\t\t\tgame()\n\n\n# start the game\ngame()\n","repo_name":"vasjanovak/random-number","sub_path":"random_number.py","file_name":"random_number.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"74612808471","text":"# pylint: disable=C0103,R0902,R0904,R0914\r\nfrom __future__ import (nested_scopes, generators, division, absolute_import,\r\n print_function, unicode_literals)\r\nfrom math import sqrt, degrees, radians, atan2, acos, sin, cos\r\nfrom itertools import izip\r\n\r\nfrom numpy import array, cross, dot, transpose, zeros\r\nfrom numpy.linalg import norm\r\n\r\nfrom pyNastran.bdf.fieldWriter import set_blank_if_default\r\nfrom pyNastran.bdf.cards.baseCard import BaseCard, BDFCard\r\nfrom pyNastran.general.general import ListPrint\r\n\r\n\r\nclass Coord(BaseCard):\r\n type = 'COORD'\r\n\r\n def __init__(self, card, data):\r\n \"\"\"\r\n Defines a general CORDxx object\r\n @param self the object pointer\r\n @param card a BDFCard object\r\n @param data a list analogous to the card\r\n \"\"\"\r\n ## has the coordinate system been linked yet\r\n self.isCrossReferenced = False\r\n ## have all the transformation matricies been determined\r\n self.isResolved = False\r\n self.cid = None\r\n self.e1 = None\r\n self.e2 = None\r\n self.e3 = None\r\n\r\n def Cid(self):\r\n \"\"\"returns the coordinate ID\"\"\"\r\n return self.cid\r\n\r\n def setup(self, debug=False):\r\n r\"\"\"\r\n \\f[ e_{13} = e_3 - e_1 \\f]\r\n \\f[ e_{12} = e_2 - e_1 \\f]\r\n \\f[ k = \\frac{e_{12}}{|e_{12}|} \\f]\r\n \\f[ j_{dir} = k \\times e_{13} \\f]\r\n \\f[ j = \\frac{j_{dir}}{|j_{dir}|} \\f]\r\n \\f[ i = j \\times k \\f]\r\n \"\"\"\r\n try:\r\n assert len(self.e1) == 3, self.e1\r\n assert len(self.e2) == 3, self.e2\r\n assert len(self.e3) == 3, self.e3\r\n ## e_{13}\r\n e13 = self.e3 - self.e1\r\n ## e_{12}\r\n e12 = self.e2 - self.e1\r\n 
#print \"e13 = %s\" %(e13)\r\n #print \"e12 = %s\" %(e12)\r\n except TypeError:\r\n msg = ''\r\n msg += \"\\ntype = %s\\n\" % (self.type)\r\n msg += \"\\ncid = %s\\n\" % (self.Cid())\r\n msg += \"e1 = %s\\n\" % (self.e1)\r\n msg += \"e2 = %s\\n\" % (self.e2)\r\n msg += \"e3 = %s\\n\" % (self.e3)\r\n raise TypeError(msg)\r\n\r\n #print self\r\n #print \"e1 = \",self.e1\r\n #print \"e2 = \",self.e2\r\n #print \"e3 = \",self.e3\r\n\r\n try:\r\n ## k = (G3 cross G1) normalized\r\n self.k = self.normalize(e12)\r\n ## j = (k cross e13) normalized\r\n self.j = self.normalize(cross(self.k, e13))\r\n except RuntimeError:\r\n print(\"---InvalidUnitVectorError---\")\r\n print(\"Cp = %s\" % (self.Cid()))\r\n print(\"e1 = %s\" % (self.e1))\r\n print(\"e2 = %s\" % (self.e2))\r\n print(\"e3 = %s\" % (self.e3))\r\n print(\"e13 = %s\" % (e13))\r\n print(\"e12 = %s\" % (e12))\r\n print(\"k = norm(e12)\")\r\n print(\"k = %s\\n\" % (self.k))\r\n print(\"j = norm(cross(k,e13))\")\r\n raise\r\n try:\r\n ## i = j cross k\r\n self.i = cross(self.j, self.k)\r\n except RuntimeError:\r\n print(\"---InvalidUnitVectorError---\")\r\n print(\"Cp = %s\" % (self.Cid()))\r\n print(\"e1 = %s\" % (self.e1))\r\n print(\"e2 = %s\" % (self.e2))\r\n print(\"e3 = %s\" % (self.e3))\r\n print(\"e13 = %s\" % (e13))\r\n print(\"e12 = %s\" % (e12))\r\n print(\"k = norm(e12)\")\r\n print(\"k = %s\\n\" % (self.k))\r\n print(\"j = norm(cross(k,e13))\")\r\n print(\"j = %s\" % (self.j))\r\n raise\r\n\r\n if debug:\r\n print(\"Cp = %s\" % (self.Cid()))\r\n print(\"e1 = %s\" % (self.e1))\r\n print(\"e2 = %s\" % (self.e2))\r\n print(\"e3 = %s\" % (self.e3))\r\n print('-----')\r\n print(\"e13 = %s\" % (e13))\r\n print(\"e12 = %s\" % (e12))\r\n print('-----')\r\n print(\"i = %s\" % (self.i))\r\n print(\"j = %s\" % (self.j))\r\n print(\"k = %s\\n\" % (self.k))\r\n print('-----')\r\n\r\n #except TypeError:\r\n # msg = 'There is a problem handling these lines:\\n'\r\n # msg += ' self.k = 
self.normalize(self.e3-self.e1)\\n'\r\n # msg += ' self.ex0 = self.normalize(self.e2-self.e1)\\n'\r\n # msg += 'e1=%s Type=%s\\n' %(self.e1,type(self.e1))\r\n # msg += 'e2=%s Type=%s\\n' %(self.e2,type(self.e2))\r\n # msg += 'e3=%s Type=%s\\n' %(self.e3,type(self.e3))\r\n # #print msg\r\n # raise CoordTypeError(msg)\r\n #print \"k = %s\" %(self.k)\r\n #print \"e13 = %s\" %(e13)\r\n\r\n def transformToLocal(self, p, matrix, debug=False):\r\n r\"\"\"\r\n Transforms the global point p to the local coordinate system\r\n @param self\r\n the object pointer\r\n @param p\r\n the point to transform\r\n @param matrix\r\n the transformation matrix to apply - created by transformToGlobal\r\n @param debug\r\n developer debug\r\n @note\r\n uses the matrix as there is no linking from a global coordinate\r\n system to the local\r\n @note\r\n the matrix that comes in is the local to global, so we need to invert\r\n the matrix. Luckily the inverse of a tranformation matrix\r\n \\f$ [\\phi] \\f$ is the transpose of the matrix.\r\n \\f[ p_{Global} = (p_{Local}-e_1 )[\\phi]+e_1 \\f]\r\n \\f[ [phi]^{-1} = [phi]^T \\f]\r\n (pc-e1) =(pG-e1)mT\r\n (pc-e1)*m = pG-e1\r\n (pc-e1)*m+e1 = pG\r\n\r\n @note\r\n be very careful of when you apply e1. It gets removed whenever\r\n rotations are applied. 
These equations need some TLC, but the\r\n methods are ok.\r\n \"\"\"\r\n #pGlobal = self.transformToGlobal(p, debug=False)\r\n pCoord = dot(p - self.e1, transpose(matrix))\r\n pLocal = self.XYZtoCoord(pCoord)\r\n if debug:\r\n print(\"p = %s\" % (p))\r\n print(\"p-e1 = %s\" % (p - self.e1))\r\n print(\"pLocal = %s\\n\" % (pLocal))\r\n print(\"pCoord = %s\" % (pCoord))\r\n return pLocal\r\n #return pGlobal\r\n\r\n def normalize(self, v):\r\n \"\"\"\r\n Normalizes v into a unit vector\r\n @param self\r\n the object pointer\r\n @param v\r\n the vector to normalize\r\n @retval\r\n nNorm v has been normalized\r\n \"\"\"\r\n normV = norm(v)\r\n if not normV > 0.:\r\n raise RuntimeError('v=%s norm(v)=%s' % (v, normV))\r\n return v / normV\r\n\r\n def T(self):\r\n r\"\"\"\r\n Returns the 6x6 transformation\r\n \\f[ \\large [\\lambda] = [B_{ij}] \\f]\r\n \\f[\r\n [T] =\r\n \\left[\r\n \\begin{array}{cc}\r\n \\lambda & 0 \\\\\r\n 0 & \\lambda \\\\\r\n \\end{array}\r\n \\right]\r\n \\f]\r\n \"\"\"\r\n (a, matrix) = self.transformToGlobal(self.e1)\r\n t = zeros((6, 6)) # transformation matrix\r\n t[0:2, 0:2] = matrix\r\n t[3:5, 3:5] = matrix\r\n return t\r\n\r\n def reprFields(self):\r\n return self.rawFields()\r\n\r\n #def resolveCid(self):\r\n #pass\r\n\r\n\r\nclass RectangularCoord(object):\r\n def coordToXYZ(self, p):\r\n #print(\"p = %s\" %(p))\r\n #print(\"e1 = %s\" %(self.e1))\r\n return p + self.e1\r\n\r\n def XYZtoCoord(self, p):\r\n return p\r\n\r\n\r\nclass CylindricalCoord(object):\r\n r\"\"\"\r\n \\f[ r = \\sqrt(x^2+y^2) \\f]\r\n \\f[ \\theta = tan^-1(\\frac{y}{x}) \\f]\r\n \\f[ z = z \\f]\r\n\r\n \\f[ x = r cos(\\theta) \\f]\r\n \\f[ y = r sin(\\theta) \\f]\r\n \\f[ z = z \\f]\r\n \\f[ p = [x,y,z] + e_1 \\f]\r\n http://en.wikipedia.org/wiki/Cylindrical_coordinate_system\r\n @note\r\n \\f$ \\phi \\f$ and \\f$ \\theta \\f$ are flipped per wikipedia to be\r\n consistent with nastran's documentation\r\n @see refman.pdf\r\n \"\"\"\r\n def coordToXYZ(self, p):\r\n 
r\"\"\"\r\n @code\r\n y R\r\n | /\r\n | /\r\n | / theta\r\n *------------x\r\n @endcode\r\n\r\n \\f[ \\large x = R \\cos(\\theta) \\f]\r\n \\f[ \\large y = R \\sin(\\theta) \\f]\r\n \"\"\"\r\n R = p[0]\r\n theta = radians(p[1])\r\n x = R * cos(theta)\r\n y = R * sin(theta)\r\n return array([x, y, p[2]]) + self.e1\r\n\r\n def XYZtoCoord(self, p):\r\n (x, y, z) = p\r\n theta = degrees(atan2(y, x))\r\n R = sqrt(x * x + y * y)\r\n return array([R, theta, z])\r\n\r\n\r\nclass SphericalCoord(object):\r\n r\"\"\"\r\n \\f[ r = \\rho = \\sqrt(x^2+y^2+z^2) \\f]\r\n \\f[ \\theta = tan^-1(\\frac{y}{x}) \\f]\r\n \\f[ \\phi = cos^-1(\\frac{z}{r}) \\f]\r\n\r\n \\f[ x = r cos(\\theta)sin(\\phi) \\f]\r\n \\f[ y = r sin(\\theta)sin(\\phi) \\f]\r\n \\f[ z = r cos(\\phi) \\f]\r\n \\f[ p = [x,y,z] + e_1 \\f]\r\n http://en.wikipedia.org/wiki/Spherical_coordinate_system\r\n @note\r\n \\f$ \\phi \\f$ and \\f$ \\theta \\f$ are flipped per wikipedia to be\r\n consistent with nastran's documentation\r\n @see refman.pdf\r\n \"\"\"\r\n def coordToXYZ(self, p):\r\n R = p[0]\r\n theta = radians(p[1])\r\n phi = radians(p[2])\r\n x = R * cos(theta) * sin(phi)\r\n y = R * sin(theta) * sin(phi)\r\n z = R * cos(phi)\r\n return array([x, y, z]) + self.e1\r\n\r\n def XYZtoCoord(self, p):\r\n (x, y, z) = p\r\n R = sqrt(x * x + y * y + z * z)\r\n theta = degrees(atan2(y, x))\r\n if R > 0:\r\n phi = degrees(acos(z / R))\r\n else:\r\n phi = 0.\r\n return array([R, theta, phi])\r\n\r\n\r\nclass Cord2x(Coord):\r\n def __init__(self, card, data):\r\n \"\"\"\r\n defines the CORD2x class\r\n @param self the object pointer\r\n @param card a BDFCard object\r\n @param data a list analogous to the card\r\n \"\"\"\r\n self.isResolved = False\r\n Coord.__init__(self, card, data)\r\n\r\n if card:\r\n ## coordinate system ID\r\n self.cid = card.field(1)\r\n ## reference coordinate system ID\r\n self.rid = card.field(2, 0)\r\n\r\n ## origin in a point relative to the rid coordinate system\r\n self.e1 = 
array(card.fields(3, 6, [0., 0., 0.]))\r\n ## z-axis in a point relative to the rid coordinate system\r\n self.e2 = array(card.fields(6, 9, [0., 0., 0.]))\r\n ## a point on the xz-plane relative to the rid coordinate system\r\n self.e3 = array(card.fields(9, 12, [0., 0., 0.]))\r\n else:\r\n self.cid = data[0]\r\n self.rid = data[1]\r\n self.e1 = array(data[2:5])\r\n self.e2 = array(data[5:8])\r\n self.e3 = array(data[8:11])\r\n assert len(data) == 11, 'data = %s' % (data)\r\n\r\n assert len(self.e1) == 3\r\n assert len(self.e2) == 3\r\n assert len(self.e3) == 3\r\n if self.rid == 0:\r\n self.isResolved = True\r\n self.setup()\r\n\r\n def resolveCid(self):\r\n \"\"\"\r\n Turns the coordinate system from being a coordinate system of\r\n type 1 depending on a type 2 to a type 1 depending on nothing.\r\n\r\n More generally, takes a coordinate system that depends on multiple\r\n levels of coordinate systems and resolves them in order to resolve\r\n it's coordinate frame. So, if a coordinate system is of type 2, this\r\n will effectively set rid to 0 with a type 2.\r\n\r\n This should handle any number of coordinate systems or coordinate\r\n system types assuming there is no circular references.\r\n \"\"\"\r\n #print str(self)\r\n #print self.rid\r\n #print \"cid=%s rid=%s\"%(self.cid, self.Rid())\r\n if self.cid == 0 or isinstance(self.rid, int) or self.rid.isResolved:\r\n return # rid=0 so already resolved\r\n elif self.rid.isResolved is False: # rid\r\n msg = ('there is a circular reference between Coord %s and '\r\n 'Coord %s' %(self.cid,self.Rid()))\r\n #assert self.rid.isCrossReferenced==False,msg)\r\n #print \" resolving cid=%s rid=%s\" %(self.cid,self.Rid())\r\n self.rid.resolveCid()\r\n\r\n ## rid coordinate system is now resolved, time to resolve the cid\r\n ## coordinate system. 
rid may be in a different coordinate system\r\n ## than cid\r\n self.isResolved = True\r\n self.e1, matrix = self.transformToGlobal(self.e1)\r\n\r\n ## the axes are normalized, so assume they're points and\r\n ## resolve them in the XYZ system, but dont subtract e1 off\r\n ## (hence the False)\r\n self.e1, matrix = self.rid.transformToGlobal(self.e1) # origin\r\n i, matrix = self.rid.transformToGlobal(self.i, False)\r\n j, matrix = self.rid.transformToGlobal(self.j, False)\r\n k, matrix = self.rid.transformToGlobal(self.k, False)\r\n\r\n ## the axes are global, so now we put them in the cid\r\n self.i = i\r\n self.j = j\r\n self.k = k\r\n\r\n def cross_reference(self, model):\r\n \"\"\"\r\n Links self.rid to a coordinate system.\r\n @param self the object pointer\r\n @param model the BDF object\r\n @warning\r\n Doesn't set rid to the coordinate system if it's in the global.\r\n This isn't a problem, it's meant to speed up the code in order\r\n to resolve extra coordinate systems.\r\n \"\"\"\r\n self.isCrossReferenced = True\r\n if self.rid != 0:\r\n self.rid = model.Coord(self.rid)\r\n ###\r\n\r\n def transformToGlobal(self, p, resolveAltCoord=True, debug=False):\r\n r\"\"\"\r\n Transforms a point from the local coordinate system to the reference\r\n coordinate frames \"global\" coordinate system.\r\n\r\n \\f[ \\large [p_{global}]_{1\\times 3} =\r\n [p_{local} -p_{origin}]_{1\\times 3}[\\beta_{ij}]_{3\\times 3} \\f]\r\n\r\n where \\f$ [\\beta]_{ij} \\f$ is the transformation matrix\r\n \\f[ \\large [\\beta]_{ij} \\left[\r\n \\begin{array}{ccc}\r\n g_x \\cdot i & g_x \\cdot j & g_x \\cdot k \\\\\r\n g_y \\cdot i & g_y \\cdot j & g_y \\cdot k \\\\\r\n g_z \\cdot i & g_z \\cdot j & g_z \\cdot k\r\n \\end{array} \\right]\r\n \\f]\r\n\r\n * \\f$ g \\f$ is the global directional vector (e.g. 
\\f$ g_x = [1,0,0]\\f$)\r\n * \\f$ ijk \\f$ is the ith direction in the local coordinate system\r\n \r\n @warning\r\n make sure you cross-reference before calling this\r\n @warning\r\n you probably shouldnt call this, call the Node methods Position\r\n and PositionWRT\r\n \"\"\"\r\n if debug:\r\n print(\"p = %s\" % (p))\r\n print(\"p-e1 = %s\" % (p - self.e1))\r\n\r\n if not self.isResolved:\r\n self.resolveCid()\r\n if self.cid == 0:\r\n return p, array([[1., 0., 0.],\r\n [0., 1., 0.],\r\n [0., 0., 1.]])\r\n\r\n # the ijk axes arent resolved as R-theta-z, only points\r\n if resolveAltCoord:\r\n #print(\"p* = %s\" %(p))\r\n p = self.coordToXYZ(p)\r\n #p2 = p-self.eo\r\n\r\n # Bij = Bip*j\r\n i = self.i\r\n j = self.j\r\n k = self.k\r\n if isinstance(self.rid, int): # rid=0\r\n gx = array([1., 0., 0.])\r\n gy = array([0., 1., 0.])\r\n gz = array([0., 0., 1.])\r\n else:\r\n gx = self.rid.i\r\n gy = self.rid.j\r\n gz = self.rid.k\r\n ###\r\n\r\n matrix = array([[dot(gx, i), dot(gy, i), dot(gz, i)],\r\n [dot(gx, j), dot(gy, j), dot(gz, j)],\r\n [dot(gx, k), dot(gy, k), dot(gz, k)]])\r\n p2 = dot(p - self.e1, matrix)\r\n p3 = p2 + self.e1\r\n\r\n if debug:\r\n print(\"Cp = \", self.Cid())\r\n print(\"gx = %s\" % (gx))\r\n print(\"gy = %s\" % (gy))\r\n print(\"gz = %s\" % (gz))\r\n print(\"p = %s\" % (ListPrint(p)))\r\n print(\"matrix = \\n\", matrix)\r\n print(\"e1 = %s\" % (ListPrint(self.e1)))\r\n print(\"p2 = %s\" % (ListPrint(p2)))\r\n print('------------------------')\r\n print(\"p3 = %s\\n\" % (ListPrint(p3)))\r\n\r\n #print str(self)\r\n if isinstance(self.rid, int):\r\n return (p3, matrix)\r\n else:\r\n ## @todo do i need to multiply rid.transform(p3)[1]*matrix\r\n return (self.rid.transformToGlobal(p3)[0], matrix)\r\n ###\r\n\r\n def Rid(self):\r\n \"\"\"Returns the reference coordinate system self.rid\"\"\"\r\n if isinstance(self.rid, int):\r\n return self.rid\r\n return self.rid.cid\r\n\r\n\r\nclass Cord1x(Coord):\r\n rid = 0 # used only for transform to 
global\r\n\r\n def __init__(self, card, nCoord, data):\r\n Coord.__init__(self, card, data)\r\n\r\n self.isResolved = False\r\n if nCoord is not None:\r\n assert nCoord == 0 or nCoord == 1, 'nCoord=|%s|' % (nCoord)\r\n nCoord *= 4 # 0 if the 1st coord, 4 if the 2nd\r\n\r\n ## the coordinate ID\r\n self.cid = card.field(1 + nCoord)\r\n ## a Node at the origin\r\n self.g1 = card.field(2 + nCoord)\r\n ## a Node on the z-axis\r\n self.g2 = card.field(3 + nCoord)\r\n ## a Node on the xz-plane\r\n self.g3 = card.field(4 + nCoord)\r\n else:\r\n self.cid = data[0]\r\n self.g1 = data[1]\r\n self.g2 = data[2]\r\n self.g3 = data[3]\r\n assert len(data) == 4, 'data = %s' % (data)\r\n ###\r\n assert self.g1 != self.g2\r\n assert self.g1 != self.g3\r\n assert self.g2 != self.g3\r\n\r\n self.e1 = None\r\n self.e2 = None\r\n self.e3 = None\r\n self.i = None\r\n self.j = None\r\n self.k = None\r\n\r\n def cross_reference(self, model):\r\n \"\"\"\r\n Links self.rid to a coordinate system.\r\n @param self the object pointer\r\n @param model the BDF object\r\n \"\"\"\r\n self.isCrossReferenced = True\r\n ## grid point 1\r\n self.g1 = model.Node(self.g1)\r\n ## grid point 2\r\n self.g2 = model.Node(self.g2)\r\n ## grid point 3\r\n self.g3 = model.Node(self.g3)\r\n\r\n def resolveCid(self):\r\n \"\"\"\r\n finds the position of the nodes used define the coordinate system\r\n and sets the ijk vectors\r\n \"\"\"\r\n ## the origin\r\n self.e1 = self.g1.Position()\r\n ## a point on the z-axis\r\n self.e2 = self.g2.Position()\r\n ## a point on the xz-plane\r\n self.e3 = self.g3.Position()\r\n self.setup()\r\n\r\n def G1(self):\r\n if isinstance(self.g1, int):\r\n return self.g1\r\n return self.g1.nid\r\n\r\n def G2(self):\r\n if isinstance(self.g2, int):\r\n return self.g2\r\n return self.g2.nid\r\n\r\n def G3(self):\r\n if isinstance(self.g3, int):\r\n return self.g3\r\n return self.g3.nid\r\n\r\n def NodeIDs(self):\r\n \"\"\"\r\n returns [g1,g2,g3]\r\n \"\"\"\r\n grids = [self.G1(), 
self.G2(), self.G3()]\r\n return grids\r\n\r\n\r\nclass CORD3G(Coord): # not done\r\n \"\"\"\r\n Defines a general coordinate system using three rotational angles as\r\n functions of coordinate values in the reference coordinate system.\r\n The CORD3G entry is used with the MAT9 entry to orient material principal\r\n axes for 3-D composite analysis\r\n\r\n @code\r\n CORD3G CID METHOD FORM THETAID1 THETAID2 THETAID3 CIDREF\r\n CORD3G 100 E313 EQN 110 111 112 0\r\n @endcode\r\n \"\"\"\r\n \r\n type = 'CORD3G'\r\n\r\n def __init__(self, card=[0, 0, 0, 0, 0, 0, 0], data=None):\r\n \"\"\"\r\n Intilizes the CORD3G\r\n @param self the object pointer\r\n @param card a list version of the fields\r\n \"\"\"\r\n if isinstance(card, list):\r\n assert len(card) == 8\r\n card = BDFCard(card)\r\n Coord.__init__(self, card, data)\r\n\r\n self.cid = card.field(1)\r\n method = card.field(2)\r\n self.methodES = method[0]\r\n self.methodInt = int(method[1:])\r\n assert self.methodES in ['E', 'S']\r\n assert 0 < self.methodInt < 1000\r\n\r\n self.form = card.field(3, 'EQN')\r\n self.thetas = card.field(4, 7)\r\n assert len(self.thetas) == 3, 'thetas=%s' % (self.thetas)\r\n self.cidRef = card.field(7)\r\n\r\n # EQN for DEQATN, TABLE for TABLE3D\r\n assert self.form in ['EQN', 'TABLE']\r\n\r\n def cross_reference(self, model):\r\n self.cidRef = model.Coord(self.cidRef)\r\n\r\n def CidRef(self):\r\n if isinstance(self.cidRef, int):\r\n return self.cidRef\r\n return self.cidRef.cid\r\n\r\n def transformToGlobal(self, p, debug=False):\r\n \"\"\"\r\n @warning not done, just setting up how you'd do this\r\n @note per http://en.wikipedia.org/wiki/Euler_angles\r\n \"This means for example that a convention named (YXZ) is the result\r\n of performing first an intrinsic Z rotation, followed by X and\r\n Y rotations, in the moving axes (Note: the order of multiplication\r\n of matrices is the opposite of the order in which they're\r\n applied to a vector).\"\r\n \"\"\"\r\n for (rotation, theta) in 
izip(self.rotations, self.thetas):\r\n ct = cos(radians(theta))\r\n st = sin(radians(theta))\r\n if rotation == 1:\r\n p = dot(self.RotationX(ct, st), p)\r\n elif rotation == 2:\r\n p = dot(self.RotationY(ct, st), p)\r\n elif rotation == 3:\r\n p = dot(self.RotationZ(ct, st), p)\r\n return p\r\n\r\n def RotationX(self, ct, st):\r\n matrix = array([[1., 0., 0.],\r\n [ct, 0., -st],\r\n [-st, 0., ct]])\r\n return matrix\r\n\r\n def RotationY(self, ct, st):\r\n matrix = array([[ct, 0., st],\r\n [0., 1., 0.],\r\n [-st, 0., ct]])\r\n return matrix\r\n\r\n def RotationZ(self, ct, st):\r\n matrix = array([[ct, st, 0.],\r\n [-st, ct, 0.],\r\n [0., 0., 1.]])\r\n return matrix\r\n\r\n def rawFields(self):\r\n method = self.methodES + str(self.methodInt)\r\n fields = (['CORD3G', self.cid, method, self.form] + self.thetas +\r\n [self.CidRef()])\r\n return fields\r\n\r\n\r\nclass CORD1R(Cord1x, RectangularCoord):\r\n \"\"\"\r\n CORD1R CIDA G1A G2A G3A CIDB G1B G2B G3B\r\n \"\"\"\r\n type = 'CORD1R'\r\n\r\n def __init__(self, card=None, nCoord=0, data=None):\r\n \"\"\"\r\n Intilizes the CORD1R\r\n @param self\r\n the object pointer\r\n @param nCoord\r\n the coordinate location on the line (there are possibly 2 coordinates\r\n on 1 card)\r\n @param card\r\n a list version of the fields (1 CORD1R only)\r\n \"\"\"\r\n Cord1x.__init__(self, card, nCoord, data)\r\n\r\n def rawFields(self):\r\n fields = ['CORD1R', self.cid] + self.NodeIDs()\r\n return fields\r\n\r\n\r\nclass CORD1C(Cord1x, CylindricalCoord):\r\n \"\"\"\r\n CORD1C CIDA G1A G2A G3A CIDB G1B G2B G3B\r\n \"\"\"\r\n type = 'CORD1C'\r\n\r\n def __init__(self, card=None, nCoord=0, data=None):\r\n \"\"\"\r\n Intilizes the CORD1R\r\n @param self\r\n the object pointer\r\n @param card\r\n a BDFCard object\r\n @param nCoord\r\n the coordinate location on the line (there are possibly 2 coordinates\r\n on 1 card)\r\n @param data\r\n a list version of the fields (1 CORD1R only)\r\n\r\n \"\"\"\r\n Cord1x.__init__(self, card, nCoord, 
data)\r\n\r\n def rawFields(self):\r\n fields = ['CORD1C', self.cid] + self.NodeIDs()\r\n return fields\r\n\r\n\r\nclass CORD1S(Cord1x, SphericalCoord):\r\n type = 'CORD1S'\r\n \"\"\"\r\n CORD1S CIDA G1A G2A G3A CIDB G1B G2B G3B\r\n \"\"\"\r\n def __init__(self, card=None, nCoord=0, data=None):\r\n \"\"\"\r\n Intilizes the CORD1S\r\n @param self\r\n the object pointer\r\n @param card\r\n a BDFCard object\r\n @param nCoord\r\n the coordinate location on the line (there are possibly 2 coordinates\r\n on 1 card)\r\n @param data\r\n a list version of the fields (1 CORD1S only)\r\n \"\"\"\r\n Cord1x.__init__(self, card, nCoord, data)\r\n\r\n def rawFields(self):\r\n fields = ['CORD1S', self.cid] + self.NodeIDs()\r\n return fields\r\n\r\n\r\nclass CORD2R(Cord2x, RectangularCoord):\r\n type = 'CORD2R'\r\n\r\n def __init__(self, card=None,\r\n data=[0, 0, 0., 0., 0., 0., 0., 1., 1., 0., 0.]):\r\n \"\"\"\r\n Intilizes the CORD2R\r\n @param self\r\n the object pointer\r\n @param card\r\n a BDFCard object\r\n @param data\r\n a list version of the fields (1 CORD2R only)\r\n \"\"\"\r\n Cord2x.__init__(self, card, data)\r\n\r\n def rawFields(self):\r\n rid = set_blank_if_default(self.Rid(), 0)\r\n fields = ['CORD2R', self.cid, rid] + list(self.e1) + list(\r\n self.e2) + list(self.e3)\r\n return fields\r\n\r\n\r\nclass CORD2S(Cord2x, SphericalCoord):\r\n type = 'CORD2S'\r\n\r\n def __init__(self, card=None, data=None):\r\n \"\"\"\r\n Intilizes the CORD2R\r\n @param self\r\n the object pointer\r\n @param card\r\n a BDFCard object\r\n @param data\r\n a list version of the fields (1 CORD2S only)\r\n \"\"\"\r\n Cord2x.__init__(self, card, data)\r\n\r\n def rawFields(self):\r\n rid = set_blank_if_default(self.Rid(), 0)\r\n fields = (['CORD2S', self.cid, rid] + list(self.e1) + list(self.e2) +\r\n list(self.e3))\r\n return fields\r\n\r\n\r\nclass CORD2C(Cord2x, CylindricalCoord):\r\n type = 'CORD2C'\r\n\r\n def __init__(self, card=None, data=None):\r\n \"\"\"\r\n Intilizes the 
CORD2C\r\n @param self\r\n the object pointer\r\n @param card\r\n a BDFCard object\r\n @param data\r\n a list version of the fields (1 CORD2C only)\r\n \"\"\"\r\n Cord2x.__init__(self, card, data)\r\n\r\n def rawFields(self):\r\n rid = set_blank_if_default(self.Rid(), 0)\r\n fields = (['CORD2C', self.cid, rid] + list(self.e1) + list(self.e2) +\r\n list(self.e3))\r\n return fields\r\n","repo_name":"xirxa/pynastran-locr","sub_path":"pyNastran/bdf/cards/coordinateSystems.py","file_name":"coordinateSystems.py","file_ext":"py","file_size_in_byte":26418,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"73665338072","text":"import pygame\nimport sys\nfrom collections import deque\n\npygame.init()\n\nLARGURA, ALTURA = 800, 600\nTELA = pygame.display.set_mode((LARGURA, ALTURA))\npygame.display.set_caption(\"Labirinto\")\n\nBRANCO = (255, 255, 255)\nVERMELHO = (255, 0, 0)\n\nIMAGEM_RATO = pygame.image.load(\"rato1.jpg\")\nIMAGEM_QUEIJO = pygame.image.load(\"queijo.jpg\")\n\nTAMANHO_CELULA = 20\n\ndef carregar_labirinto(arquivo):\n with open(arquivo, \"r\") as arquivo_labirinto:\n linhas = arquivo_labirinto.readlines()\n \n labirinto = []\n encontrou_rato = False\n\n for y, linha in enumerate(linhas):\n linha_celulas = []\n for x, celula in enumerate(linha.strip()):\n if celula == '1':\n linha_celulas.append(1)\n elif celula == '0':\n linha_celulas.append(0)\n elif celula == 'm':\n linha_celulas.append(2)\n encontrou_rato = True\n elif celula == 'e':\n linha_celulas.append(3)\n labirinto.append(linha_celulas)\n \n if not encontrou_rato:\n print(\"Labirinto não possui entrada para o rato (m).\")\n sys.exit()\n\n return labirinto\n\ndef desenhar_labirinto():\n TELA.fill(BRANCO)\n for y, linha in enumerate(labirinto):\n for x, celula in enumerate(linha):\n if (x, y) in caminhos_visitados:\n pygame.draw.rect(TELA, VERMELHO, (x * TAMANHO_CELULA, y * TAMANHO_CELULA, TAMANHO_CELULA, TAMANHO_CELULA))\n else:\n imagem = 
imagens_celulas.get(celula, None)\n if imagem is not None:\n TELA.blit(imagem, (x * TAMANHO_CELULA, y * TAMANHO_CELULA))\n\ndef movimento_possivel(pos_x, pos_y):\n movimentos = [(1, 0), (-1, 0), (0, 1), (0, -1)]\n for movimento in movimentos:\n nova_pos_x = pos_x + movimento[0]\n nova_pos_y = pos_y + movimento[1]\n if (\n 0 <= nova_pos_x < len(labirinto[0])\n and 0 <= nova_pos_y < len(labirinto)\n and labirinto[nova_pos_y][nova_pos_x] != 1\n and (nova_pos_x, nova_pos_y) not in caminhos_visitados\n ):\n return True\n return False\n\ndef movimentar_jogador():\n global posicao_jogador_x, posicao_jogador_y\n\n movimentos = [(1, 0), (-1, 0), (0, 1), (0, -1)]\n movimento_valido = False\n\n for movimento in movimentos:\n nova_pos_x = posicao_jogador_x + movimento[0]\n nova_pos_y = posicao_jogador_y + movimento[1]\n if (\n 0 <= nova_pos_x < len(labirinto[0])\n and 0 <= nova_pos_y < len(labirinto)\n and labirinto[nova_pos_y][nova_pos_x] != 1\n and (nova_pos_x, nova_pos_y) not in caminhos_visitados\n ):\n movimento_valido = True\n pilha.append((posicao_jogador_x, posicao_jogador_y))\n caminhos_visitados.add((posicao_jogador_x, posicao_jogador_y))\n labirinto[posicao_jogador_y][posicao_jogador_x] = 0\n posicao_jogador_x, posicao_jogador_y = nova_pos_x, nova_pos_y\n\n if posicao_jogador_x == posicao_objetivo_x and posicao_jogador_y == posicao_objetivo_y:\n global encontrou_queijo\n encontrou_queijo = True\n break\n\n return movimento_valido\n\ndef retroceder_jogador():\n global posicao_jogador_x, posicao_jogador_y\n\n if pilha:\n pilha_solucao.append((posicao_jogador_x, posicao_jogador_y))\n caminhos_visitados.add((posicao_jogador_x, posicao_jogador_y))\n posicao_anterior_x, posicao_anterior_y = posicao_jogador_x, posicao_jogador_y\n posicao_jogador_x, posicao_jogador_y = pilha.pop()\n pygame.draw.rect(TELA, VERMELHO, (posicao_jogador_x * TAMANHO_CELULA, posicao_jogador_y * TAMANHO_CELULA, TAMANHO_CELULA, TAMANHO_CELULA))\n\ndef main():\n global posicao_jogador_x, 
posicao_jogador_y, posicao_anterior_x, posicao_anterior_y\n global encontrou_queijo\n\n while True:\n for evento in pygame.event.get():\n if evento.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n\n desenhar_labirinto()\n\n if not encontrou_queijo:\n movimento_valido = movimentar_jogador()\n\n if not movimento_valido:\n if not movimento_possivel(posicao_jogador_x, posicao_jogador_y) and not pilha:\n print(\"Labirinto sem saída\")\n pygame.quit()\n sys.exit()\n\n retroceder_jogador()\n\n else:\n if pilha_solucao:\n posicao_anterior_x, posicao_anterior_y = posicao_jogador_x, posicao_jogador_y\n posicao_jogador_x, posicao_jogador_y = pilha_solucao.pop()\n caminhos_visitados.add((posicao_jogador_x, posicao_jogador_y))\n labirinto[posicao_jogador_y][posicao_jogador_x] = 0\n pilha_solucao.append((posicao_jogador_x, posicao_jogador_y))\n pygame.draw.rect(TELA, VERMELHO, (posicao_jogador_x * TAMANHO_CELULA, posicao_jogador_y * TAMANHO_CELULA, TAMANHO_CELULA, TAMANHO_CELULA))\n\n TELA.blit(IMAGEM_RATO, (posicao_jogador_x * TAMANHO_CELULA, posicao_jogador_y * TAMANHO_CELULA))\n pygame.display.update()\n\n pygame.time.delay(100) \n\n if encontrou_queijo:\n print(\"Achou o queijo!\")\n pygame.quit()\n sys.exit()\n\nif __name__ == \"__main__\":\n labirinto = carregar_labirinto(\"labirinto.txt\")\n posicao_jogador_x, posicao_jogador_y = None, None\n posicao_objetivo_x, posicao_objetivo_y = None, None\n posicao_anterior_x, posicao_anterior_y = None, None\n\n imagens_celulas = {\n 1: pygame.Surface((TAMANHO_CELULA, TAMANHO_CELULA)),\n 2: IMAGEM_RATO,\n 3: IMAGEM_QUEIJO,\n }\n\n for y, linha in enumerate(labirinto):\n for x, celula in enumerate(linha):\n if celula == 2:\n posicao_jogador_x = x\n posicao_jogador_y = y\n posicao_anterior_x = x\n posicao_anterior_y = y\n elif celula == 3:\n posicao_objetivo_x = x\n posicao_objetivo_y = y\n\n pilha = deque()\n pilha_solucao = deque()\n caminhos_visitados = set()\n\n posicao_anterior_x, posicao_anterior_y = posicao_jogador_x, 
posicao_jogador_y\n encontrou_queijo = False\n\n main()\n","repo_name":"taysa-fernandes/labirinto","sub_path":"labirinto.py","file_name":"labirinto.py","file_ext":"py","file_size_in_byte":6292,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"36954414208","text":"import sys\ninput = sys.stdin.readline\nfrom collections import deque\n\nN = int(input())\nqueue = deque()\n\nfor i in range(N):\n word = input().split()\n if len(word) == 2:\n queue.append(word[-1])\n else:\n if word[0] == 'front':\n if queue:\n print(queue[0])\n else:\n print(-1)\n elif word[0] == 'back':\n if queue:\n print(queue[-1])\n else:\n print(-1)\n elif word[0] == 'size':\n print(len(queue))\n elif word[0] == 'empty':\n if queue:\n print(0)\n else:\n print(1)\n elif word[0] == 'pop':\n if queue:\n print(queue.popleft())\n else:\n print(-1)","repo_name":"AndreaStudy/Algorithm","sub_path":"queue/s4_18258.py","file_name":"s4_18258.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"5134657070","text":"#Jeeson Baktha\r\n#Iterations Stretch and Challenge\r\n#16 October 2014\r\n\r\nnumber = int(input(\"Enter a non-negative integer to take the factorial of: \"))\r\n\r\nproduct = 1\r\n\r\nfor count in range(number):\r\n product = product * (count + 1)\r\n\r\nprint(product)\r\n","repo_name":"JJBaktha/Iterations","sub_path":"Iteration Development 1.py","file_name":"Iteration Development 1.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"15907060568","text":"\"\"\"\n * 你在一个城市里,城市由 n 个路口组成,路口编号为 0 到 n - 1 ,某些路口之间有 双向 道路。输入保证你可以从任意路口出发到达其他任意路口,且任意两个路口之间最多有一条路。\n * 给你一个整数 n 和二维整数数组 roads ,其中 roads[i] = [ui, vi, time_i] 表示在路口 ui 和 vi 之间有一条需要花费 time_i 时间才能通过的道路。\n * 你想知道花费 最少时间 从路口 0 出发到达路口 n - 1 的方案数。\n * 请返回花费 最少时间 到达目的地的 路径数目 。由于答案可能很大,将结果对 10^9 
+ 7 取余 后返回。\n * 提示:\n * 1、1 <= n <= 200\n * 2、n - 1 <= roads.length <= n * (n - 1) / 2\n * 3、roads[i].length == 3\n * 4、0 <= ui, vi <= n - 1\n * 5、1 <= time_i <= 10^9\n * 6、ui != vi\n * 7、任意两个路口之间至多有一条路。\n * 8、从任意路口出发,你能够到达其他任意路口。\n * 链接:https://leetcode.cn/problems/number-of-ways-to-arrive-at-destination/\n\"\"\"\nfrom typing import List\nfrom heapq import heappush, heappop\n\n\nclass Solution:\n\n def countPaths(self, n: int, roads: List[List[int]]) -> int:\n inf = int(1e18)\n MOD = 10**9 + 7\n g = [[] for _ in range(n)]\n dis, vis, cnt = [inf] * n, [False] * n, [0] * n\n dis[0] = 0\n cnt[0] = 1\n for s, e, c in roads:\n g[s].append([e, c])\n g[e].append([s, c])\n q = [[0, 0]]\n while q:\n mn_cost, idx = heappop(q)\n if vis[idx]: continue\n vis[idx] = True\n for nx_i, nx_c in g[idx]:\n nc = nx_c + mn_cost\n if nc < dis[nx_i]:\n cnt[nx_i] = cnt[idx]\n dis[nx_i] = nc\n heappush(q, [nc, nx_i])\n elif nc == dis[nx_i]:\n cnt[nx_i] += cnt[idx]\n return cnt[-1] % MOD\n\n\nif __name__ == '__main__':\n # 4\n print(Solution().countPaths(7, [[0, 6, 7], [0, 1, 2], [1, 2, 3], [1, 3, 3], [6, 3, 3], [3, 5, 1], [6, 5, 1], [2, 5, 1], [0, 4, 5], [4, 6, 2]]))\n # 1\n print(Solution().countPaths(2, [[1, 0, 10]]))","repo_name":"adanzl/leetcode-practice","sub_path":"py/q1900/Q1976.py","file_name":"Q1976.py","file_ext":"py","file_size_in_byte":2175,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"34722289968","text":"from wfn.encoding import Decoder, Encoder\n\n\nclass WFNConverter:\n\n def __init__(self):\n self.wfn_doc = {}\n self.wfn_keys = ['part', 'vendor', 'product', 'version', 'update', 'edition', 'language', 'sw_edition',\n 'target_sw', 'target_hw', 'other']\n self.wfn_keys_edition_special_case = ['part', 'vendor', 'product', 'version', 'update', 'language', 'edition',\n 'sw_edition', 'target_sw', 'target_hw', 'other']\n\n def convert_cpe_uri_to_wfn(self, cpe_uri):\n self.set_wfn_default_values()\n cpe_uri = 
self.encode_cpe_uri(cpe_uri)\n wfn_values = self.get_wfn_values_from_cpe_uri(cpe_uri)\n self.set_wfn_values(wfn_values)\n return self.wfn_doc\n\n def set_wfn_default_values(self):\n self.wfn_doc = {'part': 'ANY', 'vendor': 'ANY', 'product': 'ANY', 'version': 'ANY', 'update': 'ANY',\n 'edition': 'ANY', 'language': 'ANY', 'sw_edition': 'ANY', 'target_sw': 'ANY',\n 'target_hw': 'ANY', 'other': 'ANY'}\n\n @staticmethod\n def encode_cpe_uri(cpe_uri):\n cpe_uri = Encoder.encode_escaped_double_points(cpe_uri)\n cpe_uri = Encoder.encode_escaped_tildes(cpe_uri)\n return cpe_uri\n\n @staticmethod\n def get_wfn_values_from_cpe_uri(cpe_uri):\n wfn_first_part, wfn_second_part = WFNConverter.get_wfn_parts(cpe_uri)\n wfn_values = WFNConverter.merge_wfn_parts(wfn_first_part, wfn_second_part)\n WFNConverter.clean_values(wfn_values)\n return wfn_values\n\n @staticmethod\n def clean_values(values):\n values.remove('cpe') # discard 'cpe' value\n values[0] = WFNConverter.remove_slash_from_value(values[0])\n\n @staticmethod\n def get_wfn_parts(cpe_uri):\n first_part = cpe_uri.split(':')\n second_part = first_part[-1].split('~')\n return first_part, second_part\n\n @staticmethod\n def merge_wfn_parts(wfn_first_part, wfn_second_part):\n if len(wfn_second_part) > 1:\n lang = WFNConverter.get_lang_from_wfn_first_part(wfn_first_part)\n del wfn_first_part[-1] # remove value of second part\n wfn_first_part.append(lang)\n del wfn_second_part[0] # remove value of first part\n wfn_first_part.extend(wfn_second_part)\n return wfn_first_part\n\n @staticmethod\n def get_lang_from_wfn_first_part(first_part_values):\n return first_part_values[-1].split('~')[0]\n\n def set_wfn_values(self, wfn_values):\n wfn_keys_index = 0\n wfn_keys = self.get_wfn_keys(wfn_values)\n for wfn_value in wfn_values:\n wfn_value = Decoder.decode_non_alphanumeric_characters(wfn_value)\n wfn_key = wfn_keys[wfn_keys_index]\n self.set_wfn_value(wfn_key, wfn_value)\n wfn_keys_index += 1\n\n def get_wfn_keys(self, wfn_values):\n 
if len(wfn_values) > 7:\n return self.wfn_keys_edition_special_case\n return self.wfn_keys\n\n def set_wfn_value(self, key, value):\n if not self.is_value_any(value):\n if value == '-':\n self.set_wfn_value(key, 'NA')\n else:\n self.wfn_doc.__setitem__(key, value)\n\n @staticmethod\n def is_value_any(value):\n return value == '' or value == '*' or value == 'ANY'\n\n @staticmethod\n def remove_slash_from_value(wfn_value):\n return wfn_value.replace('/', '')\n\n def get_uri_binding_version(self, uri_binding):\n return self.convert_cpe_uri_to_wfn(uri_binding).get('version')\n\n def get_uri_binding_target_sw(self, uri_binding):\n return self.convert_cpe_uri_to_wfn(uri_binding).get('target_sw')\n\n def convert_wfn_to_uri(self, wfn):\n uri = 'cpe:/'\n special_case = self.is_wfn_special_case(wfn)\n if not special_case:\n uri_first_part_attributes = self.get_uri_first_part_attributes(wfn)\n uri += self.concat_uri_attributes(':', uri_first_part_attributes)\n else:\n uri_first_part_attributes = self.get_uri_first_part_attributes(wfn, True)\n uri += self.concat_uri_attributes(':', uri_first_part_attributes, True)\n uri = self.concatenate_uri_second_part_attributes(uri, wfn)\n return uri\n\n def get_uri_first_part_attributes(self, wfn, edition_special_case=False):\n uri_first_part_attributes = []\n range_limit = 7\n wfn_keys = self.wfn_keys\n if edition_special_case:\n range_limit = 6\n wfn_keys = self.wfn_keys_edition_special_case\n for i in range(range_limit):\n uri_first_part_attributes.append(wfn.get(wfn_keys[i]))\n return uri_first_part_attributes\n\n def is_wfn_special_case(self, wfn):\n special_case = False\n for i in range(7, 11):\n attribute = wfn.get(self.wfn_keys[i])\n if not self.is_value_any(attribute):\n return True\n return special_case\n\n @staticmethod\n def concat_uri_attributes(splitter_char, attributes, edition_special_case=False):\n uri = ''\n if not edition_special_case:\n while WFNConverter.is_value_any(attributes[-1]):\n attributes.pop()\n for attribute 
in attributes:\n uri = WFNConverter.concat_uri_attribute(uri, attribute, splitter_char)\n return uri[:-1]\n\n def concatenate_uri_second_part_attributes(self, uri, wfn):\n uri += '~'\n for i in range(6, 11):\n uri_attribute = wfn.get(self.wfn_keys_edition_special_case[i])\n uri = self.concat_uri_attribute(uri, uri_attribute, '~')\n return uri[:-1]\n\n @staticmethod\n def concat_uri_attribute(uri, attribute, splitter_char):\n attribute = Encoder.encode_non_alphanumeric_characters(attribute)\n if attribute == 'NA':\n uri += '-' + splitter_char\n elif attribute != 'ANY':\n uri += attribute + splitter_char\n else:\n uri += splitter_char\n return uri\n\n def create_wfn_from_user_input(self, user_input):\n self.set_wfn_default_values()\n for key in self.wfn_keys:\n value = dict(user_input).get(key)\n if value is not None:\n self.set_wfn_value(key, value[0])\n return self.wfn_doc\n\n","repo_name":"fkie-cad/iva","sub_path":"wfn/wfn_converter.py","file_name":"wfn_converter.py","file_ext":"py","file_size_in_byte":6317,"program_lang":"python","lang":"en","doc_type":"code","stars":60,"dataset":"github-code","pt":"5"} +{"seq_id":"14110514475","text":"from random import random, choice\n\n\ndef bkfst_gen(*, endp=30, spam=5):\n tableau = ['bacon', 'egg'] + (['spam'] * int(random() * spam + 1))\n while True:\n if random() * 100 < endp: yield choice(tableau)\n return None\n\ndef main():\n for curseur in range(3):\n print(*bkfst_gen())\n\n\nmain()\n","repo_name":"UlysseARNAUD-IPSSI/Module-python","sub_path":"Chapitre 1 : Bases du langage Python/exercices/1.4 : Générateurs/bkfst_gen.py","file_name":"bkfst_gen.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"20300005512","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n__author__ = \"ipetrash\"\n\n\nimport logging\nimport sys\n\nfrom logging.handlers import RotatingFileHandler\nfrom pathlib import Path\n\nDIR = 
Path(__file__).resolve().parent\nROOT_DIR = DIR.parent\n\n# pip install tabulate\nfrom tabulate import tabulate\n\n\ndef get_table(assigned_open_issues_per_project: dict[str, int]) -> str:\n return tabulate(\n list(assigned_open_issues_per_project.items()),\n headers=(\"PROJECT\", \"Issues\"),\n tablefmt=\"grid\",\n )\n\n\ndef print_table(assigned_open_issues_per_project: dict[str, int]):\n print(get_table(assigned_open_issues_per_project))\n # PROJECT | Issues\n # --------+-------\n # xxx | 1\n # yyy | 2\n # zzz | 3\n\n\ndef get_logger(\n name, file=\"log.txt\", encoding=\"utf-8\", log_stdout=True, log_file=True\n) -> logging.Logger:\n log = logging.getLogger(name)\n log.setLevel(logging.DEBUG)\n\n formatter = logging.Formatter(\n \"[%(asctime)s] %(filename)s:%(lineno)d %(levelname)-8s %(message)s\"\n )\n\n if log_file:\n fh = RotatingFileHandler(\n file, maxBytes=10000000, backupCount=5, encoding=encoding\n )\n fh.setFormatter(formatter)\n log.addHandler(fh)\n\n if log_stdout:\n sh = logging.StreamHandler(stream=sys.stdout)\n sh.setFormatter(formatter)\n log.addHandler(sh)\n\n return log\n\n\nlogger = get_logger(\"parse_jira_Assigned_Open_Issues_per_Project\")\n","repo_name":"gil9red/SimplePyScripts","sub_path":"job_compassplus/parse_jira_Assigned_Open_Issues_per_Project/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":1448,"program_lang":"python","lang":"en","doc_type":"code","stars":141,"dataset":"github-code","pt":"5"} +{"seq_id":"10242028352","text":"\"\"\"Helper functions for test tear down\"\"\"\nimport os\nimport re\nimport tempfile\nimport time\nfrom typing import List\n\nfrom mapswipe_workers import auth\n\n\ndef delete_test_data(project_id: str) -> None:\n \"\"\"\n Delete test project indluding groups, tasks and results\n from Firebase and Postgres\n \"\"\"\n\n if not re.match(r\"[-a-zA-Z0-9]+\", project_id):\n raise ValueError(\n \"Given argument resulted in invalid Firebase Realtime Database reference. 
\"\n )\n\n fb_db = auth.firebaseDB()\n ref = fb_db.reference(f\"v2/results/{project_id}\")\n ref.delete()\n ref = fb_db.reference(f\"v2/tasks/{project_id}\")\n ref.delete()\n ref = fb_db.reference(f\"v2/groupsUsers/{project_id}\")\n ref.delete()\n time.sleep(1) # Wait for Firebase Functions to complete\n ref = fb_db.reference(f\"v2/groups/{project_id}\")\n ref.delete()\n ref = fb_db.reference(f\"v2/projects/{project_id}\")\n ref.delete()\n ref = fb_db.reference(f\"v2/projectDrafts/{project_id}\")\n ref.delete()\n ref = fb_db.reference(f\"v2/users/{project_id}\")\n ref.delete()\n\n # Clear out the user-group used in test.\n # XXX: Use a firebase simulator for running test.\n # For CI/CD, use a real firebase with scope using commit hash,\n # and clear all data at the end.\n for user_group_id in [\n \"dummy-user-group-1\",\n \"dummy-user-group-2\",\n \"dummy-user-group-3\",\n \"dummy-user-group-4\",\n ]:\n ref = fb_db.reference(f\"v2/userGroups/{user_group_id}\")\n ref.delete()\n\n pg_db = auth.postgresDB()\n # Delete user results data\n sql_query = (\n \"DELETE FROM mapping_sessions_results \"\n \"WHERE mapping_session_id IN (\"\n \"SELECT mapping_session_id \"\n \"FROM mapping_sessions WHERE project_id = %s)\"\n )\n pg_db.query(sql_query, [project_id])\n # Delete user-groups results data\n sql_query = (\n \"DELETE FROM mapping_sessions_user_groups \"\n \"WHERE mapping_session_id IN (\"\n \"SELECT mapping_session_id \"\n \"FROM mapping_sessions WHERE project_id = %s)\"\n )\n pg_db.query(sql_query, [project_id])\n # Delete mapping sessions\n sql_query = \"DELETE FROM mapping_sessions WHERE project_id = %s\"\n pg_db.query(sql_query, [project_id])\n sql_query = \"DELETE FROM results_temp WHERE project_id = %s\"\n pg_db.query(sql_query, [project_id])\n sql_query = \"DELETE FROM tasks WHERE project_id = %s\"\n pg_db.query(sql_query, [project_id])\n sql_query = \"DELETE FROM groups WHERE project_id = %s\"\n pg_db.query(sql_query, [project_id])\n sql_query = \"DELETE FROM 
projects WHERE project_id = %s\"\n pg_db.query(sql_query, [project_id])\n\n sql_query = \"DELETE FROM users WHERE user_id = 'test_build_area'\"\n pg_db.query(sql_query)\n sql_query = \"DELETE FROM users_temp WHERE user_id = 'test_build_area'\"\n pg_db.query(sql_query)\n\n sql_query = \"DELETE FROM users WHERE user_id = 'test_build_area_heidelberg'\"\n pg_db.query(sql_query)\n sql_query = \"DELETE FROM users_temp WHERE user_id = 'test_build_area_heidelberg'\"\n pg_db.query(sql_query)\n\n filename = os.path.join(\n tempfile._get_default_tempdir(), f\"results_{project_id}.csv.gz\"\n )\n try:\n os.remove(filename)\n except FileNotFoundError:\n pass\n\n\ndef delete_test_user_group(user_group_ids: List) -> None:\n # Make sure delete_test_data is runned first.\n fb_db = auth.firebaseDB()\n ref = fb_db.reference(\"v2/usersGroups\")\n ref.delete()\n\n pg_db = auth.postgresDB()\n pg_db.query(\n \"DELETE FROM user_groups_user_memberships WHERE user_group_id = ANY(%s);\",\n [user_group_ids],\n )\n pg_db.query(\n \"DELETE FROM user_groups WHERE user_group_id = ANY(%s);\",\n [user_group_ids],\n )\n\n\ndef delete_test_user(user_ids: List) -> None:\n for user_id in user_ids:\n if not re.match(r\"[-a-zA-Z0-9]+\", user_id):\n raise ValueError(\n \"Given argument resulted in invalid \"\n \"Firebase Realtime Database reference. 
\"\n )\n\n fb_db = auth.firebaseDB()\n ref = fb_db.reference(f\"v2/users/{user_id}\")\n ref.delete()\n\n pg_db = auth.postgresDB()\n sql_query = \"DELETE FROM users WHERE user_id = ANY( %(user_ids)s );\"\n pg_db.query(sql_query, {\"user_ids\": user_ids})\n sql_query = \"DELETE FROM users_temp WHERE user_id = ANY( %(user_ids)s );\"\n pg_db.query(sql_query, {\"user_ids\": user_ids})\n\n\nif __name__ == \"__main__\":\n delete_test_data(\"test_build_area\")\n delete_test_data(\"test_build_area_heidelberg\")\n","repo_name":"mapswipe/python-mapswipe-workers","sub_path":"mapswipe_workers/tests/integration/tear_down.py","file_name":"tear_down.py","file_ext":"py","file_size_in_byte":4546,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"5"} +{"seq_id":"75252220632","text":"import sys\n\ndef main():\n try:\n resurs = sys.argv[1]\n adrese = {}\n p = sys.argv[2]\n with open(p, 'r') as file:\n lines = file.readlines()\n for line in lines:\n splitted_line = line.split()\n if resurs in splitted_line:\n splitted_address = splitted_line[0].split('.')\n prva_dva = \".\".join(splitted_address[:2])\n print(prva_dva)\n if prva_dva not in adrese.keys():\n adrese[prva_dva] = 1\n else:\n adrese[prva_dva] += 1\n except IndexError:\n print(\"Potrebno je za prvi argument unijeti resurs, a za drugi argument putanju do log datoteke.\")\n exit(0)\n\n print(\"-------------------------\")\n print(\"Broj pristupa stranici: \" + resurs)\n print(\" IP podmreza : Broj pristupa\")\n print(\"-------------------------\")\n\n sortirano = dict(sorted(adrese.items(), key=lambda x: x[1], reverse=True))\n for k, v in sortirano.items():\n if v > 1:\n print('%9s.*.* :%3d' % (k, v))\n\nmain()","repo_name":"zvonimir-rezo/scripting-languages-course","sub_path":"Lab3/zadatak1.py","file_name":"zadatak1.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"sh","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} 
+{"seq_id":"19740410189","text":"\"\"\"Job Status Blueprint.\"\"\"\nimport logging\nimport random\n\nfrom flask import (Blueprint, current_app, jsonify, redirect, render_template,\n request, url_for)\n\nfrom .tasks import add_two_numbers, celery\n\n# from flask_celery_job_status.celery import celery\n\n\nlogger = logging.getLogger(__name__)\n\njob_status_handler = Blueprint(name='job_status',\n import_name=__name__,\n template_folder='templates',\n static_folder='static')\n\n\ndef get_all_tasks():\n \"\"\"Return list of all tasks.\"\"\"\n return current_app.config['all_tasks']\n\n\ndef delete_all_tasks():\n \"\"\"Empty the list of all tasks.\"\"\"\n current_app.config['all_tasks'] = []\n\n\n@job_status_handler.before_app_first_request\ndef before_app_first_request():\n \"\"\"Create some global resources before first request.\"\"\"\n current_app.config['all_tasks'] = []\n\n\n@job_status_handler.route('/', methods=['GET'])\ndef index():\n \"\"\"Job Status Index.\"\"\"\n return render_template('job_status_index.html')\n\n\n@job_status_handler.route('/create_task', methods=['GET'])\ndef create_task():\n \"\"\"Create a task.\"\"\"\n # Generating random argument values\n a, b = random.randint(0, 10), random.randint(0, 10)\n task = add_two_numbers.apply_async(args=[a, b])\n\n # Creating a custom task representation attribute in order to render it on the HTML\n task.task_repr = \"{}({}, {})\".format(add_two_numbers.__name__, a, b)\n\n # Adding the task to a global tasks list.\n # WARNING!!! This is BAD practice. 
Use a database instead.\n all_tasks = get_all_tasks()\n all_tasks.append(task)\n\n # Redirecting to the task status.\n return redirect(url_for('job_status.task_status'))\n\n\n@job_status_handler.route('/task_status', methods=['GET'])\ndef task_status():\n \"\"\"Show all task status.\"\"\"\n all_tasks = get_all_tasks()\n return render_template('task_status.html', tasks=all_tasks)\n\n\n@job_status_handler.route('/clear_tasks', methods=['GET'])\ndef clear_tasks():\n \"\"\"Create a task.\"\"\"\n # NOTE: Purging does not work!\n celery.control.purge()\n delete_all_tasks()\n\n # Redirecting to the task status.\n return redirect(url_for('job_status.task_status'))\n\n\n@job_status_handler.route('/create_job', methods=['GET'])\ndef create_job():\n \"\"\"Create Job.\"\"\"\n ret_dict = {\n 'good_job': url_for('job_status.index')\n }\n return jsonify(ret_dict)\n\n\n@job_status_handler.route('/active_job_ids', methods=['GET'])\ndef active_job_ids():\n \"\"\"Running Job IDs.\"\"\"\n ret_dict = {\n 'good_job': url_for('job_status.index')\n }\n return jsonify(ret_dict)\n\n\n@job_status_handler.route('/job_id_status', methods=['GET', 'POST'])\ndef job_id_status():\n \"\"\"Job ID Status.\"\"\"\n ret_dict = {\n 'job_id': request.args['job_id']\n }\n return jsonify(ret_dict)\n","repo_name":"nitred/flask-celery-job-status","sub_path":"flask_celery_job_status/blueprints/job_status/job_status.py","file_name":"job_status.py","file_ext":"py","file_size_in_byte":2854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"44556378324","text":"import os\nfrom django.contrib.gis.utils import LayerMapping\nfrom .models import stazioni_retevista,stazioni_umbria\n\nstazioni_retevista_mapping = {\n 'lat' : 'lat',\n 'long' : 'long',\n 'nome' : 'nome',\n 'did' : 'did',\n 'geom' : 'MULTIPOINT',\n}\n\nstazioni_mapping = {\n 'station_id' : 'station_id',\n 'name' : 'name',\n 'river_id' : 'river_id',\n 'old_id_pt' : 'old_id_pt',\n 'old_id_h' : 
'old_id_h',\n 'link' : 'link',\n 'coord_n' : 'coord_n',\n 'coord_e' : 'coord_e',\n 'height' : 'height',\n 'instrument' : 'instrument',\n 'area' : 'area',\n 'notes' : 'notes',\n 'country' : 'country',\n 'daily_prec' : 'daily_prec',\n 'daily_temp' : 'daily_temp',\n 'm_daily_fl' : 'm_daily_fl',\n 'oid' : 'oid',\n 'geom' : 'MULTIPOINT',\n}\n\n\nstazioni_retevista_shp = os.path.abspath(\n os.path.join(os.path.dirname(__file__), 'data', 'stazioni_retevista.shp'),\n)\nstazioni_retevista_shp2 = os.path.abspath(\n os.path.join(os.path.dirname(__file__), 'data', 'stazioni_retevista2_new.shp'),\n)\nstazioni_umbria_shp = os.path.abspath(\n os.path.join(os.path.dirname(__file__), 'data', 'stazioni.shp'),\n)\n\ndef run(verbose=True):\n lm = LayerMapping(\n stazioni_retevista, stazioni_retevista_shp, stazioni_retevista_mapping,\n transform=False, encoding='iso-8859-1',\n )\n lm.save(strict=True, verbose=verbose)\n\ndef run2(verbose=True):\n lm = LayerMapping(\n stazioni_retevista, stazioni_retevista_shp2, stazioni_retevista_mapping,\n transform=False, encoding='iso-8859-1',\n )\n lm.save(strict=True, verbose=verbose)\n\ndef run_umbria(verbose=True):\n lm = LayerMapping(\n stazioni_umbria, stazioni_umbria_shp, stazioni_mapping,\n transform=False, encoding='iso-8859-1',\n )\n lm.save(strict=True, verbose=verbose)","repo_name":"pierluigiderosa/retevista","sub_path":"income/load.py","file_name":"load.py","file_ext":"py","file_size_in_byte":1797,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"34308610820","text":"\nfrom pyspark.sql import SparkSession\nfrom effulge.effulge import spot_variance\nfrom effulge.effulge import summarize_variance\n\ndef simple_run():\n spark = SparkSession.builder.appName(\"Effulge\").config(\"spark.sql.shuffle.partitions\", 5).getOrCreate()\n #\n df_expected = spark.read.option(\"header\", True ).csv(\"/app/effulge/SampleData/table1.csv\")\n df_available = spark.read.option(\"header\", True 
).csv(\"/app/effulge/SampleData/table2.csv\")\n candidate_key = (\"ProductID\", \"Colour\")\n #\n result = spot_variance(df_expected, df_available, candidate_key)\n result.show(truncate=False)\n #\n result_summary = summarize_variance(result)\n result_summary.show(truncate=False)\n\ndef test_simple():\n simple_run()\n assert True\n\nif __name__ == '__main__':\n simple_run()\n","repo_name":"Regish/Effulge","sub_path":"tests/simple_run.py","file_name":"simple_run.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"21546599933","text":"from othello_imports import possible_moves, make_move\nimport time\nimport sys\n\"\"\"\ndef negamax(board,player,depth,maxdepth):\n if depth > maxdepth or both None:\n return score of game\n nextMoves = possible_moves(board,token)\n \n scores = []\n for move in next moves:\n scores.append(-1*negamax(move,other,player,depth+1,maxdepth))\n # if nextMoves = []:\n # return None\n return max(scores)\n\ndef score(board,player,depth,maxdepth,myMoves,otherMoves):\n x = board.count(\"x\")\n o = board.count(\"o\")\n if len(myMoves) > 0 and len(otherMoves) > 0:\n count corners taken by black, corners taken by white\n count corner adjacent squares(subtract from score)\n x + o + 4* (black-white corners) - 4 * (corners taken by other) - 2* count taken by me\n\n \n\"\"\"\n\ndef find_next_move(board, player, depth):\n\n # Based on whether player is x or o, start an appropriate version of minimax\n # that is depth-limited to \"depth\". 
Return the best available move.\n other = \"o\"\n if player == \"o\":\n other = \"x\"\n maxScore = -1*float('inf')\n nextMoves = possible_moves(board,player)\n if len(nextMoves) == 0:\n return None\n bestMove = nextMoves[0]\n for move in nextMoves:\n new_board = make_move(board,player,move)\n score = -1*negamax(new_board,other,0,depth)\n if score > maxScore:\n maxScore = score\n bestMove = move\n return bestMove\n\n\n\n# All your other functions\ndef negamax(board,player,depth,maxdepth):\n other = \"o\"\n if player == \"o\":\n other = \"x\"\n if depth > maxdepth: # terminates call and returns score if depth is reached\n return scoreGame(board,player)\n nextMoves = possible_moves(board,player)\n if len(nextMoves) == 0:\n return scoreGame(board,player)\n scores = []\n for move in nextMoves:\n new_board = make_move(board,player,move)\n otherturn = negamax(new_board,other,depth+1,maxdepth)\n scores.append(-1*otherturn)\n return max(scores)\n\ndef scoreGame(board,player):\n other = \"o\"\n if player == \"o\":\n other = \"x\"\n ptokens = possible_moves(board,player)\n otokens = possible_moves(board,other)\n if len(ptokens) == 0 and len(otokens)==0: # if the game is over\n pcount = board.count(player)\n ocount = board.count(other)\n return 1000000+(pcount-ocount)\n # difference in the avalible moves\n score = (len(ptokens) - len(otokens))*4 # newly captured pieces\n corners_dict = {\n 0: {1, 8, 9},\n 7: {6, 14, 15},\n 56: {57, 48, 49},\n 63: {62, 54, 55}\n }\n # adds additional points if corners are occupied, deducts points if \n for c in corners_dict:\n if board[c] == player:\n score += 4\n elif board[c] == other:\n score -= 4\n for sq in corners_dict[c]:\n if board[sq] == player:\n score -= 2\n elif board[c] == other:\n score += 2\n return score\n\n\n# print(scoreGame(\"oxxxxx.xoooooxx.xooo.xoo.xooxoox.oxxooxx.xoxxoxxxxxxxxooo.oxxooo\",\"o\"))\n# print(find_next_move(\"...........................ox......xo...........................\",\"x\",))\nboard = 
sys.argv[1]\nplayer = sys.argv[2]\n# board = \"...........................ox......xo...........................\"\n# player = \"x\"\ndepth = 1\n\nfor count in range(board.count(\".\")): # No need to look more spaces into the future than exist at all\n# start = time.perf_counter()\n# while time.perf_counter() - start < 1.5:\n print(find_next_move(board, player, depth))\n depth += 1\n\n# results = []\n# with open(\"boards_timing.txt\") as f:\n# for line in f:\n# board, token = line.strip().split()\n# temp_list = [board, token]\n# print(temp_list)\n# for count in range(1, 7):\n# print(\"depth\", count)\n# start = time.perf_counter()\n# find_next_move(board, token, count)\n# end = time.perf_counter()\n# temp_list.append(str(end -start))\n# print(temp_list)\n# print()\n# results.append(temp_list)\n# with open(\"boards_timing_my_results.csv\", \"w\") as g:\n# for l in results:\n# g.write(\", \".join(l) + \"\\n\")","repo_name":"vfeng2023/AI","sub_path":"AI/AI 1/Unit 3/Othello/othello_ai.py","file_name":"othello_ai.py","file_ext":"py","file_size_in_byte":4221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"12934516416","text":"import torch\r\nfrom model.utils.config import cfg\r\nfrom torch.utils.data.dataloader import default_collate\r\nimport numpy as np\r\n\r\ndef collate_minibatch(list_of_inputs):\r\n \"\"\"Stack samples seperately and return a list of minibatches\r\n A batch contains NUM_GPUS minibatches and image size in different minibatch may be different.\r\n Hence, we need to stack smaples from each minibatch seperately.\r\n \"\"\"\r\n if isinstance(list_of_inputs[0], torch.Tensor):\r\n list_of_inputs = check_pad_tensor_data(list_of_inputs)\r\n out = None\r\n return torch.stack(list_of_inputs, 0, out=out)\r\n elif isinstance(list_of_inputs[0], list):\r\n transposed = zip(*list_of_inputs)\r\n return [collate_minibatch(b) for b in transposed]\r\n elif isinstance(list_of_inputs[0], tuple):\r\n transposed 
= zip(*list_of_inputs)\r\n return [collate_minibatch(b) for b in transposed]\r\n else:\r\n return default_collate(list_of_inputs)\r\n\r\ndef check_pad_tensor_data(list_of_tensors):\r\n tensor0 = list_of_tensors[0]\r\n if tensor0.dim() != 3:\r\n return list_of_tensors\r\n else:\r\n are_tensors_same_sz = True\r\n max_h = tensor0.size(1)\r\n max_w = tensor0.size(2)\r\n for i in range(1,len(list_of_tensors)):\r\n tensor = list_of_tensors[i]\r\n if are_tensors_same_sz is False or tensor0.size() != tensor.size():\r\n are_tensors_same_sz = False\r\n max_h = max(max_h, tensor.size(1))\r\n max_w = max(max_w, tensor.size(2))\r\n if are_tensors_same_sz is False:\r\n list_of_tensors = pad_image_data(list_of_tensors, torch.Size((tensor0.size(0),max_h,max_w)))\r\n return list_of_tensors\r\n\r\ndef pad_image_data(list_of_tensors, sz):\r\n '''\r\n :param list_of_tensors:\r\n :param sz: torch.Size of dim 3.\r\n :return:\r\n '''\r\n list_of_tensors = list(list_of_tensors)\r\n tensor0 = list_of_tensors[0]\r\n for i in range(len(list_of_tensors)):\r\n tnsr = list_of_tensors[i]\r\n sz_tnsr = tnsr.size()\r\n if sz_tnsr != sz:\r\n # padding the data if the sizes are not equal.\r\n new_tensor = tensor0.new_zeros(sz)\r\n new_tensor[:,:sz_tnsr[1],:sz_tnsr[2]] = tnsr\r\n list_of_tensors[i] = new_tensor\r\n list_of_tensors = tuple(list_of_tensors)\r\n return list_of_tensors\r\n","repo_name":"YeLyuUT/FastVOD","sub_path":"lib/roi_data_layer/collate_minibatch.py","file_name":"collate_minibatch.py","file_ext":"py","file_size_in_byte":2359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"72587843671","text":"import RPi.GPIO as GPIO # Import Raspberry Pi GPIO library\nimport time\nfrom datetime import datetime\nfrom enum import Enum\nimport subprocess\nimport logging\n\n# import \"my\" created classes\nfrom gmailer import Gmailer\n\n# setting up the motor output gpio\ngpio_pin = 
4\nGPIO.setwarnings(False)\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(gpio_pin, GPIO.OUT, initial=GPIO.LOW)\n\n\n# Create an emailer for when the door locks/unlocks\nwith open('/program/gmailpw.txt','r') as file:\n gmailpw = file.read()\nmailer = Gmailer('bradsraspberrypi@gmail.com', gmailpw, 'chuggles8cookies@gmail.com', 'automatic doorlock')\n\ndef ping(host):\n # Building the command\n command = ['ping', '-c', '1', '-W', '3', host]\n return subprocess.call(command, \n stdout=subprocess.DEVNULL,\n stderr=subprocess.STDOUT) == 0\n\ndef lock_door():\n GPIO.output(gpio_pin, GPIO.HIGH) # Turn on lock if we are gone\n # send confirmation email\n mailer.send_gmail(\"Door locked\")\n\ndef unlock_door():\n GPIO.output(gpio_pin, GPIO.LOW) # Turn off lock if we are home\n # send confirmation email\n mailer.send_gmail(\"Door unlocked\")\n\nclass State(Enum):\n BRAD_GONE = 1\n BRAD_ARRIVED = 2\n BRAD_CHILLIN = 3\n BRAD_LEAVING = 4\n\n# Initialize some stuff\nlogging.basicConfig(\n level=logging.INFO,\n filename='/dev/shm/doorlock.log',\n format='%(asctime)s [%(levelname)s] - %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S %p')\nlogger = logging.getLogger() # get the root logger\n\nstate = State.BRAD_CHILLIN\nprev_state = State.BRAD_CHILLIN\nphone_present = True\narrive_time = 0\ngone_count = 0\n\n# call some functions beofre looping forever\nlock_door()\n\n\nwhile True:\n phone_present = ping('192.168.1.42')\n \n # Run through what we should do depending on the state of me\n if state == State.BRAD_GONE:\n if phone_present:\n unlock_door()\n state = State.BRAD_ARRIVED\n arrive_time = time.time()\n elif state == State.BRAD_ARRIVED:\n if phone_present:\n # I want the door to unlock upon arrival, then relock ~120s after I've arrived\n if time.time() - arrive_time > 120:\n lock_door()\n state = State.BRAD_CHILLIN\n\n # If its past 7:30 am and im still here, we want to unlock the door for me\n if phone_present and datetime.now().hour == 7 and datetime.now().minute > 30:\n unlock_door()\n 
state = State.BRAD_LEAVING\n\n # Always lock the door if it sees that my phone isn't here\n if not phone_present and state!=State.BRAD_GONE:\n gone_count = gone_count + 1\n # Ping needs to not see my phone three times in a row for it to think im gone\n if gone_count > 2:\n lock_door()\n state = State.BRAD_GONE\n else:\n gone_count = 0\n\n # Helpful debugging\n if state != prev_state:\n logging.info(f'State went from {prev_state} ---> {state}')\n print(logger.handlers)\n prev_state = state\n\n time.sleep(0.25)\n","repo_name":"brjohns97/Automatic_doorlock","sub_path":"program/doorlock.py","file_name":"doorlock.py","file_ext":"py","file_size_in_byte":2967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"3847427511","text":"from turtle import title\nimport aiml\nfrom argon2 import extract_parameters\nimport wikipediaapi\nimport os\nfrom search import search_answers\nfrom flask import Flask, jsonify, request, render_template\napp = Flask(__name__)\n\n\nSESSION_ID = 1234\n\n\n@app.route(\"/\")\ndef hello():\n return render_template('index.html', title='Debug Assistant')\n\n\n@ app.route('/send', methods=['POST'])\ndef send():\n data = request.json\n message = data['message']\n bot_response = kernel.respond(message, SESSION_ID)\n message = message.lower()\n\n if message == 'quit' or message == 'exit' or message == 'bye':\n return jsonify({'answer': 'See ya next time!'})\n elif message == 'save':\n kernel.saveBrain('bot_brain.brn')\n return jsonify({'answer': 'Saved conversation!'})\n\n # If the user text starts with \"what is a...\", get the rest of the sentence\n # and use it as input for searching StackOverflow for an answer, then send it\n elif message.startswith('i have a question'):\n theme = kernel.getPredicate('query', SESSION_ID)\n answers = search_answers(theme, 1)\n print('Debug Assistant> This was the original question:',\n answers[0][0])\n print('Debug Assistant>', answers[0][1])\n print('Debug 
Assistant> For more details, please check',\n answers[0][2])\n return jsonify({\n 'original_question': answers[0][0],\n 'answer': answers[0][1],\n 'question_link': answers[0][2]\n })\n elif message.startswith('tell me more about'):\n theme = kernel.getPredicate('factual', SESSION_ID)\n wiki = wikipediaapi.Wikipedia(\n language='en', extract_format=wikipediaapi.ExtractFormat.WIKI)\n page = wiki.page(theme)\n if (page.exists()):\n return jsonify({\n 'answer': f'Here is what I found: {page.summary}',\n 'question_link': page.fullurl\n })\n else:\n return jsonify({'answer': 'Sorry, I could not find info about that...'})\n else:\n print('Debug Assistant>', bot_response)\n return jsonify({'answer': bot_response})\n\n\nif __name__ == '__main__':\n kernel = aiml.Kernel()\n\n # Check for existing \"brain\", meaning existing kernel progress saved\n # If there is, load it, if not, learn from existing AIML file\n if os.path.isfile('bot_brain.brn'):\n kernel.bootstrap(brainFile='bot_brain.brn')\n else:\n kernel.bootstrap(learnFiles='std-startup.xml',\n commands='load aiml b')\n kernel.saveBrain('bot_brain.brn')\n\n app.run(port=5000, debug=True)\n","repo_name":"Nenma/va-debug-assistant","sub_path":"bot-api/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"22864202378","text":"import requests\n\nclass Location:\n\n def __init__(self) -> None:\n self._api_key = 'AIzaSyDMfqJfekH6iVvgpEXEiU3Mbo3q4LZTv5I'\n\n def get_all_location(self, address:str) -> tuple:\n\n try:\n url = f'https://maps.googleapis.com/maps/api/geocode/json?address={address}&key={self._api_key}'\n\n response = requests.get(url)\n\n if response.status_code == 200:\n data = response.json()\n if data['status'] == 'OK':\n location = data['results'][0]['geometry']['location']\n latitud = location['lat']\n longitud = location['lng']\n\n print(latitud, longitud)\n return latitud, longitud\n 
else:\n print('No se pudo encontrar la ubicación.')\n return 'error', 'No se pudo encontrar la ubicación.'\n else:\n print('Error al hacer la solicitud a la API Geocoding.')\n return 'error', 'Error al hacer la solicitud a la API Geocoding.'\n \n except Exception as e:\n print('Error al hacer la solicitud a la Api')\n return 'error', 'Error al hacer la solicitud a la Api'\n\n\nif __name__ == '__main__':\n location = Location()\n direccion = 'TECAMACHALCO 161,REFORMA SOCIAL ,11650'\n\n latitude, longitude = location.get_all_location(direccion)\n\n print(f'Latitude: {latitude}\\nLongitude: {longitude}')","repo_name":"Humberto12-Xaxy/coverage_izzi","sub_path":"location.py","file_name":"location.py","file_ext":"py","file_size_in_byte":1493,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"15854740159","text":"'''\n\tMulti-thread Cache system\n\tLICENSE MIT @2017 Ivan LAusuch \n\n\tCahceItem, CacheBank and Cache\n'''\n\nimport time\nimport threading\n\n\nclass CacheItem:\n \"\"\"\n Cache Item\n \"\"\"\n\n def __init__(self, key, value, lifetime=0):\n '''\n Constructor\n '''\n\n # Some inits\n self.key = key\n self.value = value\n self.lifetime = lifetime\n\n # Start timeout\n self.resetTimeout()\n\n def checkTimeout(self):\n '''\n Check timeout\n '''\n if self.timeout == 0:\n return True\n else:\n return self.timeout > time.time()\n\n def resetTimeout(self):\n '''\n Reset timeout\n '''\n # Calcule timeout when it will die\n if self.lifetime == 0:\n self.timeout = 0\n else:\n self.timeout = time.time() + self.lifetime\n\n\nclass CacheBank:\n \"\"\"\n Cache Bank\n \"\"\"\n\n def __init__(self, name):\n '''\n Constructor\n '''\n self.name = name\n\n # Init dictionary\n self.dictionary = {}\n\n # Create a lock for each bank\n self.lock = threading.RLock()\n\n def put(self, item):\n '''\n Put a new element in this bank.\n '''\n\n # Adquire the lock to protect this code\n self.lock.acquire()\n\n # Put the new item 
into the dictionary\n self.dictionary[item.key] = item\n\n # Release the lock\n self.lock.release()\n\n def get(self, key):\n '''\n Get an element from this bank\n '''\n\n # Adquire the lock to protect folowing code\n self.lock.acquire()\n\n try:\n # Get the item from the dictionary\n item = self.dictionary[key]\n\n result = True\n\n except:\n # Return none if i wasn't found\n item = None\n\n result = False\n\n else:\n # Check if this item is alive yet\n if not item.checkTimeout():\n # If isn't alive delete it from the bank\n self.delete(key)\n\n # Return none in this case\n item = None\n\n # Will generate an exception\n result = False\n else:\n\n # Will return the item\n result = True\n\n finally:\n # Release the lock\n self.lock.release()\n\n if not result:\n raise(Exception(\"Key {} doesn't exist\".format(key)))\n else:\n return item\n\n def touch(self, key):\n '''\n Get an element from this bank\n '''\n\n # Adquire the lock to protect folowing code\n self.lock.acquire()\n\n try:\n # Get the item from the dictionary\n item = self.dictionary[key]\n\n # Will return the item\n result = True\n\n except:\n # Will generate an exception\n result = False\n\n else:\n # Check if this item is alive yet\n if not item.checkTimeout():\n # If isn't alive delete it from the bank\n self.delete(key)\n\n # Return none in this case\n item = None\n\n # Will generate an exception\n result = False\n else:\n # Reset timeout\n item.resetTimeout()\n\n # Will return the item\n result = True\n\n finally:\n # Release the lock\n self.lock.release()\n\n if not result:\n raise(Exception(\"Key {} doesn't exist\".format(key)))\n else:\n return item\n\n def delete(self, key):\n '''\n Delete an entry\n '''\n # Adquire the lock to protect folowing code\n self.lock.acquire()\n\n try:\n # Remove the item from the dictionary\n del self.dictionary[key]\n\n except:\n # Will generate an exception\n result = False\n\n else:\n # Will return the item\n result = True\n\n finally:\n # Release the lock\n 
self.lock.release()\n\n if not result:\n raise(Exception(\"Key {} doesn't exist\".format(key)))\n\n def incr(self, key, value):\n '''\n Increment the value of an element and returns it\n '''\n\n # Adquire the lock to protect folowing code\n self.lock.acquire()\n\n try:\n # Get the item from the dictionary\n item = self.dictionary[key]\n\n result = True\n\n except:\n # Return none if i wasn't found\n item = None\n\n result = False\n\n else:\n # Check if this item is alive yet\n if not item.checkTimeout():\n # If isn't alive delete it from the bank\n self.delete(key)\n\n # Return none in this case\n item = None\n\n # Will generate an exception\n result = False\n else:\n\n # Will return the item\n result = True\n\n finally:\n # Release the lock\n self.lock.release()\n\n if not result:\n raise(Exception(\"Key {} doesn't exist\".format(key)))\n else:\n try:\n item.value = float(item.value) + value\n return item\n except ValueError:\n raise(Exception(\"The value for key {} isn't a number\".format(key)))\n\n def reset(self):\n '''\n Remove all elemnts of the bank\n '''\n # Adquire the lock to protect folowing code\n self.lock.acquire()\n\n # Set a new dictionary\n self.dictionary = {}\n\n # Release the lock\n self.lock.release()\n\n def update(self):\n '''\n Check the expiration of all elements of the bank\n '''\n # Adquire the lock to protect folowing code\n self.lock.acquire()\n\n # Prepare and empty list of elements to remove\n listToRemove = []\n\n # Extract the list of elements to remove\n for key in self.dictionary:\n if not self.dictionary[key].checkTimeout():\n listToRemove.append(key)\n\n # Remove all these elements\n for key in listToRemove:\n self.dictionary[key] = None\n\n # Release the lock\n self.lock.release()\n\n def keys(self):\n '''\n Get the list of keys\n '''\n\n # Update all elements of the bank\n self.update()\n\n # Adquire the lock to protect folowing code\n self.lock.acquire()\n\n # Return the dictionary keys\n keys = 
list(self.dictionary.keys())\n\n # Release the lock\n self.lock.release()\n\n return keys\n\n\nclass Cache:\n \"\"\"\n Cache library\n \"\"\"\n\n def __init__(self):\n # Init bank dictionary\n self.banks = {}\n\n # Create a lock for bank manipulation\n self.lock = threading.RLock()\n\n def getBank(self, name):\n '''\n Gets a bank or creates it\n '''\n\n # Adquire the lock to protect folowing code\n self.lock.acquire()\n\n try:\n # Get a bank\n bank = self.banks[name]\n except:\n # If doesn't exist create a new one\n bank = CacheBank(name)\n\n # Put the new bank in the dictionary\n self.banks[name] = bank\n\n # Release the lock\n self.lock.release()\n\n # Return the selected bank\n return bank\n\n def getBanks(self):\n\n # Adquire the lock to protect folowing code\n self.lock.acquire()\n\n # Get the keys\n keys = list(self.banks.keys())\n\n # Release the lock\n self.lock.release()\n\n # Return the list of key names\n return keys\n\n def get(self, bankName, key):\n # Get a item from a bank\n return self.getBank(bankName).get(key)\n\n def put(self, bankName, item):\n # Put a intem into a bank\n self.getBank(bankName).put(item)\n\n def touch(self, bankName, item):\n self.getBank(bankName).touch(item)\n\n def delete(self, bankName, item):\n self.getBank(bankName).delete(item)\n\n def incr(self, bankName, key, value):\n return self.getBank(bankName).incr(key, value)\n","repo_name":"ilausuch/CacheServer","sub_path":"src/Core/Cache.py","file_name":"Cache.py","file_ext":"py","file_size_in_byte":8313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"4243317496","text":"import requests\nimport json\nimport pyttsx3 \n\nurl = \"http://v2.jokeapi.dev/joke/Any?blacklistFlags=nsfw,religious,political,racist,sexist,explicit&type=single\"\nresponse = requests.get(url) \nprint(response.status_code)\n\njsonData = json.loads(response.text)\nprint(jsonData[\"joke\"])\n\nengine = pyttsx3.init()\nengine.setProperty('rate', 150) 
\nengine.say(jsonData[\"joke\"])\nengine.runAndWait()","repo_name":"Reginld1408/Random-Jokes-TTS","sub_path":"random_jokes.py","file_name":"random_jokes.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"5041870896","text":"## gets sale price and regular price for dell xps\n\nfrom bs4 import BeautifulSoup\nimport re\nimport requests\nimport datetime\nimport os\nimport csv\nimport sys\n\ndef main():\n \n non_decimal = re.compile(r'[^\\d.]+')\n\n URL = \"https://www.dell.com/en-us/shop/dell-laptops/new-xps-13-touch/spd/xps-13-9380-laptop/xnita3ws707h\"\n\n page = requests.get(URL)\n\n soup = BeautifulSoup(page.content, 'html.parser')\n dellValue = soup.find_all(class_='cf-rr-total cf-rr-price-display')[0].find_all(\"div\")[0].get_text()\n\n salePrice = float(non_decimal.sub('', dellValue))\n print(\"sale price\", salePrice)\n\n estimatedValue = soup.find_all(class_='cf-i')\n\n try:\n evalString = estimatedValue[0].find_all(\"div\", class_ = \"strikethrough cf-price\")[0].get_text()\n regPrice = float(non_decimal.sub('', evalString))\n except:\n regPrice = str(salePrice)\n print(\"regular price\", regPrice)\n\n tmp = [str(regPrice), str(salePrice), str(datetime.datetime.now())]\n \n filePath = r\"D:\\Dropbox\\AcademiaDropbox\\dellXpsPrices.csv\"\n exists = os.path.isfile(filePath)\n \n if sys.version[0:3] == '3.7':\n openType = 'a+'\n elif sys.version[0:3] == '3.5':\n openType = 'a+'\n else:\n openType = 'a+b'\n\n with open(filePath, openType) as myfile:\n wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)\n if not exists:\n wr.writerows([[\"RegPrice\", \"SalePrice\", \"Datetime\"]])\n wr.writerows([tmp])\n\nif __name__== \"__main__\":\n 
main()\n","repo_name":"callinSwitzer/GarbageCollector","sub_path":"ScrapeAndSaveDellPrices.py","file_name":"ScrapeAndSaveDellPrices.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"21931409714","text":"from WordsCreator.WordsListCreator import WordsListCreator\n\n\ndef CharactersToUse():\n print(\"From which character to which use (a-z:\")\n charactersArray = []\n character_start = input(\"Input beginning letter: \")\n character_end = input(\"Input ending letter: \")\n if CharactersValidator(character_start, character_end):\n AddToArray(character_start, character_end, charactersArray)\n else:\n AddToArray('a', 'z', charactersArray)\n return charactersArray\n\n\ndef CharactersValidator(start_letter, end_letter):\n if ord(start_letter) > ord(end_letter):\n return False\n return True\n\n\ndef AddToArray(start_letter, end_letter, charactersArray):\n for i in range(ord(start_letter), ord(end_letter)):\n charactersArray.append(chr(i))\n\n\ndef ExpressionToUse(_length):\n while True:\n expression = input(\"Input expression to use in your word: \")\n if ExpressionValidator(expression, _length):\n return expression\n\n\ndef ExpressionValidator(expression, _length):\n if len(expression) <= _length:\n return True\n else:\n print(\"Invalid expression!\")\n return False\n\n\ndef isRepeatedEvents():\n while True:\n answer = input(\"Do you want to letters to be repeated? (y/n)\")\n if answer == 'y':\n return True\n elif answer == 'n':\n return False\n else:\n print(\"Wrong answer! Answer y (Yes!) 
or n (No!)\")\n\n\nif __name__ == '__main__':\n print(\"Welcome to WordListCreator!\")\n length = input(\"Length of word: \")\n words = ExpressionToUse(length)\n charactersArray = CharactersToUse()\n repeatedLetters = isRepeatedEvents()\n wordList = WordsListCreator(length, words, charactersArray, repeatedLetters)\n","repo_name":"Kankarollo/WordListCreator","sub_path":"WordsCreator/WordCreatorMain.py","file_name":"WordCreatorMain.py","file_ext":"py","file_size_in_byte":1740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"73226476951","text":"from numpy.lib.scimath import sqrt\nfrom math import floor\nfrom operator import itemgetter\n\n\n_to = [[1, 9, [2, 4, 8, 16, 19, 23, 28, 41, 43]],\n [2, 6, [3, 5, 7, 21, 51, 78]]]\n\n_to.sort(key=itemgetter(0))\nfor i in range(len(_to)):\n _to[i][2].sort()\n\n\n_be = [[1, 8, [1, 2, 3, 5, 7, 42, 51, 60]],\n [2, 7, [8, 18, 19, 27, 29, 50, 77]]]\n\n_be.sort(key=itemgetter(0))\nfor i in range(len(_be)):\n _be[i][2].sort()\n\n\nanswer = []\n\n\ndef checker(l1, l2):\n l1i = 0\n l2i = 0\n while(l1i < len(l1) and l2i < len(l2)):\n if(l1[l1i] == l2[l2i]+1):\n answer.append([l2[l2i], l1[l1i]])\n l1i += 1\n l2i += 1\n elif(l1[l1i] < l2[l2i] and l1i < len(l1)):\n l1i += 1\n elif (l2[l2i] < l1[l1i] and l2i < len(l2)):\n l2i += 1\n\n print(answer)\n\n\ndef samelist(f, s):\n i = 0\n x = 0\n while i < len(f) and x < len(s):\n if f[i][0] == s[x][0]:\n print(\"Document # {0} :\".format(f[i][0]))\n checker(f[i][2], s[x][2])\n i += 1\n x += 1\n elif f[i][0] < s[x][0]:\n i += 1\n else:\n x += 1\n\n\nsamelist(_to, _be)\n","repo_name":"MahmoudM69/IR-Task-6","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"3501103457","text":"\nfrom random import random\nfrom random import shuffle\nfrom threading import Thread\nimport sys\nfrom time import 
sleep\nfrom time import time\nimport signal\n\nfrom neopixel import *\nfrom websocket_server import WebsocketServer\n\n\ndef wheel(pos, bri = 1):\n\t\"\"\"Generate rainbow colors across 0-255 positions.\"\"\"\n\tif pos < 85:\n\t\treturn Color(int((pos * 3)*bri), int((255 - pos * 3) * bri), 0)\n\telif pos < 170:\n\t\tpos -= 85\n\t\treturn Color(int((255 - pos * 3) * bri), 0, int(pos * 3 * bri))\n\telse:\n\t\tpos -= 170\n\t\treturn Color(0, int(pos * 3 * bri), int((255 - pos * 3) * bri))\n\n\n#================================================\n#\n# PATTERNS\n#\n#------------------------------------------------\nclass PatternBase(object):\n\tdef __init__(self, numPixels):\n\t\tself.numPx = numPixels\n\t\tself.state = 0\n\t\tself.loopCount = 0\n\t\tself.strip_order = range(numPixels)\n\t\tshuffle(self.strip_order)\n\t\tself.clear()\n\n\tdef clear(self):\n\t\tpass\n\n\tdef step(self, strip):\n\t\tself.loopCount += 1\n\t\tself.state = self._step(self.state, strip)\n\n\n\nclass Twinkle(PatternBase):\n\tdef __init__(self, numPixels):\n\t\tsuper(Twinkle, self).__init__(numPixels)\n\n\tdef clear(self):\n\t\tself.stars = []\n\n\tdef _step(self, state, strip):\n\t\tfor i,x in enumerate(self.stars):\n\t\t\tif x[1] == 0:\n\t\t\t\t# dimming\n\t\t\t\tif x[2] == [0,0,0]:\n\t\t\t\t\tif state == 3:\n\t\t\t\t\t\tself.stars.remove(x)\n\t\t\t\t\t\tif len(self.stars) == 0:\n\t\t\t\t\t\t\tprint(\"---twinkle done\")\n\t\t\t\t\t\t\treturn 0\n\t\t\t\t\t\tbreak\n\t\t\t\t\telse:\n\t\t\t\t\t\twhile True:\n\t\t\t\t\t\t\tidx = int(random() * 900) % self.numPx\n\t\t\t\t\t\t\tfor st in self.stars:\n\t\t\t\t\t\t\t\tif idx == st[0]:\n\t\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\tself.stars[i][0] = idx\n\t\t\t\t\t\tself.stars[i][1] = 1\n\t\t\t\telse:\n\t\t\t\t\tself.stars[i][2] = [max(0, c*9/10) for c in x[2]]\n\t\t\telse:\n\t\t\t\t# brightening\n\t\t\t\tif x[2] == [255,255,255]:\n\t\t\t\t\tself.stars[i][1] = 0\n\t\t\t\telse:\n\t\t\t\t\tself.stars[i][2] = [min(255, int(c + 
(random()**3)*25)) for c in x[2]]\n\t\t\tstrip.setPixelColor(x[0], Color(*x[2]))\n\t\tif state == 1:\n\t\t\tif len(self.stars) < 50:\n\t\t\t\tif self.loopCount % 4 == 0:\n\t\t\t\t\tself.stars.append([int(random() * self.numPx), 1, [0,0,0]])\n\t\t\telse:\n\t\t\t\tprint(\"---twinkle full\")\n\t\t\t\treturn 2\n\t\treturn state\n\n\n\nclass Classic(PatternBase):\n\tdef __init__(self, numPixels):\n\t\tsuper(Classic, self).__init__(numPixels)\n\t\tself.strip_order = range(0, numPixels, 4)\n\t\tshuffle(self.strip_order)\n\n\tdef clear(self):\n\t\tself.dots = []\n\n\tdef newDot(self, strip, idx):\n\t\tx = self.strip_order[idx] + (int(random() * 100) % 4)\n\t\tif random() > 0.05 and idx < len(self.dots):\n\t\t\tx = self.dots[idx][0]\n\t\tstrip.setPixelColor(x, Color(220,180,50))\n\t\treturn [x, int(random() * 100)]\n\n\tdef _step(self, state, strip):\n\t\tfor i in range(len(self.dots)):\n\t\t\tif self.dots[i][1] == 0:\n\t\t\t\tstrip.setPixelColor(self.dots[i][0], 0x0)\n\t\t\t\tif state != 3:\n\t\t\t\t\tself.dots[i] = self.newDot(strip, i)\n\t\t\t\telse:\n\t\t\t\t\tdel self.dots[i]\n\t\t\t\t\tif len(self.dots) == 0:\n\t\t\t\t\t\tshuffle(self.strip_order)\n\t\t\t\t\t\tprint(\"---classic done\")\n\t\t\t\t\t\treturn 0\n\t\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tself.dots[i][1] -= 1\n\t\tif state == 1:\n\t\t\tif len(self.dots) < 75:\n\t\t\t\tself.dots.append(self.newDot(strip, len(self.dots)))\n\t\t\telse:\n\t\t\t\tprint(\"---classic full\")\n\t\t\t\treturn 2\n\t\treturn state\n\n\n\nclass Candycane(PatternBase):\n\tdef __init__(self, numPixels):\n\t\tsuper(Candycane, self).__init__(numPixels)\n\n\tdef clear(self):\n\t\tself.stripes = []\n\n\tdef newStripe(self):\n\t\tr = int(random() * 5)+2 # stripe radius\n\t\treturn [-r, r, int(random()*2+0.5)+1, Color(255,0,0) if random() < 0.5 else Color(255,255,255)]\n\n\tdef _step(self, state, strip):\n\t\tfor i in range(len(self.stripes)):\n\t\t\tif self.stripes[i][0] - self.stripes[i][1] > self.numPx:\n\t\t\t\tif state != 
3:\n\t\t\t\t\tself.stripes[i] = self.newStripe()\n\t\t\t\telse:\n\t\t\t\t\tdel self.stripes[i]\n\t\t\t\t\tif len(self.stripes) == 0:\n\t\t\t\t\t\tprint(\"---candycane done\")\n\t\t\t\t\t\treturn 0\n\t\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tfor speed in range(self.stripes[i][2] + (2 if state == 3 else 0)):\n\t\t\t\t\tstrip.setPixelColor(min(self.numPx, self.stripes[i][0]+self.stripes[i][1]), self.stripes[i][3])\n\t\t\t\t\tstrip.setPixelColor(max(0,self.stripes[i][0]-self.stripes[i][1]), 0x0)\n\t\t\t\t\tself.stripes[i][0] += 1\n\n\t\tif state == 1:\n\t\t\tif len(self.stripes) < 20:\n\t\t\t\tif self.loopCount % 5 == 0:\n\t\t\t\t\tself.stripes.append(self.newStripe())\n\t\t\telse:\n\t\t\t\tprint(\"---candycane full\")\n\t\t\t\treturn 2\n\n\t\treturn state\n\n\n\nclass Wind(PatternBase):\n\tdef __init__(self, numPixels):\n\t\tsuper(Wind, self).__init__(numPixels)\n\n\tdef clear(self):\n\t\tself.wisp = []\n\n\tdef newWisp(self):\n\t\te = int(random() * 30)+10\n\t\ts = int(random() * (self.numPx-e))\n\t\treturn [s, e+s, s, min(1.0, random()+0.5)]\n\n\tdef _step(self, state, strip):\n\t\tfor i in range(len(self.wisp)):\n\t\t\tif self.wisp[i][0] > self.wisp[i][1] + 1:\n\t\t\t\tstrip._led_data[self.wisp[i][0]] = 0x0\n\t\t\t\tstrip._led_data[self.wisp[i][0]+1] = 0x0\n\t\t\t\tif state != 3:\n\t\t\t\t\tself.wisp[i] = self.newWisp()\n\t\t\t\telse:\n\t\t\t\t\tdel self.wisp[i]\n\t\t\t\t\tif len(self.wisp) == 0:\n\t\t\t\t\t\tprint(\"---wind done\")\n\t\t\t\t\t\treturn 0\n\t\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tc = max(0,int(255.0 * ((0.5 - abs( ((1.0 * self.wisp[i][1]-self.wisp[i][0])/(1.0 * self.wisp[i][1] - self.wisp[i][2])) - 0.5))*2.0)**4.0))\n\t\t\t\tstrip._led_data[self.wisp[i][0] - 1] = 0x0\n\t\t\t\tstrip._led_data[self.wisp[i][0]] = Color(int(c * self.wisp[i][3]/4),int(c * self.wisp[i][3]/4),c/4)\n\t\t\t\tself.wisp[i][0] += 1\n\t\t\t\tstrip._led_data[self.wisp[i][0]] = Color(int(c * self.wisp[i][3]),int(c * self.wisp[i][3]),c)\n\t\t\t\tstrip._led_data[self.wisp[i][0]+1] = 
Color(int(c * self.wisp[i][3]/4),int(c * self.wisp[i][3]/4),c/4)\n\n\t\tif state == 1:\n\t\t\tif len(self.wisp) < 20:\n\t\t\t\tif self.loopCount % 6 == 0:\n\t\t\t\t\tself.wisp.append(self.newWisp())\n\t\t\telse:\n\t\t\t\tprint(\"---wind full\")\n\t\t\t\treturn 2\n\t\treturn state\n\n\n\nclass Rainbow(PatternBase):\n\tdef __init__(self, numPixels):\n\t\tsuper(Rainbow, self).__init__(numPixels)\n\t\tself.buff = [0] * numPixels\n\n\tdef clear(self):\n\t\tself.i = 0\n\t\tself.cleared = 0\n\t\tshuffle(self.strip_order)\n\n\tdef _step(self, state, strip):\n\t\tfor t in range(10):\n\t\t\tif self.i >= len(self.strip_order):\n\t\t\t\tself.i = 0\n\t\t\t\tif state == 1:\n\t\t\t\t\tself.buff = strip._led_data\n\t\t\t\t\tprint(\"---rainbow full\")\n\t\t\t\t\treturn 2\n\t\t\tif self.i == 0 and state == 3:\n\t\t\t\tif self.cleared == 2:\n\t\t\t\t\tself.cleared = 0\n\t\t\t\t\tprint(\"---rainbow done\")\n\t\t\t\t\treturn 0\n\t\t\t\tself.cleared += 1\n\t\t\tpos = self.strip_order[self.i]\n\t\t\tcolor = wheel((pos + int(time()*30)) % 256) if state != 3 else 0x0\n\t\t\tself.buff[pos] = color\n\t\t\tself.i += 1\n\n\t\treturn state\n\n\n\nclass Blur(PatternBase):\n\tdef __init__(self, numPixels):\n\t\tsuper(Blur, self).__init__(numPixels)\n\t\tself.strip_order = range(numPixels)\n\t\tshuffle(self.strip_order)\n\t\tself.buff = [0] * numPixels\n\n\tdef clear(self):\n\t\tself.i = 0\n\t\tself.cleared = 0\n\t\tself.baseC = int(random()*1024)%256\n\t\tself.dots = []#[self.newDot() for x in range(15)]\n\n\tdef newDot(self):\n\t\treturn [int(random()*900)%self.numPx, wheel((self.baseC + int(random() * 40))%256, random()**2)]\n\n\tdef _step(self, state, strip):\n\t\tif state == 1:\n\t\t\tself.buff = strip._led_data\n\t\t\tprint(\"---blur full\")\n\t\t\treturn 2\n\t\tfor t in range(40):\n\t\t\tif self.i >= len(self.strip_order):\n\t\t\t\tself.i = 0\n\t\t\t\tshuffle(self.strip_order)\n\t\t\tif self.i == 0 and state == 3:\n\t\t\t\tif self.cleared == 2:\n\t\t\t\t\tself.cleared = 
0\n\t\t\t\t\tprint(\"---blur done\")\n\t\t\t\t\treturn 0\n\t\t\t\tself.cleared += 1\n\t\t\tpos = self.strip_order[self.i]\n\t\t\tif state != 3:\n\t\t\t\tc0 = self.buff[pos-1]\n\t\t\t\t# c1 = self.buff[pos]\n\t\t\t\tc2 = self.buff[(pos+1)%self.numPx]\n\t\t\t\t# c = ((((c0&0xff0000)+(c1&0xff0000)+(c2&0xff0000))/3)&0xff0000) |\\\n\t\t\t\t# ((((c0& 0xff00)+(c1& 0xff00)+(c2& 0xff00))/3)&0xff00) |\\\n\t\t\t\t# ((((c0& 0xff)+(c1& 0xff)+(c2& 0xff))/3)&0xff)\n\t\t\t\tc = ((((c0&0xff0000)+(c2&0xff0000))>>1)&0xff0000) |\\\n\t\t\t\t ((((c0& 0xff00)+(c2& 0xff00))>>1)&0xff00) |\\\n\t\t\t\t ((((c0& 0xff)+(c2& 0xff))>>1)&0xff)\n\t\t\t\tself.buff[pos] = c\n\t\t\telse:\n\t\t\t\tself.buff[pos] = 0\n\t\t\tself.i += 1\n\t\tif state != 3:\n\t\t\t# update base dots\n\t\t\tfor t in self.dots:\n\t\t\t\tself.buff[t[0]] = t[1]\n\t\t\t# add dots\n\t\t\tif len(self.dots) < 10 and self.loopCount % 10 == 0:\n\t\t\t\tself.dots.append(self.newDot())\n\t\t\t# base color\n\t\t\tif self.loopCount % 10 == 0:\n\t\t\t\ti = int(random()*1000)%len(self.dots)\n\t\t\t\tself.dots[i] = self.newDot()\n\t\t\t# color burst\n\t\t\tif self.loopCount % 30 == 0:\n\t\t\t\tc = wheel(int(random()*1024)%256)\n\t\t\t\ti = int(random()*900)%(self.numPx-4)\n\t\t\t\tself.buff[i:i+4] = [c]*4\n\t\t\t# change base color\n\t\t\tif self.loopCount % 100 == 0 and random() < 0.1:\n\t\t\t\tself.baseC = int(random()*1024)%256\n\t\t\t\tprint(\"---blur base color change %d\"%self.baseC)\n\t\treturn state\n\n\nclass Fairy(PatternBase):\n\tdef __init__(self, numPx):\n\t\tsuper(Fairy, self).__init__(numPx)\n\t\tself.strip_b = [random() ** 2 for x in range(numPx)]\n\t\tself.strip_c = [int(random() * 40) for x in range(numPx)]\n\n\tdef clear(self):\n\t\tself.wisp = []\n\t\tself.spawn = 0\n\n\tdef newWisp(self, i = -1):\n\t\td = (int(random()*100)%2) * 2 - 1\n\t\tlength = int(random() * 15 + 8)\n\t\tpx = range(1, length)\n\t\tc = int(random() * 1024)%256\n\t\treturn [0 if d > 0 else self.numPx - 1, d, c, length, px]\n\n\tdef _step(self, 
state, strip):\n\t\tfor i in range(len(self.wisp)):\n\t\t\tshuffle(self.wisp[i][4])\n\t\t\tif state == 3:\n\t\t\t\tif self.wisp[i][1] > 0 and self.wisp[i][0] < self.numPx / 2 or self.wisp[i][1] < 0 and self.wisp[i][0] > self.numPx / 2:\n\t\t\t\t\tself.wisp[i][1] = -self.wisp[i][1]\n\t\t\tif self.wisp[i][0] > self.numPx + self.wisp[i][3] or self.wisp[i][0] < -self.wisp[i][3]:\n\t\t\t\tif state != 3:\n\t\t\t\t\tif random() < 0.02 and self.spawn > 50:\n\t\t\t\t\t\tself.wisp[i] = self.newWisp(i)\n\t\t\t\t\t\tself.spawn = 0\n\t\t\t\t\tself.spawn += 1\n\t\t\t\telse:\n\t\t\t\t\tdel self.wisp[i]\n\t\t\t\t\tif len(self.wisp) == 0:\n\t\t\t\t\t\tshuffle(self.strip_b)\n\t\t\t\t\t\tshuffle(self.strip_c)\n\t\t\t\t\t\tprint(\"---fairy done\")\n\t\t\t\t\t\treturn 0\n\t\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tif self.wisp[i][0] - self.wisp[i][3] * self.wisp[i][1] >= 0 and self.wisp[i][0] - self.wisp[i][3] * self.wisp[i][1] < self.numPx:\n\t\t\t\t\tstrip._led_data[self.wisp[i][0] - self.wisp[i][3] * self.wisp[i][1]] = 0x0\n\t\t\t\tif self.wisp[i][0] >= 0 and self.wisp[i][0] < self.numPx:\n\t\t\t\t\tstrip._led_data[self.wisp[i][0]] = Color(255,255,255)\n\t\t\t\tfor x in self.wisp[i][4][0:self.wisp[i][3]/3]:\n\t\t\t\t\tx = x * self.wisp[i][1]\n\t\t\t\t\tif self.wisp[i][0] - x >= 0 and self.wisp[i][0] - x < self.numPx:\n\t\t\t\t\t\tb = (((self.wisp[i][3]+1)-abs(x))/float(self.wisp[i][3]-1))**3 * self.strip_b[(self.wisp[i][0] + x)%self.numPx]\n\t\t\t\t\t\tc = wheel((self.wisp[i][2] + self.strip_c[(self.wisp[i][0] + x)%self.numPx]) % 256, b)\n\t\t\t\t\t\tstrip._led_data[self.wisp[i][0] - x] = c\n\t\t\t\tself.wisp[i][0] += self.wisp[i][1]\n\t\tif state == 1:\n\t\t\tif len(self.wisp) < 6:\n\t\t\t\tif (self.spawn > 50 and random() < 0.1) or len(self.wisp) == 0:\n\t\t\t\t\tself.wisp.append(self.newWisp())\n\t\t\t\t\tself.spawn = 0\n\t\t\t\tself.spawn += 1\n\t\t\telse:\n\t\t\t\tprint(\"---fairy full\")\n\t\t\t\treturn 2\n\t\treturn state\n\n\n\n\nclass Off(PatternBase):\n\tdef __init__(self, 
numPixels):\n\t\tsuper(Off, self).__init__(numPixels)\n\n\tdef clear(self):\n\t\tself.i = 0\n\n\tdef _step(self, state, strip):\n\t\tif self.i >= len(self.strip_order):\n\t\t\tself.i = 0\n\t\t\tshuffle(self.strip_order)\n\t\t\tif state == 1:\n\t\t\t\treturn 2\n\t\t\telif state == 3:\n\t\t\t\treturn 0\n\t\tstrip.setPixelColor(self.strip_order[self.i], 0)\n\t\tself.i += 1\n\n\t\treturn state\n\n\n\n\n#================================================\n#\n# STATES / CONTROLS\n#\n#------------------------------------------------\n\n\n#--------------------------------------\n# available catalog\npatterns = [\n\t# event , func , full stop ,\n\t[-1 , Off(300) , 0] ,\n\t[-1 , Rainbow(300) , 1] ,\n\t[-1 , Candycane(300) , 0] ,\n\t[-1 , Classic(300) , 0] ,\n\t[-1 , Wind(300) , 0] ,\n\t[-1 , Twinkle(300) , 0] ,\n\t[-1 , Fairy(300) , 0] ,\n\t[-1 , Blur(300) , 1] ,\n]\n\nallPats = [\n\t\"off\",\n\t\"rainbow\",\n\t\"candycane\",\n\t\"classic\",\n\t\"wind\",\n\t\"twinkle\",\n\t\"fairy\",\n\t\"blur\",\n]\n\n\n#================================================\n#\n# SERVER\n#\n#------------------------------------------------\ndef start(name):\n\tif name in allPats:\n\t\tif patterns[allPats.index(name)][1].state != 2:\n\t\t\tpatterns[allPats.index(name)][0] = 1\n\ndef stop(name, offMode):\n\tif name in allPats:\n\t\tif patterns[allPats.index(name)][1].state != 0:\n\t\t\tpatterns[allPats.index(name)][0] = 4 if offMode else 3\n\ndef solo(name):\n\toffMode = patterns[allPats.index(name)][2]\n\tfor key in allPats:\n\t\tif key == name:\n\t\t\tstart(key)\n\t\telse:\n\t\t\tstop(key, offMode)\n\ndef serv_recvParser(cli, serv, msg):\n\tprint(msg)\n\tsolo(msg)\n\ndef signal_handler(signal, frame):\n\tglobal serv_thread\n\tglobal server\n\tprint(\"Exiting...\")\n\tserver.server_close()\n\tserv_thread.join()\n\tsys.exit(0)\n\n\n\n#================================================\n#\n# MAIN / INIT\n#\n#------------------------------------------------\n\n\nsignal.signal(signal.SIGINT, 
signal_handler)\nprint('Press Ctrl+C to exit')\n\nserver = WebsocketServer(12000, host=\"0.0.0.0\")\nserver.set_fn_message_received(serv_recvParser)\nserv_thread = Thread(target=server.run_forever, args=())\nserv_thread.start()\n\nstrip = Adafruit_NeoPixel(300, 12, strip_type = ws.WS2811_STRIP_GRB)\nstrip.begin()\nstrip_order = range(strip.numPixels())\nshuffle(strip_order)\n\nwhile True:\n\tlooptime = time()\n\tfor idx in range(len(patterns)):\n\t\tif patterns[idx][1].state > 0:\n\t\t\tpatterns[idx][1].step(strip)\n\t\tif patterns[idx][0] >= 0:\n\t\t\tif patterns[idx][0] == 1: # turn on\n\t\t\t\tpatterns[idx][1].state = 1\n\t\t\telif patterns[idx][0] == 3: # turn off (gentle)\n\t\t\t\tpatterns[idx][1].state = 3\n\t\t\telif patterns[idx][0] == 4: # turn off (hard stop)\n\t\t\t\tpatterns[idx][1].state = 0\n\t\t\t\tpatterns[idx][1].clear()\n\t\t\tpatterns[idx][0] = -1\n\tstrip.show()\n\n\tdelta = time() - looptime\n\t# print(\"%.4f\"%(delta*40))\n\tif delta < 1.0/40:\n\t\tsleep(1.0/40 - delta)\n\n\n\n","repo_name":"davidhay25/Christmas-Lights","sub_path":"rpi_ws281x-master/python/tree.py","file_name":"tree.py","file_ext":"py","file_size_in_byte":13414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"28978322206","text":"import sys\nimport time\nimport warnings\nimport requests\nimport threading\nimport itertools\ntry:\n from Queue import Queue\nexcept ImportError:\n from queue import Queue\n\n_CALL_BATCH_SIZE = 200\n_NUM_THREADS_DEFAULT = 4\n\n\ndef _create_rest_url(host, version, species, category, subcategory,\n resource, query_id, options):\n \"\"\"Creates the URL for querying the REST service\"\"\"\n\n # Creating the basic URL\n url_items = [host, 'webservices/rest', version, species, category,\n subcategory, query_id, resource]\n url_items = filter(None, url_items) # Some url items can be empty\n url = ('/'.join(url_items))\n\n # Checking optional params\n if options is not None:\n opts = []\n for k, v 
in options.items():\n if k == 'debug':\n continue\n if isinstance(v, list):\n opts.append(k + '=' + ','.join(map(str, v)))\n else:\n opts.append(k + '=' + str(v))\n if opts:\n url += '?' + '&'.join(opts)\n\n return url\n\n\ndef _fetch(session, host, version, species, category, subcategory, resource,\n query_id=None, options=None, method='get', data=None):\n \"\"\"Queries the REST service retrieving results until exhaustion or limit\"\"\"\n # HERE BE DRAGONS\n final_response = None\n\n # Setting up skip and limit default parameters\n call_skip = 0\n call_limit = 1000\n max_limit = None\n if options is None:\n opts = {'skip': call_skip, 'limit': call_limit}\n else:\n opts = options.copy() # Do not modify original data!\n if 'skip' not in opts:\n opts['skip'] = call_skip\n # If 'limit' is specified, a maximum of 'limit' results will be returned\n if 'limit' in opts:\n max_limit = opts['limit']\n # Server must be always queried for results in groups of 1000\n opts['limit'] = call_limit\n\n # If there is a query_id, the next variables will be used\n total_id_list = [] # All initial ids\n next_id_list = [] # Ids which should be queried again for more results\n next_id_indexes = [] # Ids position in the final response\n if query_id is not None:\n total_id_list = query_id.split(',')\n\n # If some query has more than 'call_limit' results, the server will be\n # queried again to retrieve the next 'call_limit results'\n call = True\n current_query_id = None # Current REST query\n current_id_list = None # Current list of ids\n time_out_counter = 0 # Number of times a query is repeated due to time-out\n while call:\n # Check 'limit' parameter if there is a maximum limit of results\n if max_limit is not None and max_limit <= call_limit:\n opts['limit'] = max_limit\n\n # Updating query_id and list of ids to query\n if query_id is not None:\n if current_query_id is None:\n current_query_id = query_id\n current_id_list = total_id_list\n current_id_indexes = 
range(len(total_id_list))\n else:\n current_query_id = ','.join(next_id_list)\n current_id_list = next_id_list\n current_id_indexes = next_id_indexes\n\n # Retrieving url\n url = _create_rest_url(host=host,\n version=version,\n species=species,\n category=category,\n subcategory=subcategory,\n query_id=current_query_id,\n resource=resource,\n options=opts)\n\n # DEBUG\n if options is not None:\n if 'debug' in options and options['debug']:\n sys.stderr.write(url + '\\n')\n\n # Getting REST response\n if method == 'get':\n r = session.get(url)\n elif method == 'post':\n r = session.post(url, data=data)\n else:\n msg = 'Method \"' + method + '\" not implemented'\n raise NotImplementedError(msg)\n\n if r.status_code == 504: # Gateway Time-out\n if time_out_counter == 99:\n msg = 'Server not responding in time'\n raise requests.ConnectionError(msg)\n time_out_counter += 1\n time.sleep(1)\n continue\n time_out_counter = 0\n\n try:\n json_obj = r.json()\n\n # TODO Remove deprecated response and result in future release. 
Added for backwards compatibility\n if 'response' in json_obj:\n json_obj['responses'] = json_obj['response']\n for query_result in json_obj['responses']:\n if 'result' in query_result:\n query_result['results'] = query_result['result']\n\n response = json_obj['responses']\n\n except ValueError:\n msg = 'Bad JSON format retrieved from server'\n raise ValueError(msg)\n\n # Setting up final_response\n if final_response is None:\n final_response = response\n # Concatenating results\n else:\n if query_id is not None:\n for index, res in enumerate(response):\n id_index = current_id_indexes[index]\n final_response[id_index]['results'] += res['results']\n else:\n final_response[0]['results'] += response[0]['results']\n\n if query_id is not None:\n # Checking which ids are completely retrieved\n next_id_list = []\n next_id_indexes = []\n for index, res in enumerate(response):\n if res['numResults'] == call_limit:\n next_id_list.append(current_id_list[index])\n next_id_indexes.append(current_id_indexes[index])\n # Ending REST calling when there are no more ids to retrieve\n if not next_id_list:\n call = False\n else:\n # Ending REST calling when there are no more results to retrieve\n if response[0]['numResults'] != call_limit:\n call = False\n\n # Skipping the first 'limit' results to retrieve the next ones\n opts['skip'] += call_limit\n\n # Subtracting the number of returned results from the maximum goal\n if max_limit is not None:\n max_limit -= call_limit\n # When 'limit' is 0 returns all the results. 
So, break the loop if 0\n if max_limit == 0:\n break\n\n return final_response\n\n\ndef _worker(queue, results, session, host, version, species, category,\n subcategory, resource, options=None, method='get', data=None):\n \"\"\"Manages the queue system for the threads\"\"\"\n while True:\n # Fetching new element from the queue\n index, query_id = queue.get()\n response = _fetch(session, host, version, species, category,\n subcategory, resource, query_id, options, method,\n data)\n # Store data in results at correct index\n results[index] = response\n # Signaling to the queue that task has been processed\n queue.task_done()\n\n\ndef get(session, host, version, species, category, subcategory, resource,\n query_id=None, options=None, method='get', data=None):\n \"\"\"Queries the REST service using multiple threads if needed\"\"\"\n\n # If query_id is an array, convert to comma-separated string\n if query_id is not None and isinstance(query_id, list):\n query_id = ','.join(query_id)\n\n # If data is an array, convert to comma-separated string\n if data is not None and isinstance(data, list):\n data = ','.join(data)\n\n # Multithread if the number of queries is greater than _CALL_BATCH_SIZE\n if query_id is None or len(query_id.split(',')) <= _CALL_BATCH_SIZE:\n response = _fetch(session, host, version, species, category,\n subcategory, resource, query_id, options, method,\n data)\n return response\n else:\n if options is not None and 'num_threads' in options:\n num_threads = options['num_threads']\n else:\n num_threads = _NUM_THREADS_DEFAULT\n\n # Splitting query_id into batches depending on the call batch size\n id_list = query_id.split(',')\n id_batches = [','.join(id_list[x:x+_CALL_BATCH_SIZE])\n for x in range(0, len(id_list), _CALL_BATCH_SIZE)]\n\n # Setting up the queue to hold all the id batches\n q = Queue(maxsize=0)\n # Creating a size defined list to store thread results\n res = [''] * len(id_batches)\n\n # Setting up the threads\n for thread in 
range(num_threads):\n t = threading.Thread(target=_worker,\n kwargs={'queue': q,\n 'results': res,\n 'session': session,\n 'host': host,\n 'version': version,\n 'species': species,\n 'category': category,\n 'subcategory': subcategory,\n 'resource': resource,\n 'options': options,\n 'method': method,\n 'data': data})\n # Setting threads as \"daemon\" allows main program to exit eventually\n # even if these do not finish correctly\n t.setDaemon(True)\n t.start()\n\n # Loading up the queue with index and id batches for each job\n for index, batch in enumerate(id_batches):\n q.put((index, batch)) # Notice this is a tuple\n\n # Waiting until the queue has been processed\n q.join()\n\n # Joining all the responses into a one final response\n final_response = list(itertools.chain.from_iterable(res))\n\n return final_response\n\n\ndef deprecated(func):\n \"\"\"Prints a warning for functions marked as deprecated\"\"\"\n def new_func(*args, **kwargs):\n warnings.simplefilter('always', DeprecationWarning) # turn off filter\n warnings.warn('Call to deprecated function \"{}\".'.format(func.__name__),\n category=DeprecationWarning, stacklevel=2)\n warnings.simplefilter('default', DeprecationWarning) # reset filter\n return func(*args, **kwargs)\n return new_func\n","repo_name":"opencb/cellbase","sub_path":"cellbase-client/src/main/python/pycellbase/commons.py","file_name":"commons.py","file_ext":"py","file_size_in_byte":10601,"program_lang":"python","lang":"en","doc_type":"code","stars":83,"dataset":"github-code","pt":"5"} +{"seq_id":"1753781591","text":"import torch\nimport random\nimport os, sys\n\nSEED = 123456\nrandom.seed(SEED)\n\n# DEVICE = \"cuda:0\" if torch.cuda.is_available() else \"cpu\"\n# LR = 5e-4\nLR = 1e-4\nWARMUP_DURATION = 3\nBATCH_SIZE = 5\n\nMODELS_OUT_FOLDER = \"./\"\nUNK_TOKEN = \"$unk$\"\nPAD_TOKEN = \"$pad$\"\nEOS_TOKEN = \"$eos$\"\nBOS_TOKEN = \"$bos$\"\n\nMAX_SEQUENCE_SIZE = 
550\n","repo_name":"Samanek-Jan/Project","sub_path":"src/t5_small/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"29359495106","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Apr 1 11:19:17 2022\r\n\r\n@author: alex_wheelis\r\n\"\"\"\r\n\r\n\"\"\"\r\nfor GSD of img height\r\n(flight_altitude * sensor_height)\r\n---------------------------------\r\n(focal_length * image_height)\r\n\r\nfor GSD of img width\r\n(flight_altitude * sensor_width)\r\n---------------------------------\r\n(focal_length * image_width)\r\n\"\"\"\r\n\r\nimport numpy as np\r\n\r\n # do all calculations in meters\r\n\r\ndef CalculateOffset(lat, lon, alt, pitch, roll):\r\n # Position, decimal degrees\r\n # PULL GPS COORDINATES\r\n drone_lat = lat\r\n drone_lon = lon\r\n\r\n \"\"\"\r\n If your displacements aren't too great (less than a few kilometers) \r\n and you're not right at the poles, use the quick and dirty estimate \r\n that 111,111 meters (111.111 km) in the y direction is 1 degree (of\r\n latitude) and 111,111 * cos(latitude) meters in the x direction is \r\n 1 degree (of longitude).\r\n \r\n \"\"\"\r\n m_to_deg = 1 / 111111 # deg/meters\r\n\r\n # PITCH/ROLL CORRECTIONS\r\n # we need to account for the pitch and roll of the drone\r\n # the following code will support this\r\n DRONE_ALTITUDE = alt # meters (variable)\r\n PITCH = pitch # make sure these are in RADIANS\r\n ROLL = roll\r\n\r\n # get offset from angles in meters\r\n Fy = np.tan(PITCH) * DRONE_ALTITUDE\r\n Fx = np.tan(ROLL) * DRONE_ALTITUDE\r\n\r\n # convert Fy and Fx offset to degrees\r\n Cy = Fy * m_to_deg\r\n Cx = Fx * m_to_deg\r\n\r\n lat = Cy + drone_lat\r\n lon = Cx + drone_lon\r\n\r\n # -----------------------------------------\r\n\r\n\r\n # GSD CONVERSION\r\n # img dimensios 720x1280 (heightxwidth)\r\n IMAGE_HEIGHT = 720\r\n IMAGE_WIDTH = 
1280\r\n\r\n FOCAL_LENGTH = 0.0165 # pulled from https://improvephotography.com/54797/gopro-good-enough-for-an-advanced-photographers/\r\n SENSOR_HEIGHT = .00617\r\n SENSOR_WIDTH = .00455\r\n\r\n GSD_height = (DRONE_ALTITUDE * SENSOR_HEIGHT) / (FOCAL_LENGTH * IMAGE_HEIGHT)\r\n GSD_width = (DRONE_ALTITUDE * SENSOR_WIDTH) / (FOCAL_LENGTH * IMAGE_WIDTH)\r\n\r\n # to calculate the coordinates of target\r\n # 1) get px coordinates of target\r\n # ----> using (-300, 20) as a dummy varible\r\n # 2) calculate distance away from origin in meters\r\n # ----> GSD_height * 20\r\n # ----> GSD_width * -300\r\n object_coor = (0, 300)\r\n object_x = GSD_width * object_coor[0]\r\n object_y = GSD_height * object_coor[1]\r\n\r\n # Earth’s radius, sphere\r\n R = 6378137\r\n\r\n # offsets in meters\r\n dn = object_y\r\n de = object_x\r\n\r\n # Coordinate offsets in radians\r\n dLat = dn / R\r\n dLon = de / (R * np.cos(np.pi * lat / 180))\r\n\r\n # OffsetPosition, decimal degrees\r\n latO = lat + dLat * 180 / np.pi\r\n lonO = lon + dLon * 180 / np.pi\r\n\r\n print(latO, lonO, sep='\\n')\r\n","repo_name":"botaxe/ECE592-Autonomous-Bomber-main","sub_path":"basic-socket/calc_GSD.py","file_name":"calc_GSD.py","file_ext":"py","file_size_in_byte":2771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"11329318193","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n def hasCycle(self, head) -> bool:\n fast = head\n faster = head\n while faster and faster.next:\n fast = fast.next\n faster = faster.next.next\n if faster == fast:\n return True\n elif faster is None:\n return False","repo_name":"SadriddinDev/LeetCode","sub_path":"Problems/linked-list-cycle.py","file_name":"linked-list-cycle.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} 
+{"seq_id":"7124963772","text":"import numpy as np\nimport matplotlib.pylab as plt\nfrom scipy.fftpack import fft, fftfreq\n\n#almacenamiento de las fotos\ncaraseria=plt.imread(\"cara_02_grisesMF.png\")\ncarafeliz=plt.imread(\"cara_03_grisesMF.png\")\n\n#Transformada de Fourier\ncara2_ft = np.fft.fft2(caraseria)\ncara3_ft = np.fft.fft2(carafeliz)\n\nplt.figure(figsize=(8,8))\nplt.subplot(2,2,1)\nplt.imshow(cara2_ft.real, vmin=0, vmax=2)\nplt.title(\"FFT cara 2\")\nplt.subplot(2,2,2)\nplt.imshow(cara3_ft.real, vmin=0, vmax=2)\nplt.title(\"FFT cara 3\")\n\nplt.savefig(\"FFtIm.pdf\")\n\n#Frecuencia \ncara2_fr = np.fft.fftshift(cara2_ft)\ncara3_fr = np.fft.fftshift(cara3_ft)\n\nplt.figure(figsize=(8,8))\nplt.subplot(2,2,1)\nplt.imshow(cara2_fr.real, vmin=0,vmax=2)\nplt.title(\"Frecuencia cara 2\")\nplt.subplot(2,2,2)\nplt.imshow(cara3_fr.real,vmin=0, vmax=2)\nplt.title(\"Frecuencia cara 3\")\n\nfor i in range(254):\n for j in range (170):\n if (abs(cara2_fr[i,j])>=250):\n cara2_fr[i,j]=0\n \nfor k in range(254):\n for l in range (170):\n if (abs(cara3_fr[k,l])<=250):\n cara3_fr[k,l]=0\n\n#Transformada Inversa \nimg2= np.fft.ifft2(cara2_fr)\nimg3= np.fft.ifft2(cara3_fr)\n\nplt.figure(figsize=(6,6))\nplt.subplot(2,2,1)\nplt.imshow(cara2_fr.real, vmin=0, vmax=0.0001)\nplt.title(\"Frecuencia filtrada cara 2\")\nplt.subplot(2,2,2)\nplt.imshow(cara3_fr.real, vmin=0, vmax=0.0001)\nplt.title(\"Frecuencia filtrada cara 3\")\nplt.subplot(2,2,3)\nplt.imshow(img2.real, vmin=0, vmax=0.1)\nplt.title(\"iFF cara 2\")\nplt.subplot(2,2,4)\nplt.imshow(img3.real, vmin=0, vmax=0.999)\nplt.title(\"iFF cara 3\")\nplt.savefig(\"ImProces.pdf\")\n\n\n#Imagen hibrida\nfinal1=img2+img3\nplt.figure(figsize=(6,5))\nplt.subplot(2,2,1)\nplt.imshow(final1.real, plt.cm.gray,vmin=0,vmax=1)\nplt.title(\"Fotografia 
hibrida\")\nplt.savefig(\"ImHybrid.pdf\")\n\n\n","repo_name":"RianoAngela/Metodos-Computacionales","sub_path":"RianoAngela_hw2/Fourier.py","file_name":"Fourier.py","file_ext":"py","file_size_in_byte":1769,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"38434723901","text":"list_of_guests = []\nkeep_asking = True\n\nwhile keep_asking is True: # w poradniku było boolean \"==\", ale best practice mówi, żeby używać \"is\" zamiast \"==\"\n name = input('What is your name? ')\n if name == '':\n print('Hello world')\n keep_asking = False\n else:\n if name not in list_of_guests:\n list_of_guests.append(name)\n print('Hello ' + name)\n else:\n print('Hello again ' + name)\n","repo_name":"bartoszmaleta/2nd-Self-instructed-week-exercises-","sub_path":"hello_world/Ex1 v2.py","file_name":"Ex1 v2.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"23601889053","text":"import random\nfrom collections import deque, namedtuple\n\nimport numpy as np\nimport pytest\nimport torch\n\nfrom agilerl.components.replay_buffer import (\n MultiStepReplayBuffer,\n PrioritizedReplayBuffer,\n ReplayBuffer,\n)\nfrom agilerl.components.segment_tree import MinSegmentTree, SumSegmentTree\n\n\n##### ReplayBuffer class tests #####\n# Can create an instance of ReplayBuffer with valid arguments\ndef test_create_instance_with_valid_arguments():\n action_dim = 2\n memory_size = 100\n field_names = [\"state\", \"action\", \"reward\"]\n device = \"cpu\"\n\n buffer = ReplayBuffer(action_dim, memory_size, field_names, device)\n\n assert buffer.action_dim == action_dim\n assert buffer.memory_size == memory_size\n assert buffer.field_names == field_names\n assert buffer.device == device\n\n\n# Can get length of memory with __len__ method\ndef test_get_length_of_memory():\n action_dim = 2\n memory_size = 100\n field_names = [\"state\", \"action\", 
\"reward\"]\n device = \"cpu\"\n\n buffer = ReplayBuffer(action_dim, memory_size, field_names, device)\n\n # Add experiences to memory\n buffer.save2memorySingleEnv(1, 2, 3)\n buffer.save2memorySingleEnv(4, 5, 6)\n buffer.save2memorySingleEnv(7, 8, 9)\n\n assert len(buffer) == 3\n\n\n# Can add experiences to memory and appends to end of deque\ndef test_append_to_memory_deque():\n buffer = ReplayBuffer(\n action_dim=4,\n memory_size=1000,\n field_names=[\"state\", \"action\", \"reward\", \"next_state\", \"done\"],\n )\n buffer._add([0, 0, 0, 0], [1, 1, 1, 1], 1, [0, 0, 0, 0], False)\n buffer._add([1, 1, 1, 1], [2, 2, 2, 2], 2, [1, 1, 1, 1], True)\n assert len(buffer.memory) == 2\n assert buffer.memory[0].state == [0, 0, 0, 0]\n assert buffer.memory[0].action == [1, 1, 1, 1]\n assert buffer.memory[0].reward == 1\n assert buffer.memory[0].next_state == [0, 0, 0, 0]\n assert buffer.memory[0].done is False\n assert buffer.memory[1].state == [1, 1, 1, 1]\n assert buffer.memory[1].action == [2, 2, 2, 2]\n assert buffer.memory[1].reward == 2\n assert buffer.memory[1].next_state == [1, 1, 1, 1]\n assert buffer.memory[1].done is True\n\n\n# Can add an experience when memory is full and maxlen is reached\ndef test_add_experience_when_memory_full():\n buffer = ReplayBuffer(\n action_dim=4,\n memory_size=2,\n field_names=[\"state\", \"action\", \"reward\", \"next_state\", \"done\"],\n )\n buffer._add([0, 0, 0, 0], [1, 1, 1, 1], 1, [0, 0, 0, 0], False)\n buffer._add([1, 1, 1, 1], [2, 2, 2, 2], 2, [1, 1, 1, 1], True)\n buffer._add([2, 2, 2, 2], [3, 3, 3, 3], 3, [2, 2, 2, 2], False)\n assert len(buffer.memory) == 2\n assert buffer.memory[0].state == [1, 1, 1, 1]\n assert buffer.memory[0].action == [2, 2, 2, 2]\n assert buffer.memory[0].reward == 2\n assert buffer.memory[0].next_state == [1, 1, 1, 1]\n assert buffer.memory[0].done is True\n assert buffer.memory[1].state == [2, 2, 2, 2]\n assert buffer.memory[1].action == [3, 3, 3, 3]\n assert buffer.memory[1].reward == 3\n assert 
buffer.memory[1].next_state == [2, 2, 2, 2]\n assert buffer.memory[1].done is False\n\n\n# Can add single experiences to memory with save2memorySingleEnv method\ndef test_add_single_experiences_to_memory():\n action_dim = 2\n memory_size = 100\n field_names = [\"state\", \"action\", \"reward\"]\n device = \"cpu\"\n\n buffer = ReplayBuffer(action_dim, memory_size, field_names, device)\n\n state = np.array([1, 2])\n action = np.array([0])\n reward = np.array([0])\n\n buffer.save2memorySingleEnv(state, action, reward)\n\n assert len(buffer.memory) == 1\n assert buffer.memory[0].state.tolist() == state.tolist()\n assert buffer.memory[0].action.tolist() == action.tolist()\n assert buffer.memory[0].reward.tolist() == reward.tolist()\n\n\n# Can add multiple experiences to memory with save2memoryVectEnvs method\ndef test_add_multiple_experiences_to_memory():\n action_dim = 2\n memory_size = 100\n field_names = [\"state\", \"action\", \"reward\", \"next_state\", \"done\"]\n device = \"cpu\"\n\n buffer = ReplayBuffer(action_dim, memory_size, field_names, device)\n\n states = np.array([[1, 2], [3, 4]])\n actions = np.array([[0], [1]])\n rewards = np.array([[0], [1]])\n next_states = np.array([[5, 6], [7, 8]])\n dones = np.array([[False], [True]])\n\n buffer.save2memoryVectEnvs(states, actions, rewards, next_states, dones)\n\n assert len(buffer.memory) == 2\n assert buffer.memory[0].state.tolist() == states[0].tolist()\n assert buffer.memory[0].action.tolist() == actions[0].tolist()\n assert buffer.memory[0].reward.tolist() == rewards[0].tolist()\n assert buffer.memory[0].next_state.tolist() == next_states[0].tolist()\n assert buffer.memory[0].done.tolist() == dones[0].tolist()\n assert buffer.memory[1].state.tolist() == states[1].tolist()\n assert buffer.memory[1].action.tolist() == actions[1].tolist()\n assert buffer.memory[1].reward.tolist() == rewards[1].tolist()\n assert buffer.memory[1].next_state.tolist() == next_states[1].tolist()\n assert 
buffer.memory[1].done.tolist() == dones[1].tolist()\n\n\n# Can handle vectorized and un-vectorized experiences from environment\ndef test_add_any_experiences_to_memory():\n action_dim = 2\n memory_size = 100\n field_names = [\"state\", \"action\", \"reward\", \"next_state\", \"done\"]\n device = \"cpu\"\n\n buffer = ReplayBuffer(action_dim, memory_size, field_names, device)\n\n states = np.array([[1, 2], [3, 4]])\n actions = np.array([[0], [1]])\n rewards = np.array([[0], [1]])\n next_states = np.array([[5, 6], [7, 8]])\n dones = np.array([[False], [True]])\n\n buffer.save2memory(states, actions, rewards, next_states, dones, is_vectorised=True)\n\n assert len(buffer.memory) == 2\n assert buffer.memory[0].state.tolist() == states[0].tolist()\n assert buffer.memory[0].action.tolist() == actions[0].tolist()\n assert buffer.memory[0].reward.tolist() == rewards[0].tolist()\n assert buffer.memory[0].next_state.tolist() == next_states[0].tolist()\n assert buffer.memory[0].done.tolist() == dones[0].tolist()\n assert buffer.memory[1].state.tolist() == states[1].tolist()\n assert buffer.memory[1].action.tolist() == actions[1].tolist()\n assert buffer.memory[1].reward.tolist() == rewards[1].tolist()\n assert buffer.memory[1].next_state.tolist() == next_states[1].tolist()\n assert buffer.memory[1].done.tolist() == dones[1].tolist()\n\n new_state = np.array([1, 2])\n new_action = np.array([0])\n new_reward = np.array([0])\n new_next_state = np.array([3, 4])\n new_done = np.array([False])\n\n buffer.save2memory(\n new_state, new_action, new_reward, new_next_state, new_done, is_vectorised=False\n )\n\n assert len(buffer.memory) == 3\n assert buffer.memory[2].state.tolist() == new_state.tolist()\n assert buffer.memory[2].action.tolist() == new_action.tolist()\n assert buffer.memory[2].reward.tolist() == new_reward.tolist()\n assert buffer.memory[2].next_state.tolist() == new_next_state.tolist()\n assert buffer.memory[2].done.tolist() == new_done.tolist()\n\n\n# Can sample 
experiences from memory of desired batch size with sample method\ndef test_sample_experiences_from_memory():\n action_dim = 1\n memory_size = 100\n field_names = [\"state\", \"action\", \"reward\"]\n device = \"cpu\"\n\n buffer = ReplayBuffer(action_dim, memory_size, field_names, device)\n\n # Add experiences to memory\n buffer.save2memorySingleEnv(np.array([1, 1]), 2, 3)\n buffer.save2memorySingleEnv(np.array([4, 4]), 5, 6)\n buffer.save2memorySingleEnv(np.array([7, 7]), 8, 9)\n\n # Sample experiences from memory\n batch_size = 2\n experiences = buffer.sample(batch_size)\n\n assert len(experiences[0]) == batch_size\n assert len(experiences[1]) == batch_size\n assert len(experiences[2]) == batch_size\n assert isinstance(experiences[0], torch.Tensor)\n assert experiences[0].shape == (batch_size, 2)\n assert isinstance(experiences[1], torch.Tensor)\n assert experiences[1].shape == (batch_size, action_dim)\n assert isinstance(experiences[2], torch.Tensor)\n assert experiences[2].shape == (batch_size, 1)\n\n\n# Can process transition from experiences with _process_transition method\ndef test_process_transition_from_experiences():\n action_dim = 1\n memory_size = 100\n field_names = [\"state\", \"action\", \"reward\"]\n device = \"cpu\"\n\n buffer = ReplayBuffer(action_dim, memory_size, field_names, device)\n\n # Create experiences\n experience1 = buffer.experience(1, 2, 3)\n experience2 = buffer.experience(4, 5, 6)\n experiences = [experience1, experience2]\n\n # Process transition from experiences\n transition = buffer._process_transition(experiences)\n\n assert isinstance(transition[\"state\"], torch.Tensor)\n assert transition[\"state\"].shape == (len(experiences), 1)\n assert isinstance(transition[\"action\"], torch.Tensor)\n assert transition[\"action\"].shape == (len(experiences), action_dim)\n assert isinstance(transition[\"reward\"], torch.Tensor)\n assert transition[\"reward\"].shape == (len(experiences), 1)\n\n\n# Can process single transition from 
experiences with _process_transition method\ndef test_process_single_transition_from_experiences():\n action_dim = 1\n memory_size = 100\n field_names = [\"state\", \"action\", \"reward\"]\n device = \"cpu\"\n\n buffer = ReplayBuffer(action_dim, memory_size, field_names, device)\n\n # Create experiences\n experience1 = buffer.experience(np.array([1, 1]), 2, 3)\n experiences = [experience1]\n\n # Process transition from experiences\n transition = buffer._process_transition(experiences)\n\n assert isinstance(transition[\"state\"], torch.Tensor)\n assert transition[\"state\"].shape == (len(experiences), 2)\n assert isinstance(transition[\"action\"], torch.Tensor)\n assert transition[\"action\"].shape == (len(experiences), action_dim)\n assert isinstance(transition[\"reward\"], torch.Tensor)\n assert transition[\"reward\"].shape == (len(experiences), 1)\n\n\n##### MultiStepReplayBuffer class tests #####\n# Initializes the MultiStepReplayBuffer class with the given parameters.\ndef test_initializes_nstep_replay_buffer_with_given_parameters():\n action_dim = 4\n memory_size = 10000\n field_names = [\"state\", \"action\", \"reward\", \"next_state\", \"done\"]\n num_envs = 2\n n_step = 5\n gamma = 0.95\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n replay_buffer = MultiStepReplayBuffer(\n action_dim, memory_size, field_names, num_envs, n_step, gamma, device\n )\n\n assert replay_buffer.action_dim == action_dim\n assert replay_buffer.memory_size == memory_size\n assert replay_buffer.field_names == field_names\n assert replay_buffer.num_envs == num_envs\n assert replay_buffer.n_step == n_step\n assert replay_buffer.gamma == gamma\n assert replay_buffer.device == device\n\n\n# Can save a single environment transition to memory\ndef test_save_single_env_transition():\n action_dim = 4\n memory_size = 10000\n field_names = [\"state\", \"action\", \"reward\", \"next_state\", \"done\"]\n num_envs = 1\n n_step = 3\n gamma = 0.99\n\n replay_buffer = 
MultiStepReplayBuffer(\n action_dim, memory_size, field_names, num_envs, n_step, gamma\n )\n\n state = np.array([1, 2, 3, 4])\n action = np.array([0, 1, 0, 1])\n reward = np.array([0.1])\n next_state = np.array([5, 6, 7, 8])\n done = np.array([False])\n\n replay_buffer.save2memory(state, action, reward, next_state, done)\n\n assert len(replay_buffer.memory) == 0\n assert len(replay_buffer.n_step_buffers[0]) == 1\n\n replay_buffer.save2memorySingleEnv(state, action, reward, next_state, done)\n\n assert len(replay_buffer.memory) == 0\n assert len(replay_buffer.n_step_buffers[0]) == 2\n\n replay_buffer.save2memory(state, action, reward, next_state, done)\n\n assert len(replay_buffer.memory) == num_envs\n assert len(replay_buffer.n_step_buffers[0]) == n_step\n\n\n# Can save vectorized environment transitions to memory\ndef test_save_multiple_env_transitions():\n action_dim = 4\n memory_size = 10000\n field_names = [\"state\", \"action\", \"reward\", \"next_state\", \"done\"]\n num_envs = 2\n n_step = 2\n gamma = 0.99\n\n replay_buffer = MultiStepReplayBuffer(\n action_dim, memory_size, field_names, num_envs, n_step, gamma\n )\n\n state = np.array([[1, 2, 3, 4], [9, 10, 11, 12]])\n action = np.array([[0, 1, 0, 1], [1, 0, 1, 0]])\n reward = np.array([[0.1], [0.5]])\n next_state = np.array([[5, 6, 7, 8], [13, 14, 15, 16]])\n done = np.array([[False], [True]])\n\n replay_buffer.save2memory(\n state, action, reward, next_state, done, is_vectorised=True\n )\n\n assert len(replay_buffer.memory) == 0\n assert len(replay_buffer.n_step_buffers[0]) == 1\n assert len(replay_buffer.n_step_buffers[1]) == 1\n\n one_step_transition = replay_buffer.save2memoryVectEnvs(\n state, action, reward, next_state, done\n )\n\n assert len(replay_buffer.memory) == num_envs\n assert len(replay_buffer.n_step_buffers[0]) == n_step\n assert len(replay_buffer.n_step_buffers[1]) == n_step\n assert len(one_step_transition) == len(field_names)\n assert one_step_transition[0].shape == (num_envs, 4)\n 
assert one_step_transition[1].shape == (num_envs, 4)\n assert one_step_transition[2].shape == (num_envs, 1)\n assert one_step_transition[3].shape == (num_envs, 4)\n assert one_step_transition[4].shape == (num_envs, 1)\n\n\n# Can sample experiences from memory\ndef test_sample_nstep_experiences_from_memory():\n action_dim = 4\n memory_size = 10000\n field_names = [\"state\", \"action\", \"reward\", \"next_state\", \"done\"]\n num_envs = 1\n n_step = 3\n gamma = 0.99\n\n replay_buffer = MultiStepReplayBuffer(\n action_dim, memory_size, field_names, num_envs, n_step, gamma\n )\n\n state = np.array([1, 2, 3, 4])\n action = np.array([0, 1, 0, 1])\n reward = np.array([0.1])\n next_state = np.array([5, 6, 7, 8])\n done = np.array([False])\n\n print(state, state.shape)\n\n replay_buffer.save2memory(state, action, reward, next_state, done)\n replay_buffer.save2memory(state, action, reward, next_state, done)\n replay_buffer.save2memory(state, action, reward, next_state, done)\n replay_buffer.save2memory(state, action, reward, next_state, done)\n\n batch_size = 2\n experiences = replay_buffer.sample(batch_size)\n\n assert experiences[0].shape == (batch_size, 4)\n assert experiences[1].shape == (batch_size, 4)\n assert experiences[2].shape == (batch_size, 1)\n assert experiences[3].shape == (batch_size, 4)\n assert experiences[4].shape == (batch_size, 1)\n\n\n# Can sample experiences from memory using provided indices\ndef test_sample_experiences_from_memory_with_indices():\n action_dim = 4\n memory_size = 10000\n field_names = [\"state\", \"action\", \"reward\", \"next_state\", \"done\"]\n num_envs = 1\n n_step = 3\n gamma = 0.99\n\n replay_buffer = MultiStepReplayBuffer(\n action_dim, memory_size, field_names, num_envs, n_step, gamma\n )\n\n state = np.array([1, 2, 3, 4])\n action = np.array([0, 1, 0, 1])\n reward = np.array([0.1])\n next_state = np.array([5, 6, 7, 8])\n done = np.array([False])\n\n replay_buffer.save2memory(state, action, reward, next_state, done)\n 
replay_buffer.save2memory(state, action, reward, next_state, done)\n replay_buffer.save2memory(state, action, reward, next_state, done)\n\n indices = [0]\n experiences = replay_buffer.sample_from_indices(indices)\n\n assert len(experiences) == len(field_names)\n assert experiences[0].shape == (len(indices),) + state.shape\n assert experiences[1].shape == (len(indices),) + action.shape\n assert experiences[2].shape == (len(indices),) + reward.shape\n assert experiences[3].shape == (len(indices),) + next_state.shape\n assert experiences[4].shape == (len(indices),) + done.shape\n\n\n# Can return transition with n-step rewards\ndef test_returns_tuple_of_n_step_reward_next_state_and_done():\n n_step_buffer = deque(maxlen=5)\n gamma = 0.9\n field_names = [\"state\", \"action\", \"reward\", \"next_state\", \"termination\"]\n\n # Create a namedtuple to represent a transition\n Transition = namedtuple(\"Transition\", field_names)\n\n # Add some transitions to the n_step_buffer\n n_step_buffer.append(Transition([0, 0, 0], 0, 1, [0, 0, 0], False))\n n_step_buffer.append(Transition([1, 1, 1], 1, 2, [1, 1, 1], False))\n n_step_buffer.append(Transition([2, 2, 2], 0, 3, [2, 2, 2], True))\n n_step_buffer.append(Transition([3, 3, 3], 1, 4, [3, 3, 3], False))\n n_step_buffer.append(Transition([4, 4, 4], 0, 5, [4, 4, 4], False))\n\n # Create an instance of the MultiStepReplayBuffer class\n replay_buffer = MultiStepReplayBuffer(\n action_dim=1, memory_size=100, field_names=field_names, num_envs=1\n )\n\n # Invoke the _get_n_step_info method\n result = replay_buffer._get_n_step_info(n_step_buffer, gamma)\n\n assert isinstance(result, tuple)\n assert len(result) == len(field_names)\n\n\n# Can calculate n-step reward using n-step buffer and gamma\ndef test_calculates_n_step_reward():\n n_step_buffer = deque(maxlen=5)\n gamma = 0.9\n field_names = [\"state\", \"action\", \"reward\", \"next_state\", \"done\"]\n\n # Create a namedtuple to represent a transition\n Transition = 
namedtuple(\"Transition\", field_names)\n\n # Add some transitions to the n_step_buffer\n n_step_buffer.append(Transition([0, 0, 0], 0, 1, [0, 0, 0], False))\n n_step_buffer.append(Transition([1, 1, 1], 1, 2, [1, 1, 1], False))\n n_step_buffer.append(Transition([2, 2, 2], 0, 3, [2, 2, 2], False))\n n_step_buffer.append(Transition([3, 3, 3], 1, 4, [3, 3, 3], False))\n n_step_buffer.append(Transition([4, 4, 4], 0, 5, [4, 4, 4], True))\n\n # Create an instance of the MultiStepReplayBuffer class\n replay_buffer = MultiStepReplayBuffer(\n action_dim=3, memory_size=100, field_names=field_names, num_envs=1\n )\n\n # Invoke the _get_n_step_info method\n result = replay_buffer._get_n_step_info(n_step_buffer, gamma)\n\n expected_reward = 1 + gamma * (2 + gamma * (3 + gamma * (4 + gamma * 5)))\n assert np.array_equal(result[2], np.array([expected_reward]))\n\n\n##### PrioritizedReplayBuffer class tests #####\n# Can initialize object with given parameters\ndef test_initializes_pe_replay_buffer_with_given_parameters():\n action_dim = 4\n memory_size = 10000\n field_names = [\"state\", \"action\", \"reward\", \"next_state\", \"done\"]\n num_envs = 1\n alpha = 0.6\n n_step = 1\n gamma = 0.99\n device = \"cpu\"\n\n replay_buffer = PrioritizedReplayBuffer(\n action_dim, memory_size, field_names, num_envs, alpha, n_step, gamma, device\n )\n\n assert replay_buffer.action_dim == action_dim\n assert replay_buffer.memory_size == memory_size\n assert replay_buffer.field_names == field_names\n assert replay_buffer.num_envs == num_envs\n assert replay_buffer.alpha == alpha\n assert replay_buffer.n_step == n_step\n assert replay_buffer.gamma == gamma\n assert replay_buffer.device == device\n\n\n# Can add experience to replay buffer\ndef test_add_experience_to_per_memory():\n action_dim = 4\n memory_size = 1000\n field_names = [\"state\", \"action\", \"reward\", \"next_state\", \"done\"]\n num_envs = 1\n alpha = 0.6\n n_step = 1\n gamma = 0.99\n device = \"cpu\"\n\n buffer = 
PrioritizedReplayBuffer(\n action_dim, memory_size, field_names, num_envs, alpha, n_step, gamma, device\n )\n buffer._add(1, 2, 3, 4, 5)\n\n assert len(buffer.memory) == 1\n assert buffer.memory[0] == (1, 2, 3, 4, 5)\n\n\n# Save experience to memory and retrieve it\ndef test_save_and_sample_experience():\n action_dim = 4\n memory_size = 10000\n field_names = [\"state\", \"action\", \"reward\", \"next_state\", \"done\"]\n num_envs = 1\n alpha = 0.6\n n_step = 1\n gamma = 0.99\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n replay_buffer = PrioritizedReplayBuffer(\n action_dim, memory_size, field_names, num_envs, alpha, n_step, gamma, device\n )\n\n state = np.random.rand(4)\n action = np.random.randint(0, action_dim)\n reward = np.random.rand()\n next_state = np.random.rand(4)\n done = False\n\n replay_buffer.save2memorySingleEnv(state, action, reward, next_state, done)\n\n batch_size = 1\n transition = replay_buffer.sample(batch_size)\n\n assert len(transition) == 7\n assert len(transition[0]) == batch_size\n assert isinstance(transition[0], torch.Tensor)\n assert isinstance(transition[1], torch.Tensor)\n assert isinstance(transition[2], torch.Tensor)\n assert isinstance(transition[3], torch.Tensor)\n assert isinstance(transition[4], torch.Tensor)\n assert isinstance(transition[5], torch.Tensor)\n assert isinstance(transition[6], list)\n\n\n# Update priorities of sampled transitions\ndef test_update_priorities():\n action_dim = 4\n memory_size = 10000\n field_names = [\"state\", \"action\", \"reward\", \"next_state\", \"done\"]\n num_envs = 1\n alpha = 0.6\n n_step = 1\n gamma = 0.99\n device = \"cpu\"\n\n replay_buffer = PrioritizedReplayBuffer(\n action_dim, memory_size, field_names, num_envs, alpha, n_step, gamma, device\n )\n\n state = np.array([1, 2, 3, 4])\n action = np.array([0, 1, 0, 1])\n reward = np.array([0.1])\n next_state = np.array([5, 6, 7, 8])\n done = np.array([False])\n\n replay_buffer.save2memory(state, action, 
reward, next_state, done)\n\n transition = replay_buffer.sample(1)\n\n idxs = transition[-1]\n priorities = [np.random.rand()]\n\n replay_buffer.update_priorities(idxs, priorities)\n\n updated_transition = replay_buffer.sample_from_indices(idxs)\n\n state = torch.from_numpy(state).float()\n action = torch.from_numpy(action)\n reward = torch.from_numpy(reward).float()\n next_state = torch.from_numpy(next_state).float()\n done = torch.from_numpy(done.astype(np.uint8)).float()\n\n assert torch.equal(updated_transition[0][0], state)\n assert torch.equal(updated_transition[1][0], action)\n assert torch.equal(updated_transition[2][0], reward)\n assert torch.equal(updated_transition[3][0], next_state)\n assert torch.equal(updated_transition[4][0], done)\n\n\n# Proportions are calculated based on sum_tree\ndef test_proportions_calculated_based_on_sum_tree():\n buffer = PrioritizedReplayBuffer(\n action_dim=4,\n memory_size=1000,\n field_names=[\"state\", \"action\", \"reward\", \"next_state\", \"done\"],\n num_envs=1,\n )\n batch_size = 32\n indices = buffer._sample_proprtional(batch_size)\n p_total = buffer.sum_tree.sum(0, len(buffer) - 1)\n segment = p_total / batch_size\n for i, idx in enumerate(indices):\n a = segment * i\n b = segment * (i + 1)\n upperbound = random.uniform(a, b)\n assert buffer.sum_tree.retrieve(upperbound) == idx\n\n\n# Calculates the weight of the experience at idx\ndef test_calculate_weight_normal_case():\n buffer = PrioritizedReplayBuffer(\n action_dim=4,\n memory_size=1000,\n field_names=[\"state\", \"action\", \"reward\", \"next_state\", \"done\"],\n num_envs=1,\n )\n\n state = np.array([1, 2, 3, 4])\n action = np.array([0, 1, 0, 1])\n reward = np.array([0.1])\n next_state = np.array([5, 6, 7, 8])\n done = np.array([False])\n\n buffer.save2memory(state, action, reward, next_state, done)\n\n idx = 0\n beta = 0.4\n\n p_sample = p_min = 1.0\n\n weight = buffer._calculate_weight(idx, beta)\n\n assert weight == pytest.approx(p_sample ** (-0.4) / 
(p_min ** (-0.4)), abs=1e-6)\n\n\n# Calculates weight from pre-set values\ndef test_calculate_weight_parameterized():\n buffer = PrioritizedReplayBuffer(\n action_dim=4,\n memory_size=1000,\n field_names=[\"state\", \"action\", \"reward\", \"next_state\", \"done\"],\n num_envs=1,\n )\n\n state = np.array([1, 2, 3, 4])\n action = np.array([0, 1, 0, 1])\n reward = np.array([0.1])\n next_state = np.array([5, 6, 7, 8])\n done = np.array([False])\n\n buffer.save2memory(state, action, reward, next_state, done)\n\n buffer.sum_tree = SumSegmentTree(128)\n buffer.sum_tree[0] = 0.5\n buffer.sum_tree[1] = 0.3\n buffer.sum_tree[2] = 0.2\n buffer.min_tree = MinSegmentTree(128)\n buffer.min_tree[0] = 0.2\n buffer.min_tree[1] = 0.1\n buffer.min_tree[2] = 0.05\n buffer.max_priority = 1.0\n beta = 0.4\n weight = (0.5 ** (-0.4)) / (0.05 ** (-0.4))\n\n assert buffer._calculate_weight(0, beta) == pytest.approx(weight, abs=1e-6)\n","repo_name":"AgileRL/AgileRL","sub_path":"tests/test_replay_buffer.py","file_name":"test_replay_buffer.py","file_ext":"py","file_size_in_byte":24434,"program_lang":"python","lang":"en","doc_type":"code","stars":432,"dataset":"github-code","pt":"5"} +{"seq_id":"37916947229","text":"from __future__ import print_function\n\nimport ConfigParser\nimport argparse\n\nfrom typing import Dict, List, Tuple # NOQA, pylint: disable=unused-import\n\nimport pynullweb.handler\nimport pynullweb.server\n\n\ndef parser2dict(config_parser):\n # type: (ConfigParser.ConfigParser) -> Dict[str, Dict[str, str]]\n \"\"\"Transform the data from a config parser into a dict of dicts.\"\"\"\n result = {}\n for section in config_parser.sections():\n items = {}\n for name, value in config_parser.items(section):\n items[name] = value\n result[section] = items\n return result\n\n\ndef get_config(args):\n # type: (argparse.Namespace) -> Dict[str, Dict[str, str]]\n \"\"\"Read config file and set configuration.\"\"\"\n config_parser = ConfigParser.SafeConfigParser()\n 
config_parser.add_section(\"server\")\n config_parser.set(\"server\", \"verbose\", \"0\")\n config_parser.set(\"server\", \"port\", \"2468\")\n config_parser.set(\"server\", \"redirect\", \"\")\n config_parser.set(\"server\", \"localhosts\", \"\")\n # read config\n config = parser2dict(config_parser)\n if args.verbose:\n config[\"server\"][\"verbose\"] = args.verbose\n else:\n config[\"server\"][\"verbose\"] = str(int(config[\"server\"][\"verbose\"]))\n if args.port:\n config[\"server\"][\"port\"] = args.port\n else:\n config[\"server\"][\"port\"] = str(int(config[\"server\"][\"port\"]))\n if args.redirect:\n config[\"server\"][\"redirect\"] = args.redirect\n if args.localhosts:\n config[\"server\"][\"localhosts\"] = args.localhosts\n return config\n\n\ndef main():\n # type: () -> None\n \"\"\"Start the main program.\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '-p', '--port', type=int, help=\"port number of web server\")\n parser.add_argument(\n \"-l\", \"--localhosts\", type=str, help=\"list of local hosts\")\n parser.add_argument(\n \"-r\", \"--redirect\", type=str, help=\"redirect local traffic\")\n parser.add_argument(\n \"-v\", \"--verbose\", action=\"count\", default=0,\n help=\"increase verbosity\")\n args = parser.parse_args()\n print(args)\n config = get_config(args)\n\n server_address = (\"\", int(config[\"server\"][\"port\"]))\n httpd = pynullweb.server.NullWebServer(\n server_address, pynullweb.handler.NullWebHandler, config)\n httpd.serve_forever()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"t73fde/pynullweb","sub_path":"pynullweb/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":2366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"10513034910","text":"# coding: utf-8\nfrom django.db import models\nfrom django.contrib.auth.models import User\nfrom django.contrib.contenttypes.models import ContentType\nfrom 
django.contrib.contenttypes import fields\nfrom django.utils.translation import ugettext_lazy as _\nfrom filer.fields.image import FilerImageField\n\n\nclass GenericLink(models.Model):\n content_type = models.ForeignKey(ContentType)\n object_id = models.PositiveIntegerField()\n content_object = fields.GenericForeignKey('content_type', 'object_id')\n linkset = models.ForeignKey('LinkSet')\n order = models.IntegerField(_('order'), default=100)\n\n def __unicode__(self):\n return str(self.order)\n\n @property\n def picture(self):\n if hasattr(self.content_object, 'picture'):\n return self.content_object.picture\n return None\n\n def get_absolute_url(self):\n if hasattr(self.content_object, 'get_absolute_url'):\n return self.content_object.get_absolute_url()\n elif hasattr(self.content_object, 'url'):\n return self.content_object.url\n return None\n\n @property\n def title(self):\n if hasattr(self.content_object, 'title'):\n return self.content_object.title\n return None\n\n @property\n def description(self):\n if hasattr(self.content_object, 'description'):\n return self.content_object.description\n if hasattr(self.content_object, 'summary'):\n return self.content_object.summary\n if hasattr(self.content_object, 'introduction'):\n return self.content_object.introduction\n return None\n\n class Meta:\n ordering = ('order',)\n\n\nclass LinkSet(models.Model):\n code = models.CharField(_('code'), max_length=20)\n description = models.CharField(_('description'), max_length=200,\n blank=True, null=True)\n\n def __unicode__(self):\n return self.code\n\n\nclass URLLink(models.Model):\n url = models.URLField(_('URL'))\n title = models.CharField(_('title'), max_length=200)\n description = models.CharField(_('description'), max_length=900,\n blank=True, null=True)\n picture = FilerImageField(verbose_name=_('picture'), null=True, blank=True,\n on_delete=models.SET_NULL)\n css_class = models.CharField(_('CSS class'), max_length=50,\n null=True, blank=True)\n # Audit\n created_on = 
models.DateTimeField(_('date added'), auto_now_add=True)\n created_by = models.ForeignKey(User, blank=True, null=True,\n editable=False, related_name='created_urllink',\n verbose_name=_('created by'))\n updated_on = models.DateTimeField(_('date modified'), auto_now=True)\n updated_by = models.ForeignKey(User, blank=True, null=True,\n editable=False, related_name='updated_urllink',\n verbose_name=_('update by'))\n\n class Meta:\n verbose_name = _('URL link')\n verbose_name_plural = _('URL links')\n\n def __unicode__(self):\n return \"%s: %s\" % (self.title, self.url)\n\n def save(self, *args, **kwargs):\n user = kwargs.pop('user', None)\n self.updated_by = user\n if not self.id:\n self.created_by = user\n return super(URLLink, self).save(*args, **kwargs)\n\n def get_absolute_url(self):\n return self.url\n","repo_name":"socib/django-socib-cms","sub_path":"socib_cms/cmsutils/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3474,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"5"} +{"seq_id":"3902801067","text":"import random\nimport sys\nimport pygame \nimport os\n\n\nclass Block(pygame.sprite.Sprite):\n\tdef __init__(self,path,init_pos):\n\t\tsuper().__init__()\n\t\tself.image = pygame.image.load(path).convert_alpha()\n\t\tself.rect = self.image.get_rect(center=init_pos)\n\n\nclass Player(Block):\n\tdef __init__(self,path,init_pos,speed=3,timer=None):\n\t\tsuper().__init__(path,init_pos)\n\n\t\tself.speed = speed\n\t\tself.movement = 0\n\n\tdef move(self):\n\t\tself.rect.y += self.speed*self.movement\n\t\n\tdef boundaries(self):\n\t\tif self.rect.top < 0:\n\t\t\tself.rect.top = 0\n\t\tif self.rect.bottom > 350:\n\t\t\tself.rect.bottom = 350\n\n\tdef update(self,ball_group):\t\t\n\t\tself.move()\n\t\tself.boundaries()\n\n\nclass Opponent(Block):\n\tdef __init__(self,path,init_pos, speed=3):\n\t\tsuper().__init__(path,init_pos)\n\t\tself.speed = speed\n\t\n\tdef move(self,ball_group):\n\t\tif 
ball_group.sprite.rect.center[0] > 100:\n\t\t\tif ball_group.sprite.rect.center[1] > self.rect.center[1]:\n\t\t\t\tself.rect.y += self.speed\n\t\t\tif ball_group.sprite.rect.center[1] < self.rect.center[1]:\n\t\t\t\tself.rect.y -= self.speed\n\n\tdef boundaries(self):\n\t\tif self.rect.top <= 0:\n\t\t\tself.rect.top = 0\n\t\tif self.rect.bottom >= 350:\n\t\t\tself.rect.bottom = 350\n\n\tdef update(self,ball_group):\n\t\tself.move(ball_group)\n\t\tself.boundaries()\n\n\nclass Ball(Block):\n\tdef __init__(self,screen, path,init_pos,blocks_group, speed=3.5):\n\t\tsuper().__init__(path,init_pos)\n\t\tself.screen = screen\n\t\tself.init_pos = init_pos\n\t\tself.blocks_group = blocks_group\n\t\t\n\t\tself.init_speed = speed\n\t\tself.speed_x = speed * random.choice([1,-1])\n\t\tself.speed_y = speed * random.choice([1,-1])\n\n\t\tself.score_time = 0\n\t\tself._reset_time = 3000\n\n\t\tself.num_of_collisions = 0\n\t\tself.increment = 0\n\n\t\tself.pong_sound = pygame.mixer.Sound('../include/sound/pong.ogg')\n\n\tdef reset_ball(self):\n\t\tself.rect = self.image.get_rect(center=(self.init_pos))\n\t\tself.score_time = pygame.time.get_ticks()\n\n\t\tself.speed_x = self.init_speed * random.choice([1,-1])\n\t\tself.speed_y = self.init_speed * random.choice([1,-1])\n\n\t\tself.num_of_collisions = 0\n\t\tself.increment = 0\n\n\tdef move(self):\n\t\tif pygame.time.get_ticks() - self.score_time > self._reset_time:\n\t\t\tself.rect.x += (self.speed_x+self.increment)\n\t\t\tself.rect.y += self.speed_y\n\t\telse: \n\t\t\tself.display_time()\n\t\n\tdef display_time(self):\n\t\ttime_left_text = pygame.font.Font(None,25).render(f'{int((self._reset_time - (pygame.time.get_ticks() - self.score_time))/1000)+1}',True,'white')\n\t\ttime_left_rect = time_left_text.get_rect(center=(300,150))\n\n\t\tself.screen.blit(time_left_text,time_left_rect)\n\n\tdef boundaries(self):\n\t\tif self.rect.top <= 0:\n\t\t\tself.speed_y = -self.speed_y\n\t\tif self.rect.bottom >= 350:\n\t\t\tself.speed_y = 
-self.speed_y\n\n\tdef collision(self):\n\t\tif (bool(pygame.sprite.spritecollide(self,self.blocks_group,False))):\n\t\t\tcollision_block_rect = pygame.sprite.spritecollide(self,self.blocks_group,False)[0].rect\n\n\t\t\tself.num_of_collisions += 1\n\t\t\tself.increment = self.num_of_collisions//7\n\n\t\t\tself.pong_sound.play()\n\n\t\t\tif abs(self.rect.right - collision_block_rect.left) < 10 and self.speed_x > 0:\n\t\t\t\tself.speed_x = -self.speed_x\n\t\t\tif abs(self.rect.left - collision_block_rect.right) < 10 and self.speed_x < 0:\n\t\t\t\tself.speed_x = -self.speed_x\n\t\t\tif abs(self.rect.top - collision_block_rect.bottom) < 10 and self.speed_y < 0:\n\t\t\t\tself.rect.top = collision_block_rect.bottom\n\t\t\t\tself.speed_y = -self.speed_y\n\t\t\tif abs(self.rect.bottom - collision_block_rect.top) < 10 and self.speed_y > 0:\n\t\t\t\tself.rect.bottom = collision_block_rect.top\n\t\t\t\tself.speed_y = -self.speed_y\t\t\n\t\n\tdef update(self):\n\t\tself.boundaries()\n\t\tself.move()\n\t\tself.collision()\n\n\nclass Game:\n\tdef __init__(self):\n\t\t# Init\n\t\tpygame.init()\n\t\tpygame.display.set_caption(\"Ping Pong\")\n\n\t\t# Screen\n\t\tself._width = 600\n\t\tself._height = 350\n\t\tself.screen = pygame.display.set_mode((self._width, self._height))\n\t\t\n\t\t# Clock\n\t\tself.clock = pygame.time.Clock()\n\n\t\t# Music\n\t\tself.score_sound = pygame.mixer.Sound('../include/sound/score.ogg')\n\n\t\t# Groups \n\t\tself.blocks_group = pygame.sprite.Group()\n\t\tself.ball_group = pygame.sprite.GroupSingle()\n\n\t\t# Objects\n\t\tself.player = Player(\n\t\t\tpath = '../include/icons/paddle.png',\n\t\t\tinit_pos = (10,self._height/2)\n\t\t\t)\n\t\tself.blocks_group.add(self.player)\n\n\t\tself.opponent = Opponent(\n\t\t\tpath = '../include/icons/paddle.png',\n\t\t\tinit_pos = (self._width-10,self._height/2)\n\t\t\t)\n\t\tself.blocks_group.add(self.opponent)\n\n\t\tself.ball = Ball(\n\t\t\tpath = '../include/icons/ball.png',\n\t\t\tinit_pos = 
(self._width/2,self._height/2),\n\t\t\tblocks_group = self.blocks_group,\n\t\t\tscreen=self.screen\n\t\t\t)\t\t\n\t\tself.ball_group.add(self.ball)\n\n\t\t# Score\n\t\tself.score_player\t= 0\n\t\tself.score_opponent = 0\n\n\tdef run(self):\n\t\texit = False\n\t\tgame_active = False\n\n\t\twhile not exit:\t\n\t\t\tfor event in pygame.event.get():\n\n\t\t\t\tif event.type == pygame.QUIT:\n\t\t\t\t\texit = True\n\n\t\t\tkeys = pygame.key.get_pressed()\t\t\n\n\t\t\tif keys[pygame.K_SPACE]:\n\t\t\t\tgame_active = True\n\t\t\t\n\t\t\tif keys[pygame.K_ESCAPE]:\n\t\t\t\tgame_active = False\n\t\t\t\n\t\t\tif keys[pygame.K_UP]:\n\t\t\t\tself.player.movement = -1\n\n\t\t\tif keys[pygame.K_DOWN]:\n\t\t\t\tself.player.movement = 1\n\t\t\t\t\t\t\n\t\t\tif keys[pygame.K_UP]==False and keys[pygame.K_DOWN]==False:\n\t\t\t\tself.player.movement = 0\n\t\t\t\n\t\t\tif game_active:\n\t\t\t\tself.display_score()\n\n\t\t\t\tself.check_ball()\n\n\t\t\t\tself.blocks_group.draw(self.screen)\n\t\t\t\tself.ball_group.draw(self.screen)\n\n\t\t\t\tself.blocks_group.update(self.ball_group)\n\t\t\t\tself.ball_group.update()\n\t\t\t\t\n\t\t\telse:\n\t\t\t\tself.ball.reset_ball()\n\t\t\t\tself.display_init()\n\n\t\t\tpygame.display.flip()\n\t\t\tself.clock.tick(60)\n\n\tdef exit(self):\n\t\tpygame.quit()\n\t\tsys.exit()\n\n\tdef display_init(self):\n\t\tself.screen.fill([20,20,20])\n\n\t\tmask = self.screen.convert_alpha()\n\t\tmask.fill([75,75,75,150])\n\t\tmask_rect = mask.get_rect(center=(self._width/2,self._height/2))\n\n\t\tself.screen.blit(mask,mask_rect)\n\n\t\tinit = pygame.font.Font(None,36).render(\"Pulsa ESPACIO para comenzar\", True, 'white')\n\t\tinit_rect = init.get_rect(center=(self._width/2,self._height/2))\n\n\t\tself.screen.blit(init,init_rect)\n\n\tdef display_score(self):\n\t\tself.screen.fill([20,20,20])\n\n\n\t\tpygame.draw.line(\n\t\t\tsurface = self.screen,\n\t\t\tcolor = 'white',\n\t\t\tstart_pos = (self._width/2,0),\n\t\t\tend_pos = 
(self._width/2,self._height),\n\t\t\twidth = 3\n\t\t\t)\n\n\t\tscore1 = pygame.font.Font(None,16).render(f'{self.score_player}', True, 'white')\n\t\tscore2 = pygame.font.Font(None,16).render(f'{self.score_opponent}', True, 'white')\n\n\t\tscore1_rect = score1.get_rect(center=(self._width/2-10,self._height/2+25))\n\t\tscore2_rect = score2.get_rect(center=(self._width/2+10,self._height/2+25))\n\n\t\tself.screen.blit(score1,score1_rect)\n\t\tself.screen.blit(score2,score2_rect)\n\n\t\tmask = self.screen.convert_alpha()\n\t\tmask_rect = mask.get_rect(center=(self._width/2,self._height/2))\n\t\tmask.fill([75,75,75,150])\n\t\t\n\t\tself.screen.blit(mask,mask_rect)\n\n\tdef check_ball(self):\n\t\tif self.ball.rect.x <= -10:\n\t\t\tself.score_opponent += 1\n\t\t\tself.ball.score_time = pygame.time.get_ticks()\n\t\t\tself.score_sound.play()\n\t\t\tself.ball.reset_ball()\n\t\tif self.ball.rect.x >= self._width + 10:\n\t\t\tself.score_player += 1\n\t\t\tself.ball.score_time = pygame.time.get_ticks()\n\t\t\tself.score_sound.play()\n\t\t\tself.ball.reset_ball()","repo_name":"nachoperezzv/PingPong","sub_path":"src/pong.py","file_name":"pong.py","file_ext":"py","file_size_in_byte":7098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"28382425143","text":"#!/usr/bin/env python\n\nimport logging\nimport select\nimport socket\nimport sys\nfrom observer.observer import observerObject\n\nfrom common.MessageProtocol import MessageProtocol\n\n# reference the subject\nobsObj = observerObject()\n\nclass Client:\n __socket_timeout = 2\n __select_timeout = 0.5\n __read_size = 4096\n \n def __init__(self, input_queue, send_queue):\n self._logger = logging.getLogger('Client')\n \n self._input_queue = input_queue\n self._output_queue = send_queue\n \n # Connect to the server\n def connect_to_server(self, host, port):\n # Create the client socket\n self._logger.debug('Creating client socket.')\n self._client_socket = 
socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self._client_socket.settimeout(self.__socket_timeout)\n \n # Attempt to connect to the server\n self._logger.debug('Attempting to connect to server.')\n \n try:\n self._client_socket.connect((host, port))\n except:\n self._logger.error('Unable to connect to the server.')\n \n return False\n \n self._logger.debug('Connected to %s on port %s', host, port)\n \n return True\n \n def run(self):\n while True:\n fd_list = [self._client_socket]\n \n # Get the list of sockets that are readable\n ready_to_read, ready_to_write, input_error = select.select(fd_list, [], [], self.__select_timeout)\n \n for sock in ready_to_read:\n # Received message from server\n if sock == self._client_socket:\n # This should ensure all the data from the socket is received\n message_list = []\n \n while 1:\n message, bytes_read = MessageProtocol.recv_msg(sock)\n \n if bytes_read > 0:\n message_list.append(message)\n else:\n break\n\n # Check to see if data is available\n message_list_length = len(message_list)\n \n if message_list_length > 0:\n for message in message_list:\n # Place the server message into the output queue and notify the client that data has been received\n self._output_queue.put(message)\n \n self.notify_client_message()\n # Disconnected from server\n else:\n self._logger.error('Disconnected from the server.')\n sys.exit()\n \n # Check to see if data is available on the input queue\n # Note: Normally the queue would be in the select call, but I don't think \n # Queue is implemented as a file descriptor in Python (or Windows sucks)\n if self._input_queue.qsize() > 0:\n self._logger.debug('Retrieving message from input queue.')\n \n try:\n message = self._input_queue.get_nowait()\n except:\n break\n \n # Send message to the server\n self._logger.debug('Sending message to server.')\n MessageProtocol.send_msg(self._client_socket, message)\n \n # only called if there is a message in the queue\n def notify_client_message(self):\n 
obsObj.subject.notify_observers()","repo_name":"shahsarick/clueless","sub_path":"client/Client.py","file_name":"Client.py","file_ext":"py","file_size_in_byte":3638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"38107856623","text":"#!/usr/bin/env python3\n\n\"\"\"\nScript for calling different templates on corresponding addresses\n\"\"\"\n\nfrom flask import Flask, request, render_template, url_for, redirect\nfrom file_reader import *\n\n__author__ = 'Niek Scholten'\n\nAPP = Flask(__name__)\n\n\n@APP.route('/')\ndef index():\n \"\"\"\n The homepage, contains general information\n \"\"\"\n return render_template('index.html', title='Home', active1='active')\n\n\n@APP.route('/upload')\ndef upload():\n \"\"\"\n Used for selecting the file you want to upload\n \"\"\"\n return render_template('upload.html', title='Upload', active2='active')\n\n\n@APP.route('/results', methods=['GET', 'POST'])\ndef results():\n \"\"\"\n Will check for an input, if there is a valid one, load the template\n \"\"\"\n if request.method == 'POST':\n try:\n form_input_file = request.files['file']\n process = FQProcessor(form_input_file) # Initializes the class\n process.read_file()\n process.get_quality()\n return render_template('results.html', title='Results',\n results=process.create_graph(), active3='active')\n except (KeyError, IndexError, UnicodeDecodeError): # Checks for failures in the reading process\n return render_template('upload.html', active2='active',\n message='Please select a valid FASTQ file')\n else:\n return redirect('/upload')\n\n\n@APP.route('/about')\ndef about():\n \"\"\"\n Contains more in depth information about the webapp\n \"\"\"\n return render_template('about.html', title='About', active4='active')\n\n\nif __name__ == '__main__':\n APP.run(debug=True)\n","repo_name":"niek265/Bio-Informatica-Hanze","sub_path":"Thema 03 - DNA harde schijf van de 
cel/Praktijkopdracht/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"20779629816","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf8')\nfrom chatterbot import ChatBot\nfrom flask import Flask, render_template, request\nfrom chatterbot.input import InputAdapter\nfrom chatterbot.output import OutputAdapter\nfrom chatterbot.comparisons import levenshtein_distance\napp = Flask(__name__)\n# to enable verbose logging\nimport logging\n# logging.basicConfig(level=logging.INFO) \n# Create a new instance of a ChatBot\nchatbot = ChatBot(\n \"TourismBot\", # These are the adapters that are required to build a chatbot\n storage_adapter='chatterbot.storage.SQLStorageAdapter',\n output_adapter='chatterbot.output.OutputAdapter',\n output_format=\"text\", # The output format is \"text\"\n statement_comparison_function=levenshtein_distance, # The sentence comparison is calculated by using Levenshtein distance function\n # These are the logic adapters that are required for normal operation \n logic_adapters=[\n {\n 'import_path': 'chatterbot.logic.BestMatch' \n },\n {\n 'import_path': 'chatterbot.logic.LowConfidenceAdapter',\n 'threshold': 0.5, \n 'default_response': 'വെരെ ഏതെങ്കിലും അറിയാനുണ്ടോ?'\n }\n ],\n \n \n )\n@app.route(\"/\")\ndef home():\n return render_template(\"index3.html\")\n@app.route(\"/get\")\ndef get_bot_response():\n while True: # The following loop will execute each time the user enters input\n try:\n response = request.args.get('msg')\n return str(chatbot.get_response(response)) # Select a response to the input statement\n\n\n except(KeyboardInterrupt, EOFError, SystemExit): # Press ctrl-c or ctrl-d on the keyboard to exit\n print('\\nനന്ദി!\\n')\n break\nif __name__ == \"__main__\":\n 
app.run()\n","repo_name":"sandhinisukumar/Malayalam_Chatbot","sub_path":"chat.py","file_name":"chat.py","file_ext":"py","file_size_in_byte":1943,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"17225085463","text":"from typing import Iterable\n\n\"\"\" In a given list the first element should become the last one. An empty list or list with only one element should stay the same.\n\nexample\n\nInput: List.\n\nOutput: Iterable. \"\"\"\n\ndef replace_first(items: list) -> Iterable:\n return [] if len(items) == 0 else items[1:] + [items[0]]\n\n\nif __name__ == \"__main__\":\n print(\"Example:\")\n print(list(replace_first([1, 2, 3, 4])))\n\n assert list(replace_first([1, 2, 3, 4])) == [2, 3, 4, 1]\n assert list(replace_first([1])) == [1]\n assert list(replace_first([])) == []\n print(\"Coding completed!\")","repo_name":"franciscocruz29/CheckiO_Python","sub_path":"Initiation/replace_first.py","file_name":"replace_first.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"6712406748","text":"import hashlib\nimport itertools\nimport pandas as pd\nimport random\nimport numpy as np\nimport scipy.optimize as opt\nimport math\nfrom collections import defaultdict\n\ndef shingle_document(string, k):\n # initialize set data structure\n shingle_set = set()\n \n # for each position in string,\n for i in range(len(string)-k):\n # extract substring of length k\n substr = string[i:i + k]\n \n # hash substring into 32-bit integer using crc32\n hashed = hashlib.sha1(substr).hexdigest()\n \n # insert into set\n shingle_set.add(hashed)\n \n # return set\n return(shingle_set)\n\n\ndef jaccard(a, b):\n # compute union size\n union = a | b\n \n # compute intersection size\n intersection = a & b\n \n # return ratio of union and intersection\n return(float(len(intersection))/float(len(union)))\n\n\ndef 
invert_shingles(shingled_documents):\n # initialize list for tuples\n inverted_shingles = []\n \n # initialize list for document ids\n document_ids = []\n \n # for each document in input\n for doc in shingled_documents:\n # append document id to list\n document_ids.append(doc[0])\n \n # for each item in document\n for item in doc[1]:\n # append (item, docid) tuple\n inverted_shingles.append((item, doc[0]))\n \n # sort tuple list\n inverted_shingles.sort(key=lambda x: x[0])\n \n # return sorted tuple list, and document list\n return inverted_shingles, document_ids\n\n\ndef make_random_hash_fn(p=2**33-355, m=4294967295):\n a = random.randint(1,p-1)\n b = random.randint(0, p-1)\n return lambda x: ((a * x + b) % p) % m\n\n\ndef make_hashes(num_hash):\n function_list = []\n \n for i in range(num_hash):\n function_list.append(make_random_hash_fn())\n \n return function_list\n\n\ndef make_minhash_signature(shingled_data, num_hashes):\n inv_index, docids = invert_shingles(shingled_data)\n num_docs = len(docids)\n \n # initialize the signature matrix with infinity in every entry\n sigmatrix = np.full([num_hashes, num_docs], np.inf)\n \n # generate hash functions\n hash_funcs = make_hashes(num_hashes)\n \n # iterate over each non-zero entry of the characteristic matrix\n for row, docid in inv_index:\n doc_ind = docids.index(docid)\n \n # update signature matrix if needed\n for h in range(num_hashes):\n hsh = hash_funcs[h](row)\n \n if hsh < sigmatrix[h, doc_ind]:\n sigmatrix[h, doc_ind] = hsh\n \n return sigmatrix, docids\n\n\ndef minhash_similarity(id1, id2, minhash_sigmat, docids):\n id1_ind = docids.index(id1)\n id2_ind = docids.index(id2)\n \n id1_column = minhash_sigmat[ : , id1_ind]\n id2_column = minhash_sigmat[ : , id2_ind]\n \n matches = 0\n for i in range(len(id1_column)):\n if id1_column[i] == id2_column[i]:\n matches += 1\n \n return float(matches)/len(id1_column)\n\n\n\ndef minhash_pair_similarity(shingled_documents, num_hashes):\n sigmat, docids = 
make_minhash_signature(shingled_documents, num_hashes)\n \n tups = []\n for comb in itertools.combinations(docids, 2):\n id1 = comb[0]\n id2 = comb[1]\n \n sim = minhash_similarity(id1, id2, sigmat, docids)\n \n tups.append((id1, id2, sim))\n \n return tups\n\n\n\ndef _choose_nbands(t, n):\n def _error_fun(x):\n cur_t = (1/x[0])**(x[0]/n)\n return (t-cur_t)**2\n \n opt_res = opt.minimize(_error_fun, x0=(10), method='Nelder-Mead')\n b = int(math.ceil(opt_res['x'][0]))\n r = int(n / b)\n final_t = (1/b)**(1/r)\n return b, final_t\n\ndef _make_vector_hash(num_hashes, m=4294967295):\n hash_fns = make_hashes(num_hashes)\n def _f(vec):\n acc = 0\n for i in range(len(vec)):\n h = hash_fns[i]\n acc += h(vec[i])\n return acc % m\n return _f\n\n\ndef do_lsh(minhash_sigmatrix, numhashes, docids, threshold):\n # choose the number of bands, and rows per band to use in LSH\n b, _ = _choose_nbands(threshold, numhashes)\n r = int(numhashes / b)\n \n narticles = len(docids)\n \n # generate a random hash function that takes vectors of lenght r as input\n hash_func = _make_vector_hash(r)\n \n # setup the list of hashtables, will be populated with one hashtable per band\n buckets = []\n \n # fill hash tables for each band\n for band in range(b):\n # figure out which rows of minhash signature matrix to hash for this band\n start_index = int(band * r)\n end_index = min(start_index + r, numhashes)\n \n # initialize hashtable for this band\n cur_buckets = defaultdict(list)\n \n for j in range(narticles):\n # THIS IS WHAT YOU NEED TO IMPLEMENT\n hashed = hash_func(minhash_sigmatrix[start_index:end_index, j])\n \n cur_buckets[hashed].append(docids[j])\n \n # add this hashtable to the list of hashtables\n buckets.append(cur_buckets)\n \n return buckets\n\n\ndef candidate_article_pairs(buckets):\n pairs = []\n for band in buckets:\n for hash in band:\n if len(band[hash]) > 1:\n for comb in itertools.combinations(band[hash], 2):\n if (comb) not in pairs:\n pairs.append((comb))\n return 
pairs\n\n\nhash_size = 4\n\nabstacts = pd.read_csv('abstracts.csv')\n\ndat = zip(range(abstacts.shape[0]), abstacts['text'])\n\nshingled_documents = []\nfor comb in dat:\n shingled_documents.append((comb[0], shingle_document(comb[1], hash_size)))\n\nminhash_sigmatrix, docids = make_minhash_signature(shingled_documents, hash_size)\n\n\n\nmpldf = pd.DataFrame()\nfor t in [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]:\n print(t)\n \n buckets = do_lsh(minhash_sigmatrix, hash_size, docids, t)\n\n pairs = candidate_article_pairs(buckets)\n\n truth = open('articles_' + str(n) + '.truth').read().splitlines()\n truth = list(map(lambda x: tuple(x.split(' ')), truth))\n\n tp = len([pairs for p in pairs if p in truth])\n fn = len([truth for t in truth if t not in pairs])\n fp = len([pairs for p in pairs if p not in truth])\n tn = n**2 - (fp + fn + tp)\n \n temp = pd.DataFrame.from_dict({'threshold': [t], 'sensitivity': [tp/(tp + fn)], 'specificity': [tn/(tn + fp)]})\n\n mpldf = mpldf.append(temp)\n","repo_name":"mcooper/mine-food-security","sub_path":"process/MinHash.py","file_name":"MinHash.py","file_ext":"py","file_size_in_byte":6251,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"31688892656","text":"from __future__ import absolute_import\n\nfrom . import LookupBase\n\n__all__ = ['DictLookup']\n\n\nclass DictLookup(LookupBase):\n \"\"\"Instantiate this class with a Python dictionary-like object and it may\n be used as a slimta lookup interface.\n\n :param backend: The backend dictionary-like object that will be queried for\n data lookups. The values in this mapping **must** also be\n dictionary-like objects.\n :type backend: collections.Mapping\n :param key_template: This template string is used to determine the key\n string to lookup. 
The :py:meth:`str.format` method is\n called with keyword arguments, given the keyword\n arguments passed in to :meth:`.lookup`.\n :type key_template: str\n\n \"\"\"\n\n def __init__(self, backend, key_template):\n super(DictLookup, self).__init__()\n self.backend = backend\n self.key_template = key_template\n\n def lookup(self, **kwargs):\n key = self._format_key(self.key_template, kwargs)\n try:\n ret = self.backend[key]\n except KeyError:\n ret = None\n self.log(__name__, kwargs, ret)\n return ret\n\n\n# vim:et:fdm=marker:sts=4:sw=4:ts=4\n","repo_name":"slimta/python-slimta","sub_path":"slimta/lookup/drivers/dict.py","file_name":"dict.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","stars":168,"dataset":"github-code","pt":"54"} +{"seq_id":"11463062548","text":"from bs4 import BeautifulSoup\nimport urllib.request\nimport re\nimport os\n\nvalidExts = [\"jar\", \"pom\", \"asc\", \"md5\", \"sha256\", \"sha512\", \"sha1\"]\n\ndef request(url: str) -> str:\n print(\"Requesting:\" + url)\n with urllib.request.urlopen(url) as url:\n return url.read().decode()\n\ndef download(url: str, file: str):\n print(\"Downloading \" + url)\n urllib.request.urlretrieve(url, file)\n\ndef clone(notation: str):\n print(\"Cloning: \" + notation)\n baseUrl = \"https://repo1.maven.org/maven2\"\n\n group, artifact, version = notation.split(\":\")\n\n dirURL = \"{}/{}/{}/{}\".format(baseUrl, group.replace(\".\", \"/\"), artifact, version)\n dir = \"{}/{}/{}\".format(group.replace(\".\", \"/\"), artifact, version)\n\n anchors = findAnchors(dirURL + \"/\")\n\n for anchor in anchors:\n fileName = anchor.get('href')\n if isWantedFile(fileName) :\n downloadFile(dirURL, dir, fileName)\n\n\ndef findAnchors(url: str):\n soup = BeautifulSoup(request(url), features=\"html.parser\")\n return soup.findAll('a')\n\ndef isWantedFile(fileName: str) -> bool:\n ext = fileName.split(\".\")[-1]\n return ext in validExts\n\ndef downloadFile(dirUrl: str, dir: 
str, file: str):\n fileName = \"files/\" + dir + \"/\" + file\n\n if not os.path.isdir(\"files/\" + dir):\n os.makedirs(\"files/\" + dir)\n\n download(dirUrl + \"/\" + file, fileName)\n\ndef main():\n clone(\"org.ow2.asm:asm:9.3\")\n clone(\"org.ow2.asm:asm-analysis:9.3\")\n clone(\"org.ow2.asm:asm-commons:9.3\")\n clone(\"org.ow2.asm:asm-tree:9.3\")\n clone(\"org.ow2.asm:asm-util:9.3\")\n clone(\"org.ow2.asm:asm-bom:9.3\")\n\nif __name__ == \"__main__\":\n main()","repo_name":"modmuss50/maven-clone","sub_path":"clone.py","file_name":"clone.py","file_ext":"py","file_size_in_byte":1631,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"70137300961","text":"import os\nimport requests\nimport shutil\nfrom datetime import datetime\n\n\nclass Download():\n def download_file_from_url(self, url, output_path, filename, overwrite=True, ignore_errors=False, timeout=30, auth=None):\n \"\"\"\n Esta funcao faz o download de um arquivo\n :param url: url completa de qual arquivo deve ser baixado\n :param dir: path completo onde o arquivo devera se salvo\n :param filename: nome do arquivo apos baixado\n :param auth: Tupla (username, password)\n :return: file_path: file path completo do arquivo salvo\n \"\"\"\n\n file_path = os.path.join(output_path, filename)\n\n start = datetime.now()\n\n # Se a sobreescreita estiver ativa tenta apagar o arquivo\n if overwrite:\n if os.path.isfile(file_path):\n try:\n os.remove(file_path)\n except OSError as e:\n print(\"Failed to remove an existing file\")\n print(e)\n if ignore_errors:\n return None, None\n else:\n raise e\n\n if not os.path.exists(file_path):\n\n try:\n requests.packages.urllib3.disable_warnings()\n # Resolve problema de SSL precisa da lib pyopenssl.\n requests.packages.urllib3.util.ssl_.DEFAULT_CIPHERS += ':RC4-SHA'\n requests.packages.urllib3.util.ssl_.DEFAULT_CIPHERS += ':HIGH:!DH:!aNULL'\n try:\n requests.packages.urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST 
+= ':RC4-SHA'\n requests.packages.urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST += ':HIGH:!DH:!aNULL'\n except AttributeError:\n # no pyopenssl support used / needed / available\n pass\n\n r = requests.get(url, stream=True, verify=False,\n timeout=timeout, auth=auth)\n if r.status_code == 200:\n with open(file_path, 'wb') as f:\n r.raw.decode_content = True\n shutil.copyfileobj(r.raw, f)\n except Exception as e:\n if ignore_errors:\n return None, None\n else:\n raise e\n\n try:\n\n finish = datetime.now()\n\n size = os.path.getsize(file_path)\n\n tdelta = finish - start\n seconds = tdelta.total_seconds()\n\n # print(\"Downloading Done! File: %s Size: %s bytes Time %s seconds\" % (filename, size, seconds))\n\n download_stats = dict({\n \"start_time\": start.isoformat(),\n \"finish_time\": finish.isoformat(),\n \"download_time\": seconds,\n \"file_size\": size,\n \"filename\": filename,\n \"file_path\": file_path,\n \"path\": output_path\n })\n\n return file_path, download_stats\n except OSError as e:\n msg = \"File %s was not downloaded\" % file_path\n # print(msg)\n if ignore_errors:\n return None, None\n else:\n raise Exception(msg)\n\n else:\n msg = \"File %s already exists\" % file_path\n # print(msg)\n if ignore_errors:\n return None, None\n else:\n raise Exception(msg)\n","repo_name":"linea-it/tno_pipelines","sub_path":"external_inputs/download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":3563,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"31847263845","text":"def create_sorted_word(mp):\n ls = []\n for i, v in enumerate(mp):\n for j in range(v):\n ls.append(chr(i+97))\n return \"\".join(ls)\n\n\ndef Anagrams(self, words, n):\n '''\n words: list of word\n n: no of words\n return : list of group of anagram {list will be sorted in driver code (not word in grp)}\n '''\n # Instead of sorting each element\n # just max 26 char if same elements are present then it is anagram 
O(nk)\n anagrams = dict()\n\n for word in words:\n mp = [0]*26\n for w in word:\n mp[ord(w) - ord('a')] += 1\n # make sorted word\n sorted_word = create_sorted_word(mp)\n if not anagrams.get(sorted_word, None):\n anagrams[sorted_word] = [word]\n else:\n anagrams[sorted_word].append(word)\n\n return anagrams.values()\n","repo_name":"okmd/leetcode","sub_path":"string/lb-13-anagram-together.py","file_name":"lb-13-anagram-together.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"39928970664","text":"# Name : Myung Joon Kim (김명준)\n# School : Pennsylvania State University\n\ndata = []\n\nfor i in range(40):\n data.append(input())\n#print(data)\n# the number of stars\ns = 0\nfor i in data:\n if i == \"\":\n print()\n s = 0\n continue\n\n m = i.count(\"*\")\n print(\".\" * (len(i) - m - s), end=\"\")\n print(\"*\" * m, end=\"\")\n print(\".\" * s)\n s += m\n","repo_name":"RayMJK/Python_DataStructures_Algorithms","sub_path":"Programmers/Problem_B.py","file_name":"Problem_B.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"25345880794","text":"import os\r\nimport datetime\r\nimport playsound\r\nimport speech_recognition as sr\r\nfrom gtts import gTTS\r\nimport subprocess\r\nimport webbrowser\r\n\r\nclass Voice_Assistant:\r\n \r\n def __init__(self):\r\n self.Assistant=' Maaya '\r\n self.User='Inspire'\r\n self.User_info='Computer Engineering Student'\r\n self.txt=1\r\n\r\n\r\n def Speak(self,text):\r\n tts=gTTS(text=text,lang=\"en\")\r\n file=str(self.txt)+\".mp3\"\r\n tts.save(file)\r\n playsound.playsound(file,True)\r\n self.txt+=1\r\n os.remove(file)\r\n\r\n\r\n def Listen_to_source(self):\r\n rec_obj=sr.Recognizer()\r\n print('listening')\r\n with sr.Microphone() as source:\r\n audio=rec_obj.listen(source)\r\n resp_text=\"\"\r\n try:\r\n 
resp_text=rec_obj.recognize_google(audio).lower()\r\n return resp_text\r\n except:\r\n self.Speak(\"Sorry,I didn't get you.Try again\") \r\n \r\n\r\n def Initialize_VA(self):\r\n curr_time=int(datetime.datetime.now().hour)\r\n if curr_time>=0 and curr_time<12:\r\n self.Speak('Good morning '+self.User)\r\n elif curr_time>=12 and curr_time<17:\r\n self.Speak('Good afternoon '+self.User)\r\n else:\r\n self.Speak('Good evening '+self.User)\r\n self.Speak('How can I help you')\r\n \r\n\r\n def Notepad(self):\r\n self.Speak('Ok,tell me how should I save it')\r\n filename=self.Listen_to_source()\r\n self.Speak('What should I write down')\r\n content=self.Listen_to_source()\r\n filename=str(filename)+\".txt\"\r\n with open(filename,\"w\") as f:\r\n f.write(content)\r\n subprocess.Popen([\"notepad.exe\",filename])\r\n self.Speak(\"Do you want me to save it?\")\r\n text=self.Listen_to_source()\r\n if 'no' in text:\r\n os.remove(filename)\r\n else:\r\n self.speak(\"Saved it for future reference\")\r\n\r\n\r\n def Open_browser(self):\r\n url=\"http://www.google.com\"\r\n chrome_path='C:/Program Files (x86)/Google/Chrome/Application/chrome.exe'\r\n webbrowser.open_new_tab(url)\r\n self.Speak(\"here you go\")\r\n\r\n \r\n def Change_user_details(self):\r\n self.Speak('How should I call you')\r\n self.User=self.Listen_to_source()\r\n self.Speak=('What are you')\r\n self.User_info=self.Listen_to_source()\r\n self.Speak('User information changed successfully')\r\n \r\n def Details(self):\r\n self.Speak(\"I am \"+self.Assistant+\" voice assistant of \"+self.User)\r\n self.Speak('User '+self.User+' she is '+self.User_info +'she made me')\r\n\r\n\r\nVA_obj=Voice_Assistant()\r\nVA_obj.Initialize_VA()\r\ntext=VA_obj.Listen_to_source()\r\nwhile(True):\r\n if 'stop' in text:\r\n break\r\n elif 'who are you' in text or 'details' in text:\r\n VA_obj.Details()\r\n elif 'notepad' in text or 'make a note' in text:\r\n VA_obj.Notepad()\r\n elif 'browser' in text or 'google' in text:\r\n 
VA_obj.Open_browser()\r\n elif 'change user information' in text:\r\n VA_obj.Change_user_details()\r\n text=VA_obj.Listen_to_source()","repo_name":"SwethaSarah/Python-Projects","sub_path":"Desktop_Assistant/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":2869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"35201271392","text":"#File Path\nTRAIN_FILE_PATH = '/content/MyDrive/MyDrive/Data Science/Projects/SpoonShot/train.csv'\nTEST_FILE_PATH = '/content/MyDrive/MyDrive/Data Science/Projects/SpoonShot/test.csv'\n\n#Load Data\ndata = pd.read_csv(TRAIN_FILE_PATH)\ntestdata = pd.read_csv(TEST_FILE_PATH)\n\n#Set Column Names \ndata.columns = ['ClassIndex', 'Title', 'Description']\ntestdata.columns = ['ClassIndex', 'Title', 'Description']\n\n#Combine Title and Description\nX_train = data['Title'] + \" \" + data['Description'] # Combine title and description (better accuracy than using them as separate features)\ny_train = data['ClassIndex'].apply(lambda x: x-1).values # Class labels need to begin from 0\n\nx_test = testdata['Title'] + \" \" + testdata['Description'] # Combine title and description (better accuracy than using them as separate features)\ny_test = testdata['ClassIndex'].apply(lambda x: x-1).values # Class labels need to begin from 0\n\n#Max Length of sentences in Train Dataset\nmaxlen = X_train.map(lambda x: len(x.split())).max()\ndata.head()\n","repo_name":"ishandutta0098/AG_NewsClassification","sub_path":"preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"13510160550","text":"import logging\nfrom typing import List\nimport numpy as np\nimport open3d as o3d\nfrom matplotlib import pyplot as plt\n\nimport a0\n\nimport graspnetAPI\nfrom . 
import wait_until_a0_server_ready, start_a0_server_heartbeat\nfrom polygrasp import serdes\nfrom polygrasp.serdes import polygrasp_msgs\n\nimport signal\n\n\nlog = logging.getLogger(__name__)\ntopic_key = \"grasp_server\"\ngrasp_topic_key = f\"{topic_key}/grasp\"\ncollision_topic_key = f\"{topic_key}/collision\"\n\n\ndef save_img(img, name):\n f = plt.figure()\n plt.imshow(img)\n f.savefig(f\"{name}.png\")\n plt.close(f)\n\n\nclass GraspServer:\n def _get_grasps(self, pcd: o3d.geometry.PointCloud) -> np.ndarray:\n raise NotImplementedError\n\n def _get_collisions(\n self, grasps: graspnetAPI.GraspGroup, scene_pcd: o3d.geometry.PointCloud\n ) -> graspnetAPI.GraspGroup:\n raise NotImplementedError\n\n def start(self):\n log.info(f\"Starting grasp server...\")\n\n def grasp_onrequest(req):\n log.info(f\"Got request; computing grasp group...\")\n\n payload = req.pkt.payload\n pcd = serdes.capnp_to_pcd(payload)\n grasp_group = self._get_grasps(pcd)\n\n log.info(f\"Done. Replying with serialized grasp group...\")\n req.reply(serdes.grasp_group_to_capnp(grasp_group).to_bytes())\n\n def collision_onrequest(req):\n \"\"\"\n Calls the collision detector from graspnet-baseline's server.\n \"\"\"\n log.info(f\"Got request; computing collisions...\")\n\n payload = req.pkt.payload\n with polygrasp_msgs.CollisionRequest.from_bytes(payload) as msg:\n grasp_group = serdes.bytes_to_grasp_group(msg.grasps)\n scene_pcd = serdes.capnp_to_pcd(msg.pcd)\n\n filtered_grasp_group = self._get_collisions(grasp_group, scene_pcd)\n log.info(f\"Done. 
Replying with serialized filtered grasps...\")\n req.reply(serdes.grasp_group_to_bytes(filtered_grasp_group))\n\n self.grasp_server = a0.RpcServer(grasp_topic_key, grasp_onrequest, None)\n start_a0_server_heartbeat(grasp_topic_key)\n self.collision_server = a0.RpcServer(\n collision_topic_key, collision_onrequest, None\n )\n start_a0_server_heartbeat(collision_topic_key)\n\n signal.pause()\n\n\nclass GraspClient:\n def __init__(self, view_json_path):\n wait_until_a0_server_ready(grasp_topic_key)\n wait_until_a0_server_ready(collision_topic_key)\n\n self.grasp_client = a0.RpcClient(grasp_topic_key)\n self.collision_client = a0.RpcClient(collision_topic_key)\n self.view_json_path = view_json_path\n\n def downsample_pcd(\n self, pcd: o3d.geometry.PointCloud, max_num_bits=8 * 1024 * 1024\n ):\n # a0 default max msg size 16MB; make sure every msg < 1/2 of max\n i = 1\n while True:\n downsampled_pcd = pcd.uniform_down_sample(i)\n bits = serdes.pcd_to_capnp(downsampled_pcd).to_bytes()\n if len(bits) > max_num_bits:\n log.warning(f\"Downsampling pointcloud...\")\n i += 1\n else:\n break\n if i > 1:\n log.warning(f\"Downsampled to every {i}th point.\")\n\n return bits\n\n def get_grasps(self, pcd: o3d.geometry.PointCloud) -> graspnetAPI.GraspGroup:\n bits = self.downsample_pcd(pcd)\n result_bits = self.grasp_client.send_blocking(bits).payload\n return serdes.capnp_to_grasp_group(result_bits)\n\n def get_collision(\n self, grasps: graspnetAPI.GraspGroup, scene_pcd: o3d.geometry.PointCloud\n ):\n request = polygrasp_msgs.CollisionRequest()\n request.pcd = self.downsample_pcd(scene_pcd)\n request.grasps = serdes.grasp_group_to_bytes(grasps)\n\n bits = request.to_bytes()\n result_bits = self.collision_client.send_blocking(bits).payload\n\n return serdes.bytes_to_grasp_group(result_bits)\n\n def visualize(self, scene_pcd, render=False, save_view=False):\n \"\"\"Render a scene's pointcloud and return the Open3d Visualizer.\"\"\"\n vis = o3d.visualization.Visualizer()\n 
vis.create_window()\n vis.add_geometry(scene_pcd, reset_bounding_box=True)\n\n if render:\n \"\"\"Render the window. You can rotate it & save the view.\"\"\"\n # Actually render the window:\n log.info(f\"Rendering scene in Open3D\")\n vis.run()\n param = vis.get_view_control().convert_to_pinhole_camera_parameters()\n if save_view:\n log.info(f\"Saving new view to {self.view_json_path}\")\n # Save the view\n o3d.io.write_pinhole_camera_parameters(self.view_json_path, param)\n\n param = o3d.io.read_pinhole_camera_parameters(self.view_json_path)\n vis.get_view_control().convert_from_pinhole_camera_parameters(param)\n\n return vis\n\n def visualize_grasp(\n self,\n scene_pcd: o3d.geometry.PointCloud,\n grasp_group: graspnetAPI.GraspGroup,\n n=5,\n render=False,\n save_view=False,\n name=\"scene\",\n ) -> None:\n \"\"\"Visualize grasps upon a scene's pointcloud.\"\"\"\n grasp_o3d_geometries = grasp_group.to_open3d_geometry_list()\n grasp_pointclouds = [\n grasp_o3d_geometry.sample_points_uniformly(number_of_points=5000)\n for grasp_o3d_geometry in grasp_o3d_geometries\n ]\n vis = self.visualize(scene_pcd=scene_pcd, render=render, save_view=save_view)\n\n # Save scene\n grasp_image = np.array(vis.capture_screen_float_buffer(do_render=True))\n save_img(grasp_image, name)\n\n n = min(n, len(grasp_o3d_geometries))\n log.info(f\"Visualizing top {n} grasps in Open3D...\")\n\n # Save scene with each top grasp individually\n for i, grasp_pointcloud in enumerate(grasp_pointclouds[:n]):\n vis.add_geometry(grasp_pointcloud, reset_bounding_box=False)\n grasp_image = np.array(vis.capture_screen_float_buffer(do_render=True))\n save_img(grasp_image, f\"{name}_with_grasp_{i + 1}\")\n vis.remove_geometry(grasp_pointcloud, reset_bounding_box=False)\n\n # Save scene with all grasps\n for grasp_pointcloud in grasp_pointclouds[:n]:\n vis.add_geometry(grasp_pointcloud, reset_bounding_box=False)\n grasp_image = np.array(vis.capture_screen_float_buffer(do_render=True))\n save_img(grasp_image, 
f\"{name}_with_grasps\")\n\n return vis\n\n def get_obj_grasps(\n self,\n obj_pcds: List[o3d.geometry.PointCloud],\n scene_pcd: o3d.geometry.PointCloud,\n ):\n \"\"\"\n Get grasps for each object pointcloud, then filter by\n checking collisions against the scene pointcloud.\n \"\"\"\n for obj_i, obj_pcd in enumerate(obj_pcds):\n print(f\"Getting obj {obj_i} grasp...\")\n grasp_group = self.get_grasps(obj_pcd)\n filtered_grasp_group = self.get_collision(grasp_group, scene_pcd)\n if len(filtered_grasp_group) < len(grasp_group):\n print(\n \"Filtered\"\n f\" {len(grasp_group) - len(filtered_grasp_group)}/{len(grasp_group)} grasps\"\n \" due to collision.\"\n )\n if len(filtered_grasp_group) > 0:\n return obj_i, filtered_grasp_group\n raise Exception(\n \"Unable to find any grasps after filtering, for any of the\"\n f\" {len(obj_pcds)} objects\"\n )\n","repo_name":"facebookresearch/fairo","sub_path":"perception/sandbox/polygrasp/src/polygrasp/grasp_rpc.py","file_name":"grasp_rpc.py","file_ext":"py","file_size_in_byte":7529,"program_lang":"python","lang":"en","doc_type":"code","stars":826,"dataset":"github-code","pt":"54"} +{"seq_id":"73039374561","text":"\"\"\"\n1차 시도. 0(바��) 를 기준으로 주변에 0이 아닌 타일이 있으면 하나씩 빼주는 방식으로 접근 했다 -> 빙산으로 둘러쌓인 타일 0에 의한 계산이 불가능했다. \n2차 시도. 0이 아닌 타일을 기준으로, 동서남북으로 0 타일이 있으면 그 갯수 만큼 \n타일을 빼준다. -> 답은 맞게 나오는데 recursionError가 뜬다. 
\n3차 시도 -> DFS는 recursionError가 뜨므로, bfs로 푼다.\n\n\"\"\"\n\nimport sys\nsys.setrecursionlimit(10**6)\nn, m = map(int, input().split())\ngraph = []\ndy = [-1, 0, 0, 1]\ndx = [0, -1, 1, 0]\nvisited = [[False] * m for _ in range(n)]\nfor _ in range(n):\n graph.append(list(map(int, input().split())))\n\ndef dfs(y, x):\n # 0이 아닌 타일들을 탐색하고, 접해있는 0의 갯수만큼 빼주는 함수\n global visited\n visited[y][x] = True\n for i in range(4):\n ny = y + dy[i]\n nx = x + dx[i]\n if 0<=ny 1:\n graph[y][x] -=1\n else:\n graph[y][x] = -1\n elif graph[ny][nx] > 0 and not visited[ny][nx]:\n dfs(ny, nx)\ndef cover():\n # -1로 된 타일을 0으로 바꿔주는 함수\n for i in range(n):\n for j in range(m):\n if graph[i][j] == -1:\n graph[i][j] = 0\ndef divided(y, x):\n # 그래프가 두 집합 이상으로 분리되었는지 검사하는 함수\n global checked\n checked[y][x] = True\n for i in range(4):\n ny = y + dy[i]\n nx = x + dx[i]\n if 0<=ny 0:\n if temp >= 1:\n temp += 1\n break\n divided(i, j)\n temp += 1\n #print(graph, cnt, temp)\n if temp > 1:\n print(cnt)\n exit()\n elif temp == 0:\n print(0)\n exit()\n \n visited = [[False] * m for _ in range(n)]\n for i in range(1, n):\n for j in range(1, m):\n if graph[i][j] != 0 and not visited[i][j]:\n dfs(i, j)\n cover()\n cnt += 1\n \n \n \n \n ","repo_name":"jeean0668/algorithm","sub_path":"graph/DFS/gold/2573_r.py","file_name":"2573_r.py","file_ext":"py","file_size_in_byte":2424,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"17831298610","text":"import os\nfd=\"./Data/Heroes.txt\"\nfile=open(fd,'r')\ntext=file.read()\nfile.close()\n# print(text)\nl=list(text.split(\"\\n\"))\nl2=l\nfor i in range(len(l)):\n\tl2[i]=l[i].strip(\"\\r\")\n# print(l)\ni=0\nos.mkdir(\"./Data/Survivors_of_Snap\")\nnewdir=\"./Data/Survivors_of_Snap/\"\npath=\"./Data/Avengers_Universe/\"\nfor filename in os.listdir(path):\n\t# i+=1\n\tfile=open(path+filename,'r')\n\ttext=file.read()\n\tl2=list(text.split(\"\\n\"))\n\tfor j in l2:\n\t\tif j.strip(\"\\r\") in l:\n\t\t\t# 
print(filename)\n\t\t\tos.rename(path+filename,newdir+filename)\n\t\t\tbreak\n\tfile.close()\n\n# print(i)","repo_name":"niyati2k/SSL","sub_path":"180050078_InLab3/Q3.py","file_name":"Q3.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"144532407","text":"import json\nimport re\nimport os\nimport html\nimport requests\nfrom RajniiRobot import AI_API_KEY\nimport RajniiRobot.modules.sql.kuki_sql as sql\n\nfrom time import sleep\nfrom telegram import ParseMode\nfrom telegram import (CallbackQuery, Chat, MessageEntity, InlineKeyboardButton,\n InlineKeyboardMarkup, Message, ParseMode, Update, Bot,\n User)\nfrom telegram.ext import (CallbackContext, CallbackQueryHandler,\n CommandHandler, DispatcherHandlerStop, Filters,\n MessageHandler, run_async)\nfrom telegram.error import BadRequest, RetryAfter, Unauthorized\nfrom telegram.utils.helpers import mention_html, mention_markdown, escape_markdown\n\nfrom RajniiRobot.modules.helper_funcs.filters import CustomFilters\nfrom RajniiRobot.modules.helper_funcs.chat_status import user_admin, user_admin_no_reply\nfrom RajniiRobot import dispatcher, updater, SUPPORT_CHAT\nfrom RajniiRobot.modules.log_channel import gloggable\n\n\n@user_admin_no_reply\n@gloggable\n@run_async\ndef rm_ai(update: Update, context: CallbackContext) -> str:\n query: Optional[CallbackQuery] = update.callback_query\n user: Optional[User] = update.effective_user\n match = re.match(r\"rm_chat\\((.+?)\\)\", query.data)\n if match:\n user_id = match.group(1)\n chat: Optional[Chat] = update.effective_chat\n is_kuki = sql.rem_kuki(chat.id)\n if is_kuki:\n is_kuki = sql.rem_kuki(user_id)\n return (\n f\"{html.escape(chat.title)}:\\n\"\n f\"AI_DEACTIVATED\\n\"\n f\"Admin: {mention_html(user.id, html.escape(user.first_name))}\\n\"\n )\n else:\n update.effective_message.edit_text(\n \"Rajni AI Chat module Deactivated by {}.\".format(\n mention_html(user.id, 
user.first_name)),\n parse_mode=ParseMode.HTML,\n )\n\n return \"\"\n\n\n@user_admin_no_reply\n@gloggable\n@run_async\ndef add_ai(update: Update, context: CallbackContext) -> str:\n query: Optional[CallbackQuery] = update.callback_query\n user: Optional[User] = update.effective_user\n match = re.match(r\"add_chat\\((.+?)\\)\", query.data)\n if match:\n user_id = match.group(1)\n chat: Optional[Chat] = update.effective_chat\n is_kuki = sql.set_kuki(chat.id)\n if is_kuki:\n is_kuki = sql.set_kuki(user_id)\n return (\n f\"{html.escape(chat.title)}\\n\"\n f\"AI_ACTIVATED\\n\"\n f\"Admin: {mention_html(user.id, html.escape(user.first_name))}\\n\"\n )\n else:\n update.effective_message.edit_text(\n \"Rajni AI Chat module Activated by {}.\".format(\n mention_html(user.id, user.first_name)),\n parse_mode=ParseMode.HTML,\n )\n\n return \"\"\n\n\n@user_admin\n@gloggable\n@run_async\ndef aichat_toggle(update: Update, context: CallbackContext):\n user = update.effective_user\n message = update.effective_message\n msg = f\"Choose an option below to toggle ai chatbot.\"\n keyboard = InlineKeyboardMarkup([[\n InlineKeyboardButton(text=\"◇ Activate\", callback_data=\"add_chat({})\")\n ], [\n InlineKeyboardButton(text=\"◇ Deactivate\", callback_data=\"rm_chat({})\")\n ]])\n message.reply_text(\n msg,\n reply_markup=keyboard,\n parse_mode=ParseMode.HTML,\n )\n\n\ndef get_message(context: CallbackContext, message):\n reply_message = message.reply_to_message\n if message.text.lower() == \"rajni\":\n return True\n if reply_message:\n if reply_message.from_user.id == context.bot.get_me().id:\n return True\n else:\n return False\n\n\n@run_async\ndef ai_reply(update: Update, context: CallbackContext):\n message = update.effective_message\n chat_id = update.effective_chat.id\n bot = context.bot\n is_kuki = sql.is_kuki(chat_id)\n if not is_kuki:\n return\n\n if message.text and not message.document:\n if not get_message(context, message):\n return\n Message = message.text\n 
bot.send_chat_action(chat_id, action=\"typing\")\n kukiurl = requests.get(f'{AI_API_KEY}' + Message)\n Kuki = json.loads(kukiurl.text)\n kuki = Kuki['reply']\n sleep(0.3)\n message.reply_text(kuki, timeout=60)\n\n\n@run_async\ndef ai_chats(update: Update, context: CallbackContext):\n chats = sql.get_all_kuki_chats()\n text = \"AI Chatbot-Enabled in these Chats\\n\"\n for chat in chats:\n try:\n x = context.bot.get_chat(int(*chat))\n name = x.title or x.first_name\n text += f\"◇ {name}\\n\"\n except (BadRequest, Unauthorized):\n sql.rem_kuki(*chat)\n except RetryAfter as e:\n sleep(e.retry_after)\n update.effective_message.reply_text(text, parse_mode=\"HTML\")\n\n\n__help__ = f\"\"\"\nAI Chat Module utilizes the Kuki's api which allows {dispatcher.bot.first_name} to talk and provide a more interactive group chat experience.\n*Admins only Commands*:\n ◇ `/aichat`*:* Will ask you to on/off AI Chatbot.\n*Powered by Itel Ai*\n\"\"\"\n\n__mod_name__ = \"AIChat\"\n\nCHATBOTK_HANDLER = CommandHandler(\"aichat\", aichat_toggle)\nADD_CHAT_HANDLER = CallbackQueryHandler(add_ai, pattern=r\"add_chat\")\nRM_CHAT_HANDLER = CallbackQueryHandler(rm_ai, pattern=r\"rm_chat\")\nCHATBOT_HANDLER = MessageHandler(\n Filters.text & (~Filters.regex(r\"^#[^\\s]+\") & ~Filters.regex(r\"^!\")\n & ~Filters.regex(r\"^\\/\")), ai_reply)\nLIST_ALL_CHATS_HANDLER = CommandHandler(\"aichats\",\n ai_chats,\n filters=CustomFilters.dev_filter)\n\ndispatcher.add_handler(ADD_CHAT_HANDLER)\ndispatcher.add_handler(CHATBOTK_HANDLER)\ndispatcher.add_handler(RM_CHAT_HANDLER)\ndispatcher.add_handler(LIST_ALL_CHATS_HANDLER)\ndispatcher.add_handler(CHATBOT_HANDLER)\n\n__handlers__ = [\n ADD_CHAT_HANDLER,\n CHATBOTK_HANDLER,\n RM_CHAT_HANDLER,\n LIST_ALL_CHATS_HANDLER,\n 
CHATBOT_HANDLER,\n]\n","repo_name":"TeamUltraUnion/rajniixrobot","sub_path":"RajniiRobot/modules/aichat.py","file_name":"aichat.py","file_ext":"py","file_size_in_byte":6073,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"18900917929","text":"import pandas as pd\nimport os\nimport connectionDatabase as Conn\nfrom createTable import criarTabela\nfrom decimal import Decimal\n\n# docker run --name=Mysql56 -e MYSQL_ROOT_PASSWORD=root -p 3308:3306 -d mysql:5.6\n\n# Conexão com o banco de dados\ndatabase = Conn.connectionDB()\n\n# Variaveis que definem a localização do arquivo\nuser = os.getlogin()\n\npath = f'C:/Users/{user}/Downloads/'\nname_file = 'BTLG11'\ntype_file = 'csv'\n\n# Variável que define o arquivo que será lido\nfile = f'{path}{name_file}.{type_file}'\n\n# Variável que recebe a saida de uma função Pandas que lê o CSV\ndf = pd.read_csv(file)\n\ncursor = database.cursor()\n\n# Função do arquivo createTable.py que cria uma tabela a ser populada\ncriarTabela(name_file)\n\n# Função que limpa a tabela caso a tabela já exista e tenha conteudo\n# Essa limpeza é mais utilizada quando ocorre atualizações nos registros\ncursor.execute('TRUNCATE FUNDOS.' 
+ name_file + ';')\ndatabase.commit()\n\n\n# Função for que popula a tabela de forma sequencial, utilizando somente da Data, do Valor de fechamento e de abertura\nfor num in range(df['Date'].size):\n dateFii = df['Date'][num]\n openFii = Decimal(df['Open'][num])\n closeFii = Decimal(df['Close'][num])\n cursor.execute('INSERT into FUNDOS.'+name_file+' (dia, abertura, fechamento) VALUES(%s,%s,%s)', (dateFii,\n openFii,\n closeFii))\n database.commit()\n\n","repo_name":"dgfernandes/TCC","sub_path":"populateTable.py","file_name":"populateTable.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33193842747","text":"\nimport os\nfrom pathlib import Path\nfrom typing import Dict, Tuple\n\nfrom sklearn.metrics import roc_curve, precision_recall_curve, auc\nimport numpy as np\nimport pandas as pd\nfrom repath.data.annotations import AnnotationSet\nfrom repath.data.annotations.asapxml import load_annotations\nfrom repath.data.datasets import Dataset\nfrom repath.data.slides.openslide import Slide\nfrom repath.data.slides import SlideBase\nfrom repath.utils.paths import project_root\nfrom repath.utils.metrics import conf_mat_raw, plotROC, plotROCCI, pre_re_curve\n\n\n\nclass Camelyon16(Dataset):\n def __init__(self, root: Path, paths: pd.DataFrame) -> None:\n super().__init__(root, paths)\n\n def load_annotations(self, file: Path, label: str) -> AnnotationSet:\n \"\"\" Load annotations form a given annotation file path.\n\n Args:\n file (Path): Path to the annotation file. 
\n \n Returns:\n AnnotationSet which includes: annotations, labels, labels_order and fill_label\n \"\"\"\n group_labels = {\"Tumor\": \"tumor\", \"_0\": \"tumor\", \"_1\": 'tumor', \"_2\": 'normal', 'Exclusion': 'normal', 'None': 'normal'}\n annotations = load_annotations(file, group_labels) if file else []\n labels_order = [\"background\", \"tumor\", \"normal\"]\n return AnnotationSet(annotations, self.labels, labels_order, \"normal\")\n\n @property\n def slide_cls(self) -> SlideBase:\n return Slide\n\n @property\n def labels(self) -> Dict[str, int]:\n return {\"background\": 0, \"normal\": 1, \"tumor\": 2}\n\n @property\n def slide_labels(self) -> Dict[str, int]:\n return {\"normal\": 0, \"tumor\": 1}\n\n\ndef training():\n \"\"\" Generated a data-frame of slide_path, annotation_path, label and tags for train dataset.\n\n Returns:\n DataFrame (pd.DataFrame): Train data frame\n \"\"\"\n # set up the paths to the slides and annotations\n root = project_root() / \"data\" / \"camelyon16\" / \"raw\" / \"training\"\n annotations_dir = root / \"lesion_annotations\"\n tumor_slide_dir = root / \"tumor\"\n normal_slide_dir = root / \"normal\"\n\n # all paths are relative to the dataset 'root'\n annotation_paths = sorted([p.relative_to(root) for p in annotations_dir.glob(\"*.xml\")])\n tumor_slide_paths = sorted([p.relative_to(root) for p in tumor_slide_dir.glob(\"*.tif\")])\n normal_slide_paths = sorted([p.relative_to(root) for p in normal_slide_dir.glob(\"*.tif\")])\n\n # turn them into a data frame and pad with empty annotation paths\n df = pd.DataFrame()\n df[\"slide\"] = tumor_slide_paths + normal_slide_paths\n df[\"annotation\"] = annotation_paths + [\"\" for _ in range(len(normal_slide_paths))]\n df[\"label\"] = ['tumor'] * len(tumor_slide_paths) + ['normal'] * len(normal_slide_paths)\n df[\"tags\"] = \"\"\n\n return Camelyon16(root, df)\n\n\ndef training_small():\n # set up the paths to the slides and annotations\n cam16 = training()\n df = cam16.paths.sample(12, 
random_state=777)\n\n return Camelyon16(project_root() / cam16.root, df)\n\n\ndef testing():\n \"\"\" Generated a data-frame of slide_path, annotation_path, label and tags for test dataset.\n\n Returns:\n DataFrame (pd.DataFrame): Test data frame\n \"\"\"\n # set up the paths to the slides and annotations\n root = project_root() / \"data\" / \"camelyon16\" / \"raw\" / \"testing\"\n annotations_dir = root / \"lesion_annotations\"\n slide_dir = root / \"images\"\n \n\n # all paths are relative to the dataset 'root'\n slide_paths = sorted([p.relative_to(root) for p in slide_dir.glob(\"*.tif\")])\n annotation_paths = sorted([p.relative_to(root) for p in annotations_dir.glob(\"*.xml\")])\n\n #get the slide name\n slide_names = []\n for path in slide_paths:\n head, tail = os.path.split(path)\n slide_names.append(tail.split('.')[0])\n\n #search for slides with annotations, add the annotation path if it exists else add empty string\n slides_annotations_paths = []\n for name in slide_names:\n a_path = \"\"\n for anno_path in annotation_paths:\n if name in str(anno_path):\n a_path = anno_path\n slides_annotations_paths.append(a_path)\n \n #get the slide labels by reading the csv file\n csv_path = root / 'reference.csv'\n label_csv_file = pd.read_csv(csv_path, header = None)\n slide_labels = label_csv_file.iloc[:, 1]\n\n # turn them into a data frame and pad with empty annotation paths\n df = pd.DataFrame()\n df[\"slide\"] = slide_paths \n df[\"annotation\"] = slides_annotations_paths\n df[\"label\"] = slide_labels\n df[\"tags\"] = \"\"\n\n return Camelyon16(root, df)\n\n\n\n\n","repo_name":"StAndrewsMedTech/icairdpath-public","sub_path":"repath/data/datasets/camelyon16.py","file_name":"camelyon16.py","file_ext":"py","file_size_in_byte":4634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"15612583851","text":"# V0 \n\n# V1 \n# https://blog.csdn.net/fuxuemingzhu/article/details/86563872\n# 
https://blog.csdn.net/danspace1/article/details/88737508\n# IDEA : \n# TOTAL MOVES = abs(coins left need) + abs(coins right need)\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\nclass Solution(object):\n def distributeCoins(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: int\n \"\"\"\n self.ans = 0\n \n def dfs(root):\n # return the balance of the node\n if not root: return 0\n left = dfs(root.left)\n right = dfs(root.right)\n self.ans += abs(left) + abs(right)\n return root.val -1 + left + right \n dfs(root)\n return self.ans\n \n# V2 \n# Time: O(n)\n# Space: O(h)\n# Definition for a binary tree node.\nclass TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n \nclass Solution(object):\n def distributeCoins(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: int\n \"\"\"\n def dfs(root, result):\n if not root:\n return 0\n left, right = dfs(root.left, result), dfs(root.right, result)\n result[0] += abs(left) + abs(right)\n return root.val + left + right - 1\n\n result = [0]\n dfs(root, result)\n return result[0]","repo_name":"yennanliu/CS_basics","sub_path":"leetcode_python/Tree/distribute-coins-in-binary-tree.py","file_name":"distribute-coins-in-binary-tree.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","stars":69,"dataset":"github-code","pt":"54"} +{"seq_id":"38369878883","text":"import os\n\nimport random\nimport numpy as np\nimport cupy as cp\n\nimport chainer\nimport chainer.links as L\nfrom chainer.training import extensions\n\nimport chainercv\nfrom chainercv.visualizations import vis_image\nfrom matplotlib import pyplot as plt\n\nimport cifar10_dataset\nimport net\n\n\nclass Transform():\n\n def __init__(self, debug=False):\n self.debug = debug\n\n def __call__(self, in_data):\n img, label = in_data\n self._debug_imshow(img)\n\n # Color 
augmentation\n img = chainercv.transforms.pca_lighting(img, 25.5)\n img = np.clip(img, 0, 255)\n self._debug_imshow(img)\n\n # Random horizontal flipping\n img = chainercv.transforms.random_flip(img, x_random=True)\n self._debug_imshow(img)\n\n # ランダムにキャンバスを拡張し,画像を配置\n img = chainercv.transforms.random_expand(img, max_ratio=1.5)\n self._debug_imshow(img)\n\n # 指定したサイズの画像を,ランダムな位置からクロップ\n img = chainercv.transforms.random_crop(img, (32, 32))\n self._debug_imshow(img)\n\n img /= 255.\n\n return img, label\n\n def _debug_imshow(self, img):\n if self.debug:\n vis_image(img)\n plt.show()\n\n\nclass TrainClassificationTask():\n\n def __init__(\n self,\n classes,\n train_dataset, test_dataset,\n gpu,\n model,\n batch_size, n_epoch, alpha, weight_decay,\n snapshot_interval, print_interval, output_dir\n ):\n # ラベルを読込み\n self.classes = classes\n\n # データセットを読込み\n self.train_dataset = train_dataset\n self.test_dataset = test_dataset\n\n # モデルを読込み\n self.model = model\n\n # GPUを使用する場合は,モデルをGPUにコピーする\n self.gpu = gpu\n if self.gpu >= 0:\n chainer.cuda.get_device(self.gpu).use()\n self.model.to_gpu()\n\n # Optimizerのセットアップ\n print(\"setting optimizer...: alpha=%e\" % (alpha))\n self.optimizer = chainer.optimizers.Adam(alpha=alpha)\n self.optimizer.setup(self.model)\n if weight_decay:\n print(\"setting optimizer...: weight_decay=%e\" % (weight_decay))\n self.optimizer.add_hook(chainer.optimizer.WeightDecay(weight_decay))\n\n # イテレーションの設定\n print(\"setting iterator, updater and trainer...\")\n self.train_iter = chainer.iterators.SerialIterator(\n self.train_dataset,\n batch_size,\n repeat=True,\n shuffle=True\n )\n self.test_iter = chainer.iterators.SerialIterator(\n self.test_dataset,\n batch_size,\n repeat=False,\n shuffle=False\n )\n self.updater = chainer.training.StandardUpdater(self.train_iter, self.optimizer, device=self.gpu)\n self.output_dir = output_dir\n self.trainer = chainer.training.Trainer(self.updater, (n_epoch, 'epoch'), out=self.output_dir)\n\n # 検証用データセットで評価する\n 
self.trainer.extend(extensions.Evaluator(self.test_iter, self.model, device=self.gpu))\n # 学習途中でスナップショットを取得する\n self.trainer.extend(\n extensions.snapshot(filename='snapshot_iter_{.updater.epoch}'),\n trigger=(snapshot_interval, 'epoch')\n )\n # 学習途中でモデルのスナップショットを取得する\n self.trainer.extend(\n extensions.snapshot_object(\n self.model,\n filename='snapshot_model_{.updater.epoch}',\n savefun=chainer.serializers.save_hdf5\n ),\n trigger=(snapshot_interval, 'epoch')\n )\n # グラフを取得する\n self.trainer.extend(\n extensions.PlotReport(\n [\n 'main/loss',\n 'validation/main/loss'\n ],\n x_key='epoch',\n file_name='loss.png',\n marker=\"\"\n )\n )\n self.trainer.extend(\n extensions.PlotReport(\n [\n 'main/accuracy',\n 'validation/main/accuracy'\n ],\n x_key='epoch',\n file_name='accuracy.png',\n marker=\"\"\n )\n )\n # ログを取得する\n self.trainer.extend(extensions.LogReport())\n # 学習と検証の状況を表示する\n self.trainer.extend(\n extensions.PrintReport(\n [\n 'epoch',\n 'main/loss',\n 'validation/main/loss',\n 'main/accuracy',\n 'validation/main/accuracy',\n 'elapsed_time'\n ]\n ),\n trigger=(print_interval, 'epoch')\n )\n # プログレスバーを表示する\n self.trainer.extend(extensions.ProgressBar())\n\n def run(self):\n print(\"starting training...\")\n self.trainer.run()\n\n print(\"saving model...\")\n model_file_path = os.path.join(self.output_dir, 'net.model')\n chainer.serializers.save_hdf5(model_file_path, model)\n \n print(\"training is complete\")\n\n\ndef set_random_seed(seed):\n # set Python random seed\n random.seed(seed)\n\n # set NumPy random seed\n np.random.seed(seed)\n\n # set Chainer (CuPy) random seed\n cp.random.seed(seed)\n\n\nif __name__ == '__main__':\n # 乱数のシードを設定\n set_random_seed(0)\n\n # データセットを読込み\n dataset = cifar10_dataset.CIFAR10Dataset(\n train=True\n )\n\n # データセットを学習用と検証用に分割する\n # 90%を学習用とする\n dataset_split_rate = int(len(dataset) * 0.9)\n train_dataset, test_dataset = chainer.datasets.split_dataset_random(\n dataset,\n dataset_split_rate,\n seed=0\n )\n\n # 
Transformクラスを使用して,データセットの水増しを行う\n train_transform_dataset = chainer.datasets.TransformDataset(\n train_dataset,\n Transform(debug=False)\n )\n test_transform_dataset = chainer.datasets.TransformDataset(\n test_dataset,\n Transform(debug=False)\n )\n\n # モデルを読込み\n # * L.Classifierでは予測した値とラベルとの誤差を計算する.\n # デフォルトではsoftmax_cross_entropy\n print(\"loading model...\")\n model = L.Classifier(net.Net(len(dataset.classes)))\n\n train_task = TrainClassificationTask(\n classes=dataset.classes,\n train_dataset=train_transform_dataset,\n test_dataset=test_transform_dataset,\n gpu=0,\n model=model,\n batch_size=100,\n n_epoch=200,\n alpha=0.00005,\n weight_decay=0.0001,\n snapshot_interval=10,\n print_interval=1,\n output_dir='./logs'\n )\n\n train_task.run()\n","repo_name":"KeitetsuWorks/chainer-cifar10","sub_path":"train_classification.py","file_name":"train_classification.py","file_ext":"py","file_size_in_byte":7045,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"7908561043","text":"#!/usr/bin/python3\n\nimport os,sys\n\ndef createArchive():\n\tos.system(\"tar -cf \"+sys.argv[2].split(\"/\")[-1]+\"_compress.tar -T /dev/null\")\n\ndef extractArchive(fichier):\n\tos.system(\"mkdir -p uncompressed && tar -xf \"+fichier.split(\"/\")[-1]+\".tar.gz -C uncompressed\")\n\n\ndef check(repertoire):\n\tfilename = os.path.realpath(__file__)\n\tfor r,d,f in os.walk(repertoire):\n\t\tfor each in f:\n\t\t\tif each==sys.argv[0].split(\"./\")[1]:\n\t\t\t\tprint(\"Script is in requested directory. 
Cannot compress\")\n\t\t\t\tquit()\n\ndef create_copy(repertoire):\n\tos.system(\"mkdir \"+repertoire.split(\"/\")[-1]+\"_compressed && cp -r --parents \"+repertoire+\" \"+repertoire.split(\"/\")[-1]+\"_compressed/\")\n\tfor root,dirs,files in os.walk(os.path.realpath(sys.argv[0]).rsplit(\"/\",1)[0]+\"/\"+repertoire.split(\"/\")[-1]+\"_compressed\"):\n\t\tfor each in files:\n\t\t\tif not each.endswith(\"hman\"):\n\t\t\t\tos.remove(os.path.join(root,each))\n\n\tfor r,d,f in os.walk(repertoire):\n\t\tfor each in f:\n\t\t\tif each.endswith(\"hman\"):\n\t\t\t\tos.remove(os.path.join(r,each))\n\n\ndef compress(repertoire):\n\n\tforbidden_ext=['pdf','o','ppt','zip','dot','c','cpp','py','gz']\n\tlst = os.listdir(repertoire)\n\tfor each in lst:\n\t\tif os.path.isdir(repertoire+'/'+each):\n\t\t\tcompress(repertoire+'/'+each)\n\t\telif os.path.isfile(repertoire+'/'+each):\n\t\t\t#On peut chercher les fichier.endswith(\".txt\") mais pas toutes les fichiers du type text\n\t\t\t#finissent par .txt .\n\t\t\tif not each.split(\".\")[-1] in forbidden_ext:\n\t\t\t\ttry:\n\t\t\t\t\tos.system(\"./huffman -c \"+os.path.realpath(repertoire)+'/'+each)\n\t\t\t\texcept:\n\t\t\t\t\tprint(\"couldnt compress this file: \"+os.path.realpath(repertoire)+'/'+each)\n\n\n\ndef archive(repertoire):\n\tos.system(\"tar uf \"+repertoire.split('/')[-1]+\"_compress.tar \"+repertoire.split(\"/\")[-1]+\"_compressed \")\n\tos.system(\"gzip \"+repertoire.split('/')[-1]+\"_compress.tar\")\n\tos.system(\"rm -rf \"+repertoire.split(\"/\")[-1]+\"_compressed\")\n\ndef decompress(repertoire):\n\tlst=[]\n\tfor root,dirs,files in os.walk(repertoire):\n\t\tfor each in files:\n\t\t\tif each.endswith(\".hman\"):\n\t\t\t\tlst.append(os.path.join(root,each))\n\n\tfor every in lst:\n\t\tos.system(\"./huffman -d \"+every)\n\t\tos.system(\"rm \"+every)\n\t\t\t\n\nif __name__==\"__main__\":\n\tif(len(sys.argv)!=3):\n\t\tprint(\"Usage: ./script.py [-c/-d] 
repertoire\")\n\t\tsys.exit()\n\telse:\n\t\tif(sys.argv[1]==\"-c\"):\n\t\t\tcheck(sys.argv[2]);\n\t\t\tcreateArchive()\n\t\t\tcompress(sys.argv[2])\n\t\t\tcreate_copy(sys.argv[2])\n\t\t\tarchive(sys.argv[2])\n\t\t\tprint(\"\\nsuccesfully compressed folder\")\n\t\telif(sys.argv[1]==\"-d\"):\n\t\t\textractArchive(sys.argv[2])\n\t\t\tdecompress(\"uncompressed\")\n\t\t\tprint(\"Succesfully decompressed\")","repo_name":"chi43to/HuffmanAlgorithm","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":2537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"40830502580","text":"import argparse\nfrom datetime import datetime\nimport random\nimport numpy as np\nimport os\nimport time\nfrom utils import *\nfrom utils.dataloader_utils import load_data_graphalg,load_dataset, analyze_dataset_hyperbolicity,relations_to_degree_analysis\nfrom params import *\nimport sys\nfrom datetime import date\nimport pickle\nimport train_inductive\nimport train\nfrom trials.dataset_config import config\nimport pickle\nimport optuna\nimport copy\nfrom main import parse_default_args\nfrom matplotlib import pyplot as plt\nfrom data.MEG.get_data import preprocess\nfrom data.MEG.create_relations_dir import create_relations\nif __name__ == '__main__':\n # tunable_param_fn = config['tunable_param_fn']\n\n # set_params = config['set_params']\n # params = config\n # print()\n\n # args = parse_default_args(set_params)\n# \n # print(args,'args?')\n # train_file,dev_file,test_file = preprocess(args)\n # results = analyze_dataset_hyperbolicity(args,'graph',80,node_samples=100)\n\n # relations_dir=r\"C:\\Users\\Cole S Baker\\Desktop\\Thesis\\Thesis\\hgcn\\data\\MEG\\35\\plv\\alpha\\relations\")\n # output_dir=r\"C:\\Users\\Cole S Baker\\Desktop\\Thesis\\Thesis\\hgcn\\data\\MEG\\35\\plv\\alpha\")\n # C:\\Users\\Cole S Baker\\Desktop\\Thesis\\Thesis\\hgcn\\data\\MEG\\32\\plv\\alpha\n # 
relations_to_degree_analysis(relations_dir,output_dir=output_dir)\n\n # (?# model_dir= r\"C:\\Users\\Cole S Baker\\Desktop\\Thesis\\Thesis\\hgcn\\logs\\meg\\lp\\2022_5_11\\55\" ## allpha id_sim loss= .381 w/ 32noisey. seems to not be helping anything. training was unstable, slow)\n # use_band='gamma'\n use_band='alpha'\n use_percentiles=True\n plot_percentiles=True\n model_dir=r\"C:\\Users\\Cole S Baker\\Desktop\\Thesis\\Thesis\\hgcn\\logs\\meg\\lp\\2022_5_11\\60\"\n model_name ='model'\n model_path = os.path.join(model_dir,\"{}.pt\".format(model_name)) ### args should be saved with the model\n out_embedding_path = model_dir\n model = th.load(model_path)\n raw_scans = np.load(model.args.raw_scan_file)\n setattr(model.args,'refresh_data',1)\n setattr(model.args,'train_noise_num',0)\n setattr(model.args,'use-plv',0)\n setattr(model.args,'use-identity',1)\n setattr(model.args,'use-identity-sim',0)\n setattr(model.args,'band',use_band)\n BAND_TO_INDEX = {'theta':0,'alpha':1,'beta1':2,'beta2':3,'beta':4,'gamma':5}\n\n args=model.args\n # print(set_params,'set_params')\n\n \n if use_percentiles:\n percentiles=[70,72,74,75,77,78,79,80,81,82,84,86]\n # percentiles=[75,77,79]\n scan_data = raw_scans[:,BAND_TO_INDEX[use_band],0]\n train_scans = scan_data\n \n\n X_weight = np.array([a[np.triu_indices(a.shape[0],k=1)] for a in train_scans])\n # plt.hist(X_weight.flatten(),bins=20)\n # plt.show()\n threshold=[np.percentile(X_weight,p) for p in percentiles]\n # print(threshold,'THRESHOLS')\n # adda\n\n\n # dffff\n else:\n raise Exception('fuck this ya')\n # threshold = [.33,.35]\n # threshold = [.17,.18,.19,.2,.21,.22,.23,.24,.25,.26,.28,.29,.3,.31,.32,.33,.35,.36,.37,.38,.39,.4]\n\n # threshold = [.93,.94]\n # n_samp = [10000,50000]\n n_samp=[2000]\n # samps={}\n results = []\n # hyps=[1.0, 1.0285714285714285, 1.0714285714285714, 1.1785714285714286, 1.292857142857143, 1.457142857142857, 1.5071428571428571, 1.5357142857142858]\n props = []\n stat_results={}\n groups = 
['all','scd','hc']\n stats= ['hyp_mean','hyp_std','edge_prop','largest_comp']\n title_stats = ['Mean Hyperbolicity','STD Hyperbolicity','% nodes pairs w/ edge','Avg Largest Componented']\n handle_stats = ['Mean Hyper','STD Hyper','%edges','Lgst Comp/100']\n for s in stats:\n stat_results[s]={}\n for g in groups:\n stat_results[s][g]=[]\n for t in threshold:\n t=t\n setattr(args,'adj_threshold',t)\n\n # print(args.adj_threshold,'ARGMUIIIE')\n # continue\n train_file,valid_file,test_file,all_file,idxs_dict,indx_file = preprocess(args)\n # d\n # d\n setattr(args,'train_file',train_file)\n setattr(args,'dev_file',valid_file)\n setattr(args,'test_file',test_file)\n setattr(args,'all_file',all_file)\n \n for n in n_samp:\n print(n,'MPDE SAMPS')\n results = analyze_dataset_hyperbolicity(args,'graph',20,node_samples=n)\n for g in groups: \n for s in stats:\n stat_results[s][g].append(results[g][s])\n # print('out')\n # print(results)\n # hyps.append(res)\n # props.append(props)\n # # print(res,'res')\n # samps[(t,n)]=res\n # results\n\n\n # print(threshold)\n # print(hyps)\n # print(props)\n x_label=percentiles if plot_percentiles else False\n colors = ['red','blue','green','yellow']\n \n \n handles = []\n # fake= [1,2]\n num_stats = len(stats)\n num_cols=2\n num_rows = num_stats//num_cols\n fig, axes = plt.subplots(num_rows, num_cols) ##TOP TO BOTTOM?\n # fig.legend( lines, labels, loc = (0.5, 0), ncol=5 )\n first =True\n last=False\n for a in range(num_stats):\n s =stats[a]\n title =title_stats[a]\n col=0\n col=a%num_cols\n row= a //num_cols\n ax = axes[row,col] \n ax.title.set_text(title)\n if row!=(num_rows-1):\n ax.xaxis.set_visible(False)\n for i in range(len(groups)):\n g=groups[i]\n line, = ax.plot(x_label,stat_results[s][g])\n # line, = ax.plot(fake,[1+i,2+i])\n handles.append(line)\n if first:\n ax.legend(handles,groups,loc='lower right')\n first=False\n\n plt.show()\n fig, axes = plt.subplots(num_rows, num_cols) ##TOP TO BOTTOM?\n first =True\n last=False\n 
handles = []\n for a in range(num_stats):\n s =stats[a]\n title =title_stats[a]\n col=0\n col=a%num_cols\n row= a //num_cols\n ax = axes[row,col] \n # print(ax)\n # print(axes)\n # ax.\n \n ax.title.set_text(title)\n if row!=(num_rows-1):\n ax.xaxis.set_visible(False)\n for i in range(len(groups)):\n\n g=groups[i]\n if g!='all':\n continue\n line, = ax.plot(x_label,stat_results[s][g])\n # line, = ax.plot(fake,[1+i,2+i])\n handles.append(line)\n if first:\n ax.legend(handles,['all'],loc='lower right')\n first=False\n fig, ax = plt.subplots(1) ##TOP TO BOTTOM?\n first =True\n last=False\n handles = []\n for a in range(num_stats):\n s =stats[a]\n title =\"All stats\"\n ax.title.set_text(title)\n for i in range(len(groups)):\n\n g=groups[i]\n if g!='all':\n continue\n stat = stat_results[s][g] if s!='largest_comp' else [val/100 for val in stat_results[s][g]]\n line, = ax.plot(x_label,stat)\n handles.append(line)\n\n ax.legend(handles,handle_stats,loc='best')\n plt.show()\n # plt.plot(threshold,props)\n # plt.show()\n # plt.plot(threshold,hyps)\n \n # plt.plot(threshold,props)\n # plt.show()\n # print(samps)\n\n","repo_name":"ColeSBaker/hyperBrain","sub_path":"analyze_dataset.py","file_name":"analyze_dataset.py","file_ext":"py","file_size_in_byte":7183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"4223126544","text":"import pastStats\nimport schedule\nimport dataFunctions\n\nfrom datetime import datetime\nimport dateutil\nfrom dateutil.parser import parse\nfrom pprint import pprint\nimport time\nimport pdb\n\n\nscheduleOutput = schedule.getSchedule()\ntry:\n teamStats = dataFunctions.importLocalJSON('teamStats.txt')\nexcept:\n teamStats = {'league': {}}\n\n\naverages1 = ['RYA', 'RTDA', 'PYA', 'PTDA', 'GrossYA']\n# teamFields['averages'].update(averages)\n\n\ndef loopGames():\n for game in scheduleOutput['fullgameschedule']['gameentry']:\n while True:\n try:\n homeTeam = game['homeTeam']['Abbreviation']\n 
awayTeam = game['awayTeam']['Abbreviation']\n date = game['date'].replace('-', '')\n dateDT = dateutil.parser.parse(date)\n addTeamsToDict(homeTeam, awayTeam)\n if dateDT < datetime.now() and date not in teamStats[\n homeTeam]['games']:\n print('pulling stats and depositing', homeTeam, awayTeam)\n gameStats = callGame(homeTeam, awayTeam, date)\n extractStats(gameStats) # Opens new key for game date\n break\n except ValueError as e:\n print('error', e)\n print('sleeping at', datetime.now().time())\n pdb.set_trace()\n time.sleep(301)\n return teamStats\n\n\ndef addTeamsToDict(homeTeam, awayTeam):\n if homeTeam not in teamStats:\n teamStats[homeTeam] = {\n 'averages': {},\n 'games': {}\n }\n for key in averages1:\n teamStats[homeTeam]['averages'][key] = {\n 'total': 0,\n 'average': 0\n }\n if awayTeam not in teamStats:\n teamStats[awayTeam] = {\n 'averages': {},\n 'games': {}\n }\n for key in averages1:\n teamStats[awayTeam]['averages'][key] = {\n 'total': 0,\n 'average': 0\n }\n\n\ndef callGame(homeTeam, awayTeam, date):\n endpoint = 'game_boxscore.json'\n payload = {\n 'gameid': date + '-' + awayTeam + '-' + homeTeam\n }\n gameStats = dataFunctions.apiGet(endpoint, payload)\n return gameStats\n\n\ndef extractStats(gameStats):\n homeTeam = gameStats['gameboxscore']['game']['homeTeam']['Abbreviation']\n homeTeamStats = gameStats['gameboxscore']['homeTeam']['homeTeamStats']\n awayTeam = gameStats['gameboxscore']['game']['awayTeam']['Abbreviation']\n awayTeamStats = gameStats['gameboxscore']['awayTeam']['awayTeamStats']\n date = gameStats['gameboxscore']['game']['date'].replace('-', '')\n teamStats[homeTeam]['games'][date] = {\n 'RYA': int(awayTeamStats['RushYards']['#text']),\n 'RTDA': int(awayTeamStats['RushTD']['#text']),\n 'PYA': int(awayTeamStats['PassGrossYards']['#text']),\n 'PTDA': int(awayTeamStats['PassTD']['#text']),\n 'GrossYA': int(awayTeamStats['OffenseYds']['#text'])\n }\n teamStats[awayTeam]['games'][date] = {\n 'RYA': 
int(homeTeamStats['RushYards']['#text']),\n 'RTDA': int(homeTeamStats['RushTD']['#text']),\n 'PYA': int(homeTeamStats['PassGrossYards']['#text']),\n 'PTDA': int(homeTeamStats['PassTD']['#text']),\n 'GrossYA': int(homeTeamStats['OffenseYds']['#text'])\n }\n\n\ndef calculateAverages(teamStats):\n for team in teamStats:\n if team != 'league':\n teamDict = teamStats[team]\n\n # Zero it from the imported file\n for key in teamDict['averages']:\n teamDict['averages'][key]['average'] = 0\n teamDict['averages'][key]['total'] = 0\n\n # Go through each game and tabulate averages\n gamesPlayed = len(teamDict['games'])\n for game in teamDict['games']:\n game = teamDict['games'][game]\n for key in game:\n teamDict['averages'][key]['total'] = \\\n teamDict['averages'][key]['total'] + game[key]\n for key in teamDict['averages']:\n if 'TD' in key:\n teamDict['averages'][key]['average'] = \\\n round(teamDict['averages'][key]['total']\n / gamesPlayed, 1)\n else:\n teamDict['averages'][key]['average'] = \\\n round(teamDict['averages'][key]['total']\n / gamesPlayed, 0)\n # We now have all averages. 
Let's sort them into rankings\n rankings = statsListsByCategory(teamStats)\n # Now, we want to go through each ranking category and dep\n for category in rankings:\n categoryEntries = rankings[category]\n for statTuple in categoryEntries:\n rank = categoryEntries.index(statTuple) + 1\n team = statTuple[1]\n teamStats[team]['averages'][category]['rank'] = rank\n return teamStats\n\n\ndef calculateLeagueAverages(teamStats):\n teamStats['league'] = {\n 'averages': {},\n 'currentWeek': schedule.currentWeek\n }\n for key in averages1:\n teamStats['league']['averages'][key] = {\n 'average': 0,\n 'total': 0\n }\n for team in teamStats:\n # Calculate totals\n if team != 'league':\n teamAverages = teamStats[team]['averages']\n leagueAverages = teamStats['league']['averages']\n for key in teamAverages:\n stat = teamAverages[key]['average']\n leagueAverages[key]['total'] = \\\n round(leagueAverages[key]['total'], 1) + stat\n # Calculate averages\n for key in leagueAverages:\n leagueAverages[key]['average'] = \\\n round(leagueAverages[key]['total']/32, 1)\n return teamStats\n\n\ndef statsListsByCategory(teamStatsComplete):\n rankings = {}\n for key in averages1:\n rankings[key] = []\n for team in teamStats:\n if team != 'league':\n teamAverages = teamStats[team]['averages']\n for key in teamAverages:\n stat = teamAverages[key]['average']\n rankings[key].append((stat, team))\n for key in rankings:\n rankings[key] = sorted(rankings[key])\n return rankings\n","repo_name":"bholmquist11/bholmquist11.github.io","sub_path":"Batch Data Processing/teamStats.py","file_name":"teamStats.py","file_ext":"py","file_size_in_byte":6330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73377164001","text":"from fastapi import FastAPI, HTTPException, Depends\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom pydantic import BaseModel\nfrom typing import List, Annotated\n\nimport models.user as user_model\n\nimport 
platforms.sleeper as sleeper\nimport platforms.mfl as mfl\n\nfrom database.database import SessionLocal, engine\nfrom sqlalchemy.orm import Session\n\ntags = [\n {\n \"name\": \"User\",\n \"description\": \"Basic information for a user\",\n },\n {\n \"name\": \"Sleeper\",\n \"description\": \"Information from the Sleeper platform\",\n },\n {\n \"name\": \"MFL\",\n \"description\": \"Information from the MFL Platinum platform\",\n }\n]\n\n\napp = FastAPI(\n title=\"Fantasy Dashboard\",\n summary=\"API for unifying fantasy football applications!\",\n version=\"0.0.1\",\n openapi_tags=tags\n)\n\nuser_model.Base.metadata.create_all(bind=engine)\n\norigins = [\n \"http://localhost:53969\", # Replace with the origin of your Flutter app\n]\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"OPTIONS\", \"GET\", \"PUT\", \"POST\", \"DELETE\"],\n allow_headers=[\"/users/\"],\n)\n\nclass User(BaseModel):\n username: str\n email: str\n password: str\n sleeper_id: str | None = None\n\n\ndef get_db():\n db = SessionLocal()\n try:\n yield db\n finally:\n db.close()\n\ndb_dependency = Annotated[Session, Depends(get_db)]\n\napp.include_router(sleeper.router)\napp.include_router(mfl.router)\n\n\n# Create user\n@app.post(\"/users/\", tags=[\"User\"])\nasync def create_user(user: User, db: db_dependency):\n db_user = user_model.User(\n username=user.username,\n email=user.email,\n password=user.password,\n sleeper_id=user.sleeper_id,\n )\n db.add(db_user)\n db.commit()\n db.refresh(db_user)\n\n return {\"message\": \"User created successfully!\"}\n\n\n# Get user\n@app.get(\"/users/{username}\", tags=[\"User\"])\nasync def get_user(username: str, db: db_dependency):\n db_user = db.query(user_model.User).filter(user_model.User.username == username).first()\n if db_user is None:\n raise HTTPException(status_code=404, detail=\"User not found\")\n return db_user\n\n\n# Update user\n@app.put(\"/users/{username}\", 
tags=[\"User\"])\nasync def update_user(username: str, user: User, db: db_dependency):\n db_user = db.query(user_model.User).filter(user_model.User.username == username).first()\n if db_user is None:\n raise HTTPException(status_code=404, detail=\"User not found\")\n db_user.username = user.username\n db_user.email = user.email\n db_user.password = user.password\n db_user.sleeper_id = user.sleeper_id\n db.commit()\n db.refresh(db_user)\n\n return {\"message\": \"User updated successfully!\"}\n\n\n# Delete user\n@app.delete(\"/users/{username}\", tags=[\"User\"])\nasync def delete_user(username: str, db: db_dependency):\n db_user = db.query(user_model.User).filter(user_model.User.username == username).first()\n if db_user is None:\n raise HTTPException(status_code=404, detail=\"User not found\")\n db.delete(db_user)\n db.commit()\n\n return {\"message\": \"User deleted successfully!\"}","repo_name":"ndefelice/Fantasy-Dashboard","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":3142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"27103386855","text":"import metaknowledge as mk\nfrom . import similarity as sims\nimport itertools as it\nfrom functools import reduce\nimport pandas as pd\n\nfrom wosis.KeywordMatch import KeywordMatch\n\nimport warnings\n\ndef search_records(records, keywords, threshold=60.0):\n \"\"\"Deprecated function: Search records for a given set of keywords.\n\n Use `find_keywords()` instead.\n \"\"\"\n warnings.warn(\"Deprecated function! Use `find_keywords()` instead\")\n return find_keywords(records, keywords, threshold=60.0)\n# End search_records()\n\n\ndef find_keywords(records, keywords, threshold=60.0):\n \"\"\"Search records for a given set of keywords.\n\n Keywords will be transformed to lower case.\n\n Parameters\n ==========\n * records : Metaknowledge RecordCollection\n * keywords : str or set or list, of keywords. 
Will be transformed to lowercase\n * threshold : float, similarity must be equal to or above this percentage threshold\n\n Returns\n ==========\n * Metaknowledge RecordCollection, of matched records\n \"\"\"\n if isinstance(keywords, str):\n keywords = [keywords]\n\n keywords = set([kw.lower() for kw in keywords])\n\n matches = mk.RecordCollection()\n for record in records:\n kwds = record.get('DE', None)\n other_kw_field = record.get('keywords', None)\n\n try:\n kwds = kwds + other_kw_field\n except TypeError:\n if other_kw_field:\n # If other_kw_field is a truthy value then\n # kwds must be Falsey (empty) so we can just replace it\n kwds = other_kw_field\n # End try\n\n abstract = record.get('AB', None)\n\n if kwds:\n tmp = [kw.lower() for kw in kwds]\n\n subset = keywords.intersection(set(tmp))\n if len(subset) > 0:\n matches.add(record)\n else:\n combinations = [(a, b) for a in keywords for b in tmp]\n for kwi, kw in combinations:\n if sims.string_match(kwi, kw) >= threshold:\n if record not in matches:\n matches.add(record)\n # End if\n # End for\n # End if\n # End if\n\n if record not in matches and abstract is not None:\n tmp = abstract.lower()\n for kwi in keywords:\n if tmp.find(kwi) > -1:\n matches.add(record)\n break\n # End if\n # End if\n # End for\n\n matches.name = '{}'.format(keywords)\n\n return matches\n# End find_keywords()\n\n\ndef keyword_matches(records, keywords, threshold=60.0):\n \"\"\"Get records for each individiual keyword of interest\n\n Parameters\n ==========\n * records : iterable, of RIS records\n * keywords : list[str], of keywords\n * threshold : float, similarity score threshold - has to be above this to indicate a match.\n\n Returns\n ==========\n * dict, matching records by keyword\n \"\"\"\n if isinstance(keywords, str):\n keywords = [keywords, ]\n\n matching_records = {}\n for kw in keywords:\n matching_records[kw] = find_keywords(records, set([kw, ]), threshold)\n # End for\n\n return KeywordMatch(matching_records)\n\n# End 
keyword_matches()\n\n\ndef keyword_matches_by_criteria(records, keyword_criteria, threshold=60.0):\n \"\"\"Match keywords based on criteria.\n\n Parameters\n ==========\n * records : Metaknowledge record collection\n * keyword_criteria : dict, of sets with each set being a collection of\n keywords\n * threshold : float, similarity score threshold - has to be above this\n to indicate a match.\n\n Returns\n ==========\n * dict, matching records by keyword, and\n {keyword: number of matching records}\n \"\"\"\n criteria_matches = {}\n for criteria in list(keyword_criteria):\n criteria_kws = keyword_criteria[criteria]\n search_results = find_keywords(\n records, criteria_kws, threshold=threshold)\n kw_match = keyword_matches(search_results, criteria_kws, threshold)\n\n criteria_matches[criteria] = kw_match\n # End for\n\n return criteria_matches\n# End keyword_matches_by_criteria()\n\n\ndef collate_keyword_criteria_matches(records, criteria_matches):\n \"\"\"Takes dictionary of keyword matches by criteria and collates into\n a single DataFrame.\n\n Parameters\n ==========\n * records : Metaknowledge record collection\n * criteria_records : dict, of KeywordMatch\n\n Returns\n ==========\n * tuple[dict], matching records by keyword, and\n {keyword: number of matching records}\n\n See Also\n ==========\n * keyword_matches_by_criteria()\n \"\"\"\n criteria_records = {}\n for cm in criteria_matches:\n criteria_records[cm] = criteria_matches[cm].combine_recs()\n\n corpora_df = pd.DataFrame(records.forNLP())\n corpora_df['num_criteria_match'] = 0\n\n for wos_id in corpora_df['id']:\n for cm in criteria_records:\n if criteria_records[cm].containsID(wos_id):\n corpora_df.loc[corpora_df['id'] ==\n wos_id, 'num_criteria_match'] += 1\n # End if\n # End for\n # End for\n\n return corpora_df\n# End collate_keyword_criteria_matches()\n\n\ndef preview_matches_by_keyword(match_records, specific_keyword=None):\n \"\"\"\n Parameters\n ==========\n * match_records : dict, records sorted 
by matching keywords.\n * specific_keyword : str, keyword of interest\n\n See Also\n ==========\n * keyword_matches()\n \"\"\"\n if specific_keyword:\n match_records = match_records[specific_keyword]\n # End if\n\n for kw_name in match_records:\n if len(match_records[kw_name]) > 0:\n print('Keyword:', kw_name)\n for rec in match_records[kw_name]:\n print(' Title:', rec.get('TI'))\n print(' Authors:', '; '.join(rec.get('AU')))\n print(' Journal:', rec.get('SO').title())\n print(' Year:', rec.get('PY'))\n print(' -----------------------------------------------')\n print('===================================================')\n # End if\n # End for\n\n# End preview_matches_by_keyword()\n\n\ndef get_unique_kw_titles(match_records):\n \"\"\"Get unique titles from a record list.\n\n Parameters\n ==========\n * match_records : dict, records sorted by matching keywords.\n\n Returns\n ==========\n set of unique elements of manuscript titles\n\n See Also\n ==========\n * keyword_matches()\n \"\"\"\n titles = set()\n for kw in match_records:\n for rec in match_records[kw]:\n titles.update([rec.get('TI')])\n # End for\n # End for\n\n return titles\n# End get_unique_kw_titles()\n\n\ndef find_pubs_by_title(records, titles):\n \"\"\"Find publications by title.\n\n Parameters\n ==========\n * records : Metaknowledge RecordCollection\n * titles : list, of titles to search for (has to be exact match)\n\n Returns\n ==========\n * Metaknowledge RecordCollection or None if no matches found\n \"\"\"\n if hasattr(titles, 'lower'):\n # titles is a string, convert to list\n titles = [titles]\n\n titles = set(titles)\n\n new_rc = mk.RecordCollection()\n for rec in records:\n curr_doi = rec.get('DI')\n if rec.title in titles and not new_rc.containsID(curr_doi):\n new_rc.add(rec)\n\n if len(new_rc) == 0:\n return None\n\n return new_rc\n# End find_pubs_by_title()\n\n\ndef find_pubs_by_doi(records, dois):\n \"\"\"Find publications by title.\n\n Parameters\n ==========\n * records : 
Metaknowledge RecordCollection\n * titles : list, of titles to search for (has to be exact match)\n\n Returns\n ==========\n * Metaknowledge RecordCollection or None if no matches found\n \"\"\"\n if hasattr(dois, 'lower'):\n # titles is a string, convert to list\n dois = [dois]\n\n dois = set(dois)\n\n new_rc = mk.RecordCollection()\n for rec in records:\n curr_doi = rec.get('DI')\n if curr_doi in dois and not new_rc.containsID(curr_doi):\n new_rc.add(rec)\n\n if len(new_rc) == 0:\n return None\n\n return new_rc\n# End find_pubs_by_title()\n\n\ndef find_pubs_by_authors(records, author_list, threshold=60.0):\n \"\"\"Get publications by specific authors.\n\n Parameters\n ==========\n * records : dict, records sorted by matching keywords.\n * author_list : list, of authors\n * threshold : float, similarity of author names have to be above this\n threshold to be included.\n (0 to 100, where 100 is exact match)\n\n Returns\n ==========\n Metaknowledge Record, set of unique elements of manuscript titles\n\n See Also\n ==========\n * keyword_matches()\n \"\"\"\n matching_pubs = {au_i: mk.RecordCollection() for au_i in author_list}\n for rec in records:\n for au, au_i in it.product(rec.authors, author_list):\n # Get first portion of name string\n tmp = au_i.split(' ')[0].split(',')[0].lower()\n inside = tmp in au.lower()\n if inside:\n similar = sims.string_match(au, au_i) >= threshold\n if similar:\n matching_pubs[au_i].add(rec)\n # End if\n # End if\n # End for\n # End for\n\n for k, rec in matching_pubs.items():\n rec.name = len(rec)\n\n return matching_pubs\n# End find_pubs_by_authors()\n\n\ndef find_pubs_by_journal(records, journal_list):\n \"\"\"Get publications in specific journals\n\n Parameters\n ==========\n * records : Metaknowledge RecordCollection, representing corpora\n * journal_list : list, of journals (has to be exact match)\n\n Returns\n ==========\n dict[mk.RecordCollection]\n \"\"\"\n journal_pubs = {}\n rec_name = records.name\n if 'empty' in 
rec_name.lower():\n raise ValueError(\"Cannot create unique collection - give the RecordCollection a name first!\")\n for rec in records:\n j_name = rec.get('SO')\n u_name = rec_name + '_' + j_name\n if j_name in journal_list:\n journal_pubs[u_name] = journal_pubs.get(u_name, mk.RecordCollection(name=u_name))\n journal_pubs[u_name].add(rec)\n\n return journal_pubs\n# End find_pubs_by_journal()\n\n\ndef preview_matches(search_results, num=5, limit_abstract=None):\n \"\"\"Preview items in results.\n\n Parameters\n ==========\n * search_results : iterable, of RIS records\n * num : int, number of records to preview\n * limit_abstract : int, Number of characters to display in the abstract.\n Defaults to None.\n \"\"\"\n count = 0\n for rec in search_results:\n title = rec.title\n year = rec.get('PY', None)\n authors = rec.get('AU', None)\n if authors:\n authors = \"; \".join(authors)\n year = '- No Publication Year -' if not year else year\n\n tmp = rec.get('DE') + rec.get('ID')\n kwds = '; '.join([kw.strip() for kw in tmp if kw.strip() != ''])\n journal = rec.get('SO')\n\n ab = rec.get('AB', None)\n\n if limit_abstract:\n ab = ab[0:limit_abstract]\n\n print(\"{a}\\n {b} ({c})\\n {d}\\nKeywords: {e}\\n\\n{f}\\n\".format(a=title, b=authors, c=year,\n d=journal, e=kwds, f=ab))\n print('=========================================')\n count += 1\n if count > num:\n break\n # End for\n# End preview_matches()\n","repo_name":"ConnectedSystems/wosis","sub_path":"wosis/analysis/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":11607,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"54"} +{"seq_id":"73165036963","text":"from debugging_benchmark.refactory import Question1RefactoryBenchmarkRepository\nfrom debugging_framework.tools import InputsFromHellEvaluationFuzzer\n\n\ndef main():\n subjects = Question1RefactoryBenchmarkRepository().build()\n for subject in subjects:\n param = subject.to_dict()\n 
print(InputsFromHellEvaluationFuzzer(**param).run())\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"martineberlein/debugging-benchmark","sub_path":"playground/fuzz_refactory.py","file_name":"fuzz_refactory.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"1846783169","text":"import pandas as pd\r\nimport os\r\nimport datetime\r\nimport string\r\nimport requests\r\n\r\n\r\ndef main():\r\n\r\n #get dates\r\n todaysDate = str(datetime.date.today())\r\n todaysYear, todaysMonth, todaysDay = todaysDate.split('-')\r\n nextDay = int(todaysDay) + 1\r\n nextDay = str(nextDay)\r\n\r\n\r\n\r\n #url\r\n url = f\"https://maps.fayetteville-ar.gov/DispatchLogs/json/getIncidents.cshtml/{todaysYear}-{todaysMonth}-{todaysDay}/{todaysYear}-{todaysMonth}-{nextDay}\"\r\n\r\n #get data, store in dataframe\r\n #teest test\r\n r = requests.get(url)\r\n data = r.json()\r\n df = pd.json_normalize(data)\r\n\r\n #writing to csv file\r\n path = r\"C:\\Users\\sdman\\OneDrive\\Desktop\\Fpd_report_generator\\Fpd_data.csv\"\r\n df.to_csv(path, mode='a', index=False, header=None)\r\n \r\n\r\n\r\nmain()\r\n","repo_name":"even-man/fpd-data-scraper","sub_path":"data_scraper/program.py","file_name":"program.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"18874082428","text":"import codecs\nimport logging\nimport asyncio\nfrom enum import IntEnum, unique\nimport struct\nimport zlib\nfrom collections import defaultdict\nimport typing\n\nimport bleak\n\nimport syss_crc\n\nlogger = logging.getLogger(__name__)\n\n\ncrc = syss_crc.CRC()\ncrc.set_config_by_name(\"CRC-16/MODBUS\")\n\n\ndef write_png(buf, width, height):\n assert len(buf) == width*height\n\n def chunk(data):\n return ((len(data) - 4).to_bytes(4, \"big\") +\n data +\n zlib.crc32(data).to_bytes(4, \"big\"))\n\n data = 
b\"\".join(b\"\\x00\" + buf[row*width:(row + 1)*width]\n for row in range(height))\n\n return (\n b\"\\x89PNG\\r\\n\\x1a\\n\" +\n chunk(b\"IHDR\" + struct.pack(\"!2I5B\", width, height, 8, 0, 0, 0, 0)) +\n chunk(b\"IDAT\" + zlib.compress(data, 9)) +\n chunk(b\"IEND\"))\n\n\nclass DateTime(typing.NamedTuple):\n _fmt = struct.Struct(\">BBBBBB\")\n year: int\n month: int\n day: int\n hour: int\n minute: int\n second: int\n\n @classmethod\n def unpack(cls, buf):\n return cls._make(cls._fmt.unpack(buf))\n\n def pack(self):\n return self._fmt.pack(*self)\n\n\nclass FiberSettings(typing.NamedTuple):\n _fmt = struct.Struct(\">16B\")\n type: int # SM,MM,DS,NZ,BIF,CZ1,CZ2,AUTO\n z0: int\n arc_current: int # 10 mV (through a TIA?)\n arc_time: int # 100 ms\n pre_current: int # 10 mV\n pre_time: int # 100 ms\n clean_current: int # 10 mV\n clean_time: int # 100 ms\n overlap: int # 1 µm\n angle_limit: int # 0.1 deg\n face_quality: int # normal,standard,precise\n align_mode: int\n focus_target: int\n arc_cal_target: int\n arc_center: int # ?\n z1: int\n\n @classmethod\n def unpack(cls, buf):\n return cls._make(cls._fmt.unpack(buf))\n\n def pack(self):\n return self._fmt.pack(*self)\n\n\nclass FiberFunc(typing.NamedTuple):\n _fmt = struct.Struct(\">14B\")\n screen: int # X,Y,XY,YX\n angle_detection_dis: int\n face_detection_dis: int\n tensile_test_dis: int\n arc_pause_dis: int\n failure_image_dis: int\n auto_focus_dis: int\n auto_off_dis: int\n brightness: int # 10%\n language: int # CN,EN,PT,FR,RU,ES,PL,HI,AR,IT\n beep_mode: int\n z0: int\n z1: int\n z2: int\n\n @classmethod\n def unpack(cls, buf):\n print(len(buf))\n return cls._make(cls._fmt.unpack(buf))\n\n def pack(self):\n return self._fmt.pack(*self)\n\n\nclass HeatSettings(typing.NamedTuple):\n _fmt = struct.Struct(\">8B\")\n config: int # index\n time0: int # 60mm\n time1: int # 40mm\n time2: int # 34mm\n time3: int # 15mm\n time4: int # custom\n preheat_dis: int\n z0: int\n\n @classmethod\n def unpack(cls, buf):\n return 
cls._make(cls._fmt.unpack(buf))\n\n def pack(self):\n return self._fmt.pack(self)\n\n\nclass AdminSettings(typing.NamedTuple):\n _fmt = struct.Struct(\">12B\")\n # 070507070905040412000000\n et: typing.List[int]\n zero: typing.List[int]\n\n @classmethod\n def unpack(cls, buf):\n d = cls._fmt.unpack(buf)\n assert d[9:12] == (0,)*3\n return cls._make((d[:9], d[9:12]))\n\n def pack(self):\n return self._fmt.pack(*self.et, *self.zero)\n\n\nclass RecordMeta(typing.NamedTuple):\n datetime: DateTime\n failure: int\n loss: int\n angles: typing.List[int]\n face_quality: int\n coordinates: typing.List[int]\n settings: FiberSettings\n face_detection: int\n angle_detection: int\n autofocus: int\n admin: AdminSettings\n charge: int\n image_len: int\n image_handle: int\n\n @classmethod\n def unpack(cls, buf):\n dt = DateTime.unpack(buf[:6])\n fa, lo, *angles, fq = struct.unpack(\">6B\", buf[6:12])\n co = struct.unpack(\">12H\", buf[12:36])\n se = FiberSettings.unpack(buf[36:52])\n fd, ad, af = struct.unpack(\">3B\", buf[52:55])\n adm = AdminSettings.unpack(buf[55:67])\n ch, img_len, img_hdl = struct.unpack(\">BHB\", buf[67:])\n return cls._make((dt, fa, lo, angles, fq, co, se, fd, ad, af, adm,\n ch, img_len, img_hdl))\n\n def pack(self):\n return (\n self.datetime.pack() +\n struct.pack(\">6B12H\", self.failure, self.loss, *self.angles,\n self.face_quality, *self.coordinates) +\n self.settings.pack() +\n struct.pack(\">3B\", self.face_detection, self.angle_detection,\n self.autofocus) +\n self.admin.pack() +\n struct.pack(\">BHB\", self.charge, self.image_len,\n self.image_handle))\n\n\n@unique\nclass Op(IntEnum):\n UNKNOWN_4 = 0x00 # ? firmware\n SET_FIBER_SETTINGS = 0x10\n GET_FIBER_SETTINGS = 0x11\n SET_FIBER_FUNC = 0x12\n GET_FIBER_FUNC = 0x13\n SET_HEAT_SETTINGS = 0x14\n GET_HEAT_SETTINGS = 0x15\n SET_ADMIN_SETTINGS = 0x16\n GET_ADMIN_SETTINGS = 0x17\n SET_AIO = 0x19 # ? Aio1-4\n GET_AIO = 0x20 # ? 
Aio1-4\n SET_RECORD_READ = 0x21 # mark read\n GET_RECORD_IMG = 0x22\n GET_CURRENT_RECORD = 0x23\n GET_TOTAL_COUNT = 0x25\n UNKNOWN = 0x26 # ? 0x88\n GET_CURRENT_COUNT = 0x27 # HH: 0, number of splices\n UNKNOWN_1 = 0x32 # set 12+4 bytes\n UNKNOWN_2 = 0x33 # cmd\n UNKNOWN_3 = 0x34 # set var bytes\n GET_SERIAL = 0x35 # string\n GET_DATETIME = 0x39 # 6b YMDhms\n SET_FACTORY_MENU_CALL = 0x41 # cmd\n SET_MODE = 0x42 # normal, manual, arc cal?\n GET_MODE = 0x43\n UNKNOWN_6 = 0x44 # test firmware upgrade\n SET_CONNECTED = 0x45 # ? send connected\n GET_ASYNC = 0x48 # ? some async event response\n GET_RECORD_LAST = 0x49 # last record index\n GET_RECORD = 0x4a\n SET_RECORD_CLEAR = 0x4b # ?\n SET_OPM_VFL_POWERDOWN = 0xa0\n SET_OPM_UNITS = 0xa1\n GET_OPM = 0xa2\n SET_VFL_MODE = 0xa3\n SET_OPM_WAVELENGTH = 0xa4\n UNKNOWN_5 = 0xa6 # query OPM/VFL?\n MOVE_MOTOR = 0xe0\n SET_ARC = 0xe1\n SET_MOTOR_RESET = 0xe2\n SET_CLEAN = 0xe3\n SET_CONTINUE = 0xe9\n SET_FIRMWARE_DATA = 0xf0 # upgrade\n UNKNOWN_7 = 0xf1 # upgrade\n UNKNOWN_8 = 0xf2 # upgrade\n\n\nclass Invalid(Exception):\n pass\n\n\nclass Incomplete(Exception):\n pass\n\n\nclass Failure(Exception):\n pass\n\n\nMESSAGE = \"0000ffe1-0000-1000-8000-00805f9b34fb\"\n\n\nclass AI9:\n def __init__(self):\n self.loop = asyncio.get_event_loop()\n self._buf = b\"\"\n self._queue = asyncio.Queue()\n self._listeners = defaultdict(list)\n self._listeners[Op.GET_ASYNC].append(self._get_async_cb)\n\n async def connect(self, dev):\n self.dev = dev\n await self.dev.start_notify(MESSAGE, self._handle_msg)\n\n def _handle_msg(self, _handle, msg):\n logger.debug(\"recv %s\", codecs.encode(msg, \"hex\").decode(\"ascii\"))\n self._buf += msg\n while self._buf:\n try:\n op, body = self._unpack()\n except Invalid as e:\n logger.error(\"Invalid message %s\", e, exc_info=True)\n continue\n except Incomplete as e:\n logger.debug(\"Incomplete message %s\", e)\n return\n if op in self._listeners:\n for cb in self._listeners[op]:\n try:\n cb(op, body)\n 
except Exception:\n logger.error(\"Callback exception\", exc_info=True)\n else:\n self._queue.put_nowait((op, body))\n\n def _peek(self, n):\n if len(self._buf) < n:\n raise Incomplete(n)\n return self._buf[:n]\n\n def _pop(self, n):\n data = self._peek(n)\n self._buf = self._buf[n:]\n return data\n\n def _unpack(self):\n (start, op, length) = struct.unpack(\">HBH\", self._peek(5))\n if start != 0x7e7e:\n raise Invalid(self._pop(1)) # resynchronize\n msg = self._pop(5 + length + 3)\n head, body, tail = msg[:5], msg[5:-3], msg[-3:]\n crc_want = crc.compute(head + body)\n crc_have, stop = struct.unpack(\">HB\", tail)\n if (crc_have, stop) != (crc_want, 0xaa):\n raise Invalid(crc_have, crc_want, stop)\n try:\n op = Op(op)\n except ValueError:\n raise Invalid(op)\n logger.info(\"recv msg %s %s\", op,\n codecs.encode(body, \"hex\").decode(\"ascii\"))\n return op, body\n\n def _pack(self, op, body):\n msg = struct.pack(\">HBH\", 0x7e7e, op, len(body)) + body\n return msg + struct.pack(\">HB\", crc.compute(msg), 0xaa)\n\n async def _write(self, op, body):\n while not self._queue.empty():\n logger.error(\"Unhandled message %s\", self._queue.get_nowait())\n logger.info(\"send msg %s %s\", op,\n codecs.encode(body, \"hex\").decode(\"ascii\"))\n msg = self._pack(op, body)\n logger.debug(\"send %s\", codecs.encode(msg, \"hex\").decode(\"ascii\"))\n try:\n await self.dev.write_gatt_char(MESSAGE, msg)\n except AttributeError: # loopback\n self._handle_msg(None, self._pack(op, b\"\\x66\"))\n\n async def _read(self):\n return await self._queue.get()\n\n async def do(self, op, body):\n await self._write(op, body)\n result, ret = await self._read()\n if result != op:\n raise Failure(result, op)\n return ret\n\n async def get(self, op, body=b\"\\x55\"):\n return await self.do(op, body)\n\n async def set(self, op, body=b\"\\x55\", expect=b\"\\x66\"):\n ret = await self.do(op, body)\n if ret != expect:\n raise Failure(ret)\n\n def _get_async_cb(self, op, body):\n event = {\n 0x01: 
\"lid open\",\n 0x02: \"lid close\",\n 0x0f: \"right fiber misplaced\",\n 0x0d: \"left fiber misplaced\",\n 0x04: \"found/aligned\",\n 0x06: \"arc\",\n 0x07: \"splice success\",\n 0x08: \"splice failure\",\n 0x11: \"fiber already spliced\",\n 0x12: \"left face/angle unacceptable\",\n 0x14: \"both face/angle unacceptable\",\n 0x15: \"fiber not found\",\n 0x31: \"left fiber not found\",\n 0x32: \"right fiber not found\",\n 0x33: \"heater warmup\",\n 0x21: \"heat start\",\n 0x22: \"heat done\",\n }.get(body[0], \"unknown\")\n logger.info(\"event: %s (%#02x) loss=%s dB\",\n event, body[0], body[1]*.01)\n\n async def move(self, side, direction, steps, speed=9):\n motor, move = {\n \"left\": {\n \"down\": (2, 4),\n \"left\": (0, 2),\n \"right\": (0, 1),\n \"up\": (2, 3),\n },\n \"right\": {\n \"down\": (3, 4),\n \"left\": (1, 1),\n \"right\": (1, 2),\n \"up\": (3, 3),\n },\n \"focus\": {\n \"left\": (5, 2), # x\n \"right\": (5, 1), # x\n \"down\": (4, 2), # y?\n \"up\": (4, 1), # y?\n },\n }[side][direction]\n await self.set(Op.MOVE_MOTOR, bytes([motor, move, 0, steps, speed]))\n\n async def _read_img(self, handle):\n buf = b\"\"\n while True:\n op, dat = await self._read()\n if op != Op.GET_RECORD_IMG:\n raise Invalid(op)\n this, total, part = dat[:3]\n if handle != this:\n raise Invalid(handle, this)\n buf += dat[3:]\n if part >= total:\n break\n return buf\n\n def _decode_img(self, img):\n \"\"\"Binary run length encoding\"\"\"\n out = b\"\"\n for i in range(0, len(img), 2):\n di = int.from_bytes(img[i:i + 2], \"big\")\n out += bytes([0xff*(di >> 15)]) * (di & 0x7fff)\n if len(out) != 480*640:\n raise Invalid(len(out))\n return out\n\n async def read_record(self, index):\n await self._write(Op.GET_RECORD, index.to_bytes(2, \"big\"))\n op, meta = await self._read()\n if op != Op.GET_CURRENT_RECORD:\n raise Invalid(op)\n meta = RecordMeta.unpack(meta)\n img = None\n if meta.image_len:\n img = await self._read_img(meta.image_handle)\n assert len(img) == meta.image_len, 
(len(img), meta.image_len)\n img = self._decode_img(img)\n return meta, img\n\n\ndef main():\n from argparse import ArgumentParser\n\n logging.basicConfig(level=logging.INFO)\n loop = asyncio.get_event_loop()\n p = ArgumentParser()\n p.add_argument(\"address\", nargs=\"?\")\n args = p.parse_args()\n\n async def run():\n dev = AI9()\n\n if args.address is None:\n ble = await bleak.BleakScanner.find_device_by_filter(\n lambda dev, addr:\n dev.name and dev.name.startswith(\"AI-9:\")\n )\n elif args.address != \"\":\n ble = await bleak.BleakScanner.find_device_by_address(args.address)\n\n async with bleak.BleakClient(ble) as ble:\n await dev.connect(ble)\n if True:\n logger.info(\"%s\", FiberSettings.unpack(\n await dev.get(Op.GET_FIBER_SETTINGS))) # body=0x08 0x00?\n logger.info(\"%s\", FiberFunc.unpack(\n await dev.get(Op.GET_FIBER_FUNC))) # body=0xff?\n logger.info(\"%s\", HeatSettings.unpack(\n await dev.get(Op.GET_HEAT_SETTINGS)))\n logger.info(\"%s\", AdminSettings.unpack(\n await dev.get(Op.GET_ADMIN_SETTINGS)))\n logger.info(\"%s\", DateTime.unpack(\n await dev.get(Op.GET_DATETIME)))\n logger.info(\"%s\", (await dev.get(Op.GET_SERIAL)).decode())\n await dev.get(Op.GET_TOTAL_COUNT)\n await dev.get(Op.GET_CURRENT_COUNT)\n # await dev.set(Op.SET_CONNECTED)\n\n if True:\n await dev.set(Op.SET_MODE, b\"\\x01\") # factory mode, manual adjust\n await dev.get(Op.GET_MODE)\n for side in \"left right\".split():\n for move in \"down left right up\".split():\n await dev.move(side, move, steps=100, speed=9)\n # await dev.set(Op.SET_MOTOR_RESET, b\"\\x01\")\n # await dev.set(Op.SET_ARC, b\"\\x03\")\n # await dev.set(Op.SET_CLEAN)\n # await dev.set(Op.SET_CONTINUE)\n await dev.set(Op.SET_MODE, b\"\\x00\")\n await dev.get(Op.GET_MODE)\n\n if True:\n await dev.set(Op.SET_OPM_VFL_POWERDOWN, b\"\\xaa\") # enable\n await dev.set(Op.SET_VFL_MODE, b\"\\x00\")\n await dev.set(Op.SET_OPM_UNITS, b\"\\x00\")\n await dev.set(Op.SET_OPM_WAVELENGTH, b\"\\x04\") # 4:1550\n await 
dev.get(Op.GET_OPM)\n await dev.set(Op.SET_OPM_VFL_POWERDOWN) # disable\n\n if True:\n n = int.from_bytes(await dev.get(Op.GET_RECORD_LAST), \"big\")\n for i in range(n + 1):\n meta, img = await dev.read_record(i)\n logger.info(\"image meta %s\", meta)\n open(\"img_{}_meta.bin\".format(i), \"wb\").write(meta.pack())\n if img is not None:\n open(\"img_{}.png\".format(i), \"wb\").write(\n write_png(img, 640, 480))\n # await dev.set(Op.SET_RECORD_READ, (0).to_bytes(\"big\"))\n\n await asyncio.sleep(1000)\n\n loop.run_until_complete(run())\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"quartiq/ai9","sub_path":"ai9.py","file_name":"ai9.py","file_ext":"py","file_size_in_byte":15475,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"11310404986","text":"#!/usr/bin/python\nimport sys\n\n\"\"\" \nThis file is used to be able to manipulate the \"number of clicks\"\ndata in a spreadsheet because just having the number of clicks\nfor every user was killing both LibreOffice and Excel as it was\nbasically hundreds of thousands of points to plot...\n\nHere instead we display the number of user for every possible \"number\nof clicks\" value. There still are a lot of value but this time it is\nacceptable. 
Moreover, this is what we are interested in in fact in the\nend anyway: the distribution of the \"number of clicks\" values in terms\nof number of users\n\n\"\"\"\n\n# Here we are converting from a mapping user_id -> number of clicks\n# To a mapping number of clicks -> number of users with this number of clicks\nf = open(sys.argv[1])\nres = [0] * 279431\nfor l in f:\n a, b = l.strip().split(',')\n res[int(b)] += 1\n\noutput_path = 'aggregated.txt' if len(sys.argv) < 3 else sys.argv[2].strip()\n\no = open(output_path, 'w')\no.writelines('\\n'.join([\"%s,%s\" % (i, res[i]) for i in xrange(0, len(res)) if res[i] is not 0]))","repo_name":"tdubourg/collaborative-personalized-pagerank-public","sub_path":"logs-processing/users_clicks_transform_for_spreadsheet_visualization.py","file_name":"users_clicks_transform_for_spreadsheet_visualization.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"33942125498","text":"from colors import *\nimport time\n\n# Arquivo para armazenar funções genéricas como diálogos e perguntas.\n\n\ndef EscreverTypeWriter(text: str, intervalInSeconds: int = 0.05):\n for x in text:\n print(x,end=\"\",flush=True)\n time.sleep(intervalInSeconds)\n print(f\"{ResetColor}\")\n \n\n# def PerguntaBooleana(pergunta: Pergunta em formato string)\n#Gera uma pergunta que o usuário só pode responder com sim ou não.\ndef PerguntaBooleana(pergunta: str) -> True or False:\n respostaPergunta = input(pergunta + \" s/n \\n\")\n\n if respostaPergunta.lower() in [\"sim\", \"s\"]:\n return True\n elif respostaPergunta.lower() in [\"não\", \"nao\", \"n\"]:\n return False\n else:\n #print(f\"{main.BrightRed}Você só pode responder essa pergunta com sim ou não!\")\n PerguntaBooleana(pergunta)\n\n\n#def ChecarInventario(categoria: Categoria em formato string(opcional))\n'''\ndef ChecarInventario(categoria: str = None):\n\n if categoria == None:\n \n\n print(f\"Carregando 
inventário do Fracassado...\")\n time.sleep(0.5)\n\n print(f\"{main.BrightWhite}---------Dados Genéricos do Fracassado-----------------{main.Reset}\")\n time.sleep(0.1)\n print(f\"NOME DO FRACASSADO: {main.PerfilDoFracassado['Nome']}\")\n time.sleep(0.1)\n print(f\"IDADE DO FRACASSADO: {main.PerfilDoFracassado['Idade']}\")\n time.sleep(0.1)\n print(f\"IDADE DO FRACASSADO: {main.PerfilDoFracassado['Idade']}\")\n else:\n print(f\"Carrendo informação do Fracassado...\")\n time.sleep(0.3)\n if main.PerfilDoFracassado[categoria]:\n print(f\"{categoria} do FRACASSADO: {main.PerfilDoFracassado[categoria]}\")\n else:\n print(\"O FRACASSADO NÃO SABE NEM QUAIS SÃO SUAS CATEGORIAS DO INVENTARIO!\")\n\n'''\n","repo_name":"VictorLemosDEV/RPGTexto","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":1684,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31550177561","text":"import pickle\nimport requests\nimport re\nimport threading\nimport time\nimport telepot\nimport logging\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom bs4 import BeautifulSoup\n\n\n# CGV\n# GET방식으로 통신\n# 파라미터\n# arecode : 지역번호 서울 | 경기 | 대전 | 인천 | 대구 | 강원 [01 | 02 | 03, 205 | 202 | 11 | 12]\n# theatercode : 상영관 번호 용산 | 왕십리 | 대학로 | 강남 [0013 | 0074 | 0063 | 0056]\n# date : 날짜 YYYYMMDD\ndef get_cgv_movie_list(date, therater, shall):\n theater_dic = {'강남': '0056', '강변': '0001', '건대입구': '0229', '구로': '0010', '대학로': '0063',\n '동대문': '0252', '등촌': '0230', '명동': '0009', '명동역 씨네라이브러리': '0105',\n '목동': '0011', '미아': '0057', '불광': '0030', '상봉': '0046', '성신여대입구': '0300',\n '송파': '0088', '수유': '0276', '신촌아트레온': '0150', '압구정': '0040', '여의도': '0112',\n '영등포': '0059', '왕십리': '0074', '용산아이파크몰': '0013', '중계': '0131', '천호': '0199',\n '청담씨네시티': '0107', '피카다리1958': '0223', '하계': '0164', '홍대': '0191',\n 'CINE DE CHEF 압구정': 'P001', 'CINE DE CHEF 용산아이파크몰': 'P013'}\n \n shall_dic = {'IMAX':'07', 'CINE de 
CHEF':'103', 'GOLD CLASS':'99', '씨네앤포레':'0001',\n '씨네앤리빙룸':'LM', 'SphereX':'SPX', 'STARIUM':'110', 'PREMIUM':'PRM',\n 'Sweet Box':'09', 'SKYBOX':'SKY', 'CINE KIDS':'CK', 'SOUNDX':'SDX',\n '4DX':'4D14', 'SCREENX':'SCX', '4DX SCREEN':'4DXSC', }\n URL = \"http://www.cgv.co.kr/common/showtimes/iframeTheater.aspx?areacode=01&theatercode=\"\n try:\n URL += theater_dic[therater] + \"&date=\" + date\n except KeyError as e: # therater에 therater_dic에 없는 키값일 경우 키오류 발생\n print(\"Error Message : {}\".format(e))\n URL += \"0013&date=\" + date\n response = requests.get(URL)\n html = response.text\n soup = BeautifulSoup(html, 'html.parser')\n\n chkmonth = soup.select_one('#slider > div:nth-child(1) > ul > li.on > div > a > span').get_text().strip()[0:2]\n chkday = soup.select_one('#slider > div:nth-child(1) > ul > li.on > div > a > strong').get_text().strip()\n chkdate = chkmonth + chkday\n if chkdate != date[4:8]:\n return []\n\n search_special_hall = shall # IMAX, 4DX, CINE de CHEF, SCREENX, Laser\n\n movie_split_list = []\n line = []\n movie_select_list = soup.select('body > div > div.sect-showtimes > ul > li')\n for msl in movie_select_list:\n title = msl.select_one('div > div.info-movie > a > strong').get_text().strip()\n\n table_select_list = msl.select('div > div.type-hall')\n for tsl in table_select_list:\n special_hall = tsl.select_one('div > ul > li:nth-child(2)').get_text().strip()\n movie_tot_seat = tsl.select_one('div.info-hall > ul > li:nth-child(3)').get_text().strip()\n m = re.search(r\"(총)?(\\s)*(?P\\d*)(석)?\", movie_tot_seat)\n if m:\n movie_tot_seat = m['totcnt']\n if search_special_hall in special_hall:\n line.clear()\n line.append(title)\n timetable = tsl.select('div.info-timetable > ul > li')\n for tbl in timetable:\n movie_link = tbl.find('a', href=True)\n if movie_link:\n movie_link = 'http://www.cgv.co.kr' + str(movie_link['href'])\n else:\n movie_link = \"-1\"\n movie_start_time = tbl.select_one('em').get_text().strip()\n movie_rest_seat = 
tbl.select_one('span').get_text().strip()\n line.append(movie_start_time)\n m = re.search(r\"(잔여좌석)?(?P((\\d)*(마감)?))(석)?\", movie_rest_seat)\n if m:\n movie_rest_seat = m['seatcnt']\n if movie_rest_seat == \"매진\" or movie_rest_seat == \"\":\n movie_rest_seat = \"매진\"\n line.append(movie_rest_seat)\n elif movie_rest_seat == \"마감\":\n line.append(movie_rest_seat)\n elif movie_rest_seat == \"예매준비중\" or movie_rest_seat == \"준비중\":\n movie_rest_seat = \"에매준비중\"\n line.append(movie_rest_seat)\n else:\n movie_rest_seat = \"(\" + movie_rest_seat + \"/\" + movie_tot_seat + \")\"\n line.append(movie_rest_seat)\n line.append(movie_link)\n movie_split_list.append(line)\n else:\n continue\n return movie_split_list\n\n\ndef set_cgv_therater_reg(therater):\n therater = therater.replace(' ', '')\n\n if therater[-1] == \"점\" or therater[-1] == \"관\" or therater[-1] == \"역\":\n therater = therater[:-1]\n\n therater = re.sub(r\"(강남|신논현|논현|역삼|논현동|역삼동|신논현동)\", \"강남\", therater)\n therater = re.sub(r\"(강변|터미널|동서울터미널|동서울|버스터미널|동서울버스터미널|시외버스터미널|동서울시외버스터미널)\", \"강변\", therater)\n therater = re.sub(r\"(건대입구|건대|건국대|건국대입구|건국대학교|건국대학교입구)\", \"건대입구\", therater)\n therater = re.sub(r\"(대학로|혜화|성균관|성균관대|성대|헤화동)\", \"대학로\", therater)\n therater = re.sub(r\"(등촌|가양|증미|등촌동|가양동|증미동)\", \"등촌\", therater)\n therater = re.sub(r\"(명동역씨네라이브러리|명동씨네라이브러리|씨네라이브러리|씨네라이브)\", \"명동역씨네라이브러리\", therater)\n therater = re.sub(r\"(목동|오목교|양평)\", \"목동\", therater)\n therater = re.sub(r\"(미아|미아사거리|미아동)\", \"미아\", therater)\n therater = re.sub(r\"(상봉|망우|상봉터미널|망우터미널|상봉시외버스터미널|상봉버스터미널|망우동|상봉동)\", \"상봉\", therater)\n therater = re.sub(r\"(성신여대입구|성신여대|성신여자대학교|성신여자대학교입구|성신여자대|성신여자대입구)\", \"성신여대입구\", therater)\n therater = re.sub(r\"(송파|장지|문정|북정|송파파크하비오|장지동|문정동|북정동)\", \"송파\", therater)\n therater = re.sub(r\"(수유|쌍문|수유동|쌍문동)\", \"수유\", therater)\n therater = re.sub(r\"(신촌아트레온|신촌|연대|연세대|연세대학교|신촌동)\", \"신촌아트레온\", therater)\n therater = re.sub(r\"(여의도|여의나루)\", \"여의도\", therater)\n therater = re.sub(r\"(영등포|타임스퀘어|영등포타임스퀘어)\", 
\"영등포\", therater)\n therater = re.sub(r\"(용산아이파크몰|용산|신용산|아이파크몰|아이파크몰용산)\", \"용산아이파크몰\", therater)\n therater = re.sub(r\"(청담씨네시티|씨네시티|청담|씨네시티청담|청담씨네씨티|씨네씨티|씨네씨티청담)\", \"청담씨네시티\", therater)\n therater = re.sub(r\"(피카디리1958|피카디리|종로피카디리|종로피카디리1958|종로3가|종로|종로3가피카디리|종로3가피카디리1958|피카다리\"\n r\"|피카다리1958|종로피카다리|종로3가피카다리|종로피카다리1958|종로3가피카다리1958)\", \"피카다리1958\", therater)\n therater = re.sub(r\"(홍대|홍대입구|홍익대학교)\", \"홍대\", therater)\n therater = re.sub(r\"(CINEDECHEF압구정|씨네드셰프압구정)\", \"CINEDECHEF압구정\", therater, flags=re.IGNORECASE)\n therater = re.sub(r\"(CINEDECHEF용산아이파크몰|CINEDECHEF용산|CINEDECHEF아이파크몰용산|CINEDECHEF신용산|CINEDECHEF아이파크몰\"\n r\"|씨네드셰프용산아이파크몰|씨네드셰프용산|씨네드셰프아이파크몰용산|씨네드셰프신용산|씨네드셰프아이파크몰)\",\n \"CINEDECHEF용산아이파크몰\", therater, flags=re.IGNORECASE)\n\n return therater\n\n\ndef cgv_crawling(date, therater, shall):\n therater = set_cgv_therater_reg(therater)\n filename = 'cgv' + therater + shall + '.pickle'\n\n try:\n with open(filename, 'rb') as f:\n sdate = pickle.load(f)\n\n except (EOFError, FileNotFoundError):\n sdate = date\n\n logger.info('CGV 검색 시작 날짜 : {}'.format(sdate))\n while True:\n movie_list = get_cgv_movie_list(sdate, therater, shall)\n try:\n if not movie_list:\n logger.info(\"CGV {} {} Not Found ({})\".format(therater, shall, sdate))\n raise ValueError\n logger.info(\"CGV {} {} Found ({})\".format(therater, shall, sdate))\n sendmsg = \"*CGV \" + therater + \" \" + shall + \"*\\n\"\n week = t[datetime.strptime(sdate, \"%Y%m%d\").weekday()]\n sendmsg += sdate + \" (\" + week + \") 예매 오픈\\n\"\n for i in range(len(movie_list)):\n sendmsg = sendmsg + \"*\" + movie_list[i][0] + \"*\\n\"\n for j in range(1, len(movie_list[i]), 3):\n if movie_list[i][j + 2] == \"-1\":\n if movie_list[i][j + 1] == \"예매준비중\":\n logger.info(\"CGV {} {} 예매준비중 ({})\".format(therater, shall, sdate))\n raise ValueError\n sendmsg = sendmsg + movie_list[i][j] + \" \"\n sendmsg = sendmsg + movie_list[i][j + 1] + \"\\n\"\n else:\n sendmsg = sendmsg + \"[\" + movie_list[i][j] + \"](\" + 
movie_list[i][j + 2] + \") \"\n sendmsg = sendmsg + movie_list[i][j + 1] + \"\\n\"\n sendmsg += \"\\n\"\n try:\n bot.sendMessage(mc, sendmsg, parse_mode=\"Markdown\", disable_web_page_preview=True)\n except Exception as e:\n logger.debug(\"CGV {} {} ({})Message Exception : {}\".format(therater, shall, sdate, e))\n raise ValueError\n \n # 결과를 찾았으니 다음날로 넘어간다\n sdate = datetime.strptime(sdate, \"%Y%m%d\")\n sdate += timedelta(days=1)\n sdate = sdate.strftime(\"%Y%m%d\")\n with open(filename, 'wb') as f:\n pickle.dump(sdate, f)\n except ValueError:\n time.sleep(30)\n\n \nif __name__ == \"__main__\":\n t = ['월', '화', '수', '목', '금', '토', '일']\n latest_date = \"20200901\" #datetime.today().strftime(\"%Y%m%d\") # 프로그램을 실행시킨 시간부터 탐색\n # 텔레그램 봇 연결 파트\n mytoken = \"\"\n mc = \"\"\n bot = telepot.Bot(mytoken)\n\n logger = logging.getLogger(__name__)\n formatter = logging.Formatter(fmt='[%(asctime)s][%(levelname)s|%(lineno)s] %(message)s', datefmt='%Y-%m-%d %H:%M:%S')\n\n sh = logging.StreamHandler()\n fh = logging.FileHandler('./log.log')\n\n sh.setFormatter(formatter)\n fh.setFormatter(formatter)\n\n logger.addHandler(sh)\n logger.addHandler(fh)\n logger.setLevel(level=logging.INFO)\n\n logger.info('서버가 정상적으로 시작되었습니다.')\n logger.info('검색 디폴트 날짜 : {}'.format(latest_date))\n\n # 영화 리스트 불러오기\n # CGV : threading.Thread(target=cgv_crawling, args=(검색디폴트날짜, 지점, 상영관,))\n cgv = threading.Thread(target=cgv_crawling, args=(latest_date, '천호', 'IMAX',))\n\n cgv.start()\n\n cgv.join()\n\n logger.info(\"Server Exit\")\n","repo_name":"jaminkoodev/moive_notifier","sub_path":"cgv.py","file_name":"cgv.py","file_ext":"py","file_size_in_byte":11750,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"33699323408","text":"\nfrom os import listdir\nfrom os.path import join\nfrom scipy.io import loadmat\nimport numpy as np\nimport torch\n\n\nclass speech_data(Dataset):\n\n def __init__(self, folder_path):\n\n self.path = folder_path\n 
self.files = listdir(self.path)\n self.length = len(self.files)\n\n def __getitem__(self, index):\n\n a = torch.LongTensor(1).random_(0, len(self.files))\n\n folder = join(self.path, self.files[int(a)])\n rand_index = torch.LongTensor(1).random_(0, len(listdir(folder)))\n d = loadmat(join(folder, listdir(folder)[rand_index]))\n\n # print(\"Selected - \",self.files[int(a)])\n if ((self.files[int(a)])[0:2] == \"UA\"):\n e = 1 # for trusted data\n else:\n e = 0 # for weakly-labeled data\n\n return np.array(d['mcc']), np.array(d['label']), np.array(e)\n\n def __len__(self):\n return self.length\n","repo_name":"Mihir3009/Weak-Speech-Supervision","sub_path":"building_model/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"26681707960","text":"import numpy as np\nimport pandas as pd\n\ndata = np.fromfile('data.json', dtype=np.uint8)\ndf = pd.DataFrame(data)\n\n# check for packet separator (2 successive bytes with 0x80 set)\npacket_sep = (df.ix[:, 0] & 0x80) == 0x80\npacket_sep = packet_sep & packet_sep.shift(1)\npacket_sep = packet_sep.shift(-1).fillna(0).astype(int)\n\n# increase packet id every time a new packet separator is reported\npacket = np.cumsum(packet_sep)\ndf = pd.concat([df, packet], axis=1)\ndf.columns = ['data', 'packet']\n\n# group bytes by packet id and count the size of each packet\npacket_sizes = df.groupby('packet').count().reset_index()\npacket_sizes.groupby('data').count()\n\n# increase cycle id every time 0x60 shows up and count cycle size in bytes\ncycle = np.cumsum(((df.packet.diff()) == 1).astype(int))\ncycle.name = 'cycle'\ndf = pd.concat([df, cycle], axis=1)\ncycle_sizes = 
df.groupby('cycle').count().reset_index()\ncycle_sizes.groupby('data').count()\n","repo_name":"HiveTracker/hivetrackerjs","sub_path":"debug/parsing.py","file_name":"parsing.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"54"} +{"seq_id":"43589958441","text":"\r\n###############\r\n# Authored by Weisheng Jiang\r\n# Book 4 | From Basic Arithmetic to Machine Learning\r\n# Published and copyrighted by Tsinghua University Press\r\n# Beijing, China, 2022\r\n###############\r\n\r\n# Bk4_Ch13_01.py\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nA = np.array([[1.25, -0.75],\r\n [-0.75, 1.25]])\r\n\r\nxx1, xx2 = np.meshgrid(np.linspace(-8, 8, 9), np.linspace(-8, 8, 9))\r\nnum_vecs = np.prod(xx1.shape);\r\n\r\nthetas = np.linspace(0, 2*np.pi, num_vecs)\r\n\r\nthetas = np.reshape(thetas, (-1, 9))\r\nthetas = np.flipud(thetas);\r\n\r\nuu = np.cos(thetas);\r\nvv = np.sin(thetas);\r\n\r\nfig, ax = plt.subplots()\r\n\r\nax.quiver(xx1,xx2,uu,vv,\r\n angles='xy', scale_units='xy',scale=1, \r\n edgecolor='none', facecolor= 'b')\r\n\r\nplt.ylabel('$x_2$')\r\nplt.xlabel('$x_1$')\r\nplt.axis('scaled')\r\nax.set_xlim([-10, 10])\r\nax.set_ylim([-10, 10])\r\nax.grid(linestyle='--', linewidth=0.25, color=[0.5,0.5,0.5])\r\nax.set_xticks(np.linspace(-10,10,11));\r\nax.set_yticks(np.linspace(-10,10,11));\r\nplt.show()\r\n\r\n# Matrix multiplication\r\nV = np.array([uu.flatten(),vv.flatten()]).T;\r\nW = V@A;\r\n\r\nuu_new = np.reshape(W[:,0],(-1, 9));\r\nvv_new = np.reshape(W[:,1],(-1, 9));\r\n\r\nfig, ax = plt.subplots()\r\n\r\nax.quiver(xx1,xx2,uu,vv,\r\n angles='xy', scale_units='xy',scale=1, \r\n edgecolor='none', facecolor= 'b')\r\n\r\nax.quiver(xx1,xx2,uu_new,vv_new,\r\n angles='xy', scale_units='xy',scale=1, \r\n edgecolor='none', facecolor= 'r')\r\n\r\nplt.ylabel('$x_2$')\r\nplt.xlabel('$x_1$')\r\nplt.axis('scaled')\r\nax.set_xlim([-10, 10])\r\nax.set_ylim([-10, 
10])\r\nax.grid(linestyle='--', linewidth=0.25, color=[0.5,0.5,0.5])\r\nax.set_xticks(np.linspace(-10,10,11));\r\nax.set_yticks(np.linspace(-10,10,11));\r\nplt.show()\r\n\r\n\r\nfig, ax = plt.subplots()\r\nax.quiver(xx1*0,xx2*0,uu,vv,\r\n angles='xy', scale_units='xy',scale=1, \r\n edgecolor='none', facecolor= 'b')\r\n\r\nax.quiver(xx1*0,xx2*0,uu_new,vv_new,\r\n angles='xy', scale_units='xy',scale=1,\r\n edgecolor='none', facecolor= 'r')\r\n\r\nplt.ylabel('$x_2$')\r\nplt.xlabel('$x_1$')\r\nplt.axis('scaled')\r\nax.set_xlim([-2, 2])\r\nax.set_ylim([-2, 2])\r\nax.grid(linestyle='--', linewidth=0.25, color=[0.5,0.5,0.5])\r\nax.set_xticks(np.linspace(-2,2,5));\r\nax.set_yticks(np.linspace(-2,2,5));\r\nplt.show()\r\n","repo_name":"Visualize-ML/Book4_Power-of-Matrix","sub_path":"Book4_Ch13_Python_Codes/Bk4_Ch13_01.py","file_name":"Bk4_Ch13_01.py","file_ext":"py","file_size_in_byte":2284,"program_lang":"python","lang":"en","doc_type":"code","stars":6535,"dataset":"github-code","pt":"54"} +{"seq_id":"13629120486","text":"from django.shortcuts import render, redirect, reverse, get_object_or_404, HttpResponse\nfrom django.views.decorators.http import require_POST\nfrom django.contrib import messages\nfrom django.conf import settings\nfrom .forms import OrderForm\nfrom products.models import Product\nfrom profiles.models import UserProfile\nimport stripe\nimport json\n\n\n@require_POST\ndef cache_checkout_data(request):\n try:\n pid = request.POST.get('client_secret').split('_secret')[0]\n stripe.api_key = settings.STRIPE_SECRET_KEY\n stripe.PaymentIntent.modify(pid, metadata={\n 'username': request.user,\n })\n return HttpResponse(status=200)\n except Exception as e:\n messages.error(request, 'Sorry, your payment cannot be \\\n processed right now. 
Please try again later.')\n return HttpResponse(content=e, status=400)\n\ndef checkout(request, product_id):\n stripe_public_key = settings.STRIPE_PUBLIC_KEY\n stripe_secret_key = settings.STRIPE_SECRET_KEY\n\n if request.user.is_authenticated:\n if request.method == 'POST':\n product = get_object_or_404(Product, pk=product_id)\n\n\n form_data = {\n 'first_name': request.POST['first_name'],\n 'last_name': request.POST['last_name'],\n 'email': request.POST['email'],\n 'country': request.POST['country'],\n }\n \n order_form = OrderForm(form_data)\n \n if order_form.is_valid():\n order = order_form.save(commit=False)\n order.product = product\n order.total = order.product.price\n order.save()\n\n profile = UserProfile.objects.get(user=request.user)\n order.user_profile = profile\n order.save()\n\n messages.success(\n request, 'Your new purchase has been made successfully.')\n\n return redirect('/profile')\n \n else:\n \n product = get_object_or_404(Product, pk=product_id)\n order_form = OrderForm()\n total = product.price\n stripe_total = round(total * 100)\n stripe.api_key = stripe_secret_key\n intent = stripe.PaymentIntent.create(\n amount=stripe_total,\n currency=settings.STRIPE_CURRENCY,\n )\n\n if not stripe_public_key:\n messages.warning(request, 'Stripe public key is missing. 
\\\n Did you forget to set it in your environment?')\n\n template = 'checkout/checkout.html'\n context = {\n 'order_form': order_form,\n 'stripe_public_key': stripe_public_key,\n 'client_secret': intent.client_secret,\n 'product': product,\n }\n\n return render(request, template, context)\n \n messages.info(request, 'Please log in before purchasing')\n\n return redirect(reverse('account_login'))\n\n","repo_name":"DonnaIB/superior_spanish","sub_path":"checkout/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31694571254","text":"from flask import Flask, render_template, request, jsonify\nfrom elasticsearch import Elasticsearch\nfrom flask_cors import CORS\nfrom funk_svd.dataset import fetch_ml_ratings\nfrom funk_svd import SVD\nfrom sklearn.metrics import mean_absolute_error\n\nimport pandas as pd\n\napp = Flask(__name__)\nCORS(app)\napp.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0\n\n\n@app.route('/')\ndef index():\n\treturn {}\n\n@app.route('/api/search', methods=['POST','GET'])\ndef search():\n\tbody = request.get_json()\n\tquery = body[\"query\"]\n\tes = Elasticsearch([\"http://localhost:9200/\"], timeout=5000)\n\tresult = es.search(index=\"listing_dataset\",\n\t\t\t\t\t doc_type=\"document\",\n\t\t\t\t\t body={\n\t\t\t\t\t\t \"query\": {\n\t\t\t\t\t\t\t\t\"match\": {\n\t\t\t\t\t\t\t\t\t\"text_content\": query\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t },\n\t\t\t\t\t\t \n\t\t\t\t\t })\n\thits = result[\"hits\"][\"hits\"]\n\tres = {\"Search\": []}\n\tfor i in range(len(hits)):\n\t\tcurr = {}\n\t\tdoc_id = hits[i][\"_source\"]['docno']\n\t\tcontent = hits[i][\"_source\"][\"text_content\"].strip()\n\t\tneig = hits[i][\"_source\"]['neighborhood'].strip()\n\t\tlat = hits[i][\"_source\"]['latitude']\n\t\tlongt = hits[i][\"_source\"]['longitude']\n\t\tpic = hits[i][\"_source\"]['pic_url']\n\t\tname = hits[i][\"_source\"]['name']\n\t\tcurr[\"docno\"] 
= doc_id\n\t\tcurr[\"name\"] = name\n\t\tcurr[\"description\"] = content\n\t\tcurr[\"neighborhood\"] = neig\n\t\tcurr[\"latitude\"] = lat\n\t\tcurr[\"longitude\"] = longt\n\t\tcurr[\"pic_url\"] = pic\n\t\tres[\"Search\"].append(curr)\n\treturn res\n\n\n@app.route('/api/maps', methods=['POST'])\ndef plot_maps():\n\tbody = request.get_json()\n\tneig = body['neighborhood']\n\tprint(neig)\n\tdf_s = pd.read_csv('./data/neighborhood_data.csv')\n\tcols = ['income_bin', 'white_bin', 'crime_bin']\n\tres = {}\n\tfor col in cols:\n\t\tres[col] = str(df_s[df_s.community == neig][col].iloc[0])\n\tprint(res)\n\treturn res\n\n\n@app.route('/api/recommend', methods=['POST','GET'])\ndef recommend():\n\t# body = request.get_json()\n\n\tdf_rec = pd.read_csv('./data/rating.csv')\n\t#train\n\ttrain = df_rec.sample(frac=0.8, random_state=7)\n\tval = df_rec.drop(train.index.tolist()).sample(frac=0.5, random_state=8)\n\ttest = df_rec.drop(train.index.tolist()).drop(val.index.tolist()) \n\tsvd = SVD(lr=0.001, reg=0.005, n_epochs=100, n_factors=15,\n\t\t early_stopping=True, shuffle=False, min_rating=-1, max_rating=1)\n\tsvd.fit(X=train, X_val=val)\n\tpred = svd.predict(test)\n\tmae = mean_absolute_error(test['rating'], pred)\n\tprint(f'Test MAE: {mae:.2f}')\n\n\tu_id = df_rec.sample().u_id.iloc[0]\n\ti_ids = list(df_rec.i_id.unique())\n\tfm = {'u_id': [u_id for i in range(len(i_ids))], 'i_id': i_ids}\n\tpred = pd.DataFrame.from_dict(fm)\n\tres = svd.predict(pred)\n\tpred['pred_rating'] = res\n\tpred.sort_values(by=['pred_rating'], ascending=False, inplace=True)\n\tN = 6\n\tdf_1 = pred.head(N)\n\n\tdf_2 = pd.read_csv('./data/listings_wrangled.csv')\n\n\tdf = pd.merge(df_1, df_2, on='i_id')\n\n\tres = {\"Recommend\": []}\n\tfor index, row in df.iterrows():\n\t\tcurr = {}\n\t\tcurr[\"docno\"] = row['i_id']\n\t\tcurr[\"name\"] = row['name']\n\t\tcurr[\"description\"] = row['description']\n\t\tcurr[\"neighborhood\"] = row['neighbourhood_cleansed']\n\t\tcurr[\"latitude\"] = 
row['latitude']\n\t\tcurr[\"longitude\"] = row['longitude']\n\t\tcurr[\"pic_url\"] = row['picture_url']\n\t\tres[\"Recommend\"].append(curr)\n\t\n\treturn res\n\n","repo_name":"yongfeilu/CS6200-FinalProject","sub_path":"Application/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"10265380332","text":"import os\nimport sys\nimport pickle\nimport numpy as np\nimport pandas as pd\nfrom sklearn.metrics import accuracy_score,confusion_matrix,f1_score\nfrom src.exception import CustomException\nfrom src.logger import logging\n\ndef save_object(file_path,obj):\n try:\n dir_path=os.path.dirname(file_path)\n os.makedirs(dir_path,exist_ok=True)\n\n with open(file_path,'wb') as file_obj:\n pickle.dump(obj,file_obj)\n\n\n except Exception as e:\n raise CustomException(e,sys)\n \n\ndef evaluate_model(X_train,y_train,X_test,y_test,models):\n try:\n accuracy_report={}\n \n for i in range(len(models)):\n model=list(models.values())[i]\n ## train model\n model.fit(X_train,y_train)\n\n #predict the test data\n y_test_pred=model.predict(X_test)\n\n ## get the evaluation scores for the model\n test_model_score=accuracy_score(y_test,y_test_pred)\n accuracy_report[list(models.keys())[i]]=test_model_score\n\n return accuracy_report\n\n\n except Exception as e:\n raise CustomException(e,sys)","repo_name":"Adroit10/AdultCensus_IncomePrediction","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"7306208237","text":"import pandas as pd\nimport numpy as np\nimport argparse\nfrom rdkit import Chem\nfrom rdkit.Chem.QED import qed\nfrom sklearn.preprocessing import MinMaxScaler\nfrom SA_Score import sascorer\n\ndef get_sa(smi):\n mol = Chem.MolFromSmiles(smi)\n return sascorer.calculateScore(mol)\n\ndef get_qed(smi):\n 
mol = Chem.MolFromSmiles(smi)\n return qed(mol)\n\ndef get_counts(smi):\n mol = Chem.MolFromSmiles(smi)\n return len(mol.GetAtoms())\n\n\n\ndef get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('-i', help='input csv with fastroc and sim columns already computed', type=str)\n parser.add_argument('-o', help='output csv location', type=str)\n parser.add_argument('n', help='how many to sample', type=int)\n return parser.parse_args()\n\nif __name__ == '__main__':\n args = get_args()\n df = pd.read_csv(args.i)\n print(\"Loaded csv with\", df.shape[0], \"rows\")\n\n assert(('fastroc' in df.columns.tolist()) and ('sim' in df.columns.tolist()) and ('smiles' in df.columns.tolist()))\n\n df = df[df.sim <= 0.65]\n df['molsize'] = df.smiles.apply(get_counts)\n df['qed'] = df.smiles.apply(get_counts)\n df['sa'] = df.smiles.apply(get_sa)\n\n df = df.dropna()\n print(\"Loaded csv with\", df.shape[0], \"rows\")\n\n mm = MinMaxScaler()\n df.iloc[:, 1:] = mm.fit_transform(df.iloc[:, 1:])\n\n w1 = -1.0 # sim\n w2 = 5.0 # fast roc\n w3 = 2.0 #size\n w4 = 1.0 # qed\n w5 = 1.0 # sa\n\n reward_func = lambda x : w1 * x['sim'] + w2 * x['fastroc'] + w3 * x['molsize'] + w4 * x['qed'] + w5 * x['sa']\n df['reward'] = df.apply(reward_func)\n\n df.iloc[:, 1:-1] = mm.inverse_transform(df.iloc[:, 1:-1])\n\n output = df.sort_values('reward', ascending=False).iloc[:args.n]\n\n output.to_csv(args.o + \".csv\", index=False)\n output.smiles.to_csv(args.o + \".txt\", index=False, header=False)","repo_name":"aclyde11/RNNGenerator","sub_path":"runreward.py","file_name":"runreward.py","file_ext":"py","file_size_in_byte":1833,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"31367539876","text":"import csv\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8');\n\n\nimport os\nimport fnmatch\nfrom ppg import BASE_DIR\nfrom ppg.params import PPG_SAMPLE_RATE\nfrom ppg.utils import exist, load_text, dump_json\nfrom ppg.signal 
import smooth_ppg_signal\nfrom ppg.feature import extract_ppg45\n\nimport math\nfrom ppg.learn import logistic_regression_classifier\nfrom ppg.learn import support_vector_classifier\nfrom ppg.learn import decision_tree_classifier\nfrom ppg.learn import random_forest_classifier, adaboost_classifier\n\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn import metrics\n\n\ndef convert():\n splited_data_dir = os.path.join(BASE_DIR, 'data', 'splited')\n output_data = {}\n with open('ppg.csv', 'r') as file:\n reader = csv.reader(file)\n for row in reader:\n participant = row[0]\n signal = row[2:-1]\n signal_value = [float(s) for s in signal]\n if participant not in output_data.keys():\n output_data[participant] = []\n output_data[participant].append(signal_value)\n \n feature_data = {}\n for label,signal_list in output_data.items():\n feature_signals = []\n for signal in signal_list:\n smooth_signal = smooth_ppg_signal(signal)\n sig = extract_ppg45(smooth_signal)\n if len(sig) != 0:\n feature_signals.append(sig)\n if len(feature_signals) != 0:\n feature_data[label] = feature_signals\n output_filename = \"feature_data.json\"\n dump_json(data=feature_data, pathname=os.path.join(splited_data_dir, output_filename), overwrite=True)\n return feature_data\n\ndef Normalization(feature_data):\n signal_lens = []\n final_output = {}\n global_signal_list = []\n for value in feature_data.values():\n signals = len(value)\n signal_lens.append(signals)\n global_signal_list.extend(value)\n x_scaler = MinMaxScaler()\n scaled_signals = x_scaler.fit_transform(global_signal_list)\n count = 0 \n labels = feature_data.keys()\n for index,lens in enumerate(signal_lens):\n signal_list = []\n for i in range(lens):\n signal_list.append(scaled_signals[count])\n count = count + 1\n final_output[labels[index]] = signal_list\n return final_output\n \n\ndef train_test_split(feature_data,train_ratio=0.8):\n train_label = []\n train_feature = []\n test_label = []\n test_feature = []\n \n for 
label,signal_list in feature_data.items():\n index = math.ceil(len(signal_list)*train_ratio)\n index = int(index)\n for i in range(len(signal_list)):\n if len(signal_list) == 1:\n train_label.append(label)\n train_feature.append(signal_list[i])\n test_feature.append(signal_list[i])\n test_label.append(label)\n else:\n if i < index:\n train_label.append(label)\n train_feature.append(signal_list[i])\n else:\n test_label.append(label)\n test_feature.append(signal_list[i])\n return (train_feature,train_label,test_feature,test_label)\n\n\ndef classify(train_feature,train_label,test_feature,test_label):\n classifiers = [\n ('logistic_regression', logistic_regression_classifier, ),\n ('support_vector', support_vector_classifier, ),\n ('decision_tree', decision_tree_classifier, ),\n ('random_forest', random_forest_classifier, ),\n ('adaboost', adaboost_classifier, ),\n ]\n \n for classifier_name, classifier_object in classifiers:\n classifier = classifier_object(features=train_feature, labels=train_label)\n score = classifier.score(test_feature,test_label)\n print(classifier_name , score)\n \n pred_label = classifier.predict(test_feature)\n \n for ind,value_label in enumerate(test_label):\n print(value_label,pred_label[ind])\n \n print(\"Confusion Matrix\")\n print(\"-----------------------------------------\")\n print(metrics.confusion_matrix(test_label,pred_label))\n print(\"-------------------------------------------------\")\n print(\"Classification Report\")\n print(\"-----------------------------------------------\")\n print(metrics.classification_report(test_label,pred_label))\n print(\"---------------------------------------------------\") \n\n\n\nif __name__ == '__main__':\n feature_data = convert()\n feature_data = Normalization(feature_data)\n train_feature,train_label,test_feature,test_label = train_test_split(feature_data)\n classify(train_feature,train_label,test_feature,test_label)\n \n \n 
\n","repo_name":"pragyaagrawal19/heart-beat-signal-from-facial-video-for-biometric-recognition","sub_path":"feature_extract.py","file_name":"feature_extract.py","file_ext":"py","file_size_in_byte":4723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"32458590561","text":"import lsst.afw.table\nfrom lsst.daf.base import PropertyList\nfrom lsst.meas.algorithms import SourceMeasurementTask\nfrom lsst.pex.config import Config, ConfigurableField, DictField, Field, FieldValidationError\nfrom lsst.pipe.base import Task, CmdLineTask, Struct, timeMethod, ButlerInitializedTaskRunner\nfrom .references import MultiBandReferencesTask\n\n__all__ = (\"ForcedPhotImageTask\",)\n\nclass ForcedPhotImageConfig(Config):\n \"\"\"Configuration for forced photometry.\n \"\"\"\n references = ConfigurableField(target=MultiBandReferencesTask, doc=\"Retrieve reference source catalog\")\n measurement = ConfigurableField(target=SourceMeasurementTask, doc=\"measurement subtask\")\n copyColumns = DictField(\n keytype=str, itemtype=str, doc=\"Mapping of reference columns to source columns\",\n default={\"id\": \"object.id\", \"parent\":\"object.parent\", \"deblend.nchild\": \"object.deblend.nchild\"}\n )\n\n def _getTweakCentroids(self):\n return self.measurement.centroider.name is not None\n\n def _setTweakCentroids(self, doTweak):\n if doTweak:\n self.measurement.centroider.name = \"centroid.sdss\"\n self.measurement.algorithms.names -= [\"centroid.sdss\"]\n self.measurement.algorithms.names |= [\"skycoord\", \"centroid.record\"]\n self.measurement.slots.centroid = \"centroid.sdss\"\n else:\n self.measurement.centroider.name = None\n self.measurement.algorithms.names |= [\"centroid.sdss\", \"centroid.record\"]\n self.measurement.algorithms.names -= [\"skycoord\"]\n self.measurement.slots.centroid = \"centroid.record\"\n\n doTweakCentroids = property(\n _getTweakCentroids, _setTweakCentroids,\n doc=(\"A meta-config option (just a 
property, really) that sets whether to tweak centroids during \"\n \"measurement by modifying several other config options\")\n )\n\n def setDefaults(self):\n self.doTweakCentroids = False\n\nclass ForcedPhotImageTask(CmdLineTask):\n \"\"\"Base class for performing forced measurement, in which the results (often just centroids) from\n regular measurement on another image are used to perform restricted measurement on a new image.\n\n This task is not directly usable as a CmdLineTask; subclasses must:\n - Set the _DefaultName class attribute\n - Implement makeIdFactory\n - Implement fetchReferences\n - (optional) Implement attachFootprints\n \"\"\"\n\n RunnerClass = ButlerInitializedTaskRunner\n ConfigClass = ForcedPhotImageConfig\n dataPrefix = \"\" # Name to prepend to all input and output datasets (e.g. 'goodSeeingCoadd_')\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize the task.\n\n ForcedPhotImageTask takes two keyword arguments beyond the usual CmdLineTask arguments:\n - schema: the Schema of the reference catalog, passed to the constructor of the references subtask\n - butler: a butler that will be passed to the references subtask to allow it to load its Schema\n from disk.\n At least one of these arguments must be present; if both are, schema takes precedence.\n \"\"\"\n butler = kwargs.pop(\"butler\", None)\n refSchema = kwargs.pop(\"schema\", None)\n super(ForcedPhotImageTask, self).__init__(*args, **kwargs)\n self.algMetadata = PropertyList()\n self.makeSubtask(\"references\", butler=butler, schema=refSchema)\n if refSchema is None:\n refSchema = self.references.schema\n # We make a SchemaMapper to transfer fields from the reference catalog\n self.schemaMapper = lsst.afw.table.SchemaMapper(refSchema)\n # First we have to include the minimal schema all SourceCatalogs must have, but we don't\n # want to transfer those fields from the refSchema (hence doMap=False)\n self.schemaMapper.addMinimalSchema(lsst.afw.table.SourceTable.makeMinimalSchema(), 
False)\n # Now we setup mappings from refSchema to the output schema, setting doReplace=True\n # so we can set minimal schema fields if so configured.\n for refName, targetName in self.config.copyColumns.items():\n refItem = refSchema.find(refName)\n self.schemaMapper.addMapping(refItem.key, targetName, True) # doReplace=True\n # Extract the output schema, and add the actual forced measurement fields to it.\n self.schema = self.schemaMapper.getOutputSchema()\n self.makeSubtask(\"measurement\", schema=self.schema, algMetadata=self.algMetadata, isForced=True)\n\n def getSchemaCatalogs(self):\n catalog = lsst.afw.table.SourceCatalog(self.schema)\n return {self.dataPrefix + \"forced_src\": catalog}\n\n def makeIdFactory(self, dataRef):\n \"\"\"Hook for derived classes to define how to make an IdFactory for forced sources.\n\n Note that this is for forced source IDs, not object IDs, which are usually handled by\n the copyColumns config option.\n \"\"\"\n raise NotImplementedError()\n\n def fetchReferences(self, dataRef, exposure):\n \"\"\"Hook for derived classes to define how to get references objects.\n\n Derived classes should call one of the fetch* methods on the references subtask,\n but which one they call depends on whether the region to get references for is a\n easy to describe in patches (as it would be when doing forced measurements on a\n coadd), or is just an arbitrary box (as it would be for CCD forced measurements).\n \"\"\"\n raise NotImplementedError()\n\n def attachFootprints(self, dataRef, sources, references, exposure, refWcs):\n \"\"\"Hook for derived classes to define how to attach Footprints to blank sources prior to measurement\n\n Footprints for forced photometry must be in the pixel coordinate system of the image being\n measured, while the actual detections may start out in a different coordinate system.\n\n Subclasses for ForcedPhotImageTask must implement this method to define how those Footprints\n should be generated.\n\n The default 
implementation transforms the Footprints from the reference catalog from the refWcs\n to the exposure's Wcs, which downgrades HeavyFootprints into regular Footprints, destroying\n deblend information.\n \"\"\"\n exposureWcs = exposure.getWcs()\n region = exposure.getBBox(lsst.afw.image.PARENT)\n for srcRecord, refRecord in zip(sources, references):\n srcRecord.setFootprint(refRecord.getFootprint().transform(refWcs, exposureWcs, region))\n\n def getExposure(self, dataRef):\n \"\"\"Read input exposure on which to perform the measurements\n\n @param dataRef Data reference from butler\n \"\"\"\n if dataRef.datasetExists(self.dataPrefix + \"calexp\"):\n return dataRef.get(self.dataPrefix + \"calexp\", immediate=True)\n else:\n return None\n\n def writeOutput(self, dataRef, sources):\n \"\"\"Write forced source table\n\n @param dataRef Data reference from butler\n @param sources SourceCatalog to save\n \"\"\"\n dataRef.put(sources, self.dataPrefix + \"forced_src\")\n\n def generateSources(self, dataRef, references):\n \"\"\"Generate sources to be measured, copying any fields in self.config.copyColumns\n\n @param dataRef Data reference from butler\n @param references Sequence (not necessarily a SourceCatalog) of reference sources\n @param idFactory Factory to generate unique ids for forced sources\n @return Source catalog ready for measurement\n \"\"\"\n if self.schema is None:\n self._buildSchema(dataRef.butlerSubset.butler)\n idFactory = self.makeIdFactory(dataRef)\n table = lsst.afw.table.SourceTable.make(self.schema, idFactory)\n sources = lsst.afw.table.SourceCatalog(table)\n table = sources.table\n table.setMetadata(self.algMetadata)\n table.preallocate(len(references))\n for ref in references:\n sources.addNew().assign(ref, self.schemaMapper)\n return sources\n\n @lsst.pipe.base.timeMethod\n def run(self, dataRef):\n \"\"\"Perform forced measurement on the exposure defined by the given dataref.\n\n The dataRef must contain a 'tract' key, which is used to resolve 
the correct references\n in the presence of tract overlaps, and also defines the WCS of the reference sources.\n \"\"\"\n refWcs = self.references.getWcs(dataRef)\n exposure = self.getExposure(dataRef)\n if exposure:\n references = list(self.fetchReferences(dataRef, exposure))\n self.log.info(\"Performing forced measurement on %s\" % dataRef.dataId)\n sources = self.generateSources(dataRef, references)\n self.attachFootprints(dataRef, sources, references=references, exposure=exposure, refWcs=refWcs)\n self.measurement.run(exposure, sources, references=references, refWcs=refWcs)\n self.writeOutput(dataRef, sources)\n return Struct(sources=sources)\n else:\n self.log.info(\"No image exists for %s\" % (dataRef.dataId))\n return Struct(sources=None)\n","repo_name":"laurenam/pipe_tasks","sub_path":"python/lsst/pipe/tasks/forcedPhotImage.py","file_name":"forcedPhotImage.py","file_ext":"py","file_size_in_byte":9147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"22959482331","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 27 00:39:05 2023\n\n@author: tavo\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport multiprocessing as mp\n\n###############################################################################\n\ndata = pd.read_csv('/media/tavo/storage/sunspots/SN_d_tot_V2.0.csv',delimiter=';',header=None)\n\nnewData = pd.DataFrame()\n\nnewData['date'] = pd.to_datetime(data[0].astype(str)+'/'+data[1].astype(str)+'/'+data[2].astype(str),format='%Y/%m/%d')\nnewData['year'] = data[0]\nnewData['dayofyear'] = newData['date'].dt.dayofyear\nnewData['dayofweek'] = newData['date'].dt.dayofweek\nnewData['week'] = newData['date'].dt.isocalendar().week\nnewData['dailysunspots'] = data[4]\nnewData['standarddeviation'] = data[5]\nnewData['observations'] = data[6]\nnewData['provisional'] = 
data[7]\n\nnewData.to_csv('/media/tavo/storage/sunspots/sunspots.csv')\n","repo_name":"TavoGLC/datasets","sub_path":"sunspots.py","file_name":"sunspots.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"37523532254","text":"import sys\n\n\ndef whois(str):\n try:\n num = int(str)\n if (num == 0):\n print(\"I'm Zero.\")\n elif (num % 2 == 0):\n print(\"I'm Even.\")\n else:\n print(\"I'm Odd.\")\n except ValueError:\n print(\"AssertionError: argument is not an integer\")\n\n\nif (len(sys.argv) == 2):\n whois(sys.argv[1])\nelif (len(sys.argv) > 2):\n print(\"AssertionError: more than one argument are provided\")\nelse:\n print(\"AssertionError: arguments are not provided\")\n\n","repo_name":"bbritva/Python_modules","sub_path":"day_00/ex02/whois.py","file_name":"whois.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2296553859","text":"import torch.nn as nn\n\nPADDING_LAYERS = {\n 'zero': nn.ZeroPad2d,\n 'reflect': nn.ReflectionPad2d,\n 'replicate': nn.ReplicationPad2d\n}\n\n\ndef build_padding_layer(cfg, *args, **kwargs):\n \"\"\"Build padding layer.\n\n Args:\n cfg (None or dict): The padding layer config, which should contain:\n - type (str): Layer type.\n - layer args: Args needed to instantiate a padding layer.\n\n Returns:\n nn.Module: Created padding layer.\n \"\"\"\n if not isinstance(cfg, dict):\n raise TypeError('cfg must be a dict')\n if 'typename' not in cfg:\n raise KeyError('the cfg dict must contain the key \"typename\"')\n\n cfg_ = cfg.copy()\n padding_type = cfg_.pop('typename')\n if padding_type not in PADDING_LAYERS:\n raise KeyError(f'Unrecognized padding type {padding_type}.')\n else:\n padding_layer = PADDING_LAYERS.get(padding_type)\n\n layer = padding_layer(*args, **kwargs, **cfg_)\n\n return 
layer\n","repo_name":"Media-Smart/vedadet","sub_path":"vedacore/modules/bricks/padding.py","file_name":"padding.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","stars":470,"dataset":"github-code","pt":"54"} +{"seq_id":"22132874786","text":"import streamlit as st\n\n\ndef main():\n\n\tst.title('Smart Height Measurement Machine')\n\tst.text('This tool uses advanced technology to measure your height based on a few simple questions')\n\tst.text_input('What is your zip code', value='90210')\n\theight = st.text_input('How tall are you in inches?', value='60')\n\tbananas = round(float(height)/7.0, 2)\n\t\n\n\tif st.button('Measure Me'):\n\t\tst.text('You are '+ height + ' inches tall. That is about '+ str(bananas) + ' bananas long!')\n\t\tst.text('Not satisfied with your result?')\n\t\tst.text('Please make sure the information you provided is accurate, and try again!')\n\n\nif __name__ == '__main__':\n main()","repo_name":"hminluo/height_app_streamlit","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"13081135080","text":"from setuptools import setup, find_packages\n\nwith open(\"README.md\", \"r\") as f:\n page_description = f.read()\n\nwith open(\"requirements.txt\") as f:\n requirements = f.read().splitlines()\n\nsetup(\n name=\"my_image_processing\",\n version=\"0.0.1\",\n author=\"Miguel Mendes Serrano, Karina Kato\",\n author_email=\"my_email\",\n description=\"Image processing package using skimage\",\n long_description=page_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/miguelmendesSerrano\",\n packages=find_packages(),\n install_requires=requirements,\n 
python_requires='>=3.8',\n)\n","repo_name":"miguelmendesSerrano/my-image-processing-package","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"38512537650","text":"from constants import PLAYERS, TEAMS\nimport copy\n\n\nPLAYERS_PER_TEAM = len(PLAYERS) / len(TEAMS)\nteams_copy = copy.deepcopy(TEAMS)\nplayers_copy = copy.deepcopy(PLAYERS)\nbalanced_teams = {key: [] for key in TEAMS}\nexperienced = []\nno_experience = []\n\ndef clean_data():\n for player in players_copy:\n if player.get('experience') == 'NO':\n player['experience'] = False\n else:\n player['experience'] = True\n player['height'] = int(player['height'][:2])\n player['guardians'] = player['guardians'].split( \" and \")\n\n\ndef balance_experience():\n for player in players_copy:\n if player.get('experience') == True:\n experienced.append(player)\n else:\n no_experience.append(player)\n\n\ndef balance_teams():\n balance_counter = 0\n while len(balanced_teams.get('Panthers')) < PLAYERS_PER_TEAM:\n for key, values in balanced_teams.items():\n player_experience = experienced[balance_counter]\n player_no_experience = no_experience[balance_counter]\n items = {key:values.append(player_experience)}\n items = {key:values.append(player_no_experience)}\n balance_counter += 1 \n \n for keys, values in balanced_teams.items():\n if len(values) > PLAYERS_PER_TEAM:\n items = {key:values.pop()}\n\n\ndef menu():\n print('Welcome to the basketball team stats tool!\\n'\n '►►►Menu◄◄◄\\n'\n 'Here are your choices, please select one:\\n\\n'\n '1) Display Team Stats\\n'\n '2) Exit')\n\n choice = input('Enter your choice here. 1 or 2: ')\n\n while True:\n if choice == '1':\n teams_menu() \n break\n elif choice == '2':\n print('Thanks for visiting! 
See you next time!')\n break\n else:\n choice = input('Please enter a valid option(1 or 2): ')\n\n\ndef teams_menu():\n choice1 = input('Choose a team by selecting the respective number:\\n'\n '1) Panthers\\n'\n '2) Warriors\\n'\n '3) Bandits\\n')\n\n while True:\n if choice1 == '1':\n team_stats(1)\n elif choice1 == '2':\n team_stats(2)\n elif choice1 == '3':\n team_stats(3)\n else:\n choice1 = input('That is not a valid option, please enter a value between 1 and 3 ')\n\n\ndef team_stats(choice1):\n if choice1 == 1:\n format_stats('Panthers')\n elif choice1 == 2:\n format_stats('Warriors')\n elif choice1 == 3:\n format_stats('Bandits')\n\ndef format_stats(team):\n print('Team: {} Stats'.format(team))\n print('-' * 10)\n print('Total players : ' + str(PLAYERS_PER_TEAM))\n print('Players on team:')\n \n player_list = []\n \n for player in balanced_teams.get(team):\n player_list.append(player.get('name'))\n print(', '.join(player_list))\n\n\n show_experience(team)\n average_height(team)\n show_guardians(team)\n\n continue_tool()\n\ndef continue_tool():\n choice = None\n while choice == None:\n choice = input('Would you like to continue? Y or N. 
')\n if choice.lower !='n' or choice.lower != 'y':\n print('Please enter a valid choice.')\n \n elif choice.lower == 'n':\n print('Thank for visiting!')\n \n else:\n teams_menu()\n \n\ndef show_experience(team):\n experienced = 0\n not_experienced = 0\n for player in balanced_teams.get(team):\n if player.get('experience') == True:\n experienced += 1 \n else:\n not_experienced += 1 \n print('There are {} experienced players and {} players with no experience at all.'.format(experienced, not_experienced))\n\ndef average_height(team):\n total_height = 0\n for player in balanced_teams.get(team):\n total_height += player.get('height')\n average_height = total_height / PLAYERS_PER_TEAM\n print('The average height of this team is {} inches.'.format(round(float(average_height))))\n\ndef show_guardians(team):\n guardians = []\n for player in balanced_teams.get(team):\n guardians += player.get('guardians')\n team_guardians = ', '.join(guardians)\n print('The guardians for this team are: {}'.format(team_guardians))\n\nif __name__ == '__main__':\n clean_data()\n balance_experience()\n balance_teams()\n menu()","repo_name":"Mcorkern/Unit-2-project","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"29291987118","text":"import customtkinter as ctk\nimport customtkinter\nroot = ctk.CTk()\nroot.geometry(\"600x688\")\nversion_button = ctk.CTkButton(root, text=\"V1.0\", width=96)\nversion_button.pack(anchor = \"s\", side = \"right\")\n \nroot.mainloop()\nclass ToplevelWindow(customtkinter.CTkToplevel):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.geometry(\"400x300\")\n\n self.label = customtkinter.CTkLabel(self, text=\"ToplevelWindow\")\n self.label.pack(padx=20, pady=20)\n\n\nclass App(customtkinter.CTk):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n 
self.geometry(\"500x400\")\n\n self.button_1 = customtkinter.CTkButton(self, text=\"open toplevel\", command=self.open_toplevel)\n self.button_1.pack(side=\"top\", padx=20, pady=20)\n\n self.toplevel_window = None\n\n def open_toplevel(self):\n if self.toplevel_window is None or not self.toplevel_window.winfo_exists():\n self.toplevel_window = ToplevelWindow(self) # create window if its None or destroyed\n else:\n self.toplevel_window.focus() # if window exists focus it\n\n\napp = App()\napp.mainloop()","repo_name":"MidnightStudioOfficial/Ava","sub_path":"src/test7.py","file_name":"test7.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"3768134503","text":"# System Modules\n\n\n# Third Party Modules\nimport pyglet\nfrom pyglet.gl import *\n\n# Local Modules\n\nclass Hud(object):\n def __init__(self, window):\n self.window = window\n \n def draw(self):\n glMatrixMode(GL_MODELVIEW)\n glLoadIdentity()\n \n glBegin(GL_LINE_LOOP)\n glColor4f(1.0, 1.0, 1.0, 1.0)\n glVertex2f(self.window.height-50, self.window.width-50)\n glVertex2f(self.window.height-50, 50)\n glVertex2f(50, 50)\n glVertex2f(50, self.window.width-50)\n glEnd()\n\n","repo_name":"Artanis/asteroids","sub_path":"interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"35204582776","text":"import mmcv\nimport numpy as np\nimport torch\n\nfrom ..builder import BBOX_CODERS\nfrom .base_bbox_coder import BaseBBoxCoder\nfrom ..grasp_transforms import grasp_encode, grasp_decode\n\n@BBOX_CODERS.register_module()\nclass DeltaXYWHSinCosBBoxCoder(BaseBBoxCoder):\n \"\"\"Delta XYWH BBox coder.\n\n Following the practice in `R-CNN `_,\n this coder encodes bbox (x1, y1, x2, y2) into delta (dx, dy, dw, dh) and\n decodes delta (dx, dy, dw, dh) back to original bbox (x1, y1, x2, y2).\n\n 
Args:\n target_means (Sequence[float]): Denormalizing means of target for\n delta coordinates\n target_stds (Sequence[float]): Denormalizing standard deviation of\n target for delta coordinates\n clip_border (bool, optional): Whether clip the objects outside the\n border of the image. Defaults to True.\n \"\"\"\n\n def __init__(self,\n target_means_hbb=(.0, .0, .0, .0),\n target_stds_hbb=(1.0, 1.0, 1.0, 1.0),\n target_means_obb=(.0, .0, .0, .0),\n target_stds_obb=(1.0, 1.0, 1.0, 1.0)):\n super(BaseBBoxCoder, self).__init__()\n self.means_hbb = target_means_hbb\n self.stds_hbb = target_stds_hbb\n self.means_obb = target_means_obb\n self.stds_obb = target_stds_obb\n\n def encode(self, hbbox_pred, hbboxes, gt_hbboxes, gt_obboxes):\n \"\"\"Get box regression transformation deltas that can be used to\n transform the ``bboxes`` into the ``gt_bboxes``.\n\n Args:\n bboxes (torch.Tensor): Source boxes, e.g., object proposals.\n gt_bboxes (torch.Tensor): Target of the transformation, e.g.,\n ground-truth boxes.\n\n Returns:\n torch.Tensor: Box transformation deltas\n \"\"\"\n encode_hbboxes = bbox2delta(hbboxes, gt_hbboxes, self.means_hbb, self.stds_hbb)\n decode_hbboxes = delta2hbboxrec5(hbboxes, hbbox_pred, self.means_hbb, self.stds_hbb)\n encode_obboxes = rec2target(decode_hbboxes, gt_obboxes, self.means_obb, self.stds_obb)\n return encode_hbboxes, encode_obboxes\n\n def decode(self, anchors, bbox_pred, obb_pred):\n \"\"\"Apply transformation `pred_bboxes` to `boxes`.\n\n Args:\n boxes (torch.Tensor): Basic boxes.\n pred_bboxes (torch.Tensor): Encoded boxes with shape\n max_shape (tuple[int], optional): Maximum shape of boxes.\n Defaults to None.\n wh_ratio_clip (float, optional): The allowed ratio between\n width and height.\n\n Returns:\n torch.Tensor: Decoded boxes.\n \"\"\"\n\n decode_hbboxes = delta2bbox(anchors, bbox_pred, self.means_hbb, self.stds_hbb)\n decode_obboxes = target2poly(decode_hbboxes, obb_pred, self.means_obb, self.stds_obb)\n return 
decode_obboxes\n\n\ndef bbox2delta(proposals, gt, means=(0, 0, 0, 0), stds=(1, 1, 1, 1)):\n assert proposals.size() == gt.size()\n\n proposals = proposals.float()\n gt = gt.float()\n px = (proposals[..., 0] + proposals[..., 2]) * 0.5\n py = (proposals[..., 1] + proposals[..., 3]) * 0.5\n pw = proposals[..., 2] - proposals[..., 0] + 1.0\n ph = proposals[..., 3] - proposals[..., 1] + 1.0\n\n gx = (gt[..., 0] + gt[..., 2]) * 0.5\n gy = (gt[..., 1] + gt[..., 3]) * 0.5\n gw = gt[..., 2] - gt[..., 0] + 1.0\n gh = gt[..., 3] - gt[..., 1] + 1.0\n\n dx = (gx - px) / pw\n dy = (gy - py) / ph\n dw = torch.log(gw / pw)\n dh = torch.log(gh / ph)\n deltas = torch.stack([dx, dy, dw, dh], dim=-1)\n\n means = deltas.new_tensor(means).unsqueeze(0)\n stds = deltas.new_tensor(stds).unsqueeze(0)\n deltas = deltas.sub_(means).div_(stds)\n\n return deltas\n\ndef delta2hbboxrec5(rois,\n deltas,\n means=(0, 0, 0, 0),\n stds=(1, 1, 1, 1),\n wh_ratio_clip=16/1000):\n means = deltas.new_tensor(means).repeat(1, deltas.size(1) // 4)\n stds = deltas.new_tensor(stds).repeat(1, deltas.size(1) // 4)\n denorm_deltas = deltas * stds + means # 在bbox2delta中进行了标准化,这里要做逆变换\n dx = denorm_deltas[:, 0::4]\n dy = denorm_deltas[:, 1::4]\n dw = denorm_deltas[:, 2::4]\n dh = denorm_deltas[:, 3::4]\n max_ratio = np.abs(np.log(wh_ratio_clip))\n dw = dw.clamp(min=-max_ratio, max=max_ratio)\n dh = dh.clamp(min=-max_ratio, max=max_ratio)\n # Compute center of each roi\n px = ((rois[:, 0] + rois[:, 2]) * 0.5).unsqueeze(1).expand_as(dx)\n py = ((rois[:, 1] + rois[:, 3]) * 0.5).unsqueeze(1).expand_as(dy)\n # Compute width/height of each roi\n pw = (rois[:, 2] - rois[:, 0] + 1.0).unsqueeze(1).expand_as(dw)\n ph = (rois[:, 3] - rois[:, 1] + 1.0).unsqueeze(1).expand_as(dh)\n # Use exp(network energy) to enlarge/shrink each roi\n gw = pw * dw.exp()\n gh = ph * dh.exp()\n # Use network energy to shift the center of each roi\n gx = px + pw * dx\n gy = py + ph * dy\n gtheta = gw.new_zeros((gw.size(0), gw.size(1)))\n rec = 
torch.stack([gx, gy, gw, gh, gtheta], dim=-1).view(deltas.size(0), -1)\n return rec\n\ndef rec2target(hbbox_rec, gt_rbbox_rec, means=(0, 0, 0, 0), stds=(1, 1, 1, 1)):\n hbbox_w = hbbox_rec[:, 2]\n hbbox_h = hbbox_rec[:, 3]\n hbbox_theta = hbbox_rec[:, 4]\n\n gt_rbbox_w = gt_rbbox_rec[:, 2]\n gt_rbbox_h = gt_rbbox_rec[:, 3]\n gt_rbbox_theta = gt_rbbox_rec[:, 4]\n\n delta_theta = gt_rbbox_theta - hbbox_theta\n\n delta_w = gt_rbbox_w / hbbox_w\n delta_h = gt_rbbox_h / hbbox_h\n\n # t11 = torch.cos(delta_theta) * delta_w\n # t12 = -torch.sin(delta_theta) * delta_h\n # t21 = torch.sin(delta_theta) * delta_w\n # t22 = torch.cos(delta_theta) * delta_h\n\n t11 = delta_w\n t12 = delta_h\n t21 = torch.sin(delta_theta)\n t22 = torch.cos(delta_theta)\n\n t = torch.stack([t11, t12, t21, t22], dim=-1)\n\n means = t.new_tensor(means).unsqueeze(0)\n stds = t.new_tensor(stds).unsqueeze(0)\n t = t.sub_(means).div_(stds)\n\n return t\n\ndef delta2bbox(rois,\n deltas,\n means=(0, 0, 0, 0),\n stds=(1, 1, 1, 1),\n max_shape=None):\n \"\"\"\n Apply deltas to shift/scale base boxes.\n\n Typically the rois are anchor or proposed bounding boxes and the deltas are\n network outputs used to shift/scale those boxes.\n\n Args:\n rois (Tensor): boxes to be transformed. Has shape (N, 4)\n deltas (Tensor): encoded offsets with respect to each roi.\n Has shape (N, 4). Note N = num_anchors * W * H when rois is a grid\n of anchors. Offset encoding follows [1]_.\n means (list): denormalizing means for delta coordinates\n stds (list): denormalizing standard deviation for delta coordinates\n max_shape (tuple[int, int]): maximum bounds for boxes. specifies (H, W)\n wh_ratio_clip (float): maximum aspect ratio for boxes.\n\n Returns:\n Tensor: boxes with shape (N, 4), where columns represent\n tl_x, tl_y, br_x, br_y.\n\n References:\n .. 
[1] https://arxiv.org/abs/1311.2524\n\n Example:\n >>> rois = torch.Tensor([[ 0., 0., 1., 1.],\n >>> [ 0., 0., 1., 1.],\n >>> [ 0., 0., 1., 1.],\n >>> [ 5., 5., 5., 5.]])\n >>> deltas = torch.Tensor([[ 0., 0., 0., 0.],\n >>> [ 1., 1., 1., 1.],\n >>> [ 0., 0., 2., -1.],\n >>> [ 0.7, -1.9, -0.5, 0.3]])\n >>> delta2bbox(rois, deltas, max_shape=(32, 32))\n tensor([[0.0000, 0.0000, 1.0000, 1.0000],\n [0.2817, 0.2817, 4.7183, 4.7183],\n [0.0000, 0.6321, 7.3891, 0.3679],\n [5.8967, 2.9251, 5.5033, 3.2749]])\n \"\"\"\n means = deltas.new_tensor(means).repeat(1, deltas.size(1) // 4)\n stds = deltas.new_tensor(stds).repeat(1, deltas.size(1) // 4)\n denorm_deltas = deltas * stds + means # 在bbox2delta中进行了标准化,这里要做逆变换\n dx = denorm_deltas[:, 0::4]\n dy = denorm_deltas[:, 1::4]\n dw = denorm_deltas[:, 2::4]\n dh = denorm_deltas[:, 3::4]\n # Compute center of each roi\n px = ((rois[:, 0] + rois[:, 2]) * 0.5).unsqueeze(1).expand_as(dx)\n py = ((rois[:, 1] + rois[:, 3]) * 0.5).unsqueeze(1).expand_as(dy)\n # Compute width/height of each roi\n pw = (rois[:, 2] - rois[:, 0] + 1.0).unsqueeze(1).expand_as(dw)\n ph = (rois[:, 3] - rois[:, 1] + 1.0).unsqueeze(1).expand_as(dh)\n # Use exp(network energy) to enlarge/shrink each roi\n gw = pw * dw.exp()\n gh = ph * dh.exp()\n # Use network energy to shift the center of each roi\n gx = px + pw * dx\n gy = py + ph * dy\n bboxes = torch.stack([gx, gy, gw, gh], dim=-1).view_as(deltas)\n return bboxes\n\n\n\ndef target2poly(hbboxes,\n obb_pred,\n means=(0, 0, 0, 0),\n stds=(1, 1, 1, 1)):\n means = obb_pred.new_tensor(means).repeat(1, obb_pred.size(1) // 4)\n stds = obb_pred.new_tensor(stds).repeat(1, obb_pred.size(1) // 4)\n deform_obb_pred = obb_pred * stds + means\n\n t11 = deform_obb_pred[:, 0::4]\n t12 = deform_obb_pred[:, 1::4]\n t21 = deform_obb_pred[:, 2::4]\n t22 = deform_obb_pred[:, 3::4]\n\n d11 = t11 * t22\n d12 = - t12 * t21\n d21 = t11 * t21\n d22 = t12 * t22\n\n # t11 = torch.cos(delta_theta) * delta_w\n # t12 = 
-torch.sin(delta_theta) * delta_h\n # t21 = torch.sin(delta_theta) * delta_w\n # t22 = torch.cos(delta_theta) * delta_h\n\n # t11 = delta_w\n # t12 = delta_h\n # t21 = torch.sin(delta_theta)\n # t22 = torch.cos(delta_theta)\n\n x_center = hbboxes[:, 0::4]\n y_center = hbboxes[:, 1::4]\n w = hbboxes[:, 2::4] - 1\n h = hbboxes[:, 3::4] - 1\n\n x1 = (-w / 2.0) * d11 + (-h / 2.0) * d12 + x_center\n y1 = (-w / 2.0) * d21 + (-h / 2.0) * d22 + y_center\n x2 = (w / 2.0) * d11 + (-h / 2.0) * d12 + x_center\n y2 = (w / 2.0) * d21 + (-h / 2.0) * d22 + y_center\n x3 = (w / 2.0) * d11 + (h / 2.0) * d12 + x_center\n y3 = (w / 2.0) * d21 + (h / 2.0) * d22 + y_center\n x4 = (-w / 2.0) * d11 + (h / 2.0) * d12 + x_center\n y4 = (-w / 2.0) * d21 + (h / 2.0) * d22 + y_center\n\n poly = torch.cat([x1, y1, x2, y2, x3, y3, x4, y4], dim=-1)\n if obb_pred.size(1) != 4:\n poly = poly.view(poly.size(0), 8, -1)\n poly = poly.permute(0, 2, 1)\n poly = poly.contiguous().view(poly.size(0), -1)\n return poly\n\n\n","repo_name":"mahaoxiang822/dgcan","sub_path":"mmdet/core/bbox/coder/delta_xywhsincos_bbox_coder.py","file_name":"delta_xywhsincos_bbox_coder.py","file_ext":"py","file_size_in_byte":10451,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"423948018","text":"import os\nimport time\nimport random\n\nimport redis\nimport tornado.gen\nimport tornado.websocket\nimport tornado.ioloop\nimport tornado.web\n\n\nclass MainHandler(tornado.web.RequestHandler):\n def get(self):\n print('dupa ')\n # self.write(\"Hello, world\")\n self.cache = redis.Redis(host='redis', port=6379)\n count = self.get_hit_count()\n print(count)\n self.render(\"template.html\", title=\"My title\", items=['hello!', str(count), '­Ъци'])\n\n def get_hit_count(self):\n retries = 5\n while True:\n try:\n return self.cache.incr('hits')\n except redis.exceptions.ConnectionError as exc:\n if retries == 0:\n raise exc\n retries -= 1\n time.sleep(0.5)\n\nclass 
TestSocketHandler(tornado.websocket.WebSocketHandler):\n async def open(self):\n self.dt = .1\n self.size = 200\n self.session_id = random.randint(1,1000)\n self.cache = redis.Redis(host='redis', port=6379)\n self.cache.hset(f'game{self.session_id}', 'alive', 'true')\n self.cache.hset(f'game{self.session_id}', 'x', 50)\n self.cache.hset(f'game{self.session_id}', 'y', 50)\n self.cache.hset(f'game{self.session_id}', 'vx', 0)\n self.cache.hset(f'game{self.session_id}', 'vy', 0)\n print(f'{self.session_id}>> WebSocket opened')\n a = self.cache.hget(f'game{self.session_id}', 'alive')\n print(a)\n\n async def client_loop():\n while True:\n if not self.cache.hget(f'game{self.session_id}', 'alive'):\n print(f' {self.session_id}>> Not alive :(')\n break\n x = float(self.cache.hget(f'game{self.session_id}', 'x'))\n y = float(self.cache.hget(f'game{self.session_id}', 'y'))\n self.write_message(\n f'update:{int(x)},{int(y)}'\n )\n await tornado.gen.sleep(self.dt)\n\n async def game_loop():\n ps = self.cache.pubsub()\n ps.subscribe(f'input{self.session_id}')\n while True:\n if not self.cache.hget(f'game{self.session_id}', 'alive'):\n print(f' {self.session_id}>> Not alive :(')\n break\n \n while True:\n messages = ps.get_message()\n if messages:\n print(f'{self.session_id}>> ', messages.get('data')) \n\n msg = messages.get('data')\n if msg == b'left':\n vy = float(self.cache.hget(f'game{self.session_id}', 'vy'))\n vy = vy-1\n self.cache.hset(f'game{self.session_id}', 'vy', vy)\n if msg == b'right':\n vy = float(self.cache.hget(f'game{self.session_id}', 'vy'))\n vy = vy+1\n self.cache.hset(f'game{self.session_id}', 'vy', vy)\n else: \n break\n\n x = float(self.cache.hget(f'game{self.session_id}', 'x'))\n y = float(self.cache.hget(f'game{self.session_id}', 'y'))\n vx = float(self.cache.hget(f'game{self.session_id}', 'vx'))\n vy = float(self.cache.hget(f'game{self.session_id}', 'vy'))\n x += vx * self.dt\n y += vy * self.dt\n if y < 0:\n y = -y\n 
self.cache.hset(f'game{self.session_id}', 'vy', -vy*.5)\n if y > self.size:\n y = self.size * 2 - y\n self.cache.hset(f'game{self.session_id}', 'vy', -vy*.5)\n self.cache.hset(f'game{self.session_id}', 'x', x)\n self.cache.hset(f'game{self.session_id}', 'y', y)\n print(f'{self.session_id}>>', x, y, vx, vy)\n\n await tornado.gen.sleep(self.dt)\n\n tornado.ioloop.IOLoop.current().spawn_callback(client_loop)\n tornado.ioloop.IOLoop.current().spawn_callback(game_loop)\n print(f'{self.session_id}>> open end')\n\n def on_message(self, message):\n print(f'{self.session_id}>> recieved: ' + message)\n self.write_message(u'You said: ' + message)\n\n if message == 'left':\n self.cache.publish(f'input{self.session_id}', 'left')\n if message == 'right':\n self.cache.publish(f'input{self.session_id}', 'right')\n\n def on_close(self):\n print(f'{self.session_id}>> WebSocket closed')\n\n def check_origin(self, origin):\n return True\n\ndef make_app():\n return tornado.web.Application([\n (r\"/\", MainHandler),\n (r\"/ws/\", TestSocketHandler),\n (r\"/static/\", tornado.web.StaticFileHandler, {'path': 'static_files'}),\n ], autoreload=True,\n static_path='static')\n\nif __name__ == \"__main__\":\n print('(re)starting')\n app = make_app()\n app.listen(8000)\n tornado.ioloop.IOLoop.current().start()\n","repo_name":"pracowniaPK/gameTest","sub_path":"game/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"12762736052","text":"import ckan.plugins as plugins\nimport ckan.model.package\nfrom pylons import config\n\nfrom pymongo import Connection\n\n\nclass MongoMapper(plugins.SingletonPlugin):\n plugins.implements(plugins.IMapper, inherit=True)\n plugins.implements(plugins.IPackageController, inherit=True)\n\n DEFAULT_CONNECTION = 'mongodb://localhost:27017'\n DATABASE = 'ckan_db'\n COLLECTION = 'datasets'\n\n\n def before_update(self, mapper, connection, 
instance):\n mongocon = self.connect()\n collection = mongocon[self.DATABASE][self.COLLECTION]\n \n package = instance.as_dict()\n\n #Filter out non-package updates\n if isinstance(instance, ckan.model.package.Package):\n package['_id'] = package['id']\n collection.save(package)\n \n mongocon.disconnect()\n\n\n def before_insert(self, mapper, connection, instance):\n return self.before_update(mapper, connection, instance)\n\n def before_delete(self, mapper, connection, instance):\n mongocon = self.connect()\n collection = mongocon[self.DATABASE][self.COLLECTION]\n \n package = instance.as_dict()\n\n #Filter out non-package updates\n if isinstance(instance, ckan.model.package.Package):\n collection.remove({'_id':package['id']})\n\n mongocon.disconnect()\n\n\n #IPackageController\n def before_view(self, pkg_dict):\n mongocon = self.connect()\n collection = mongocon[self.DATABASE][self.COLLECTION]\n\n results = collection.find_one({'_id': pkg_dict['id']})\n\n mongocon.disconnect()\n\n return results\n\n def connect(self):\n conn_string = config.get('mongodb.connection_string',self.DEFAULT_CONNECTION)\n return Connection(conn_string)\n","repo_name":"alexbyrnes/ckanext-mongodb","sub_path":"ckanext/mongodb/mongo_mapper.py","file_name":"mongo_mapper.py","file_ext":"py","file_size_in_byte":1751,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"7993043188","text":"#%matplotlib widget\nimport torch\nimport torch.nn as nn\nimport math\nimport numpy as np\nimport tensorflow as tf\n\n# Define the model\nclass Net(nn.Module):\n def __init__(self, input_size, hidden_size, output_size):\n super(Net, self).__init__()\n self.fc1 = nn.Linear(input_size, hidden_size)\n self.fc2 = nn.Linear(hidden_size, output_size)\n\n def forward(self, x):\n x = self.fc1(x)\n #x = torch.relu(x)\n x = self.fc2(x)\n return x\n\n\n# Create an instance of the model\nmodel = Net(input_size=2, hidden_size=10, output_size=1)\nNUMBER = 1000\n# 
Define the optimization algorithm and the loss function\noptimizer = torch.optim.SGD(model.parameters(), lr=0.01)\nloss_fn = nn.MSELoss()\n\n# Generate some synthetic data\nx_train = torch.randn(NUMBER, 2)\n#y_train = x_train[:, 0] ** 2 + x_train[:, 1] ** 2\nx_train_np = x_train.numpy()\n# print(x_train_np[:, 0])\n# assert 0\ny_train = torch.cos(x_train[:, 0]) + torch.sin(x_train[:, 1])\n#y_train = tf.convert_to_tensor(y_train, dtype=tf.float32)\n# Train the model\nfor epoch in range(NUMBER):\n # Forward pass\n y_pred = model(x_train).flatten()\n loss = loss_fn(y_pred, y_train)\n\n # Backward pass\n optimizer.zero_grad()\n loss.backward()\n\n # Update the weights\n optimizer.step()\n\n# do some test on the model\nx_test = torch.randn(NUMBER, 2)\n#x_test_np = x_test.numpy()\n#y_test = x_test[:, 0] ** 2 + x_test[:, 1] ** 2\ny_test = torch.cos(x_test[:, 0]) + torch.sin(x_test[:, 1])\n#y_test= tf.convert_to_tensor(y_test, dtype=tf.float32)\ny_pred = model(x_test)\n# print the difference:\ndiff = []\nindex = 0\n\n# ytest and ypred to list\nytest = y_test.tolist()\nypred = y_pred.tolist()\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\n# Create a figure and a 3D Axes\nfig = plt.figure()\nax1 = fig.add_subplot(121, projection='3d')\nax2 = fig.add_subplot(122, projection='3d')\n\n# Make predictions using the model\ny_pred = model(x_test).flatten()\n# convert x_train and y_pred to list\nx_test_tolist = np.array(x_test.tolist())\ny_test_tolist = np.array(y_test.tolist())\ny_pred = np.array(y_pred.tolist())\n# Create a scatter plot\nax1.scatter(x_test_tolist[:, 0], x_test_tolist[:, 1], y_pred.squeeze(),c=\"g\")\nax2.scatter(x_test_tolist[:, 0], x_test_tolist[:, 1], y_test.squeeze(),c=\"r\")\n# Add axis labels and a title\nax1.set_xlabel('x1')\nax1.set_ylabel('x2')\nax1.set_zlabel('y')\nplt.title('Model predictions for z = 
cosx1+sinx2')\nax2.set_xlabel('x1')\nax2.set_ylabel('x2')\nax2.set_zlabel('y')\nplt.show()\n","repo_name":"ItsBean/handsonRL","sub_path":"dqn-QNet-proof/neural-network-proof/nn-proof.py","file_name":"nn-proof.py","file_ext":"py","file_size_in_byte":2525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"15373292148","text":"import torch\nimport torch.nn as nn\nfrom torch.nn.functional import adaptive_avg_pool2d\n\nimport math\n\n\n#################\n# Discriminator #\n#################\n\nclass Discriminator(nn.Module):\n \"\"\"Discriminator Network for Super Resolution\"\"\"\n def __init__(self, in_channels, ndf, linear_dim, out_dim, disc_type):\n super(Discriminator, self).__init__()\n\n self.disc_type = disc_type\n\n self.main = nn.Sequential(\n nn.Conv2d(in_channels, ndf, kernel_size=3, stride=1, padding=1),\n nn.LeakyReLU(0.2, inplace=True),\n\n nn.Conv2d(ndf, ndf, kernel_size=3, stride=2, padding=1),\n nn.BatchNorm2d(ndf),\n nn.LeakyReLU(0.2, inplace=True),\n\n nn.Conv2d(ndf, ndf * 2, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(ndf * 2),\n nn.LeakyReLU(0.2, inplace=True),\n\n nn.Conv2d(ndf * 2, ndf * 2, kernel_size=3, stride=2, padding=1),\n nn.BatchNorm2d(ndf * 2),\n nn.LeakyReLU(0.2, inplace=True),\n\n nn.Conv2d(ndf * 2, ndf * 4, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(ndf * 4),\n nn.LeakyReLU(0.2, inplace=True),\n\n nn.Conv2d(ndf * 4, ndf * 4, kernel_size=3, stride=2, padding=1),\n nn.BatchNorm2d(ndf * 4),\n nn.LeakyReLU(0.2, inplace=True),\n\n nn.Conv2d(ndf * 4, ndf * 8, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(ndf * 8),\n nn.LeakyReLU(0.2, inplace=True),\n\n nn.Conv2d(ndf * 8, ndf * 8, kernel_size=3, stride=2, padding=1),\n nn.BatchNorm2d(ndf * 8),\n nn.LeakyReLU(0.2, inplace=True),\n )\n\n self.fcn = nn.Sequential(\n nn.Linear(ndf * 8, linear_dim),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(linear_dim, out_dim),\n nn.Sigmoid()\n )\n\n self.conv = 
nn.Sequential(\n nn.Conv2d(ndf * 8, out_dim, kernel_size=1, stride=1, padding=0),\n )\n\n self.patch = nn.Sequential(\n nn.Conv2d(ndf * 8, out_dim, kernel_size=3, stride=1, padding=1),\n nn.Sigmoid()\n )\n\n def forward(self, x):\n out = self.main(x)\n\n if self.disc_type == 'fcn':\n out = adaptive_avg_pool2d(input=out, output_size=1)\n out = torch.flatten(out, 1)\n out = self.fcn(out)\n\n elif self.disc_type == 'conv':\n out = adaptive_avg_pool2d(input=out, output_size=1)\n out = self.conv(out)\n\n elif self.disc_type == 'patch':\n out = self.patch(out)\n\n return out\n\n","repo_name":"nikkkkhil/NTIRE21_SR","sub_path":"models_gan.py","file_name":"models_gan.py","file_ext":"py","file_size_in_byte":2605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"42358538311","text":"# Core imports\nimport pygame\nfrom sys import exit\nfrom constants import WIDTH, HEIGHT\nfrom snake import Snake\nfrom apple import Apple\nfrom environment import Environment\nfrom pathfinder import Pathfinder\n\n\nclass Game():\n\n def __init__(self, window, algorithm):\n self.window = window\n self.score = 0\n self.snake = [Snake(window.display, (WIDTH/2, HEIGHT/2))]\n self.apple = Apple(window.display, self.snake)\n\n # make graph\n self.graph = Environment(self.snake, self.apple)\n\n # init search algorithm\n self.algorithm = Pathfinder(\n self.snake[0].relative_pos, self.apple.relative_pos, self.graph, algorithm)\n\n # Run loop\n self.loop()\n\n self.window.quit()\n\n def loop(self):\n self.exit = False\n\n while not self.exit:\n self.check_events()\n\n self.check_game_state()\n\n self.snake_pathfinding()\n\n self.window.update_window(self)\n\n def snake_pathfinding(self):\n if self.algorithm.path is not None:\n next_pos, snake_head_pos = self.algorithm.path[0], self.snake[0].relative_pos\n\n if snake_head_pos[0] < next_pos[0] and snake_head_pos[1] == next_pos[1]:\n self.snake[0].update((1, 0))\n elif snake_head_pos[0] > 
next_pos[0] and snake_head_pos[1] == next_pos[1]:\n self.snake[0].update((-1, 0))\n\n elif snake_head_pos[0] == next_pos[0] and snake_head_pos[1] < next_pos[1]:\n self.snake[0].update((0, 1))\n elif snake_head_pos[0] == next_pos[0] and snake_head_pos[1] > next_pos[1]:\n self.snake[0].update((0, -1))\n\n self.algorithm.path = self.algorithm.path[1:]\n\n def player_movement(self, keys, snake_head):\n if keys[pygame.K_RIGHT] and snake_head.v[0] != -1:\n snake_head.update((1, 0))\n elif keys[pygame.K_LEFT] and snake_head.v[0] != 1:\n snake_head.update((-1, 0))\n elif keys[pygame.K_UP] and snake_head.v[1] != 1:\n snake_head.update((0, -1))\n elif keys[pygame.K_DOWN] and snake_head.v[1] != -1:\n snake_head.update((0, 1))\n\n def check_events(self):\n \"\"\"\n Check the events list one by one for input / exitting.\n \"\"\"\n # Check over incoming events\n for event in pygame.event.get():\n # Check for quit event\n if event.type == pygame.QUIT:\n self.exit = True\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n self.exit = True\n elif event.key == pygame.K_r:\n self.restart()\n\n # self.player_movement(pygame.key.get_pressed(), self.snake[0])\n\n def restart(self):\n # exit(0) # remove later\n # time.sleep(10)\n self.score = 0\n self.exit = False\n Game(self.window, self.algorithm.name)\n\n def check_game_state(self):\n if self.snake[0].check_state(self.apple, self.snake, self):\n self.score += 1\n self.apple = Apple(self.window.display, self.snake)\n self.snake.append(Snake(self.window.display, self.snake[0].prev))\n self.graph.update(self.snake, self.apple)\n\n self.algorithm.get_path(\n self.snake[0].relative_pos, self.apple.relative_pos, self.graph)\n","repo_name":"robertazzopardi/snakepathfinding","sub_path":"src/snake/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":3381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72720955040","text":"from api.headers import 
*\nfrom api.serializers import UserSerializer\n\nclass UserProfileSerializer(serializers.ModelSerializer):\n class Meta:\n model = UserProfile\n fields = ('id','display_name','mobile')\n\nclass UserViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows users to be viewed or edited.\n \"\"\"\n queryset = User.objects.all().order_by('-date_joined')\n serializer_class = UserSerializer\n\n@permission_classes((IsAuthenticated, ))\n@api_view(['post'])\ndef login(request):\n try:\n rd = request.data\n print(request)\n user = auth.authenticate(username=rd['username'],password=rd['password'])\n if user is not None:\n auth.login(request, user)\n if not request.session.session_key:\n session = request.session.create()\n else:\n session = request.session.session_key\n request.session.set_expiry(60*30)\n resp={}\n resp[\"session_key\"] = request.session\n resp[\"csrf\"]= django.middleware.csrf.get_token(request)\n resp['message']='Login Successful'\n return Response(resp,status=status.HTTP_200_OK)\n else:\n return Response({'error':{'code':5000,'message':'Incorrect username/password'}},status=status.HTTP_200_OK)\n except Exception as e:\n return Response({'error':{'code':5000,'message':'Error-{0}'.format(e)}},status=status.HTTP_200_OK)\n\n@api_view(['get'])\ndef logout(request):\n try:\n auth.logout(request)\n response = HttpResponse()\n response.delete_cookie('sessionid')\n response.delete_cookie('csrftoken')\n return Response({'message':'Success'},status=status.HTTP_200_OK)\n except Exception as e:\n return Response({'error':{'code':5000,'message':'Error-{0}'.format(e)}},status=status.HTTP_200_OK)\n\n@permission_classes((IsAuthenticated, ))\n@api_view(['post'])\ndef createuser(request):\n try:\n with transaction.atomic(savepoint=False):\n if request.method == 'POST':\n usrp = UserProfile()\n usrp.create(request.data)\n return Response({'message':'User created'},status=status.HTTP_200_OK)\n else:\n return Response({'error':{'code':5000,'message':'Error -> 
{0}'.format('Invalid Request')}},status=status.HTTP_200_OK)\n except Django2Exception as e:\n return Response({'error':{'code':e.code,'message':'Error -> {0}'.format(e.message)}},status=status.HTTP_200_OK)\n except Exception as e:\n return Response({'error':{'code':5000,'message':'Error -> {0}'.format(e)}},status=status.HTTP_200_OK)\n\n@api_view(['get'])\ndef getUsers(request):\n try:\n queryset = UserProfile.objects.all()\n users = UserProfileSerializer(queryset,many=True)\n return Response(users.data,status=status.HTTP_200_OK)\n except DjangoException as e:\n return Response({'error':{'code':5000,'message':'Error-{0}'.format(e)}},status=status.HTTP_200_OK)\n except Exception as e:\n return Response({'error':{'code':5000,'message':'Error-{0}'.format(e)}},status=status.HTTP_200_OK)","repo_name":"abhi-72/Django2","sub_path":"api/viewset.py","file_name":"viewset.py","file_ext":"py","file_size_in_byte":3150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"43583475544","text":"from dataclasses import dataclass\n\n\n@dataclass\nclass PersonName:\n _male: bool\n _weight: int = 1\n\n prefix: bool = False\n first_name: bool = True\n middle_name: bool = False\n last_name: bool = True\n suffix: bool = False\n\n\n# Possible name \"parts\" that could be used as template keys and imported from localized module\nPOSSIBLE_KEYS = [\n \"first_name_male\",\n \"last_name_male\",\n \"middle_name_male\",\n \"suffix_male\",\n \"prefix_male\",\n \"first_name_female\",\n \"last_name_female\",\n \"middle_name_female\",\n \"suffix_female\",\n \"prefix_female\",\n \"first_name\",\n \"last_name\",\n \"middle_name\",\n \"prefix\",\n \"suffix\",\n]\n","repo_name":"itiievskyi/pyker","sub_path":"pyker/generators/person/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"28100004948","text":"from datetime 
import datetime\n\nimport scrapy\nfrom scrapy.linkextractors import LinkExtractor\n\nfrom scrapper.page_type_recognizer import PageTypeRecognizer\nfrom scrapper.page_rank import PageRankBuilder\nfrom scrapper.page_dup_filter import PageDupFilter\nfrom scrapper.settings import crawling_db\n\n\nclass PressballSpider(scrapy.spiders.CrawlSpider):\n name = 'pressball'\n start_urls = ['https://www.pressball.by',\n 'https://www.pressball.by/news',\n 'https://www.pressball.by/pbonline',\n 'https://www.pressball.by/articles']\n\n allowed_domains = ['pressball.by']\n\n _ARTICLE_PATTERN_LIST = [\n r'pressball\\.by\\/news\\/\\w+\\/\\d+\\/?$',\n r'pressball\\.by\\/articles\\/\\w+\\/\\w+\\/\\d+\\/?$',\n r'pressball\\.by\\/pbonline\\/\\w+\\/\\d+\\/?$']\n _OTHER_PAGES_PATTERN_LIST = [\n 'pressball\\.by\\/news\\/?\\?page=\\d+$',\n r'pressball\\.by\\/articles\\/?\\?p=\\d+$',\n r'pressball\\.by\\/pbonline\\/?\\?p=\\d+$'\n ]\n\n def __init__(self, *a, **kw):\n super(PressballSpider, self).__init__(*a, **kw)\n\n self._link_extractor = LinkExtractor(allow=tuple(self._ARTICLE_PATTERN_LIST + self._OTHER_PAGES_PATTERN_LIST), deny=())\n self._page_type_recognizer = PageTypeRecognizer(self._ARTICLE_PATTERN_LIST)\n self._page_dup_filter = PageDupFilter(self._page_type_recognizer)\n self._page_rank_builder = PageRankBuilder(self._page_type_recognizer,\n self._link_extractor,\n self._page_dup_filter)\n\n def parse(self, response):\n self._save_meta(response)\n response.meta['page_type'] = self._page_type_recognizer.recognize(response.url)\n\n urls = [link.url for link in self._link_extractor.extract_links(response)]\n new_urls = [link for link in urls if not self._page_dup_filter.url_seen_in_session(link)]\n\n page_rank, history_rank = self._page_rank_builder.build_combined_page_rank(response)\n if page_rank > 0:\n for link in new_urls:\n self._page_dup_filter.update_filter(link)\n yield scrapy.Request(link, meta={'history_rank': history_rank})\n\n def _save_meta(self, response):\n referer = 
response.request.headers.get('Referer', None)\n referer_str = referer.decode() if referer is not None else None\n\n crawling_db['pressball_crawl_stat'].insert_one({\n 'url': response.url,\n 'latency': response.meta['download_latency'],\n 'links': [link.url for link in self._link_extractor.extract_links(response)],\n 'time': datetime.now(),\n 'referer': referer_str\n })\n","repo_name":"tarnenok/pressball-crawler","sub_path":"scrapper/spiders/pressball_spider.py","file_name":"pressball_spider.py","file_ext":"py","file_size_in_byte":2701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"5815687178","text":"\"\"\"\n 0218 Manipulator\n\"\"\"\nfrom hand import Hand\nimport numpy as np\nimport gym\nfrom scipy.spatial.transform import Rotation as R\n\n\nclass HandManipulator(object):\n\n @property\n def hand_center(self):\n r = R.from_euler('xyz', self.state[3:6])\n # m = r.as_dcm() # scipy<1.4\n m = r.as_matrix() # scipy>=1.4\n offset = np.array([1,0,0])\n return self.state[:3]+m@offset\n\n def set_action_space(self, action_dim):\n SCALE = np.ones(action_dim) * 0.025\n self.action_LB = -SCALE\n self.action_UB = SCALE\n self.SCALE = SCALE\n self.action_space = gym.spaces.Box(low=self.action_LB, high=self.action_UB, dtype=np.float32)\n\n def __init__(self, path, action_dim):\n self.action_dim = action_dim\n self.hand = Hand(path, 0.01, use_joint_limit=False, use_quat=True, use_eigen=False) # SCALE\n self.hand.sample(13000)\n # self.hand.sample(13000, re_sample=True)\n lb, ub = self.hand.lb_ub()\n self.joints_LB = np.array(lb)\n self.joints_UB = np.array(ub)\n self.set_action_space(action_dim)\n self.state = None\n self.rot_pose = None\n\n def bound_state(self, action):\n full_action = action.copy()\n new_state = (self.state+full_action)\n new_state[6:] = np.clip(new_state[6:], self.joints_LB, self.joints_UB) # The bound of internal joints\n # The bound of Euler Agnle (RYP) ([-pi,pi],[-pi/2,pi/2],[-pi,pi])\n 
new_state[3] = (new_state[3]+np.pi)%(2*np.pi) - np.pi\n new_state[4] = np.clip(new_state[4],-np.pi/2, np.pi/2)\n new_state[5] = (new_state[5]+np.pi)%(2*np.pi) - np.pi\n return new_state\n \n def execute(self, action):\n self.state = self.bound_state(action)\n new_quat = self._euler2quat(self.state[3:6])\n ex_dofs = np.array([*self.state[:3], *new_quat])\n self.hand.forward_kinematics(ex_dofs, self.state[6:])\n return self.get_euler_state()\n \n def reset(self, joints):\n if joints.shape[0] == self.action_dim:\n self.state = joints.copy()\n ex_dofs = np.zeros(7)\n ex_dofs[:3] = self.state[:3]\n ex_dofs[3:7] = self._euler2quat(self.state[3:6])\n ######\n self.hand.forward_kinematics(ex_dofs, self.state[6:])\n else:\n self.state = np.zeros(self.action_dim)\n self.state[:3] = joints[:3]\n self.state[3:6] = R.from_quat([*joints[4:7], joints[3]]).as_euler('xyz')\n self.state[6:] = joints[7:]\n ######\n self.hand.forward_kinematics(joints[:7], self.state[6:])\n return self.getPointCloud()\n\n def get_euler_state(self):\n # Dim 24 (3Translation + 3Rotation + 18DoA)\n ret = self.state.copy()\n return ret\n \n def get_quat_state(self):\n # Dim:25 (3Translation + 4Rotation + 18DoA)\n ret = np.zeros(25)\n ret[7:] = self.state[6:]\n ret[:3] = self.state[:3]\n ret[3:7] = self._euler2quat(self.state[3:6])\n return ret\n\n def get_state(self): # old entry\n ret = self.state.copy()\n ret[:3] = 0\n ret[3:6] = (ret[3:6]/np.pi) # remove pi1\n return ret\n \n def get_modified_state(self): # stop2\n ret = self.state.copy()\n ret[:3] = 0\n ret[3:6] = (ret[3:6]/np.pi) # remove pi1\n ret[6:] = (ret[6:]/np.pi) # remove pi2\n return ret\n\n def getPointCloud(self):\n points = self.hand.sample_fwk()\n return points\n\n def getPointNormal(self):\n normals = self.hand.sample_fwk_normal()\n return normals\n\n def get_label(self, idx=None):\n if idx is not None:\n return self.hand.labels[idx.astype('int32')]\n else:\n return self.hand.labels.copy()\n\n def get_part_label(self, idx=None):\n if idx 
is not None:\n return self.hand.part_labels[idx.astype('int32')]\n else:\n return self.hand.part_labels.copy()\n \n def get_internal_label(self, idx=None):\n if idx is not None:\n return self.hand.internal_labels[idx.astype('int32')]\n else:\n return self.hand.internal_labels.copy()\n\n def get_offset(self, euler):\n if euler.shape[0] == 3:\n r = R.from_euler('xyz', euler)\n else:\n r = R.from_quat([euler[1],euler[2],euler[3],euler[0]])\n m = r.as_matrix() \n offset = np.array([1,0,0])\n return m@offset\n \n def _euler2quat(self, euler):\n new_quat = R.from_euler('xyz', euler).as_quat()\n return [new_quat[3], new_quat[0], new_quat[1], new_quat[2]]\n\n def random_action(self):\n action = (2*np.random.rand(self.action_dim)-1) * self.SCALE\n return action\n\n def hacker_action(self, tar_dof, add_noise=False):\n if tar_dof is None:\n return self.random_action()\n action = np.zeros(self.action_dim)\n action[:3] = (tar_dof[:3]-self.state[:3])\n first = np.zeros(25)\n if np.sum(np.abs(action[:3])) < 0.02:\n action[-18:] = (tar_dof[-18:]-self.state[-18:])\n else:\n action[-18:] = (first[-18:]-self.state[-18:])\n if add_noise:\n noise = 0.1 * (2*np.random.rand(self.action_dim)-1) * self.SCALE\n action += noise\n action = np.clip(action, self.action_LB, self.action_UB)\n return action\n\n","repo_name":"qijinshe/IBS-Grasping","sub_path":"src/ibs_env/scripts/manipulator_euler.py","file_name":"manipulator_euler.py","file_ext":"py","file_size_in_byte":5393,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"54"} +{"seq_id":"13234919822","text":"from __future__ import division\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport os\nimport numpy as np\nfrom sklearn.decomposition import PCA\nfrom sklearn.grid_search import GridSearchCV\nfrom sklearn.neighbors import KernelDensity\nfrom scipy import stats\n\npath = os.path.expanduser(\"~/GitHub/EcoEvoMet\")\n\n\ndef CV_KDE(oneD_array):\n # remove +/- inf\n oneD_array = 
oneD_array[np.logical_not(np.isnan(oneD_array))]\n grid = GridSearchCV(KernelDensity(),\n {'bandwidth': np.logspace(0.1, 5.0, 30)},\n cv=20) # 20-fold cross-validation\n grid.fit(oneD_array[:, None])\n x_grid = np.linspace(np.amin(oneD_array), np.amax(oneD_array), 10000)\n kde = grid.best_estimator_\n pdf = np.exp(kde.score_samples(x_grid[:, None]))\n # returns grod for x-axis, pdf, and bandwidth\n return_tuple = (x_grid, pdf, kde.bandwidth)\n return return_tuple\n\n\ndef resource_fig():\n df = pd.read_csv(path + '/alpha_data_final.txt', sep = '\\t', header = 'infer', index_col = 0)\n\n KDE = CV_KDE(np.log10(df.alpha_mag.values))\n\n print(\"Bandwidth = \" + str(KDE[2]))\n print(\"CV = \" + str(np.std(np.log10(df.alpha_mag.values)) / np.mean(np.log10(df.alpha_mag.values)) ))\n\n fig = plt.figure()\n plt.plot(KDE[0], KDE[1], linewidth=3, alpha=0.5)#, label='bw=%.2f' % KDE[2])\n plt.xlabel('Magnitude of change in \\nresource usage strategy ' + r'$\\| \\Delta\\vec{\\alpha} \\|, \\; \\mathrm{log}_{10}$', fontsize = 16)\n plt.ylabel('Frequency' , fontsize = 16)\n\n\n fig_name = path + '/resource_kde.png'\n fig.savefig(fig_name, bbox_inches = \"tight\", pad_inches = 0.4, dpi = 600)\n plt.close()\n\n\ndef pca_dist_fig():\n gene_by_pop_df = pd.read_csv(path + '/m6_gene_by_pop.txt', sep = '\\t', header = 'infer', index_col = 0)\n X = gene_by_pop_df.values - np.mean(gene_by_pop_df.values, axis=0)\n pca = PCA(n_components=2)\n X_out = pca.fit_transform(X)\n df_pca = pd.DataFrame(data=X_out, index=gene_by_pop_df.index.values)\n gens = [int(x.split('_')[2]) for x in df_pca.index.values.tolist() ]\n df_pca['Generations'] = gens\n m_df_pca = df_pca[df_pca.index.str.contains('_m_')]\n M_df_pca = df_pca[df_pca.index.str.contains('_M_')]\n\n df = pd.read_csv(path + '/time_dist.txt', sep = '\\t', header = 'infer', index_col = 0)\n df = df[np.isfinite(df['Distance'])]\n\n fig = plt.figure()\n plt.subplot(211)\n plt.scatter(M_df_pca.Generations.values, M_df_pca[0].values, label=\"Ara-6 
major lineage\", color='#ff7f0e')\n plt.scatter(m_df_pca.Generations.values, m_df_pca[0].values, label=\"Ara-6 minor lineage\", color='#1f77b4')\n plt.ylabel('PCA 1 (' + str(round(pca.explained_variance_ratio_[0],3)*100) + '%)' , fontsize = 12)\n plt.xlim(9500, 65000)\n plt.legend(loc='center right')\n plt.text(10000, 4, r\"$\\mathbf{a)}$\", fontsize=12)\n\n\n plt.subplot(212)\n plt.plot(df.Time.values, df.Distance_m.values, color='#1f77b4')\n plt.plot(df.Time.values, df.Distance_M.values, color='#ff7f0e')\n plt.xlabel('Generation', fontsize = 16)\n #plt.ylabel('Between-clade \\nreaction network ' , fontsize = 12)\n plt.ylabel( r'$\\frac{\\mathrm{between-lineage \\; graph\\;distance}}{\\mathrm{within-lineage \\; graph\\;distance}}$', fontsize = 13)\n plt.hlines(y=1, xmin=9800, xmax=65000, color='k', linestyle=':', alpha = 0.8, zorder=1)\n plt.xlim(9500, 65000)\n plt.ylim(0.6, 2)\n plt.text(10000, 1.85, r\"$\\mathbf{b)}$\", fontsize=12)\n\n fig_name = path + '/pca_dist_time.png'\n fig.savefig(fig_name, bbox_inches = \"tight\", pad_inches = 0.4, dpi = 600)\n plt.close()\n\n\ndef alpha_network():\n df_alpha = pd.read_csv(path + '/alpha_data_final.txt', sep = '\\t', header = 'infer', index_col = 0)\n df_k = pd.read_csv(path + '/directed_rxn_k.txt', sep = '\\t', header = 'infer', index_col = 0)\n df_c = pd.read_csv(path + '/directed_rxn_c.txt', sep = '\\t', header = 'infer', index_col = 0)\n df_d = pd.read_csv(path + '/directed_rxn_d_ex_rxns.txt', sep = '\\t', header = 'infer', index_col = 0)\n\n df_k_merged = df_alpha.merge(df_k, how='outer', left_index=True, right_index=True)\n df_c_merged = df_alpha.merge(df_c, how='outer', left_index=True, right_index=True)\n df_d_merged = df_alpha.merge(df_d, how='outer', left_index=True, right_index=True)\n\n df_d_merged = df_d_merged.dropna(subset=['dist_mean', 'alpha_mag'])\n\n\n fig = plt.figure()\n x = df_d_merged.dist_mean.values\n y = df_d_merged.alpha_mean.values\n plt.scatter(x, y, color='#ff7f0e')\n\n slope, intercept, 
r_value, p_value, std_err = stats.linregress(x,y)\n print(\"slope is \" + str(slope))\n print(\"r2-value is \" + str(r_value **2))\n print(\"p-value is \" + str(p_value))\n\n predict_y = intercept + slope * x\n pred_error = y - predict_y\n degrees_of_freedom = len(x) - 2\n residual_std_error = np.sqrt(np.sum(pred_error**2) / degrees_of_freedom)\n plt.plot(x, predict_y, 'k-')\n\n\n fig_name = path + '/test_alpha_network.png'\n fig.savefig(fig_name, bbox_inches = \"tight\", pad_inches = 0.4, dpi = 600)\n plt.close()\n\n\n#alpha_network()\n#resource_fig()\npca_dist_fig()\n","repo_name":"wrshoemaker/EcoEvoMet","sub_path":"make_figs.py","file_name":"make_figs.py","file_ext":"py","file_size_in_byte":5058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"25812278441","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 22 14:21:47 2022\n\n@author: Tugce Su\n\"\"\"\n\n##the covarage experiment\n\n##0.99 top10 \nimport pandas as pd\nc_akid= pd.read_csv(\"C:/Users/Tugce Su/OneDrive - Vrije Universiteit Amsterdam/Desktop/AKID/4cell-akid-result.txt\", delimiter=r\"\\s+\")\nc_akid['kinase_domain'] = c_akid['kinase_domain'].str.replace(\"_Hsap_domain1\", '')\nc_akid['kinase_domain']= c_akid['kinase_domain'].str.upper()\nkinase= c_akid[\"kinase_domain\"].unique()\npsp_all= c_akid[\"peptide\"].unique()\nc9_akid= c_akid.loc[c_akid[\"score\"] >= 0.99].reset_index()\ntop10= c9_akid.groupby(by=\"peptide\")[\"score\"].nlargest(10)\ntop_10=pd.DataFrame(top10)\npep=c9_akid[\"peptide\"].unique()\nkin= c9_akid[\"kinase_domain\"].unique()\nind=[]\nfor i in top_10.index:\n #print(i[1])\n ind.append(i[1])\n\nc10_akid= c9_akid.iloc[ind]\n \n","repo_name":"tcobanoglu/phosphoproteomics","sub_path":"akid_maxquant_experiments/coverage_exp.py","file_name":"coverage_exp.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} 
+{"seq_id":"11083643297","text":"def changeToDec(number):\n row = number[:7]\n column = number[7:11]\n\n rowSeat = 0\n columnSeat = 0\n\n for i in range(0, 7):\n if row[i] == 'B':\n rowSeat += 2 ** (6 - i)\n\n for i in range(0, 3):\n if column[i] == 'R':\n columnSeat += 2 ** (2 - i)\n\n return rowSeat * 8 + columnSeat\n\n\ndef searchSeat(list):\n list.sort()\n for i in range(0, len(list)):\n if i != len(list) - 1 and list[i + 1] - list[i] == 2:\n return list[i] + 1\n\n return -1\n\n\nlistOfData = []\nlistOfSeat = []\n\nwith open('data5.txt') as f:\n for i in f:\n listOfData.append(i)\n\nmaxSeat = changeToDec(listOfData[0])\nlistOfSeat.append(changeToDec(listOfData[0]))\nfor i in listOfData[1:]:\n listOfSeat.append(changeToDec(i))\n if changeToDec(i) > maxSeat:\n maxSeat = changeToDec(i)\n# print(changeToDec('FFFBBBFRRR'))\nprint(maxSeat)\n\nprint(searchSeat(listOfSeat))\n","repo_name":"Kaseed/AdventOFCode","sub_path":"Day5.py","file_name":"Day5.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"38612939208","text":"from itertools import groupby\nfrom datetime import datetime, timedelta\n\nfrom odoo import api, fields, models, _\nfrom odoo.exceptions import UserError, ValidationError, AccessError\nfrom odoo.tools import float_is_zero, float_compare, DEFAULT_SERVER_DATETIME_FORMAT\nfrom odoo.tools.misc import formatLang\nfrom odoo.tools import html2plaintext\nimport odoo.addons.decimal_precision as dp\n\n\nclass GlobalSearch(models.Model):\n _name = 'global.search'\n _description = 'Global Search'\n\n\n def _default_user_ids(self):\n return [(6, 0, [self._uid])]\n\n def erro_fun(self):\n for search in self:\n raise AccessError(_('Something went wrong!!!'))\n\n name = fields.Char('Global Search Name')\n user_id = fields.Many2many('res.users', string='User', required=False,domain=\"[('share', '=', False)]\",default=_default_user_ids)\n model_id = 
fields.Many2one('ir.model', string='Model', required=True, copy=False, default=lambda self: self.env['ir.model'].sudo().search([],limit=1))\n field_ids = fields.Many2many('ir.model.fields',domain=\"[('ttype','in',['char','many2one','selection','text'])]\")\n\n @api.onchange('model_id')\n def onchange_model_id(self):\n for field in self:\n if field.model_id:\n for field_ in field.field_ids:\n if field.model_id != field_.model_id :\n # reset task when changing project\n field.field_ids = False\n return {'domain': {\n 'field_ids': [('model_id', '=', field.model_id.id)]\n }}\n\n","repo_name":"Doscaal/Dentanor","sub_path":"Doscaal/bi_global_search/models/global_search.py","file_name":"global_search.py","file_ext":"py","file_size_in_byte":1583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"11618208295","text":"import os\n\nimport yaml\nimport numpy as np\nimport pandas as pd\nimport joblib\nimport json\nimport sklearn.metrics as metrics\n\n\ndef evaluate():\n params = yaml.safe_load(open(\"params.yaml\"))[\"train\"]\n model = joblib.load(\"resources/out_train/model.joblib\")\n\n df_train = pd.read_csv(\"resources/out_prepare/train.csv\")\n X_train = df_train[params[\"feature_columns\"]]\n y_train = df_train[\"target\"]\n df_test = pd.read_csv(\"resources/out_prepare/test.csv\")\n X_test = df_test[params[\"feature_columns\"]]\n y_test= df_test[\"target\"]\n\n y_train_pred_prob = model.predict_proba(X_train)\n y_train_pred = np.argmax(y_train_pred_prob, axis=1)\n y_test_pred_prob = model.predict_proba(X_test)\n y_test_pred = np.argmax(y_test_pred_prob, axis=1)\n\n train_accuracy = metrics.accuracy_score(y_train, y_train_pred)\n test_accuracy = metrics.accuracy_score(y_test, y_test_pred)\n train_f1 = metrics.f1_score(y_train, y_train_pred, average=\"micro\")\n test_f1 = metrics.f1_score(y_test, y_test_pred, average=\"micro\")\n\n os.makedirs(\"resources/out_evaluate/\", exist_ok=True)\n with 
open(\"resources/out_evaluate/scores.json\", \"w\") as fd:\n json.dump(\n {\n \"train_accuracy\": train_accuracy, \n \"test_accuracy\": test_accuracy,\n \"train_f1\": train_f1,\n \"test_f1\": test_f1,\n }, \n fd, \n indent=4,\n )\n\n\nif __name__ == \"__main__\":\n evaluate()","repo_name":"yisaienkov/ai-tracking-template","sub_path":"src/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"1236723581","text":"from django.urls import path\nfrom blogPost.api.views import (\n PostCreateApiView,\n\t\tPostUpdateApiView,\n\t\tPostDeleteApiView,\n\t\tPostDetailApiView,\n\t\tPostListApiView\n \t)\n\n\nurlpatterns = [\n \n path('',PostListApiView.as_view(),name='apiHome'),\n path('home/',PostListApiView.as_view(),name='apiHome'),\n path('detail//',PostDetailApiView.as_view(),name='postApiDetail'),\n path('create/',PostCreateApiView.as_view(),name='postApiCreate'),\n path('delete//',PostDeleteApiView.as_view(),name='postApiDelete'),\n path('update//',PostUpdateApiView.as_view(),name='postApiUpdate')\n\n]\n\n\n\n","repo_name":"minhaz044/Django-Simple-Blog-With-Rest-API","sub_path":"blogPost/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71307444321","text":"# Some of the USA share their names with rivers. 
We have defined two variables\n# with respective place names.\n\n# Print out a new set with river names that don't overlap with given states.\n\n# Sample Input 1:\n# Alabama Missouri Mississippi\n# Georgia Alaska Missouri\n\n# Sample Output 1:\n# {'Alabama', 'Mississippi'}\n\n# work with these variables\nrivers = set(input().split())\nstates = set(input().split())\nrivers.difference_update(states)\nprint(rivers)","repo_name":"christiantriadataro/PYTHON-TRACK-IN-HYPERSKILL","sub_path":"Computer science/Programming languages/Python/Working with data/Collections/Sets/Set operations/Rivers.py","file_name":"Rivers.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"11255582780","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.home),\n path('login/', views.login_fct),\n path('register/', views.register_fct),\n path('logout/', views.logout),\n path('upvotes/', views.upvotes, name=\"upvotes\"),\n path('downvotes/', views.downvotes, name=\"downvotes\"),\n path('delete/', views.delete, name=\"delete\"),\n]","repo_name":"rcheiko/piscine_jango_python","sub_path":"day06/day06/ex/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74337170081","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Sep 24 23:49:13 2022\n\n@author: ilirsheraj\n\"\"\"\n# Not the best way to open a file, its just for educational purposes\n\njabber = open(\"Jabberwocky.txt\", \"r\")\n\n# # Iterate over\n# for line in jabber:\n# print(line)\n \n# jabber.close()\n\n# # To remove the new empty line: Remove the default \\n\n# for line in jabber:\n# print(line, end=\"\")\n \n# jabber.close()\n\n# The other way is to strip off the line\nfor line in jabber:\n# print(line.strip(), end=\" \")\n 
print(line.strip())\n# print(len(line))\n\n# Closing is essential, especially when writing data as it may be lost\njabber.close()","repo_name":"ilirsheraj/PythonMasterClass","sub_path":"Files/read_poem.py","file_name":"read_poem.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"12346114100","text":"from selenium import webdriver\nimport time\n\ndriver = webdriver.Firefox()\n\ndriver.get(\"https://tsars.company.site/products/OBRAZETS-Cherny-top-p355216933\")\n\ntime.sleep(2)\njs2 = \"\"\"\nEcwid.Cart.addProduct(355216933)\n\"\"\"\ndriver.execute_script(js2)\n\n\"\"\"
ОБРАЗЕЦ. Черный топ\"\"\"\n\ndriver.get(\"https://tsars.company.site/products/cart\")\ntime.sleep(2)\ncheck_js = \"\"\"Ecwid.Cart.get(function(cart){\n if(cart.items[0].product.id == 355216933){\n \talert(\"Добавленная позиция находится в корзине.\");\n }\n});\"\"\"\n\ndriver.execute_script(check_js)\n","repo_name":"tsar0720/test","sub_path":"task3/task3_selenium.py","file_name":"task3_selenium.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74541418081","text":"from functools import reduce\n\n\nhex_convertor = {'0': '0000', '1': '0001', '2': '0010', '3': '0011', '4': '0100', '5': '0101', '6': '0110',\n '7': '0111', '8': '1000', '9': '1001', 'A': '1010', 'B': '1011', 'C': '1100', 'D': '1101',\n 'E': '1110', 'F': '1111'}\n\n\ndef convert_from_binary(value):\n bits = [int(x) for x in value]\n number = 0\n for i in range(0, len(bits)):\n number += bits[i] * 2 ** (len(bits) - i - 1)\n return number\n\n\ndef get_packet(line, from_index, versions, numbers):\n version = convert_from_binary(line[from_index:from_index+3])\n versions.append(version)\n type_id = convert_from_binary(line[from_index+3:from_index+6])\n from_index += 6\n if type_id == 4:\n # number\n number_bits = ''\n while True:\n bits = line[from_index:from_index + 5]\n number_bits += bits[1:]\n from_index += 5\n if bits[0] == '0':\n break\n number = convert_from_binary(number_bits)\n numbers.append(number)\n # print(number)\n else:\n # operation\n length_type_bit = line[from_index:from_index+1]\n from_index += 1\n child_numbers = []\n if length_type_bit == '0':\n length_bits = line[from_index:from_index + 15]\n from_index += 15\n length = convert_from_binary(length_bits)\n new_index = from_index\n while new_index < from_index + length:\n new_index = get_packet(line, new_index, versions, child_numbers)\n from_index = new_index\n else:\n number_of_subpackets_bits = line[from_index:from_index + 11]\n 
from_index += 11\n number_of_subpackets = convert_from_binary(number_of_subpackets_bits)\n for si in range(0, number_of_subpackets):\n from_index = get_packet(line, from_index, versions, child_numbers)\n if type_id == 0:\n numbers.append(sum(child_numbers))\n elif type_id == 1:\n numbers.append(reduce((lambda x, y: x*y), child_numbers))\n # numbers.append(np.prod(child_numbers)) -> this gives wrong (negative) answer\n elif type_id == 2:\n numbers.append(min(child_numbers))\n elif type_id == 3:\n numbers.append(max(child_numbers))\n elif type_id == 5:\n numbers.append(1 if child_numbers[0] > child_numbers[1] else 0)\n elif type_id == 6:\n numbers.append(1 if child_numbers[0] < child_numbers[1] else 0)\n elif type_id == 7:\n numbers.append(1 if child_numbers[0] == child_numbers[1] else 0)\n else:\n raise Exception('Unexpected type_id')\n return from_index\n\n\ndef run():\n hex_lines = [x.strip() for x in open('input.txt', 'r').readlines()]\n for hex_line in hex_lines:\n print(\"-----\")\n line = ''\n for hex_number in hex_line:\n line += hex_convertor[hex_number]\n versions = []\n numbers = []\n from_index = get_packet(line, 0, versions, numbers)\n print(versions)\n print(sum(versions))\n print(numbers)\n","repo_name":"dpokluda/Python","sub_path":"advent-of-code/2021/Day 16/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":3144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"22317788211","text":"from selenium import webdriver\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom selenium.common.exceptions import NoSuchElementException\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.common.exceptions import TimeoutException\r\nimport random\r\nfrom datetime import datetime\r\nfrom ToolBox import write2csv\r\nfrom ToolBox import 
text2list\r\nimport os.path\r\nfrom os import path\r\nimport time\r\nimport re\r\nfrom ToolBox import write2text\r\nimport csv\r\nimport codecs\r\n\r\ntic = time.perf_counter()\r\nnow = datetime.now() # current date and time\r\n\r\nyear = now.strftime(\"%Y\")\r\nyear = str(int(year)-1911)\r\nmonth = now.strftime(\"%m\")\r\nday = now.strftime(\"%d\")\r\n\r\ndate_ = year + '/' + month + '/' + day\r\nprint(date_)\r\n\r\n# setup the webdriver\r\nPATH = \"C:\\Program Files (x86)\\chromedriver.exe\"\r\n\r\noptions = webdriver.ChromeOptions()\r\noptions.add_argument(\"headless\")\r\ndriver = webdriver.Chrome(PATH, options=options)\r\n\r\nfile_date = year + '_' + month + '_' + day\r\nInterest_list = ['stock', 'stop date Year', 'stop date Month', 'stop date Day', 'reason', 'short percentage', 'short/borrow', 'Big buyer', 'remaining days']\r\n\r\ndriver.get(\"https://www.twse.com.tw/zh/page/trading/exchange/MI_MARGN.html\")\r\ntime.sleep(3)\r\n\r\ntype_stock = driver.find_element_by_xpath('//*[@id=\"main-form\"]/div/div/form/select')\r\n\r\nwith codecs.open('./stock_data/stock_class1.csv', 'w', encoding=\"utf-8\") as csvfile:\r\n writer = csv.writer(csvfile)\r\n for i in range(7, 38):\r\n data = []\r\n type_stock.click()\r\n time.sleep(1)\r\n stock_type = driver.find_element_by_xpath('//*[@id=\"main-form\"]/div/div/form/select/option[' + str(i) + ']')\r\n print(stock_type.text)\r\n data.append(stock_type.text)\r\n stock_type.click()\r\n time.sleep(1)\r\n button = driver.find_element_by_xpath('//*[@id=\"main-form\"]/div/div/form/a')\r\n button.click()\r\n time.sleep(3)\r\n\r\n showAll = driver.find_element_by_xpath('//*[@id=\"report-table_length\"]/label/select')\r\n showAll.click()\r\n time.sleep(1)\r\n All = driver.find_element_by_xpath('//*[@id=\"report-table_length\"]/label/select/option[5]')\r\n All.click()\r\n time.sleep(1)\r\n\r\n list_len = len(driver.find_elements_by_xpath('//*[@id=\"report-table\"]/tbody/tr'))\r\n for j in range(2, list_len):\r\n stock_num = 
driver.find_element_by_xpath('//*[@id=\"report-table\"]/tbody/tr[' + str(j) + ']/td[1]').text\r\n data.append(stock_num)\r\n print(stock_num)\r\n writer.writerow([item.encode('utf8') for item in data])\r\n\r\n\r\nnum_List = len(driver.find_elements_by_xpath('//*[@id=\"report-table\"]/tbody/tr'))\r\ndata = []\r\n","repo_name":"josephorng/StockBacktesting","sub_path":"Get_official_class.py","file_name":"Get_official_class.py","file_ext":"py","file_size_in_byte":2784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"667952673","text":"# Author: Javad Amirian\n# Email: amiryan.j@gmail.com\n\nfrom abc import ABC, abstractmethod\n\nfrom crowdrep_bot.crowd_sim.grouping_behavior import GroupingBehavior\nfrom crowdrep_bot.scenarios.scenario import Scenario\nimport numpy as np\n\n\nclass SimulationScenario(Scenario, ABC):\n \"\"\"Any non-real scenario should inherit this class\"\"\"\n\n def __init__(self, **kwargs):\n self.ped_radius = kwargs.get(\"pedRadius\", 0.25)\n self.n_peds = kwargs.get(\"numPeds\", 0)\n self.fps = 10 # default fps for simulated scenarios\n self.group_ids = []\n self.grouping_behavior_handler = None\n super(SimulationScenario, self).__init__()\n\n @abstractmethod\n def setup(self, **kwargs):\n super(SimulationScenario, self).setup()\n\n def step_crowd(self, dt):\n self.world.sim.doStep(dt)\n for ii in range(self.n_peds):\n try:\n p = self.sim.getCenterNext(ii)\n v = self.sim.getCenterVelocityNext(ii)\n # apply inertia\n v_new = np.array(v) * (1 - self.inertia_coeff) + self.crowds[ii].vel * self.inertia_coeff\n p_new = self.crowds[ii].pos + v_new * dt\n self.set_ped_position(ii, p_new)\n self.set_ped_velocity(ii, v_new)\n except Exception:\n raise ValueError('exception occurred in running crowd sim')\n\n def step_robots(self, dt, lidar_enabled):\n super(SimulationScenario, self).step_robots(dt, lidar_enabled)\n\n def step(self, dt, lidar_enabled, save=False):\n if not self.world.pause:\n 
self.world.sim.doStep(dt)\n\n group_vels = self.grouping_behavior_handler.step(crowds=self.world.crowds)\n\n for ii in range(self.n_peds):\n p_new = self.world.sim.getCenterNext(ii)\n v = self.world.sim.getCenterVelocityNext(ii)\n\n # apply inertia\n v_new = np.array(v) * (1 - self.world.inertia_coeff) \\\n + self.world.crowds[ii].vel * self.world.inertia_coeff\n # p_new = self.world.crowds[ii].pos + v_new * dt\n\n v_new += group_vels[ii]\n\n self.world.set_ped_position(ii, p_new)\n self.world.set_ped_velocity(ii, v_new)\n\n self.world.crowds[ii].step(dt)\n\n for jj in range(self.n_peds, self.n_peds + self.n_robots):\n self.world.robots[jj - self.n_peds].vel = np.array(self.world.sim.getCenterVelocityNext(jj))\n\n if self.world.robots[jj - self.n_peds].pos[0] > self.world.world_dim[0][1]:\n self.world.robots[jj - self.n_peds].pos[0] = self.world.world_dim[0][0]\n self.step_robots(dt, lidar_enabled)\n\n return super(SimulationScenario, self).step(dt, save)\n","repo_name":"amiryanj/crowd-imputation","sub_path":"src/crowdrep_bot/scenarios/simulation_scenario.py","file_name":"simulation_scenario.py","file_ext":"py","file_size_in_byte":2794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74228842722","text":"from __future__ import print_function, division\nimport numpy as np\nfrom pyscf.nao.log_mesh import log_mesh\nfrom pyscf.nao.m_libnao import libnao\nfrom ctypes import POINTER, c_double, c_int, byref\n\n# phia,la,ra,phib,lb,rb,rcen,lbdmxa,rhotb,rr,nr,jtb,clbdtb,lbdtb,nterm,ord,pcs,rho_min_jt,dr_jt\n\n\"\"\"\n Reduction of the products of two atomic orbitals placed at some distance\n [1] Talman JD. Multipole Expansions for Numerical Orbital Products, Int. J. Quant. Chem. 
107, 1578--1584 (2007)\n ngl : order of Gauss-Legendre quadrature\n\"\"\"\n\nlibnao.prdred.argtypes = (\n POINTER(c_double), # phia(nr)\n POINTER(c_int), # la\n POINTER(c_double), # ra(3)\n POINTER(c_double), # phib(nr)\n POINTER(c_int), # lb\n POINTER(c_double), # rb(3)\n POINTER(c_double), # rcen(3)\n POINTER(c_int), # lbdmxa\n POINTER(c_double), # rhotb(nr,nterm)\n POINTER(c_double), # rr(nr)\n POINTER(c_int), # nr\n POINTER(c_int), # jtb(nterm)\n POINTER(c_int), # clbdtb(nterm)\n POINTER(c_int), # lbdtb(nterm)\n POINTER(c_int), # nterm\n POINTER(c_int), # ord\n POINTER(c_int), # pcs\n POINTER(c_double), # rho_min_jt\n POINTER(c_double)) # dr_jt\n\n#\n#\n#\nclass prod_talman_c(log_mesh):\n \n def __init__(self, lm=None, jmx=7, ngl=96, lbdmx=14):\n \"\"\"\n Expansion of the products of two atomic orbitals placed at given locations and around a center between these locations \n [1] Talman JD. Multipole Expansions for Numerical Orbital Products, Int. J. Quant. Chem. 107, 1578--1584 (2007)\n ngl : order of Gauss-Legendre quadrature\n log_mesh : instance of log_mesh_c defining the logarithmic mesh (rr and pp arrays)\n jmx : maximal angular momentum quantum number of each atomic orbital in a product\n lbdmx : maximal angular momentum quantum number used for the expansion of the product phia*phib\n \"\"\"\n from numpy.polynomial.legendre import leggauss\n from pyscf.nao.m_log_interp import log_interp_c\n from pyscf.nao.m_csphar_talman_libnao import csphar_talman_libnao as csphar_jt\n assert ngl>2 \n assert jmx>-1\n assert hasattr(lm, 'rr') \n assert hasattr(lm, 'pp')\n \n self.ngl = ngl\n self.lbdmx = lbdmx\n self.xx, self.ww = leggauss(ngl)\n log_mesh.__init__(self, rr=lm.rr, pp=lm.pp)\n\n self.plval=np.zeros([2*(self.lbdmx+jmx+1), ngl])\n self.plval[0,:] = 1.0\n self.plval[1,:] = self.xx\n for kappa in range(1,2*(self.lbdmx+jmx)+1):\n self.plval[kappa+1, :]= ((2*kappa+1)*self.xx*self.plval[kappa, :]-kappa*self.plval[kappa-1, :])/(kappa+1)\n\n self.log_interp = 
log_interp_c(self.rr)\n self.ylm_cr = csphar_jt([0.0,0.0,1.0], self.lbdmx+2*jmx)\n\n return\n\n def prdred(self,phia,la,ra, phib,lb,rb,rcen):\n \"\"\" Reduce two atomic orbitals given by their radial functions phia, phib, \n angular momentum quantum numbers la, lb and their centers ra,rb.\n The expansion is done around a center rcen.\"\"\"\n from numpy import sqrt\n from pyscf.nao.m_thrj import thrj\n from pyscf.nao.m_fact import fact as fac, sgn\n\n assert la>-1 \n assert lb>-1 \n assert len(rcen)==3 \n assert len(ra)==3 \n assert len(rb)==3\n \n jtb,clbdtb,lbdtb=self.prdred_terms(la,lb)\n nterm = len(jtb)\n \n ya = phia/self.rr**la\n yb = phib/self.rr**lb\n raa,rbb=sqrt(sum((ra-rcen)**2)),sqrt(sum((rb-rcen)**2))\n ijmx=la+lb\n fval=np.zeros([2*self.lbdmxa+ijmx+1, self.nr])\n yz = np.zeros(self.ngl)\n kpmax = 0\n for ir,r in enumerate(self.rr):\n for igl,x in enumerate(self.xx):\n a1 = sqrt(r*r-2*raa*r*x+raa**2)\n a2 = sqrt(r*r+2*rbb*r*x+rbb**2)\n yz[igl]=self.log_interp(ya,a1)*self.log_interp(yb,a2)\n\n kpmax = 2*self.lbdmxa+ijmx if raa+rbb>1.0e-5 else 0 \n for kappa in range(kpmax+1):\n fval[kappa,ir]=0.5*(self.plval[kappa,:]*yz*self.ww).sum()\n\n rhotb=np.zeros([nterm,self.nr])\n for ix,[ij,clbd,clbdp] in enumerate(zip(jtb, clbdtb, lbdtb)):\n for lbd1 in range(la+1):\n lbdp1 = la-lbd1\n aa = thrj(lbd1,lbdp1,la,0,0,0)*fac[lbd1]*fac[lbdp1]*fac[2*la+1] / (fac[2*lbd1]*fac[2*lbdp1]*fac[la])\n\n for lbd2 in range(lb+1):\n lbdp2=lb-lbd2\n bb=thrj(lbd2,lbdp2,lb,0,0,0)*fac[lbd2]*fac[lbdp2]*fac[2*lb+1] / (fac[2*lbd2]*fac[2*lbdp2]*fac[lb])\n bb=aa*bb\n \n for kappa in range(kpmax+1):\n sumb=0.0\n lcmin=max(abs(lbd1-lbd2),abs(clbd-kappa))\n lcmax=min(lbd1+lbd2,clbd+kappa)\n for lc in range(lcmin,lcmax+1,2):\n lcpmin=max(abs(lbdp1-lbdp2),abs(clbdp-kappa))\n lcpmax=min(lbdp1+lbdp2,clbdp+kappa)\n for lcp in range(lcpmin,lcpmax+1,2):\n if abs(lc-ij)<=lcp and lcp<=lc+ij:\n sumb = sumb+(2*lc+1)*(2*lcp+1) * \\\n thrj(lbd1,lbd2,lc,0,0,0) * \\\n thrj(lbdp1,lbdp2,lcp,0,0,0) * \\\n 
thrj(lc,clbd,kappa,0,0,0) * \\\n thrj(lcp,clbdp,kappa,0,0,0) * \\\n sixj(clbd,clbdp,ij,lcp,lc,kappa) * \\\n ninej(la,lb,ij,lbd1,lbd2,lc,lbdp1,lbdp2,lcp)\n \n cc=sgn(lbd1+kappa+lb)*(2*ij+1)*(2*kappa+1) * (2*clbd+1)*(2*clbdp+1)*bb*sumb\n if cc != 0.0:\n lbd1_p_lbd2 = lbd1 + lbd2\n rhotb[ix,:] = rhotb[ix,:] + cc*self.rr[:]**(lbd1_p_lbd2) *(raa**lbdp1)* (rbb**lbdp2)* fval[kappa,:]\n\n return jtb,clbdtb,lbdtb,rhotb\n\n\n def prdred_terms(self,la,lb):\n \"\"\" Compute term-> Lambda,Lambda',j correspondence \"\"\"\n nterm=0\n ijmx=la+lb\n for ij in range(abs(la-lb),ijmx+1):\n for clbd in range(self.lbdmx+1):\n nterm=nterm+ (clbd+ij+1 - abs(clbd-ij))\n\n jtb = np.zeros(nterm, dtype=np.int32)\n clbdtb = np.zeros(nterm, dtype=np.int32)\n lbdtb = np.zeros(nterm, dtype=np.int32)\n \n ix=-1\n for ij in range(abs(la-lb),ijmx+1):\n for clbd in range(self.lbdmx+1):\n for lbd in range(abs(clbd-ij),clbd+ij+1):\n ix=ix+1\n jtb[ix]=ij\n clbdtb[ix]=clbd\n lbdtb[ix]=lbd\n \n return jtb,clbdtb,lbdtb\n \n def prdred_libnao(self,phia,la,ra, phib,lb,rb,rcen):\n \"\"\" By calling a subroutine \"\"\"\n assert len(phia)==self.nr\n assert len(phib)==self.nr\n \n jtb,clbdtb,lbdtb=self.prdred_terms(la,lb)\n nterm = len(jtb)\n \n jtb_cp = np.require(jtb, dtype=c_int, requirements='C')\n clbdtb_cp = np.require(clbdtb, dtype=c_int, requirements='C')\n lbdtb_cp = np.require(lbdtb, dtype=c_int, requirements='C')\n rhotb_cp = np.require( np.zeros([nterm, self.nr]), dtype=c_double, requirements='CW')\n rr_cp = np.require(self.rr,dtype=c_double, requirements='C')\n phia_cp = np.require(phia,dtype=c_double, requirements='C')\n phib_cp = np.require(phib,dtype=c_double, requirements='C')\n ra_cp = np.require(ra,dtype=c_double, requirements='C')\n rb_cp = np.require(rb,dtype=c_double, requirements='C')\n rcen_cp = np.require(rcen,dtype=c_double, requirements='C')\n \n libnao.prdred(phia_cp.ctypes.data_as(POINTER(c_double)), c_int(la), ra_cp.ctypes.data_as(POINTER(c_double)),\n 
phib_cp.ctypes.data_as(POINTER(c_double)), c_int(lb), rb_cp.ctypes.data_as(POINTER(c_double)),\n rcen_cp.ctypes.data_as(POINTER(c_double)), \n c_int(self.lbdmx),\n rhotb_cp.ctypes.data_as(POINTER(c_double)),\n rr_cp.ctypes.data_as(POINTER(c_double)),\n c_int(self.nr),\n jtb_cp.ctypes.data_as(POINTER(c_int)),\n clbdtb_cp.ctypes.data_as(POINTER(c_int)),\n lbdtb_cp.ctypes.data_as(POINTER(c_int)),\n c_int(nterm),\n c_int(self.ngl),\n c_int(1),\n c_double(self.log_interp.gammin_jt),\n c_double(self.log_interp.dg_jt) )\n rhotb = rhotb_cp\n return jtb,clbdtb,lbdtb,rhotb\n\n \n def prdred_further(self, ja,ma,jb,mb,rcen,jtb,clbdtb,lbdtb,rhotb):\n \"\"\" Evaluate the Talman's expansion at given Cartesian coordinates\"\"\"\n from pyscf.nao.m_thrj import thrj\n from pyscf.nao.m_fact import sgn\n from pyscf.nao.m_csphar_talman_libnao import csphar_talman_libnao as csphar_jt\n from numpy import zeros, sqrt, pi, array\n \n assert all(rcen == zeros(3)) # this works only when center is at the origin\n nterm = len(jtb)\n assert nterm == len(clbdtb)\n assert nterm == len(lbdtb)\n assert nterm == rhotb.shape[0]\n assert self.nr == rhotb.shape[1]\n\n ffr = zeros([self.lbdmx+1,self.nr], np.complex128)\n m = mb + ma\n ylm_cr = csphar_jt([0.0,0.0,1.0], lbdtb.max())\n for j,clbd,lbd,rho in zip(jtb,clbdtb,lbdtb,rhotb):\n ffr[clbd,:]=ffr[clbd,:] + thrj(ja,jb,j,ma,mb,-m)*thrj(j,clbd,lbd,-m,m,0)*rho*ylm_cr[lbd*(lbd+1)]\n return ffr,m\n\n def prdred_further_scalar(self, ja,ma,jb,mb,rcen,jtb,clbdtb,lbdtb,rhotb):\n \"\"\" Evaluate the Talman's expansion at given Cartesian coordinates\"\"\"\n from pyscf.nao.m_thrj import thrj\n from pyscf.nao.m_csphar_talman_libnao import csphar_talman_libnao as csphar_jt\n from numpy import zeros, sqrt, pi, array\n \n assert all(rcen == zeros(3)) # this works only when center is at the origin\n nterm = len(jtb)\n assert nterm == len(clbdtb)\n assert nterm == len(lbdtb)\n assert nterm == len(rhotb)\n\n ffr = zeros([self.lbdmx+1], np.complex128)\n m = mb + ma\n for 
j,clbd,lbd,rho in zip(jtb,clbdtb,lbdtb,rhotb):\n ffr[clbd]=ffr[clbd] + thrj(ja,jb,j,ma,mb,-m)*thrj(j,clbd,lbd,-m,m,0)*rho*self.ylm_cr[lbd*(lbd+1)]\n return ffr,m\n\n#\n#\n#\nif __name__=='__main__':\n from pyscf.nao import prod_basis_c, system_vars_c\n from pyscf import gto\n import numpy as np\n \n","repo_name":"pyscf/nao","sub_path":"pyscf/nao/m_prod_talman.py","file_name":"m_prod_talman.py","file_ext":"py","file_size_in_byte":9620,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"54"} +{"seq_id":"32983517093","text":"from gidgethub.aiohttp import GitHubAPI\nfrom typing import List, Dict, Any\nimport json\n\n\nclass CommandPayload:\n \"\"\"All data passed to a command when `call` is invoked. see `Command` for more information.\n\n \"\"\"\n\n def __init__(\n self,\n gh: GitHubAPI,\n issue_url: str,\n comment_url: str,\n repo_url: str,\n comment_data: Dict[str, Any],\n args: List[str],\n ):\n self.__gh = gh\n self.__issue_url = issue_url\n self.__comment_url = comment_url\n self.__repo_url = repo_url\n self.__comment_data = comment_data\n self.__args = args\n\n @property\n def gh(self) -> GitHubAPI:\n \"\"\"GitHubAPI: the github api object for this session. 
See gidgethub for more information\"\"\"\n return self.__gh\n\n @property\n def issue_url(self) -> str:\n \"\"\"str: Url for getting an issue\"\"\"\n return self.__issue_url\n\n @property\n def comment_url(self) -> str:\n \"\"\"str: Url for posting a comment\"\"\"\n return self.__comment_url\n\n @property\n def repo_url(self) -> str:\n \"\"\"str: Url that points to the given repo\"\"\"\n return self.__repo_url\n\n @property\n def comment_data(self) -> Dict[str, Any]:\n \"\"\"json: JSON object with all the data about the comment\"\"\"\n return self.__comment_data\n\n @property\n def args(self) -> List[str]:\n \"\"\"List[str]: All the arguments or \"words\" after the command\"\"\"\n return self.__args\n","repo_name":"The-Indian-Chinna/Code_Review_Bot","sub_path":"cam2_code_review_bot/commands/command_payload.py","file_name":"command_payload.py","file_ext":"py","file_size_in_byte":1484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"3488748412","text":"import os\nimport datetime\nfrom glob import glob\n\nfrom PyQt6.QtCore import QFileSystemWatcher, pyqtSignal\n\nfrom helpers import config\nfrom helpers import location_service\nfrom helpers import strip_timestamp\n\n\nclass LogReader(QFileSystemWatcher):\n\n new_line = pyqtSignal(object)\n\n def __init__(self, eq_directory):\n super().__init__()\n\n self._eq_directory = eq_directory\n self._files = glob(os.path.join(eq_directory, 'eqlog*.txt'))\n self._watcher = QFileSystemWatcher(self._files)\n self._watcher.fileChanged.connect(self._file_changed_safe_wrap)\n self._dir_watcher = QFileSystemWatcher([eq_directory])\n self._dir_watcher.directoryChanged.connect(self._dir_changed)\n\n self._stats = {\n 'log_file': '',\n 'last_read': 0,\n }\n\n def _dir_changed(self, changed_dir):\n print(\"Directory '%s' updated, refreshing file list...\" % changed_dir)\n new_files = glob(os.path.join(self._eq_directory, 'eqlog*.txt'))\n if new_files != self._files:\n 
updated_files = set(new_files) - set(self._files)\n self._watcher.addPaths(updated_files)\n self._files = new_files\n\n def _file_changed_safe_wrap(self, changed_file):\n try:\n self._file_changed(changed_file)\n except FileNotFoundError:\n print(\"File not found: %s; did it move?\")\n\n def _file_changed(self, changed_file):\n if changed_file != self._stats['log_file']:\n self._stats['log_file'] = changed_file\n char_name = os.path.basename(changed_file).split(\"_\")[1]\n if not config.data['sharing']['player_name_override']:\n config.data['sharing']['player_name'] = char_name\n location_service.SIGNALS.config_updated.emit()\n with open(self._stats['log_file'], 'rb') as log:\n log.seek(0, os.SEEK_END)\n current_end = log.tell()\n log.seek(max(log.tell() - 1000, 0), os.SEEK_SET)\n for line in log:\n if line.endswith(b'] Welcome to EverQuest!\\r\\n'):\n break\n self._stats['last_read'] = min(log.tell(), current_end)\n\n with open(self._stats['log_file']) as log:\n try:\n log.seek(self._stats['last_read'], os.SEEK_SET)\n lines = log.readlines()\n self._stats['last_read'] = log.tell()\n for line in lines:\n self.new_line.emit((\n datetime.datetime.now(),\n strip_timestamp(line)\n ))\n except Exception: # do not read lines if they cause errors\n log.seek(0, os.SEEK_END)\n self._stats['last_read'] = log.tell()\n","repo_name":"nomns/nparse","sub_path":"helpers/logreader.py","file_name":"logreader.py","file_ext":"py","file_size_in_byte":2806,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"54"} +{"seq_id":"11565599906","text":"def countPairs(n, edges, queries):\n #D = dict()\n #for e in edges:\n # e = sorted(e)\n # if e in D:\n # D[e] += 1\n # else:\n # D[e] = 1\n #return [sum(list(D.values()) > q) for q in queries]\n D = dict()\n for e in edges:\n e = str(sorted(e))\n if e in D:\n D[e] += 1\n else:\n D[e] = 1\n #return [sum(list(D.values()) > q) for q in queries]\n #print(f\"D.values() = {D.values()}\")\n return [sum( [count 
> q for count in D.values()] ) for q in queries]\n\n\ndef test_case(k, n, edges, queries, expected):\n print(f\"Test case {k:02d}\")\n ans = countPairs(n, edges, queries)\n if ans == expected:\n print(\"Correct.\")\n else:\n print(f\"Incorrect. ans = {ans}, expected = {expected}\")\n\n\nif __name__ == \"__main__\":\n k = 1\n n = 4\n edges = [[1,2],[2,4],[1,3],[2,3],[2,1]]\n queries = [2,3]\n expected = [6,5]\n test_case(k, n, edges, queries, expected)\n","repo_name":"phunc20/leetcode","sub_path":"contest/biweekly/47/pair_nodes.py","file_name":"pair_nodes.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"70395367843","text":"import win32gui, win32con, keyboard, smtplib\nthe_program_to_hide = win32gui.GetForegroundWindow()\nwin32gui.ShowWindow(the_program_to_hide , win32con.SW_HIDE)\na = str(keyboard.record(until='esc'))\n\na = a.replace('KeyboardEvent', '')\na = a.replace('down', '')\na = a.replace('(', '')\na = a.replace(')', '')\na = a.replace('[', '')\na = a.replace(']', '')\na = a.replace(', esc', '')\na=a.replace('up', '(extra)')\n\n'''\nsender_email = \"programmingtest73@gmail.com\"\nrec_email = \"programmingtest75@gmail.com\"\npassword = \"passofsendemail\"\nmessage = a\n\nserver = smtplib.SMTP('smtp.gmail.com', 587)\nserver.starttls()\nserver.login(sender_email, password)\nprint(\"Login success\")\nserver.sendmail(sender_email, rec_email, message)\nprint(\"Email has been sent to \", rec_email)'''\n\nfile = open('data.txt', 'w')\nfile.write(a)\nfile.close()\n","repo_name":"Maaz-319/Python","sub_path":"Keylogger/type 1.py","file_name":"type 1.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"10860634234","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jan 2 07:59:00 2018\n\n@author: brendontucker\n\"\"\"\n\n#%% 
IMPORTS\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n#%% LOAD TEST FILE \n#resutl of this is what I submit\nsubmitTest = pd.read_json(\"/Users/brendontucker/KaggleData/StatoilCCORE/data/processed/test.json\")\n# wow, a 1.5 gb file loaded.... \n\n\n#%% LOAD TRAIN\norginal_train = pd.read_json(\"/Users/brendontucker/KaggleData/StatoilCCORE/data-1/processed/train.json\")\n\n# TEST TRAIN SPLIT\nmsk = np.random.rand(len(orginal_train)) < 0.8 \ntrain = orginal_train[msk]\ntest = orginal_train[~msk]\n\n# X TARGET VARIABLE SET UP WITH BOTH RADAR TYPES\n \nXtargetTrain = np.zeros(shape=(len(train)*2,5625))\nfor x in range(len(train)):\n XtargetTrain[x] = train.iloc[x][0]\n XtargetTrain[x+len(train)] = train.iloc[x][1]\nXtargetTrain = XtargetTrain.T\n\n# Y TARGET VARIABLE SET UP \n\nYtargetTrain = np.zeros(shape=(len(train)*2,1))\nfor x in range(len(train)):\n YtargetTrain[x] = train.iloc[x][4]\n YtargetTrain[x+len(train)] = train.iloc[x][4]\nYtargetTrain = YtargetTrain.T\n\n# X TEST VAR SET UP WITH BOTH RADAR TYPES\nXtargetTest = np.zeros(shape=(len(test)*2,5625))\nfor x in range(len(test)):\n XtargetTest[x] = test.iloc[x][0]\n XtargetTest[x+len(test)] = test.iloc[x][1]\nXtargetTest = XtargetTest.T\n\n# Y TEST TARGET VARIABLE SET UP\n\nYtargetTest = np.zeros(shape=(len(test)*2,1))\nfor x in range(len(test)):\n YtargetTest[x] = test.iloc[x][4]\n YtargetTest[x+len(test)] = test.iloc[x][4]\nYtargetTest = YtargetTest.T\n\n# PREPROCESSING (eventually this will have to be its own file)\n\n# BASIC PREPROCESSING VARS\n\nmean = XtargetTrain.mean(axis=0)\nstd = XtargetTrain.std(axis=0)\nstd.shape\n\n# SUPER BASIC PREPROCESSING\n\nXtargetTrain = XtargetTrain/mean\nXtargetTrain = XtargetTrain - std\n\n\n# TESTSET PREPROCESSING \n\nmean1 = XtargetTest.mean(axis=0)\nstd1 = XtargetTest.std(axis=0)\nXtargetTest = XtargetTest/mean1\nXtargetTest = XtargetTest - std1\n\n#%% HELPER FUNCTIONS FOR DEEP LEARNING \n\ndef sigmoid(Z):\n \"\"\"\n Implements the sigmoid 
activation in numpy\n \n Arguments:\n Z -- numpy array of any shape\n \n Returns:\n A -- output of sigmoid(z), same shape as Z\n cache -- returns Z as well, useful during backpropagation\n \"\"\"\n \n A = 1/(1+np.exp(-Z))\n cache = Z\n \n return A, cache\n\ndef sigmoid_backward(dA, cache):\n \"\"\"\n Implement the backward propagation for a single SIGMOID unit.\n\n Arguments:\n dA -- post-activation gradient, of any shape\n cache -- 'Z' where we store for computing backward propagation efficiently\n\n Returns:\n dZ -- Gradient of the cost with respect to Z\n \"\"\"\n \n Z = cache\n \n s = 1/(1+np.exp(-Z))\n dZ = dA * s * (1-s)\n \n assert (dZ.shape == Z.shape)\n \n return dZ\n\ndef relu(Z):\n \"\"\"\n Implement the RELU function.\n\n Arguments:\n Z -- Output of the linear layer, of any shape\n\n Returns:\n A -- Post-activation parameter, of the same shape as Z\n cache -- a python dictionary containing \"A\" ; stored for computing the backward pass efficiently\n \"\"\"\n \n A = np.maximum(0,Z)\n \n assert(A.shape == Z.shape)\n \n cache = Z \n return A, cache\n\ndef relu_backward(dA, cache):\n \"\"\"\n Implement the backward propagation for a single RELU unit.\n\n Arguments:\n dA -- post-activation gradient, of any shape\n cache -- 'Z' where we store for computing backward propagation efficiently\n\n Returns:\n dZ -- Gradient of the cost with respect to Z\n \"\"\"\n \n Z = cache\n dZ = np.array(dA, copy=True) # just converting dz to a correct object.\n \n # When z <= 0, you should set dz to 0 as well. 
\n dZ[Z <= 0] = 0\n \n assert (dZ.shape == Z.shape)\n \n return dZ\n\n\ndef initialize_parameters(n_x, n_h, n_y):\n \"\"\"\n Argument:\n n_x -- size of the input layer\n n_h -- size of the hidden layer\n n_y -- size of the output layer\n \n Returns:\n parameters -- python dictionary containing your parameters:\n W1 -- weight matrix of shape (n_h, n_x)\n b1 -- bias vector of shape (n_h, 1)\n W2 -- weight matrix of shape (n_y, n_h)\n b2 -- bias vector of shape (n_y, 1)\n \"\"\"\n \n np.random.seed(1)\n \n ### START CODE HERE ### (≈ 4 lines of code)\n W1 = np.random.randn(n_h, n_x) * .01\n b1 = np.zeros((n_h, 1))\n W2 = np.random.randn(n_y, n_h) * .01\n b2 = np.zeros((n_y, 1))\n ### END CODE HERE ###\n \n assert(W1.shape == (n_h, n_x))\n assert(b1.shape == (n_h, 1))\n assert(W2.shape == (n_y, n_h))\n assert(b2.shape == (n_y, 1))\n \n parameters = {\"W1\": W1,\n \"b1\": b1,\n \"W2\": W2,\n \"b2\": b2}\n \n return parameters \n\n\ndef initialize_parameters_deep(layer_dims):\n \"\"\"\n Arguments:\n layer_dims -- python array (list) containing the dimensions of each layer in our network\n \n Returns:\n parameters -- python dictionary containing your parameters \"W1\", \"b1\", ..., \"WL\", \"bL\":\n Wl -- weight matrix of shape (layer_dims[l], layer_dims[l-1])\n bl -- bias vector of shape (layer_dims[l], 1)\n \"\"\"\n \n np.random.seed(3)\n parameters = {}\n L = len(layer_dims) # number of layers in the network\n\n for l in range(1, L):\n ### START CODE HERE ### (≈ 2 lines of code)\n parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l-1]) * 0.01\n parameters['b' + str(l)] = np.zeros((layer_dims[l], 1))\n ### END CODE HERE ###\n \n assert(parameters['W' + str(l)].shape == (layer_dims[l], layer_dims[l-1]))\n assert(parameters['b' + str(l)].shape == (layer_dims[l], 1))\n\n \n return parameters\n\n\ndef linear_forward(A, W, b):\n \"\"\"\n Implement the linear part of a layer's forward propagation.\n\n Arguments:\n A -- activations from previous layer (or 
input data): (size of previous layer, number of examples)\n W -- weights matrix: numpy array of shape (size of current layer, size of previous layer)\n b -- bias vector, numpy array of shape (size of the current layer, 1)\n\n Returns:\n Z -- the input of the activation function, also called pre-activation parameter \n cache -- a python dictionary containing \"A\", \"W\" and \"b\" ; stored for computing the backward pass efficiently\n \"\"\"\n \n ### START CODE HERE ### (≈ 1 line of code)\n Z = np.dot(W, A) + b\n ### END CODE HERE ###\n \n assert(Z.shape == (W.shape[0], A.shape[1]))\n cache = (A, W, b)\n \n return Z, cache\n\ndef linear_activation_forward(A_prev, W, b, activation):\n \"\"\"\n Implement the forward propagation for the LINEAR->ACTIVATION layer\n\n Arguments:\n A_prev -- activations from previous layer (or input data): (size of previous layer, number of examples)\n W -- weights matrix: numpy array of shape (size of current layer, size of previous layer)\n b -- bias vector, numpy array of shape (size of the current layer, 1)\n activation -- the activation to be used in this layer, stored as a text string: \"sigmoid\" or \"relu\"\n\n Returns:\n A -- the output of the activation function, also called the post-activation value \n cache -- a python dictionary containing \"linear_cache\" and \"activation_cache\";\n stored for computing the backward pass efficiently\n \"\"\"\n \n if activation == \"sigmoid\":\n # Inputs: \"A_prev, W, b\". Outputs: \"A, activation_cache\".\n ### START CODE HERE ### (≈ 2 lines of code)\n Z, linear_cache = linear_forward(A_prev, W, b)\n A, activation_cache = sigmoid(Z)\n ### END CODE HERE ###\n \n elif activation == \"relu\":\n # Inputs: \"A_prev, W, b\". 
Outputs: \"A, activation_cache\".\n ### START CODE HERE ### (≈ 2 lines of code)\n Z, linear_cache = linear_forward(A_prev, W, b)\n A, activation_cache = relu(Z)\n ### END CODE HERE ###\n \n assert (A.shape == (W.shape[0], A_prev.shape[1]))\n cache = (linear_cache, activation_cache)\n\n return A, cache\n\ndef L_model_forward(X, parameters):\n \"\"\"\n Implement forward propagation for the [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID computation\n \n Arguments:\n X -- data, numpy array of shape (input size, number of examples)\n parameters -- output of initialize_parameters_deep()\n \n Returns:\n AL -- last post-activation value\n caches -- list of caches containing:\n every cache of linear_relu_forward() (there are L-1 of them, indexed from 0 to L-2)\n the cache of linear_sigmoid_forward() (there is one, indexed L-1)\n \"\"\"\n\n caches = []\n A = X\n L = len(parameters) // 2 # number of layers in the neural network\n \n # Implement [LINEAR -> RELU]*(L-1). Add \"cache\" to the \"caches\" list.\n for l in range(1, L):\n A_prev = A \n ### START CODE HERE ### (≈ 2 lines of code)\n A, cache = linear_activation_forward(A_prev, \n parameters['W' + str(l)], \n parameters['b' + str(l)], \n activation='relu')\n caches.append(cache)\n ### END CODE HERE ###\n \n # Implement LINEAR -> SIGMOID. 
Add \"cache\" to the \"caches\" list.\n ### START CODE HERE ### (≈ 2 lines of code)\n AL, cache = linear_activation_forward(A, \n parameters['W' + str(L)], \n parameters['b' + str(L)], \n activation='sigmoid')\n caches.append(cache)\n\n ### END CODE HERE ###\n \n assert(AL.shape == (1,X.shape[1]))\n \n return AL, caches\n\ndef compute_cost(AL, Y):\n \"\"\"\n Implement the cost function defined by equation (7).\n\n Arguments:\n AL -- probability vector corresponding to your label predictions, shape (1, number of examples)\n Y -- true \"label\" vector (for example: containing 0 if non-cat, 1 if cat), shape (1, number of examples)\n\n Returns:\n cost -- cross-entropy cost\n \"\"\"\n \n m = Y.shape[1]\n\n # Compute loss from aL and y.\n ### START CODE HERE ### (≈ 1 lines of code)\n cost = (-1/m)*np.sum(np.multiply(Y,np.log(AL)) + np.multiply((1-Y), np.log(1-AL)))\n ### END CODE HERE ###\n \n cost = np.squeeze(cost) # To make sure your cost's shape is what we expect (e.g. this turns [[17]] into 17).\n assert(cost.shape == ())\n \n return cost\n\ndef linear_backward(dZ, cache):\n \"\"\"\n Implement the linear portion of backward propagation for a single layer (layer l)\n\n Arguments:\n dZ -- Gradient of the cost with respect to the linear output (of current layer l)\n cache -- tuple of values (A_prev, W, b) coming from the forward propagation in the current layer\n\n Returns:\n dA_prev -- Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev\n dW -- Gradient of the cost with respect to W (current layer l), same shape as W\n db -- Gradient of the cost with respect to b (current layer l), same shape as b\n \"\"\"\n A_prev, W, b = cache\n m = A_prev.shape[1]\n\n ### START CODE HERE ### (≈ 3 lines of code)\n dW = (1/m)*np.dot(dZ, A_prev.T)\n #db = np.squeeze(np.sum(dZ, axis=1, keepdims=True)) / m\n db = np.sum(dZ, axis=1, keepdims=True) / m\n #db = np.array([[db]])\n #print(\"db is:\", db, \"db shape is:\", db.shape)\n 
#print(\"b is:\", b, \"b shape is:\", b.shape)\n #db = np.array([[(1/m)*np.sum(dZ)]])\n #print('b:', b, 'shape of b:', b.shape, 'db:', db, 'shape of db:', db.shape)\n dA_prev = np.dot(W.T, dZ)\n ### END CODE HERE ###\n \n assert (dA_prev.shape == A_prev.shape)\n assert (dW.shape == W.shape)\n assert (db.shape == b.shape)\n \n return dA_prev, dW, db\n\ndef linear_activation_backward(dA, cache, activation):\n \"\"\"\n Implement the backward propagation for the LINEAR->ACTIVATION layer.\n \n Arguments:\n dA -- post-activation gradient for current layer l \n cache -- tuple of values (linear_cache, activation_cache) we store for computing backward propagation efficiently\n activation -- the activation to be used in this layer, stored as a text string: \"sigmoid\" or \"relu\"\n \n Returns:\n dA_prev -- Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev\n dW -- Gradient of the cost with respect to W (current layer l), same shape as W\n db -- Gradient of the cost with respect to b (current layer l), same shape as b\n \"\"\"\n linear_cache, activation_cache = cache\n \n if activation == \"relu\":\n ### START CODE HERE ### (≈ 2 lines of code)\n dZ = relu_backward(dA, activation_cache)\n dA_prev, dW, db = linear_backward(dZ, cache[0])\n ### END CODE HERE ###\n \n elif activation == \"sigmoid\":\n ### START CODE HERE ### (≈ 2 lines of code)\n dZ = sigmoid_backward(dA, activation_cache)\n dA_prev, dW, db = linear_backward(dZ, cache[0])\n ### END CODE HERE ###\n \n return dA_prev, dW, db\n\ndef L_model_backward(AL, Y, caches):\n \"\"\"\n Implement the backward propagation for the [LINEAR->RELU] * (L-1) -> LINEAR -> SIGMOID group\n \n Arguments:\n AL -- probability vector, output of the forward propagation (L_model_forward())\n Y -- true \"label\" vector (containing 0 if non-cat, 1 if cat)\n caches -- list of caches containing:\n every cache of linear_activation_forward() with \"relu\" (it's caches[l], for l in range(L-1) i.e l = 
0...L-2)\n the cache of linear_activation_forward() with \"sigmoid\" (it's caches[L-1])\n \n Returns:\n grads -- A dictionary with the gradients\n grads[\"dA\" + str(l)] = ... \n grads[\"dW\" + str(l)] = ...\n grads[\"db\" + str(l)] = ... \n \"\"\"\n grads = {}\n L = len(caches) # the number of layers\n m = AL.shape[1]\n Y = Y.reshape(AL.shape) # after this line, Y is the same shape as AL\n \n # Initializing the backpropagation\n ### START CODE HERE ### (1 line of code)\n dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))\n ### END CODE HERE ###\n \n # Lth layer (SIGMOID -> LINEAR) gradients. Inputs: \"AL, Y, caches\". Outputs: \"grads[\"dAL\"], grads[\"dWL\"], grads[\"dbL\"]\n ### START CODE HERE ### (approx. 2 lines)\n current_cache = caches[-1]\n grads[\"dA\" + str(L)], grads[\"dW\" + str(L)], grads[\"db\" + str(L)] = linear_backward(sigmoid_backward(dAL,current_cache[1]),current_cache[0])\n \n ### END CODE HERE ###\n \n for l in reversed(range(L-1)):\n # lth layer: (RELU -> LINEAR) gradients.\n # Inputs: \"grads[\"dA\" + str(l + 2)], caches\". Outputs: \"grads[\"dA\" + str(l + 1)] \n # ,grads[\"dW\" + str(l + 1)] , grads[\"db\" + str(l + 1)] \n ### START CODE HERE ### (approx. 
5 lines)\n #print(l)\n current_cache = caches[l]\n #dZ = sigmoid_backward(dAL, caches[1][0][0])\n #print(\"dZ:\", dZ)\n dA_prev_temp, dW_temp, db_temp = linear_activation_backward(grads[\"dA\" + str(l + 2)], current_cache, activation=\"relu\")\n #linear_backward(dZ, current_cache[0])\n #linear_activation_backward(grads[\"dA\" + str(l + 2)], current_cache, activation=\"relu\")\n grads[\"dA\" + str(l + 1)] = dA_prev_temp\n grads[\"dW\" + str(l + 1)] = dW_temp\n grads[\"db\" + str(l + 1)] = db_temp\n ### END CODE HERE ###\n\n return grads\n\ndef update_parameters(parameters, grads, learning_rate):\n \"\"\"\n Update parameters using gradient descent\n \n Arguments:\n parameters -- python dictionary containing your parameters \n grads -- python dictionary containing your gradients, output of L_model_backward\n \n Returns:\n parameters -- python dictionary containing your updated parameters \n parameters[\"W\" + str(l)] = ... \n parameters[\"b\" + str(l)] = ...\n \"\"\"\n \n L = len(parameters) // 2 # number of layers in the neural network\n\n # Update rule for each parameter. 
Use a for loop.\n ### START CODE HERE ### (≈ 3 lines of code)\n for l in range(L):\n parameters[\"W\" + str(l + 1)] = parameters[\"W\" + str(l + 1)] - learning_rate * grads[\"dW\" + str(l + 1)]\n parameters[\"b\" + str(l + 1)] = parameters[\"b\" + str(l + 1)] - learning_rate * grads[\"db\" + str(l + 1)]\n\n ### END CODE HERE ###\n return parameters\n\ndef predict(X, y, parameters):\n \"\"\"\n This function is used to predict the results of a L-layer neural network.\n \n Arguments:\n X -- data set of examples you would like to label\n parameters -- parameters of the trained model\n \n Returns:\n p -- predictions for the given dataset X\n \"\"\"\n \n m = X.shape[1]\n n = len(parameters) // 2 # number of layers in the neural network\n p = np.zeros((1,m))\n \n # Forward propagation\n probas, caches = L_model_forward(X, parameters)\n\n \n # convert probas to 0/1 predictions\n for i in range(0, probas.shape[1]):\n if probas[0,i] > 0.5:\n p[0,i] = 1\n else:\n p[0,i] = 0\n \n #print results\n #print (\"predictions: \" + str(p))\n #print (\"true labels: \" + str(y))\n print(\"Accuracy: \" + str(np.sum((p == y)/m)))\n \n return p\n\n#%% TWO LAYER MODEL\n \ndef two_layer_model(X, Y, layers_dims, learning_rate = 0.00285, num_iterations = 3000, print_cost=False):\n \"\"\"\n Implements a two-layer neural network: LINEAR->RELU->LINEAR->SIGMOID.\n \n Arguments:\n X -- input data, of shape (n_x, number of examples)\n Y -- true \"label\" vector (containing 0 if cat, 1 if non-cat), of shape (1, number of examples)\n layers_dims -- dimensions of the layers (n_x, n_h, n_y)\n num_iterations -- number of iterations of the optimization loop\n learning_rate -- learning rate of the gradient descent update rule\n print_cost -- If set to True, this will print the cost every 100 iterations \n \n Returns:\n parameters -- a dictionary containing W1, W2, b1, and b2\n \"\"\"\n \n np.random.seed(1)\n grads = {}\n costs = [] # to keep track of the cost\n m = X.shape[1] # number of examples\n (n_x, 
n_h, n_y) = layers_dims\n \n # Initialize parameters dictionary, by calling one of the functions you'd previously implemented\n ### START CODE HERE ### (≈ 1 line of code)\n parameters = initialize_parameters(n_x, n_h, n_y)\n ### END CODE HERE ###\n \n # Get W1, b1, W2 and b2 from the dictionary parameters.\n W1 = parameters[\"W1\"]\n b1 = parameters[\"b1\"]\n W2 = parameters[\"W2\"]\n b2 = parameters[\"b2\"]\n \n # Loop (gradient descent)\n\n for i in range(0, num_iterations):\n\n # Forward propagation: LINEAR -> RELU -> LINEAR -> SIGMOID. Inputs: \"X, W1, b1\". Output: \"A1, cache1, A2, cache2\".\n ### START CODE HERE ### (≈ 2 lines of code)\n A1, cache1 = linear_activation_forward(X, W1, b1, 'relu')\n A2, cache2 = linear_activation_forward(A1, W2, b2, 'sigmoid')\n ### END CODE HERE ###\n \n # Compute cost\n ### START CODE HERE ### (≈ 1 line of code)\n cost = compute_cost(A2, Y)\n ### END CODE HERE ###\n \n # Initializing backward propagation\n dA2 = - (np.divide(Y, A2) - np.divide(1 - Y, 1 - A2))\n \n # Backward propagation. Inputs: \"dA2, cache2, cache1\". Outputs: \"dA1, dW2, db2; also dA0 (not used), dW1, db1\".\n ### START CODE HERE ### (≈ 2 lines of code)\n dA1, dW2, db2 = linear_activation_backward(dA2, cache2, 'sigmoid')\n dA0, dW1, db1 = linear_activation_backward(dA1, cache1, 'relu')\n ### END CODE HERE ###\n \n # Set grads['dWl'] to dW1, grads['db1'] to db1, grads['dW2'] to dW2, grads['db2'] to db2\n grads['dW1'] = dW1\n grads['db1'] = db1\n grads['dW2'] = dW2\n grads['db2'] = db2\n \n # Update parameters.\n ### START CODE HERE ### (approx. 
1 line of code)\n parameters = update_parameters(parameters, grads, learning_rate)\n ### END CODE HERE ###\n\n # Retrieve W1, b1, W2, b2 from parameters\n W1 = parameters[\"W1\"]\n b1 = parameters[\"b1\"]\n W2 = parameters[\"W2\"]\n b2 = parameters[\"b2\"]\n \n # Print the cost every 100 training example\n if print_cost and i % 100 == 0:\n print(\"Cost after iteration {}: {}\".format(i, np.squeeze(cost)))\n if print_cost and i % 100 == 0:\n costs.append(cost)\n \n # plot the cost\n\n plt.plot(np.squeeze(costs))\n plt.ylabel('cost')\n plt.xlabel('iterations (per tens)')\n plt.title(\"Learning rate =\" + str(learning_rate))\n plt.show()\n \n return parameters\n\ndef L_layer_model(X, Y, layers_dims, learning_rate = 0.75, num_iterations = 3000, print_cost=False):#lr was 0.009\n \"\"\"\n Implements a L-layer neural network: [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID.\n \n Arguments:\n X -- data, numpy array of shape (number of examples, num_px * num_px * 3)\n Y -- true \"label\" vector (containing 0 if cat, 1 if non-cat), of shape (1, number of examples)\n layers_dims -- list containing the input size and each layer size, of length (number of layers + 1).\n learning_rate -- learning rate of the gradient descent update rule\n num_iterations -- number of iterations of the optimization loop\n print_cost -- if True, it prints the cost every 100 steps\n \n Returns:\n parameters -- parameters learnt by the model. 
They can then be used to predict.\n \"\"\"\n\n #np.random.seed(1) maybe this put me too close to the true minimized cost?\n costs = [] # keep track of cost\n \n # Parameters initialization.\n ### START CODE HERE ###\n parameters = initialize_parameters_deep(layers_dims)\n ### END CODE HERE ###\n \n # Loop (gradient descent)\n for i in range(0, num_iterations):\n\n # Forward propagation: [LINEAR -> RELU]*(L-1) -> LINEAR -> SIGMOID.\n ### START CODE HERE ### (≈ 1 line of code)\n AL, caches = L_model_forward(X, parameters)\n ### END CODE HERE ###\n \n # Compute cost.\n ### START CODE HERE ### (≈ 1 line of code)\n cost = compute_cost(AL, Y)\n ### END CODE HERE ###\n \n # Backward propagation.\n ### START CODE HERE ### (≈ 1 line of code)\n grads = L_model_backward(AL, Y, caches)\n ### END CODE HERE ###\n \n # Update parameters.\n ### START CODE HERE ### (≈ 1 line of code)\n parameters = update_parameters(parameters, grads, learning_rate)\n ### END CODE HERE ###\n \n # Print the cost every 100 training example\n if print_cost and i % 100 == 0:\n print (\"Cost after iteration %i: %f\" %(i, cost))\n if print_cost and i % 100 == 0:\n costs.append(cost)\n \n # plot the cost\n plt.plot(np.squeeze(costs))\n plt.ylabel('cost')\n plt.xlabel('iterations (per tens)')\n plt.title(\"Learning rate =\" + str(learning_rate))\n plt.show()\n \n \n return parameters #trying to print costs also\n\n#%% EXPERIMENT WITH TWO-LAYER MODEL \n \nn_x = 5625 # should be pixel count in HV image? + other radar dimension? \nn_h = 17 #what happens if we increase this? 
was 7\nn_y = 1\nlayers_dims = (n_x, n_h, n_y)\n\n# was: train_x, train_y,\n# is: XtargetTrain, YtargetTrain,\n\nparameters = two_layer_model(XtargetTrain, YtargetTrain, \n layers_dims = (n_x, n_h, n_y),\n learning_rate = 0.03,\n num_iterations = 50000, print_cost=True)\n\n\n#%% FINDING A BETTER LEARNING RATE for n_h=17\nlearning_rates = [0.02, 0.01, 0.002, 0.001, 0.0005]\nparameters = {}\nfor i in learning_rates:\n print (\"learning rate is: \" + str(i))\n parameters[str(i)] = two_layer_model(XtargetTrain, YtargetTrain, \n layers_dims = (n_x, n_h, n_y),\n learning_rate = i,\n num_iterations = 2000, print_cost=True)\n print ('\\n' + \"-------------------------------------------------------\" + '\\n')\n\nfor i in learning_rates:\n plt.plot(np.squeeze(parameters[str(i)][\"costs\"]), \n label= str(parameters[str(i)][\"learning_rate\"]))\n\nplt.ylabel('cost')\nplt.xlabel('iterations*100')\n\nlegend = plt.legend(loc='upper center', shadow=True)\nframe = legend.get_frame()\nframe.set_facecolor('0.90')\nplt.show()\n#%% ACCURACY CHECK\n\npredictions_train = predict(XtargetTrain, YtargetTrain, parameters)\npredictions_test = predict(XtargetTest, YtargetTest, parameters)\n\n\n\n#%% RESULTS LOG \n#would eventually like a way to plot accuracy predictions per 100. 
Maybe make\n#that part of gradient checking\n'''\n\nn_h = 17\nparameters = two_layer_model(XtargetTrain, YtargetTrain, \n layers_dims = (n_x, n_h, n_y),\n learning_rate = 0.00285,\n num_iterations = 10000, print_cost=True)\nCost after iteration 9900: 0.6121965393368791\nAccuracy: 0.651813880126\nAccuracy: 0.639880952381\n\nparameters = two_layer_model(XtargetTrain, YtargetTrain, \n layers_dims = (n_x, n_h, n_y),\n learning_rate = 0.00285,\n num_iterations = 25000, print_cost=True)\n\nCost after iteration 24900: 0.5559457371623462\nAccuracy: 0.688880126183\nAccuracy: 0.665178571429\n\n\nn_h = 17\nparameters = two_layer_model(XtargetTrain, YtargetTrain, \n layers_dims = (n_x, n_h, n_y),\n learning_rate = 0.00285,\n num_iterations = 40000, print_cost=True)\nAccuracy: 0.709266409266\nAccuracy: 0.68932038835\n\nn_h = 17\nparameters = two_layer_model(XtargetTrain, YtargetTrain, \n layers_dims = (n_x, n_h, n_y),\n learning_rate = 0.00285,\n num_iterations = 45000, print_cost=True)\nCost after iteration 44900: 0.5123028478884648\nAccuracy: 0.716602316602\nAccuracy: 0.694174757282\n\nn_h = 17\nparameters = two_layer_model(XtargetTrain, YtargetTrain, \n layers_dims = (n_x, n_h, n_y),\n learning_rate = 0.03,\n num_iterations = 50000, print_cost=True)\nCost after iteration 49900: 0.5280477733712402\n\n\nlearning rate is: 0.285\nCost after iteration 0: 0.6904752463266352\nCost after iteration 100: 0.6910916423732921\nCost after iteration 200: 0.6910915055066513\nCost after iteration 300: 0.6910912781291456\nCost after iteration 400: 0.691090955934224\nCost after iteration 500: 0.691091828879263\nCost after iteration 600: 0.691091824718516\nCost after iteration 700: 0.6910918197480824\nCost after iteration 800: 0.6910918136428486\nCost after iteration 900: 0.691091805942465\nCost after iteration 1000: 0.691091796353015\nCost after iteration 1100: 0.6910917842668988\nCost after iteration 1200: 0.6910917688949045\nCost after iteration 1300: 0.6910917480676306\nCost after iteration 
1400: 0.6910913361890751\nCost after iteration 1500: 0.6910906512002061\nCost after iteration 1600: 0.691091838262931\nCost after iteration 1700: 0.6910918372329317\nCost after iteration 1800: 0.6910918372329314\nCost after iteration 1900: 0.6910918372329314\n\n-------------------------------------------------------\n\nlearning rate is: 0.0285\nCost after iteration 0: 0.6904752463266352\nCost after iteration 100: 0.6742351804889182\nCost after iteration 200: 0.6691008315401197\nCost after iteration 300: 0.6652674570048586\nCost after iteration 400: 0.6627233028494048\nCost after iteration 500: 0.6615328561052308\nCost after iteration 600: 0.6883745957432594\nCost after iteration 700: 0.6694019882118697\nCost after iteration 800: 0.6602101328607431\nCost after iteration 900: 0.6595229118379374\nCost after iteration 1000: 0.6590205027771038\nCost after iteration 1100: 0.6586116929473533\nCost after iteration 1200: 0.6583343092286582\nCost after iteration 1300: 0.6581138663658466\nCost after iteration 1400: 0.65799165578704\nCost after iteration 1500: 0.6576189667085518\nCost after iteration 1600: 0.6573304609195947\nCost after iteration 1700: 0.6569945843508901\nCost after iteration 1800: 0.656613848682086\nCost after iteration 1900: 0.656193424880438\n\n-------------------------------------------------------\n\nlearning rate is: 0.00285\nCost after iteration 0: 0.6904752463266352\nCost after iteration 100: 0.6789104589011249\nCost after iteration 200: 0.678354863792582\nCost after iteration 300: 0.6778105706072725\nCost after iteration 400: 0.677273014107972\nCost after iteration 500: 0.6767469713610497\nCost after iteration 600: 0.6762292376030767\nCost after iteration 700: 0.6757187710956342\nCost after iteration 800: 0.6752146076386644\nCost after iteration 900: 0.6747155330994471\nCost after iteration 1000: 0.6742202632621919\nCost after iteration 1100: 0.6737274799069317\nCost after iteration 1200: 0.6732357593797926\nCost after iteration 1300: 
0.672743971575757\nCost after iteration 1400: 0.6722500088926611\nCost after iteration 1500: 0.6717508626735565\nCost after iteration 1600: 0.6712437940361453\nCost after iteration 1700: 0.6707280440277581\nCost after iteration 1800: 0.6702008105770436\nCost after iteration 1900: 0.6696585911186352\n\nlearning rate is: 0.03\nCost after iteration 0: 0.6904752463266352\nCost after iteration 100: 0.6739760571279207\nCost after iteration 200: 0.6684863635149952\nCost after iteration 300: 0.6638341665505468\nCost after iteration 400: 0.6639272442078212\nCost after iteration 500: 0.6620048126770468\nCost after iteration 600: 0.6608005592881836\nCost after iteration 700: 0.6616349008803132\nCost after iteration 800: 0.6607987677057882\nCost after iteration 900: 0.6605045535558809\nCost after iteration 1000: 0.660331408133929\nCost after iteration 1100: 0.6602698044587846\nCost after iteration 1200: 0.6602247019261979\nCost after iteration 1300: 0.660047659435054\nCost after iteration 1400: 0.6598402946978986\nCost after iteration 1500: 0.6595586777685706\nCost after iteration 1600: 0.6591932612088411\nCost after iteration 1700: 0.658764913829915\nCost after iteration 1800: 0.6582865919745154\nCost after iteration 1900: 0.6577686761576642\n\n-------------------------------------------------------\n\nlearning rate is: 0.04\nCost after iteration 0: 0.6904752463266352\nCost after iteration 100: 0.6722467839359155\nCost after iteration 200: 0.6672265716429222\nCost after iteration 300: 0.6664040574605576\nCost after iteration 400: 0.6660122710245562\nCost after iteration 500: 0.6661105673142648\nCost after iteration 600: 0.6660177961033965\nCost after iteration 700: 0.665669395218434\nCost after iteration 800: 0.6651615525115657\nCost after iteration 900: 0.6644913142864938\nCost after iteration 1000: 0.6638113489145564\nCost after iteration 1100: 0.6630627672304391\nCost after iteration 1200: 0.6623232189782766\nCost after iteration 1300: 0.6615456512220688\nCost after 
iteration 1400: 0.660749429541625\nCost after iteration 1500: 0.6600112876083787\nCost after iteration 1600: 0.6592085370042204\nCost after iteration 1700: 0.6584316126047565\nCost after iteration 1800: 0.6577305598060624\nCost after iteration 1900: 0.6566523327620639\n\n-------------------------------------------------------\n\nlearning rate is: 0.002\nCost after iteration 0: 0.6904752463266352\nCost after iteration 100: 0.6790907528633829\nCost after iteration 200: 0.6786839882171811\nCost after iteration 300: 0.6782964296589792\nCost after iteration 400: 0.6779143517907158\nCost after iteration 500: 0.6775350499136987\nCost after iteration 600: 0.6771608911087572\nCost after iteration 700: 0.6767921861081613\nCost after iteration 800: 0.6764276231218348\nCost after iteration 900: 0.6760667808292171\nCost after iteration 1000: 0.6757093950217944\nCost after iteration 1100: 0.6753550978421162\nCost after iteration 1200: 0.6750035462100352\nCost after iteration 1300: 0.6746541051134755\nCost after iteration 1400: 0.6743066360695669\nCost after iteration 1500: 0.6739603132769066\nCost after iteration 1600: 0.6736149691218196\nCost after iteration 1700: 0.6732699746887183\nCost after iteration 1800: 0.6729250703967946\nCost after iteration 1900: 0.6725794365328946\n\n-------------------------------------------------------\n\nlearning rate is: 0.001\nCost after iteration 0: 0.6904752463266352\nCost after iteration 100: 0.6804954042659552\nCost after iteration 200: 0.6790906069307653\nCost after iteration 300: 0.6788792235563258\nCost after iteration 400: 0.6786831443155711\nCost after iteration 500: 0.6784887820998639\nCost after iteration 600: 0.6782956182286465\nCost after iteration 700: 0.6781041040599862\nCost after iteration 800: 0.6779135713116089\nCost after iteration 900: 0.677723847295489\nCost after iteration 1000: 0.6775342963645022\nCost after iteration 1100: 0.6773462870377353\nCost after iteration 1200: 0.6771601745215491\nCost after iteration 1300: 
0.6769752638839822\nCost after iteration 1400: 0.6767914993701443\nCost after iteration 1500: 0.6766087722244409\nCost after iteration 1600: 0.6764269680245293\nCost after iteration 1700: 0.6762461123568432\nCost after iteration 1800: 0.6760661563470979\nCost after iteration 1900: 0.6758870702679387\n\n-------------------------------------------------------\n\nlearning rate is: 0.0005\nCost after iteration 0: 0.6904752463266352\nCost after iteration 100: 0.6846543864635856\nCost after iteration 200: 0.6804979097673449\nCost after iteration 300: 0.6793253269196675\nCost after iteration 400: 0.6790905460465373\nCost after iteration 500: 0.6789787719307535\nCost after iteration 600: 0.678878798731068\nCost after iteration 700: 0.6787804114297994\nCost after iteration 800: 0.6786827229114959\nCost after iteration 900: 0.6785854204719174\nCost after iteration 1000: 0.6784883692047496\nCost after iteration 1100: 0.6783915437298315\nCost after iteration 1200: 0.6782952118843935\nCost after iteration 1300: 0.6781992841436296\nCost after iteration 1400: 0.6781037054949026\nCost after iteration 1500: 0.6780083457021184\nCost after iteration 1600: 0.6779131783243407\nCost after iteration 1700: 0.6778182394921353\nCost after iteration 1800: 0.6777234521307376\nCost after iteration 1900: 0.6776285339460031\n\n\n'''\n\n\n\n\n","repo_name":"Ge0dude/Kaggle","sub_path":"StatoilCCORE/numpyNNattempt/NNwithboth.py","file_name":"NNwithboth.py","file_ext":"py","file_size_in_byte":34222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"17077404526","text":"import pandas as pd\r\nimport numpy as np\r\n\r\ntuples = list(zip(*[['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],\r\n ['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]))\r\n\r\nindex = pd.MultiIndex.from_tuples(tuples, names=['first', 'second'])\r\n\r\ndf = pd.DataFrame(np.random.randn(8, 2), index=index, columns=['A', 'B'])\r\n\r\ndf2 = 
df[:4]\r\n\r\nprint(df2)\r\n\r\n#The stack() method “compresses” a level in the DataFrame’s columns.\r\n\r\nstacked = df2.stack()\r\n\r\nprint(stacked)\r\n\r\n#With a “stacked” DataFrame or Series (having a MultiIndex as the index), the inverse operation of stack() is unstack(), which by default unstacks the last level:\r\n\r\nprint(stacked.unstack())\r\nprint(stacked.unstack(1))\r\nprint(stacked.unstack(0))\r\n\r\n","repo_name":"vicsho997/PandasReshapingMethods","sub_path":"pandas_Stack.py","file_name":"pandas_Stack.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"16416417789","text":"#ROUTINE BORROWS FROM DISCUSSION AT: http://stackoverflow.com/questions/3679694/a-weighted-version-of-random-choice\n\nimport bisect, random\n\ndef getChoice(options):\n\toption, weight = zip(*options)\n\ttotal = 0\n\tweights = []\n\tfor w in weight:\n\t\ttotal += w\n\t\tweights.append(total)\n\tchance = random.random() * total\n\tresult = bisect.bisect(weights,chance)\n\treturn option[result]\n","repo_name":"dluman/NaNoGenMo-2016","sub_path":"weightedchoices.py","file_name":"weightedchoices.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"23446770447","text":"# -*- coding: utf-8 -*-\n\nfrom math import sqrt\nimport csv\n\n\n# 使用欧几里德距离公式计算 两个人偏好的相似度\ndef sim_distance(prefs, person1, person2):\n si = {}\n # shared_iten 列表\n for item in prefs[person1]:\n if item in prefs[person2]:\n si[item] = 1\n\n if len(si) == 0:\n return 0\n\n sum_of_squares = sum([pow(prefs[person1][item] - prefs[person2][item], 2)\n for item in prefs[person1] if item in prefs[person2]])\n\n return 1 / (1 + sqrt(sum_of_squares))\n\n\n# 使用皮尔逊相关系数计算两个人相似度\ndef sim_pearson(prefs, person1, person2):\n si = {}\n for item in prefs[person1]:\n if item in prefs[person2]:\n si[item] = 1\n\n n = len(si)\n if n == 
0:\n return 0\n\n # 对所有偏好求和\n sum1 = sum([prefs[person1][item] for item in si])\n sum2 = sum([prefs[person2][item] for item in si])\n\n # 求平方和\n sum1Sq = sum([pow(prefs[person1][item], 2) for item in si])\n sum2Sq = sum([pow(prefs[person2][item], 2) for item in si])\n\n # 乘积之和\n pSum = sum([prefs[person1][item] * prefs[person2][item] for item in si])\n\n # 计算皮尔逊评价值\n num = pSum - (sum1 * sum2 / n)\n den = sqrt((sum1Sq - pow(sum1, 2) / n) * (sum2Sq - pow(sum2, 2) / n))\n if den == 0:\n return 0\n\n r = num / den\n # print(r)\n return r\n\n\n# 从反应偏好的数据中返回最匹配者\ndef topMatches(prefs, person, n=5, similarity=sim_pearson):\n scores = [(similarity(prefs, person, other), other)\n for other in prefs if other != person]\n scores.sort()\n scores.reverse()\n return scores[0:n]\n\n\n# 构建商品相关的商品信息合集\ndef calculateSimilarItems(prefs, n=10):\n result = {}\n itemsPrefs = conversionData(prefs)\n c = 0\n for item in itemsPrefs:\n c += 1\n if c % 100 == 0:\n print(\"%d / %d\" % (c, len(itemsPrefs)))\n scores = topMatches(itemsPrefs, item, n)\n result[item] = scores\n\n return result\n\n\n# 基于用户对比 推荐商品\ndef getRecommendactions(prefs, person, n=10, similarity=sim_pearson):\n totals = {}\n simSums = {}\n\n for other in prefs:\n # 不和自己比较\n if other == person:\n continue\n sim = similarity(prefs, person, other)\n\n if sim <= 0:\n continue\n for item in prefs[other]:\n if item not in prefs[person]:\n # 相似度 * 评价值\n totals.setdefault(item, 0)\n totals[item] += prefs[other][item] * sim\n # 相似度之和\n simSums.setdefault(item, 0)\n simSums[item] += sim\n\n rankings = [(total / simSums[item], item)\n for item, total in totals.items() if simSums[item] != 0]\n\n rankings.sort()\n rankings.reverse()\n\n return rankings[0, n]\n\n\n# 基于商品相关 推荐商品\ndef getRecommendactionItems(prefs, similarItems, user):\n if user in prefs:\n userRatings = prefs[user]\n else:\n return None\n\n scores = {}\n totalSim = {}\n\n for (item, rating) in userRatings.items():\n for (similar, item2) in similarItems[item]:\n if item2 
in userRatings:\n continue\n\n # 未评价的商品 预测评价值总和\n scores.setdefault(item2, 0)\n scores[item2] += similar * rating\n\n # 未评价商品 相似值总和\n totalSim.setdefault(item2, 0)\n totalSim[item2] += similar\n\n rankings = [(score / totalSim[item], item)\n for (item, score) in scores.items() if totalSim[item] != 0]\n rankings.sort()\n rankings.reverse()\n return rankings\n\n\n# 转换数据 电影下用户评分集合\ndef conversionData(prefs):\n newData = {}\n\n for user, items in prefs.items():\n for movie, rating in items.items():\n newData.setdefault(movie, {})\n newData[movie][user] = float(rating)\n\n return newData\n","repo_name":"liheng666/facecat","sub_path":"recommendations.py","file_name":"recommendations.py","file_ext":"py","file_size_in_byte":4014,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"54"} +{"seq_id":"26645225933","text":"\n\"\"\"\nFor each dataset, the following fields are required:\n - num_classes: number of classes\n - train_list_name: the filename of train list\n - val_list_name: the filename of val list\n - filename_separator: the separator used in train/val/test list\n - image_tmpl: the template of images in the video folder\n - filter_video: the threshold to remove videos whose frame number is less than this value\n - label_file: a file contains mapping between label index to class name\n\nThose are optional:\n - test_list_name: the filename of test list\n - label_file: name of classes, used to map the prediction from a model to real label name\n\n\"\"\"\n\n\nDATASET_CONFIG = {\n 'kinetics-sounds': {\n 'num_classes': 31,\n 'train_list_name': 'train.txt',\n 'val_list_name': 'val.txt',\n 'filename_seperator': \";\",\n 'image_tmpl': '{:05d}.jpg',\n 'filter_video': 0,\n 'label_file': 'categories.txt'\n }\n}\n\n\ndef get_dataset_config(dataset):\n ret = DATASET_CONFIG[dataset]\n num_classes = ret['num_classes']\n train_list_name = ret['train_list_name']\n val_list_name = ret['val_list_name']\n test_list_name = 
ret.get('test_list_name', None)\n if test_list_name is not None:\n test_list_name = test_list_name\n filename_seperator = ret['filename_seperator']\n image_tmpl = ret['image_tmpl']\n filter_video = ret.get('filter_video', 0)\n label_file = ret.get('label_file', None)\n\n return num_classes, train_list_name, val_list_name, test_list_name, filename_seperator, \\\n image_tmpl, filter_video, label_file\n","repo_name":"IBM/AdaMML","sub_path":"utils/dataset_config.py","file_name":"dataset_config.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"5"} +{"seq_id":"8476505851","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nfrom builtins import *\n\nfrom collections import defaultdict\nfrom functools import partial\nfrom string import punctuation\nimport re\n\n#from treedlib import compile_relation_feature_generator\n\nfrom Asterisk.heuristics_generator.features.entity_features import get_ddlib_feats\nfrom Asterisk.heuristics_generator.models import Span\nfrom Asterisk.heuristics_generator.models.utils import get_as_dict\nfrom Asterisk.heuristics_generator.vis.tree_structs import corenlp_to_xmltree\n\n\ndef get_span_splits(candidate, stopwords=None):\n \"\"\"Base function for candidate span tokens split on whitespace\n and punctuation\n candidate: @Candidate to extract features for\n stopwords: @set of stopwords to filter out from tokens\n \"\"\"\n split_pattern = r'[\\s{}]+'.format(re.escape(punctuation))\n for i, arg in enumerate(candidate.get_contexts()):\n for token in re.split(split_pattern, arg.get_span().lower()):\n if stopwords is None or token not in stopwords:\n yield 'SPAN_SPLIT[{0}][{1}]'.format(i, token), 1\n\n\ndef get_span_splits_stopwords(stopwords):\n \"\"\"Get a span split unary function\"\"\"\n return partial(get_span_splits, stopwords=stopwords)\n\n\ndef 
get_unary_span_feats(sidxs, sentence, stopwords):\n \"\"\"Get unary span features from DDLib and TreeDLib\"\"\"\n #get_tdl_feats = compile_entity_feature_generator()\n sent_dict = get_as_dict(sentence)\n xmltree = corenlp_to_xmltree(sent_dict)\n if len(sidxs) > 0:\n # Add DDLIB entity features\n for f in get_ddlib_feats(sent_dict, sidxs):\n yield 'DDL_' + f, 1\n # Add TreeDLib entity features\n #for f in get_tdl_feats(xmltree.root, sidxs, stopwords=stopwords):\n # yield 'TDL_' + f, 1\n\n\"\"\"\ndef get_binary_span_feats(sidxs, sentence, stopwords):\n #Get binary (relation) span features from TreeDLib\n get_tdl_feats = compile_relation_feature_generator()\n xmltree = corenlp_to_xmltree(get_as_dict(sentence))\n s1_idxs, s2_idxs = sidxs\n if len(s1_idxs) > 0 and len(s2_idxs) > 0:\n # Apply TDL features\n for f in get_tdl_feats(xmltree.root, s1_idxs, s2_idxs,\n stopwords=stopwords):\n yield 'TDL_' + f, 1\n\n\"\"\"\ndef get_span_feats(candidate, stopwords=None):\n \"\"\"Base function for sentence dependency path features\n candidate: @Candidate to extract features for\n stopwords: @set of stopwords to filter out from dependency path\n \"\"\"\n args = candidate.get_contexts()\n if not isinstance(args[0], Span):\n raise ValueError(\"Accepts Span-type arguments, %s-type found.\")\n # Unary candidates\n if len(args) == 1:\n sidxs = list(range(args[0].get_word_start(), args[0].get_word_end() + 1))\n return get_unary_span_feats(sidxs, candidate.get_parent(), stopwords)\n # Binary candidates\n elif len(args) == 2:\n sidxs = [list(range(a.get_word_start(), a.get_word_end() + 1)) for a in args]\n return get_binary_span_feats(sidxs, candidate.get_parent(), stopwords)\n else:\n raise NotImplementedError(\"Only handles unary or binary candidates\")\n\n\ndef get_span_feats_stopwords(stopwords):\n \"\"\"Get a span dependency tree unary function\"\"\"\n return partial(get_span_feats, stopwords=stopwords)\n\n\ndef get_entity_word_idxs(sentence, et, cid):\n \"\"\"Get indices of 
@sentence tokens with type @et and id @cid\"\"\"\n itr = enumerate(zip(sentence.entity_types, sentence.entity_cids))\n return [i for i, (t, c) in itr if c == cid and t == et]\n\n\ndef get_first_document_span_feats(candidate, stopwords=None):\n \"\"\"Base function for sentence dependency path features where the sentence\n is the first sentence in the parent @Document of @candidate which\n contains the entities in @candidate\n candidate: @Candidate to extract features for\n stopwords: @set of stopwords to filter out from dependency path\n \"\"\"\n # Get candidate entity types and cids\n entity_types = [\n c.get_attrib_tokens('entity_types')[0] for c in candidate.get_contexts()\n ]\n #entity_cids = candidate.get_cids()\n entity_cids = [\n c.get_attrib_tokens('entity_cids')[0] for c in candidate.get_contexts()\n ]\n # Look for entity mentions in each sentence\n for sentence in candidate.get_parent().document.get_sentence_generator():\n mention_idxs = [\n get_entity_word_idxs(sentence, t, cid)\n for t, cid in zip(entity_types, entity_cids)\n ]\n if all(len(idxs) > 0 for idxs in mention_idxs):\n break\n # Get features for first valid sentence\n if all(len(idxs) > 0 for idxs in mention_idxs):\n if len(mention_idxs) == 1:\n return get_unary_span_feats(mention_idxs[0], sentence, stopwords)\n elif len(mention_idxs) == 2:\n return get_binary_span_feats(mention_idxs, sentence, stopwords)\n else:\n raise NotImplementedError(\"Only handles unary or binary candidates\")\n\n\ndef get_first_document_span_feats_stopwords(stopwords):\n \"\"\"Get a first document span dependency tree unary function\"\"\"\n return partial(get_first_document_span_feats, stopwords=stopwords)\n\n\ndef get_entity_type_counts(context, entity_types):\n \"\"\"Get count of entity cids in @context by entity_type in @entity_types\"\"\"\n type_counts = {et: defaultdict(int) for et in entity_types}\n for sentence in context.get_sentence_generator():\n cur_et, cur_cid = None, None\n # Iterate over entities in 
sentence\n for et, cid in zip(sentence.entity_types, sentence.entity_cids):\n # If current entity changes, add to its count\n if et != cur_et or cid != cur_cid:\n if cur_et in type_counts:\n type_counts[cur_et][cur_cid] += 1\n cur_et, cur_cid = et, cid\n # Add last to count\n if cur_et in type_counts:\n type_counts[cur_et][cur_cid] += 1\n return type_counts\n\n\ndef get_relative_frequency_feats(candidate, context):\n \"\"\"Base getting relative frequency of @candidate entities among\n entities in @context\n candidate: @Candidate to extract features for\n context: @Context over which to get relative frequency\n \"\"\"\n # Get candidate entity types and cids\n entity_types = [\n c.get_attrib_tokens('entity_types')[0] for c in candidate.get_contexts()\n ]\n #entity_cids = candidate.get_cids()\n entity_cids = [\n c.get_attrib_tokens('entity_cids')[0] for c in candidate.get_contexts()\n ]\n # Get counts for entities of relevant types\n type_counts = get_entity_type_counts(context, entity_types)\n # Get most frequent entities and counts for candidate entities\n max_counts = {\n et: max(1, max(type_counts[et].values())) for et in entity_types\n }\n entity_counts = {\n cid: type_counts[et][cid] for et, cid in zip(entity_types, entity_cids)\n }\n # Compute relative frequency\n for i, (cid, et) in enumerate(zip(entity_cids, entity_types)):\n p = float(entity_counts[cid]) / max_counts[et]\n yield \"ENTITY_RELATIVE_FREQUENCY[{0}]\".format(i), p\n\n\ndef get_document_relative_frequency_feats(candidate):\n \"\"\"Apply @get_relative_frequency_feats over the parent\n @Document of @candidate\n \"\"\"\n doc = candidate.get_parent().document\n return get_relative_frequency_feats(candidate, doc)\n\n\ndef get_sentence_relative_frequency_feats(candidate):\n \"\"\"Apply @get_relative_frequency_feats over the parent\n @Sentence of @candidate\n \"\"\"\n sentence = candidate.get_parent()\n return get_relative_frequency_feats(candidate, 
sentence)\n","repo_name":"MonaNashaat/Asterisk","sub_path":"Asterisk/heuristics_generator/features/relative_features.py","file_name":"relative_features.py","file_ext":"py","file_size_in_byte":7728,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"5"} +{"seq_id":"25094188068","text":"import os\nimport glob as glob\n\nimport pandas as pd\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom tensorflow.keras import Model\nfrom tensorflow.keras import regularizers\n\nfrom PIL import Image , ImageOps\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n\n#######################################\n\nVOC_COLORMAP = [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0],\n [0, 0, 128], [128, 0, 128], [0, 128, 128], [128, 128, 128],\n [64, 0, 0], [192, 0, 0], [64, 128, 0], [192, 128, 0],\n [64, 0, 128], [192, 0, 128], [64, 128, 128], [192, 128, 128],\n [0, 64, 0], [128, 64, 0], [0, 192, 0], [128, 192, 0],\n [0, 64, 128]]\n\nVOC_CLASSES = ['background', 'aeroplane', 'bicycle', 'bird', 'boat',\n 'bottle', 'bus', 'car', 'cat', 'chair', 'cow',\n 'diningtable', 'dog', 'horse', 'motorbike', 'person',\n 'potted plant', 'sheep', 'sofa', 'train', 'tv/monitor']\n\nN_CLASSES = 21\n\n#######################################\n\ndef parse_record(record):\n name_to_features = {\n 'rows': tf.io.FixedLenFeature([], tf.int64),\n 'cols': tf.io.FixedLenFeature([], tf.int64),\n 'depth': tf.io.FixedLenFeature([], tf.int64),\n 'image': tf.io.FixedLenFeature([], tf.string),\n 'target': tf.io.FixedLenFeature([], tf.string),\n }\n return tf.io.parse_single_example(record, name_to_features)\n\ndef decode_record(record):\n image = tf.io.decode_raw(\n record['image'], out_type='uint8', little_endian=True, fixed_length=None, name=None\n )\n target = tf.io.decode_raw(\n record['target'], out_type='uint8', little_endian=True, fixed_length=None, name=None\n )\n\n rows = record['rows']\n 
cols = record['cols']\n depth = record['depth']\n\n image = tf.reshape(image, (rows,cols,depth))\n target = tf.reshape(target, (rows,cols))\n\n return (image,target)\n\n#######################################\n\nAUTO = tf.data.experimental.AUTOTUNE\n\ndef read_tf_image_and_label(record):\n parsed_record = parse_record(record)\n X, y = decode_record(parsed_record)\n X = tf.cast(X, tf.float32) / 255.0\n return X, y\n\ndef get_training_dataset(record_files):\n dataset = tf.data.TFRecordDataset(record_files, buffer_size=100)\n dataset = dataset.map(read_tf_image_and_label, num_parallel_calls=AUTO)\n dataset = dataset.prefetch(AUTO)\n return dataset\n\n#######################################\n\ntrain_dataset = get_training_dataset(\"tfData/train_record.tfrecords\")\ntrain_dataset = train_dataset.shuffle(10000).batch(32)\n\nvalid_dataset = get_training_dataset(\"tfData/val_record.tfrecords\")\nvalid_dataset = valid_dataset.shuffle(10000).batch(32)\n\n#######################################\n\nclass ResNetFCN(tf.keras.Model):\n def __init__(self,input_shape=(320, 224, 3), **kwargs):\n super(ResNetFCN, self).__init__(**kwargs)\n self.model = self.getModel(input_shape)\n\n def getModel(self,input_shape):\n base_model = tf.keras.applications.ResNet101V2(\n weights=\"imagenet\", # Load weights pre-trained on ImageNet.\n input_shape=input_shape,\n include_top=False,\n )\n #base_model.trainable = False\n\n i = 0\n for layer in base_model.layers:\n print(layer.name)\n if i < 400:\n layer.trainable = False\n i += 1\n\n feat_ex = tf.keras.Model(base_model.input,base_model.layers[-10].output)\n\n # Build FCN\n inputs = tf.keras.Input(input_shape)\n x = feat_ex(inputs)\n x = layers.UpSampling2D(2)(x)\n for filters in [500, 400, 300, 200]:\n x = layers.Activation(\"relu\")(x)\n x = layers.Conv2DTranspose(filters, 4,kernel_regularizer= regularizers.l2(0.01), padding=\"same\")(x)\n x = layers.LayerNormalization()(x)\n x = layers.Dropout(.3)(x)\n\n x = layers.Activation(\"relu\")(x)\n x 
= layers.Conv2DTranspose(filters, 4,kernel_regularizer= regularizers.l2(0.01), padding=\"same\")(x)\n x = layers.LayerNormalization()(x)\n x = layers.Dropout(.3)(x)\n\n x = layers.UpSampling2D(2)(x)\n\n output = layers.Conv2D(N_CLASSES, 1,kernel_regularizer= regularizers.l2(0.001),activation=\"softmax\",padding=\"same\")(x)\n model = tf.keras.Model(inputs,output)\n\n print(model.summary())\n return model\n\n def train_step(self, data):\n X, y = data[0], data[1]\n with tf.GradientTape() as tape:\n yh = self.model(X)\n total_loss = self.compiled_loss(y,yh)\n grads = tape.gradient(total_loss, self.trainable_weights)\n self.optimizer.apply_gradients(zip(grads, self.trainable_weights))\n self.compiled_metrics.update_state(y, yh)\n return {m.name: m.result() for m in self.metrics}\n\n def test_step(self, data):\n X, y = data[0], data[1]\n yh = self.model(X)\n total_loss = self.compiled_loss(y,yh)\n self.compiled_metrics.update_state(y, yh)\n return {m.name: m.result() for m in self.metrics}\n\n def call(self, X):\n yh = self.model(X)\n return yh\n\n#######################################\n\nmodel = ResNetFCN()\nopt = tf.keras.optimizers.Nadam(learning_rate = 0.0005)\nmodel.compile(optimizer=opt,\n loss=tf.keras.losses.SparseCategoricalCrossentropy(),\n metrics=['accuracy'])\n\n#######################################\n\nmodel_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(\n filepath=\"ResNetFCN_VOC2012.h5\",\n save_weights_only=True,\n monitor='val_accuracy',\n mode='max',\n save_best_only=True)\n\nepochs = 30\nmodel.fit(train_dataset,epochs=epochs,validation_data=valid_dataset,\n callbacks=model_checkpoint_callback)\n\nmodel.save_weights(\"final_weights.h5\")\n","repo_name":"EnriqueNueve/ResNetFCN","sub_path":"train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":5876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"70538334232","text":"import copy\nfrom 
src.analysis.analysis_functions import consolidate_fit_stats\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom src.utils.functions import get_config\nimport argparse\nfrom pathlib import Path\nfrom src.utils.definitions import ROOT_DIR, REL_PATHS\nfrom src.utils.functions import increment_suffix, log_config\nfrom src.analysis.fit_functions import generate_fit_keys\n\nBAR_HATCHES = [\n '////',\n '+++',\n 'xxxx',\n '...',\n '***',\n 'ooo'\n]\n\n\nBAR_COLORS = [\n 'tab:blue',\n 'tab:cyan',\n 'tab:olive',\n 'tab:orange',\n 'tab:gray',\n 'tab:green',\n 'tab:purple',\n 'tab:pink',\n 'tab:red',\n 'tab:brown',\n]\n\nFONT_SIZE = 11\n\n\ndef grouped_bar_chart(data, group_labels, ylabel='mean accuracy', xlabel=None, group_width=0.7, padding=3,\n bar_width_frac=0.85,\n edge_color='black', line_width=1, output_dir=None, x_scale=1,\n figsize=(8, 8 / 1.33), label_threshold=None, include_bar_labels=True, rotation=45,\n include_legend=True, bar_hatching=True,\n ylim=None):\n\n x = np.arange(len(group_labels)) * x_scale\n num_items = len(data)\n bar_space = group_width / num_items\n bar_width = bar_width_frac * bar_space\n bar_offset = -1 * group_width / 2\n\n fig, ax = plt.subplots(figsize=figsize)\n\n for i, (label, item_data) in enumerate(data):\n\n if bar_hatching and len(data) <= len(BAR_HATCHES):\n hatch = BAR_HATCHES[i]\n else:\n hatch = None\n\n if len(data) <= len(BAR_COLORS):\n color = BAR_COLORS[i]\n else:\n color = None\n\n left_edge = bar_offset + (i + 0.5) * bar_space\n rect = ax.bar(x + left_edge, item_data, bar_width, label=label, edgecolor=edge_color, linewidth=line_width,\n hatch=hatch, color=color)\n\n if include_bar_labels:\n labels = [round(item, 2) for item in item_data]\n labels = [str(item)[1:] for item in labels] # strip off leading zeros (i.e. 
'0.01' -> '.01')\n labels = [f'{item}0' if len(item) == 2 else item for item in labels]\n if label_threshold:\n labels = [f'<{str(label_threshold)[1:]}' if float(item) < label_threshold else item for item in labels]\n ax.bar_label(rect, labels=labels, padding=padding)\n\n ax.set_ylabel(ylabel, size=FONT_SIZE)\n ax.set_xlabel(xlabel, size=FONT_SIZE)\n ax.set_xticks(x, group_labels, rotation=rotation, size=FONT_SIZE)\n if ylim is not None:\n ax.set_ylim(ylim[0], ylim[1])\n\n if include_legend:\n ax.legend(loc='upper right')\n\n fig.tight_layout()\n if output_dir:\n plt.savefig(Path(output_dir, 'bar_chart.png'))\n plt.show()\n\n\ndef get_output_dir(data, parent_dir='default', overwrite=True, suffix=None, manual_name=None):\n\n if parent_dir == 'default':\n parent_dir = Path(ROOT_DIR, REL_PATHS['bar_charts'])\n\n if not parent_dir.is_dir():\n Path.mkdir(parent_dir)\n\n if manual_name:\n new_dir_name = manual_name\n else:\n new_dir_name = None\n for item in data:\n label = item[0]\n result_num_string = label[:4]\n if not new_dir_name:\n new_dir_name = result_num_string\n else:\n new_dir_name = f'{new_dir_name}-{result_num_string}'\n\n if suffix:\n new_dir_name = f'{new_dir_name}-{suffix}'\n new_dir = Path(parent_dir, new_dir_name)\n\n if not new_dir.is_dir():\n Path.mkdir(new_dir)\n return new_dir\n elif overwrite:\n return new_dir\n else:\n if not suffix:\n suffix = 'v2'\n return get_output_dir(data, parent_dir=parent_dir, overwrite=overwrite, suffix=suffix,\n manual_name=manual_name)\n else:\n suffix = increment_suffix(suffix)\n return get_output_dir(data, parent_dir=parent_dir, overwrite=overwrite, suffix=suffix,\n manual_name=manual_name)\n\n\ndef sort_filter_fit_stats_for_grouped_bar_chart(consolidated_stats, target_keys=('res', 'blur', 'noise'),\n traverse_keys=('1d',), outer_keys=None, additional_filter=None):\n \"\"\"\n Sorts through nested dictionary structure to extract data to be used by grouped_bar_chart() function. 
The\n grouped_bar_chart() function takes data with the structure\n [\n data_label_0, --> i.e. the label that goes into the legend of the bar chart\n [\n group_0_value,\n group_1_value,\n ...\n ]\n data_label_1,\n [\n group_0_value,\n group_1_value,\n ...\n ]\n ],\n and it is into this structure that this function places data from consolidated_stats.\n\n\n :param consolidated_stats: dictionary containing nested dictionaries of fit statistics. Structured as follows:\n {outer_key: --> the fit_key used mapped to a particular fit function\n {\n traverse_key_0: --> if present, key mapped to method of fit stat collection (e.g. dw in 1d vs. 2d)\n {\n target_key_0: target_value_0,\n target_key_1: target_value_1,\n ...\n }\n traverse_key_1:\n {\n target_key_0: target_value_0\n target_key_1: target_value_0\n ...\n }\n target_key_0: target_value_0--> if travers_keys is None, target_keys can be found at first level of nested dict\n ...\n }\n }\n :param target_keys: keys corresponding to the actual data to be extracted\n :param traverse_keys: intermediate keys mapped to method of stat collection\n :param outer_keys: keys to separate the separate fit functions\n :param additional_filter: Not implemented yet\n :return:\n outer_keys --> list of the fit function keys used\n target_data --> list structured as follows:\n [target_key,\n [target_value_0,\n target_value_1,\n ...\n ]\n ]\n \"\"\"\n\n if outer_keys is None:\n outer_keys = list(consolidated_stats.keys())\n\n if additional_filter:\n filter_func = sub_dict_filter_functions[additional_filter]\n sorted_data = apply_filter_func(consolidated_stats,\n outer_keys=outer_keys,\n traverse_keys=traverse_keys,\n filter_func=filter_func)\n return outer_keys, sorted_data\n\n sorted_data = []\n\n if target_keys == 'all':\n sub_dict = consolidated_stats[outer_keys[0]]\n if traverse_keys:\n for traverse_key in traverse_keys:\n sub_dict = sub_dict[traverse_key]\n target_keys = list(sub_dict.keys())\n\n for target_key in target_keys:\n\n 
target_data = []\n\n for outer_key in outer_keys:\n sub_dict = consolidated_stats[outer_key]\n\n if traverse_keys:\n for traverse_key in traverse_keys:\n sub_dict = sub_dict[traverse_key]\n\n target = sub_dict[target_key]\n target_data.append(target)\n\n sorted_data.append([\n target_key, target_data\n ])\n\n return outer_keys, sorted_data\n\n\ndef apply_filter_func(consolidated_stats, outer_keys, traverse_keys, filter_func):\n\n sorted_data = []\n\n # note: traverse_keys used differently than in sort_filter_fit_stats_for_grouped_bar_chart()\n for i, traverse_key in enumerate(traverse_keys):\n target_data = []\n composite_key = None\n for outer_key in outer_keys:\n sub_dict = consolidated_stats[outer_key]\n\n sub_sub_dict = sub_dict[traverse_key]\n\n target_key_stand_in, target_key, target_val = filter_func(sub_sub_dict)\n if not composite_key:\n composite_key = f'{traverse_key}_{target_key_stand_in}'\n\n target_data.append(target_val)\n\n sorted_data.append(\n [composite_key, target_data]\n )\n\n return sorted_data\n\n\ndef min_value(data):\n\n keys = tuple(data.keys())\n values = tuple(data.values())\n min_val = min(values)\n min_val_idx = values.index(min_val)\n min_val_key = keys[min_val_idx]\n\n return 'min', min_val_key, min_val\n\n\ndef mean(data):\n\n # keys = tuple(data.keys())\n values = tuple(data.values())\n mean_val = np.mean(values)\n # min_val_idx = values.index(min_val)\n # min_val_key = keys[min_val_idx]\n\n return 'mean', None, mean_val\n\n\nsub_dict_filter_functions = {\n 'min_value': min_value,\n 'mean': mean,\n}\n\n\ndef main(run_config):\n\n \"\"\"\n Generates a grouped bar chart using either data in run_config or else data fit characterization data extracted and\n grouped with the consolidate_fit_stats() and sort_filter_fit_stats_for_grouped_bar_chart() functions.\n \"\"\"\n if 'data' not in run_config.keys():\n\n if 'fit_keys' in run_config.keys():\n fit_keys = run_config['fit_keys']\n else:\n functional_forms = 
run_config['functional_forms']\n blur_mappings = run_config['blur_mappings']\n noise_mappings = run_config['noise_mappings']\n fit_keys = generate_fit_keys(functional_forms, blur_mappings, noise_mappings)\n\n composite_result_id = run_config['composite_result_id']\n analysis_type = run_config['analysis_type']\n target_keys = run_config['target_keys']\n traverse_keys = run_config['traverse_keys']\n\n group_labels = run_config['group_labels']\n\n if 'additional_filter' in run_config.keys():\n additional_filter = run_config['additional_filter']\n else:\n additional_filter = None\n\n consolidated_fit_stats = consolidate_fit_stats(fit_keys=fit_keys,\n composite_result_id=composite_result_id,\n analysis_type=analysis_type)\n __, data = sort_filter_fit_stats_for_grouped_bar_chart(consolidated_fit_stats, target_keys=target_keys,\n traverse_keys=traverse_keys, outer_keys=fit_keys,\n additional_filter=additional_filter)\n if group_labels is None:\n group_labels = copy.deepcopy(fit_keys)\n\n else:\n data = run_config['data']\n group_labels = run_config['group_labels']\n\n overwrite = run_config['overwrite']\n manual_name = run_config['manual_name']\n ylabel = run_config['ylabel']\n xlabel = run_config['xlabel']\n group_width = run_config['group_width']\n padding = run_config['padding']\n bar_width_frac = run_config['bar_width_frac']\n edge_color = run_config['edge_color']\n line_width = run_config['line_width']\n label_threshold = run_config['label_threshold']\n include_bar_labels = run_config['include_bar_labels']\n rotation = run_config['rotation']\n include_legend = run_config['include_legend']\n if 'bar_hatching' in run_config.keys():\n bar_hatching = run_config['bar_hatching']\n else:\n bar_hatching = False\n if 'ylim' in run_config.keys():\n ylim = run_config['ylim']\n else:\n ylim = None\n\n output_dir = get_output_dir(data, overwrite=overwrite,\n manual_name=manual_name)\n\n grouped_bar_chart(data=data,\n group_labels=group_labels,\n ylabel=ylabel,\n xlabel=xlabel,\n 
group_width=group_width,\n padding=padding,\n bar_width_frac=bar_width_frac,\n edge_color=edge_color,\n line_width=line_width,\n output_dir=output_dir,\n label_threshold=label_threshold,\n include_bar_labels=include_bar_labels,\n rotation=rotation,\n include_legend=include_legend,\n bar_hatching=bar_hatching,\n ylim=ylim\n )\n\n log_config(output_dir, run_config)\n\n\nif __name__ == '__main__':\n\n config_filename = 's6_keeper-model_dw_3d.yml'\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--config_name', default=config_filename, help='config filename to be used')\n parser.add_argument('--config_dir',\n default=Path(Path(__file__).parents[0], 'bar_chart'),\n help=\"configuration file directory\")\n args_passed = parser.parse_args()\n _run_config = get_config(args_passed)\n\n main(_run_config)\n","repo_name":"acb08/image-quality-for-deep-learning","sub_path":"src/analysis/bar_chart.py","file_name":"bar_chart.py","file_ext":"py","file_size_in_byte":12700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"20204123243","text":"# Напишите функцию, которая получает на вход директорию и рекурсивно обходит её и все вложенные директории. 
\n# Результаты обхода сохраните в файлы json, csv и pickle.\n# 1) Для дочерних объектов указывайте родительскую директорию.\n# 2) Для каждого объекта укажите файл это или директория.\n# 3) Для файлов сохраните его размер в байтах, \n# а для директорий размер файлов в ней с учётом всех вложенных файлов и директорий.\n\n\nimport os \nimport pickle\nimport json\nimport csv\n\n# находим размер директорий\ndef get_directory_size(path: str=os.getcwd()):\n dir_sizes={}\n for dir_path, _, file_name in os.walk(path): #обходит все папки, подпапки, файлы\n size_p = sum(os.path.getsize(os.path.join(dir_path,file)) for file in file_name)\n dir_sizes[dir_path] = size_p #нашли размеры папок\n return dir_sizes\n\n\n\ndef get_file(path: str=os.getcwd()):\n file_list=[]\n size_f=0\n for obj in os.listdir(path): #обходит все папки, подпапки, файлы\n objpath = os.path.join(path, obj) # полный путь\n sizes_ob=get_directory_size()\n if os.path.isfile(objpath):\n size_f=os.path.getsize(f'{objpath}')\n file_list.append(f'{objpath} -> {size_f} -> file')\n elif os.path.isdir(objpath):\n file_list.append(f'{objpath} - > {sizes_ob.get(objpath)} -> dir')\n \n return file_list\n\n # for i in file_list:\n # print(i)\n\n #записываем Json\ndef python_to_json(file_list:list,json_file):\n with open(json_file,'w',encoding='UTF-8') as file:\n json.dump(file_list,file,indent=4,ensure_ascii=False)\n\n #записываем csv\ndef python_to_csv (file_list,csv_file):\n with open(csv_file,'w',encoding='UTF-8') as file1:\n write = csv.writer(file1,delimiter = '\\r')\n write.writerow(file_list)\n\n #записываем Pickle\ndef python_to_pickle(file_list:list,pickle_file):\n with open(pickle_file,'wb') as file2:\n pickle.dump(file_list,file2) \n \n\n\ns=get_file('C:/Users/User/Desktop/Python/GB/Погружение в Python/PyCh/Sem_8') 
\nprint(python_to_json(s,'Home_work_8.json'))\nprint(python_to_csv(s,'Home_work_8.csv'))\nprint(python_to_pickle(s,'Home_work_8.pickle'))\n\n\n\n","repo_name":"ElenaVasileva12/Sem_8","sub_path":"Home_work_8.py","file_name":"Home_work_8.py","file_ext":"py","file_size_in_byte":2610,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"8003426303","text":"from scipy.sparse import csr_matrix as scipy_csr_matrix\nfrom scipy.sparse import coo_matrix as scipy_coo_matrix\nfrom scipy.sparse.sputils import (upcast,\n upcast_char,\n get_index_dtype,\n )\n\nfrom scipy.sparse._sparsetools import csr_tocsc\nfrom scipy.sparse import issparse\n\ntry:\n from pyomo.contrib.pynumero.extensions.sparseutils import (csr_matvec_no_diag,\n csc_matvec_no_diag)\nexcept ImportError as e:\n print('{}'.format(e))\n raise ImportError('Error importing sparseutils while running coo interface. '\n 'Make sure libpynumero_SPARSE is installed and added to path.')\n\nfrom scipy.sparse import _sparsetools\nfrom pyomo.contrib.pynumero.sparse.base import SparseBase\nfrom pyomo.contrib.pynumero.sparse.utils import (is_symmetric_dense,\n _convert_matrix_to_symmetric,\n _is_symmetric_numerically)\nimport numpy as np\n\n__all__ = ['CSRMatrix', 'CSRSymMatrix']\n\n\nclass CSRMatrix(SparseBase, scipy_csr_matrix):\n \"\"\"\n Compressed Sparse Row matrix\n\n | This can be instantiated in several ways\n | CSRMatrix(D)\n | with a dense matrix D\n | CSRMatrix(S)\n | with another sparse matrix S (equivalent to S.tocoo())\n | CSRMatrix((M, N), [dtype])\n | to construct an empty matrix with shape (M, N) dtype is optional, defaulting to dtype=d.\n | CSRMatrix((data, (i, j)), [shape=(M, N)])\n | to construct from three arrays:\n | data[:] the entries of the matrix, in any order\n | i[:] the row indices of the matrix entries\n | j[:] the column indices of the matrix entries\n | Where A[i[k], j[k]] = data[k]. 
When shape is not specified, it is inferred from the index arrays\n | CSRMatrix((data, indices, indptr), [shape=(M, N)])\n | to construct from three arrays:\n | is the standard CSR representation where the column indices for column i are stored in\n | indices[indptr[i]:indptr[i+1]] and their corresponding values are stored in\n | data[indptr[i]:indptr[i+1]]. If the shape parameter is not supplied, the matrix dimensions are inferred from\n | the index arrays.\n \"\"\"\n def __init__(self, arg1, shape=None, dtype=None, copy=False, **kwargs):\n\n # include upper triangular if arg1 is symmetric\n expand_symmetry = kwargs.pop('expand_symmetry', True)\n if expand_symmetry and isinstance(arg1, SparseBase):\n if arg1.is_symmetric:\n arg1 = arg1.tofullmatrix().tocsr()\n\n scipy_csr_matrix.__init__(self, arg1, shape=shape, dtype=dtype, copy=copy)\n SparseBase.__init__(self)\n\n def tosymcsr(self):\n raise NotImplementedError('Not supported')\n\n def tocsc(self, copy=False):\n idx_dtype = get_index_dtype((self.indptr, self.indices),\n maxval=max(self.nnz, self.shape[0]))\n indptr = np.empty(self.shape[1] + 1, dtype=idx_dtype)\n indices = np.empty(self.nnz, dtype=idx_dtype)\n data = np.empty(self.nnz, dtype=upcast(self.dtype))\n\n csr_tocsc(self.shape[0], self.shape[1],\n self.indptr.astype(idx_dtype),\n self.indices.astype(idx_dtype),\n self.data,\n indptr,\n indices,\n data)\n\n from pyomo.contrib.pynumero.sparse.csc import CSCMatrix\n A = CSCMatrix((data, indices, indptr), shape=self.shape)\n A.has_sorted_indices = True\n return A\n\n def tocoo(self, copy=True):\n major_dim, minor_dim = self._swap(self.shape)\n minor_indices = self.indices\n major_indices = np.empty(len(minor_indices), dtype=self.indices.dtype)\n _sparsetools.expandptr(major_dim, self.indptr, major_indices)\n row, col = self._swap((major_indices, minor_indices))\n\n from pyomo.contrib.pynumero.sparse.coo import COOMatrix\n return COOMatrix((self.data, (row, col)), self.shape, copy=copy,\n 
dtype=self.dtype)\n\n def tocsr(self, copy=False):\n # copy only there to agree with the signature\n return self\n\n def todok(self, copy=False):\n raise NotImplementedError('Not supported')\n\n def todia(self, copy=False):\n raise NotImplementedError('Not supported')\n\n def tolil(self, copy=False):\n raise NotImplementedError('Not supported')\n\n def tofullmatrix(self):\n return self\n\n def transpose(self, axes=None, copy=False):\n if axes is not None:\n raise ValueError((\"Sparse matrices do not support \"\n \"an 'axes' parameter because swapping \"\n \"dimensions is the only logical permutation.\"))\n\n M, N = self.shape\n\n from pyomo.contrib.pynumero.sparse.csc import CSCMatrix\n return CSCMatrix((self.data, self.indices,\n self.indptr), shape=(N, M), copy=copy)\n\n def _with_data(self, data, copy=True):\n \"\"\"Returns a matrix with the same sparsity structure as self,\n but with different data. By default the structure arrays\n (i.e. .indptr and .indices) are copied.\n \"\"\"\n if copy:\n return self.__class__((data, self.indices.copy(), self.indptr.copy()),\n shape=self.shape, dtype=data.dtype)\n else:\n return self.__class__((data, self.indices, self.indptr),\n shape=self.shape, dtype=data.dtype)\n\n def _add_sparse(self, other):\n if isinstance(other, SparseBase):\n if other.is_symmetric:\n return super(CSRMatrix, self)._add_sparse(other.tofullmatrix())\n return super(CSRMatrix, self)._add_sparse(other)\n if issparse(other):\n raise RuntimeError(\"Addition not supported with scipy matrices\")\n raise RuntimeError(\"Sparse format not recognized {}\".format(type(other)))\n\n def _sub_sparse(self, other):\n if isinstance(other, SparseBase):\n if other.is_symmetric:\n return super(CSRMatrix, self)._sub_sparse(other.tofullmatrix())\n return super(CSRMatrix, self)._sub_sparse(other)\n if issparse(other):\n raise RuntimeError(\"Subtraction not supported with scipy matrices\")\n raise RuntimeError(\"Sparse format not recognized {}\".format(type(other)))\n\n def 
_mul_sparse_matrix(self, other):\n\n if isinstance(other, SparseBase):\n if other.is_symmetric:\n expanded_other = other.tofullmatrix()\n result = super(CSRMatrix, self)._mul_sparse_matrix(expanded_other)\n if self.shape[0] == expanded_other.shape[1]:\n if _is_symmetric_numerically(result):\n return _convert_matrix_to_symmetric(result, check_symmetry=False)\n return result\n from pyomo.contrib.pynumero.sparse.block_matrix import BlockMatrix\n if isinstance(other, BlockMatrix):\n raise NotImplementedError(\"Not supported yet\")\n expanded_other = other.tocsr()\n result = super(CSRMatrix, self)._mul_sparse_matrix(expanded_other)\n if self.shape[0] == expanded_other.shape[1]:\n if _is_symmetric_numerically(result):\n return _convert_matrix_to_symmetric(result, check_symmetry=False)\n return result\n\n result = super(CSRMatrix, self)._mul_sparse_matrix(other)\n if self.shape[0] == other.shape[1]:\n if _is_symmetric_numerically(result):\n return _convert_matrix_to_symmetric(result, check_symmetry=False)\n return result\n if issparse(other):\n raise RuntimeError(\"Multiplication not supported with scipy matrices\")\n raise RuntimeError(\"Sparse format not recognized {}\".format(type(other)))\n\n def getcol(self, j):\n from pyomo.contrib.pynumero.sparse.csc import CSCMatrix\n return CSCMatrix(self.toscipy().getcol(j))\n\n def getrow(self, i):\n return CSRMatrix(super(CSRMatrix, self).getrow(i))\n\n def toscipy(self):\n return scipy_csr_matrix(self)\n\n def __repr__(self):\n return 'CSRMatrix{}'.format(self.shape)\n\n\n# this matrix will only store the lower triangular indices\nclass CSRSymMatrix(CSRMatrix):\n\n def __init__(self, arg1, shape=None, dtype=None, copy=False, **kwargs):\n\n # check if dense matrix is symmetric\n if isinstance(arg1, np.ndarray):\n if not is_symmetric_dense(arg1):\n raise RuntimeError(\"ndarray is not symmetric\")\n # keep only lower triangular\n arg1 = np.tril(arg1)\n\n # symmetric matrices don't expand symmetry\n expand_symmetry = 
kwargs.pop('expand_symmetry', False)\n\n error_msg = \"Symmetric matrices only store lower triangular\"\n assert not expand_symmetry, error_msg\n\n super(CSRSymMatrix, self).__init__(\n arg1,\n shape=shape,\n dtype=dtype,\n copy=copy,\n expand_symmetry=expand_symmetry,\n **kwargs\n )\n\n\n # add check to veryfy square matrix\n if self.shape[0] != self.shape[1]:\n raise RuntimeError('A rectangular matrix is not symmetric')\n\n # check nnz is less than the full lower triangular\n if self.nnz > self.shape[0]*(self.shape[0] + 1)/2:\n raise RuntimeError('CSRSymMatrix only store lower triangular entries. Too many nnz')\n\n # TODO: check only lower triangular entries\n\n # makes sparse matrix symmetric\n self._symmetric = True\n\n def transpose(self, axes=None, copy=False):\n if axes is not None:\n raise ValueError((\"Sparse matrices do not support \"\n \"an 'axes' parameter because swapping \"\n \"dimensions is the only logical permutation.\"))\n\n M, N = self.shape\n\n return CSRSymMatrix((self.data, self.indices,\n self.indptr), shape=(N, M), copy=copy)\n\n def toarray(self, order=None, out=None):\n m = self.tofullcoo()\n return m.toarray(order=order, out=out)\n\n def todense(self, order=None, out=None):\n return np.asmatrix(self.toarray(order=order, out=out))\n\n def tocsc(self, copy=False):\n idx_dtype = get_index_dtype((self.indptr, self.indices),\n maxval=max(self.nnz, self.shape[0]))\n indptr = np.empty(self.shape[1] + 1, dtype=idx_dtype)\n indices = np.empty(self.nnz, dtype=idx_dtype)\n data = np.empty(self.nnz, dtype=upcast(self.dtype))\n\n csr_tocsc(self.shape[0], self.shape[1],\n self.indptr.astype(idx_dtype),\n self.indices.astype(idx_dtype),\n self.data,\n indptr,\n indices,\n data)\n\n from pyomo.contrib.pynumero.sparse.csc import CSCSymMatrix\n A = CSCSymMatrix((data, indices, indptr), shape=self.shape)\n A.has_sorted_indices = True\n return A\n\n def tocoo(self, copy=True):\n major_dim, minor_dim = self._swap(self.shape)\n minor_indices = self.indices\n 
major_indices = np.empty(len(minor_indices), dtype=self.indices.dtype)\n _sparsetools.expandptr(major_dim, self.indptr, major_indices)\n row, col = self._swap((major_indices, minor_indices))\n\n from pyomo.contrib.pynumero.sparse.coo import COOSymMatrix\n return COOSymMatrix((self.data, (row, col)), self.shape, copy=copy,\n dtype=self.dtype)\n\n def tofullcoo(self):\n return self.tocoo().tofullcoo()\n\n def tofullcsr(self):\n return self.tocoo().tofullcsr()\n\n def tofullcsc(self):\n return self.tocoo().tofullcsc()\n\n def tofullmatrix(self):\n return self.tofullcsr()\n\n def todia(self, copy=False):\n raise NotImplementedError('Not supported')\n\n def tolil(self, copy=False):\n raise NotImplementedError('Not supported')\n\n def toscipy(self):\n return scipy_csr_matrix(self.tofullmatrix())\n\n def _add_sparse(self, other):\n if isinstance(other, SparseBase):\n if other.is_symmetric:\n return self._binopt(other, '_plus_')\n return self.tofullmatrix()._add_sparse(other)\n if issparse(other):\n raise RuntimeError(\"Addition not supported with scipy matrices\")\n raise RuntimeError(\"Sparse format not recognized {}\".format(type(other)))\n\n def _sub_sparse(self, other):\n if isinstance(other, SparseBase):\n if other.is_symmetric:\n return self._binopt(other, '_minus_')\n return self.tofullmatrix()._sub_sparse(other)\n if issparse(other):\n raise RuntimeError(\"Subtraction not supported with scipy matrices\")\n raise RuntimeError(\"Sparse format not recognized {}\".format(type(other)))\n\n def _add_dense(self, other):\n return self.tofullcoo()._add_dense(other)\n\n def _mul_vector(self, other):\n\n M, N = self.shape\n\n # output array\n resultl = np.zeros(M, dtype=upcast_char(self.dtype.char,\n other.dtype.char))\n\n resultu = np.zeros(M, dtype=upcast_char(self.dtype.char,\n other.dtype.char))\n\n # csr_matvec or csc_matvec\n fnl = getattr(_sparsetools, self.format + '_matvec')\n fnl(M, N, self.indptr, self.indices, self.data, other, resultl)\n\n\n upper = 
self.transpose()\n # fnu = getattr(_sparsetools, upper.format + '_matvec')\n # fnu(M, N, upper.indptr, upper.indices, upper.data, other, resultu)\n csc_matvec_no_diag(N, upper.indptr, upper.indices, upper.data, other, resultu)\n\n # diagonal = self.diagonal()\n\n # result = np.zeros(M, dtype=upcast_char(self.dtype.char, other.dtype.char))\n # sym_csr_matvec(M, self.indptr, self.indices, self.data, other, result)\n # return result\n\n return resultl + resultu # - np.multiply(other, diagonal)\n\n def _mul_multivector(self, other):\n raise NotImplementedError('Not supported')\n\n def _mul_sparse_matrix(self, other):\n\n expanded_sym = self.tofullcsr()\n if isinstance(other, SparseBase):\n if other.is_symmetric:\n expanded_other = other.tofullcsr()\n result = expanded_sym * expanded_other\n return _convert_matrix_to_symmetric(result, check_symmetry=False)\n from pyomo.contrib.pynumero.sparse.block_matrix import BlockMatrix\n if isinstance(other, BlockMatrix):\n # this will also need a check for symmetric block matrices\n raise NotImplementedError(\"Not supported yet\")\n expanded_other = other.tocsr()\n result = expanded_sym * expanded_other\n if expanded_sym.shape[0] == expanded_other.shape[1]:\n if _is_symmetric_numerically(result):\n return _convert_matrix_to_symmetric(result, check_symmetry=False)\n return result\n result = expanded_sym * other\n if expanded_sym.shape[0] == other.shape[1]:\n if _is_symmetric_numerically(result):\n return _convert_matrix_to_symmetric(result, check_symmetry=False)\n return result\n if issparse(other):\n raise RuntimeError(\"Multiplication not supported with scipy matrices\")\n raise RuntimeError(\"Sparse format not recognized {}\".format(type(other)))\n\n def getcol(self, j):\n return self.tofullmatrix().getcol(j)\n\n def getrow(self, i):\n return self.tofullmatrix().getrow(i)\n\n def getallnnz(self):\n # ToDo: add support for this\n raise NotImplementedError(\"Operation not supported yet\")\n\n def __repr__(self):\n return 
'CSRSymMatrix{}'.format(self.shape)\n\nif __name__ == \"__main__\":\n\n row = np.array([0, 3, 1, 0])\n col = np.array([0, 3, 1, 2])\n data = np.array([4, 5, 7, 9])\n m = CSRMatrix((data, (row, col)), shape=(4, 4))\n print(m.toarray())\n print(m.is_symmetric)\n\n row = np.array([0, 3, 1, 2, 3])\n col = np.array([0, 0, 1, 2, 3])\n data = np.array([2, 1, 3, 4, 5])\n m = CSRSymMatrix((data, (row, col)), shape=(4, 4))\n print(m.toarray())\n print(m.is_symmetric)\n\n mcsr = m.tofullcsr()\n print(mcsr.toarray())\n print(mcsr.is_symmetric)\n\n x = np.ones(m.shape[0])\n print(mcsr.dot(x))\n print(m.dot(x))\n\n row = np.array([0, 1, 4, 1, 2, 7, 2, 3, 5, 3, 4, 5, 4, 7, 5, 6, 6, 7])\n col = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 6, 7])\n data = np.array([27, 5, 12, 56, 66, 34, 94, 31, 41, 7, 98, 72, 24, 33, 78, 47, 98, 41])\n big_m = CSRSymMatrix((data, (row, col)), shape=(8, 8))\n print(big_m.toarray())\n print(big_m.is_symmetric)\n x = np.ones(big_m.shape[0])\n print(big_m.tofullcsr().dot(x))\n print(big_m.dot(x))\n print(big_m.toarray())\n","repo_name":"mbakhtvar/pyomo","sub_path":"pyomo/contrib/pynumero/sparse/csr.py","file_name":"csr.py","file_ext":"py","file_size_in_byte":17407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"5"} +{"seq_id":"22273927604","text":"import sys\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom PyQt5 import QtWidgets, QtGui\nfrom PyQt5 import QtCore\nfrom PyQt5.QtCore import Qt\nimport os\nfrom os import listdir\nfrom os.path import isfile, join\nimport fnmatch\nfrom random import shuffle\nimport time\nimport _pickle as pickle\nimport numpy as np\nimport scipy.misc as misc\nfrom sklearn import svm\nfrom sklearn.svm import SVC\nimport sklearn\nimport pickle\nimport tensorflow as tf\nfrom tqdm import tqdm\nimport argparse\nimport load_features as load\nfrom compute_features import compute_img_features\nfrom load_features import 
load_img_features\nimport time\nfrom sklearn import linear_model\n\nclass classifier_v2(QMainWindow):\n def __init__(self):\n super().__init__()\n css = \"\"\"QWidget{\n Background: #ffffff;\n color:#4f97c1;\n font:12px bold;\n font-weight:bold;\n border-radius: 1px;\n height: 11px;\n }\n QPushButton{\n background-color: #ffffff;\n border-radius: 15px;\n border: 1px;\n border-style: outset;\n border-width: 2px;\n border-color: #4f97c1;\n font: 13px;\n padding: 2px;\n }\n QPushButton:pressed {\n background-color: #0171a5;\n border-style: inset;\n }\n \"\"\"\n self.setAutoFillBackground(True)\n self.setBackgroundRole(QtGui.QPalette.Highlight)\n self.setStyleSheet(css)\n self.initUI()\n\n def initUI(self):\n self.statusBar().showMessage('No Images Loaded')\n self.label = QLabel(\"No Images Loaded\",self)\n self.label.resize(300,300)\n self.label.move(200,30)\n self.distorted = QLabel(\"1 : Distorted\",self)\n self.distorted.resize(100,30)\n self.distorted.move(250,300)\n self.un = QLabel(\"2 : Non-Distorted\",self)\n self.un.resize(120,30)\n self.un.move(250,320)\n self.sk = QLabel(\"3 : Skip Image\",self)\n self.sk.resize(120,30)\n self.sk.move(250,340)\n self.img_lbl = QLabel(\"Images Classified: \",self)\n self.img_lbl.resize(125,20)\n self.img_lbl.move(250,370)\n self.numImages = QLabel(\"0\",self)\n self.numImages.resize(50,20)\n self.numImages.move(380,370)\n self.setGeometry(0,0,450,430)\n self.setWindowTitle(\"Binary Picture Classifier\")\n self.setWindowIcon(QIcon(\"download-1.png\"))\n self.qbtn = QPushButton(\"Quit\",self)\n self.qbtn.clicked.connect(self.save)\n self.qbtn.resize(50,25)\n self.qbtn.move(25,260)\n self.next = QPushButton(\"Next\",self)\n self.next.resize(50,20)\n self.next.move(25,220)\n self.next.clicked.connect(self.getNext)\n self.prev_btn = QPushButton(\"Previous\",self)\n self.prev_btn.resize(70,20)\n self.prev_btn.move(25,180)\n self.prev_btn.clicked.connect(self.getPrev)\n self.load = QPushButton(\"Load Images\",self)\n 
self.load.resize(100,25)\n self.load.move(25,140)\n self.load.clicked.connect(self.init_load)\n self.skip = QPushButton(\"Skip\",self)\n self.skip.resize(50,25)\n self.skip.move(25,100)\n self.dist = QPushButton(\"Distorted\",self)\n self.dist.resize(70,20)\n self.dist.move(25,60)\n self.dist.clicked.connect(self.classB_event)\n self.nondist = QPushButton(\"Non-Distorted\",self)\n self.nondist.resize(110,20)\n self.nondist.move(25,20)\n self.nondist.clicked.connect(self.classA_event)\n self.skip.clicked.connect(self.skip_event)\n self.lbl = QLabel(\"Image Features:\",self)\n combo = QComboBox(self)\n combo.addItem(\"inception_v1\")\n combo.addItem(\"inception_v2\")\n combo.addItem(\"inception_v3\")\n combo.addItem(\"inception_resnet_v2\")\n combo.addItem(\"resnet_v1_50\")\n combo.addItem(\"resnet_v1_101\")\n combo.addItem(\"vgg_16\")\n combo.addItem(\"vgg_19\")\n self.images= 0 #tracks number of images\n self.feats = None\n self.imag_reps = [] #classified representations\n self.un_prev = ([],[]) #previous unclassified\n self.class_vals = [] #classified indexes\n self.classA_list = []\n self.classB_list = []\n self.rec = [] #recently classified indexes\n self.skipped = [] #indexes of skipped images\n self.img_dict = {} #dictionary of {index:images}\n self.npy_dict = {} #dic of numpy array reps of images\n self.paths = [] #list of all image paths\n self.full_paths = [] #holds the pkl file for feat reps (if found)\n self.index = 1 #image index (starts at 1)\n self.path = None #path to directory of images\n self.d = {} #dictionary of {image:label}\n self.learn_type = \"random\" #active learning method (default = random)\n self.first_time = False #only for a certain case when partial-fit does not have enough data\n self.clf = linear_model.SGDClassifier()\n self.skip_flg = False\n self.path_len = 0\n self.prev = -1 #index of previous image\n self.k = QShortcut(QKeySequence(\"1\"),self)\n self.k.activated.connect(self.classB_event)\n self.p = 
QShortcut(QKeySequence(\"4\"),self)\n self.p.activated.connect(self.getPrev)\n self.shtct = QShortcut(QKeySequence(\"2\"),self)\n self.shtct.activated.connect(self.classA_event)\n self.skct = QShortcut(QKeySequence(\"3\"),self)\n self.skct.activated.connect(self.skip_event)\n combo.resize(120,25)\n combo.move(25, 360)\n self.lbl.resize(150,25)\n self.lbl.move(25, 340)\n combo.activated[str].connect(self.chooseFeats)\n\n lbl2 = QLabel(\"Active Learning Method:\",self)\n combo2 = QComboBox(self)\n combo2.addItem(\"Farthest\")\n combo2.addItem(\"Closest\")\n combo2.addItem(\"Random\")\n combo2.activated[str].connect(self.chooseModel)\n combo2.move(25, 315)\n lbl2.resize(160,25)\n lbl2.move(25, 290)\n self.show()\n\n\n\n\n def chooseFeats(self,value):\n self.statusBar().showMessage('Loading Features')\n if self.classA_list == [] and self.classB_list == [] and value != 'pixels':\n type = value\n path = self.paths\n if self.check_and_reload() == False:\n self.first_time = True\n self.statusBar().showMessage('Loading Features...')\n if value != \"Pixels\":\n for i in self.full_paths:\n if value in i:\n self.statusBar().showMessage('Reloading Features...')\n self.feats = load_img_features(type,self.path)\n self.makeData(self.feats)\n self.statusBar().showMessage('Ready')\n break\n if self.feats == None:\n self.statusBar().showMessage('Loading Feature Data...')\n self.statusBar().showMessage('Computing Features...')\n compute_img_features(type,path,self.path)\n self.statusBar().showMessage('Loading Features')\n self.feats = load_img_features(type,self.path)\n self.makeData(self.feats)\n else:\n self.statusBar().showMessage(\"Computing Pixel Features...\")\n self.load_pix_features()\n self.statusBar().showMessage('Ready')\n self.path_len = len(self.paths)\n self.load_img()\n\n def check_and_reload(self):\n self.statusBar().showMessage('Loading Data...')\n if os.path.isfile(self.path+'/labels.pkl'):\n self.statusBar().showMessage('Reloading Previous Data...')\n self.d, 
self.classA_list, self.classB_list,self.skipped,self.img_dict,self.npy_dict,self.paths,self.imag_reps,self.class_vals,self.un_prev= pickle.load(open(self.path+'/labels.pkl', 'rb'))\n for i in self.d:\n if self.d[i] == 1 or self.d[i] ==2:\n self.images+=1\n self.statusBar().showMessage('Checking Images...')\n if len(self.classA_list) < 1 or len(self.classB_list) < 1:\n self.first_time = True\n if self.classA_list != [] or self.classB_list !=[]:\n while(self.index in self.classA_list or self.index in self.classB_list or self.index in self.skipped):\n self.index +=1\n if self.index > len(self.paths):\n self.statusBar().showMessage('All Images Classified!')\n self.save()\n self.statusBar().showMessage('Fitting Data...')\n self.clf.fit(np.asarray(self.imag_reps),np.asarray(self.class_vals))\n self.statusBar().showMessage('Loading Image...')\n self.path_len = len(self.paths)\n self.load_img()\n self.statusBar().showMessage('Ready')\n return True\n return False\n\n\n#should only occur once at the beginning to match up all of the Picture data\n def makeData(self,feats):\n self.statusBar().showMessage('Syncing Data...')\n if feats != None:\n index = 1\n self.paths = []\n self.npy_dict = {}\n self.img_dict = {}\n for i in feats:\n self.paths.append(i)\n self.img_dict[index] = i\n self.npy_dict[index] = feats[i]\n index +=1\n\n def chooseModel(self,value):\n if value == \"Random\":\n self.learn_type = \"r\"\n elif value == \"Closest\":\n self.learn_type = \"c\"\n elif value == \"Farthest\":\n self.learn_type = \"f\"\n\n def getNext(self):\n c = self.get_unclassified()\n if c != ([],[]):\n if self.learn_type == \"r\" or (self.classA_list == [] or self.classB_list == []):\n index = self.index\n index +=1\n if index < len(self.paths):\n self.index = index\n self.load_img()\n else:\n print(\"All Images Classified\")\n self.save()\n\n # if both lists have contents, train svm here\n elif self.learn_type == \"c\":\n if self.first_time:\n 
self.clf.fit(np.asarray(self.imag_reps),np.asarray(self.class_vals))\n self.index +=1\n self.prev = self.index\n self.load_img()\n self.first_time = False\n\n else:\n self.prev = self.index\n unclass_vals,indexes = c\n unclass_vals = np.asarray(unclass_vals)\n temp = np.array([self.imag_reps[-1]])\n self.clf.partial_fit(temp,np.asarray(self.class_vals)[-1])\n if len(unclass_vals) == 1:\n unclass_vals = unclass_vals.reshape(1,-1)\n self.index = indexes[0]\n self.load_img\n if len(unclass_vals) > 1:\n closest = np.argmin(self.clf.decision_function(unclass_vals))\n self.index = indexes[closest]\n self.load_img()\n else:\n if self.first_time:\n self.clf.fit(np.asarray(self.imag_reps),np.asarray(self.class_vals))\n self.prev = self.index\n self.index +=1\n self.load_img()\n self.first_time = False\n else:\n self.prev = self.index\n unclass_vals,indexes = c\n unclass_vals = np.asarray(unclass_vals)\n temp = np.array([self.imag_reps[-1]])\n s = time.time()\n self.clf.partial_fit(temp,np.array([self.class_vals[-1]]))\n e = time.time()\n if len(unclass_vals) == 1:\n unclass_vals = unclass_vals.reshape(1,-1)\n self.index = indexes[0]\n self.load_img\n if len(unclass_vals) > 1:\n farthest = np.argmax(self.clf.decision_function(unclass_vals))\n self.index = indexes[farthest]\n self.load_img()\n else:\n self.save()\n\n\n def classA_event(self): #class A Event (User Presses 2)\n self.images+=1\n self.d[self.paths[self.index-1]] = 2\n self.skip_flg = False\n self.rec.append(self.index)\n self.numImages.setText(str(self.images))\n self.classA_list.append(self.index)\n self.imag_reps.append(self.npy_dict[self.index])\n self.class_vals.append(2)\n self.getNext()\n def classB_event(self): #class B Event (User Presses 1)\n self.images+=1\n self.d[self.paths[self.index-1]] = 1\n self.skip_flg = False\n self.rec.append(self.index)\n self.numImages.setText(str(self.images))\n self.classB_list.append(self.index)\n self.imag_reps.append(self.npy_dict[self.index])\n 
self.class_vals.append(1)\n self.getNext()\n def skip_event(self): #class Skip Event (User Presses 3)\n self.d[self.paths[self.index-1]] = -1\n self.skip_flg = True\n self.rec.append(self.index)\n self.skipped.append(self.index)\n self.prev = self.index\n self.index = 1\n\n while self.index in self.classA_list or self.index in self.classB_list or self.index in self.skipped:\n self.index +=1\n if self.index > self.path_len:\n print(\"All Images Classified\")\n else:\n self.load_img()\n\n#first case should only occur once at the beginning\n def get_unclassified(self):\n if self.un_prev == ([],[]):\n unclass = []\n indexes = []\n for i in self.npy_dict:\n if i not in self.classA_list and i not in self.classB_list and i not in self.skipped:\n unclass.append(self.npy_dict[i])\n indexes.append(i)\n #print(len(unclass))\n self.un_prev = unclass,indexes\n self.rec = []\n return unclass,indexes\n else:\n index = 0\n unclass,indexes = self.un_prev\n for i in indexes:\n if i in self.rec:\n indexes.remove(i)\n del unclass[index]\n index+=1\n #print(len(unclass))\n self.rec = []\n self.un_prev = unclass,indexes\n return unclass,indexes\n\n def load_pix_features(self):\n self.feats =[]\n for e,p in enumerate(self.paths):\n self.feats.append(misc.imresize(misc.imread(p), (self.height, self.width, 3)))\n self.feats = np.asarray(self.features)\n self.makeData(self.feats)\n self.load_img()\n\n def getPrev(self):\n self.images-=1\n if self.learn_type == 'r' or (self.classA_list == [] or self.classB_list == []):\n self.d[self.paths[self.index-2]] = 0\n index = self.index\n index -=1\n if index > 0:\n self.index = index\n self.load_img()\n if self.index in self.classA_list:\n self.classA_list.remove(self.index)\n elif self.index in self.classB_list:\n self.classB_list.remove(self.index)\n else:\n self.skipped.remove(self.index)\n elif self.learn_type in \"cf\":\n if self.skip_flg:\n self.skipped.remove(self.prev)\n self.rec.remove(self.prev)\n self.index = self.prev\n 
self.d[self.paths[self.index-1]] = 0\n self.load_img()\n else:\n self.index = self.prev\n self.imag_reps.reverse()\n self.class_vals.reverse()\n self.imag_reps = self.imag_reps[1:]\n self.imag_reps.reverse()\n self.class_vals = self.class_vals[1:]\n self.class_vals.reverse()\n self.d[self.paths[self.index-1]] = 0\n if self.index in self.classA_list:\n self.classA_list.remove(self.index)\n else:\n self.classB_list.remove(self.index)\n self.load_img()\n\n\n def init_load(self):\n self.statusBar().showMessage('Loading Images...')\n self.path = str(QFileDialog.getExistingDirectory(self, \"Select Directory\"))\n self.paths = self.getPaths(self.path)\n self.statusBar().showMessage('Images Loaded')\n self.statusBar().showMessage('Choose Image Features')\n self.label.setText(\"Choose Image Features\")\n\n def load_img(self):\n print(self.index)\n if self.index < len(self.paths):\n self.numImages.setText(str(self.images))\n pixmap = QPixmap(self.paths[self.index-1])\n pixmap_resized = pixmap.scaled(200, 200, QtCore.Qt.KeepAspectRatio)\n self.label.resize(300,300)\n self.label.move(170,30)\n self.label.setPixmap(pixmap_resized)\n\n def getPaths(self,data_dir):\n exts = ['*.JPEG','*.JPG','*.jpg','*.jpeg','*.png','*.PNG']\n for pattern in exts:\n for d, s, fList in os.walk(data_dir):\n for filename in fList:\n if fnmatch.fnmatch(filename, pattern):\n fname_ = os.path.join(d,filename)\n self.paths.append(fname_)\n shuffle(self.paths)\n exts = ['*.pkl']\n for pattern in exts:\n for d, s, fList in os.walk(data_dir):\n for filename in fList:\n if fnmatch.fnmatch(filename, pattern):\n fname_ = os.path.join(d,filename)\n self.full_paths.append(fname_)\n return self.paths\n\n\n def save(self):\n if self.get_unclassified() == ([],[]) and self.paths != []:\n self.statusBar().showMessage('All Images Classified')\n else:\n self.statusBar().showMessage('Closing...')\n reply = QMessageBox.question(self, 'Message',\n \"Are you sure to quit?\", QMessageBox.Yes |\n QMessageBox.No, 
QMessageBox.No)\n\n if reply == QMessageBox.Yes:\n self.statusBar().showMessage('Saving Data...')\n if self.rec != []:\n self.un_prev = self.get_unclassified()\n if self.classA_list != [] or self.classB_list != []:\n pkl = open(self.path+'/labels.pkl', 'wb')\n data = pickle.dumps([self.d, self.classA_list, self.classB_list,self.skipped,self.img_dict,self.npy_dict,self.paths,self.imag_reps, self.class_vals,self.un_prev])\n pkl.write(data)\n pkl.close()\n self.statusBar().showMessage('Exiting...')\n exit()\n\napp = QApplication(sys.argv)\nc = classifier_v2()\nsys.exit(app.exec_())\n","repo_name":"cameronfabbri/Classification-Tool","sub_path":"classifier_v2.py","file_name":"classifier_v2.py","file_ext":"py","file_size_in_byte":19459,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"26343491320","text":"from sys import stderr\nimport configparser\nimport argparse\nfrom python_core_components.panorama_calculation import PanoramaCalculator\nfrom python_core_components.varisphear import VariSphear\n\n\ndef arg_options():\n\n # init options\n parser = argparse.ArgumentParser(\n description='''Calculate the motorpositions of the VariSphear\n depending on the aperature angles of the camera\n and take pictures.\n Final this pictures will be saved in the output directory\n spezifed in \"panorama.cfg\". ''')\n\n parser.add_argument(\n '--angle',\n type=int,\n nargs=2,\n dest='angle',\n default=None,\n metavar=('VERTICAL', 'HORIZONTAL'),\n help='''Set the vertical aperature angle\n and the horizontal aperature angle for calculations.\n This values do NOT have to be the exact aperature angle \n of your camera. Instead they should contain a little \n overlap. 
You could use \"nodalpointHelper\" to figure out\n how much overlap do you prefer.\n This values will be saved in \"panorama.cfg\"\n unless \"-t\" is set.\n ( default: values will be read from \"panorama.cfg instead\" )''')\n\n parser.add_argument(\n '--directory',\n dest='directory',\n default=None,\n metavar=('DIRECTORY'),\n help='''Set the realtive path to your output directory.\n This value will be saved in \"panorama.cfg\" unless \"-t\" is set.\n ( default: path will be read from \"panorama.cfg instead \" ) ''')\n\n parser.add_argument(\n '--config',\n dest='config',\n default=None,\n metavar=('CONFIGFILE'),\n help='''Specifies the realtive path to \"panorama.cfg\"\n ( default: panorama.cfg ) ''')\n\n parser.add_argument(\n '-t',\n '--tmp',\n dest='tmp',\n action='store_true',\n help='Prevent any changes of \"panorama.cfg\" done by this program ')\n\n parser.add_argument(\n '-i',\n '--info',\n dest='info',\n action='store_true',\n help='Show info before mesurement starts')\n\n parser.add_argument(\n '-n',\n '--noPicture',\n dest='noPicture',\n action='store_true',\n help='Skip the step of driving to each position and taking pictures')\n\n # return arguments\n return parser.parse_args()\n\n\ndef main():\n\n # get arguments\n args = arg_options()\n\n # set path to panorama.cfg\n path_cfg = args.config\n if(not path_cfg):\n path_cfg = 'panorama.cfg'\n\n # get values from 'path_cfg'\n config = configparser.ConfigParser()\n try:\n config.read(path_cfg)\n test = config['camera']['vertical_angle']\n except:\n stderr.write(\"\\n ERROR: Can´t open file: %s !\\n exit \\n\" % path_cfg)\n exit()\n\n # set new values\n if(args.angle):\n config['camera']['vertical_angle'] = str(args.angle[0])\n config['camera']['horizontal_angle'] = str(args.angle[1])\n if(args.directory):\n config['output']['directory'] = args.directory\n\n # save new values in 'panorama.cfg' if not permitted\n if(not args.tmp):\n try:\n with open(path_cfg, \"w\") as configfile:\n 
config.write(configfile)\n except:\n stderr.write(\"\\n ERROR: Can´t open file %s !\\n exit \\n\" % path_cfg)\n exit()\n\n angle_h = float(config['camera']['horizontal_angle'])\n angle_v = float(config['camera']['vertical_angle'])\n dir_output = config['output']['directory']\n\n # Get Interfaces / Ports to VariSphear\n port_top = config['varisphear']['serialport_top']\n port_base = config['varisphear']['serialport_base']\n\n calc = PanoramaCalculator()\n variSphear = VariSphear(port_top, port_base)\n\n if(args.info):\n calc.show_info()\n\n calc.show_statistic(angle_v, angle_h, dir_output)\n # Mapping of angles to Motorpositions\n li_motorTop = variSphear.map_motor_top(angle_v)\n li_motorBase = variSphear.map_motor_base(angle_h)\n\n # Taking the Pictures\n if(not args.noPicture):\n\n variSphear.taking_pictures(\n li_motorTop, li_motorBase, dir_output, path_cfg)\n\nif __name__ == '__main__':\n\n main()\n","repo_name":"spatialaudio/panorama","sub_path":"Python/takePicture.py","file_name":"takePicture.py","file_ext":"py","file_size_in_byte":4236,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"21263808204","text":"import sys\n\ninp = sys.argv[1]\ndropNum = int(sys.argv[2])\n\n#Student data structure that will hold all info associated with a student (or row in datasheet)\nclass Student(object):\n\tdef __init__(self, info, gradeslist, topPoints):\n\t\tself.info = info\n\t\tself.grades = map(int, gradeslist[0:-2])\n\t\tself.total = int(gradeslist[-2])\n\t\tself.possible = int(gradeslist[-1])\n\t\tself.gradespercent = [float(a)/float(b) for a, b in zip(self.grades, topPoints)]\n\t\t#self.ave = float(self.total)/float(self.possible)\n\n\n\n\n# A min function that will only evaluate the floats (this is to avoid evaluating None or empty strings)\ndef mymin(array):\n\tminimum = 1.0\n\tfor elem in array:\n\t\tif type(elem)==float:\n\t\t\tif elem ResultOfCheck:\n \"\"\"\n Abstract method to execute the check on the 
provided step.\n\n Args:\n step: The step to check.\n\n Returns:\n ResultOfCheck: The result of the check.\n \"\"\"\n pass\n\n\nclass CheckResult(Check):\n \"\"\"\n Abstract class for checks that are based on the results of a step.\n \"\"\"\n\n @abstractmethod\n def _check_method(self, result) -> ResultOfCheck:\n \"\"\"\n Abstract method that defines the logic of the check on the result.\n\n Args:\n result: The result to be checked.\n\n Returns:\n ResultOfCheck: The result of the check.\n \"\"\"\n pass\n\n def check(self, step) -> ResultOfCheck:\n \"\"\"\n Executes the check on the result of the provided step.\n\n Args:\n step: The step whose results are to be checked.\n\n Returns:\n ResultOfCheck: The result of the check.\n \"\"\"\n last_run = step.get_latest_run()\n result = last_run[\"scores\"] | last_run[\"parameters\"] | last_run\n return self._check_method(result)\n\n\nclass CheckResultExists(CheckResult):\n \"\"\"\n Concrete check class to verify the existence of a required key in the results.\n\n Attributes:\n required_key (str): The key that should exist in the results.\n \"\"\"\n\n def __init__(self, required_key):\n \"\"\"\n Constructor for the CheckResultExists class.\n\n Args:\n required_key (str): The key that should exist in the results.\n \"\"\"\n self.required_key = required_key\n\n def _check_method(self, result) -> ResultOfCheck:\n \"\"\"\n Checks if the required_key exists in the result.\n\n Args:\n result: The result to be checked.\n\n Returns:\n ResultOfCheck: The result of the check, indicating success if the required_key exists.\n \"\"\"\n\n if self.required_key in result:\n return ResultOfCheck(is_positive=True)\n else:\n return ResultOfCheck(\n is_positive=False,\n error=f\"Score {self.required_key} is not in results.json\",\n )\n\n\nclass CheckScore(CheckResult):\n \"\"\"\n Base class for checking scores based on a specific metric.\n\n Attributes:\n metric: An object used to calculate the metric.\n value (float): The expected value of the 
metric.\n \"\"\"\n\n def __init__(\n self,\n metric, # This requires an object which was used to calculate metric\n value: float,\n ):\n self.metric = metric\n self.value = value\n\n def build_required_key(self, step, metric):\n \"\"\"\n Constructs the key for the metric based on the metric's name, the model's name,\n the current step's name, and the current check stage.\n\n Args:\n step: The step in which the metric was calculated.\n metric: The metric object.\n \"\"\"\n metric = metric.__class__.__name__\n stage = step.get_check_stage()\n self.required_key = f\"{metric}-{stage}\"\n\n def check(self, step) -> ResultOfCheck:\n \"\"\"\n Executes the check on the result of the provided step.\n\n Args:\n step: The step whose results are to be checked.\n\n Returns:\n ResultOfCheck: The result of the check.\n \"\"\"\n last_run = step.get_latest_run()\n result = last_run[\"scores\"]\n self.build_required_key(step, self.metric)\n return self._check_method(result)\n\n\nclass CheckScoreExists(CheckScore):\n \"\"\"\n Check to verify the existence of a score in the results based on a specific metric.\n\n Attributes:\n metric: An object used to calculate the metric.\n \"\"\"\n\n def __init__(self, metric):\n super().__init__(metric, None)\n\n def _check_method(self, result) -> ResultOfCheck:\n \"\"\"\n Checks if the constructed key based on the metric exists in the result.\n\n Args:\n result: The result to be checked.\n\n Returns:\n ResultOfCheck: The result of the check, indicating success if the key exists.\n \"\"\"\n if self.required_key in result:\n return ResultOfCheck(is_positive=True)\n else:\n return ResultOfCheck(\n is_positive=False,\n error=f\"Score {self.required_key} is not in results.json\",\n )\n\n\nclass CheckScoreEqualsTo(CheckScore):\n \"\"\"\n Check to verify if a score in the results based on a specific metric is equal to an expected value.\n \"\"\"\n\n def _check_method(self, result) -> ResultOfCheck:\n \"\"\"\n Checks if the score associated with the 
constructed key is equal to the expected value.\n\n Args:\n result: The result to be checked.\n\n Returns:\n ResultOfCheck: The result of the check.\n \"\"\"\n if result[self.required_key] == self.value:\n return ResultOfCheck(is_positive=True)\n else:\n return ResultOfCheck(\n is_positive=False,\n error=f\"Score {result[self.required_key]} is not equal to {self.value}\",\n )\n\n\nclass CheckScoreCloseTo(CheckScore):\n \"\"\"\n Check to verify if a score in the results based on a specific metric is close to an expected value.\n\n Attributes:\n rel_tol (float): Relative tolerance. Defaults to 1e-09.\n abs_tol (float): Absolute tolerance. Defaults to 0.0.\n \"\"\"\n\n def __init__(\n self,\n metric,\n value: float,\n rel_tol: float = 1e-09,\n abs_tol: float = 0.0,\n ):\n super().__init__(metric, value)\n self.rel_tol = rel_tol\n self.abs_tol = abs_tol\n\n def _check_method(self, result) -> ResultOfCheck:\n \"\"\"\n Checks if the score associated with the constructed key is close to the expected value.\n\n Args:\n result: The result to be checked.\n\n Returns:\n ResultOfCheck: The result of the check.\n \"\"\"\n if math.isclose(\n result[self.required_key],\n self.value,\n rel_tol=self.rel_tol,\n abs_tol=self.abs_tol,\n ):\n return ResultOfCheck(is_positive=True)\n else:\n return ResultOfCheck(\n is_positive=False,\n error=f\"Score {result[self.required_key]} is not equal to {self.value}\",\n )\n\n\nclass CheckScoreGreaterThan(CheckScore):\n \"\"\"\n Check to verify if a score in the results based on a specific metric is greater than an expected value.\n \"\"\"\n\n def _check_method(self, result) -> ResultOfCheck:\n \"\"\"\n Checks if the score associated with the constructed key is greater than the expected value.\n\n Args:\n result: The result to be checked.\n\n Returns:\n ResultOfCheck: The result of the check.\n \"\"\"\n if result[self.required_key] > self.value:\n return ResultOfCheck(is_positive=True)\n else:\n return ResultOfCheck(\n is_positive=False,\n 
error=f\"Score {result[self.required_key]} is not greater than {self.value}\",\n )\n\n\nclass CheckScoreLessThan(CheckScore):\n \"\"\"\n Check to verify if a score in the results based on a specific metric is less than an expected value.\n \"\"\"\n\n def _check_method(self, result) -> ResultOfCheck:\n \"\"\"\n Checks if the score associated with the constructed key is less than the expected value.\n\n Args:\n result: The result to be checked.\n\n Returns:\n ResultOfCheck: The result of the check.\n \"\"\"\n if result[self.required_key] < self.value:\n return ResultOfCheck(is_positive=True)\n else:\n return ResultOfCheck(\n is_positive=False,\n error=f\"Score {result[self.required_key]} is not less than {self.value}\",\n )\n","repo_name":"SebChw/Actually-Robust-Training","sub_path":"art/checks.py","file_name":"checks.py","file_ext":"py","file_size_in_byte":8816,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"5"} +{"seq_id":"73508362713","text":"\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport torch.optim as optim\r\nimport torchvision.transforms as transforms\r\nimport torchvision.models as models\r\nimport copy \r\nimport matplotlib.pyplot as plt\r\n\r\n# ref : https://github.com/rrmina/fast-neural-style-pytorch\r\n\r\n\r\n\r\n# -------------------------------------------------------------------------------- # \r\ndef gram_matrix(input):\r\n a, b, c, d = input.size() # a=batch size(=1)\r\n # b=number of feature maps\r\n # (c,d)=dimensions of a f. 
map (N=c*d)\r\n\r\n features = input.view(a * b, c * d) # resise F_XL into \\hat F_XL\r\n\r\n G = torch.mm(features, features.t()) # compute the gram product\r\n\r\n # we 'normalize' the values of the gram matrix\r\n # by dividing by the number of element in each feature maps.\r\n return G.div(a * b * c * d)\r\n\r\n\r\n# -------------------------------------------------------------------------------- # \r\n\r\n\r\nclass ContentLoss(nn.Module):\r\n def __init__(self, target,):\r\n super(ContentLoss, self).__init__()\r\n # we 'detach' the target content from the tree used to dynamically compute the gradient: this is a stated value,\r\n # not a variable. Otherwise the forward method of the criterion will throw an error.\r\n self.target = target.detach()\r\n\r\n def forward(self, input):\r\n self.loss = F.mse_loss(input, self.target)\r\n return input\r\n\r\n# -------------------------------------------------------------------------------- # \r\n\r\nclass StyleLoss(nn.Module):\r\n def __init__(self, target_feature):\r\n super(StyleLoss, self).__init__()\r\n self.target = gram_matrix(target_feature).detach()\r\n\r\n def forward(self, input):\r\n G = gram_matrix(input)\r\n self.loss = F.mse_loss(G, self.target)\r\n return input\r\n\r\n\r\n# -------------------------------------------------------------------------------- # \r\n\r\nclass Normalization(nn.Module):\r\n def __init__(self, mean, std):\r\n super(Normalization, self).__init__()\r\n # .view the mean and std to make them [C x 1 x 1] so that they can\r\n # directly work with image Tensor of shape [B x C x H x W].\r\n # B is batch size. C is number of channels. 
H is height and W is width.\r\n self.mean = torch.tensor(mean).view(-1, 1, 1)\r\n self.std = torch.tensor(std).view(-1, 1, 1)\r\n\r\n def forward(self, img):\r\n # normalize img\r\n return (img - self.mean) / self.std\r\n\r\n\r\n# -------------------------------------------------------------------------------- # \r\n\r\n# desired depth layers to compute style/content losses :\r\n# content_layers_default = ['conv_4']\r\n# style_layers_default = ['conv_1', 'conv_2', 'conv_3', 'conv_4', 'conv_5']\r\n\r\ncontent_layers_default = ['conv_6']\r\nstyle_layers_default = ['conv_1', 'conv_3', 'conv_5', 'conv_9', 'conv_13']\r\n\r\ndef get_style_model_and_losses(cnn, normalization_mean, normalization_std,\r\n style_img, content_img,\r\n content_layers=content_layers_default,\r\n style_layers=style_layers_default, device=\"cpu\", show=True):\r\n cnn = copy.deepcopy(cnn)\r\n\r\n # normalization module\r\n normalization = Normalization(normalization_mean, normalization_std).to(device)\r\n\r\n # just in order to have an iterable access to or list of content/syle\r\n # losses\r\n content_losses = []\r\n style_losses = []\r\n\r\n # -- transforl tensor to PIl for ploting\r\n unloader = transforms.ToPILImage()\r\n\r\n # assuming that cnn is a nn.Sequential, so we make a new nn.Sequential\r\n # to put in modules that are supposed to be activated sequentially\r\n model = nn.Sequential(normalization)\r\n\r\n\r\n i = 0 # increment every time we see a conv\r\n for layer in cnn.children():\r\n if isinstance(layer, nn.Conv2d):\r\n i += 1\r\n name = 'conv_{}'.format(i)\r\n elif isinstance(layer, nn.ReLU):\r\n name = 'relu_{}'.format(i)\r\n # The in-place version doesn't play very nicely with the ContentLoss\r\n # and StyleLoss we insert below. 
So we replace with out-of-place\r\n # ones here.\r\n layer = nn.ReLU(inplace=False)\r\n elif isinstance(layer, nn.MaxPool2d):\r\n name = 'pool_{}'.format(i)\r\n elif isinstance(layer, nn.BatchNorm2d):\r\n name = 'bn_{}'.format(i)\r\n else:\r\n raise RuntimeError('Unrecognized layer: {}'.format(layer.__class__.__name__))\r\n\r\n model.add_module(name, layer)\r\n\r\n if name in content_layers:\r\n # add content loss:\r\n target = model(content_img).detach()\r\n print('--content layer--')\r\n print(f'{name} | target features {target.shape}')\r\n content_loss = ContentLoss(target)\r\n model.add_module(\"content_loss_{}\".format(i), content_loss)\r\n content_losses.append(content_loss)\r\n if show:\r\n feature_map = target.cpu().clone() \r\n feature_map = unloader(feature_map.squeeze(0))\r\n plt.imshow(feature_map)\r\n plt.title(name)\r\n plt.show()\r\n\r\n\r\n if name in style_layers:\r\n # add style loss:\r\n target_feature = model(style_img).detach()\r\n print('--style layer--')\r\n print(f'{name} | target features {target_feature.shape}')\r\n style_loss = StyleLoss(target_feature)\r\n model.add_module(\"style_loss_{}\".format(i), style_loss)\r\n style_losses.append(style_loss)\r\n if show:\r\n feature_map = target_feature.cpu().clone() \r\n feature_map = unloader(feature_map.squeeze(0))\r\n plt.imshow(feature_map)\r\n plt.title(name)\r\n plt.show()\r\n \r\n\r\n\r\n # now we trim off the layers after the last content and style losses\r\n for i in range(len(model) - 1, -1, -1):\r\n if isinstance(model[i], ContentLoss) or isinstance(model[i], StyleLoss):\r\n break\r\n\r\n model = model[:(i + 1)]\r\n\r\n return model, style_losses, content_losses\r\n\r\n# -------------------------------------------------------------------------------- # \r\n\r\ndef get_input_optimizer(input_img):\r\n # this line to show that input is a parameter that requires a gradient\r\n optimizer = optim.LBFGS([input_img.requires_grad_()], lr=1.3)\r\n return optimizer\r\n\r\n\r\n# 
-------------------------------------------------------------------------------- # \r\n\r\nclass VGG19(nn.Module):\r\n def __init__(self, vgg_path=\"models/vgg19-d01eb7cb.pth\", pretrained=False, requires_grad=False):\r\n super(VGG19, self).__init__()\r\n # Load VGG Skeleton, Pretrained Weights\r\n vgg19_features = models.vgg19(pretrained=pretrained)\r\n if not pretrained :\r\n vgg19_features.load_state_dict(torch.load(vgg_path), strict=False)\r\n\r\n self.features = vgg19_features.features\r\n # Turn-off Gradient History\r\n if not requires_grad:\r\n for param in self.features.parameters():\r\n param.requires_grad = False\r\n\r\n def forward(self, x):\r\n layers = {'3': 'relu1_2', '8': 'relu2_2', '17': 'relu3_4', '22': 'relu4_2', '26': 'relu4_4', '35': 'relu5_4'}\r\n features = {}\r\n for name, layer in self.features._modules.items():\r\n x = layer(x)\r\n if name in layers:\r\n features[layers[name]] = x\r\n\r\n return features\r\n\r\nclass VGG16(nn.Module):\r\n def __init__(self, vgg_path=\"models/vgg16-00b39a1b.pth\", pretrained=False, requires_grad=False):\r\n super(VGG16, self).__init__()\r\n # Load VGG Skeleton, Pretrained Weights\r\n vgg16_features = models.vgg16(pretrained=pretrained)\r\n if not pretrained :\r\n vgg16_features.load_state_dict(torch.load(vgg_path), strict=False)\r\n self.features = vgg16_features.features\r\n\r\n # Turn-off Gradient History\r\n for param in self.features.parameters():\r\n param.requires_grad = False\r\n\r\n def forward(self, x):\r\n layers = {'3': 'relu1_2', '8': 'relu2_2', '15': 'relu3_3', '22': 'relu4_3'}\r\n features = {}\r\n for name, layer in self.features._modules.items():\r\n x = layer(x)\r\n if name in layers:\r\n features[layers[name]] = x\r\n if (name=='22'):\r\n break\r\n\r\n return 
features\r\n\r\n","repo_name":"Lucile-S/ART_project","sub_path":"IART_app/blueprints/style_transfer/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":8426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"3742905","text":"# Ejercicio 11: Agenda telefónica\r\n# Hacer un programa que simule una agenda de contactos. Crear un\r\n# diccionario donde la clave sea el nombre de usuario y el valor\r\n# sea el teléfono, el programa tendrá el siguiente menú de opciones:\r\n# Menú\r\n# 1 - Nuevo contacto\r\n# 2 - Borrar contacto\r\n# 3 - Ver contactos existentes\r\n# 4 - Salir\r\ncontactos = {}\r\nopcion = 0\r\nwhile (opcion != 4):\r\n print('\\t \\t MENÚ \\n 1 - Nuevo contacto. \\n 2 - Borrar contacto. \\n 3 - Ver contactos existentes. \\n 4 - Salir.')\r\n opcion = int(input())\r\n if opcion == 1:\r\n nombre = input(\"Digite nombre del contacto: \")\r\n telefono = int(input('Número de teléfono: '))\r\n if nombre not in contactos:\r\n contactos[nombre] = telefono\r\n print(\"Contacto agregado exitosamente.\")\r\n else:\r\n print(\"El contacto ya existe.\")\r\n elif opcion == 2:\r\n nombre = input(\"Digite el nombre del contacto a eliminar: \")\r\n if nombre in contactos:\r\n del (contactos[nombre])\r\n print(\"Contacto eliminado correctamente.\")\r\n else:\r\n print(\"Contacto inexistente.\")\r\n elif opcion == 3:\r\n print(\"\\tLista completa de contactos: \\n\")\r\n for i, j in contactos.items():\r\n print(f'Nombre: {i} | Teléfono: {j}')\r\n elif opcion == 4:\r\n print(\"Gracias por utilizar nuestra agenda. Hasta la próxima!\")\r\n else:\r\n print(\"ERROR. 
Opción inexistente, por favor, intente nuevamente.\")","repo_name":"CodeStrong2023/DevGroup_Segundo_Semestre","sub_path":"Enzo_Balderrama/Python2023/Segundo Semestre/Ejercicio11.py","file_name":"Ejercicio11.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"es","doc_type":"code","stars":5,"dataset":"github-code","pt":"5"} +{"seq_id":"4534321604","text":"# Goal = Given an array of values, move all zeroes to the end while keeping the order of values the same.\n\n# check edge case for an array of length 1 and return nums if true\n# create start pointer at 0 and end pointer at 1\n# loop until end pointer reaches end of array\n# if start value is not 0 then increment start\n# else if start value is 0 and end value is not 0 then swap both values\n# increment start\n# increment end \n# return nums\n\nclass Solution:\n def moveZeroes(self, nums: List[int]) -> None:\n if len(nums) == 1:\n return nums\n \n start = 0\n\n for endIndex, endValue in enumerate(nums[1:]):\n # nums[1:] starts with first index at 0 so need to add 1 to get correct index in list\n if nums[start] != 0:\n start += 1\n elif nums[start] == 0 and endValue != 0:\n nums[start], nums[endIndex + 1] = nums[endIndex + 1], nums[start]\n start += 1\n \n return nums\n \n# Time complexity = O(n)\n# Space complexity = O(1)\n","repo_name":"kevin-the-engi/leetcode-solutions","sub_path":"solutions/move-zeroes/move-zeroes.py","file_name":"move-zeroes.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"72623911192","text":"import sys\nsys.stdin = open('input.txt')\n\nT = 10\n\nfor tc in range(T):\n case_num = int(input())\n ladder = [list(map(int, input().split())) for _ in range(100)]\n y_len = len(ladder)\n x_len = len(ladder[0])\n dx = [1, -1, 0]\n dy = [0, 0, -1]\n now_x = 0\n now_y = 0\n\n for j in range(y_len):\n if ladder[y_len-1][x_len-j-1] == 2:\n now_x = x_len - j - 1\n now_y = y_len - 
1\n break\n\n while True:\n for idx in range(3):\n next_x = now_x + dx[idx]\n next_y = now_y + dy[idx]\n if (0 <= next_x < 100 and 0 <= next_y < 100 and\n ladder[next_y][next_x] == 1):\n ladder[now_y][now_x] += 1\n now_x = next_x\n now_y = next_y\n if now_y == 0:\n break\n print('#{} {}'.format(tc+1, now_x))\n","repo_name":"Leeyounwoo/Algorithm","sub_path":"In SSAFY/0812/Sung-Jun/1210_Ladder1/s1210.py","file_name":"s1210.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"21768554614","text":"\nfiducialbeam_halfx = 19.5\nfiducialbeam_halfy = 20.22\n\ndef is_ccd_in_beam(xb, yb, iccd):\n # Get position of the centroid of the CCD\n x0, y0 = GetCCDPos(iccd)\n\n # size (in mm) of ccd pixels = (40.00, 40.72) ITL, (40.04, 40.96) E2V\n ccd_xsize, ccd_ysize = GetCCDSize(iccd)\n \n lengthx = ccd_xsize/2+fiducialbeam_halfx\n lengthy = ccd_ysize/2+fiducialbeam_halfy\n if abs(xb-x0)<=lengthx and abs(yb-y0)<=lengthy:\n return True\n else:\n return False\n","repo_name":"lsst-camera-dh/ccob-wb","sub_path":"soft_JSR/Fiducial.py","file_name":"Fiducial.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"33544745059","text":"import boto3\nfrom botocore.exceptions import ClientError\nfrom botocore.config import Config\nimport logging\nimport mysql.connector\nfrom dotenv import load_dotenv\nimport os\n\nload_dotenv() \n\nrds_config = {\n 'user': os.environ['DB_USER'],\n 'password': os.environ['DB_PASSWORD'],\n 'host': os.environ['DB_HOST'],\n 'port': os.environ['DB_PORT'],\n 'database': os.environ['DB_DATABASE']\n}\n\n# s3\ndef upload_file_to_s3(file, bucket, object_name=None):\n s3_client = boto3.client('s3')\n try:\n response = s3_client.upload_fileobj(file, bucket, object_name)\n except ClientError as e:\n logging.error(e)\n return False\n return True\n\n# rds\ncnx = 
mysql.connector.connect(pool_name=\"rds\",\n pool_size=5,\n **rds_config)\n\ndef get_lastdata_from_rds():\n try:\n cnx = mysql.connector.connect(pool_name=\"rds\")\n cursor = cnx.cursor()\n query = (\"SELECT * FROM board ORDER BY id DESC LIMIT 0,1\")\n cursor.execute(query)\n data = cursor.fetchone()\n return {\"data\": data}\n except Exception as e:\n raise e\n finally:\n if cnx.in_transaction:\n cnx.rollback()\n cursor.close()\n cnx.close()\n\ndef get_datas_from_rds():\n try:\n cnx = mysql.connector.connect(pool_name=\"rds\")\n cursor = cnx.cursor()\n query = (\"select * from board\")\n cursor.execute(query)\n data = cursor.fetchall()\n return {\"data\": data}\n except Exception as e:\n raise e\n finally:\n if cnx.in_transaction:\n cnx.rollback()\n cursor.close()\n cnx.close()\n \ndef create_data_to_rds(message, imgurl):\n try:\n cnx = mysql.connector.connect(pool_name=\"rds\")\n cursor = cnx.cursor()\n query = (\"insert into board (message, imgurl) values (%s, %s)\")\n data = (message, imgurl)\n \n cursor.execute(query, data)\n cnx.commit()\n return {\"ok\": True}\n except Exception as e:\n raise e\n finally:\n if cnx.in_transaction:\n cnx.rollback()\n cursor.close()\n cnx.close()\n ","repo_name":"ChengTze-Wu/stage3","sub_path":"flask_app/connect.py","file_name":"connect.py","file_ext":"py","file_size_in_byte":2163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"38457136884","text":"\"\"\"\nDiscrete interpolator base for least squares\n\"\"\"\nimport logging\n\nimport numpy as np\nfrom scipy.sparse import coo_matrix, bmat, eye\nfrom scipy.sparse import linalg as sla\n\nfrom LoopStructural.interpolators.geological_interpolator import \\\n GeologicalInterpolator\n\nfrom LoopStructural.utils import getLogger\nlogger = getLogger(__name__)\n\n\nclass DiscreteInterpolator(GeologicalInterpolator):\n \"\"\"\n\n \"\"\"\n def __init__(self, support):\n \"\"\"\n Base class for a discrete interpolator e.g. 
piecewise linear or finite difference which is\n any interpolator that solves the system using least squares approximation\n\n Parameters\n ----------\n support \n A discrete mesh with, nodes, elements, etc\n \"\"\"\n GeologicalInterpolator.__init__(self)\n self.B = []\n self.support = support\n self.region_function = lambda xyz : np.ones(xyz.shape[0],dtype=int)\n # self.region_map[self.region] = np.array(range(0,\n # len(self.region_map[self.region])))\n self.shape = 'rectangular'\n if self.shape == 'square':\n self.B = np.zeros(self.nx)\n self.c_ = 0\n self.A = [] # sparse matrix storage coo format\n self.col = []\n self.row = [] # sparse matrix storage\n self.solver = None\n self.eq_const_C = []\n self.eq_const_row = []\n self.eq_const_col = []\n self.eq_const_d = []\n self.eq_const_c_ = 0\n self.constraints = {}\n self.interpolation_weights= {}\n logger.info(\"Creating discrete interpolator with {} degrees of freedom\".format(self.nx))\n self.type = 'discrete'\n @property\n def nx(self):\n return len(self.support.nodes[self.region])\n\n @property\n def region(self):\n return self.region_function(self.support.nodes)\n\n @property\n def region_map(self):\n region_map = np.zeros(self.support.n_nodes).astype(int)\n region_map[self.region] = np.array(\n range(0, len(region_map[self.region])))\n return region_map\n def set_property_name(self, propertyname):\n \"\"\"\n Set the property name attribute, this is usually used to\n save the property on the support\n\n Parameters\n ----------\n propertyname\n\n Returns\n -------\n\n \"\"\"\n self.propertyname = propertyname\n\n def set_region(self, region=None):\n \"\"\"\n Set the region of the support the interpolator is working on\n\n Parameters\n ----------\n region - function(position)\n return true when in region, false when out\n\n Returns\n -------\n\n \"\"\"\n # evaluate the region function on the support to determine\n # which nodes are inside update region map and degrees of freedom\n self.region_function = 
region\n logger.info(\"Interpolation now uses region and has {} degrees of freedom\".format(self.nx))\n\n def set_interpolation_weights(self, weights):\n \"\"\"\n Set the interpolation weights dictionary\n\n Parameters\n ----------\n weights - dictionary\n Entry of new weights to assign to self.interpolation_weights\n\n Returns\n -------\n\n \"\"\"\n for key in weights:\n self.up_to_date = False\n self.interpolation_weights[key] = weights[key]\n\n def reset(self):\n \"\"\"\n Reset the interpolation constraints\n\n \"\"\"\n logger.debug(\"Resetting interpolation constraints\")\n self.c_ = 0\n self.A = [] # sparse matrix storage coo format\n self.col = []\n self.row = [] # sparse matrix storage\n self.eq_const_C = []\n self.eq_const_row = []\n self.eq_const_col = []\n self.eq_const_d = []\n self.eq_const_c_ = 0\n self.B = []\n self.n_constraints = 0\n\n def add_constraints_to_least_squares(self, A, B, idc, name='undefined'):\n \"\"\"\n Adds constraints to the least squares system. Automatically works\n out the row\n index given the shape of the input arrays\n\n Parameters\n ----------\n A : numpy array / list\n RxC numpy array of constraints where C is number of columns,R rows\n B : numpy array /list\n B values array length R\n idc : numpy array/list\n RxC column index\n\n Returns\n -------\n list of constraint ids\n\n \"\"\"\n A = np.array(A)\n B = np.array(B)\n idc = np.array(idc)\n nr = A.shape[0]\n #logger.debug('Adding constraints to interpolator: {} {} {}'.format(A.shape[0]))\n # print(A.shape,B.shape,idc.shape)\n if A.shape != idc.shape:\n return\n \n if len(A.shape) > 2:\n nr = A.shape[0] * A.shape[1]\n A = A.reshape((A.shape[0]*A.shape[1],A.shape[2]))\n idc = idc.reshape((idc.shape[0]*idc.shape[1],idc.shape[2]))\n # going to assume if any are nan they are all nan\n mask = np.any(np.isnan(A),axis=1)\n A[mask,:] = 0\n if np.any(np.isnan(idc)) or np.any(np.isnan(A)) or np.any(np.isnan(B)):\n logger.warning(\"Constraints contain nan not adding constraints: 
{}\".format(name))\n # return\n \n rows = np.arange(0, nr).astype(int)\n rows += self.c_\n constraint_ids = rows.copy()\n\n if name in self.constraints: \n \n self.constraints[name]['A'] = np.vstack([self.constraints[name]['A'],A])\n self.constraints[name]['B'] = np.hstack([self.constraints[name]['B'], B])\n self.constraints[name]['idc'] = np.vstack([self.constraints[name]['idc'],\n idc])\n \n if name not in self.constraints:\n self.constraints[name] = {'node_indexes':constraint_ids,'A':A,'B':B.flatten(),'idc':idc}\n rows = np.tile(rows, (A.shape[-1], 1)).T\n\n self.c_ += nr\n if self.shape == 'rectangular':\n # don't add operator where it is = 0 to the sparse matrix!\n A = A.flatten()\n rows = rows.flatten()\n idc = idc.flatten()\n B = B.flatten()\n mask = A == 0\n self.A.extend(A[~mask].tolist())\n self.row.extend(rows[~mask].tolist())\n self.col.extend(idc[~mask].tolist())\n self.B.extend(B.tolist())\n \n def calculate_residual_for_constraints(self):\n residuals = {}\n for constraint_name, constraint in self.constraints:\n residuals[constraint_name] = np.einsum('ij,ij->i',constraint['A'],self.c[constraint['idc'].astype(int)]) - constraint['B'].flatten()\n return residuals\n def remove_constraints_from_least_squares(self, name='undefined',\n constraint_ids=None):\n \"\"\"\n Remove constraints from the least squares system using the constraint ids\n which corresponds to the rows in the interpolation matrix.\n\n Parameters\n ----------\n constraint_ids : np.array(dtype=int)\n id of constraints to remove\n\n Returns\n -------\n\n \"\"\"\n\n if constraint_ids is None:\n constraint_ids = self.constraints[name]\n print(\"Removing {} {} constraints from least squares\".format(len(constraint_ids), name))\n A = np.array(self.A)\n B = np.array(self.B)\n col = np.array(self.col)\n row = np.array(self.row)\n mask = np.any((row[:,None] == constraint_ids[None,:]) == True,\n axis=1)\n # np.any((numbers[:, None] == np.array([0, 10, 30])[None, :]) == True,\n # axis=1)\n bmask = 
np.ones(B.shape,dtype=bool)\n bmask[constraint_ids] = 0\n self.A = A[~mask].tolist()\n self.B = B[bmask]\n self.col = col[~mask].tolist()\n rowmax = np.max(row[mask])\n rowrange = rowmax-np.min(row[mask])\n # row[np.logical_and(~mask,row>rowmax)] -= rowrange\n return row[~mask]\n\n def add_equality_constraints(self, node_idx, values):\n \"\"\"\n Adds hard constraints to the least squares system. For now this just\n sets\n the node values to be fixed using a lagrangian.\n\n Parameters\n ----------\n node_idx : numpy array/list\n int array of node indexes\n values : numpy array/list\n array of node values\n\n Returns\n -------\n\n \"\"\"\n # map from mesh node index to region node index\n gi = np.zeros(self.support.n_nodes)\n gi[:] = -1\n gi[self.region] = np.arange(0, self.nx)\n idc = gi[node_idx]\n outside = ~(idc == -1)\n\n self.eq_const_C.extend(np.ones(idc[outside].shape[0]).tolist())\n self.eq_const_col.extend(idc[outside].tolist())\n self.eq_const_row.extend((np.arange(0, idc[outside].shape[0])))\n self.eq_const_d.extend(values[outside].tolist())\n self.eq_const_c_ += idc[outside].shape[0]\n\n def add_tangent_ctr_pts(self, w=1.0):\n \"\"\"\n\n Parameters\n ----------\n w : double\n\n\n Returns\n -------\n\n \"\"\"\n points = self.get_tangent_constraints()\n if points.shape[0] > 1:\n self.add_gradient_orthogonal_constraint(points[:,:3],points[:,3:6],w)\n\n def build_matrix(self, square=True, damp=True):\n \"\"\"\n Assemble constraints into interpolation matrix. 
Adds equaltiy\n constraints\n using lagrange modifiers if necessary\n\n Parameters\n ----------\n damp: bool\n Flag whether damping should be added to the diagonal of the matrix\n Returns\n -------\n Interpolation matrix and B\n \"\"\"\n\n logger.info(\"Interpolation matrix is %i x %i\"%(self.c_,self.nx))\n cols = np.array(self.col)\n A = coo_matrix((np.array(self.A), (np.array(self.row), \\\n cols)), shape=(self.c_, self.nx),\n dtype=float) # .tocsr()\n B = np.array(self.B)\n if not square:\n logger.info(\"Using rectangular matrix, equality constraints are not used\")\n return A, B\n AAT = A.T.dot(A)\n BT = A.T.dot(B)\n # add a small number to the matrix diagonal to smooth the results\n # can help speed up solving, but might also introduce some errors\n\n if self.eq_const_c_ > 0:\n logger.info(\"Equality block is %i x %i\"%(self.eq_const_c_,self.nx))\n # solving constrained least squares using\n # | ATA CT | |c| = b\n # | C 0 | |y| d\n # where A is the interpoaltion matrix\n # C is the equality constraint matrix\n # b is the interpolation constraints to be honoured\n # in a least squares sense\n # and d are the equality constraints\n # c are the node values and y are the\n # lagrange multipliers#\n C = coo_matrix(\n (np.array(self.eq_const_C), (np.array(self.eq_const_row),\n np.array(self.eq_const_col))),\n shape=(self.eq_const_c_, self.nx))\n d = np.array(self.eq_const_d)\n AAT = bmat([[AAT, C.T], [C, None]])\n BT = np.hstack([BT, d])\n if damp:\n logger.info(\"Adding eps to matrix diagonal\")\n AAT += eye(AAT.shape[0]) * np.finfo('float').eps\n return AAT, BT\n\n def _solve_lu(self, A, B):\n \"\"\"\n Call scipy LU decomoposition\n\n Parameters\n ----------\n A : scipy square sparse matrix\n B : numpy vector\n\n Returns\n -------\n\n \"\"\"\n lu = sla.splu(A.tocsc())\n sol = lu.solve(B)\n return sol[:self.nx]\n\n def _solve_lsqr(self, A, B, **kwargs):\n \"\"\"\n Call scipy lsqr\n\n Parameters\n ----------\n A : rectangular sparse matrix\n B : vector\n\n Returns\n 
-------\n\n \"\"\"\n\n lsqrargs = {}\n lsqrargs['btol'] = 1e-12\n lsqrargs['atol'] = 0\n if 'iter_lim' in kwargs:\n logger.info(\"Using %i maximum iterations\" % kwargs['iter_lim'])\n lsqrargs['iter_lim'] = kwargs['iter_lim']\n if 'damp' in kwargs:\n logger.info(\"Using damping coefficient\")\n lsqrargs['damp'] = kwargs['damp']\n if 'atol' in kwargs:\n logger.info('Using a tolerance of %f' % kwargs['atol'])\n lsqrargs['atol'] = kwargs['atol']\n if 'btol' in kwargs:\n logger.info('Using btol of %f' % kwargs['btol'])\n lsqrargs['btol'] = kwargs['btol']\n if 'show' in kwargs:\n lsqrargs['show'] = kwargs['show']\n if 'conlim' in kwargs:\n lsqrargs['conlim'] = kwargs['conlim']\n return sla.lsqr(A,B, **lsqrargs)[0]\n\n def _solve_chol(self, A, B):\n \"\"\"\n Call suitesparse cholmod through scikitsparse\n LINUX ONLY!\n\n Parameters\n ----------\n A : scipy.sparse.matrix\n square sparse matrix\n B : numpy array\n RHS of equation\n\n Returns\n -------\n\n \"\"\"\n try:\n from sksparse.cholmod import cholesky\n factor = cholesky(A.tocsc())\n return factor(B)[:self.nx]\n except ImportError:\n logger.warning(\"Scikit Sparse not installed try using cg instead\")\n return False\n\n def _solve_cg(self, A, B, precon=None, **kwargs):\n \"\"\"\n Call scipy conjugate gradient\n\n Parameters\n ----------\n A : scipy.sparse.matrix\n square sparse matrix\n B : numpy vector\n precon : scipy.sparse.matrix\n a preconditioner for the conjugate gradient system\n kwargs\n kwargs to pass to scipy solve e.g. 
atol, btol, callback etc\n\n Returns\n -------\n numpy array\n \"\"\"\n cgargs = {}\n cgargs['tol'] = 1e-12\n cgargs['atol'] = 0\n if 'maxiter' in kwargs:\n logger.info(\"Using %i maximum iterations\"%kwargs['maxiter'])\n cgargs['maxiter'] = kwargs['maxiter']\n if 'x0' in kwargs:\n logger.info(\"Using starting guess\")\n cgargs['x0'] = kwargs['x0']\n if 'tol' in kwargs:\n logger.info('Using tolerance of %f'%kwargs['tol'])\n cgargs['tol'] = kwargs['tol']\n if 'atol' in kwargs:\n logger.info('Using atol of %f'%kwargs['atol'])\n cgargs['atol'] = kwargs['atol']\n if 'callback' in kwargs:\n cgargs['callback'] = kwargs['callback']\n if precon is not None:\n cgargs['M'] = precon(A)\n return sla.cg(A, B, **cgargs)[0][:self.nx]\n\n def _solve_pyamg(self, A, B, tol=1e-12,x0=None,**kwargs):\n \"\"\"\n Solve least squares system using pyamg algorithmic multigrid solver\n\n Parameters\n ----------\n A : scipy.sparse.matrix\n B : numpy array\n\n Returns\n -------\n\n \"\"\"\n import pyamg\n logger.info(\"Solving using pyamg: tol {}\".format(tol))\n return pyamg.solve(A, B, tol=tol, x0=x0, verb=False)[:self.nx]\n\n def _solve(self, solver='cg', **kwargs):\n \"\"\"\n Main entry point to run the solver and update the node value\n attribute for the\n discreteinterpolator class\n\n Parameters\n ----------\n solver : string\n solver e.g. cg, lu, chol, custom\n kwargs\n kwargs for solver e.g. 
maxiter, preconditioner etc, damping for\n \n Returns\n -------\n bool\n True if the interpolation is run\n\n \"\"\"\n logger.info(\"Solving interpolation for {}\".format(self.propertyname))\n self.c = np.zeros(self.support.n_nodes)\n self.c[:] = np.nan\n damp = True\n if 'damp' in kwargs:\n damp = kwargs['damp']\n if solver == 'lu':\n logger.info(\"Forcing matrix damping for LU\")\n damp = True\n if solver == 'lsqr':\n A, B = self.build_matrix(False)\n else:\n A, B = self.build_matrix(damp=damp)\n\n # run the chosen solver\n if solver == 'cg':\n logger.info(\"Solving using conjugate gradient\")\n self.c[self.region] = self._solve_cg(A, B, **kwargs)\n if solver == 'chol':\n self.c[self.region] = self._solve_chol(A, B)\n if solver == 'lu':\n logger.info(\"Solving using scipy LU\")\n self.c[self.region] = self._solve_lu(A, B)\n if solver == 'pyamg':\n try:\n logger.info(\"Solving with pyamg solve\")\n self.c[self.region] = self._solve_pyamg(A, B,**kwargs)\n except ImportError:\n logger.warn(\"Pyamg not installed using cg instead\")\n self.c[self.region] = self._solve_cg(A, B)\n if solver == 'lsqr':\n self.c[self.region] = self._solve_lsqr(A, B, **kwargs)\n if solver == 'external':\n logger.warning(\"Using external solver\")\n self.c[self.region] = kwargs['external'](A, B)[:self.nx]\n # check solution is not nan\n # self.support.properties[self.propertyname] = self.c\n if np.all(self.c == np.nan):\n logger.warning(\"Solver not run, no scalar field\")\n # if solution is all 0, probably didn't work\n if np.all(self.c[self.region] == 0):\n logger.warning(\"No solution, {} scalar field 0. Add more data.\".format(self.propertyname))\n\n def update(self):\n \"\"\"\n Check if the solver is up to date, if not rerun interpolation using\n the previously used solver. 
If the interpolation has not been run\n before it will\n return False\n\n Returns\n -------\n bool\n\n \"\"\"\n if self.solver is None:\n logging.debug(\"Cannot rerun interpolator\")\n return False\n if not self.up_to_date:\n self.setup_interpolator()\n return self._solve(self.solver)\n\n def evaluate_value(self, evaluation_points):\n evaluation_points = np.array(evaluation_points)\n evaluated = np.zeros(evaluation_points.shape[0])\n mask = np.any(evaluation_points == np.nan, axis=1)\n\n if evaluation_points[~mask, :].shape[0] > 0:\n evaluated[~mask] = self.support.evaluate_value(\n evaluation_points[~mask], self.c)\n return evaluated\n\n def evaluate_gradient(self, evaluation_points):\n \"\"\"\n Evaluate the gradient of the scalar field at the evaluation points\n Parameters\n ----------\n evaluation_points : np.array\n xyz locations to evaluate the gradient\n\n Returns\n -------\n\n \"\"\"\n if evaluation_points.shape[0] > 0:\n return self.support.evaluate_gradient(evaluation_points,\n self.c)\n return np.zeros((0, 3))","repo_name":"wgorczyk/LoopStructural","sub_path":"LoopStructural/interpolators/discrete_interpolator.py","file_name":"discrete_interpolator.py","file_ext":"py","file_size_in_byte":19092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"73889649112","text":"from EvalNatExp.parser import exp\nfrom ReduceNatExp.rule import Reduce1, Reduce0, ReduceD\nfrom bases.parser import Parser, pure, string2\n\nwith Parser() as assertion:\n reduce1 = pure(lambda a: lambda b: Reduce1(a, b)) + exp + (string2(r'--->') >> exp)\n reduce0 = pure(lambda a: lambda b: Reduce0(a, b)) + exp + (string2(r'-*->') >> exp)\n reduced = pure(lambda a: lambda b: ReduceD(a, b)) + exp + (string2(r'-d->') >> exp)\n assertion.define(reduce1 | reduce0 | reduced)\n\nif __name__ == '__main__':\n print(assertion.run(r'Z + S(S(Z)) -*-> 
S(S(Z))'))\n","repo_name":"speedcell4/CoPL-Py","sub_path":"ReduceNatExp/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"24548494766","text":"from django.shortcuts import render, HttpResponse, redirect, get_object_or_404\nfrom .models import KirrURL\nfrom django.views import View\nfrom .forms import SubmitUrlForm\n\nimport socket\n\nclass HomeView(View):\n def get(self, request, *args, **kwargs):\n tform = SubmitUrlForm()\n context = {\n 'form':tform,\n }\n return render(request, \"shortener/home.html\", context)\n def post(self, request, *args, **kwargs):\n form = SubmitUrlForm(request.POST)\n template=\"shortener/home.html\"\n context = {\n \"form\": form,\n #for proper redirect on a hosting\n 'host': request.get_host(),\n }\n if form.is_valid():\n print(form.cleaned_data)\n url = form.cleaned_data.get('url')\n obj, created = KirrURL.objects.get_or_create(url=url)\n context = {\n 'object':obj,\n 'created':created,\n 'host':request.get_host(),\n }\n #Well I decided that there won't be \"exists\" page\n #Let user think that he/she provided unique link\n #that we haven't in our DB before\n # if created==True:\n # template=\"shortener/success.html\"\n # else:\n # template=\"shortener/already-exists.html\"\n else:\n #Wrong link, error page\n context['failed']=True;\n print(context)\n\n return render(request, \"shortener/success.html\", context)\n\n\nclass KirrCBView(View):\n def get(self, request, code=None, *args, **kwargs):\n obj = get_object_or_404(KirrURL, shortcode=code)\n return redirect(obj.url)","repo_name":"fiterV/URL-Shortener","sub_path":"shortener/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"8753640500","text":"import json\n\ndef compare_lists(left, right):\n for i in 
range(min(len(left), len(right))):\n cmp = compare(left[i], right[i])\n if cmp != 0:\n return cmp\n return len(left) - len(right)\n\ndef compare(left, right):\n if isinstance(left, int) and isinstance(right, list):\n left = [left]\n if isinstance(left, list) and isinstance(right, int):\n right = [right]\n if isinstance(left, int) and isinstance(right, int):\n return left - right\n if isinstance(left, list) and isinstance(right, list):\n return compare_lists(left, right)\n assert False\n\n\n\nwith open(\"input.txt\") as inp:\n lines = [line.strip() for line in inp]\n\npairs = []\nfor i in range(0, len(lines), 3):\n left = json.loads(lines[i])\n right = json.loads(lines[i+1])\n pairs.append((left, right))\n\ns = 0\nfor i in range(len(pairs)):\n left, right = pairs[i]\n if compare(left, right) < 0:\n s += i + 1\n\nprint(s)\n","repo_name":"iley/adventofcode","sub_path":"2022/13/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"27081783337","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='FAQ',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('question', models.CharField(max_length=100)),\n ('category', models.CharField(max_length=100)),\n ('answer', models.TextField(max_length=1000)),\n ('author', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.CreateModel(\n name='GroupProfile',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=50)),\n ('short', 
models.CharField(max_length=10)),\n ('desc', models.CharField(max_length=200)),\n ('password', models.CharField(help_text=b'Geben Sie ein Passwort zum Beitreten ihrer Gruppe ein. (optional)', max_length=128, blank=True)),\n ('picture', models.ImageField(default=b'picture/gdefault.gif', upload_to=b'picture/', blank=True, help_text=b'Geben sie ein Foto ein!', verbose_name=b'Profilbild')),\n ('date', models.DateField(default=datetime.date.today, blank=True)),\n ('admin', models.ForeignKey(related_name='admin', to=settings.AUTH_USER_MODEL)),\n ('member', models.ManyToManyField(related_name='member', to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.CreateModel(\n name='Hashtag',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=50)),\n ],\n ),\n migrations.CreateModel(\n name='Message',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('text', models.CharField(max_length=254)),\n ('picture', models.ImageField(upload_to=b'picture/', blank=True)),\n ('date', models.DateTimeField(verbose_name=b'date published')),\n ],\n ),\n migrations.CreateModel(\n name='Nav',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ],\n ),\n migrations.CreateModel(\n name='NotificationF',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('read', models.BooleanField(default=True)),\n ('date', models.DateTimeField(default=datetime.datetime.now, blank=True)),\n ],\n ),\n migrations.CreateModel(\n name='NotificationM',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('read', models.BooleanField(default=True)),\n ('date', models.DateTimeField(default=datetime.datetime.now, blank=True)),\n ('message', 
models.ForeignKey(related_name='message', to='twittur.Message')),\n ('user', models.ForeignKey(related_name='user', to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.CreateModel(\n name='UserProfile',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('studentNumber', models.CharField(default=b'000000', help_text=b'Über deine Matrikel-Nummer kannst Du eindeutig als Student der TU Berlin identifiziert werden.
(only numbers, max. 6 chars)', max_length=6)),\n ('academicDiscipline', models.CharField(help_text=b'Über deinen Studiengang wirst Du bestimmten Gruppen zugeordnet.', max_length=200)),\n ('picture', models.ImageField(default=b'picture/default.gif', upload_to=b'picture/', blank=True, help_text=b'Dieses Bild wird auf Deinem Profil (groß) und in deinen Nachrichten (klein) angezeigt.', verbose_name=b'Profilbild')),\n ('location', models.CharField(default=b'None', help_text=b'Lass Deine KommilitonInnen Dich finden!', max_length=200)),\n ('follow', models.ManyToManyField(related_name='follow', through='twittur.NotificationF', to=settings.AUTH_USER_MODEL)),\n ('userprofile', models.OneToOneField(to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.AddField(\n model_name='notificationf',\n name='me',\n field=models.ForeignKey(related_name='me', to='twittur.UserProfile'),\n ),\n migrations.AddField(\n model_name='notificationf',\n name='you',\n field=models.ForeignKey(related_name='you', to=settings.AUTH_USER_MODEL),\n ),\n migrations.AddField(\n model_name='message',\n name='attags',\n field=models.ManyToManyField(related_name='attags', through='twittur.NotificationM', to=settings.AUTH_USER_MODEL),\n ),\n migrations.AddField(\n model_name='message',\n name='hashtags',\n field=models.ManyToManyField(related_name='hashtags', to='twittur.Hashtag'),\n ),\n migrations.AddField(\n model_name='message',\n name='user',\n field=models.ForeignKey(to=settings.AUTH_USER_MODEL),\n ),\n ]\n","repo_name":"kps85/PPSN","sub_path":"twittur/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":5906,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"20218054659","text":"import ctypes\nimport tkinter as tk\nimport time\nimport threading\nimport subprocess\n \nLONG = ctypes.c_long\nDWORD = ctypes.c_ulong\nULONG_PTR = ctypes.POINTER(DWORD)\nWORD = ctypes.c_ushort\n \nclass 
MOUSEINPUT(ctypes.Structure):\n _fields_ = ((\"dx\", LONG),\n (\"dy\", LONG),\n (\"mouseData\", DWORD),\n (\"dwFlags\", DWORD),\n (\"time\", DWORD),\n (\"dwExtraInfo\", ULONG_PTR))\n \nclass INPUT(ctypes.Structure):\n _fields_ = ((\"type\", DWORD),\n (\"mi\", MOUSEINPUT))\n \nINPUT_MOUSE = 0\nMOUSEEVENTF_MOVE = 0x0001\n \ndef move_mouse(dx, dy):\n x = INPUT(type=INPUT_MOUSE, mi=MOUSEINPUT(dx=dx, dy=dy, mouseData=0, dwFlags=MOUSEEVENTF_MOVE, time=0))\n ctypes.windll.user32.SendInput(1, ctypes.byref(x), ctypes.sizeof(x))\n \nmouse_moving = False\n \ndef jiggle_mouse():\n global stop_thread\n while not stop_thread: \n if mouse_moving: \n move_mouse(10, 10) \n time.sleep(1) \n move_mouse(-10, -10) \n time.sleep(0.5)\n else:\n time.sleep(1)\n \ndef toggle_mouse_moving():\n global mouse_moving\n mouse_moving = not mouse_moving\n button_text.set(\"Stop\" if mouse_moving else \"Start\")\n\ndef on_closing(root):\n global stop_thread\n stop_thread = True\n root.destroy()\n\ndef start_gui():\n global button_text, stop_thread\n root = tk.Tk()\n root.title(\"Better Jiggler by TJ\")\n root.geometry('300x35')\n button_text = tk.StringVar()\n button_text.set(\"Start\")\n button = tk.Button(root, textvariable=button_text, command=toggle_mouse_moving)\n button.pack()\n root.protocol(\"WM_DELETE_WINDOW\", lambda: on_closing(root))\n root.mainloop()\n\n\nif __name__ == '__main__':\n stop_thread = False\n t = threading.Thread(target=jiggle_mouse)\n t.start()\n start_gui()\n","repo_name":"TalvinJacobs/BetterJiggler","sub_path":"jiggler.pyw","file_name":"jiggler.pyw","file_ext":"pyw","file_size_in_byte":1821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"4141140451","text":"# You will be given an array of numbers. 
You have to sort the odd numbers in ascending order while\n# leaving the even numbers at their original positions.\n#\n# Examples\n# [7, 1] => [1, 7]\n# [5, 8, 6, 3, 4] => [3, 8, 6, 5, 4]\n# [9, 8, 7, 6, 5, 4, 3, 2, 1, 0] => [1, 8, 3, 6, 5, 4, 7, 2, 9, 0]\n\n\ndef sort_array(source_array):\n odd_numbers = sorted([i for i in source_array if i%2!=0])\n nul = 0\n result = []\n for n in source_array:\n if n%2!=0:\n result.append(odd_numbers[nul])\n nul += 1\n else:\n result.append(n)\n return result\n\nprint(sort_array([1, 9, 5, 8, 0]))\n\n","repo_name":"dima-pv/codewars","sub_path":"codewar/Sort the odd.py","file_name":"Sort the odd.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"74299105431","text":"def sumDigits(n):\n sum=0\n res=0\n while(n>0):\n res = n % 10\n n = n // 10\n sum= sum + res\n return sum\n\ndef main():\n num = int(input(\"Enter the number: \"))\n sum = sumDigits(num)\n print(\"Sum of Digits: \", sum)\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"ayushgupta0110/Python_Practicals","sub_path":"Practicals/Date 17-09/Q1.py","file_name":"Q1.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"12432995507","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path('', views.index),\n path('create', views.create),\n path('login', views.login),\n path('logout', views.logout),\n path('show', views.show),\n path('create/message', views.message),\n path('create/comment', views.comment),\n\n]\n\n","repo_name":"JonLill/the_wall","sub_path":"wall_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"5945716277","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Nov 21 19:31:09 2021\r\n\r\n@author: Tom\r\n\"\"\"\r\n\r\n# This is a program to calculate the kinetic & potential components of the energy of a falling projectile\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n# Defining the arrays we will need to store the relevant data\r\n\r\ntimeData = np.linspace(0.0, 4.5, num=50)\r\nheightData = []\r\nkineticEnergyData = []\r\npotentialEnergyData = []\r\ntotalEnergyData = []\r\n\r\n# Defining the constants of the motion\r\n\r\n# iniHeight = initial height of the projectile\r\niniHeight = 100\r\n\r\n# m = mass of the projectile in kilograms\r\nm = 1.0\r\n\r\n# g = acceleration due to gravity in metres per second squared\r\ng = 9.81\r\n\r\n# Creating the functions we will need in order to do the calculations\r\n\r\n# Function that calculates the vertical hieght of the projectile at time t\r\ndef heightCalc(iniHeight, t):\r\n temp_iniHeight = iniHeight\r\n temp_t = t\r\n h = temp_iniHeight - (0.5 * g * temp_t * temp_t)\r\n return(h)\r\n\r\n# Function that calculates the velocity of the projectile at time t\r\ndef velocityCalc(t):\r\n temp_t = t\r\n vel = -g * temp_t\r\n return(vel)\r\n\r\n# Function that calculates the potential energy of the projectile at time t\r\ndef potentialEnergyCalc(h):\r\n temp_h = h\r\n V = m * g * temp_h\r\n return(V)\r\n\r\n# Function that calculates the kinetic energy of the projectile at time t\r\ndef 
kineticEnergyCalc(v):\r\n temp_v = v\r\n T = 0.5 * m * temp_v * temp_v\r\n return(T)\r\n\r\ndef simulate():\r\n flag = True\r\n t = 0.0\r\n while (flag == True):\r\n temp_height = heightCalc(iniHeight, t)\r\n heightData.append(temp_height)\r\n temp_vel = velocityCalc(t)\r\n \r\n temp_PE = potentialEnergyCalc(temp_height)\r\n potentialEnergyData.append(temp_PE)\r\n temp_KE = kineticEnergyCalc(temp_vel)\r\n kineticEnergyData.append(temp_KE)\r\n \r\n temp_TE = temp_PE + temp_KE\r\n totalEnergyData.append(temp_TE)\r\n \r\n t = t + 0.01\r\n if(temp_height < 0.0):\r\n flag = False\r\n \r\n # Plotting the relevant data\r\n plt.plot(timeData, kineticEnergyData, \"g\")\r\n plt.plot(timeData, potentialEnergyData, \"r\")\r\n plt.plot(timeData, totalEnergyData, \"b\")\r\n plt.title(\"Energy of a Falling Projectile Falling from Rest\")\r\n plt.xlabel(\"time (s)\")\r\n plt.ylabel(\"Energy (J)\")\r\n plt.legend([\"Green - Kinetic Energy\", \"Red - Potential Energy\", \"Blue - Total Energy\"])\r\n plt.show()\r\n \r\ndef analytic():\r\n \r\n # This loop calculates and then stores the energy values over the course of the motion\r\n for i in range(len(timeData)):\r\n temp_height = heightCalc(iniHeight, timeData[i])\r\n heightData.append(temp_height)\r\n temp_vel = velocityCalc(timeData[i])\r\n \r\n temp_PE = potentialEnergyCalc(temp_height)\r\n potentialEnergyData.append(temp_PE)\r\n temp_KE = kineticEnergyCalc(temp_vel)\r\n kineticEnergyData.append(temp_KE)\r\n \r\n temp_TE = temp_PE + temp_KE\r\n totalEnergyData.append(temp_TE)\r\n \r\n # Plotting the relevant data\r\n plt.plot(timeData, kineticEnergyData, \"g\")\r\n plt.plot(timeData, potentialEnergyData, \"r\")\r\n plt.plot(timeData, totalEnergyData, \"b\")\r\n plt.title(\"Energy of a Falling Projectile Falling from Rest\")\r\n plt.xlabel(\"time (s)\")\r\n plt.ylabel(\"Energy (J)\")\r\n plt.legend([\"Green - Kinetic Energy\", \"Red - Potential Energy\", \"Blue - Total Energy\"])\r\n plt.show()\r\n\r\nflag = True\r\nt = 
0.0\r\ntimeDataSim = []\r\nwhile (flag == True):\r\n temp_height = heightCalc(iniHeight, t)\r\n heightData.append(temp_height)\r\n temp_vel = velocityCalc(t)\r\n \r\n temp_PE = potentialEnergyCalc(temp_height)\r\n potentialEnergyData.append(temp_PE)\r\n temp_KE = kineticEnergyCalc(temp_vel)\r\n kineticEnergyData.append(temp_KE)\r\n \r\n temp_TE = temp_PE + temp_KE\r\n totalEnergyData.append(temp_TE)\r\n \r\n timeDataSim.append(t)\r\n \r\n t = t + 0.01\r\n if(temp_height < 0.0):\r\n flag = False\r\n \r\n\r\n# Plotting the relevant data\r\nplt.figure(0)\r\nplt.plot(timeDataSim, kineticEnergyData, \"g\")\r\nplt.plot(timeDataSim, potentialEnergyData, \"r\")\r\nplt.plot(timeDataSim, totalEnergyData, \"b\")\r\nplt.title(\"Energy of a Projectile Falling from Rest\")\r\nplt.xlabel(\"time (s)\")\r\nplt.ylabel(\"Energy (J)\")\r\nplt.legend([\"Green - Kinetic Energy\", \"Red - Potential Energy\", \"Blue - Total Energy\"])\r\nplt.show()\r\n\r\n\r\n# Plotting the trajectory\r\nplt.figure(1)\r\nplt.plot(heightData, timeDataSim, \"g\")\r\nplt.title(\"Trajectory of a Falling Projectile\")\r\nplt.xlabel(\"Time (s)\")\r\nplt.ylabel(\"Height (m)\")\r\nplt.show()\r\n \r\n \r\n \r\n \r\n \r\n ","repo_name":"lootedvillage/projectile-motion","sub_path":"Projectile Energy.py","file_name":"Projectile Energy.py","file_ext":"py","file_size_in_byte":4679,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"8469633084","text":"load(\"@rules_cc//cc:defs.bzl\", \"cc_library\")\n\ndef _file_name(filePathName):\n if \"/\" in filePathName:\n return filePathName.rsplit(\"/\", -1)[1]\n else:\n return filePathName\n\ndef _base_name(fileName):\n return fileName.split(\".\")[0]\n\ndef qt_cc_library(name, srcs, hdrs, copts = [], uis = [], res = [], normal_hdrs = [], deps = None, **kwargs):\n for hItem in hdrs:\n base_name = _base_name(_file_name(hItem))\n cmd = \"\"\"\n if grep -q Q_OBJECT $(location %s); then \\\n /usr/local/qt5/bin/moc $(location 
%s) -o $@ -f'%s'; \\\n else \\\n echo '' > $@ ; \\\n fi\"\"\" % (hItem, hItem, \"%s/%s\" % (native.package_name(), hItem))\n native.genrule(\n name = \"%s_moc\" % base_name,\n srcs = [hItem],\n outs = [\"moc_%s.cpp\" % base_name],\n cmd = cmd,\n )\n srcs.append(\"moc_%s.cpp\" % base_name)\n\n for uitem in uis:\n base_name = _base_name(_file_name(uitem))\n native.genrule(\n name = \"%s_ui\" % base_name,\n srcs = [uitem],\n outs = [\"ui_%s.h\" % base_name],\n cmd = \"/usr/local/qt5/bin/uic $(locations %s) -o $@\" % uitem,\n )\n hdrs.append(\"ui_%s.h\" % base_name)\n\n for ritem in res:\n base_name = _base_name(_file_name(ritem))\n native.genrule(\n name = \"%s_res\" % base_name,\n srcs = [ritem] + deps,\n outs = [\"res_%s.cpp\" % base_name],\n cmd = \"/usr/local/qt5/bin/rcc --name res --output $(OUTS) $(location %s)\" % ritem,\n )\n srcs.append(\"res_%s.cpp\" % base_name)\n\n hdrs = hdrs + normal_hdrs\n cc_library(\n name = name,\n srcs = srcs,\n hdrs = hdrs,\n deps = deps,\n copts = copts + [\"-fPIC\"],\n alwayslink = 1,\n **kwargs\n )\n","repo_name":"ApolloAuto/apollo","sub_path":"third_party/qt5/qt.bzl","file_name":"qt.bzl","file_ext":"bzl","file_size_in_byte":1825,"program_lang":"python","lang":"en","doc_type":"code","stars":23653,"dataset":"github-code","pt":"5"} +{"seq_id":"28729026458","text":"from flask import Flask, request, jsonify\r\nimport requests\r\n\r\napp = Flask(__name__)\r\n\r\n@app.route('/', methods=['POST'])\r\ndef webhook():\r\n data = request.get_json(silent=True)\r\n intent = data['queryResult']['intent']['displayName']\r\n \r\n if intent == 'GetCareerSuggestion':\r\n # Assuming you want to get a career suggestion from the user's input\r\n user_text = data['queryResult']['queryText']\r\n response = get_career_suggestion(user_text)\r\n else:\r\n response = \"I'm sorry, I don't understand that.\"\r\n\r\n reply = {'fulfillmentText': response}\r\n return jsonify(reply)\r\n\r\ndef get_career_suggestion(user_input):\r\n # Add your logic to process 
user input and get career suggestion here\r\n # For now, let's assume a basic response\r\n return \"Based on your input, I suggest exploring careers in the field of Science.\"\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True)\r\n\r\n","repo_name":"VarunKale08/career-counseling-chatbot","sub_path":"dialogflow_fulfillment.py","file_name":"dialogflow_fulfillment.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"7622167316","text":"# \n\nfrom Bio import SeqIO\nimport re\nimport argparse\n\ndef get_options():\n parser = argparse.ArgumentParser(description='Assign variants for gene')\n # parser.add_argument('--gene', help='Gene', type=str) # assumes stuff about directories, that pipeline has been run\n parser.add_argument(\"--variant_fasta\", help=\"Fasta file of variants to use\", type=str, required=True)\n parser.add_argument('--output_file', help='output file of assignments', type=str, required=False, default='gene-variants')\n parser.add_argument('--input_fasta',help='Input fasta', required=True)\n parser.add_argument('--outputdir', help='output dir', type=str, default='./', required=False)\n return parser.parse_args()\n\n\ndef main():\n\targs = get_options()\n\tvariant_seqs = SeqIO.to_dict(SeqIO.parse(args.variant_fasta, 'fasta'))\n\n\t#gene = args.gene\n\t#gene_family = re.sub(\"-.*\", \"\", gene) # technically doesn't handled CTX-M properly, but ok\n\n\tfasta_file = args.input_fasta\n\tseqs_to_assign = SeqIO.to_dict(SeqIO.parse(fasta_file, 'fasta'))\n\n\t#gene_seqs = {k:v for k, v in variant_seqs.items() if gene_family in k}\n\n\n\tseqs_aa = {str(v.seq.translate()): re.sub('.*\\\\|', '', k) for k, v in variant_seqs.items()}\n\n\tseqs_nt = {str(v.seq):re.sub('.*\\\\|', '', k) for k, v in variant_seqs.items()}\n\n\n\tseq_names_dict = {}\n\tseq_counts_dict = {}\n\tfor seqid, seq_obj in seqs_to_assign.items():\n\t\tseq_nt = str(seq_obj.seq)\n\t\tseq_aa = 
str(seq_obj.seq.translate())\n\n\t\t# Check for truncation\n\t\tif seq_aa.count('*')>1:\n\t\t\tseq_names_dict[seqid] = 'truncated'\n\t\t\t#print(seqid, 'truncated')\n\t\telse:\n\t\t\tif seq_aa in seqs_aa.keys():\n\t\t\t\t#print(seq_aa)\n\t\t\t\tseq_names_dict[seqid] = seqs_aa[seq_aa]\n\t\t\telse:\n\t\t\t\tseq_names_dict[seqid] = 'other'\n\t\t\t\t#print(seqid, 'unnamed')\n\t\tif seq_nt in seq_counts_dict.keys():\n\t\t\tseq_counts_dict[seq_nt] +=1\n\t\telse:\n\t\t\tseq_counts_dict[seq_nt] = 1\n\n\t#print(seq_names_dict)\n\t#print(seq_counts_dict)\n\n\twith open(args.output_file, \"w\") as f:\n\t\tfor seqid, name in seq_names_dict.items():\n\t\t\tf.write(\"%s,%s\\n\" % (seqid, name))\n\n\t#alignment_file = args.inputprefix+'_focal_gene.dedup.aln'\n\t#metadata_file = args.inputprefix+'_focal_gene.dedup.txt' \n\t#seq_groups = [line.split('\\t')[1] for line in open(metadata_file, 'r').readlines()]\n\n\t\n\n\t#aligned_seqs_dedup = SeqIO.to_dict(SeqIO.parse(alignment_file, 'fasta'))\n\n\t# counts_seqs = {}\n\t# for seqid in aligned_seqs_dedup.keys():\n\t# \tn_identical_seqs = [len(x.split(',')) for x in seq_groups if seqid in x]\n\t# \tif len(n_identical_seqs)==0:\n\t# \t\tn_identical_seqs = 1\n\t# \telse:\n\t# \t\tn_identical_seqs = n_identical_seqs[0]\n\t# \tcounts_seqs[seqid] = n_identical_seqs\n\n\t# Write a nucleotide alignment, with protein names where applicable (different nt seq can have same protein name because)\n\t# of synonymous mutations\n\t# with open(args.outputdir+args.outputprefix+'.aln','w') as f:\n\t# \tfor seqid, seq_obj in aligned_seqs_dedup.items():\n\t# \t\tseq = re.sub('\\n', '', str(seq_obj.seq))\n\t# \t\tf.write('>%s | %s %d\\n%s\\n' % (seqid, seq_names_dict[seqid], counts_seqs[seqid], seq))\n\n\t# # Write the sequence variants\n\t# with open(args.outputdir+args.outputprefix+'.csv','w') as f:\n\t# \tfor seq, name in seq_names_dict.items():\n\t# \t\tf.write('%s,%s\\n' % (seq,name) )\n\n\nif 
__name__==\"__main__\":\n\tmain()\n","repo_name":"liampshaw/mobile-gene-regions","sub_path":"scripts/name_variants.py","file_name":"name_variants.py","file_ext":"py","file_size_in_byte":3223,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"15677184114","text":"import numpy as np\nimport pandas as pd\nimport scanpy as sc\n\nsc.settings.verbosity = 3\nsc.logging.print_versions()\nsc.settings.set_figure_params(dpi=80, facecolor='white')\nadata = sc.read_10x_mtx('filtered_gene_bc_matrices/hg19/',var_names='gene_symbols',cache=True)\nadata.var_names_make_unique()\nadata\nsc.pl.highest_expr_genes(adata, n_top=20, )\nsc.pp.filter_cells(adata, min_genes=200)\nsc.pp.filter_genes(adata, min_cells=3)\nadata.var['mt'] = adata.var_names.str.startswith('MT-')\nsc.pp.calculate_qc_metrics(adata, qc_vars=['mt'], percent_top=None, log1p=False, inplace=True)\nsc.pl.violin(adata, ['n_genes_by_counts', 'total_counts', 'pct_counts_mt'],jitter=0.4, multi_panel=True)\nsc.pl.scatter(adata, x='total_counts', y='pct_counts_mt')\nsc.pl.scatter(adata, x='total_counts', y='n_genes_by_counts')\nadata = adata[adata.obs.n_genes_by_counts < 2500, :]\nadata = adata[adata.obs.pct_counts_mt < 5, :]\nsc.pp.normalize_total(adata, target_sum=1e4)\nsc.pp.log1p(adata)\nsc.pp.highly_variable_genes(adata, min_mean=0.0125, max_mean=3, min_disp=0.5)\nsc.pl.highly_variable_genes(adata)\nadata.raw = adata\nadata = adata[:, adata.var.highly_variable]\nsc.pp.regress_out(adata, ['total_counts', 'pct_counts_mt'])\nsc.pp.scale(adata, max_value=10)\nsc.tl.pca(adata, svd_solver='arpack')\nsc.pl.pca(adata, color='CST3')\nsc.pl.pca_variance_ratio(adata, log=True)\nadata.write(results_file)\nadata\nsc.pp.neighbors(adata, n_neighbors=10, n_pcs=40)\nsc.tl.umap(adata)\nsc.pl.umap(adata, color=['CST3', 'NKG7', 'PPBP'])\nsc.pl.umap(adata, color=['CST3', 'NKG7', 'PPBP'], use_raw=False)\nsc.tl.leiden(adata)\nsc.pl.umap(adata, color=['leiden', 'CST3', 
'NKG7'])\nadata.write(results_file)\nsc.tl.rank_genes_groups(adata, 'leiden', method='t-test')\nsc.pl.rank_genes_groups(adata, n_genes=25, sharey=False)\nsc.settings.verbosity = 2\nsc.tl.rank_genes_groups(adata, 'leiden', method='wilcoxon')\nsc.pl.rank_genes_groups(adata, n_genes=25, sharey=False)\nadata.write(results_file)\nsc.tl.rank_genes_groups(adata, 'leiden', method='logreg')\nsc.pl.rank_genes_groups(adata, n_genes=25, sharey=False)\nmarker_genes = ['IL7R', 'CD79A', 'MS4A1', 'CD8A', 'CD8B', 'LYZ', 'CD14','LGALS3', 'S100A8', 'GNLY', 'NKG7', 'KLRB1','FCGR3A', 'MS4A7', 'FCER1A', 'CST3', 'PPBP']\nadata = sc.read(results_file)\npd.DataFrame(adata.uns['rank_genes_groups']['names']).head(5)\nresult = adata.uns['rank_genes_groups']\ngroups = result['names'].dtype.names\npd.DataFrame({group + '_' + key[:1]: result[key][group]for group in groups for key in ['names', 'pvals']}).head(5)\nsc.tl.rank_genes_groups(adata, 'leiden', groups=['0'], reference='1', method='wilcoxon')\nsc.pl.rank_genes_groups(adata, groups=['0'], n_genes=20)\nsc.pl.rank_genes_groups_violin(adata, groups='0', n_genes=8)\nsc.pl.violin(adata, ['CST3', 'NKG7', 'PPBP'], groupby='leiden')\nnew_cluster_names = ['CD4 T', 'CD14 Monocytes','B', 'CD8 T','NK', 'FCGR3A Monocytes','Dendritic', 'Megakaryocytes']\nadata.rename_categories('leiden', new_cluster_names)\nsc.pl.umap(adata, color='leiden', legend_loc='on data', title='', frameon=False, save='.pdf')\nsc.pl.dotplot(adata, marker_genes, groupby='leiden');\nsc.pl.stacked_violin(adata, marker_genes, groupby='leiden', rotation=90);\n","repo_name":"iamzhangxiaoyu/scRNA-seq","sub_path":"scanpy.py","file_name":"scanpy.py","file_ext":"py","file_size_in_byte":3162,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"693598789","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport io\n\nimport pytest\nfrom flask import url_for\n\nfrom smorest_sfs.modules.auth.permissions import ROLES\nfrom 
smorest_sfs.modules.storages.models import Storages\nfrom smorest_sfs.modules.users.models import User\nfrom smorest_sfs.utils.storages import load_storage_from_path\nfrom tests._utils.injection import FixturesInjectBase\n\n\nclass TestStoragesView(FixturesInjectBase):\n\n fixture_names = (\"flask_app_client\", \"flask_app\")\n\n def test_get(self, regular_user: User, add_storage: Storages) -> None:\n with self.flask_app_client.login(regular_user, [ROLES.User]) as client:\n resp = client.get(f\"/api/v1/storages/{add_storage.id}\")\n assert resp.data == b\"abc\"\n\n def test_put(self, regular_user: User, add_storage: Storages) -> None:\n with self.flask_app_client.login(regular_user, [ROLES.User]) as client:\n store_id = add_storage.id\n client.put(\n f\"/api/v1/storages/{store_id}\",\n data={\"file\": (io.BytesIO(b\"789\"), \"new.txt\")},\n content_type=\"multipart/form-data\",\n )\n add_storage.as_stream()\n resp = client.get(f\"/api/v1/storages/{add_storage.id}\")\n assert resp.data == b\"789\"\n\n def test_delete(self, regular_user: User, add_storage: Storages) -> None:\n with self.flask_app_client.login(regular_user, [ROLES.User]) as client:\n resp = client.delete(f\"/api/v1/storages/{add_storage.id}\")\n after_resp = client.get(f\"/api/v1/storages/{add_storage.id}\")\n assert resp.json[\"code\"] == 0 and after_resp.status_code == 404\n\n\nclass TestForceDeleteView(FixturesInjectBase):\n\n fixture_names = (\"flask_app_client\", \"flask_app\")\n\n def test_force_delete(self, regular_user: User, add_storage: Storages) -> None:\n file_id = add_storage.id\n path = add_storage.path[:]\n with self.flask_app_client.login(regular_user, [ROLES.SuperUser]) as client:\n with self.flask_app.test_request_context():\n client.delete(url_for(\"Storages.ForceDeleteView\", file_id=file_id))\n with pytest.raises(FileNotFoundError):\n load_storage_from_path(\"test.txt\", path)\n\n\nclass TestUploadView(FixturesInjectBase):\n fixture_names = (\"flask_app_client\", 
\"flask_app\")\n\n @pytest.mark.usefixtures(\"clean_dirs\")\n def test_post(self, regular_user: User) -> None:\n with self.flask_app_client.login(regular_user, [ROLES.User]) as client:\n with self.flask_app.test_request_context():\n resp = client.post(\n url_for(\"Storages.UploadView\", storetype=\"foo\"),\n data={\"file\": (io.BytesIO(b\"456\"), \"new.txt\")},\n content_type=\"multipart/form-data\",\n )\n store_id = resp.json[\"data\"][\"file_id\"]\n resp = client.get(url_for(\"Storages.StoragesView\", file_id=store_id))\n assert resp.data == b\"456\"\n","repo_name":"ssfdust/yt-media","sub_path":"tests/modules/storages/resources/test_storages.py","file_name":"test_storages.py","file_ext":"py","file_size_in_byte":3020,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"30216744408","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ndef ex8():\n\n f = 100\n t = np.linspace(-np.pi / 2, np.pi / 2, f)\n\n sin = np.sin(t)\n\n sin_approx_t = t\n\n sin_approx_p = (t - (7 * t ** 3) / 60) / (1 + (t ** 2) / 20)\n\n err_t = sin - sin_approx_t\n err_p = sin - sin_approx_p\n\n plt.figure(figsize=(12, 8))\n\n plt.subplot(2, 1, 1)\n plt.plot(t, sin, label='sin')\n plt.plot(t, sin_approx_t, label='aprox taylor')\n plt.plot(t, sin_approx_p, label='aprox pade')\n plt.title('Aproximarea sin in [-π/2, π/2]')\n plt.legend()\n\n plt.subplot(2, 1, 2)\n plt.semilogy(t, abs(err_t), label='taylor')\n plt.semilogy(t, abs(err_p), label='pade)')\n plt.title('Eroarea ca functie logaritmica')\n plt.legend()\n\n plt.tight_layout()\n plt.show()\n","repo_name":"AndreiLaurentiu/TemeProcSemCTI","sub_path":"lab2/ex8.py","file_name":"ex8.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"23245243466","text":"import pandas as pd\nimport numpy as np\n\n\ndef read_data(file_name, training=True):\n if isinstance(file_name, list):\n 
file_names = file_name\n elif isinstance(file_name, str):\n file_names = [file_name]\n else:\n file_names = []\n df = pd.DataFrame()\n for file in file_names:\n df = pd.concat([df, pd.read_csv(file, index_col=False)], axis=0, ignore_index=True)\n if not training:\n df[\"label\"] = -1\n df = df.sort_values(by=[\"sn\", \"fault_time\", \"label\"])\n df[\"fault_time\"] = pd.to_datetime(df[\"fault_time\"], format=\"%Y-%m-%d %H:%M:%S\")\n df['fault_time_ts'] = df[\"fault_time\"].values.astype(np.int64) // 10 ** 9\n return df\n\n\ndef read_log_data(log_file_name):\n if isinstance(log_file_name, list):\n log_file_names = log_file_name\n elif isinstance(log_file_name, str):\n log_file_names = [log_file_name]\n else:\n log_file_names = []\n\n log_df = pd.DataFrame()\n for file in log_file_names:\n log_df = pd.concat([log_df, pd.read_csv(file, index_col=False)], axis=0, ignore_index=True)\n log_df = log_df.sort_values(by=[\"sn\", \"time\"])\n log_df[\"time\"] = pd.to_datetime(log_df[\"time\"], format=\"%Y-%m-%d %H:%M:%S\")\n log_df['time_ts'] = log_df[\"time\"].values.astype(np.int64) // 10 ** 9\n return log_df\n\n\ndef read_crashdump_data(crash_file_name):\n if isinstance(crash_file_name, list):\n crash_file_names = crash_file_name\n elif isinstance(crash_file_name, str):\n crash_file_names = [crash_file_name]\n else:\n crash_file_names = []\n\n crash_df = pd.DataFrame()\n for file in crash_file_names:\n crash_df = pd.concat([crash_df, pd.read_csv(file, index_col=False)], axis=0, ignore_index=True)\n crash_df[\"fault_time\"] = pd.to_datetime(crash_df[\"fault_time\"], format=\"%Y-%m-%d %H:%M:%S\")\n # sn,fault_time,fault_code\n return crash_df\n\n\ndef read_venus_data(venus_file_name):\n if isinstance(venus_file_name, list):\n venus_file_names = venus_file_name\n elif isinstance(venus_file_name, str):\n venus_file_names = [venus_file_name]\n else:\n venus_file_names = []\n\n venus_df = pd.DataFrame()\n for file in venus_file_names:\n venus_df = pd.concat([venus_df, 
pd.read_csv(file, index_col=False)], axis=0, ignore_index=True)\n venus_df[\"fault_time\"] = pd.to_datetime(venus_df[\"fault_time\"], format=\"%Y-%m-%d %H:%M:%S\")\n # sn,fault_time,module_cause,module\n return venus_df\n","repo_name":"batigol-001/aliyun_alops_3","sub_path":"feature/read_data.py","file_name":"read_data.py","file_ext":"py","file_size_in_byte":2447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"8747394319","text":"# tmux11away mechanism\n\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see .\n\nTIMEOUT_DETACHED = 120\nTIMEOUT_IDLE = 1200\n\nimport weechat\n\nimport os\nimport stat\nimport time\n\nweechat.register('tmux11_away', 'Benjamin Richter ', '0.1', 'GPL3', 'tmux11away mechanism', '', '')\nweechat.hook_timer(2000, 0, 0, 'check_away', '')\n\nlast_reason = None\n\ndef set_away(reason):\n global last_reason\n if reason == last_reason: return\n weechat.command('', '/away -all ' + reason)\n last_reason = reason\n\ndef check_away(data, remaining_calls):\n time_now = time.time()\n\n tmux_stat = os.stat(os.getenv('TMUX').split(',')[0])\n if (tmux_stat.st_mode & stat.S_IXUSR) == 0 and tmux_stat.st_ctime <= time_now - TIMEOUT_DETACHED:\n set_away('detached')\n return weechat.WEECHAT_RC_OK\n\n activity_path = os.path.join(os.getenv('XDG_RUNTIME_DIR'), 'activity')\n if os.path.exists(activity_path):\n last_activity = max(tmux_stat.st_ctime, os.stat(activity_path).st_ctime)\n else:\n last_activity = tmux_stat.st_ctime\n\n if last_activity <= time_now - TIMEOUT_IDLE:\n set_away('idle')\n else:\n set_away('')\n\n return weechat.WEECHAT_RC_OK\n","repo_name":"Waldteufel/gadgets","sub_path":"weechat/tmux11_away.py","file_name":"tmux11_away.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"31953567116","text":"\nimport pulp, global_var\n\ndef ip(fs, parameters):\n size = len(parameters['server_cost'])\n \n my_lp_problem = pulp.LpProblem(\"My LP Problem\", pulp.LpMaximize)\n \n q=[]\n k=0\n for i in range(size):\n q.append([])\n for j in range(size): \n k=i*size+j+1\n q[i].append([pulp.LpVariable('q%d' %k , lowBound=0, cat='Integer')])\n \n #objective\n my_lp_problem += pulp.lpSum(parameters['topology'][i][j] * parameters['utility'][i][j] * q[i][j] for i in fs for j in fs), 'utility'\n\n \n ####################### mpf constraint\n #capacity constraints\n \n for i in fs: #給出去的\n my_lp_problem += pulp.lpSum([q[i][j] for j in fs]) <= 
parameters['server_capacity'][i]\n \n ####################### as & lsf constraint \n #capacity constraints\n #for i in fs: \n # my_lp_problem += pulp.lpSum([q[i][j] for j in fs if i!=j]) <= parameters['server_capacity'][i]-parameters['user_request'][i]\n \n ####################### mpf & lsf & as constraint\n #demand constraints\n for i in fs: #別人給的\n my_lp_problem += pulp.lpSum([q[j][i] for j in fs]) <= parameters['user_request'][i]\n \n ####################### lsf & lso constraint\n #for i in fs: \n # my_lp_problem += pulp.lpSum(q[i][i] ) == min(parameters['server_capacity'][i], parameters['user_request'][i])\n \n \n \n my_lp_problem.solve()\n #print(my_lp_problem)\n #show variable value\n v_name=[]\n v_varvalue=[]\n for v in my_lp_problem.variables():\n v_name.append(v.name)\n v_varvalue.append(v.varValue)\n #print(v.name, \"=\", v.varValue)\n \n utility=pulp.value(my_lp_problem.objective)\n if utility==None:\n utility=0\n return utility, v_name, v_varvalue","repo_name":"yijia1127cs06g/coalition","sub_path":"ip.py","file_name":"ip.py","file_ext":"py","file_size_in_byte":1774,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"30842599829","text":"import boto3\ntranslate_client = boto3.client('translate')\ndef lambda_handler(event, context): \n # event['Details']['Parameters']['exampleParameterKey1'];\n print(event)\n review_text = event['Details']['Parameters']['text']\n targetLanaugageCode = event['Details']['Parameters']['targetLanguageCode']\n resp = {}\n \n if targetLanaugageCode == \"en\": \n resp['TranslatedText'] = review_text\n return resp\n \n translate_response = translate_client.translate_text(\n Text=review_text,\n SourceLanguageCode='auto',\n TargetLanguageCode=targetLanaugageCode\n )\n print(translate_response) \n return 
translate_response","repo_name":"johncrn/aws-hack-for-purpose-23-zeta","sub_path":"zetatranslate.py","file_name":"zetatranslate.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"432634158","text":"import streamlit as st\r\nimport numpy as np\r\nimport pickle\r\nmodel = pickle.load(open('C:/Users/91983/mlapp/model.pkl', 'rb'))\r\nst.title('Placement Predictor')\r\nst.write('Enter the details below to predict your placement status.')\r\ncgpa = st.text_input('CGPA', '0')\r\niq = st.text_input('IQ', '0')\r\nprofile_score = st.text_input('Profile Score', '0')\r\nif st.button('Predict'):\r\n input_query = np.array([[cgpa, iq, profile_score]])\r\n result = model.predict(input_query)[0]\r\n st.write(f'Placement: {result}')","repo_name":"debamitr1012/mlapp","sub_path":"streamlit_app.py","file_name":"streamlit_app.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"14535201131","text":"\"\"\"\r\n Author: CaptCorpMURICA\r\n Project: Iterators\r\n File: iteratorChallenge.py\r\n Creation Date: 12/4/2017, 2:06 PM\r\n Description: Create a list of items (you may use either strings or numbers in the list),\r\n then create an iterator using the iter() function.\r\n\r\n Use a FOR loop \"n\" times, where n is the number of items in your list.\r\n Each time round the loop, use next() on your list to print the next item.\r\n\r\n Hint: Use the len() function rather than counting the number of items in the list.\r\n\"\"\"\r\n\r\nspam = [\"spam\", \"spam\", \"spam\", \"spam\", \"spam\", \"bacon\", \"spam\"]\r\nspam_iterator = iter(spam)\r\n\r\ni = 0\r\nfor n in range(0, len(spam)):\r\n ingredient = next(spam_iterator)\r\n print(\"Ingredient {}: {}\".format(i, ingredient))\r\n i += 1\r\n\r\nprint(\"=============\")\r\n\r\n# His solution\r\nmy_list = [\"monday\", \"tuesday\", 
\"wednesday\", \"thursday\", \"friday\", \"saturday\", \"sunday\"]\r\n\r\nmy_iterator = iter(my_list)\r\n\r\nfor i in range(0, len(my_list)):\r\n next_item = next(my_iterator)\r\n print(next_item)","repo_name":"CaptCorpMURICA/TrainingClasses","sub_path":"Udemy/TimBuchalka/CompletePythonMasterclass/Iterators/iteratorChallenge.py","file_name":"iteratorChallenge.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"70490868003","text":"# from selenium import webdriver\n\n# option = webdriver.ChromeOptions()\n# option.add_argument(r'--user-data-dir=C:\\Users\\ytlWin\\AppData\\Local\\Google\\Chrome\\User Data\\Profile 6')\n# driver = webdriver.Chrome(option)\n\n# # driver.get(\"https://cart.books.com.tw/member/login\")\n# driver.get(\"https://www.google.com.tw/\")\n\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nchrome_options = Options()\nchrome_options.add_argument(\"--disable-extensions\")\nchrome_options.add_argument(\"--user-data-dir=C:\\\\Users\\\\ytlWin\\\\AppData\\\\Local\\\\Google\\\\Chrome\\\\User Data\\\\Profile 6\")\ndriver = webdriver.Chrome(options=chrome_options)\ndriver.get(\"https://www.google.com\")","repo_name":"ytl0623/Web-Crawler","sub_path":"profile.py","file_name":"profile.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"9095364899","text":"import os\nimport json\nfrom flask import Flask, render_template, request, jsonify, url_for, redirect\nimport data\nimport argparse\n\napp = Flask(__name__)\n\n'''\nTo do:\nsave current idx.\n'''\n\n@app.route('/')\ndef index():\n return render_template('count_check.html', start_idx=app.config['raw_data_loader'].get_last_aligned_index())\n\n@app.route('/align')\ndef align():\n return render_template('align.html', 
start_idx=app.config['data_loader'].get_last_aligned_index())\n\n#using GET request so that index is visible to users and can be used to restart.\n@app.route('/get_data_by_index', methods = ['GET'])\ndef get_data_by_index():\n data = request.args\n curr_idx = data['curr_idx']\n entry = app.config['data_loader'].get_data(curr_idx)\n strin = jsonify(entry)\n return strin\n\n@app.route('/shift_alignment', methods = ['GET'])\ndef shift_alignment():\n data = request.args\n col_to_shift = data['col_to_shift']\n shift_at = data['shift_at']\n shift_by = data['shift_by']\n\n if col_to_shift == 'text':\n col_to_shift = ['id', 'raw_text', 'text']\n else:\n col_to_shift = ['image_path']\n\n app.config['data_loader'].shift_col_at_by(shift_at, shift_by, col=col_to_shift)\n\n print(app.config['data_loader'].data.loc[int(shift_at)])\n\n #save\n return \"done\"\n\n@app.route('/save_progress', methods = ['GET'])\ndef save_progress():\n data = request.args\n curr_idx = data['curr_idx']\n app.config['data_loader'].save_progress(curr_idx)\n return \"done\"\n\n@app.route('/reload', methods = ['GET'])\ndef reload():\n data = request.args\n app.config['data_loader'].load(True)\n return jsonify({'index_to_load': app.config['data_loader'].get_last_aligned_index()})\n #return \"done\"\n\n@app.route('/mark_page', methods = ['GET'])\ndef mark_page():\n data = request.args\n page = data['page']\n app.config['raw_data_loader'].mark_page(page)\n return \"done\"\n\n@app.route('/unmark_page', methods = ['GET'])\ndef unmark_page():\n data = request.args\n page = data['page']\n app.config['raw_data_loader'].unmark_page(page)\n return \"done\"\n\n@app.route('/get_raw_data_by_index', methods = ['GET'])\ndef get_raw_data_by_index():\n data = request.args\n curr_idx = data['curr_idx']\n entry = app.config['raw_data_loader'].get_data(curr_idx)\n print(entry['image_path'])\n #print(entry['image_path'], entry['raw_text'], entry['id'])\n strin = jsonify(entry)\n return 
strin\n\n@app.route('/save_raw_progress', methods = ['GET'])\ndef save_raw_progress():\n data = request.args\n curr_idx = data['curr_idx']\n app.config['raw_data_loader'].save_progress(curr_idx)\n return \"done\"\n\ndef str2bool(v):\n if isinstance(v, bool):\n return v\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Web interface to mark incorrect extractions')\n parser.add_argument('image_folder', help='Path where pictures have been auto-extracted')\n parser.add_argument('raw_image_folder', help='Path of PDF page images')\n parser.add_argument('marked_page_folder', help='Path where incorrect pages will be stored')\n parser.add_argument('alignment_csv', help='Path of csv where image and text is aligned')\n parser.add_argument('ocr_text', help='Path of text file where OCR text is stored')\n parser.add_argument('load_previous_state', type=str2bool, default=True, help='Load previously saved progress')\n\n args = parser.parse_args()\n #app.config['data_loader'] = data.DataLoader('../final_extraction/PPM3-aligned.csv', '../final_extraction/PPM3', '../PPM/PPM_text_files/truncated/PPM-3ocr.txt')\n app.config['data_loader'] = data.DataLoader(args.alignment_csv, args.image_folder, args.ocr_text, args.raw_image_folder)\n app.config['raw_data_loader'] = data.RawDataLoader(args.image_folder, args.raw_image_folder, args.marked_page_folder, args.load_previous_state)\n app.run(debug=True, host='0.0.0.0')\n","repo_name":"AkshitaB/pompeii_explorer","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"32578093676","text":"import json\nfrom django.views import View\nfrom django.http.response import 
HttpResponse\nfrom django.conf import settings\nfrom django.shortcuts import render, redirect\nimport itertools\nimport random\nfrom datetime import datetime\nfrom django.template.defaulttags import register\n\n\n# Create your tests here.\n\n\nclass WelcomeView(View):\n def get(self, request, *args, **kwargs):\n return redirect('/news/')\n\n\nclass ViewNews(View):\n def get(self, request, post_id, *args, **kwargs):\n with open(settings.NEWS_JSON_PATH, 'r') as json_file:\n json_dict = json.load(json_file)\n for i in json_dict:\n if post_id == i['link']:\n new = i\n context = {'new': new}\n return render(request, 'news/news.html', context)\n\n\nclass MainNews(View):\n def get(self, request, *args, **kwargs):\n with open(settings.NEWS_JSON_PATH, 'r') as json_file:\n news_list = json.load(json_file)\n def myFunc(e):\n return e['created']\n news_list.sort(reverse=True, key=myFunc)\n context = {\n 'news_list': news_list,\n }\n query = str(request.GET.get('q'))\n for i in news_list:\n if query in i['title']:\n context = {\n 'news_list': [i]\n }\n return render(request, 'news/all_news.html', context)\n\n\n\nclass CreateNews(View):\n def get(self, request, *args, **kwargs):\n return render(request, 'news/create.html')\n\n def post(self, request, *args, **kwargs):\n def random_link():\n link_list = []\n with open(settings.NEWS_JSON_PATH, 'r') as json_file:\n news_list = json.load(json_file)\n for i in news_list:\n link_list.append(i['link'])\n n = random.getrandbits(32)\n if n in link_list:\n n = random.getrandbits(32)\n else:\n return n\n created = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n news_dict = {\n 'created': str(created),\n 'text': request.POST.get('text'),\n 'title': request.POST.get('title'),\n 'link': random_link()\n }\n with open(settings.NEWS_JSON_PATH) as f:\n data = json.load(f)\n data.append(news_dict)\n with open(settings.NEWS_JSON_PATH, 'w') as f:\n json.dump(data, f)\n return 
redirect('/news/')\n\n","repo_name":"NikitaTutik/News_Portal.Django","sub_path":"HyperNews Portal/task/hypernews/news/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"526550063","text":"# -*- coding: utf-8 -*-\n# author: 王树根\n# email: wangshugen@ict.ac.cn\n# date: 2018-12-25 15:23\nimport re\n\nfrom easy_tornado.utils import load_file_contents\n\ndblp_regex_fmt = '
  • (.*?).' \\\n '.*?
  • '\ndblp_paper_regex = re.compile(dblp_regex_fmt)\n\n\ndef retrieve_paper_titles(data_path, **kwargs):\n source = kwargs.pop('source', None)\n if source is None or source != 'dblp':\n return\n\n contents = load_file_contents(data_path, pieces=False)\n try:\n contents = contents.decode('utf-8')\n except (UnicodeDecodeError, AttributeError):\n pass\n\n return dblp_paper_regex.findall(contents)\n\n\ndef filter_paper_titles(paper_titles, subject=None, exclude_subject=None, logic_and=True):\n if subject is None and exclude_subject is None:\n return paper_titles, len(paper_titles)\n\n filtered = []\n for paper_title in paper_titles:\n y = paper_title.lower()\n if logic_and:\n criterion = all([y.find(x) != -1 for x in subject])\n else:\n criterion = any([y.find(x) != -1 for x in subject])\n if not criterion:\n continue\n\n if exclude_subject is None or not any([y.find(x) != -1 for x in exclude_subject]):\n filtered.append(paper_title)\n return filtered, len(filtered)\n\n\ndef filter_keys(key_holder, sub_key):\n filtered = []\n for _key in sorted(key_holder.keys()):\n if sub_key is None or _key.find(sub_key) == -1:\n continue\n filtered.append(_key)\n return filtered\n","repo_name":"ictnlp-wshugen/research_finder","sub_path":"core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":1640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"16200612472","text":"\r\nclass SRamp(object):\r\n \"\"\" Hold at vi for dt of time.\r\n (TABLE, APPEND, ch, xparam, value, duration)\r\n \r\n value = vi\r\n duration = dt\r\n \"\"\"\r\n \r\n inputs = ['vi', 'dt']\r\n \r\n def __init__(self, c = None, ch = None, xparam = None):\r\n self.c = c\r\n self.ch = ch\r\n self.xparam = xparam\r\n self.value = c.get('vi')\r\n self.duration = c.get('dt')\r\n \r\n def to_entry(self):\r\n return 'TABLE, APPEND, {}, {}, {}Hz, {}s'.format(self.ch, self.xparam,\r\n self.value, self.duration)\r\n \r\nclass LinRamp(object):\r\n \"\"\" 
Linearly ramp from vi to vf in dt of time, rate is refresh rate.\r\n (TABLE, RAMP, ch, xparam, start, stop, duration, count)\r\n duration = rate, duration per step\r\n count = int(dt/rate), number of steps\r\n \r\n start = vi\r\n stop = vf\r\n \"\"\"\r\n \r\n inputs = ['vi', 'vf',\r\n 'dt', 'rate']\r\n \r\n def __init__(self, c = None, ch = None, xparam = None):\r\n self.c = c\r\n self.ch = ch\r\n self.xparam = xparam\r\n self.start = c.get('vi')\r\n self.stop = c.get('vf')\r\n \r\n rate = c.get('rate')\r\n dt = c.get('dt')\r\n self.duration = rate\r\n self.count = int(dt/rate)\r\n \r\n def to_entry(self):\r\n return 'TABLE, RAMP, {}, {}, {}Hz, {}Hz, {}s, {}'.format(self.ch, self.xparam,\r\n self.start, self.stop,\r\n self.duration, self.count)\r\n \r\nclass Loop(object):\r\n \"\"\" Loop from source to dest until condition is satisfied.\r\n (TABLE, LOOP, ch, source, dest, condition) \r\n \r\n source and dest can be negative numbers, which are then taken as offsets.\r\n If source is negative, it is taken as an offset from the end of\r\n the table (requires TABLE,ENTRIES to be set). 
If dest is negative,\r\n it is taken as the offset from the source.\r\n \r\n The condition can be an integer in the range [1, 4095],\r\n corresponding to the number of times to execute the loop,\r\n or a hardware descriptor flag of the form IOxy,\r\n indicating to repeat until the digital input pin x exhibits behaviour y,\r\n as described below.\r\n \r\n vi = source\r\n vf = dest\r\n condition = condition\r\n H: terminate loop on logic level High at loop instruction\r\n L: terminate loop on logic level Low at loop instruction\r\n F: terminate loop after falling edge occurs\r\n R: terminate loop after rising edge occurs\r\n \"\"\"\r\n \r\n def __init__(self, c = None, ch = None, xparam = None):\r\n self.c = c\r\n self.ch = ch\r\n self.xparam = xparam\r\n \r\n self.source = c.get('vi')\r\n self.dest = c.get('vf')\r\n self.condition = c.get('condition')\r\n \r\n def to_entry(self):\r\n return 'TABLE, LOOP, {}, {}, {}, {}'.format(self.ch, self.source,\r\n self.dest, self.condition)\r\n \r\nclass EntryMaker(object):\r\n \r\n available_xparam = ['FREQ'] # currently we only work with FREQ.. 
might need PHASE later..\r\n fm_gain = 8 # Defalut to be 2 MHz\r\n \r\n available_entries = {'s': SRamp,\r\n 'lin': LinRamp, \r\n 'loop': Loop,}\r\n \r\n def __init__(self, request = None, ch = None):\r\n self.request = request\r\n self.ch = ch\r\n self.xparam = self.get_xparam(request)\r\n \r\n def get_xparam(self, request):\r\n xparam = request.get('xparam')\r\n if xparam == 'FREQ':\r\n self.fm_gain = request.get('fm_gain')\r\n return xparam\r\n \r\n def get_xparam_entry(self):\r\n if self.xparam == 'FREQ':\r\n return 'TABLE, XPARAM, {}, {}, {}'.format(self.ch, self.xparam, self.fm_gain)\r\n else:\r\n return 'TABLE, XPARAM, {}, {}'.format(self.ch, self.xparam)\r\n \r\n def get_xparam_unit(self):\r\n if self.xparam == 'FREQ':\r\n return 'Hz'\r\n \r\n def merge_entries(self, request):\r\n merged_request = []\r\n for key, value in request.items():\r\n if 'entry' in key:\r\n merged_request.extend(value) \r\n return merged_request\r\n \r\n def get_entries(self):\r\n entries = []\r\n entries.append('TABLE, CLEAR, {}'.format(self.ch))\r\n \r\n entry = self.get_xparam_entry()\r\n entries.append(entry)\r\n \r\n merged_entries = self.merge_entries(self.request)\r\n \r\n for j in merged_entries:\r\n entry = self.available_entries[j['type']](j, self.ch, self.xparam).to_entry()\r\n entries.append(entry)\r\n \r\n # The last entry can not be LinRamp/Loop..\r\n if merged_entries[-1]['type'] == 'lin':\r\n entries.append('TABLE, APPEND, {}, {}, {}{}, 32ns'.format(self.ch,\r\n self.xparam,\r\n merged_entries[-1]['vf'],\r\n self.get_xparam_unit()))\r\n \r\n elif merged_entries[-1]['type'] == 'loop':\r\n try:\r\n entries.append('TABLE, APPEND, {}, {}, {}{}, 32ns'.format(self.ch,\r\n self.xparam,\r\n merged_entries[-2]['vf'],\r\n self.get_xparam_unit()))\r\n except:\r\n entries.append('TABLE, APPEND, {}, {}, {}{}, 32ns'.format(self.ch,\r\n self.xparam,\r\n merged_entries[-2]['vi'],\r\n self.get_xparam_unit()))\r\n \r\n \r\n entries.append('TABLE, ARM, {}'.format(self.ch))\r\n return 
entries\r\n \r\n ","repo_name":"jameszheng1990/LabRad_Strontium_A","sub_path":"rf/devices/Moglabs_XRF/AdvancedTable_helper.py","file_name":"AdvancedTable_helper.py","file_ext":"py","file_size_in_byte":6276,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"71259147041","text":"# You are given the heads of two sorted linked lists list1 and list2.\n# Merge the two lists in a one sorted list. The list should be made by splicing together the nodes of the first two lists.\n# Return the head of the merged linked list.\n\n# Example 1:\n\n# Input: list1 = [1,2,4], list2 = [1,3,4]\n# Output: [1,1,2,3,4,4]\n\n# Example 2:\n\n# Input: list1 = [], list2 = []\n# Output: []\n\n# Example 3:\n\n# Input: list1 = [], list2 = [0]\n# Output: [0]\n \n\n# Constraints:\n\n# The number of nodes in both lists is in the range [0, 50].\n# -100 <= Node.val <= 100\n# Both list1 and list2 are sorted in non-decreasing order.\n\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n \nclass Solution:\n def mergeTwoLists(self, list1: Optional[ListNode], list2: Optional[ListNode]) -> Optional[ListNode]:\n # create new list node of -1 and set to prev pointer\n listR = ListNode(-1)\n prev = listR\n \n #if list1 and list2 not None\n while list1 and list2:\n #if the current val of list 1 is less or equal to list 2 prev pointer next item is list1 val\n #set the list1 val as the next val in the linked list\n if list1.val<=list2.val:\n prev.next=list1\n list1=list1.next\n #we do the same thing as above but in the case where the current val of list 2 is less than list 1\n else:\n prev.next=list2\n list2=list2.next\n #set the prev pointer after every iteration of the loop so we know what to connect the next result to\n prev = prev.next\n \n #when we run out of one list, the other list is not always empty, so we connect it as everything else will be in order and larger\n 
prev.next = list1 if list1 is not None else list2\n \n return listR.next","repo_name":"alexmcdermid/leetcode","sub_path":"python/MergeTwoSortedLists.py","file_name":"MergeTwoSortedLists.py","file_ext":"py","file_size_in_byte":1922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72312220000","text":"from functools import partial\n\nfrom sellmo.contrib.attribute.admin import BaseProductAttributeMixin\n\nfrom django import forms\nfrom django.forms import ValidationError\nfrom django.forms.models import ModelForm\nfrom django.contrib import admin\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.utils.text import capfirst\nfrom django.utils import six\nfrom django.contrib.admin.sites import NotRegistered\nfrom django.contrib.contenttypes.models import ContentType\n\nfrom .forms import (\n VariantAttributeFormFactory, VariantAttributeFormMixin,\n ProductVariationFormFactory, ProductVariationFormMixin\n)\n\n\nclass VariantAttributeMixin(BaseProductAttributeMixin):\n def get_form(self, request, obj=None, **kwargs):\n kwargs['form'] = (\n self.get_attribute_formfactory(\n request,\n VariantAttributeFormFactory,\n prefix='attribute',\n mixin=VariantAttributeFormMixin,\n obj=obj,\n **kwargs\n ).factory()\n )\n return super(VariantAttributeMixin, self).get_form(\n request,\n obj=obj,\n **kwargs\n )\n\n def get_formset(self, request, obj=None, **kwargs):\n kwargs['form'] = (\n self.get_attribute_formfactory(\n request,\n VariantAttributeFormFactory,\n prefix='attribute',\n mixin=VariantAttributeFormMixin,\n obj=obj,\n **kwargs\n ).factory()\n )\n return super(VariantAttributeMixin, self).get_formset(\n request,\n obj=obj,\n **kwargs\n )\n\n def get_fieldsets(self, request, obj=None):\n fieldsets = super(VariantAttributeMixin, self).get_fieldsets(\n request, obj\n )\n fields = (\n self.get_attribute_formfactory(\n request,\n VariantAttributeFormFactory,\n prefix='attribute',\n 
mixin=VariantAttributeFormMixin,\n obj=obj\n ).get_attribute_formfield_names()\n )\n fieldsets += ((_(\"Attributes\"), {'fields': fields}), )\n return fieldsets\n\n\nclass ProductVariationMixin(BaseProductAttributeMixin):\n def get_form(self, request, obj=None, **kwargs):\n kwargs['form'] = (\n self.get_attribute_formfactory(\n request,\n ProductVariationFormFactory,\n prefix='variations',\n mixin=ProductVariationFormMixin,\n obj=obj,\n **kwargs\n ).factory()\n )\n return super(ProductVariationMixin, self).get_form(\n request,\n obj=obj,\n **kwargs\n )\n\n def save_model(self, request, obj, form, change):\n obj.save()\n form.save_variations()\n\n def get_fieldsets(self, request, obj=None):\n fieldsets = super(ProductVariationMixin, self).get_fieldsets(\n request, obj\n )\n fields = (\n self.get_attribute_formfactory(\n request,\n ProductVariationFormFactory,\n prefix='variations',\n mixin=ProductVariationFormMixin,\n obj=obj\n ).get_attribute_formfield_names()\n )\n fieldsets += ((_(\"Variations\"), {'fields': fields}), )\n return fieldsets\n","repo_name":"trb116/pythonanalyzer","sub_path":"data/input/adaptivdesign/django-sellmo/sellmo/contrib/variation/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":3419,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"26161106875","text":"# Import PuLP modeler functions\nfrom pulp import *\n\nPilots = [p for p in range(1, 9)]\n\nLanguages = ['E', 'F', 'D', 'N']\n\nPlaneType = ['R', 'T', 'B', 'F', 'S']\n\nLangSkills = [\n [20, 14, 0, 13, 0, 0, 8, 8],\n [12, 0, 0, 10, 15, 20, 8, 9],\n [0, 20, 12, 0, 8, 11, 14, 12],\n [0, 0, 0, 0, 17, 0, 0, 16]\n]\nLangSkills = makeDict([Languages, Pilots], LangSkills, 0)\n\nFlyingSkills = [\n [18, 12, 15, 0, 0, 0, 8, 0],\n [10, 0, 9, 14, 15, 8, 12, 13],\n [0, 17, 0, 11, 13, 10, 0, 0],\n [0, 0, 14, 0, 0, 12, 16, 0],\n [0, 0, 0, 0, 12, 18, 0, 18]\n]\nFlyingSkills = makeDict([PlaneType, Pilots], FlyingSkills, 
0)\n\nPilotCompatibility = dict()\n\nfor p1 in range(1, 9):\n for p2 in range(p1 + 1, 9):\n lc = False\n for l in Languages:\n if (LangSkills[l][p1] >= 10) and (LangSkills[l][p2] >= 10):\n lc = True\n break\n if not lc: continue\n\n pcs = 0\n pt = 0\n for p in PlaneType:\n if (FlyingSkills[p][p1] >= 10) and (FlyingSkills[p][p2] >= 10):\n pc = FlyingSkills[p][p1] + FlyingSkills[p][p2] \n if (pcs < pc): pcs = pc\n if (pcs > 20): \n PilotCompatibility[(p1, p2)] = pcs\n\n\nprint(PilotCompatibility)\n\nprob = LpProblem(\"FlightCrews\", LpMaximize)\n\nfly = LpVariable.dicts(\"Fly\", Pilots, cat=\"Binary\")\n\nfor key in PilotCompatibility:\n (p1,p2) = key\n prob += (\n fly[p1] + fly[p2] >= 1\n )\n\nprob += (\n lpSum([PilotCompatibility[key] for key in PilotCompatibility]),\n \"MaxPilotSkill\",\n)\nprob.solve()\n# maximise score such that all pilots flow\n\n\n# The status of the solution is printed to the screen\nprint(\"Status:\", LpStatus[prob.status])\n\n# The optimised objective function value is printed to the screen\nprint(\"Best = \", value(prob.objective))\n\n# Print the locations\nfor p in Pilots:\n if fly[p].value()==1:\n print(\"Pilot %2d flys\" % p)\n\n\n","repo_name":"simonb65/MATH1326","sub_path":"Week 04/FlightCrews.py","file_name":"FlightCrews.py","file_ext":"py","file_size_in_byte":1902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"22932883477","text":"import numpy as np\nimport pickle as pk\nfrom KSG.ADC import ADCAlgorithm\nfrom KSG.PRNN import PRNNAlgorithm\nfrom KSG.DataWriter import data_writer_b\n\n'''\n define parameters\n'''\n# file name\nfile_name = 'input_data'\n# define the number of samples and the size of time window, which is the number of samples needed to calculate a causal matrix\nN = 20\n# interval is the interval of the time window\ninterval = 10\n# tau is the time delay\ntau = 2\n# k in the KNN algorithm\nk = 4\n\n'''\n read data\n'''\nfile = open('./data/' + file_name, 
'rb')\ndata = pk.load(file)\nfile.close()\n\nprint('The data has been loaded.')\nnode_n = data.shape[0]\ntime_n = data.shape[1]\nprint('The number of individuals is', node_n)\nprint('The number of time stamps is', time_n)\nprint('The dimension of the data is', data.shape[2])\n\n# t is the number of the causal matrix\nt = int((time_n - N - tau)/interval) + 1\nprint('The number of generated causal matirx is', t)\n# record number of loop\nl = 0\ncmi_result = np.zeros(shape=[t, node_n, node_n], dtype=np.float64)\n\nfor time_start in range(0, time_n - N - tau, interval):\n # time_start is the time to start prediction and time_end is the time to end prediction.\n time_end = time_start + N\n print('k =', k, 'sample:', time_start, '-', time_end)\n nodes = data[:, time_start: time_end + tau]\n for i in range(data.shape[0]):\n '''\n run ADC algorithm.\n '''\n k_set, k_size = ADCAlgorithm(i, nodes, k, tau)\n\n '''\n run PRNN algorithm.\n '''\n k_set, k_size, causal_entropy = PRNNAlgorithm(i, k_set, k_size, nodes, k, tau)\n\n print('#', end='')\n # save the result into cmi_result\n cmi_result[l, :, i] = causal_entropy\n print('\\n', cmi_result[l])\n l += 1\n\n # save the result to file oCEP_output\n data_writer_b('./result/' + 'oCEP_output', cmi_result)","repo_name":"cccs-data/MMO-detection","sub_path":"oCEP.py","file_name":"oCEP.py","file_ext":"py","file_size_in_byte":1863,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"71586039521","text":"\"\"\"AutoML Tasks\"\"\"\nimport logging\n\nfrom .image_classification import *\nfrom .object_detection import *\n# from .utils import *\n\nlogger = logging.getLogger(__name__)\nmsg = (\n \"We plan to deprecate auto from gluoncv on release 0.12.0.\"\n \"Please consider using autogluon.vision instead, which provides the same functionality.\"\n 
\"https://auto.gluon.ai/stable/tutorials/image_prediction/index.html\"\n)\nlogger.warning(msg)\n","repo_name":"dmlc/gluon-cv","sub_path":"gluoncv/auto/tasks/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","stars":5662,"dataset":"github-code","pt":"54"} +{"seq_id":"21450440680","text":"'''\r\n13.3 Palindrome Partitioning II\r\n描述\r\nGiven a string s, partition s such that every substring of the partition is a palindrome.\r\nReturn the minimum cuts needed for a palindrome partitioning of s.\r\nFor example, given s = ”aab”,\r\nReturn 1 since the palindrome partitioning [”aa”,”b”] could be produced using 1 cut.\r\n'''\r\n\r\ndef Partitioning2(A):\r\n f=[0 for i in range(len(A)+1)]\r\n p=[[False for i in range(len(A))]for i in range(len(A))]\r\n for i in range(0,len(A)+1):\r\n f[i]=len(A)-1-i\r\n for i in range(len(A)-1,-1,-1):\r\n for j in range(i,len(A),1):\r\n if (A[i] == A[j] and (j - i < 2 or p[i + 1][j - 1])):\r\n p[i][j] =True;\r\n f[i]=min(f[i],f[j+1]+1)\r\n return f[0]\r\n\r\n\r\n\r\nA ='aabbbeccd'\r\nres=Partitioning2(A)\r\nprint(res)\r\n","repo_name":"loopGod/leetcode","sub_path":"mycode-python/L-13-3.py","file_name":"L-13-3.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"5613544438","text":"# 咕噜咕噜的丁丁\n# 不浪费一分一秒\n# 你可以的\n# 时间:2021/10/31 21:45\nqq=input('请输入你的扣扣号:')\npwd=input('请输入你的密��:')\nif qq=='123456' and pwd=='123456':\n print('登录成功!')\nelse:\n print('sorry!账号或者密码不正确!')\n","repo_name":"cc852852/vippython","sub_path":"chap17/实操案例四/demo2.py","file_name":"demo2.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"30081612408","text":"#!/usr/bin/env python\nimport rospy\nimport sys\nfrom std_msgs.msg import String, Bool\nimport speech_recognition as sr\nimport 
pyglet\nimport os\nimport time\nfrom scipy.io import wavfile\nimport pygame\n#from rosarnl.srv import *\n\n\n# Create the publisher to publish the topic with the next goal\npub_speech = rospy.Publisher('speech_recognition_node/activate_speech', String, queue_size=10)\npub_error = rospy.Publisher('speech_recognition_node/speech_recognition_error', Bool, queue_size=10)\n\n\n\ndef escucha_micro_callback(data):\n\tr = sr.Recognizer()\n\ttime.sleep(1)\n\tprint(data.data)\n\twith sr.Microphone() as source:\n\t\t#speak('Say something!')\n\t\tprint('Say something')\n\t\ttry:\n\t\t\taudio = r.listen(source,10)\n\t\texcept sr.WaitTimeoutError:\n\t\t\taudio = None\n\t\t\tpub_error.publish(True)\n\n\n\tprint(\"Okey\")\n\ttext = ''\n\t#fs, data = wavfile.read('pruebasonido.wav')\n\ttry:\n\t\tif audio != None:\n\t\t\ttext = r.recognize_google(audio)\n\texcept sr.UnknownValueError :\n\t\tprint(\"Error 1 - speach recognition can not understand\")\n\t\tpub_error.publish(True)\n\n\texcept sr.RequestError as e:\n\t\tprint (\"Error 2 - Could not request results from Google Speech Recognition service\")\n\n\tprint('understood ->' + str(text))\n\tif text != '':\n\t\tpub_speech.publish(text)\n\n\ndef main():\n\n\trospy.init_node('speech_recognition_node', anonymous=True)\n\trospy.sleep(1)\n\n\t#Subscribe to speak\n\trospy.Subscriber('speak_node/activate_speech_recognition', Bool, escucha_micro_callback)\n\n\trospy.spin()\n\nif __name__ == '__main__':\n\ttry:\n\t\tmain()\n\texcept rospy.ROSInterruptException:\n\t\tpass\n\texcept KeyboardInterrupt:\n\t\tsys.exit(0)\n","repo_name":"JaimeTolosaDeLaFuente/Pioneer","sub_path":"src/speach_recognition.py","file_name":"speach_recognition.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"39140537111","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom u_plot import *\r\nfrom plot_trajectory import *\r\n# import 
matplotlib\r\n# matplotlib.rcParams['font.sans-serif'] = 'NSimSun,Times New Roman'\r\n# matplotlib.rcParams['text.usetex'] = True\r\nfont_size = 15\r\n\r\n'''\r\nPick trajectories data for corresponding $\\alpha$ \r\n'''\r\nA = torch.load('./data/hyper_a/data.pt')\r\nA = A[:,-8:-1,:,:]\r\nprint(A.shape)\r\n\r\n\r\n\r\ndef plot_grid():\r\n plt.grid(b=True, which='major', color='gray', alpha=0.6, linestyle='dashdot', lw=1.5)\r\n # minor grid lines\r\n plt.minorticks_on()\r\n plt.grid(b=True, which='minor', color='beige', alpha=0.8, ls='-', lw=1)\r\n\r\n\r\ndef plot_a(a):\r\n L = np.load('./data/hyper_a/a_{}.npy'.format(a))\r\n r_L = np.zeros(1000-len(L))\r\n L = np.concatenate((L,r_L),axis=0)\r\n # np.concatenate((a,b),axis=0)\r\n plt.plot(np.arange(len(L)),L,'b')\r\n # plt.xlabel('Iterations')\r\n plt.ylim(-0.01,1)\r\n plt.yticks([])\r\n plt.title(r'$\\alpha={}$'.format(a))\r\n\r\n\r\nfor i in range(7):\r\n # plt.axes([0.1+0.17*i, 0.7, 0.1, 0.1])\r\n plt.subplot(4, 7, i+1)\r\n plot_a(float(format(0.65+i*0.05,'.2f')))\r\n plot_grid()\r\n if i == 0:\r\n plt.yticks([0,10,20])\r\n plt.ylabel('Loss',fontsize=font_size)\r\n plt.text(-5,5,'Training',rotation=90,fontsize=font_size)\r\n else:\r\n plt.yticks([0, 10, 20], ['', '', ''])\r\n if i == 3:\r\n plt.xlabel('Iterations',fontsize=font_size) \r\n\r\n\r\nfor i in range(7):\r\n plt.subplot(4, 7, 7 + i+1)\r\n plot_trajec(A[0,i,:,0:60000:10],float(format(0.65+i*0.05,'.2f')))\r\n plot_grid()\r\n if i == 0:\r\n plt.yticks([-10,-5,0,5,10])\r\n plt.ylabel(r'$\\theta$',fontsize=font_size)\r\n plt.text(-1,-5,'Trajectory',rotation=90,fontsize=font_size)\r\n else:\r\n plt.yticks([-10,-5, 0,5, 10], ['', '', '','',''])\r\n if i == 3:\r\n plt.xlabel('Time',fontsize=font_size) \r\n\r\nfor i in range(7):\r\n plt.subplot(4, 7, 14 + i+1)\r\n plot_trajec(A[1,i,:,0:60000:10],float(format(0.65+i*0.05,'.2f')))\r\n plot_grid()\r\n if i == 0:\r\n plt.yticks([-10,-5,0,5,10])\r\n plt.ylabel(r'$\\dot{\\theta}$',fontsize=font_size)\r\n 
plt.text(-1,-5,'Trajectory',rotation=90,fontsize=font_size)\r\n else:\r\n plt.yticks([-10,-5, 0,5, 10], ['', '', '','',''])\r\n if i == 3:\r\n plt.xlabel('Time',fontsize=font_size) \r\n\r\n\r\nfor i in range(7):\r\n # plt.axes([0.1+0.17*i, 0.1, 0.1, 0.1])\r\n plt.subplot(4, 7, 21 + i+1)\r\n draw(float(format(0.65+i*0.05,'.2f')))\r\n if i == 0:\r\n plt.yticks([-5,0,5])\r\n plt.ylabel(r'$\\dot{\\theta}$',fontsize=font_size)\r\n plt.text(-15,-3,r'Control $u$',rotation=90,fontsize=font_size)\r\n if i == 3:\r\n plt.xlabel(r'$\\theta$',fontsize=font_size) \r\nplt.colorbar()\r\n\r\nplt.show()","repo_name":"jingddong-zhang/Neural-Stochastic-Control","sub_path":"Neural Stochastic Control/hyper_a/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":2757,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"31605981481","text":"\"\"\"\n学习predict_proba、predict、decision_function:展示模型对于输入样本的评判结果\n\n\"\"\"\nimport numpy as np\nfrom sklearn.svm import SVC\nimport numpy as np\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC\n\n# classes_属性:\n# 在sklearn中,对于训练好的分类模型,模型都有一个classes_属性,classes_属性中按顺序保存着训练样本的类别标记\n# 该输出结果的顺序就对应后续要说predict_proba、predict、decision_function输出结果的顺序或顺序组合\n\n# 1. 样本标签从0开始的场景下训练分类模型\n# x = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1], [-1, 1], [-1, 2], [1, -1], [1, -2]])\n# y = np.array([2, 2, 3, 3, 0, 0, 1, 1])\n# clf = LogisticRegression()\n# clf.fit(x, y)\n# print(clf.classes_) # [0 1 2 3]\n\n# # 2. 
样本标签不是从0开始的场景下训练分类模型\n# x = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1], [-1, 1], [-1, 2], [1, -1], [1, -2]])\n# y = np.array([6, 6, 2, 2, 4, 4, 8, 8])\n# clf2 = LogisticRegression()\n# clf2.fit(x, y)\n# print(clf2.classes_) # [2 4 6 8]\n#\n# #\n# print(clf2.predict_proba([[-1, -1]])) # [[0.12532009 0.63284608 0.20186022 0.03997361]]\n# # 说明:\n# # 这行代码输出的就是对于clf2预测[[-1, -1]]类别的值\n# # 输出的结果为[[0.12532009 0.63284608 0.20186022 0.03997361]]\n# # 在训练数据中[-1, -1]属于类别6,\n# # 在predict_proba输出概率中,最大概率值出现在第三个位置上,第三个位置对应的classes_类别刚好也是类别6 。\n# # 也就是说,predict_proba输出概率最大值索引位置对应的classes_元素就是样本所属的类别\n\n\n# print(clf2.predict([[-1, -1]])) # [6]\n\n\n#\nx = np.array([[1, 2, 3], [1, 3, 4], [2, 1, 2], [4, 5, 6], [3, 5, 3], [1, 7, 2]])\ny = np.array([3, 3, 3, 2, 2, 2])\n\nclf = SVC(probability=True)\nclf.fit(x, y)\nprint(clf.decision_function(x))\n\n# 返回array([2, 3]),其中2为negetive,3为positive\nprint(clf.classes_)\n# decision_function的函数说明:\n# Returns\n# -------\n# X : ndarray of shape (n_samples, n_classes * (n_classes-1) / 2)\n# Returns the decision function of the sample for each class\n# in the model.\n# If decision_function_shape='ovr', the shape is (n_samples,\n# n_classes).\n# 返回值:返回一个 (n_samples, n_classes * (n_classes-1) / 2)的数组。 参数decision_function_shape=\"ovo\"\n# 为模型中的每个类返回样本的决策函数。\n# 如果参数decision_function_shape='ovr',返回的形状是(n_samples, n_classes)\n\n# 先解释一下什么是ovo he ovr\n#\n# ovr: \"one vs rest\" ,\n# 假如训练数据中包含[0, 1, 2, 3]四个分类,那么分别将\n# (1) 0 作为正样本,其余的1, 2, 3作为负样本;\n# (2) 1 作为正样本,其余的0, 2, 3作为负样本;\n# (3) 2 作为正样本,其余的0, 1, 2作为负样本;\n# (4) 3 作为正样本,其余的0, 1, 2作为负样本;\n# 训练4个分类器,每个分类器预测的结果表示属于对应正类也就是0, 1, 2, 3 的概率。\n# 这样对于一个输入样本就相当于要进行4个二分类,然后取输出结果最大的数值对应的classes_类别。\n\n# ovo: \"One-vs-One\"。车轮战术。\n# 同样,假如训练数据中包含[0, 1, 2, 3]四个分类,\n# 先将类别0作为正样本,类别1,类别2,类别3依次作为负样本训练3个分类器,\n# 然后以类别1为正样本,类别0,类别2, 类别3作为负样本训练3个分类器,以此类推。\n# 由于类别0为正样本,类别1为负样本和类别1为正样本、类别0为负样本实质上是一样的,所以不需要重复训练。\n\n# 综上。训练样本有n_classes个类别,则\n# 'ovr'模式需要训练n_classes个分类器,\n# ‘ovo’模式需要训练n_classes * (n_classes - 1) / 2 个分类器\n\n# 
对于SVM来说,有多少个分类器就得有多少个分隔超平面,有多少个分隔超平面就得有多少个decision_function值\n\n# 二分类模型中,decision_function返回的数组形状等于样本个数,\n# 也就是一个样本返回一个decision_function值.\n# 并且,此时的decision_function_shape参数失效 ,因为只需要训练一个分类器。(无关\"ovr\"与\"ovo\")\n# classes_中的第一个标签代表是负样本,第二个标签代表正样本。 [2 3]中, 2为negetive,3为positive\n# 如上面代码输出的值 [ 1.00089036 0.64493601 0.97960658 -1.00023781 -0.9995244 -1.00023779]\n\n# decision_function的符号\n# 大于0表示正样本的可信度大于负样本,否则可信度小于负样本。 选择可信度高的作为预测值,\n# 即:表示前3个样本为类别3,后3个样本为类别2\n\nprint(\"-\" * 50)\n\n# 对于多分类的decision_function\nX = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1], [-1, 1], [-1, 2], [1, -1], [1, -2]])\ny = np.array([2, 2, 3, 3, 0, 0, 1, 1]) # 可以得到的信息:(1) classes_: [0 1 2 3] (2) ovr:4 ovo: 6\nclf = SVC(probability=True, decision_function_shape=\"ovr\") # SVC多分类模型默认采用ovr模式\nclf.fit(X, y)\n\n# One-vs-One 按照decision_function的得分[01, 02, 03, 12, 13, 23]判断每个分类器的分类结果,然后进行投票\n# One-vs-Rest 选择decision_function的得分[0-Rest, 1-Rest, 2-Rest, 3-Rest]最大的作为分类结果\n\nprint(\"decision_function:\\n\", clf.decision_function([[-1, -1]]))\nprint(\"predict:\\n\", clf.predict([[-1, -1]])) # precidt预测样本对应的标签类别\nprint(\"predict_proba:\\n\", clf.predict_proba([[-1, -1]])) # predict_proba 预测样本对应各个类别的概率 这个是得分,每个分类器的得分,取最大得分对应的类。\n\n# decision_function输出的最大值对应的正样本类别就是decision_function认为置信度最高的预测类别\nprint(\"-\" * 50)\nprint(\"-\" * 50)\nprint(\"-\" * 50)\n\n# ovo场景:\nX = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1], [-1, 1], [-1, 2], [1, -1], [1, -2]])\ny = np.array([2, 2, 3, 3, 0, 0, 1, 1])\nclf = SVC(probability=True, decision_function_shape=\"ovo\")\nclf.fit(X, y)\n\nprint(\"decision_function:\\n\", clf.decision_function([[-1, -1]]))\n# 输出的结果为: [[-0.07609727 -1.00023294 0.27849207 -0.83425862 0.24756982 1.00006256]]\n# 分析:\n# -0.07609727对应01分类器,且数值小于0,则分类结果为后者,即类别1\n# -1.00023294对应02分类器,且数值小于0,则分类结果为后者,即类别2\n# 0.27849207对应03分类器,且数值大于0,则分类结果为前者,即类别0\n# -0.834258626对应12分类器,且数值小于0,则分类结果为后者,即类别2\n# 0.24756982对应13分类器,且数值大于0,则分类结果为前者,即类别1\n# 1.00006256对应23分类器,且数值大于0,则分类结果为前者,即类别2\n# 最终得票数:{类别0: 1, 类别1: 2, 类别2: 3, 
类别3: 0}\n# 对以上分类结果voting投票,多数获胜,即最终分类结果为类别2。\n\n\n\"\"\"\n1. predict_proba: 输出样本属于各个类别的概率,取概率最大的类别作为样本的预测结果\n2. predict: 预测输入样本所属的类别\n3. decision_function: 计算样本距离每个分类边界的距离,并由此可以推算出predict的预测结果\n\"\"\"\n","repo_name":"LeenonGo/sklearn-learn","sub_path":"00-before/day02/day02-02-2.py","file_name":"day02-02-2.py","file_ext":"py","file_size_in_byte":7143,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"28531333132","text":"# -*- coding: utf-8 -*-\nfrom __future__ import (absolute_import, unicode_literals, print_function)\n\nimport os\nimport sys\nimport time\nimport logging\nimport itertools\n\nlog = logging.getLogger('sknn')\n\n\nimport numpy\n\nfrom .pywrap2 import (datasets, space, sgd)\nfrom .pywrap2 import learning_rule as lr, termination_criteria as tc\nfrom .dataset import DenseDesignMatrix, SparseDesignMatrix, FastVectorSpace\n\nfrom ..base import BaseBackend\n\n\nclass NeuralNetworkBackend(BaseBackend):\n\n def _create_input_space(self, X):\n if self.is_convolution:\n # Using `b01c` arrangement of data, see this for details:\n # http://benanne.github.io/2014/04/03/faster-convolutions-in-theano.html\n # input: (batch size, channels, rows, columns)\n # filters: (number of filters, channels, rows, columns)\n return space.Conv2DSpace(shape=X.shape[1:3], num_channels=X.shape[-1])\n else:\n InputSpace = space.VectorSpace if self.debug else FastVectorSpace\n return InputSpace(X.shape[-1])\n\n def _create_dataset(self, input_space, X, y=None):\n if self.is_convolution:\n view = input_space.get_origin_batch(X.shape[0])\n return DenseDesignMatrix(topo_view=view, y=y, mutator=self.mutator)\n else:\n if all([isinstance(a, numpy.ndarray) for a in (X, y) if a is not None]):\n return DenseDesignMatrix(X=X, y=y, mutator=self.mutator)\n else:\n return SparseDesignMatrix(X=X, y=y, mutator=self.mutator)\n\n def _create_trainer(self, dataset, cost):\n logging.getLogger('pylearn2.monitor').setLevel(logging.WARNING)\n if 
dataset is not None:\n termination_criterion = tc.MonitorBased(\n channel_name='objective',\n N=self.n_stable-1,\n prop_decrease=self.f_stable)\n else:\n termination_criterion = None\n\n if self.learning_rule == 'sgd':\n self._learning_rule = None\n elif self.learning_rule == 'adagrad':\n self._learning_rule = lr.AdaGrad()\n elif self.learning_rule == 'adadelta':\n self._learning_rule = lr.AdaDelta()\n elif self.learning_rule == 'momentum':\n self._learning_rule = lr.Momentum(self.learning_momentum)\n elif self.learning_rule == 'nesterov':\n self._learning_rule = lr.Momentum(self.learning_momentum, nesterov_momentum=True)\n elif self.learning_rule == 'rmsprop':\n self._learning_rule = lr.RMSProp()\n else:\n raise NotImplementedError(\n \"Learning rule type `%s` is not supported.\" % self.learning_rule)\n\n return sgd.SGD(\n cost=cost,\n batch_size=self.batch_size,\n learning_rule=self._learning_rule,\n learning_rate=self.learning_rate,\n termination_criterion=termination_criterion,\n monitoring_dataset=dataset)\n","repo_name":"lenguyenthedat/scikit-neuralnetwork","sub_path":"sknn/backend/pylearn2/nn.py","file_name":"nn.py","file_ext":"py","file_size_in_byte":2956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"54"} +{"seq_id":"6996729118","text":"import os\nimport shutil\n\n\n_ = os.path.dirname(os.path.realpath(__file__))\n\n\n# create a new, empty output directory\nshutil.rmtree(f\"{_}/build\", ignore_errors=True)\nos.makedirs(f\"{_}/build\", exist_ok=True)\nshutil.rmtree(f\"{_}/dist\", ignore_errors=True)\nos.makedirs(f\"{_}/dist\", exist_ok=True)\n\n\n# move the lib files in there\nshutil.copytree(f\"{_}/lib\", f\"{_}/build/lib\", dirs_exist_ok=True)\nshutil.copy(f\"{_}/rockeet.js\", f\"{_}/build/rockeet.js\")\nshutil.copy(f\"{_}/rockeetWeb.js\", f\"{_}/build/rockeetWeb.js\")\n\n\n# copy the web replacements\nshutil.copytree(f\"{_}/web\", f\"{_}/build/lib\", dirs_exist_ok=True)\n\n\nos.system(f\"browserify -o 
{_}/dist/bundle.js {_}/build/rockeetWeb.js\")\nos.system(f\"uglifyjs --compress --mangle -o {_}/dist/bundle.min.js {_}/dist/bundle.js\")\n","repo_name":"rockeet-ai/sdk-nodejs","sub_path":"browserify.py","file_name":"browserify.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"26554156096","text":"import uuid\nfrom typing import Union\n\nfrom django.db import models\nfrom django.contrib.postgres import fields\nfrom django.contrib.auth.models import AbstractUser, Group\nfrom django.contrib.postgres.fields import JSONField\nfrom django.core.exceptions import ValidationError\nfrom django.contrib.sites.models import Site\ntry:\n from django.utils import timezone\nexcept ImportError:\n from datetime import datetime as timezone\n\nROLE_ORGANIZATION_ADMIN = 'OrgAdmin'\nROLE_WORKFLOW_ADMIN = 'WorkflowAdmin'\nROLE_WORKFLOW_TEAM = 'WorkflowTeam'\nROLE_VIEW_ONLY = 'ViewOnly'\nROLES = (\n (ROLE_ORGANIZATION_ADMIN, ROLE_ORGANIZATION_ADMIN),\n (ROLE_WORKFLOW_ADMIN, ROLE_WORKFLOW_ADMIN),\n (ROLE_WORKFLOW_TEAM, ROLE_WORKFLOW_TEAM),\n (ROLE_VIEW_ONLY, ROLE_VIEW_ONLY),\n)\nDEFAULT_PROGRAM_NAME = 'Default program'\n\nPERMISSIONS_ORG_ADMIN = 15 # 1111\n\nPERMISSIONS_ADMIN = PERMISSIONS_ORG_ADMIN\n\nPERMISSIONS_WORKFLOW_ADMIN = PERMISSIONS_ORG_ADMIN\n\nPERMISSIONS_WORKFLOW_TEAM = 14 # 1110\n\nPERMISSIONS_VIEW_ONLY = 4 # 0100\n\n\nclass CoreSites(models.Model):\n name = models.CharField(blank=True, null=True, max_length=255)\n site = models.ForeignKey(Site, on_delete=models.CASCADE)\n privacy_disclaimer = models.TextField(blank=True, null=True)\n created = models.DateTimeField(auto_now=False, blank=True, null=True)\n updated = models.DateTimeField(auto_now=False, blank=True, null=True)\n whitelisted_domains = models.TextField(\"Whitelisted Domains\", null=True, blank=True)\n\n class Meta:\n verbose_name = \"Core Site\"\n verbose_name_plural = \"Core Sites\"\n\n def __str__(self):\n 
return self.name\n\n def save(self, *args, **kwargs):\n if kwargs.pop('new_entry', True):\n self.created = timezone.now()\n else:\n self.updated = timezone.now()\n return super(CoreSites, self).save(*args, **kwargs)\n\n\nclass Industry(models.Model):\n name = models.CharField(\"Industry Name\", max_length=255, blank=True, default=\"Humanitec\")\n description = models.TextField(\"Description/Notes\", max_length=765, null=True, blank=True)\n create_date = models.DateTimeField(null=True, blank=True)\n edit_date = models.DateTimeField(null=True, blank=True)\n\n class Meta:\n ordering = ('name',)\n verbose_name_plural = \"Industries\"\n app_label = 'workflow'\n\n def save(self, *args, **kwargs):\n if self.create_date is None:\n self.create_date = timezone.now()\n self.edit_date = timezone.now()\n super(Industry, self).save()\n\n def __str__(self):\n return self.name\n\n\nclass Organization(models.Model):\n \"\"\"\n The organization instance. There could be multiple organizations inside one application.\n When organization is created two CoreGroups are created automatically: Admins group and default Users group.\n \"\"\"\n organization_uuid = models.UUIDField(primary_key=True, default=uuid.uuid4, verbose_name='Organization UUID')\n name = models.CharField(\"Organization Name\", max_length=255, blank=True, default=\"Humanitec\", help_text=\"Each end user must be grouped into an organization\")\n description = models.TextField(\"Description/Notes\", max_length=765, null=True, blank=True, help_text=\"Descirption of organization\")\n organization_url = models.CharField(blank=True, null=True, max_length=255, help_text=\"Link to organizations external web site\")\n industries = models.ManyToManyField(Industry, blank=True, related_name='organizations', help_text=\"Type of Industry the organization belongs to if any\")\n level_1_label = models.CharField(\"Workflow Level 1 label\", default=\"Program\", max_length=255, blank=True, help_text=\"Label to display if needed for 
workflow i.e. Top Level Navigation, Primary, Program, etc. \")\n level_2_label = models.CharField(\"Workflow Level 2 label\", default=\"Project\", max_length=255, blank=True, help_text=\"Label to display if needed for workflow i.e. Second Level Navigation, Major, Project, etc. \")\n level_3_label = models.CharField(\"Workflow Level 3 label\", default=\"Component\", max_length=255, blank=True, help_text=\"Label to display if needed for workflow i.e. Third Level Navigation, Minor, Activity, etc. \")\n level_4_label = models.CharField(\"Workflow Level 4 label\", default=\"Activity\", max_length=255, blank=True, help_text=\"Label to display if needed for workflow i.e. Fourth Level Navigation, Sub, Sub-Activity, etc. \")\n create_date = models.DateTimeField(null=True, blank=True)\n edit_date = models.DateTimeField(null=True, blank=True)\n subscription_id = models.CharField(blank=True, null=True, max_length=50)\n used_seats = models.IntegerField(blank=True, null=True, default=0)\n oauth_domains = fields.ArrayField(models.CharField(\"OAuth Domains\", max_length=255, null=True, blank=True), null=True, blank=True)\n date_format = models.CharField(\"Date Format\", max_length=50, blank=True, default=\"DD.MM.YYYY\")\n phone = models.CharField(max_length=20, blank=True, null=True)\n\n class Meta:\n ordering = ('name',)\n verbose_name_plural = \"Organizations\"\n app_label = 'workflow'\n\n def __str__(self):\n return self.name\n\n def save(self, *args, **kwargs):\n is_new = self._state.adding\n if self.create_date is None:\n self.create_date = timezone.now()\n self.edit_date = timezone.now()\n super(Organization, self).save()\n if is_new:\n self._create_initial_groups()\n\n def _create_initial_groups(self):\n CoreGroup.objects.create(\n organization=self,\n is_org_level=True,\n name='Admins',\n permissions=PERMISSIONS_ORG_ADMIN\n )\n\n CoreGroup.objects.create(\n organization=self,\n is_org_level=True,\n is_default=True,\n name='Users',\n permissions=PERMISSIONS_VIEW_ONLY\n 
)\n\n\nclass CoreGroup(models.Model):\n \"\"\"\n CoreGroup model defines the groups of the users with specific permissions for the set of workflowlevel1's\n and workflowlevel2's (it has many-to-many relationship to WorkFlowLevel1 and WorkFlowLevel2 models).\n Permissions field is the decimal integer from 0 to 15 converted from 4-bit binary, each bit indicates permissions\n for CRUD. For example: 12 -> 1100 -> CR__ (allowed to Create and Read).\n \"\"\"\n uuid = models.CharField('CoreGroup UUID', max_length=255, default=uuid.uuid4, unique=True)\n name = models.CharField('Name of the role', max_length=80)\n organization = models.ForeignKey(Organization, blank=True, null=True, on_delete=models.CASCADE, help_text='Related Org to associate with')\n is_global = models.BooleanField('Is global group', default=False)\n is_org_level = models.BooleanField('Is organization level group', default=False)\n is_default = models.BooleanField('Is organization default group', default=False)\n permissions = models.PositiveSmallIntegerField('Permissions', default=PERMISSIONS_VIEW_ONLY, help_text='Decimal integer from 0 to 15 converted from 4-bit binary, each bit indicates permissions for CRUD')\n create_date = models.DateTimeField(default=timezone.now)\n edit_date = models.DateTimeField(null=True, blank=True)\n\n class Meta:\n ordering = ('name',)\n\n def __str__(self):\n return f'{self.name} <{self.organization}>'\n\n def save(self, *args, **kwargs):\n self.edit_date = timezone.now()\n super(CoreGroup, self).save(*args, **kwargs)\n\n @property\n def display_permissions(self) -> str:\n return '{0:04b}'.format(self.permissions if self.permissions < 16 else 15)\n\n\nclass CoreUser(AbstractUser):\n \"\"\"\n CoreUser is the registered user who belongs to some organization and can manage its projects.\n \"\"\"\n TITLE_CHOICES = (\n ('mr', 'Mr.'),\n ('mrs', 'Mrs.'),\n ('ms', 'Ms.'),\n )\n\n core_user_uuid = models.CharField(max_length=255, verbose_name='CoreUser UUID', default=uuid.uuid4, 
unique=True)\n title = models.CharField(blank=True, null=True, max_length=3, choices=TITLE_CHOICES)\n contact_info = models.CharField(blank=True, null=True, max_length=255)\n organization = models.ForeignKey(Organization, blank=True, null=True, on_delete=models.CASCADE, help_text='Related Org to associate with')\n core_groups = models.ManyToManyField(CoreGroup, verbose_name='User groups', blank=True, related_name='user_set', related_query_name='user')\n privacy_disclaimer_accepted = models.BooleanField(default=False)\n create_date = models.DateTimeField(default=timezone.now)\n edit_date = models.DateTimeField(null=True, blank=True)\n\n class Meta:\n ordering = ('first_name',)\n\n def __str__(self):\n return self.username\n\n def save(self, *args, **kwargs):\n is_new = self.pk is None\n if self.create_date is None:\n self.create_date = timezone.now()\n self.edit_date = timezone.now()\n super(CoreUser, self).save()\n if is_new:\n # Add default groups\n self.core_groups.add(*CoreGroup.objects.filter(organization=self.organization, is_default=True))\n\n @property\n def is_org_admin(self) -> bool:\n \"\"\"\n Check if user has organization level admin permissions\n \"\"\"\n if not hasattr(self, '_is_org_admin'):\n self._is_org_admin = self.core_groups.filter(permissions=PERMISSIONS_ORG_ADMIN, is_org_level=True).exists()\n return self._is_org_admin\n\n @property\n def is_global_admin(self) -> bool:\n \"\"\"\n Check if user has organization level admin permissions\n \"\"\"\n if self.is_superuser:\n return True\n if not hasattr(self, '_is_global_admin'):\n self._is_global_admin = self.core_groups.filter(permissions=PERMISSIONS_ADMIN, is_global=True).exists()\n return self._is_global_admin\n\n\nclass Internationalization(models.Model):\n language = models.CharField(\"Language\", blank=True, null=True, max_length=100)\n language_file = JSONField()\n create_date = models.DateTimeField(null=True, blank=True)\n edit_date = models.DateTimeField(null=True, blank=True)\n\n class 
Meta:\n ordering = ('language',)\n\n def __str__(self):\n return self.language\n\n def save(self, *args, **kwargs):\n if self.create_date is None:\n self.create_date = timezone.now()\n self.edit_date = timezone.now()\n super(Internationalization, self).save()\n\n\nclass WorkflowLevelType(models.Model):\n uuid = models.UUIDField(primary_key=True, default=uuid.uuid4)\n name = models.CharField(\"Name\", max_length=255, help_text=\"Name of workflow2 type\")\n create_date = models.DateTimeField(default=timezone.now)\n edit_date = models.DateTimeField(auto_now=True)\n\n class Meta:\n ordering = ('create_date', )\n\n\nclass WorkflowLevelStatus(models.Model):\n uuid = models.UUIDField(primary_key=True, default=uuid.uuid4)\n order = models.PositiveSmallIntegerField(default=0)\n name = models.CharField(\"Name\", max_length=255, help_text=\"Name of WorkflowLevelStatus\")\n short_name = models.SlugField(max_length=63, unique=True)\n create_date = models.DateTimeField(default=timezone.now)\n edit_date = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return self.name\n\n class Meta:\n ordering = ('order', )\n verbose_name = \"Workflow Level Status\"\n verbose_name_plural = \"Workflow Level Statuses\"\n\n\nclass WorkflowLevel1(models.Model):\n level1_uuid = models.CharField(max_length=255, editable=False, verbose_name='WorkflowLevel1 UUID', default=uuid.uuid4, unique=True)\n unique_id = models.CharField(\"ID\", max_length=255, blank=True, null=True, help_text=\"User facing unique ID field if needed\")\n name = models.CharField(\"Name\", max_length=255, blank=True, help_text=\"Top level workflow can have child workflowleves, name it according to it's grouping of children\")\n organization = models.ForeignKey(Organization, blank=True, on_delete=models.CASCADE, null=True, help_text='Related Org to associate with')\n description = models.TextField(\"Description\", max_length=765, null=True, blank=True, help_text='Describe how this collection of related workflows are 
used')\n user_access = models.ManyToManyField(CoreUser, blank=True)\n start_date = models.DateTimeField(null=True, blank=True, help_text='If required a time span can be associated with workflow level')\n end_date = models.DateTimeField(null=True, blank=True, help_text='If required a time span can be associated with workflow level')\n create_date = models.DateTimeField(null=True, blank=True)\n edit_date = models.DateTimeField(null=True, blank=True)\n sort = models.IntegerField(default=0) # sort array\n core_groups = models.ManyToManyField(CoreGroup, verbose_name='Core groups', blank=True, related_name='workflowlevel1s', related_query_name='workflowlevel1s')\n\n class Meta:\n ordering = ('name',)\n verbose_name = \"Workflow Level 1\"\n verbose_name_plural = \"Workflow Level 1\"\n\n def save(self, *args, **kwargs):\n if not 'force_insert' in kwargs:\n kwargs['force_insert'] = False\n if self.create_date is None:\n self.create_date = timezone.now()\n self.edit_date = timezone.now()\n\n super(WorkflowLevel1, self).save()\n\n def delete(self, *args, **kwargs):\n super(WorkflowLevel1, self).delete(*args, **kwargs)\n\n def __str__(self):\n if self.organization:\n return f'{self.name} <{self.organization.name}>'\n else:\n return self.name\n\n\nclass WorkflowLevel2(models.Model):\n level2_uuid = models.UUIDField(primary_key=True, default=uuid.uuid4, verbose_name='WorkflowLevel2 UUID', help_text=\"Unique ID\")\n description = models.TextField(\"Description\", blank=True, null=True, help_text=\"Description of the workflow level use\")\n name = models.CharField(\"Name\", max_length=255, help_text=\"Name of workflow level as it relates to workflow level 1\")\n notes = models.TextField(blank=True, null=True)\n parent_workflowlevel2 = models.IntegerField(\"Parent\", default=0, blank=True, help_text=\"Workflow level 2 can relate to another workflow level 2 creating multiple levels of relationships\")\n short_name = models.CharField(\"Code\", max_length=20, blank=True, null=True, 
help_text=\"Shortened name autogenerated\")\n workflowlevel1 = models.ForeignKey(WorkflowLevel1, verbose_name=\"Workflow Level 1\", on_delete=models.CASCADE, related_name=\"workflowlevel2\", help_text=\"Primary or parent Workflow\")\n create_date = models.DateTimeField(\"Date Created\", null=True, blank=True)\n created_by = models.ForeignKey(CoreUser, related_name='workflowlevel2', null=True, blank=True, on_delete=models.SET_NULL)\n edit_date = models.DateTimeField(\"Last Edit Date\", null=True, blank=True)\n core_groups = models.ManyToManyField(CoreGroup, verbose_name='Core groups', blank=True, related_name='workflowlevel2s', related_query_name='workflowlevel2s')\n start_date = models.DateTimeField(\"Start Date\", null=True, blank=True)\n end_date = models.DateTimeField(\"End Date\", null=True, blank=True)\n type = models.ForeignKey(WorkflowLevelType, null=True, blank=True, on_delete=models.SET_NULL, related_name='workflowlevel2s')\n status = models.ForeignKey(WorkflowLevelStatus, null=True, blank=True, on_delete=models.SET_NULL, related_name='workflowlevel2s')\n\n class Meta:\n ordering = ('name',)\n verbose_name = \"Workflow Level 2\"\n verbose_name_plural = \"Workflow Level 2\"\n\n def save(self, *args, **kwargs):\n if self.create_date is None:\n self.create_date = timezone.now()\n self.edit_date = timezone.now()\n\n super(WorkflowLevel2, self).save(*args, **kwargs)\n\n def __str__(self):\n return self.name\n\n @property\n def organization(self) -> Union[Organization, None]:\n return self.workflowlevel1.organization\n\n\nclass WorkflowTeam(models.Model):\n \"\"\"\n WorkflowTeam defines m2m relations between CoreUser and Workflowlevel1.\n It also defines a role for this relationship (as a fk to Group instance).\n \"\"\"\n team_uuid = models.CharField(max_length=255, editable=False, verbose_name='WorkflowLevel1 UUID', default=uuid.uuid4, unique=True)\n workflow_user = models.ForeignKey(CoreUser, blank=True, null=True, on_delete=models.CASCADE, 
related_name=\"auth_approving\", help_text='User with access/permissions to related workflowlevels')\n workflowlevel1 = models.ForeignKey(WorkflowLevel1, null=True, on_delete=models.CASCADE, blank=True, help_text='Related workflowlevel 1')\n start_date = models.DateTimeField(null=True, blank=True, help_text='If required a time span can be associated with workflow level access')\n end_date = models.DateTimeField(null=True, blank=True, help_text='If required a time span can be associated with workflow level access expiration')\n status = models.CharField(max_length=255, null=True, blank=True, help_text='Active status of access')\n role = models.ForeignKey(Group, null=True, blank=True, on_delete=models.CASCADE, help_text='Type of access via related group')\n create_date = models.DateTimeField(null=True, blank=True)\n edit_date = models.DateTimeField(null=True, blank=True)\n\n class Meta:\n ordering = ('workflow_user',)\n verbose_name = \"Workflow Team\"\n verbose_name_plural = \"Workflow Teams\"\n\n def clean(self):\n if self.role and self.role.name == ROLE_ORGANIZATION_ADMIN:\n raise ValidationError(\n 'Workflowteam role can not be ROLE_ORGANIZATION_ADMIN'\n )\n\n def save(self, *args, **kwargs):\n if self.create_date is None:\n self.create_date = timezone.now()\n self.edit_date = timezone.now()\n super(WorkflowTeam, self).save()\n\n def __str__(self):\n return f'{self.workflow_user} - {self.role} <{self.workflowlevel1}>'\n\n @property\n def organization(self) -> Union[Organization, None]:\n return self.workflowlevel1.organization if self.workflowlevel1 else None\n\n\nclass WorkflowLevel2Sort(models.Model):\n workflowlevel1 = models.ForeignKey(WorkflowLevel1, null=True, on_delete=models.CASCADE, blank=True)\n workflowlevel2_parent = models.ForeignKey(WorkflowLevel2, on_delete=models.CASCADE, null=True, blank=True)\n workflowlevel2_pk = models.UUIDField(\"UUID to be Sorted\", default='00000000-0000-4000-8000-000000000000')\n sort_array = JSONField(null=True, 
blank=True, help_text=\"Sorted JSON array of workflow levels\")\n create_date = models.DateTimeField(null=True, blank=True)\n edit_date = models.DateTimeField(null=True, blank=True)\n\n class Meta:\n ordering = ('workflowlevel1', 'workflowlevel2_pk')\n verbose_name = \"Workflow Level Sort\"\n verbose_name_plural = \"Workflow Level Sort\"\n\n def save(self, *args, **kwargs):\n if self.create_date is None:\n self.create_date = timezone.now()\n self.edit_date = timezone.now()\n super(WorkflowLevel2Sort, self).save()\n\n def __str__(self):\n return self.workflowlevel1\n\n @property\n def organization(self) -> Union[Organization, None]:\n return self.workflowlevel1.organization if self.workflowlevel1 else None\n\n\nTEMPLATE_RESET_PASSWORD, TEMPLATE_INVITE = 1, 2\nTEMPLATE_TYPES = (\n (TEMPLATE_RESET_PASSWORD, 'Password resetting'),\n (TEMPLATE_INVITE, 'Invitation'),\n)\n\n\nclass EmailTemplate(models.Model):\n \"\"\"Stores e-mail templates specific to organization\n \"\"\"\n organization = models.ForeignKey(Organization, on_delete=models.CASCADE, verbose_name='Organization', help_text='Related Org to associate with')\n subject = models.CharField('Subject', max_length=255)\n type = models.PositiveSmallIntegerField('Type of template', choices=TEMPLATE_TYPES)\n template = models.TextField(\"Reset password e-mail template (text)\", null=True, blank=True)\n template_html = models.TextField(\"Reset password e-mail template (HTML)\", null=True, blank=True)\n\n class Meta:\n unique_together = ('organization', 'type', )\n verbose_name = \"Email Template\"\n verbose_name_plural = \"Email Templates\"\n\n def __str__(self):\n return f'{self.type} ({self.organization})'\n","repo_name":"gholcomb/buildly","sub_path":"workflow/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":20203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"5892352246","text":"from functools import wraps\n\nfrom flask import 
jsonify, render_template, request\n\nfrom mangoapi.exceptions import (\n SourceSite5xxError,\n SourceSiteResponseError,\n SourceSiteTimeoutError,\n SourceSiteUnexpectedError,\n)\n\nfrom .persistence import verify_token\n\n\ndef process_token(required=True):\n def decorator(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n token = request.cookies.get(\"token\")\n if not token:\n if required:\n return (\n jsonify({\"message\": \"Missing 'token' cookie.\"}),\n 401,\n )\n else:\n request.token = None\n request.user_id = None\n return f(*args, **kwargs)\n\n user_id = verify_token(token)\n if user_id is None:\n return jsonify({\"message\": \"Invalid token.\"}), 401\n\n request.token = token\n request.user_id = user_id\n\n return f(*args, **kwargs)\n\n return decorated_function\n\n return decorator\n\n\nSOURCE_SITE_ERRORS = {\n SourceSiteTimeoutError: {\n \"code\": \"source_site_timeout\",\n \"message\": \"Source site took too long to respond. Try again later.\",\n },\n SourceSite5xxError: {\n \"code\": \"source_site_5xx\",\n \"message\": \"Source site crapped the bed. 
Try again later.\",\n },\n SourceSiteUnexpectedError: {\n \"code\": \"source_site_unexpected\",\n \"message\": \"Unexpected error when requesting source site.\",\n },\n}\n\n\ndef handle_source_site_errors(format=\"json\"):\n assert format in (\"json\", \"html\"), f\"{format} ain't no format I ever heard of!\"\n\n def outer_func(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n try:\n return f(*args, **kwargs)\n except SourceSiteResponseError as err:\n resp = SOURCE_SITE_ERRORS[err.__class__]\n resp[\"detail\"] = err.__dict__\n if format == \"json\":\n return jsonify(resp), 500\n elif format == \"html\":\n return render_template(\"error.html\", **resp), 500\n\n return decorated_function\n\n return outer_func\n","repo_name":"nhanb/pytaku","sub_path":"src/pytaku/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":2266,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"28877285897","text":"import matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom kivy.metrics import dp\n\n#optimized draw on Agg backend\nmpl.rcParams['path.simplify'] = True\nmpl.rcParams['path.simplify_threshold'] = 1.0\nmpl.rcParams['agg.path.chunksize'] = 1000\n\nfont_size_axis_title=dp(13)\nfont_size_axis_tick=dp(12) \n\nseaborn_package=True\ntry:\n import seaborn as sns\nexcept:\n seaborn_package=False\n \n#avoid conflict between mouse provider and touch (very important with touch device)\nfrom kivy.config import Config\nConfig.set('input', 'mouse', 'mouse,disable_on_activity')\n\nfrom kivy.lang import Builder\nfrom kivy.app import App\nfrom kivy.core.window import Window\nimport numpy as np\nfrom random import randint\nfrom legend_widget import MatplotlibInteractiveLegend\n\nKV = '''\n#:import MatplotFigure graph_widget\n\nScreen\n figure_wgt1:figure_wgt1\n figure_wgt2:figure_wgt2\n figure_wgt3:figure_wgt3\n figure_wgt4:figure_wgt4\n \n BoxLayout:\n orientation:'vertical'\n BoxLayout:\n 
size_hint_y:0.3\n Button:\n text:\"home\"\n on_release:app.home()\n ToggleButton:\n group:'touch_mode'\n state:'down'\n text:\"pan\" \n on_release:\n app.set_touch_mode('pan')\n self.state='down'\n ToggleButton:\n group:'touch_mode'\n text:\"zoom box\" \n on_release:\n app.set_touch_mode('zoombox')\n self.state='down' \n ToggleButton:\n group:'touch_mode'\n text:\"drag legend\" \n on_release:\n app.set_touch_mode('drag_legend')\n self.state='down' \n\n BoxLayout:\n orientation:'vertical'\n BoxLayout:\n MatplotFigure:\n id:figure_wgt1\n legend_do_scroll_x:False\n MatplotFigure:\n id:figure_wgt2\n BoxLayout:\n MatplotFigure:\n id:figure_wgt3\n\n MatplotFigure:\n id:figure_wgt4 \n \n'''\n\n\nclass Test(App):\n lines = []\n instance_dict = dict()\n\n def build(self): \n self.screen=Builder.load_string(KV)\n return self.screen\n\n def on_start(self, *args):\n\n\n fig, ax = plt.subplots(1, 1)\n fig.subplots_adjust(right=0.7)\n \n \n for i in range(10):\n x=[randint(0, 9) for p in range(0, 10)]\n x.sort()\n y=[randint(0, 9) for p in range(0, 10)]\n ax.plot(x, y,label='line' + str(i+1))\n\n self.screen.figure_wgt1.figure = ax.figure\n ax.legend(loc=(1.04, -0.2))\n \n MatplotlibInteractiveLegend(self.screen.figure_wgt1)\n \n fig2, ax2 = plt.subplots(1, 1)\n\n x = [2,4,5,7,6,8,9,11,12,12]\n y = [1,2,3,4,5,6,7,8,9,10]\n \n sc1 = ax2.scatter(x, y, s=30, color='magenta', alpha=0.7, marker='x', picker=3,label='scatter')\n sc2 = ax2.scatter(np.array(x)+2, np.array(y)+1, s=30, color='r', alpha=0.7, marker='x', picker=3,label='scatter2')\n sc3 = ax2.scatter(np.array(x)+3, np.array(y)+3, s=30, color='k', alpha=0.7, marker='x', picker=3,label='scatter3')\n ax2.legend(loc=4)\n \n self.screen.figure_wgt2.figure = ax2.figure\n self.screen.figure_wgt2.fast_draw = False #update axis during pan/zoom\n \n MatplotlibInteractiveLegend(self.screen.figure_wgt2)\n \n fig3, ax3 = plt.subplots(1, 1)\n x = np.linspace(0, 1)\n \n # Plot the lines y=x**n for n=1..4.\n for n in range(1, 5):\n 
ax3.plot(x, x**n, label=\"n={0}\".format(n))\n ax3.legend(loc=\"upper left\",\n ncol=2, shadow=True, title=\"Legend\", fancybox=True)\n ax3.get_legend().get_title().set_color(\"red\")\n\n self.screen.figure_wgt3.figure = ax3.figure\n \n MatplotlibInteractiveLegend(self.screen.figure_wgt3)\n\n fig4, ax4 = plt.subplots(1, 1)\n if seaborn_package:\n df = sns.load_dataset(\"penguins\")\n sns.barplot(ax=ax4,data=df, x=\"island\", y=\"body_mass_g\", hue=\"sex\")\n self.screen.figure_wgt4.figure = ax4.figure\n self.screen.figure_wgt4.fast_draw = False #update axis during pan/zoom \n ax4.legend(title=\"sex\",loc=1) \n MatplotlibInteractiveLegend(self.screen.figure_wgt4,legend_handles='variante')\n \n \n def set_touch_mode(self,mode):\n self.screen.figure_wgt1.touch_mode=mode\n self.screen.figure_wgt2.touch_mode=mode\n self.screen.figure_wgt3.touch_mode=mode\n self.screen.figure_wgt4.touch_mode=mode\n\n def home(self):\n self.screen.figure_wgt1.home()\n self.screen.figure_wgt2.home()\n self.screen.figure_wgt3.home()\n self.screen.figure_wgt4.home()\n \nTest().run()","repo_name":"mp-007/kivy_matplotlib_widget","sub_path":"examples/example_legend_matplotlib/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5028,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"54"} +{"seq_id":"3501660412","text":"import os\nimport subprocess\nfrom pathlib import Path\nfrom typing import Union\n\n\nfrom azure.ai.ml import MLClient\nfrom azure.ai.resources.entities.models import Model, PromptflowModel\nfrom azure.ai.resources._utils._dockerfile_utils import create_dockerfile\nfrom azure.ai.resources._utils._scoring_script_utils import create_mlmodel_file\n\n\nclass ModelOperations():\n def __init__(\n self,\n ml_client: MLClient,\n **kwargs\n ):\n self._ml_client = ml_client\n\n def package(\n self,\n model: Union[Model, PromptflowModel],\n output: Union[str, os.PathLike]=Path.cwd()\n ) -> None:\n output_path = 
Path(output/\"model_package\")\n output_path.mkdir(exist_ok=True)\n if isinstance(model, Model):\n if model.chat_module and model.loader_module:\n raise Exception(\"Only one of chat_module or loader_module can be provided to Model\")\n if model.chat_module:\n # create custom model dockerfile\n create_dockerfile(model, output_path, \"chat\")\n elif model.loader_module:\n # create mlflow dockerfile\n create_mlmodel_file(model)\n create_dockerfile(model, output_path, \"mlflow\")\n elif \"MLmodel\" in [path for path in os.listdir(model.path)]:\n create_dockerfile(model, output_path, \"mlflow\")\n else:\n raise Exception(\"Either one of chat_module or loader_module must be provided to Model if MLmodel is not present in Model.path \")\n elif isinstance(model, PromptflowModel):\n try:\n import promptflow\n except ImportError as e:\n print('In order to create a package for a promptflow, please make sure the promptflow SDK is installed in your environment')\n raise e\n # hack since pf build under the hood uses multi-processing to create new process. 
Due to implementation differences between\n # windows and linux of creating a new process, we cannot use the SDK function directly and instead need to use the CLI\n # in a separate process\n subprocess.call([\"pf\", \"flow\", \"build\", \"--source\", model.path, \"--output\", output_path, \"--format\", \"docker\"])\n else:\n raise Exception(\"Passed in model is not supported for packaging\")\n","repo_name":"Azure/azure-sdk-for-python","sub_path":"sdk/ai/azure-ai-resources/azure/ai/resources/operations/_model_operations.py","file_name":"_model_operations.py","file_ext":"py","file_size_in_byte":2371,"program_lang":"python","lang":"en","doc_type":"code","stars":3916,"dataset":"github-code","pt":"54"} +{"seq_id":"13702646195","text":"#coding:utf-8\nimport os\n\nAPP_HOME = '/root/newtempo'\n\nLIVE_ROOT = os.path.join(APP_HOME, 'data', 'liveroot', 'channels')\n\nNOTIFY_ADDRESS = [('127.0.0.1', 3334)]\n\n\n#growing file dir\nGROWING_FILE_DIR = os.path.join(APP_HOME, 'data','tdlive')\n# cared suffix\nGROWING_FILE_SUFFIX = '.flv'\n#httpd server port\nHTTPD_SRC_SER_PORT = 20000\n#growingfile check frequency\nTIMER_INTERVAL = 1\n\n\n#Memory Fragment Step length\nMEMORY_FRAGMENT_STEP_LENGTH = 5\n#Max Pending times\nMAX_INACTIVE_TIMES = 5\n\nMAX_SUB_PIECE_SIZE = 10 * 1024 # 0x2800\n\nNEXT_REQ_DELAY_TIME = 2000\nDEFAULT_RETURN_BYTES = (2 * 1024 * 1024 * (NEXT_REQ_DELAY_TIME/1000))/8\n\n#Vod 资源文件,支持伪206请求\nVOD_RES_DIR = os.path.join(APP_HOME, 'data')\nVOD_RES_SUFFIX = '.flv'\n","repo_name":"pingansdaddy/newtempo","sub_path":"src/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"22522322362","text":"import logging\nfrom numbers import Number\nimport time\nfrom typing import Optional\n\nimport wandb\n\nfrom fairseq.logging.meters import AverageMeter\nfrom fairseq.logging.progress_bar import BaseProgressBar, rename_logger, 
TensorboardProgressBarWrapper, \\\n AzureMLProgressBarWrapper\n\nlogger = logging.getLogger(__name__)\n\n\nclass WandBProgressBarWrapper(BaseProgressBar):\n \"\"\"Log to Weights & Biases.\"\"\"\n\n def __init__(self, wrapped_bar, wandb_project, run_name=None):\n self.wrapped_bar = wrapped_bar\n if wandb is None:\n logger.warning(\"wandb not found, pip install wandb\")\n return\n\n # reinit=False to ensure if wandb.init() is called multiple times\n # within one process it still references the same run\n # also: https://docs.wandb.ai/guides/track/launch#init-start-error\n num_tries = 0\n last_exc = None\n success = False\n while num_tries < 30:\n try:\n wandb.init(id=wandb.util.generate_id(),\n project=wandb_project,\n reinit=False,\n resume=\"allow\",\n name=run_name,\n settings=wandb.Settings(start_method=\"fork\")\n )\n success = True\n break\n except Exception as e:\n num_tries += 1\n last_exc = e\n print(\"Retrying\")\n time.sleep(10)\n if not success:\n raise last_exc\n\n def __iter__(self):\n return iter(self.wrapped_bar)\n\n def log(self, stats, tag=None, step=None):\n \"\"\"Log intermediate stats to tensorboard.\"\"\"\n self._log_to_wandb(stats, tag, step)\n self.wrapped_bar.log(stats, tag=tag, step=step)\n\n def print(self, stats, tag=None, step=None):\n \"\"\"Print end-of-epoch stats.\"\"\"\n self._log_to_wandb(stats, tag, step)\n self.wrapped_bar.print(stats, tag=tag, step=step)\n\n def update_config(self, config):\n \"\"\"Log latest configuration.\"\"\"\n if wandb is not None:\n wandb.config.update(config)\n self.wrapped_bar.update_config(config)\n\n def _log_to_wandb(self, stats, tag=None, step=None):\n if wandb is None:\n return\n if step is None:\n step = stats[\"num_updates\"]\n\n prefix = \"\" if tag is None else tag + \"/\"\n\n for key in stats.keys() - {\"num_updates\"}:\n if isinstance(stats[key], AverageMeter):\n wandb.log({prefix + key: stats[key].val}, step=step)\n elif isinstance(stats[key], Number):\n wandb.log({prefix + key: stats[key]}, 
step=step)\n\n\nclass ForceSimpleProgressBar(BaseProgressBar):\n \"\"\"A minimal logger for non-TTY environments.\"\"\"\n\n def __init__(self, iterable, epoch=None, prefix=None, log_interval=1000):\n super().__init__(iterable, epoch, prefix)\n self.log_interval = log_interval\n self.i = None\n self.size = None\n\n def __iter__(self):\n self.size = len(self.iterable)\n for i, obj in enumerate(self.iterable, start=self.n):\n self.i = i\n yield obj\n\n def log(self, stats, tag=None, step=None):\n \"\"\"Log intermediate stats according to log_interval.\"\"\"\n stats = self._format_stats(stats)\n postfix = self._str_commas(stats)\n with rename_logger(logger, tag):\n logger.info(\n \"{}: {:5d} / {:d} {}\".format(\n self.prefix, self.i + 1, self.size, postfix\n )\n )\n\n def print(self, stats, tag=None, step=None):\n \"\"\"Print end-of-epoch stats.\"\"\"\n postfix = self._str_pipes(self._format_stats(stats))\n with rename_logger(logger, tag):\n logger.info(\"{} | {}\".format(self.prefix, postfix))\n\n\ndef simple_progress_bar(\n iterator,\n log_interval: int = 100,\n log_file: Optional[str] = None,\n epoch: Optional[int] = None,\n prefix: Optional[str] = None,\n tensorboard_logdir: Optional[str] = None,\n wandb_project: Optional[str] = None,\n wandb_run_name: Optional[str] = None,\n azureml_logging: Optional[bool] = False,\n **kwargs\n):\n if log_file is not None:\n handler = logging.FileHandler(filename=log_file)\n logger.addHandler(handler)\n\n bar = ForceSimpleProgressBar(iterator, epoch, prefix, log_interval)\n\n if tensorboard_logdir:\n try:\n # [FB only] custom wrapper for TensorBoard\n import palaas # noqa\n from .fb_tbmf_wrapper import FbTbmfWrapper\n\n bar = FbTbmfWrapper(bar, log_interval)\n except ImportError:\n bar = TensorboardProgressBarWrapper(bar, tensorboard_logdir)\n\n if wandb_project:\n bar = WandBProgressBarWrapper(bar, wandb_project, run_name=wandb_run_name)\n\n if azureml_logging:\n bar = AzureMLProgressBarWrapper(bar)\n\n return 
bar\n","repo_name":"tran-khoa/joint-training-cascaded-st","sub_path":"projects/speech_translation/cli/progress_bar.py","file_name":"progress_bar.py","file_ext":"py","file_size_in_byte":4945,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"74536084321","text":"import pandas as pd\nimport sklearn.model_selection as ms\nimport sklearn.linear_model as lm\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\n\n\ndataset = pd.read_csv(\"\")\ndataset = dataset.dropna()\nX = dataset.iloc[:, :-2]\ny = dataset.iloc[:, -1].values\n\n######################################\n# Codificando variaveis Dummy\n######################################\nX_dummies = pd.get_dummies(X)\n\n######################################\n# Separar dados em Treino e Teste\n######################################\n\nX_train, X_test, y_train, y_test = ms.train_test_split(X_dummies, y, test_size = 1/5, random_state = 0)\n\n\n######################################\n# Treinando o modelo\n######################################\n\nregressor = lm.LinearRegression()\nregressor.fit(X_train, y_train)\n\n######################################\n# Previsao\n######################################\n\ny_pred = regressor.predict(X_test)\n\nnp.set_printoptions(precision=2)\nresult = np.concatenate((y_pred.reshape(len(y_pred),1), y_test.reshape(len(y_test),1)),1)\n\ndef undummify(df, prefix_sep=\"_\"):\n cols2collapse = {\n item.split(prefix_sep)[0]: (prefix_sep in item) for item in df.columns\n }\n series_list = []\n for col, needs_to_collapse in cols2collapse.items():\n if needs_to_collapse:\n undummified = (\n df.filter(like=col)\n .idxmax(axis=1)\n .apply(lambda x: x.split(prefix_sep, maxsplit=1)[1])\n .rename(col)\n )\n series_list.append(undummified)\n else:\n series_list.append(df[col])\n undummified_df = pd.concat(series_list, axis=1)\n return undummified_df\n\nX_reverse = undummify(X_test)\n\nX_reverse = 
X_reverse.reset_index(drop=True)\n\ny_compare = pd.DataFrame(result)\ny_compare = y_compare.rename(index=str, columns={0:'y_pred', 1:'y_test'})\ny_compare = y_compare.reset_index(drop=True)\n\nresultado_final = pd.concat([y_compare, X_reverse], axis=1)\n\n######################################\n# Valor Especifico\n######################################\n\nprint(regressor.predict([[10]]))\n\n\n\n\n'''Adicional\nNão consegui encontrar os dados solicitados na tarefa, adaptei outro csv DataSet para concluir o projeto chegando a um \nprojeto semelhante!\nUse a famosa Pesquisa Longitudinal Nacional da Juventude 1997-2011 para executar uma Regressão Multivariável para prever\n salários. Como os ganhos são determinados e o que isso implica na política governamental?'''","repo_name":"ThatianeDeboleto/100DaysOfCodePython","sub_path":"day-100/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2465,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"54"} +{"seq_id":"15115060287","text":"\"\"\"empty message\n\nRevision ID: 9165b3b23655\nRevises: \nCreate Date: 2019-11-04 20:05:15.592339\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '9165b3b23655'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('skus', sa.Column('date', sa.Date(), nullable=True))\n op.alter_column('skus', 'sku',\n existing_type=sa.VARCHAR(),\n nullable=True)\n op.drop_column('skus', 'order_date')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.add_column('skus', sa.Column('order_date', sa.DATE(), server_default=sa.text(\"('now'::text)::date\"), autoincrement=False, nullable=False))\n op.alter_column('skus', 'sku',\n existing_type=sa.VARCHAR(),\n nullable=False)\n op.drop_column('skus', 'date')\n # ### end Alembic commands ###\n","repo_name":"cbh4ou/productreports","sub_path":"migrations/versions/9165b3b23655_.py","file_name":"9165b3b23655_.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"24440467083","text":"import numpy as np\nfrom sklearn import datasets\nfrom sklearn.linear_model import LogisticRegression\n\ndef load_data():\n \"\"\"\n 加载数据集\n :return:\n X: 花瓣宽度\n Y: 鸢尾花类型\n \"\"\"\n # 加载sklearn包自带的鸢尾花数据;\n iris = datasets.load_iris()\n # # 查看鸢尾花的数据集\n # print(iris)\n # # 查看鸢尾花的key值;\n # # dict_keys(['data', 'target', 'target_names', 'DESCR','feature_names', 'filename'])\n # print(iris.keys())\n # # 获取鸢尾花的特性: ['sepal length (cm)', 'sepal width (cm)', 'petal length (cm)', 'petal width (cm)']\n # print(iris['feature_names'])\n # print(iris['data'])\n # print(iris['target'])\n # 因为花瓣的相关系数比较高, 所以分类效果比较好, 所以我们就用花瓣宽度当作x;\n # X = iris['data']\n X = iris['data'][:, 3:]\n # 获取分类的结果\n Y = iris['target']\n # print(iris)\n return X, Y\n\ndef configure_plt(plt):\n \"\"\"\n 配置图形的坐标表信息\n \"\"\"\n # 获取当前的坐标轴, gca = get current axis\n ax = plt.gca()\n # 设置x轴, y周在(0, 0)的位置\n ax.spines['bottom'].set_position(('data', 0))\n ax.spines['left'].set_position(('data', 0))\n\n # 绘制x,y轴说明\n plt.xlabel('petal width (cm)') # 花瓣宽度\n plt.ylabel('target') # 鸢尾花类型\n return plt\n\ndef model_train():\n \"\"\"\n 训练模型\n :return:\n \"\"\"\n # 通过上面的数据做逻辑回归\n \"\"\"\n multi_class='ovr' : 分类方式; OvR(One vs Rest),一对剩余的意思,有时候也称它为 OvA(One vs All);一般使用 OvR,更标准;\n solver='sag',逻辑回归损失函数的优化方法; sag:即随机平均梯度下降,是梯度下降法的变种\n \"\"\"\n log_reg = LogisticRegression(multi_class='ovr', solver='sag')\n X, Y = load_data()\n log_reg.fit(X, Y)\n 
print('w0:', log_reg.coef_)\n print('w1:', log_reg.intercept_)\n return log_reg\n\ndef test_data(log_reg):\n \"\"\"\n 测试数据集\n :param log_reg:\n :return:\n \"\"\"\n # 创建新的数据集去测试\n # np.linespace 用于创建等差数列的函数, 会创建一个从0到3的等差数列, 包含1000个值;\n # reshape生成1000行1列的数组;\n X_new = np.linspace(0, 3, 100).reshape(-1, 1)\n print(X_new)\n # X_new1 = np.hstack((X_new,X_new))\n # X_new1 = np.array([\n # [1, 2, 1, 2],\n # [2 ,2, 1, 1]\n # ])\n # print(X_new1)\n # 概率估计的对数。\n # y_proba = log_reg.predict_log_proba(X_new)\n # print(y_proba)\n # 预测X中样本的类标签\n y_hat = log_reg.predict(X_new)\n # print(y_hat)\n return X_new, y_hat\n\ndef bjhs(log_reg, plt):\n ## 绘制边界函数\n w = log_reg.coef_\n b = log_reg.intercept_\n print(\"回归系数:\", w)\n print(\"截距:\", b)\n # line equation: x0 * w0 + x1 * w1 + b = 0\n ax = [i / 10 for i in range(0, 30)]\n for i, a in enumerate(w):\n ay = a[0] * np.array(ax) + b[i]\n # print(y)\n plt.plot(ax, ay, c='yellow')\n\ndef draw_pic():\n \"\"\"\n 绘制图形\n :return:\n \"\"\"\n X, Y = load_data()\n log_reg = model_train()\n test_X, test_Y = test_data(log_reg)\n import matplotlib.pyplot as plt\n\n # 边界函数\n bjhs(log_reg,plt)\n\n plt.scatter(X, Y, c='red')\n # plt.scatter(test_X, test_Y, c='green')\n plt = configure_plt(plt)\n\n # 显示图\n plt.show()\n\nif __name__ == '__main__':\n draw_pic()\n # load_data()","repo_name":"ztwu/iris-classification","sub_path":"iris_classification.py","file_name":"iris_classification.py","file_ext":"py","file_size_in_byte":3557,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"3820659504","text":"from os.path import exists as _exists\nfrom collections import Counter\n\nimport numpy as np\nfrom osgeo import osr\nfrom osgeo import gdal\nfrom osgeo.gdalconst import *\n\nfrom wepppy.all_your_base.geo import read_raster, raster_stacker\n\n\nclass LandcoverMap:\n def __init__(self, fname):\n assert _exists(fname)\n self.fname = fname\n\n data, transform, proj = read_raster(fname, 
dtype=np.int32)\n\n self.data = data\n self.transform = transform\n self.proj = proj\n self.lc_types = list(set(self.data.flatten()))\n self.fname = fname\n\n def _get_dominant(self, indices):\n x = self.data[indices]\n return int(Counter(x).most_common()[0][0])\n \n def _get_fractionals(self, indices):\n x = self.data[indices]\n return {str(k): v for k,v in Counter(x).most_common()}\n \n def calc_fractionals(self, subwta_fn):\n \"\"\"\n calc fractionals based on the subcatchment\n ids identified in the subwta_fn map\n \"\"\"\n assert _exists(subwta_fn)\n subwta, transform, proj = read_raster(subwta_fn, dtype=np.int32)\n assert self.data.shape == subwta.shape\n\n _ids = sorted(list(set(subwta.flatten())))\n \n frac_d = {}\n for _id in _ids:\n if _id == 0:\n continue\n \n _id = int(_id)\n indices = np.where(subwta == _id)\n frac = self._get_fractionals(indices)\n frac_d[str(_id)] = frac\n\n return frac_d\n \n def build_lcgrid(self, subwta_fn, mofe_fn=None):\n \"\"\"\n Generates a dominant lc map based on the subcatchment\n ids identified in the subwta_fn map\n \"\"\"\n assert _exists(subwta_fn)\n subwta, transform, proj = read_raster(subwta_fn, dtype=np.int32)\n\n if not self.data.shape == subwta.shape:\n dst_fn = subwta_fn.replace('.ARC', '.fixed.tif')\n raster_stacker(subwta_fn, self.fname, dst_fn)\n subwta, transform, proj = read_raster(dst_fn, dtype=np.int32)\n\n assert self.data.shape == subwta.shape, [self.data.shape, subwta.shape]\n\n if mofe_fn is None:\n mofe_map = None\n else:\n mofe_map, transform_m, proj_m = read_raster(mofe_fn, dtype=np.int32)\n\n _ids = sorted(list(set(subwta.flatten())))\n \n domlc_d = {}\n for _id in _ids:\n if _id == 0:\n continue\n \n _id = int(_id)\n indices = np.where(subwta == _id)\n\n if mofe_map is None:\n dom = self._get_dominant(indices)\n domlc_d[str(_id)] = str(dom)\n else:\n mofes = sorted(list(set(mofe_map[indices].flatten())))\n mofes = [mofe for mofe in mofes if mofe != 0]\n\n domlc_d[str(_id)] = {}\n for mofe in 
mofes:\n indices = np.where((subwta == _id) & (mofe_map == mofe))\n dom = self._get_dominant(indices)\n domlc_d[f'{_id}'][f'{mofe}'] = str(dom)\n \n return domlc_d\n\n\nif __name__ == \"__main__\":\n fn = \"/var/www/wepp/FlaskApp/static/runs/last/landuse/nlcd.asc\"\n lc = LandcoverMap(fn)\n print(lc.data.shape)\n","repo_name":"rogerlew/wepppy","sub_path":"wepppy/landcover/landcover_map.py","file_name":"landcover_map.py","file_ext":"py","file_size_in_byte":3277,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"54"} +{"seq_id":"40545653104","text":"#!\n\nimport numpy as np\n\n\ndef n2sat2(s, t):\n\t\"\"\"\n\tCalculate nitrogen concentration at saturation.\n\n\tSource: The solubility of neon, nitrogen, and argon in distilled water\n\t\t\tand seawater - Hamme & Emerson (2004) Deep Sea Research V51(11):\n\t\t\t1517-1528. doi: 10.1016/j.dsr.2004.06.009.\n\n\t:param s: Salinity (0/00)\n\t:param t: Temperature in degrees c\n\t:return: Nitrogen saturation at one atmosphere in umol/kg\n\t\"\"\"\n\t# Define constants, etc for saturation calculation\n\ta0 = 6.42931\n\ta1 = 2.92704\n\ta2 = 4.32531\n\ta3 = 4.69149\n\tb0 = -7.44129e-03\n\tb1 = -8.02566e-03\n\tb2 = -1.46775e-02\n\n\t# Calculate nitrogen saturation\n\tts = np.log((288.15 - t) / (273.15 + t))\n\n\tln_c = a0 + ts * (a1 + ts * (a2 + ts * a3))\n\tln_c += s * (b0 + ts * (b1 + ts * b2))\n\tn2 = np.exp(ln_c)\n\n\treturn n2\n","repo_name":"sghowell/pymbari","sub_path":"pymbari/n2sat2.py","file_name":"n2sat2.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"21306313235","text":"# -*- coding: UTF-8 -*-\nimport base64\nimport collections\nimport copy\nimport json\nfrom datetime import datetime\nfrom urllib import request, parse\n\nimport rsa\n\n\nclass alipay:\n def __init__(self, app_id, private_key, public_key, notify_url=None, charset='UTF-8', sign_type='RSA',\n version='1.0', 
DEBUG=False):\n self.requesturl = 'https://openapi.alipay.com/gateway.do' if DEBUG is False else \"https://openapi.alipaydev.com/gateway.do\"\n self.private_key = private_key\n self.public_key = public_key\n self.params = dict(app_id=app_id, charset=charset, sign_type=sign_type, version=version,\n biz_content={}, timestamp='', notify_url=notify_url)\n\n def _sort(self, params):\n return collections.OrderedDict(sorted(dict(params).items(), key=lambda x: x[0]))\n\n @staticmethod\n def make_goods_etail(goods_detail=None, alipay_goods_id=None, goods_name=None, quantity=None, price=None,\n goods_category=None, body=None, show_url=None):\n params = dict(goods_detail=goods_detail, alipay_goods_id=alipay_goods_id, goods_name=goods_name,\n quantity=quantity, price=price, goods_category=goods_category, body=body, show_url=show_url)\n return dict(filter(lambda x: x[1] is not None, params.items()))\n\n def _make_sign(self, params, **kwargs):\n private_key = rsa.PrivateKey.load_pkcs1(kwargs.get('private_key', None) or self.private_key)\n sign = base64.b64encode(rsa.sign(params.encode(), private_key, \"SHA-1\")).decode('utf-8')\n return sign\n\n def _check_sign(self, message, sign, **kwargs):\n message = self._sort(message)\n data = '{'\n for key, value in message.items():\n data += '\"{}\":\"{}\",'.format(key, value)\n data = data[:-1] + '}'\n sign = base64.b64decode(sign)\n public_key = rsa.PublicKey.load_pkcs1_openssl_pem(kwargs.get('public_key', None) or self.public_key)\n try:\n rsa.verify(data.encode(), sign, public_key)\n return True\n except Exception:\n return False\n\n def _make_request(self, params, biz_content, **kwargs):\n buf = ''\n params['timestamp'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n params['biz_content'] = json.dumps(self._sort(biz_content))\n for key, value in kwargs.items():\n params[key] = value\n params = self._sort(params)\n for key in params:\n buf += '{}={}&'.format(key, params[key])\n params['sign'] = self._make_sign(buf[:-1], **kwargs)\n 
# 发射http请求取回数据\n data = request.urlopen(self.requesturl, data=parse.urlencode(params).encode('utf-8')).read().decode()\n return data\n\n def parse_response(self, params, **kwargs):\n sign = params['sign']\n if self._check_sign(dict(filter(lambda x: 'sign' not in x[0], params.items())), sign, **kwargs):\n return True\n else:\n return False\n\n def trade_pre_create(self, out_trade_no, total_amount, subject, seller_id=None, discountable_amount=None,\n undiscountable_amount=None, buyer_logon_id=None, body=None, goods_detail=None,\n operator_id=None, store_id=None, terminal_id=None, timeout_express=None, alipay_store_id=None,\n royalty_info=None, extend_params=None, **kwargs):\n \"\"\"\n\n :param out_trade_no: 商户订单号,64个字符以内、只能包含字母、数字、下划线;需保证在商户端不重复.\n :param total_amount: 订单总金额,单位为元,精确到小数点后两位.\n :param subject: 订单标题.\n :param seller_id: 卖家支付宝用户ID。 如果该值为空,则默认为商户签约账号对应的支付宝用户ID.\n\n :param discountable_amount:可打折金额. 参与优惠计算的金额,单位为元,精确到小数点后两位,取值范围[0.01,100000000]\n :param undiscountable_amount:不可打折金额. 不参与优惠计算的金额,单位为元,精确到小数点后两位,取值范围[0.01,100000000]\n :param buyer_logon_id: 买家支付宝账号\n :param body: 对交易或商品的描述\n :param goods_detail: 订单包含的商品列表信息.使用make_goods_etail生成. 
其它说明详见:“商品明细说明”\n :param operator_id: 商户操作员编号\n :param store_id: 商户门店编号\n :param terminal_id: 商户机具终端编号\n :param timeout_express: 该笔订单允许的最晚付款时间,逾期将关闭交易。取值范围:1m~15d。m-分钟,h-小时,d-天,1c-当天\n :param alipay_store_id: 支付宝店铺的门店ID\n :param royalty_info: 描述分账信息 暂时无效\n :param extend_params: 业务扩展参数\t暂时无效\n :param kwargs: 公共参数可在此处暂时覆盖\n :return:\n \"\"\"\n params = copy.deepcopy(self.params)\n params['method'] = 'alipay.trade.precreate'\n total_amount = round(int(total_amount), 2)\n if discountable_amount:\n discountable_amount = round(int(discountable_amount), 2)\n if undiscountable_amount:\n undiscountable_amount = round(int(undiscountable_amount), 2)\n if discountable_amount:\n if undiscountable_amount is not None:\n if discountable_amount + undiscountable_amount != total_amount:\n return '传入打折金额错误'\n biz_content = dict(out_trade_no=out_trade_no[:64], total_amount=total_amount, seller_id=seller_id,\n subject=subject,\n discountable_amount=discountable_amount, undiscountable_amount=undiscountable_amount,\n buyer_logon_id=buyer_logon_id, body=body, goods_detail=goods_detail, operator_id=operator_id,\n store_id=store_id, terminal_id=terminal_id, timeout_express=timeout_express,\n alipay_store_id=alipay_store_id, royalty_info=royalty_info, extend_params=extend_params)\n resp = self._make_request(params, dict(filter(lambda x: x[1] is not None, biz_content.items())), **kwargs)\n check = eval(resp)\n resp = json.loads(resp)['alipay_trade_precreate_response']\n if self._check_sign(check['alipay_trade_precreate_response'], check['sign']):\n return resp\n return False\n\n def trade_refund(self, refund_amount, out_trade_no=None, trade_no=None,\n refund_reason=None, out_request_no=None, operator_id=None, store_id=None,\n terminal_id=None, **kwargs):\n \"\"\"\n\n :param refund_amount: 需要退款的金额,该金额不能大于订单金额,单位为元,支持两位小数\n :param out_trade_no: 商户订单号,不可与支付宝交易号同时为空\n :param trade_no: 支付宝交易号,和商户订单号不能同时为空\n :param refund_reason: 退款的原因说明\n :param out_request_no: 
标识一次退款请求,同一笔交易多次退款需要保证唯一,如需部分退款,则此参数必传。\n :param operator_id: 商户的操作员编号\n :param store_id: 商户的门店编号\n :param terminal_id: 商户的终端编号\n :param kwargs: 公共参数可在此处临时覆盖\n :return:\n \"\"\"\n params = copy.deepcopy(self.params)\n params['method'] = 'alipay.trade.refund'\n refund_amount = round(float(refund_amount), 2)\n\n biz_content = dict(refund_amount=refund_amount, out_trade_no=out_trade_no, trade_no=trade_no,\n refund_reason=refund_reason, out_request_no=out_request_no, operator_id=operator_id,\n store_id=store_id, terminal_id=terminal_id)\n resp = self._make_request(params, dict(filter(lambda x: x[1] is not None, biz_content.items())), **kwargs)\n check = eval(resp)\n resp = json.loads(resp)['alipay_trade_refund_response']\n if self._check_sign(check['alipay_trade_refund_response'], check['sign']):\n return int(resp['code']) == 10000\n return False\n\n def trade_query(self, out_trade_no, trade_no=None, **kwargs):\n params = copy.deepcopy(self.params)\n params['method'] = 'alipay.trade.query'\n\n biz_content = dict(out_trade_no=out_trade_no, trade_no=trade_no)\n resp = self._make_request(params, dict(filter(lambda x: x[1] is not None, biz_content.items())), **kwargs)\n check = eval(resp)\n resp = json.loads(resp)['alipay_trade_query_response']\n if self._check_sign(check['alipay_trade_query_response'], check['sign']) and resp['code'] == 10000:\n return resp\n return False\n","repo_name":"Pengfei00/alipayF2F","sub_path":"alipay.py","file_name":"alipay.py","file_ext":"py","file_size_in_byte":8982,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"54"} +{"seq_id":"30590818650","text":"import unittest\nfrom mapping import recon\n\n\nclass TestMapping(unittest.TestCase):\n\n def test_common_kv(self):\n in_dict1 = {\"a\": 1, \"b\": 2, \"c\": 3, \"d\": \"9\"}\n in_dict2 = {\"a\": 1, \"b\": 7, \"e\": 4, \"f\": 3, \"k\": 8, \"l\": 13}\n result = recon(dict1=in_dict1, dict2=in_dict2)\n self.assertEqual(result['common_kv_1'], [(\"a\", 1)], 
\"Should have same kv\")\n\n def test_common_v(self):\n in_dict1 = {\"a\": 1, \"b\": 2, \"c\": 3, \"d\": \"9\"}\n in_dict2 = {\"a\": 1, \"b\": 7, \"e\": 4, \"f\": 3, \"k\": 8, \"l\": 13}\n result = recon(dict1=in_dict1, dict2=in_dict2)\n self.assertEqual(result['common_k_2'], [(\"b\", 2, 7)], \"Should have same kv\")\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"mohanmca/MohanPyTools","sub_path":"mapping_test.py","file_name":"mapping_test.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"36924178734","text":"#!/usr/bin/python3\n\"square class inherits from Rectangle\"\nfrom models.rectangle import Rectangle\n\n\nclass Square(Rectangle):\n \"Square class\"\n def __init__(self, size, x=0, y=0, id=None):\n \"Initiation of Square class\"\n super().__init__(size, size, x, y, id)\n\n @property\n def size(self):\n \"Size getter\"\n return super().width\n\n @size.setter\n def size(self, value):\n \"size setter\"\n super().__init__(value, value, self.x, self.y, self.id)\n\n def update(self, *args, **kwargs):\n \"Update the Square values\"\n if args is None or len(args) == 0:\n if 'id' in kwargs:\n self.id = kwargs['id']\n if 'size' in kwargs:\n v = kwargs['size']\n super().__init__(v, v, self.x, self.y, self.id)\n if 'x' in kwargs:\n self.x = kwargs['x']\n if 'y' in kwargs:\n self.y = kwargs['y']\n else:\n if len(args) < 1:\n return\n self.id = args[0]\n if len(args) < 2:\n return\n super().__init__(args[1], args[1], self.x, self.y, self.id)\n if len(args) < 3:\n return\n self.x = args[2]\n if len(args) < 4:\n return\n self.y = args[3]\n\n def to_dictionary(self):\n \"Square information to dict\"\n d = {}\n d.setdefault('id', self.id)\n d.setdefault('size', super().width)\n d.setdefault('x', self.x)\n d.setdefault('y', self.y)\n return 
d\n","repo_name":"khalil-hassayoun/holbertonschool-higher_level_programming","sub_path":"0x0C-python-almost_a_circle/models/square.py","file_name":"square.py","file_ext":"py","file_size_in_byte":1580,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"31958686211","text":"__author__ = 'ylwoi'\n\n# create a function that takes a list of students and prints:\n# - Who has got more candies than 4 candies\n\n# create a function that takes a list of students and prints:\n# - how many candies they have on average\n\nstudents = [\n {'name': 'Rezso', 'age': 9.5, 'candies': 2},\n {'name': 'Gerzson', 'age': 10, 'candies': 1},\n {'name': 'Aurel', 'age': 7, 'candies': 3},\n {'name': 'Zsombor', 'age': 12, 'candies': 5}\n]\n\ndef list_candy(list):\n for i in list:\n if i['candies'] > 4:\n print(i['name'])\n\nlist_candy(students)\n\ndef average_candy(list):\n sum_candy = 0\n for i in list:\n sum_candy += i['candies']\n print(sum_candy / len(list))\n\naverage_candy(students)","repo_name":"jsdelivrbot/Ylwoi","sub_path":"week02/day_4/student_filter.py","file_name":"student_filter.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"30478185528","text":"# python\n\nimport discord\nimport asyncio\n\nclient = discord.Client()\nvoice = None\nplayer = None\n\n\n@client.event\n@asyncio.coroutine\ndef on_message(message):\n global player\n global voice\n if message.content.startswith(\"$\"):\n m = message.content\n words = m.split(\" \")\n replyString = \"\"\n if m.startswith('$greet'):\n replyString = \"Hello!\"\n elif m.startswith('$quit'):\n yield from client.send_message(message.channel, 'Goodbye')\n yield from client.logout()\n elif m.startswith('$join'):\n voice = None\n if len(words) < 2:\n replyString = \"need to specify a channel\"\n for channel in message.channel.server.channels:\n if channel.name == words[1]:\n if 
channel.type == discord.ChannelType.voice:\n voice = yield from client.join_voice_channel(channel)\n replyString = \"Joining voice channel: \" + str(channel.name)\n if channel.type == discord.ChannelType.text:\n replyString = \"Cannot join \" + str(channel.name)\n replyString += \" because it is a text channel, not a voice channel\"\n if replyString == \"\":\n replyString = \"That channel does not exist\"\n elif m.startswith('$play'):\n if voice != None:\n if player != None:\n player.stop()\n player = None\n player = voice.create_ffmpeg_player('heathens.mp3')\n player.start()\n replyString = \"Playing: \" + voice.channel.name + \" channel is bumpin'!\"\n #replyString += \"\\nJammin' to \" + words[1]\n else:\n replyString = \"Not connected to a voice channel. :[\"\n elif m.startswith('$silence'):\n if player != None:\n player.stop()\n player = None\n replyString = \"silencing!\"\n else:\n replyString = \"nothing is playing you dippity shit >.<\"\n\n yield from client.send_message(message.channel, replyString)\n\n\n\n\n\n@client.event\n@asyncio.coroutine\ndef on_ready():\n print('Logged in as')\n print(client.user.name)\n print(client.user.id)\n print()\n\n@asyncio.coroutine\ndef main():\n yield from client.login(\"dragonitecatcher5556@gmail.com\", \"dratini17\")\n yield from client.connect()\n\n\nif __name__ == '__main__':\n loop = asyncio.get_event_loop()\n try:\n loop.run_until_complete(main())\n except:\n loop.run_until_complete(client.logout())\n finally:\n loop.close()\n","repo_name":"mkdidomenic/jarvis-discord-bot","sub_path":"old/playFile.py","file_name":"playFile.py","file_ext":"py","file_size_in_byte":2659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"13892917900","text":"first_input = input()\nsecond_input = input()\nthird_input = input()\n\nsum_of_chars = 0\n\nleft_range = int(ord(first_input))\nright_range = int(ord(second_input))\nfor char in third_input:\n current_char = int(ord(char))\n 
if left_range < current_char < right_range:\n sum_of_chars += current_char\n\nprint(sum_of_chars)","repo_name":"DianVK/softuni_python_fundamentals","sub_path":"Text Processing - More Exercises/ascii_sumator.py","file_name":"ascii_sumator.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"54"} +{"seq_id":"24953361436","text":"import csv\nimport os\nfrom glob import glob\n\nimport cv2\nfrom matplotlib import pyplot as plt\n\nproject_directory = input(\"Enter the image directory of project : \")\nimage_directory = os.path.join(project_directory, \"Geotagged-Images\")\ngcp_location_file = os.path.join(project_directory, 'GCP_location.csv')\ngcp_locations = []\ngcp_image_dict = {}\nlist_draw_points = []\nprevious_file = None\nfields = [\"FileName\", \"GCPLocation\"]\nif not os.path.exists(gcp_location_file):\n with open(gcp_location_file, 'w') as csv_file:\n writer = csv.DictWriter(csv_file, fieldnames=fields, lineterminator='\\n')\n writer.writeheader()\n\n\ndef write_to_csv(image_name, gcp_location_point):\n if gcp_location_point:\n with open(gcp_location_file, 'a') as csv_file_data:\n writer = csv.DictWriter(csv_file_data,\n fieldnames=fields,\n lineterminator='\\n')\n writer.writerow({fields[0]: image_name,\n fields[1]: gcp_location_point\n })\n\n\ndef handle_mouse_press(event):\n if event.dblclick:\n plt.plot(event.xdata, event.ydata, \"o\", markerfacecolor='b',\n markeredgecolor='b', markersize=5.00)\n fig.canvas.draw()\n gcp_locations.append([event.xdata, event.ydata])\n if event.button == 3 and gcp_locations:\n plt.plot(gcp_locations[-1][0], gcp_locations[-1][1], \"o\",\n markerfacecolor='r', markeredgecolor='r', markersize=5.00)\n fig.canvas.draw()\n gcp_locations.pop()\n\n\ndef press(event):\n global gcp_locations\n global list_draw_points\n if event.key == 'n':\n plt.close(fig)\n if event.key == 't':\n mng.full_screen_toggle()\n\n\nfor file in 
glob(os.path.join(image_directory, '*JPG')):\n image = cv2.imread(file, cv2.IMREAD_COLOR)\n fig = plt.figure()\n plt.title('Geotag Image: ' + str(file)),\n plt.subplots_adjust(top=0.92, bottom=0.08, left=0.10, right=0.90)\n ax = fig.add_subplot(111), plt.imshow(\n cv2.cvtColor(image, cv2.COLOR_BGR2RGB)),\n plt.yticks([]), plt.xticks([]),\n fig.canvas.mpl_connect('button_press_event', handle_mouse_press)\n fig.canvas.mpl_connect('key_press_event', press)\n mng = plt.get_current_fig_manager()\n mng.full_screen_toggle()\n plt.show()\n folder_name = os.path.split(os.path.split(image_directory)[0])[1]\n file_name = os.path.basename(file)\n file_name_to_store = os.path.join(folder_name, file_name)\n # gcp_image_dict[file_name_to_store] = {\"GCPLocation\": gcp_locations}\n write_to_csv(file_name_to_store, gcp_locations)\n gcp_locations = []\n list_draw_points = []\n","repo_name":"dweepjyoti/automatic_gcp_detection","sub_path":"gcpimagesnlocations.py","file_name":"gcpimagesnlocations.py","file_ext":"py","file_size_in_byte":2678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"19456561573","text":"#!/usr/bin/env bash\n\nimport subprocess\nimport time\nimport sys\nimport os\n\nITALIC = \"\\033[3m\"\npurple = '\\x1b[38;5;165m'\nblue = '\\x1b[38;5;33m'\nred = '\\x1b[38;5;196m'\ngreen = '\\x1b[38;5;118m'\ngrey = '\\x1b[38;5;0m'\npink = '\\x1b[38;5;199m'\nEND = \"\\033[0m\"\nUNDERLINE = \"\\033[4m\"\nBOLD = \"\\033[1m\"\nBLINK = \"\\033[5m\"\nBROWN = \"\\033[0;33m\"\n\ninterface = os.listdir('/sys/class/net')\n\nclass monitor():\n ask = str(input(f'{purple}{BOLD}{ITALIC}Your Adapter supported by AirCrack-package(y/n):{END} ')).lower()\n try:\n if ask == str('yes') or ask == str('y'):\n time.sleep(2)\n os.system('clear')\n print(f'{purple}{BOLD}{ITALIC}CHOOSE INTERFACE TO PUT IN MONITOR MODE{END}-{red}AIRCRACK SUPPORTED ADAPTERS{END}')\n print('')\n for i in interface:\n time.sleep(0.1)\n print(f'-> {i}')\n print('')\n 
card_interface = str(input(f'{BOLD}{purple}Enter intarface name:{END} ')).lower()\n if card_interface in interface:\n\n cmd_monitor = subprocess.run([\"airmon-ng\", \"start\", card_interface], stderr=subprocess.DEVNULL, \\\n stdout=subprocess.DEVNULL)\n print('success!! interface {0} on monitor mode'.format(card_interface))\n else:\n os.system('clear')\n sys.exit(f'Invalid Interface {card_interface}')\n elif ask == str('no') or ask == str('n'):\n time.sleep(2)\n os.system('clear')\n print(f'{BOLD}{purple}{ITALIC}CHOOSE INTERFACE TO PUT IN MONITOR MODE{END}')\n for i in interface:\n time.sleep(0.2)\n print(f'-> {i}')\n print('')\n card_interface = str(input(f'{BOLD}{purple}Enter intarface name:{END} ')).lower()\n if card_interface in interface:\n cmd_manual = subprocess.run([\"ifconfig\", card_interface, \"down\"])\n cmd_manual2 = subprocess.run([\"iwconfig\", card_interface, \"mode\", \"monitor\"])\n cmd_manual3 = subprocess.run([\"ifconfig\", card_interface, \"up\"])\n os.system('clear')\n print('success!! interface {0} on monitor mode'.format(card_interface))\n else:\n os.system('clear')\n sys.exit(f'{red}{BOLD}{ITALIC}Invalid Interface {card_interface}{END}')\n else:\n sys.exit('Unkwown option chooses!!')\n except KeyboardInterrupt:\n sys.exit('cancelled by User!! 
Good Bye')\n\n\n\n \n \n \n\n","repo_name":"DARK-EAGLE-FRAMEWORK/framework","sub_path":"AP/monitor.py","file_name":"monitor.py","file_ext":"py","file_size_in_byte":2524,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"54"} +{"seq_id":"23536274752","text":"from pwn import *\n\ncontext(os='linux', arch='amd64', kernel='amd64',log_level='debug')\n\n\n\nio=gdb.debug('./pwn','''\n break main\n break vuln\n break *0x4007C8\n ''')\n\n\nio=process('./pwn')\n\n#gdb.attach(io)\n#io=remote('node4.anna.nssctf.cn',28799)\nelf=ELF('./pwn')\ns,sl,sa,sla = io.send, io.sendline, io.sendafter, io.sendlineafter\nr, ra, rl, ru = io.recv, io.recvall, io.recvline, io.recvuntil\n\n\nprintf=elf.got['printf']\nsystem=elf.plt['system']\n\n#system=0x40085D\n#system=0x4005E0\nprint(hex(system))\n\nru(b'name:')\n\n\npayload1=b'%16$paaa'\npayload1+=fmtstr_payload(7,{printf:system},numbwritten=17,write_size='short')\n\nprint(payload1)\nsl(payload1)\n\n\nru(b'hello,')\n#rl()\n\n\nru(b'0x')\nrbp= int(io.recv(12).rjust(16,b'0'),16)\nprint(hex(rbp))\nru(b'keep on !')\n\n#sl(b'/bin/sh\\x00'+b'a'*(0x60-len(b'/bin/sh\\x00')-8)+p64(0x4007BC))\ns(b'/bin/sh\\x00'*4+b'a'*(0x60-len(b'/bin/sh\\x00'*4)-8-8)+p64(rbp)+p64(0x4007BC))\n\n\nio.interactive()\n\n\n\n\n","repo_name":"xiaofeng789/fmt","sub_path":"geshihua/KEEP ON/exp2.py","file_name":"exp2.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"42197149141","text":"import os\nfrom socket import *\nfrom gmssl import sm4\nimport sys\nsys.path.append(\"../sm2\")\nimport SM2Sign\nfrom random import randint\nfrom util import *\nfrom ECCPoint import *\n\nclass Sender(object):\n def __init__(self, sid):\n self.sock = socket(AF_INET, SOCK_STREAM)\n self.sock.bind((\"127.0.0.1\", 1024))\n self.id = sid\n self.public = None\n self.private = None\n self.key = None\n self.iv = 
(0x000102030405060708090a0b0c0d0e0f).to_bytes(16, byteorder='big')\n\n def generateKey(self):\n self.private = randint(3, ECCPoint.n - 1)\n self.public = SM2Sign.SM2Sign.g.multi(self.private) \n\n def decideKey(self, sock, auth):\n sign = SM2Sign.SM2Sign(self.id)\n sign.public = self.public\n sign.private = self.private\n k = randint(3, ECCPoint.n - 1)\n pt = point_to_bytes(SM2Sign.SM2Sign.g.multi(k))\n sig = sign.sign(pt)\n sock.send(b''.join([pt, sig[0], sig[1]]))\n other = sock.recv(130)\n otherPt = bytes_to_point(other[:65])\n otherSig = other[65:]\n if not auth.authenticate(other[:65], (otherSig[:32], otherSig[32:])):\n print(\"Authentication failed while key exchanging.\")\n self.sock.close()\n sys.exit(0)\n self.key = point_to_bytes(otherPt.multi(k))[1:]\n #print(self.key)\n\n def encryptFile(self, addr):\n with open(addr, \"rb\") as f:\n src = f.read()\n name = os.path.split(addr)[1].encode()\n sign = SM2Sign.SM2Sign(self.id)\n sign.public = self.public\n sign.private = self.private\n signature = sign.sign(src)\n encrypt = sm4.CryptSM4()\n encrypt.set_key(self.key, 0)\n encryptIV = encrypt.crypt_ecb(self.iv)\n cipher = encrypt.crypt_cbc(\n self.iv, b''.join([bytes([len(name)]), name, src, signature[0], signature[1]]))\n return b''.join([encryptIV, cipher])\n\n def sendFile(self, address):\n self.sock.listen(5)\n sock, addr = self.sock.accept()\n sock.send(self.id.encode())\n targetID = sock.recv(10).decode()\n auth = SM2Sign.SM2Sign(targetID)\n sock.send(point_to_bytes(self.public))\n auth.public = bytes_to_point(sock.recv(65))\n self.decideKey(sock, auth)\n e = self.encryptFile(address)\n sock.send((len(e)).to_bytes(3, byteorder='big'))\n for i in range(0, len(e), 1024):\n sock.send(e[i:i+1024])\n print(\"The file has been sent to \" + targetID + \".\")\n self.sock.close()\n \nif __name__ == '__main__':\n while True:\n sid = input(\"Input sender ID (no more than 10 bytes): \")\n if len(sid) <= 10:\n break\n else:\n print(\"ID too long.\")\n s = 
Sender(sid)\n s.generateKey()\n fileAddr = input(\"Input the file address: \")\n if not os.path.isfile(fileAddr):\n print(\"File dose not exist.\")\n sys.exit(0)\n s.sendFile(fileAddr)","repo_name":"tianchen-zhang/Crypto-Experiment","sub_path":"Exp11/sender/sender.py","file_name":"sender.py","file_ext":"py","file_size_in_byte":2957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71840758243","text":"import tkinter as tk\nfrom tkinter import *\nfrom tkinter import ttk\nfrom tkinter import messagebox\nfrom PIL import Image,ImageTk\nfrom tkinter.filedialog import askopenfilename\nfrom idlelib.tooltip import Hovertip\nfrom ControladorAFD import ControladorAFD\nfrom ControladorGR import ControladorGR\nfrom Grafica import Grafica\n\nclass App():\n ALTO = 1325\n ANCHO = 600\n def __init__(self):\n self.ctrlAFD = ControladorAFD()\n self.ctrlGR = ControladorGR()\n self.gr = Grafica()\n\n self.root = tk.Tk()\n self.root.title(\"Proyecto1 - LFP\")\n self.root.geometry(f'{App.ALTO}x{App.ANCHO}')\n self.root.state('zoomed')\n self.root.configure(bg='#212325')\n\n self.root.grid_columnconfigure(1,weight=1)\n self.root.grid_rowconfigure(0,weight=1)\n\n self.panelIzq = tk.Frame(master=self.root,width=200)\n self.panelIzq.configure(bg='#2A2D2E')\n self.panelIzq.grid(row=0,column=0,sticky='nswe')\n\n self.panelDer1 = tk.Frame(master=self.root)\n self.panelDer1.configure(bg='#2A2D2E')\n self.panelDer1.grid(row=0,column=1,sticky=\"nswe\",padx=20,pady=20)\n\n self.panelDer2 = tk.Frame(master=self.root)\n self.panelDer2.configure(bg='#2A2D2E')\n self.panelDer2.grid(row=0,column=1,sticky=\"nswe\",padx=20,pady=20)\n\n self.panelDer3 = tk.Frame(master=self.root)\n self.panelDer3.configure(bg='#2A2D2E')\n self.panelDer3.grid(row=0,column=1,sticky=\"nswe\",padx=20,pady=20)\n\n self.panelDer4 = tk.Frame(master=self.root)\n self.panelDer4.configure(bg='#2A2D2E')\n 
self.panelDer4.grid(row=0,column=1,sticky=\"nswe\",padx=20,pady=20)\n\n self.panelDer5 = tk.Frame(master=self.root)\n self.panelDer5.configure(bg='#2A2D2E')\n self.panelDer5.grid(row=0,column=1,sticky=\"nswe\",padx=20,pady=20)\n\n self.panelDer2.grid_remove()\n self.panelDer3.grid_remove()\n self.panelDer4.grid_remove()\n self.panelDer5.grid_remove()\n\n self.panelOpc()\n self.panelCargarArchivo()\n self.panelCrearAFD()\n self.panelCrearGR()\n self.panelAyudaAFD()\n self.panelAyudaGR()\n\n def panelOpc(self):\n self.panelIzq.grid_rowconfigure(0,minsize=10)\n self.panelIzq.grid_rowconfigure(7,weight=1)\n self.panelIzq.grid_rowconfigure(8,minsize=20)\n self.panelIzq.grid_rowconfigure(11,minsize=10)\n\n self.opciones = tk.Label(master=self.panelIzq,text='Opciones',font=('Roboto Medium',16),background='#2A2D2E',foreground='white')\n self.opciones.grid(row=1,column=0,pady=10,padx=10)\n\n self.cargar = tk.Button(master=self.panelIzq,text='Cargar Archivo',font=('Roboto Medium',11),bg='#0059b3',activebackground='#0059b3',foreground='white',activeforeground='white',width=15,height=1,command=self.opcion1)\n self.cargar.grid(row=2,column=0,pady=10,padx=20)\n\n self.crearAFD = tk.Button(master=self.panelIzq,text='Módulo AFD',font=('Roboto Medium',11),bg='#0059b3',activebackground='#0059b3',foreground='white',activeforeground='white',width=15,height=1,command=self.opcion2)\n self.crearAFD.grid(row=3,column=0,pady=10,padx=10)\n\n self.crearGR = tk.Button(master=self.panelIzq,text='Módulo GR',font=('Roboto Medium',11),bg='#0059b3',activebackground='#0059b3',foreground='white',activeforeground='white',width=15,height=1,command=self.opcion3)\n self.crearGR.grid(row=4,column=0,pady=10,padx=10)\n\n self.ayudaAFD = tk.Button(master=self.panelIzq,text='Ayúda AFD',font=('Roboto Medium',11),bg='#0059b3',activebackground='#0059b3',foreground='white',activeforeground='white',width=15,height=1,command=self.opcion4)\n self.ayudaAFD.grid(row=5,column=0,pady=10,padx=10)\n\n self.ayudaGR = 
tk.Button(master=self.panelIzq,text='Ayúda GR',font=('Roboto Medium',11),bg='#0059b3',activebackground='#0059b3',foreground='white',activeforeground='white',width=15,height=1,command=self.opcion5)\n self.ayudaGR.grid(row=6,column=0,pady=10,padx=10)\n\n self.salir = tk.Button(master=self.panelIzq,text='Salir',font=('Roboto Medium',11),bg='#D35B58',activebackground='#D35B58',foreground='white',activeforeground='white',width=15,height=1,command=quit)\n self.salir.grid(row=9,column=0,pady=10,padx=10)\n\n def panelCargarArchivo(self):\n self.panelDer1.rowconfigure((0,1,2,3,4),weight=1)\n self.panelDer1.rowconfigure(5,weight=10)\n self.panelDer1.columnconfigure((0,1),weight=1)\n self.panelDer1.columnconfigure(5,weight=0)\n\n self.panelD = tk.Frame(master=self.panelDer1)\n self.panelD.configure(bg='#343638')\n self.panelD.grid(row=0,column=0,columnspan=2,sticky=\"nswe\",padx=20,pady=20)\n self.panelDatos()\n\n self.cargarAFD = tk.Button(master=self.panelDer1,text='Cargar AFD',font=('Roboto Medium',11),bg='#0059b3',activebackground='#0059b3',foreground='white',activeforeground='white',width=15,height=1,command=self.chooseFile)\n self.cargarAFD.grid(row=1,column=0,pady=(20,0),padx=(20,10),sticky='nwe')\n\n self.cargarAFD = tk.Button(master=self.panelDer1,text='Cargar GR',font=('Roboto Medium',11),bg='#0059b3',activebackground='#0059b3',foreground='white',activeforeground='white',width=15,height=1,command=self.chooseFile)\n self.cargarAFD.grid(row=1,column=1,pady=(20,0),padx=(10,20),sticky='nwe')\n\n self.ruta = tk.Entry(master=self.panelDer1,width=120,bg='#343638',foreground='white',font=('Roboto Medium',16))\n self.ruta.insert(0,'Ruta')\n self.ruta.configure(disabledbackground='#343638',disabledforeground='white',state='disabled')\n self.ruta.grid(row=2,column=0,columnspan=2,padx=20,sticky='nwe')\n\n def panelDatos(self):\n self.panelD.rowconfigure((0,1),weight=1)\n self.panelD.rowconfigure(2,weight=10)\n self.panelD.columnconfigure((0,1),weight=1)\n 
self.panelD.columnconfigure(2,weight=0)\n\n title1 = tk.Label(master=self.panelD,text='Lenguajes Formales y de Programación - N',font=('Roboto Medium',20),background='#343638',foreground='white')\n title1.grid(row=0,column=0,columnspan=2,pady=20,padx=20,sticky='we')\n\n title1 = tk.Label(master=self.panelD,text='Brandon Tejaxún',font=('Roboto Medium',20),background='#343638',foreground='white')\n title1.grid(row=1,column=0,columnspan=1,pady=20,padx=20,sticky='e')\n\n title1 = tk.Label(master=self.panelD,text='202112030',font=('Roboto Medium',20),background='#343638',foreground='white')\n title1.grid(row=1,column=1,columnspan=1,pady=20,padx=20,sticky='w')\n\n def panelCrearAFD(self):\n self.panelDer2.rowconfigure((0,1,2,3,4,5,6,7,8,9,10,11,12),weight=1)\n self.panelDer2.rowconfigure(13,weight=10)\n self.panelDer2.columnconfigure((0,1,2,3),weight=1)\n self.panelDer2.columnconfigure(4,weight=0)\n\n title1 = tk.Label(master=self.panelDer2,text='Crear Autómata Finito Determinista',font=('Roboto Medium',20),background='#2A2D2E',foreground='white')\n title1.grid(row=0,column=0,columnspan=4,pady=(15,0),padx=20,sticky='w')\n\n nombre = tk.Label(master=self.panelDer2,text='Nombre: ',font=('Roboto Medium',16),background='#2A2D2E',foreground='white')\n nombre.grid(row=1,column=0,padx=20,sticky='nw')\n\n self.nombreAFD = tk.Entry(master=self.panelDer2,width=120,bg='#343638',foreground='white',font=('Roboto Medium',16))\n self.nombreAFD.configure(disabledbackground='#343638',disabledforeground='white')\n self.nombreAFD.grid(row=2,column=0,columnspan=2,padx=20,sticky='nwe')\n\n estados = tk.Label(master=self.panelDer2,text='Estados: ',font=('Roboto Medium',16),background='#2A2D2E',foreground='white')\n estados.grid(row=1,column=2,padx=20,sticky='nw')\n\n self.estadosAFD = tk.Entry(master=self.panelDer2,width=120,bg='#343638',foreground='white',font=('Roboto Medium',16))\n self.estadosAFD.configure(disabledbackground='#343638',disabledforeground='white')\n 
self.estadosAFD.grid(row=2,column=2,columnspan=2,padx=20,sticky='nwe')\n self.agregarNota(self.estadosAFD,'Ejemplo: A;B;C;D (Separados por punto y coma)')\n\n alfabeto = tk.Label(master=self.panelDer2,text='Alfabeto: ',font=('Roboto Medium',16),background='#2A2D2E',foreground='white')\n alfabeto.grid(row=3,column=0,padx=20,sticky='nw')\n\n self.alfabetoAFD = tk.Entry(master=self.panelDer2,width=120,bg='#343638',foreground='white',font=('Roboto Medium',16))\n self.alfabetoAFD.configure(disabledbackground='#343638',disabledforeground='white')\n self.alfabetoAFD.grid(row=4,column=0,columnspan=2,padx=20,sticky='nwe')\n self.agregarNota(self.alfabetoAFD,'Ejemplo: 0;1;2 (Separados por punto y coma)')\n\n eIni = tk.Label(master=self.panelDer2,text='Estado Inicial: ',font=('Roboto Medium',16),background='#2A2D2E',foreground='white')\n eIni.grid(row=3,column=2,padx=20,sticky='nw')\n\n self.eInicialAFD = tk.Entry(master=self.panelDer2,width=120,bg='#343638',foreground='white',font=('Roboto Medium',16))\n self.eInicialAFD.configure(disabledbackground='#343638',disabledforeground='white')\n self.eInicialAFD.grid(row=4,column=2,columnspan=2,padx=20,sticky='nwe')\n self.agregarNota(self.eInicialAFD,'El estado inicial debe existir en los estados')\n\n eAcept = tk.Label(master=self.panelDer2,text='Estados de Aceptación: ',font=('Roboto Medium',16),background='#2A2D2E',foreground='white')\n eAcept.grid(row=5,column=0,padx=20,sticky='nw')\n\n self.eAceptAFD = tk.Entry(master=self.panelDer2,width=120,bg='#343638',foreground='white',font=('Roboto Medium',16))\n self.eAceptAFD.configure(disabledbackground='#343638',disabledforeground='white')\n self.eAceptAFD.grid(row=6,column=0,columnspan=2,padx=20,sticky='nwe')\n self.agregarNota(self.eAceptAFD,'Ejemplo: A;B;C (Separados por punto y coma)')\n\n transiciones = tk.Label(master=self.panelDer2,text='Transiciones: ',font=('Roboto Medium',16),background='#2A2D2E',foreground='white')\n 
transiciones.grid(row=5,column=2,padx=20,sticky='nw')\n\n self.transiAFD = tk.Entry(master=self.panelDer2,width=120,bg='#343638',foreground='white',font=('Roboto Medium',16))\n self.transiAFD.configure(disabledbackground='#343638',disabledforeground='white')\n self.transiAFD.grid(row=6,column=2,columnspan=2,padx=20,sticky='nwe')\n self.agregarNota(self.transiAFD,'Ejemplo: A,0,B;A,1,C - origen,entrada,destino ; origen,entrada,destino')\n\n self.guardarAFD = tk.Button(master=self.panelDer2,text='Guardar AFD',font=('Roboto Medium',15),bg='#0059b3',activebackground='#0059b3',foreground='white',activeforeground='white',width=15,height=1,command=self.agregarAFD)\n self.guardarAFD.grid(row=7,column=0,columnspan=4,pady=(20,0),padx=20,sticky='nwe')\n\n # ====================\n self.validacionesAFD()\n\n def validacionesAFD(self):\n self.tituloAFD = tk.Label(master=self.panelDer2,text='Alfabeto del AFD: ',font=('Roboto Medium',16),background='#2A2D2E',foreground='white')\n self.tituloAFD.grid(row=8,column=2,padx=20,sticky='sw')\n\n style= ttk.Style()\n style.theme_use('clam')\n style.configure(\"TCombobox\", fieldbackground= \"#343638\", background= \"#fff\", selectforeground='white',activebackground='#343638',activeforeground='black',foreground='white')\n\n self.cbAFD = ttk.Combobox(master=self.panelDer2,values=[],font=('Roboto Medium',16))\n self.cbAFD.bind('<>',self.verAlfabeto)\n self.cbAFD.grid(row=9,column=0,columnspan=2,padx=20,pady=(10,10),sticky='we')\n self.cbAFD.set('Seleccione un AFD')\n\n self.cadenaAFD = tk.Entry(master=self.panelDer2,width=120,bg='#343638',foreground='white',font=('Roboto Medium',16))\n self.cadenaAFD.configure(disabledbackground='#343638',disabledforeground='white')\n self.cadenaAFD.grid(row=9,column=2,columnspan=2,padx=20,pady=(10,10),sticky='we')\n self.agregarNota(self.cadenaAFD,'Ingrese una cadena para validar el AFD')\n\n self.generarReporteAFD = tk.Button(master=self.panelDer2,text='Generar Reporte',font=('Roboto 
Medium',15),bg='#0059b3',activebackground='#0059b3',foreground='white',activeforeground='white',width=15,height=1,command=self.generarReportePdfAFD)\n self.generarReporteAFD.grid(row=10,column=0,columnspan=2,pady=(20,0),padx=20,sticky='nwe')\n\n self.validarCadAFD = tk.Button(master=self.panelDer2,text='Validar Cadena',font=('Roboto Medium',15),bg='#0059b3',activebackground='#0059b3',foreground='white',activeforeground='white',width=15,height=1,command=self.validarCadenaAFD)\n self.validarCadAFD.grid(row=10,column=2,columnspan=2,pady=(20,0),padx=20,sticky='nwe')\n\n self.rutaAFD = tk.Button(master=self.panelDer2,text='Mostrar Ruta',font=('Roboto Medium',15),bg='#0059b3',activebackground='#0059b3',foreground='white',activeforeground='white',width=15,height=1,command=self.generarRutaAFD)\n self.rutaAFD.grid(row=11,column=2,columnspan=2,pady=(20,0),padx=20,sticky='nwe')\n\n def agregarAFD(self):\n if self.nombreAFD.get().replace(' ','') == '' or self.estadosAFD.get().replace(' ','') == '' or self.alfabetoAFD.get().replace(' ','') == '' or self.eInicialAFD.get().replace(' ','') == '' or self.eAceptAFD.get().replace(' ','') == '' or self.transiAFD.get().replace(' ','') == '':\n messagebox.showinfo('Información','Todos los campos son obligatorios') \n else:\n for simbolo in self.alfabetoAFD.get().split(';'):\n for estado in self.estadosAFD.get().split(';'):\n if str(simbolo) == str(estado):\n messagebox.showinfo('Información',f'El simbolo {simbolo} es parte de los estados')\n return\n dup = [x for i, x in enumerate(self.alfabetoAFD.get().split(';')) if x in self.alfabetoAFD.get().split(';')[:i]]\n if len(dup) > 0:\n messagebox.showinfo('Información',f'El alfabeto contiene elementos repetidos {dup}')\n return\n\n self.dic = {}\n\n for estado in self.estadosAFD.get().split(';'):\n self.dic[estado] = {}\n\n transiciones = self.transiAFD.get().split(';')\n for transicion in transiciones:\n transicion = transicion.split(',')\n transicion[0] = transicion[0].replace(' ','')\n 
expresiones = []\n expresiones.append(transicion[1])\n expresiones.append(transicion[2])\n transicion.pop(2)\n transicion[1] = expresiones\n\n try:\n self.dic[transicion[0]][transicion[1][0]] = transicion[1][1]\n except:\n pass\n \n estados = []\n alfabeto = []\n for estado,value in self.dic.items():\n estados.append(estado)\n for entrada,destino in value.items():\n if not entrada in alfabeto:\n alfabeto.append(entrada)\n\n for estado,value in self.dic.items():\n for entrada,destino in value.items():\n if not destino in estados:\n estados.append(destino)\n\n for estado in estados:\n if not estado in self.estadosAFD.get().split(';'):\n messagebox.showinfo('Información',f'El estado {estado} no ha sido declarado')\n return\n for entrada in alfabeto:\n if not entrada in self.alfabetoAFD.get().split(';'):\n messagebox.showinfo('Información',f'El simbolo {entrada} no ha sido declarado como parte del alfabeto')\n return\n\n if not self.eInicialAFD.get() in self.estadosAFD.get().split(';'):\n messagebox.showinfo('Información',f'El estado inicial {self.eInicialAFD.get()} no es parte de los estados')\n elif set(self.eAceptAFD.get().split(';')).difference(set(self.estadosAFD.get().split(';'))):\n if len(set(self.eAceptAFD.get().split(';')).difference(set(self.estadosAFD.get().split(';')))) >= 2:\n messagebox.showinfo('Información',f\"Los estados de aceptación {set(self.eAceptAFD.get().split(';')).difference(set(self.estadosAFD.get().split(';')))} no han sido declarados\")\n else:\n messagebox.showinfo('Información',f\"El estado de aceptación {set(self.eAceptAFD.get().split(';')).difference(set(self.estadosAFD.get().split(';')))} no han sido declarado\")\n else:\n if self.ctrlAFD.agregarAFD(self.nombreAFD.get(),self.estadosAFD.get(),self.alfabetoAFD.get(),self.eInicialAFD.get(),self.eAceptAFD.get(),self.transiAFD.get().split(';'),self.dic):\n messagebox.showinfo('Información','Autómata creado exitosamente')\n #self.ctrlAFD.verAutomatas()\n self.limpiarFormAFD()\n self.nombAFD 
= []\n for i in range(len(self.ctrlAFD.automatas)):\n self.nombAFD.append(f'{i + 1} - {self.ctrlAFD.automatas[i].nombreAFD}')\n self.cbAFD.configure(values=self.nombAFD)\n else:\n messagebox.showerror('Error','El Autómata no es determinista')\n self.dic = {}\n\n def generarReportePdfAFD(self):\n if self.cbAFD.get() == 'Seleccione un AFD':\n messagebox.showinfo('Información','No se ha seleccionado ningún Autómata')\n else:\n cadena = self.cbAFD.get().split(' - ')\n indice = int(cadena[0]) - 1\n self.ctrlAFD.generarReporte(indice)\n self.cadenaAFD.delete(0,'end')\n self.tituloAFD.configure(text=f'Alfabeto del AFD:')\n self.cbAFD.set('Seleccione un AFD')\n\n def validarCadenaAFD(self):\n if self.cbAFD.get() == 'Seleccione un AFD':\n messagebox.showinfo('Información','No se ha seleccionado ningún Autómata')\n else:\n afd = self.cbAFD.get().split(' - ')\n indice = int(afd[0]) - 1\n if self.ctrlAFD.validarCadena(self.cadenaAFD.get(),indice):\n messagebox.showinfo('Información',f'La cadena es válida')\n else:\n messagebox.showerror('Error','La cadena no es válida')\n\n def generarRutaAFD(self):\n if self.cbAFD.get() == 'Seleccione un AFD':\n messagebox.showinfo('Información','No se ha seleccionado ningún Autómata')\n else:\n afd = self.cbAFD.get().split(' - ')\n indice = int(afd[0]) - 1\n if self.ctrlAFD.validarCadena(self.cadenaAFD.get(),indice):\n self.ctrlAFD.generarRuta(self.cadenaAFD.get(),indice)\n else:\n messagebox.showerror('Error','La cadena no es válida')\n\n def panelCrearGR(self):\n self.panelDer3.rowconfigure((0,1,2,3,4,5,6,7,8,9,10,11,12),weight=1)\n self.panelDer3.rowconfigure(13,weight=10)\n self.panelDer3.columnconfigure((0,1,2,3),weight=1)\n self.panelDer3.columnconfigure(4,weight=0)\n\n title1 = tk.Label(master=self.panelDer3,text='Crear Gramática Regular',font=('Roboto Medium',20),background='#2A2D2E',foreground='white')\n title1.grid(row=0,column=0,columnspan=4,pady=(15,0),padx=20,sticky='w')\n\n nombre = tk.Label(master=self.panelDer3,text='Nombre: 
',font=('Roboto Medium',16),background='#2A2D2E',foreground='white')\n nombre.grid(row=1,column=0,padx=20,sticky='nw')\n\n self.nombreGR = tk.Entry(master=self.panelDer3,width=120,bg='#343638',foreground='white',font=('Roboto Medium',16))\n self.nombreGR.configure(disabledbackground='#343638',disabledforeground='white')\n self.nombreGR.grid(row=2,column=0,columnspan=2,padx=20,sticky='nwe')\n\n noTerminales = tk.Label(master=self.panelDer3,text='No Terminales: ',font=('Roboto Medium',16),background='#2A2D2E',foreground='white')\n noTerminales.grid(row=1,column=2,padx=20,sticky='nw')\n\n self.noTerminalesGR = tk.Entry(master=self.panelDer3,width=120,bg='#343638',foreground='white',font=('Roboto Medium',16))\n self.noTerminalesGR.configure(disabledbackground='#343638',disabledforeground='white')\n self.noTerminalesGR.grid(row=2,column=2,columnspan=2,padx=20,sticky='nwe')\n self.agregarNota(self.noTerminalesGR,'Ejemplo: A;B;C;D (Separados por punto y coma)')\n\n terminales = tk.Label(master=self.panelDer3,text='Terminales: ',font=('Roboto Medium',16),background='#2A2D2E',foreground='white')\n terminales.grid(row=3,column=0,padx=20,sticky='nw')\n\n self.terminalesGR = tk.Entry(master=self.panelDer3,width=120,bg='#343638',foreground='white',font=('Roboto Medium',16))\n self.terminalesGR.configure(disabledbackground='#343638',disabledforeground='white')\n self.terminalesGR.grid(row=4,column=0,columnspan=2,padx=20,sticky='nwe')\n self.agregarNota(self.terminalesGR,'Ejemplo: 0;1;2 (Separados por punto y coma)')\n\n noTerminalInicial = tk.Label(master=self.panelDer3,text='No Terminal Inicial: ',font=('Roboto Medium',16),background='#2A2D2E',foreground='white')\n noTerminalInicial.grid(row=3,column=2,padx=20,sticky='nw')\n\n self.noTermIniGR = tk.Entry(master=self.panelDer3,width=120,bg='#343638',foreground='white',font=('Roboto Medium',16))\n self.noTermIniGR.configure(disabledbackground='#343638',disabledforeground='white')\n 
self.noTermIniGR.grid(row=4,column=2,columnspan=2,padx=20,sticky='nwe')\n self.agregarNota(self.noTermIniGR,'El estado inicial debe existir en los estados')\n\n producciones = tk.Label(master=self.panelDer3,text='Producciones: ',font=('Roboto Medium',16),background='#2A2D2E',foreground='white')\n producciones.grid(row=5,column=0,padx=20,sticky='nw')\n\n self.producGR = tk.Entry(master=self.panelDer3,width=120,bg='#343638',foreground='white',font=('Roboto Medium',16))\n self.producGR.configure(disabledbackground='#343638',disabledforeground='white')\n self.producGR.grid(row=6,column=0,columnspan=4,padx=20,sticky='nwe')\n self.agregarNota(self.producGR,'Ejemplo: A > 0 B;A > 1 C - No terminal > Expresión ; Expresión;No terminal > Expresión')\n\n self.guardarGR = tk.Button(master=self.panelDer3,text='Guardar GR',font=('Roboto Medium',15),bg='#0059b3',activebackground='#0059b3',foreground='white',activeforeground='white',width=15,height=1,command=self.agregarGR)\n self.guardarGR.grid(row=7,column=0,columnspan=4,pady=(20,0),padx=20,sticky='nwe')\n\n self.guardarGR = tk.Button(master=self.panelDer3,text='Guardar GR',font=('Roboto Medium',15),bg='#0059b3',activebackground='#0059b3',foreground='white',activeforeground='white',width=15,height=1,command=self.agregarGR)\n self.guardarGR.grid(row=7,column=0,columnspan=4,pady=(20,0),padx=20,sticky='nwe')\n\n # ====================\n self.validacionesGR()\n\n def validacionesGR(self):\n self.tituloGR = tk.Label(master=self.panelDer3,text='Terminales de la GR: ',font=('Roboto Medium',16),background='#2A2D2E',foreground='white')\n self.tituloGR.grid(row=8,column=2,padx=20,sticky='sw')\n\n style= ttk.Style()\n style.theme_use('clam')\n style.configure(\"TCombobox\", fieldbackground= \"#343638\", background= \"#fff\", selectforeground='white',activebackground='#343638',activeforeground='black',foreground='white')\n\n self.nombGR = []\n self.cbGR = ttk.Combobox(master=self.panelDer3,values=[],font=('Roboto Medium',16))\n 
self.cbGR.bind('<>',self.verTerminales)\n self.cbGR.grid(row=9,column=0,columnspan=2,padx=20,pady=(10,10),sticky='we')\n self.cbGR.set('Seleccione una GR')\n\n self.cadenaGR = tk.Entry(master=self.panelDer3,width=120,bg='#343638',foreground='white',font=('Roboto Medium',16))\n self.cadenaGR.configure(disabledbackground='#343638',disabledforeground='white')\n self.cadenaGR.grid(row=9,column=2,columnspan=2,padx=20,pady=(10,10),sticky='we')\n self.agregarNota(self.cadenaGR,'Ingrese una cadena para validar la GR')\n\n self.generarReporteGR = tk.Button(master=self.panelDer3,text='Generar Reporte',font=('Roboto Medium',15),bg='#0059b3',activebackground='#0059b3',foreground='white',activeforeground='white',width=15,height=1,command=self.generarReportePdfGR)\n self.generarReporteGR.grid(row=10,column=0,columnspan=2,pady=(20,0),padx=20,sticky='nwe')\n\n self.validarCadGR = tk.Button(master=self.panelDer3,text='Validar Cadena',font=('Roboto Medium',15),bg='#0059b3',activebackground='#0059b3',foreground='white',activeforeground='white',width=15,height=1,command=self.validarCadenaGR)\n self.validarCadGR.grid(row=10,column=2,columnspan=2,pady=(20,0),padx=20,sticky='nwe')\n\n self.rutaGR = tk.Button(master=self.panelDer3,text='Mostrar Ruta',font=('Roboto Medium',15),bg='#0059b3',activebackground='#0059b3',foreground='white',activeforeground='white',width=15,height=1,command=self.generarRutaGR)\n self.rutaGR.grid(row=11,column=2,columnspan=2,pady=(20,0),padx=20,sticky='nwe')\n\n def agregarGR(self):\n if self.nombreGR.get().replace(' ','') == '' or self.noTerminalesGR.get().replace(' ','') == '' or self.terminalesGR.get().replace(' ','') == '' or self.noTermIniGR.get().replace(' ','') == '' or self.producGR.get().replace(' ','') == '':\n messagebox.showinfo('Información','Todos los campos son obligatorios') \n else:\n if not self.noTermIniGR.get() in self.noTerminalesGR.get().split(';'):\n messagebox.showinfo('Información','El no terminal inicial no está declaro en los no 
terminales')\n return\n dup = []\n dup = [x for i, x in enumerate(self.noTerminalesGR.get().split(';')) if x in self.noTerminalesGR.get().split(';')[:i]]\n if len(dup) > 0:\n messagebox.showinfo('Información',f'Los no terminales contienen elementos repetidos {dup}')\n return\n for estado in self.noTerminalesGR.get().split(';'):\n for simbolo in self.terminalesGR.get().split(';'):\n if str(estado) == str(simbolo):\n messagebox.showinfo('Información',f'El terminal {simbolo} es parte de los no terminales')\n return\n dup = []\n dup = [x for i, x in enumerate(self.terminalesGR.get().split(';')) if x in self.terminalesGR.get().split(';')[:i]]\n if len(dup) > 0:\n messagebox.showinfo('Información',f'Los terminales contienen elementos repetidos {dup}')\n return\n\n #A > 0 B;A > 1 C;B > 0 A;B > 1 D;C > 0 D;C > 1 A;C > $;D > 0 C;D > 1 B\n self.dic = {}\n producciones = self.producGR.get().split(';')\n for produccion in producciones:\n produccion = produccion.split('>')\n produccion[0] = produccion[0].replace(' ','')\n expresiones = produccion[1].split(' ')\n expresiones = [s for s in expresiones if s]\n produccion[1] = expresiones\n\n if not self.existeEstado(produccion[0]):\n self.dic[produccion[0]] = {}\n\n try:\n self.dic[produccion[0]][produccion[1][0]] = produccion[1][1]\n except:\n self.dic[produccion[0]][produccion[1][0]] = 'ACEPTADO'\n\n #print(dic)\n noTerm = []\n termin = []\n eAcept = []\n for estado,value in self.dic.items():\n noTerm.append(estado)\n for entrada,destino in value.items():\n if entrada != '$':\n if not entrada in termin:\n termin.append(entrada)\n elif entrada == '$':\n eAcept.append(estado)\n for estado,value in self.dic.items():\n for entrada,destino in value.items():\n if entrada != '$':\n if not entrada in termin:\n termin.append(entrada)\n elif not destino in noTerm:\n noTerm.append(destino)\n\n for estado in noTerm:\n if not estado in self.noTerminalesGR.get().split(';'):\n messagebox.showinfo('Información',f'El no terminal {estado} no han 
sido declarado')\n return\n for entrada in termin:\n if not entrada in self.terminalesGR.get().split(';'):\n messagebox.showinfo('Información',f'El terminal {entrada} no han sido declarado')\n return\n \n self.ctrlGR.agregarGramatica(self.nombreGR.get(),self.noTerminalesGR.get(),eAcept,self.terminalesGR.get(),self.noTermIniGR.get(),self.dic)\n messagebox.showinfo('Información','Gramática creada exitosamente')\n self.limpiarFormGR()\n #self.ctrlGR.verGramaticas()\n self.nombGR = []\n for i in range(len(self.ctrlGR.gramaticas)):\n self.nombGR.append(f'{i + 1} - {self.ctrlGR.gramaticas[i].nombreGR}')\n self.cbGR.configure(values=self.nombGR)\n\n def existeEstado(self,nuevo):\n for estado in self.dic:\n if nuevo == estado:\n return True\n return False\n\n def generarReportePdfGR(self):\n if self.cbGR.get() == 'Seleccione una GR':\n messagebox.showinfo('Información','No se ha seleccionado ninguna gramática')\n else:\n cadena = self.cbGR.get().split(' - ')\n indice = int(cadena[0]) - 1\n self.ctrlGR.generarReporte(indice)\n self.cbGR.set('Seleccione una GR')\n self.tituloGR.configure(text='Terminales de la GR:')\n\n def validarCadenaGR(self):\n if self.cbGR.get() == 'Seleccione una GR':\n messagebox.showinfo('Información','No se ha seleccionado ninguna Gramática')\n else:\n gr = self.cbGR.get().split(' - ')\n indice = int(gr[0]) - 1\n if self.ctrlGR.validarCadena(self.cadenaGR.get(),indice):\n messagebox.showinfo('Información',f'La cadena es válida')\n else:\n messagebox.showerror('Error','La cadena no es válida')\n\n def generarRutaGR(self):\n if self.cbGR.get() == 'Seleccione una GR':\n messagebox.showinfo('Información','No se ha seleccionado ninguna Gramática')\n else:\n gr = self.cbGR.get().split(' - ')\n indice = int(gr[0]) - 1\n if self.ctrlGR.validarCadena(self.cadenaGR.get(),indice):\n self.ctrlGR.generarRuta(self.cadenaGR.get(),indice)\n else:\n messagebox.showerror('Error','La cadena no es valida')\n\n def panelAyudaAFD(self):\n 
self.panelDer4.rowconfigure((0,1,2,3,4,5,6,7,8,9,10,11,12),weight=1)\n self.panelDer4.rowconfigure(13,weight=10)\n self.panelDer4.columnconfigure((0,1,2,3),weight=1)\n self.panelDer4.columnconfigure(4,weight=0)\n\n texto = tk.Label(master=self.panelDer4,text='¿Qué es un Autómata Finito Determinista - AFD?',font=('Roboto Medium',20),background='#2A2D2E',foreground='white')\n texto.grid(row=0,column=0,columnspan=4,pady=(15,0),padx=20,sticky='we')\n\n texto = tk.Label(master=self.panelDer4,text='Un AFD tiene un conjunto finito de estados y un conjunto finito de símbolos de entrada. El término “determinista”\\nhace referencia al hecho de que para cada entrada sólo existe uno y sólo un estado al que el autómata puede hacer\\nla transición a partir de su estado actual. Un estado se diseña para que sea el estado inicial, y cero o más\\nestados para que sean estados de aceptación. Una función de transición determina cómo cambia el estado cada vez\\nque se procesa un símbolo de entrada.',font=('Roboto Medium',15),background='#2A2D2E',foreground='white')\n texto.grid(row=1,column=0,columnspan=4,rowspan=1,pady=(15,0),padx=20,sticky='wen')\n\n imagen = Image.open('images/afd.png')\n imagen = imagen.resize((500,400),Image.ANTIALIAS)\n imagen = ImageTk.PhotoImage(imagen)\n \n canva = Canvas(self.panelDer4)\n label = Label(self.panelDer4,image=imagen)\n label.img = imagen\n label.grid(row=3,column=0,columnspan=4,padx=20,pady=20)\n\n def panelAyudaGR(self):\n self.panelDer5.rowconfigure((0,1,2,3,4,5,6,7,8,9,10,11,12),weight=1)\n self.panelDer5.rowconfigure(13,weight=10)\n self.panelDer5.columnconfigure((0,1,2,3),weight=1)\n self.panelDer5.columnconfigure(4,weight=0)\n\n texto = tk.Label(master=self.panelDer5,text='¿Qué es una Gramática Regular - GR?',font=('Roboto Medium',20),background='#2A2D2E',foreground='white')\n texto.grid(row=0,column=0,columnspan=4,pady=(15,0),padx=20,sticky='we')\n\n texto = tk.Label(master=self.panelDer5,text='Una gramática regular es un cuádruplo (V, Σ, 
R, S) en donde:\\nV es un alfabeto de variables\\nΣ es un alfabeto de constantes\\nR, el conjunto de reglas, es un subconjunto finito de V × (ΣV ∪ Σ)\\nS, el símbolo inicial, es un elemento de V',font=('Roboto Medium',15),background='#2A2D2E',foreground='white')\n texto.grid(row=1,column=0,columnspan=4,rowspan=1,pady=(15,0),padx=20,sticky='wen')\n\n imagen = Image.open('images/gr.png')\n imagen = imagen.resize((500,400),Image.ANTIALIAS)\n imagen = ImageTk.PhotoImage(imagen)\n \n canva = Canvas(self.panelDer5)\n label = Label(self.panelDer5,image=imagen)\n label.img = imagen\n label.grid(row=3,column=0,columnspan=4,padx=20,pady=20)\n\n def opcion1(self):\n self.panelDer2.grid_remove()\n self.panelDer3.grid_remove()\n self.panelDer4.grid_remove()\n self.panelDer5.grid_remove()\n self.panelDer1.grid()\n\n def opcion2(self):\n self.panelDer1.grid_remove()\n self.panelDer3.grid_remove()\n self.panelDer4.grid_remove()\n self.panelDer5.grid_remove()\n self.panelDer2.grid()\n\n def opcion3(self):\n self.panelDer1.grid_remove()\n self.panelDer2.grid_remove()\n self.panelDer4.grid_remove()\n self.panelDer5.grid_remove()\n self.panelDer3.grid()\n\n def opcion4(self):\n self.panelDer1.grid_remove()\n self.panelDer2.grid_remove()\n self.panelDer3.grid_remove()\n self.panelDer5.grid_remove()\n self.panelDer4.grid()\n\n def opcion5(self):\n self.panelDer1.grid_remove()\n self.panelDer2.grid_remove()\n self.panelDer3.grid_remove()\n self.panelDer4.grid_remove()\n self.panelDer5.grid()\n\n def chooseFile(self):\n try:\n self.ruta.configure(state=tk.NORMAL)\n formatos = (\n (\"form files\",\"*.afd\"),\n (\"form files\",\"*.gre\"),\n )\n archivo = askopenfilename(\n title='Abrir Archivo',\n initialdir='',\n filetypes = formatos)\n if not archivo == '':\n self.ruta.delete(0,'end')\n self.ruta.insert(0,str(archivo))\n self.ruta.configure(state='disabled')\n extension = archivo.split('.')\n if extension[1] == 'afd':\n self.ctrlAFD.leerArchivo(archivo)\n 
self.ctrlAFD.reconocimientoAutomata()\n self.nombAFD = []\n for i in range(len(self.ctrlAFD.automatas)):\n self.nombAFD.append(f'{i + 1} - {self.ctrlAFD.automatas[i].nombreAFD}')\n self.cbAFD.configure(values=self.nombAFD)\n elif extension[1] == 'gre':\n self.ctrlGR.leerArchivo(archivo)\n self.ctrlGR.reconocimientoGramatica()\n self.nombGR = []\n for i in range(len(self.ctrlGR.gramaticas)):\n self.nombGR.append(f'{i + 1} - {self.ctrlGR.gramaticas[i].nombreGR}')\n self.cbGR.configure(values=self.nombGR)\n else:\n pass\n except:\n pass\n\n def agregarNota(self,componente,texto):\n self.myTip = Hovertip(componente,f'\\n {texto} \\n')\n\n def limpiarFormAFD(self):\n self.nombreAFD.delete(0,'end')\n self.estadosAFD.delete(0,'end')\n self.alfabetoAFD.delete(0,'end')\n self.eInicialAFD.delete(0,'end')\n self.eAceptAFD.delete(0,'end')\n self.transiAFD.delete(0,'end')\n\n def limpiarFormGR(self):\n self.nombreGR.delete(0,'end')\n self.noTerminalesGR.delete(0,'end')\n self.terminalesGR.delete(0,'end')\n self.noTermIniGR.delete(0,'end')\n self.producGR.delete(0,'end')\n\n def verAlfabeto(self,event):\n indice = int(self.cbAFD.get().split(' - ')[0]) - 1\n self.tituloAFD.configure(text=f'Alfabeto del AFD: {self.ctrlAFD.obtenerAlfabeto(indice)}')\n\n def verTerminales(self,event):\n indice = int(self.cbGR.get().split(' - ')[0]) - 1\n self.tituloGR.configure(text=f'Terminales de la GR: {self.ctrlGR.obtenerTerminales(indice)}')\n\nif __name__ == '__main__':\n app = App()\n #self.root.mainloop()\n app.root.mainloop()","repo_name":"brandonT2002/LFP_P1_202112030","sub_path":"Ventana.py","file_name":"Ventana.py","file_ext":"py","file_size_in_byte":37429,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"30494796339","text":"import numpy as np\nimport sys, json, copy, time, pickle\nimport qiskit\nfrom qiskit import IBMQ, QuantumCircuit, execute\nfrom qiskit.providers.ibmq.managed import IBMQJobManager\n\n\n# input ibmq 
credential\nIBMQ.load_account()\nprovider = IBMQ.enable_account(\"...\",hub='...', group='...', project='...')\nbackend = provider.get_backend('ibmq_montreal')\n\n\nfilename = 'data/' + 'ibmq_experiment_all_20220323_8530634712' \nwith open(filename, 'rb') as outfile:\n data = pickle.load(outfile)\ntoken = data[\"token\"]\n\njob_manager = IBMQJobManager()\n\ncb = data['cb']\nfor tag in cb:\n print(tag)\n job_set = job_manager.retrieve_job_set(job_set_id=cb[tag]['job_set_id'], provider=provider,refresh=True)\n results = job_set.results().combine_results()\n print(results)\n data['cb'][tag][\"result\"] = [results]\n\nint_cb = data['int_cb']\nfor tag in int_cb:\n print(tag)\n job_set = job_manager.retrieve_job_set(job_set_id=int_cb[tag]['job_set_id'], provider=provider,refresh=True)\n results = job_set.results().combine_results()\n print(results)\n data['int_cb'][tag][\"result\"] = [results]\n\nintc_cb = data['intc_cb']\nfor tag in intc_cb:\n print(tag)\n job_set = job_manager.retrieve_job_set(job_set_id=intc_cb[tag]['job_set_id'], provider=provider,refresh=True)\n results = job_set.results().combine_results()\n print(results)\n data['intc_cb'][tag][\"result\"] = [results]\n\nwith open(filename + '_full', 'wb') as outfile:\n pickle.dump(data, outfile)\n","repo_name":"csenrui/Pauli_Learnability","sub_path":"main_retrieve.py","file_name":"main_retrieve.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"14356270530","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 12 16:18:56 2020\n\n@author: ojaro\n\"\"\"\nfrom PyQt5.QtWidgets import QWidget,QLabel,QPushButton,QLineEdit,QHBoxLayout,QVBoxLayout,QSpacerItem\nfrom AES import AES\nfrom PyQt5.QtCore import pyqtSignal\nfrom PyQt5.QtCore import Qt\nimport EncryptWindow\nimport DecryptWindow\nfrom re import search,compile\n\nclass GUI(QWidget):\n hexPattern = compile(r\"^[0-9A-Fa-f]+$\")\n\n\n success_signal = 
pyqtSignal()\n def __init__(self, plaintext, key):\n super().__init__()\n \n \n \n self.aes = AES()\n \n self.setStyleSheet(\"QWidget {background-color:#34495e;}\")\n \n \n self.text = QLineEdit()\n self.text.setPlaceholderText(\"Plaintext/Ciphertext\")\n self.text.setStyleSheet(\"QLineEdit {color:white;font-size:25px; border:1px solid white;border-radius:5%;}\")\n self.text.setText(plaintext)\n\n self.key = QLineEdit()\n self.key.setStyleSheet(\"QLineEdit {color:white;font-size:25px; border:1px solid white;border-radius:5%;}\")\n \n self.key.setPlaceholderText(\"Key\")\n self.setFixedSize(500,400)\n self.key.setText(key)\n \n self.encrypt = QPushButton(\"Encrypt\")\n self.encrypt.setStyleSheet(\"QPushButton {text-decoration: none;border: none;padding: 12px 40px;font-size: 16px;background-color: green;color: #fff;border-radius: 5px;box-shadow: 7px 6px 28px 1px rgba(0, 0, 0, 0.24);cursor: pointer;outline: none;transition: 0.2s all;} QPushButton::pressed{background-color : #006400;}\")\n \n self.decrypt = QPushButton(\"Decrypt\")\n self.decrypt.setStyleSheet(\"QPushButton { text-decoration: none; border: none; padding: 12px 40px; font-size: 16px; background-color: #c0392b; color: #fff; border-radius: 5px; box-shadow: 7px 6px 28px 1px rgba(0, 0, 0, 0.24); cursor: pointer; outline: none; transition: 0.2s all; }QPushButton::pressed{background-color : #8b0000;} \")\n\n \n self.hbox = QHBoxLayout()\n self.hbox.addWidget(self.encrypt)\n self.hbox.addItem(QSpacerItem(20,5))\n self.hbox.addWidget(self.decrypt)\n \n\n self.vbox = QVBoxLayout()\n self.vbox.addItem(QSpacerItem(100,50))\n\n self.vbox.addWidget(self.text)\n self.vbox.addItem(QSpacerItem(100,50))\n\n self.vbox.addWidget(self.key)\n self.vbox.addItem(QSpacerItem(100,50))\n\n self.vbox.addItem(self.hbox)\n\n \n self.errorMessage = QLabel(\"\")\n self.errorMessage.setStyleSheet(\"QLabel {color:white;font-size:15px;}\")\n self.errorMessage.setAlignment(Qt.AlignCenter) \n self.vbox.addWidget(self.errorMessage)\n 
self.setLayout(self.vbox)\n \n self.encrypt.clicked.connect(lambda:self.encryptRequest())\n self.decrypt.clicked.connect(lambda:self.decryptRequest())\n \n self.show()\n \n self.setWindowTitle(\"AES\")\n \n \n \n self.success_signal.connect(lambda:self.cleanUp())\n \n def encryptRequest(self):\n plaintext = self.text.text()\n key = self.key.text()\n if plaintext==\"\" or key ==\"\":\n self.errorMessage.setText(\"Error empty field(s)!\")\n elif len(key)!=32 and len(key)!=48 and len(key)!=64:\n self.errorMessage.setText(\"Error invalid key size!\")\n elif len(plaintext)!=32:\n self.errorMessage.setText(\"Error invalid text size!\")\n \n elif not search(self.hexPattern, plaintext) or not search(self.hexPattern,key):\n self.errorMessage.setText(\"Error invalid character(s)!\")\n else:\n enc = EncryptWindow.EncryptWindow(plaintext, key)\n self.success_signal.emit()\n# self.aes.Encrypt(plaintext,key)\n \n \n \n \n def decryptRequest(self):\n ciphertext = self.text.text()\n key = self.key.text()\n if ciphertext==\"\" or key ==\"\":\n self.errorMessage.setText(\"Error empty field(s)!\")\n elif len(key)!=32 and len(key)!=48 and len(key)!=64:\n self.errorMessage.setText(\"Error invalid key size!\")\n elif len(ciphertext)!=32:\n self.errorMessage.setText(\"Error invalid text size!\")\n elif not search(self.hexPattern, ciphertext) or not search(self.hexPattern,key):\n self.errorMessage.setText(\"Error invalid character(s)!\")\n else:\n dec = DecryptWindow.DecryptWindow(ciphertext, key)\n self.success_signal.emit()\n# self.errorMessage.setText(self.aes.Decrypt(ciphertext,key))\n \n \n \n def cleanUp(self):\n self.close()\n self.deleteLater() \n","repo_name":"OmarJaroudi/AES","sub_path":"GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":4797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"7317780554","text":"k=int(input())\narr=list(map(int,input().split()))\nmin=arr[0]\ncount=0\nfor i in range(0,k):\n 
if min>arr[i]:\n min=arr[i]\nwhile min:\n count=0\n for j in range(0,k):\n if arr[j]%min==0:\n count+=1\n if count==k:\n print(min)\n break\n min-=1","repo_name":"SantoshKumar353/codemind-python","sub_path":"Minimum_Scale_length.py","file_name":"Minimum_Scale_length.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"40585820241","text":"from __future__ import print_function\r\nfrom itertools import permutations\r\nimport time;\r\n\r\ndef toString(List): \r\n return ''.join(List) \r\n\r\n\r\nf = open(\"Combinations.txt\", \"w\")\r\n\r\nstring = input(\"Enter the String : \")\r\ncond=input(\"Do you want to add @#$ in the String ? y for YES n for NO : \")\r\nif(cond=='y'):\r\n string=string+\"@#$\" \r\nletters = list(string)\r\nprint(\"\\nGenerating... \\n\")\r\nfor i in range(len(letters)):\r\n for c in permutations(letters, i):\r\n # print(c)\r\n f.write(toString(c)+'\\n')\r\nf.close();\r\nm=\" --- Combinations Generated ---\"\r\nblah=list(m)\r\nfor l in blah:\r\n print(l, end=\" \")\r\n time.sleep(0.2)\r\n\r\nprint(\"\\nCombinations are stored in a file named \\\"Combinaiton.txt\\\" \")\r\n","repo_name":"tanwanimohit/Password-Combinations","sub_path":"Password Combinations.py","file_name":"Password Combinations.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"12486545610","text":"#!/opt/homebrew/bin/python3\n\nimport math\n\nn = 1999999\nlistToN = [True] * (n+1)\n\nfor num in range(2,n+1):\n\tif listToN[num] == True:\n\t\tfor num2 in range(num * num,n + 1, num):\n\t\t\tlistToN[num2] = False \n\nsumOfPrime = [num for num in range(2, n + 1) if listToN[num] == 
True]\n\nprint(sum(sumOfPrime))","repo_name":"AluiQT/portfolio","sub_path":"javaeuler/euler10(sieve).py","file_name":"euler10(sieve).py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"70150626083","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom page.models import Category, Good\nfrom django.http import HttpResponse, Http404\nfrom django.shortcuts import render\nfrom django.core.paginator import InvalidPage, Paginator\n\n\ndef index(request, id):\n try:\n page_num = request.GET[\"page\"]\n except KeyError:\n page_num = 1\n cats = Category.objects.all().order_by(\"name\")\n if id == None:\n cat = Category.objects.first()\n else:\n cat = Category.objects.get(pk=id)\n paginator = Paginator(Good.objects.filter(category=cat).order_by(\"name\"), 3)\n try:\n goods = paginator.page(page_num)\n except InvalidPage:\n goods = paginator.page(1)\n # goods = Good.objects.filter(category=cat).order_by(\"name\")\n return render(request, \"index.html\", {\"category\": cat, \"cats\": cats, \"goods\": goods})\n\n\ndef good(request, id):\n try:\n page_num = request.GET[\"page\"]\n except KeyError:\n page_num = 1\n cats = Category.objects.all().order_by(\"name\")\n try:\n good = Good.objects.get(pk=id)\n except Good.DoesNotExist:\n raise Http404\n return render(request, \"good.html\", {\"cats\": cats, \"good\": good, \"pn\": page_num})\n\n\ndef category(request, id):\n try:\n category = Good.objects.get(pk=id)\n except:\n raise Http404\n s = category.name + \"

    \" + category.description\n return HttpResponse(s)\n","repo_name":"FrostRunner/voicemind","sub_path":"page/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"3529760147","text":"from PIL import Image, ImageDraw, ImageColor\nfrom math import floor\n\ninput_data = []\nwith open('labels.csv') as input:\n for line in input:\n \tline = line[:-1]\n \tinput_data.append(line.split(\" \"))\n\nnormal_info_size = 3\ncancer_info_size = 7\nfor i in range(1, 323):\n\tdirectory = \"UPNG/\"\n\tname = \"mdb\" + str(\"0\" * (3 - len(str(i)))) + str(i)\n\timage = Image.open(directory + name + \".png\")\n\twidth = image.width\n\theight = image.height\n\n\timage_info = input_data[i - 1]\n\tif len(image_info) == cancer_info_size:\n\t\tx = int(image_info[4])\n\t\ty = height - int(image_info[5])\n\t\tr = int(image_info[6])\n\n\t\timage.draft(\"RGB\", (width, height))\n\t\tdraw = ImageDraw.Draw(image)\n\t\tdraw.ellipse((x - r, y - r, x + r, y + r), fill=None, outline=\"blue\")\n\n\t\tdirectory = \"CIR_2/\"\n\t\timage.save(directory + name + \".png\")\n\n\timage.close()\n\n","repo_name":"gregorybchris/convolutional-mammogram-analysis","sub_path":"scripts/circleimages.py","file_name":"circleimages.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"54"} +{"seq_id":"24024663024","text":"class CFG:\n class data:\n batch_size = 16\n lr = 1e-3 # 5e-5\n epochs = 10\n epsilon = 1e-8\n MAX_LEN = 128 # max sentence length\n seed_val = 42 # random seed\n k_folds = 10\n hidden_size = 768 # hidden layer size (embedding size) for feedforward net\n PATH = \"./saved_models/hs.pth\"\n\n # defaults for CNN\n dropout = 0.4\n Ks = [3, 4, 5, 6, 7]\n kernel_num = 8 # number of filters for each conv layer\n input_shape = [-1, 1, 128, 
768]\n","repo_name":"Tushar-K24/hate-speech-detection","sub_path":"api/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"14966271005","text":"# Desenvolva um programa que leia 6 numeros inteiros e mostre a soma apenas daqueles que forem pares\n\nacumulador = 0\ncontador = 0\n\nfor c in range(6):\n n = int(input('Digite um numero:'))\n if n % 2 == 0:\n acumulador = acumulador + n\n contador = contador + 1\nprint(f'a soma dos valores pares é: {acumulador}, você digitou {contador} números pares.')\n\n","repo_name":"thaynahakan/Python","sub_path":"dowload arq/g50.py","file_name":"g50.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"19038963284","text":"#! /usr/bin/env python\n\n\n\nimport rospy # Importing rospy \nfrom sensor_msgs.msg import LaserScan # Importing LaserScan from sensor_msgs message to use for subscribing \nfrom geometry_msgs.msg import Twist # Importing Twist message to publish messages to the topic /cmd_vel\n\n\ndef callback(msg): \n print(\"I am in the call back funciton\")\n # Messages are received, callback is invoked with the message as the first argument. \n list = msg.ranges # Creating an list from the selectd portion of the array \n directions.angular.z = 1\n directions.linear.x = 1\n \n\n pub.publish(directions) # Using the publisher, values are assigned to the Twist message and then published \n print(\"I have sucessfullt published the new values \")\n '''\n if list[360] > 1: # Cases /// need to make sure that robot position is reset after each case \n print(\"robot is moving forward - no wall detected \")\n directions.angular.z = 0\n directions.linear.x = 0.25\n if list[360] < 1:\n print(\"wall detected! 
- will turn left\")\n directions.linear.x = 0\n directions.angular.z = 0.25\n if list[719] < 1:\n print(\"robot will turn left\")\n directions.linear.x = 0\n directions.angular.z = -0.25\n if list[0] < 1:\n print(\"robot will turn right\")\n directions.linear.x = 0\n directions.angular.z = 0.25\n '''\n\n\nwhile not rospy.is_shutdown(): # Create a loop that will go until someone stops the program execution\n print(\"I am at the beg of the code\")\n rospy.init_node('cmd_vel_publisher') \n print(\"I have initalized the robot_move code\")\n # Create node called 'move_robot'\n directions = Twist() # Creating an instance of Twist to tinker with and use as a subsciber \n \n pub = rospy.Publisher('/cmd_vel', Twist, queue_size=1) \n print(\"I am publishing to the cmd_vel topic\")\n rate = rospy.Rate(2)\n #directions.angular.z = 1\n #directions.linear.x = 1\n directions.angular.z = 0\n directions.linear.x = 1\n \n\n pub.publish(directions) \n print(\"I have sucessfully printed a message\")\n\n #pub.publish(directions) \n # Create a publisher oobject that is publishing a twist message to cmd_vel \n #rospy.init_node('robot_move') # Initiate a Node called 'odometry_subscriber'\n #rospy.Subscriber('/kobuki/laser/scan', LaserScan, callback)\n #print(\"I am subscribed to the lazer topic\")\n # Create a Subscriber that will listen to the \n rospy.spin() # simply keeps python from exiting until this node is stopped\n\n'''\n ***solution** \n#! 
/usr/bin/env python\nimport rospy\nfrom sensor_msgs.msg import LaserScan \nfrom geometry_msgs.msg import Twist\ndef callback(msg): \n print(msg.ranges[360]) #We print the distance to an obstacle in front of the robot\n#If the distance to an obstacle in front of the robot is bigger than 1 meter, the robot will move forward\n if msg.ranges[360] > 1:\n move.linear.x = 0.1\n move.angular.z = 0.0\n#If the distance to an obstacle in front of the robot is smaller than 1 meter, the robot will turn left\n if msg.ranges[360] < 1: \n move.linear.x = 0.0\n move.angular.z = 0.2\n \n#If the distance to an obstacle at the left side of the robot is smaller than 0.3 meters, the robot will turn right\n if msg.ranges[719] < 0.3:\n move.linear.x = 0.0\n move.angular.z = -0.2\n \n#If the distance to an obstacle at the right side of the robot is smaller than 0.3 meters, the robot will turn left\n if msg.ranges[0] < 0.3:\n move.linear.x = 0.0\n move.angular.z = 0.2\n \n pub.publish(move)\nrospy.init_node('topics_quiz_node')\nsub = rospy.Subscriber('/kobuki/laser/scan', LaserScan, callback) #We subscribe to the laser's topic\npub = rospy.Publisher('/cmd_vel', Twist)\nmove = Twist()\nrospy.spin() '''","repo_name":"gannalouis/ROS_Basics_Real_robot","sub_path":"src/part1/src/wall_follower.py","file_name":"wall_follower.py","file_ext":"py","file_size_in_byte":4259,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"15782470870","text":"from typing import List\nfrom collections import defaultdict\n\nclass Solution:\n def hasValidPath(self, grid: List[List[int]]) -> bool:\n graph = unionFind(len(grid), len(grid[0]))\n directions = {(0, 1), (1, 0)}\n\n def inbound(row, col):\n if 0 <= row < len(grid) and 0 <= col < len(grid[0]):\n return True\n return False\n\n right = {1: {1, 3, 5}, 2: {}, 3: {}, 4: {1, 3, 5}, 5: {}, 6: {1, 3, 5}}\n down = {1: {}, 2: {2, 5, 6}, 3: {2, 5, 6}, 4: {2, 5, 6}, 5: {}, 6: {}}\n\n for i in range(len(grid)):\n 
for j in range(len(grid[0])):\n for inc_i, inc_j in directions:\n if inbound(i + inc_i, j + inc_j) and grid[i + inc_i][j + inc_j] in right[grid[i][j]] and inc_j == 1:\n graph.union((i, j), (i + inc_i, j + inc_j))\n if inbound(i + inc_i, j + inc_j) and grid[i + inc_i][j + inc_j] in down[grid[i][j]] and inc_i == 1:\n graph.union((i, j), (i + inc_i, j + inc_j))\n\n return graph.find((0, 0)) == graph.find((len(grid) - 1, len(grid[-1]) - 1))\n\n\nclass unionFind:\n def __init__(self, row, col):\n self.parent = defaultdict(int)\n for i in range(row):\n for j in range(col):\n self.parent[(i, j)] = (i, j)\n self.rank = [[0 for _ in range(col)] for _ in range(row)]\n\n def find(self, root):\n if root == self.parent[root]:\n return root\n self.parent[root] = self.find(self.parent[root])\n return self.parent[root]\n\n def union(self, root1, root2):\n r1X, r1Y = self.find(root1)\n r2X, r2Y = self.find(root2)\n if self.rank[r1X][r1Y] < self.rank[r2X][r2Y]:\n self.parent[(r1X, r1Y)] = (r2X, r2Y)\n elif self.rank[r1X][r1Y] > self.rank[r2X][r2Y]:\n self.parent[(r2X, r2Y)] = (r1X, r1Y)\n else:\n self.parent[(r2X, r2Y)] = (r1X, r1Y)\n self.rank[r1X][r1Y] += 1\n\n def is_connected(self, root1, root2):\n return self.find(root1) == self.find(root2)","repo_name":"debiB/competitive-programming-","sub_path":"check-if-there-is-a-valid-path-in-a-grid.py","file_name":"check-if-there-is-a-valid-path-in-a-grid.py","file_ext":"py","file_size_in_byte":2077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"22577273614","text":"import pickle\nimport operator\nimport math\nimport codecs\nimport time\n\n\ndef load_graph(filename):\n with open(filename, 'rb') as fp:\n W = pickle.load(fp)\n return W\n\n\ndef load_raw_logs(input_file, user_index, POI_index):\n with codecs.open(input_file, 'r') as fr:\n train = {}\n index = 0\n for row in fr:\n cols = row.strip().split('\\t')\n index += 1\n if index % 10000 == 0:\n print(index)\n user = 
cols[user_index]\n item = cols[POI_index]\n if user not in train:\n train[user] = {}\n train[user][item] = 1\n\n if item not in train[user]:\n train[user][item] = 1\n\n return train\n\n\ndef item_phase(graph, node_id):\n pois = graph.neighbors(node_id)\n max_vote = float(len(pois))\n w_pois = {}\n for poi in pois:\n if poi not in w_pois:\n w_pois[poi] = {}\n poi_degree = graph.degree(poi)\n for n in graph.neighbors(poi):\n for new_poi in graph.neighbors(n):\n if poi == new_poi:\n continue\n\n if new_poi not in w_pois[poi]:\n w_pois[poi][new_poi] = 0\n w_pois[poi][new_poi] += 1\n\n\ndef user_phase(graph, node_id):\n pois = graph.neighbors(node_id)\n max_vote = float(len(pois))\n voters = {}\n for poi in pois:\n poi_degree = graph.degree(poi)\n for n in graph.neighbors(poi):\n if n not in voters:\n voters[n] = 0\n voters[n] += math.log(1 + poi_degree)\n\n scores = {}\n for voter in voters:\n pois = graph.neighbors(voter)\n similarity = voters[voter]\n for poi in pois:\n if poi in scores:\n scores[poi] += similarity / math.sqrt(max_vote * graph.degree(voter))\n else:\n scores[poi] = similarity / math.sqrt(max_vote * graph.degree(voter))\n\n return scores\n\n\n\n\nif __name__ == '__main__':\n train_file = '../data/SG_foursquare/train.txt'\n graph_file = 'SG.graph'\n\n user_logs = load_raw_logs(train_file, 0, 1)\n recommend_graph = load_graph(graph_file)\n start_time = time.time()\n for user in user_logs:\n print(user)\n scores = user_phase(recommend_graph, user)\n sorted_scores = sorted(scores.items(), key=operator.itemgetter(1), reverse=True)\n\n end_time = time.time()\n print(\"--- %s seconds ---\" % (end_time - start_time))\n\n\n","repo_name":"ycjeanlin/Recommendation-Graph","sub_path":"src/Graph_CF_test.py","file_name":"Graph_CF_test.py","file_ext":"py","file_size_in_byte":2476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"3327722380","text":"\nfrom siriuspy.namesys import join_name as 
_join_name\nimport pyaccel as _pyaccel\n\n\n_family_segmentation = {\n 'B-1': 20, 'B-2': 20, 'B': 20, 'QF': 2, 'QD': 1, 'SD': 1, 'QS': 1,\n 'SF': 1, 'CH': 1, 'CV': 1,\n 'BPM': 1, 'Scrn': 1, 'DCCT': 1, 'TunePkup': 1, 'TuneShkr': 1, 'GSL': 1,\n 'P5Cav': 1, 'start': 1, 'BEND': 14,\n 'InjKckr': 1, 'EjeKckr': 1\n }\n\n_discipline_mapping = {\n 'B-1': 'PS',\n 'B-2': 'PS',\n 'QF': 'PS',\n 'QD': 'PS',\n 'QS': 'PS',\n 'SD': 'PS',\n 'SF': 'PS',\n 'CH': 'PS',\n 'CV': 'PS',\n 'InjKckr': 'PU',\n 'EjeKckr': 'PU',\n 'BPM': 'DI',\n 'DCCT': 'DI',\n 'Scrn': 'DI',\n 'TunePkup': 'DI',\n 'TuneShkr': 'DI',\n 'P5Cav': 'RF',\n }\n\nfamily_mapping = {\n 'B': 'dipole',\n 'B-1': 'dipole',\n 'B-2': 'dipole',\n\n 'QF': 'quadrupole',\n 'QD': 'quadrupole',\n 'QS': 'skew_quadrupole',\n\n 'SD': 'sextupole',\n 'SF': 'sextupole',\n\n 'InjKckr': 'pulsed_magnet',\n 'EjeKckr': 'pulsed_magnet',\n\n 'BPM': 'bpm',\n 'DCCT': 'beam_current_monitor',\n 'Scrn': 'beam_profile_monitor',\n 'TunePkup': 'tune_pickup',\n 'TuneShkr': 'tune_shaker',\n\n 'CH': 'horizontal_corrector',\n 'CV': 'vertical_corrector',\n\n 'P5Cav': 'rf_cavity',\n }\n\n\ndef families_dipoles():\n \"\"\".\"\"\"\n return ['B']\n\n\ndef families_quadrupoles():\n \"\"\".\"\"\"\n return ['QF', 'QD']\n\n\ndef families_sextupoles():\n \"\"\".\"\"\"\n return ['SF', 'SD']\n\n\ndef families_horizontal_correctors():\n \"\"\".\"\"\"\n return ['CH']\n\n\ndef families_vertical_correctors():\n \"\"\".\"\"\"\n return ['CV']\n\n\ndef families_skew_correctors():\n \"\"\".\"\"\"\n return ['QS']\n\n\ndef families_rf():\n \"\"\".\"\"\"\n return ['P5Cav']\n\n\ndef families_pulsed_magnets():\n \"\"\".\"\"\"\n return ['InjKckr', 'EjeKckr']\n\n\ndef families_di():\n \"\"\".\"\"\"\n return ['DCCT', 'BPM', 'Scrn', 'TunePkup', 'TuneShkr', 'GSL']\n\n\ndef get_section_name_mapping(lattice):\n \"\"\".\"\"\"\n lat = lattice[:]\n section_map = ['' for i in range(len(lat))]\n\n # find where the nomenclature starts counting and shift the lattice:\n start = 
_pyaccel.lattice.find_indices(lat, 'fam_name', 'start')[0]\n b1 = _pyaccel.lattice.find_indices(lat, 'fam_name', 'B')\n if b1[0] > start:\n ind_shift = (b1[-1] + 1) # Next element of last b1\n else:\n for i in b1[::-1]: # except there is a b1 before start\n if i < start:\n ind_shift = i + 1\n break\n lat = _pyaccel.lattice.shift(lat, ind_shift)\n\n # Find indices important to define the change of the names of\n # the subsections\n b = _pyaccel.lattice.find_indices(lat, 'fam_name', 'B')\n qf = _pyaccel.lattice.find_indices(lat, 'fam_name', 'QF')\n b_nrsegs = len(b)//50\n\n # divide the ring in 50 sectors defined by the b1 dipoles:\n Sects = []\n ini = 0\n for i in range(len(b)//b_nrsegs):\n end = b[(i+1)*b_nrsegs-1] + 1\n Sects.append(list(range(ini, end)))\n ini = end\n\n # Names of the subsections:\n sub_secs = ['U', 'D']\n\n for i, sec in enumerate(Sects, 1):\n # conditions that define change in subsection name:\n # define changes to ''\n sec_b = [x for x in b if sec[0] <= x <= sec[-1]]\n relev_inds = [sec_b[-1]]\n # define changes to '' and D\n sec_qf = [x for x in qf if sec[0] <= x <= sec[-1]]\n relev_inds += [sec_qf[-1]]\n relev_inds.sort()\n # fill the section_map variable\n ref = 0\n for j in sec:\n section_map[(ind_shift+j) % len(lat)] = \"{0:02d}\".format(i)\n section_map[(ind_shift+j) % len(lat)] += sub_secs[ref]\n if j >= relev_inds[ref]:\n ref += 1\n\n return section_map\n\n\ndef get_family_data(lattice):\n \"\"\"Get pyaccel lattice model index and segmentation for each family name.\n\n Keyword argument:\n lattice -- lattice model\n\n Returns dict.\n \"\"\"\n latt_dict = _pyaccel.lattice.find_dict(lattice, 'fam_name')\n section_map = get_section_name_mapping(lattice)\n\n # Fill the data dictionary with index info\n data = {}\n for key, idx in latt_dict.items():\n nr = _family_segmentation.get(key)\n if nr is None:\n continue\n # Create a list of lists for the indexes\n data[key] = [idx[i*nr:(i+1)*nr] for i in range(len(idx)//nr)]\n\n # quadrupoles knobs 
for optics correction\n idx = []\n fams = ['QF', 'QD']\n for fam in fams:\n idx.extend(data[fam])\n data['QN'] = sorted(idx, key=lambda x: x[0])\n\n # sbs - sextupoles knobs for optics correction\n idx = []\n fams = ['SD', 'SF']\n for fam in fams:\n idx.extend(data[fam])\n data['SN'] = sorted(idx, key=lambda x: x[0])\n\n # Dipole Families for power supplies\n idx = []\n fams = ['B']\n for fam in fams:\n idx.extend(data[fam])\n data['B-1'] = sorted(idx, key=lambda x: x[0])\n data['B-2'] = sorted(idx, key=lambda x: x[0])\n\n # ## Now organize the data dictionary:\n new_data = dict()\n for key, idx in data.items():\n # find out the name of the section each element is installed\n secs = [section_map[i[0]] for i in idx]\n\n # find out if there are more than one element per section and\n # attribute a number to it\n num = len(secs)*['']\n if len(secs) > 1:\n j = 1\n if secs[0] == secs[1]:\n num[0] = '{0:d}'.format(j)\n j += 1\n for i in range(1, len(secs)-1):\n if secs[i] == secs[i+1] or secs[i] == secs[i-1]:\n num[i] = '{0:d}'.format(j)\n\n if secs[i] == secs[i+1]:\n j += 1\n else:\n j = 1\n\n if secs[-1] == secs[-2]:\n num[-1] = '{0:d}'.format(j)\n\n new_data[key] = {'index': idx, 'subsection': secs, 'instance': num}\n\n # get control system devname\n for key in new_data:\n if key not in _discipline_mapping:\n continue\n dis = _discipline_mapping[key]\n dta = new_data[key]\n devnames = []\n subs = dta['subsection']\n insts = dta['instance']\n for sub, inst in zip(subs, insts):\n devnames.append(\n _join_name(sec='BO', dis=dis, sub=sub, idx=inst, dev=key))\n new_data[key]['devnames'] = devnames\n\n # girders\n girder = get_girder_data(lattice)\n if girder is not None:\n new_data['girder'] = girder\n\n return new_data\n\n\ndef get_girder_data(lattice):\n \"\"\".\"\"\"\n data = []\n girders = _pyaccel.lattice.find_indices(lattice, 'fam_name', 'girder')\n if not girders:\n return None\n\n idx = list(range(girders[-1], len(lattice))) + list(range(girders[0]))\n 
data.append(dict({'index': idx}))\n\n gir = girders[1:-1]\n gir_ini = gir[0::2]\n gir_end = gir[1::2]\n for ini, end in zip(gir_ini, gir_end):\n idx = list(range(ini, end+1))\n data.append(dict({'index': idx}))\n\n return data\n","repo_name":"lnls-fac/pymodels","sub_path":"pymodels/BO_V06_01/families.py","file_name":"families.py","file_ext":"py","file_size_in_byte":6916,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"32308496626","text":"import pandas as pd\nimport numpy as np\nimport pyedflib ## 读取BDF数据 \nimport matplotlib.pyplot as plt\n\n# pytorch相关\nimport torch\nimport torchvision\nimport torch.nn as nn\n\nimport torch.nn.functional as F\nimport torch.utils.data as data\nimport torch.utils.data.dataset as Dataset\nimport torch.utils.data.dataloader as DataLoader\n\n# 信号处理\nfrom scipy import signal\nfrom scipy.fftpack import fft,ifft,fftshift\nfrom scipy.signal import welch\n\nimport random\n\n# 网络结构\nfrom torchsummary import summary\nfrom torch.autograd import Variable\n\n# python自带工具包\nfrom functools import reduce\nfrom operator import __add__\n\nfrom sklearn import preprocessing\n\nfrom model import *\n\n####################################################################################\n####################################################################################\n##################################### Get Data #####################################\n\n## 统计每个label出现次数\ndef get_label_dic(arr):\n arr = np.array(arr)\n key = np.unique(arr)\n result = {}\n for k in key:\n mask = (arr == k)\n arr_new = arr[mask]\n v = arr_new.size\n result[k] = v\n return result\n\n## 读取BDF文件数据,bdf_file为待读取通道名字,name为待读取通道名字, start和end为读取时间范围\ndef LoadBDF(bdf_file, name = \"EXG2\", start = None, end = None):\n with pyedflib.EdfReader(bdf_file) as f:\n status_index = f.getSignalLabels().index('Status')\n sample_frequency = f.samplefrequency(status_index)\n status_size = f.samples_in_file(status_index)\n status = 
np.zeros((status_size), dtype = 'float64')\n f.readsignal(status_index, 0, status_size, status)\n status = status.round().astype('int')\n nz_status = status.nonzero()[0]\n\n video_start = nz_status[0]\n video_end = nz_status[-1]\n\n index = f.getSignalLabels().index(name)\n sample_frequency = f.samplefrequency(index)\n\n video_start_seconds = video_start / sample_frequency\n\n if start is not None:\n start += video_start_seconds\n start *= sample_frequency\n if start < video_start:\n start = video_start\n start = int(start)\n else:\n start = video_start\n \n if end is not None:\n end += video_start_seconds\n end *= sample_frequency\n if end > video_end:\n end = video_end\n end = int(end)\n else:\n end = video_end\n \n# PhysicalMax = f.getPhysicalMaximum(index)\n# PhysicalMin = f.getPhysicalMinimum(index)\n# DigitalMax = f.getDigitalMaximum(index)\n# DigitalMin = f.getDigitalMinimum(index)\n\n# scale_factor = (PhysicalMax - PhysicalMin) / (DigitalMax - DigitalMin)\n# dc = PhysicalMax - scale_factor * DigitalMax\n\n container = np.zeros((end - start + 1), dtype = 'float64')\n f.readsignal(index, start, end - start + 1, container)\n# container = container * scale_factor + dc\n\n return container, sample_frequency\n\ndef get_eeg_data(path):\n\n# chan_list = [\"Fp1\",\"AF3\",\"F3\",\"F7\",\"FC1\",\"FC5\",\"T7\",\"C3\",\"CP1\",\"CP5\",\"P7\",\"P3\",\"PO3\",\"O1\",\n# \"Fp2\",\"AF4\",\"F4\",\"F8\",\"FC2\",\"FC6\",\"T8\",\"C4\",\"CP2\",\"CP6\",\"P8\",\"P4\",\"PO4\",\"O2\",\n# \"Fz\",\"Cz\",\"Pz\",\"Oz\"]\n chan_list = [\"Fp1\",\"AF3\",\"F3\",\"F7\",\"FC5\",\"FC1\",\"C3\",\"T7\",\"CP5\",\"CP1\",\"P3\",\"P7\",\"PO3\",\"O1\",\n \"Oz\",\"Pz\",\"Fp2\",\"AF4\",\"Fz\",\"F4\",\"F8\",\"FC6\",\"FC2\",\"Cz\",\"C4\",\"T8\",\"CP6\",\"CP2\",\"P4\",\"P8\",\"PO4\",\"O2\"]\n\n eeg_data = []\n\n # status, freq = LoadBDF(path,\"Status\")\n # status_pd = pd.Series(status)\n # valid_index = status_pd[status_pd fmin) - 1\n ind_max = np.argmax(f > fmax) - 1\n\n return 
Pxx[:,ind_min:ind_max].sum(1)\n\ndef re_data2(trials, labels, wid_len, step_len):\n \n new_trials = []\n new_labels = []\n trial_index = 0\n min_max_scaler = preprocessing.MinMaxScaler()\n \n for trial in trials:\n for wid_index in np.arange((len(trial)-wid_len)//step_len):\n# print(trial[wid_index*wid_len:(wid_index+1)*wid_len,:].shape)\n new_trial = trial[wid_index*step_len:wid_index*step_len+wid_len,:]\n new_trial_filter = filter_data(0.5,50,new_trial)\n new_trial_notch = filter_data_notch(50,5,new_trial_filter)\n new_trials.append(min_max_scaler.fit_transform(new_trial_notch))\n new_labels.append(labels[trial_index])\n trial_index = trial_index+1\n return np.asarray(new_trials), np.asarray(new_labels)\n\ndef balance_data_and_re(eeg_trial_list, labels, window_len, overlap, split_rate):\n \n label0 = np.where(labels==0)[0]\n label1 = np.where(labels==1)[0]\n label2 = np.where(labels==2)[0]\n \n eeg_trials = []\n for i in np.arange(len(eeg_trial_list)):\n eeg_trials.append(np.asarray(eeg_trial_list[i]))\n \n label0_len = 0\n label1_len = 0\n label2_len = 0\n trials0 = []\n trials1 = []\n trials2 = []\n for i in np.arange(len(eeg_trials)):\n if i in label0:\n label0_len = label0_len + eeg_trials[i].shape[0]\n trials0.append(eeg_trials[i])\n elif i in label1:\n label1_len = label1_len + eeg_trials[i].shape[0]\n trials1.append(eeg_trials[i])\n else:\n label2_len = label2_len + eeg_trials[i].shape[0]\n trials2.append(eeg_trials[i])\n label_len = label0_len+label1_len+label2_len\n label_percent = (label0_len/label_len,label1_len/label_len,label2_len/label_len)\n \n step1_len = int(window_len*(1-overlap))\n step2_len = int(step1_len/label_percent[0]*label_percent[1])\n step3_len = int(step1_len/label_percent[0]*label_percent[2])\n \n new_trials0,new_labels0 = re_data2(trials0, labels[label0], window_len, step1_len)\n new_trials1,new_labels1 = re_data2(trials1, labels[label1], window_len, step2_len)\n new_trials2,new_labels2 = re_data2(trials2, labels[label2], 
window_len, step3_len)\n \n train_x = np.concatenate([new_trials0[:int(len(new_trials0)*split_rate),:,:],new_trials1[:int(len(new_trials1)*split_rate),:,:],new_trials2[:int(len(new_trials2)*split_rate),:,:]])\n train_y = np.concatenate([new_labels0[:int(len(new_labels0)*split_rate)],new_labels1[:int(len(new_labels1)*split_rate)],new_labels2[:int(len(new_labels2)*split_rate)]])\n test_x = np.concatenate([new_trials0[int(len(new_trials0)*split_rate):,:,:],new_trials1[int(len(new_trials1)*split_rate):,:,:],new_trials2[int(len(new_trials2)*split_rate):,:,:]])\n test_y = np.concatenate([new_labels0[int(len(new_labels0)*split_rate):],new_labels1[int(len(new_labels1)*split_rate):],new_labels2[int(len(new_labels2)*split_rate):]])\n \n train_x = train_x[:,np.newaxis,:,:].transpose(0,3,1,2)\n train_y = train_y.reshape(-1,1)\n test_x = test_x[:,np.newaxis,:,:].transpose(0,3,1,2)\n test_y = test_y.reshape(-1,1)\n \n return train_x, train_y, test_x, test_y\n\n\ndef my_norm_data(trial_data):\n trial_norm_result = []\n for i in np.arange(trial_data.shape[1]):\n trial_norm_result.append(trial_data[:,i])\n return np.asarray(trial_norm_result)\n\ndef split_train_test(re_eeg_data, labels, split_rate):\n \n norm_re_eeg_data = []\n for i in np.arange(len(re_eeg_data)):\n norm_re_eeg_data.append(my_norm_data(re_eeg_data[i]))\n re_eeg_data = np.asarray(norm_re_eeg_data)\n \n random.seed(10)\n \n label0_index = np.asarray(random.sample(list(np.where(labels==0)[0]), int((labels==0).sum()*(1-split_rate))))\n label1_index = np.asarray(random.sample(list(np.where(labels==1)[0]), int((labels==1).sum()*(1-split_rate))))\n label2_index = np.asarray(random.sample(list(np.where(labels==2)[0]), int((labels==2).sum()*(1-split_rate))))\n \n test_index = np.concatenate([label0_index,label1_index,label2_index])\n train_index = np.setdiff1d(np.arange(len(labels)),test_index)\n \n train_x = re_eeg_data[train_index][:,:,np.newaxis,:]\n train_y = labels[train_index].reshape(-1,1)\n\n test_x = 
re_eeg_data[test_index][:,:,np.newaxis,:]\n test_y = labels[test_index].reshape(-1,1)\n \n return train_x, train_y, test_x, test_y\n\n####################################################################################\n####################################################################################\n############################# Get Time Series Features #############################\n\n## data:可以是一个trail数据,也可以是单个通道数据\n## return: trail/channel 部分统计学特征\ndef get_statistical_feature(data):\n data_std = data.std(axis=0)\n data_mean = data.mean(axis=0)\n data_max = data.max(axis=0)\n data_min = data.min(axis=0)\n data_percentile = np.percentile(data, [25, 50, 75], axis=0).flatten()\n# data_percentile_25 = np.percentile(data, [25, 50, 75], axis=0)[0]\n# data_percentile_50 = np.percentile(data, [25, 50, 75], axis=0)[1]\n# data_percentile_75 = np.percentile(data, [25, 50, 75], axis=0)[2]\n data_negative_rate = (data<0).sum(axis=0)/len(data)\n return np.concatenate((data_std,data_mean,data_max,data_min,data_percentile,data_negative_rate),axis=0)\n\n## data:可以是一个trail数据,也可以是单个通道数据\n## return: trail/channel一阶差分绝对值的平均值\ndef first_order_difference(data):\n N = len(data)\n tmp = 0\n for i in np.arange(N-1):\n tmp += np.abs(data[i+1]-data[i])\n res = tmp/N\n return res\n\n## data:可以是一个trail数据,也可以是单个通道数据\n## return: trail/channel二阶差分绝对值的平均值\ndef second_order_difference(data):\n N = len(data)\n tmp = 0\n for i in np.arange(N-2):\n tmp += np.abs(data[i+2]-data[i])\n res = tmp/(N-1)\n return res\n\n## data:可以是一个trail数据,也可以是单个通道数据\n## return: trail/channel归一化的一阶差分\n## 使用该函数时保证前面有求一阶差分绝对值的平均值函数即first_order_difference函数存在\ndef norm_first_order_difference(data):\n return first_order_difference(data)/np.std(data,axis=0)\n\n## data:可以是一个trail数据,也可以是单个通道数据\n## return: trail/channel归一化的二阶差分\n## 使用该函数时保证前面有求二阶差分绝对值的平均值函数即second_order_difference函数存在\ndef norm_second_order_difference(data):\n return second_order_difference(data)/np.std(data,axis=0)\n\n## data:可以是一个trail数据,也可以是单个通道数据\n## 
return: trail/channel的时域能量\ndef get_engery(data):\n N = len(data)\n tmp = 0\n for i in np.arange(N):\n tmp += (data[i])*(data[i])\n return tmp\n\n## data:可以是一个trail数据,也可以是单个通道数据\n## return: trail/channel的功率\ndef get_power(data):\n return get_engery(data)/len(data)\n\n## data���可以是一个trail数据,也可以是单个通道数据\n## return: trail/channel的Hjorth参数特征-activity\ndef get_hjorth_activity(data):\n N = len(data)\n tmp = 0\n avg_s = np.average(data,axis=0)\n for i in np.arange(N):\n tmp += ((data[i])-avg_s)*((data[i])-avg_s)\n res = tmp/N\n return res\n\n## data:单个trail数据\n## return: trail的Hjorth参数特征-mobility&complexity\ndef get_hjorth_mobility_complexity(data):\n D = np.diff(data,axis=0)\n D = np.insert(D,0,0,axis=0)\n \n N = len(data)\n \n M2 = np.sum(D ** 2, axis = 0) / N\n TP = np.sum(data ** 2, axis = 0)\n M4 = 0\n for i in range(N-1):\n M4 += (D[i+1] - D[i]) ** 2\n M4 = M4 / N\n \n mobility = np.sqrt(M2 / TP)\n complexity = np.sqrt(M4 * TP / M2 / M2)\n \n return mobility, complexity\n\n## data:单个trail数据\n## return: trail的高阶过零分析(HOC)统计量\ndef get_hoc(data):\n nzc = []\n for i in range(10):\n curr_diff = np.diff(data, n=i)\n\n x_t = curr_diff >= 0 # binary time series signal\n x_t = np.diff(x_t) # taking diff of x_t\n x_t = np.abs(x_t) # taking abs value\n\n count = np.count_nonzero(x_t)\n nzc.append(count)\n return nzc\n\ndef get_other_time_sereis_features(data):\n fod = first_order_difference(data)\n sod = second_order_difference(data)\n nfod = norm_first_order_difference(data)\n nsod = norm_second_order_difference(data)\n egy = get_engery(data)\n pwr = get_power(data)\n act = get_hjorth_activity(data)\n mob, compl = get_hjorth_mobility_complexity(data)\n hoc = get_hoc(data)\n return np.concatenate((fod, sod, nfod, nsod, egy, pwr, act, mob, compl, hoc), 
axis=0)","repo_name":"IntelliSense-Interactive-Group/MAHNOB-HCI-Strat-Simple","sub_path":"utils2.py","file_name":"utils2.py","file_ext":"py","file_size_in_byte":23698,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"54"} +{"seq_id":"24343067624","text":"#!/usr/bin/env python3\n# coding = utf-8\n__all__ = [\"ElastciSearch\"]\n__doc__ = \"\"\"\n\nElasticSearch 的操作\nos:linux;app:wordpress;ip:210.43.32.32/16;\n\"\"\"\n\nfrom elasticsearch import Elasticsearch\nfrom config.baseconfig import ELASTICSEARCH_HOST_LIST, ES_INDEX_NAME, ES_DOC_TYPE\nimport re\n\n\nclass ElastciSearch(object):\n\n def __init__(self):\n self.es = Elasticsearch(hosts=ELASTICSEARCH_HOST_LIST)\n self.index_name = ES_INDEX_NAME\n self.doc_type = ES_DOC_TYPE\n # if not self.es.indices.exists(index=self.index_name):\n # self.create_mapping()\n self.search_type = [\"os\", \"ip\", \"app\", \"title\", \"port\", \"statecode\", \"protocol\", \"domain\"]\n self.switch = {\n \"os\": self.getos,\n \"app\": self.getapp,\n \"ip\": self.getip,\n \"title\": self.gettitle,\n \"port\": self.getport,\n \"statecode\": self.getstatecode,\n \"protocol\": self.getprotocolmsg,\n \"domain\": self.getinfofordomain,\n }\n\n def search(self, datas, page):\n \"\"\"\n datas is like os:linux & app:wordpress & ip:210.43.32.30/26;\n or os:linux & app:wordpress & ip:210.43.32.30/26\n \"\"\"\n if page < 0:\n page = 1\n return self._analysis(datas, page)\n\n def _analysis(self, datas, page):\n \"\"\"\n 分析搜索语句,返回es搜索的json\n :param datas:str\n :return: dict\n \"\"\"\n datas = datas.lower()\n if \":\" not in datas:\n return self.getall(datas, page)\n datas = datas.split(\"&\")\n if \"\" in datas:\n datas.remove(\"\")\n keys = []\n values = []\n for data in datas:\n key, value = data.split(\":\")\n key = key.strip()\n value = value.strip()\n if key not in self.search_type:\n return False, None\n keys.append(key)\n values.append(value)\n data = {}\n # 简单查询\n if len(keys) == 1:\n data = 
self.switch[keys[0]](values[0], page)\n else:\n # 组合查询\n # ip os app port code title 以后可以加一个非 ! 取反的判断,这个版本先不加\n datas = {}\n for i in range(len(keys)):\n datas[keys[i]] = values[i]\n data = self.combination(datas=datas, page=page)\n if data:\n res = self.es.search(index=self.index_name, doc_type=self.doc_type, body=data)\n return res[\"hits\"], keys\n else:\n return False, keys\n\n def getos(self, value, page):\n data = {\n \"query\": {\n \"multi_match\": {\n \"query\": value,\n \"fields\": [\"OS^4\", \"SERVER^3\", \"TITLE^2\", \"CONTENT^1\"]\n }\n },\n \"from\": (page - 1) * 10,\n \"size\": 10\n }\n return data\n\n def getip(self, value, page):\n if \"/\" in value:\n pattern = re.compile('(([25[0-5]|2[0-4]\\d|((1\\d{2})|([1-9]?\\d)))\\.){3}([25[0-5]|2[0-4]\\d|((1\\d{2})|([1-9]?\\d)))\\/(16|24)')\n if pattern.match(value):\n cidr = value.split(\"/\")[-1]\n ips = []\n if cidr == \"16\":\n ip = \".\".join(value.split(\".\")[:2]) + \".\"\n for i in range(0, 256):\n for j in range(0, 256):\n ips.append(ip + str(i) + \".\" + str(j))\n elif cidr == \"24\":\n ip = \".\".join(value.split(\".\")[:3]) + \".\"\n for i in range(0,256):\n ips.append(ip + str(i))\n data = {\n \"query\": {\n \"terms\": {\n \"HOST\": ips\n }\n },\n \"from\": (page - 1) * 10,\n \"size\": 10\n }\n return data\n else:\n return False\n else:\n pattern = re.compile('(([25[0-5]|2[0-4]\\d|((1\\d{2})|([1-9]?\\d)))\\.){3}([25[0-5]|2[0-4]\\d|((1\\d{2})|([1-9]?\\d)))')\n if pattern.match(value):\n data = {\n \"query\": {\n \"term\": {\n \"HOST\": value\n }\n },\n \"from\": (page - 1) * 10,\n \"size\": 10\n }\n return data\n else:\n return False\n\n def getapp(self, value, page):\n data = {\n \"query\": {\n \"multi_match\": {\n \"query\": value,\n \"fields\": [\"BANNER^5\", \"SERVER^4\", \"TITLE^3\", \"CONTENT^2\"]\n }\n },\n \"from\": (page - 1) * 10,\n \"size\": 10\n }\n return data\n\n def gettitle(self, value, page):\n data = {\n \"query\": {\n \"match\": {\n \"TITLE\": value\n }\n },\n \"from\": (page - 
1) * 10,\n \"size\": 10\n }\n return data\n\n def getall(self, value, page):\n data = {\n \"query\": {\n \"multi_match\": {\n \"query\": value,\n \"fields\": [\"TITLE^6\", \"HEADERS^5\", \"CONTENT^4\", \"SERVER^3\", \"BANNER^2\", \"EXTRAINFO^1\"]\n }\n },\n \"from\": (page-1)*10,\n \"size\": 10\n }\n res = self.es.search(index=self.index_name, doc_type=self.doc_type, body=data)\n return res[\"hits\"], value\n\n def getport(self, value, page):\n data = {\n \"query\": {\n \"match\": {\n \"PORT\": value\n }\n },\n \"from\": (page - 1) * 10,\n \"size\": 10\n }\n return data\n\n def getstatecode(self, value, page):\n data = {\n \"query\": {\n \"match\": {\n \"STATE_CODE\": value\n }\n },\n \"from\": (page - 1) * 10,\n \"size\": 10\n }\n return data\n\n def getinfofordomain(self, value, page):\n import socket\n ip = socket.gethostbyname(value)\n return self.getip(ip, page)\n\n def getprotocolmsg(self, value, page):\n data = {\n \"query\": {\n \"match\": {\n \"PROTOCOL\": value\n }\n },\n \"from\": (page - 1) * 10,\n \"size\": 10\n }\n return data\n\n # 获取一个ip的所有信息\n def getipmsg(self, value):\n data = {\n \"query\": {\n \"term\": {\n \"HOST\": {\n \"value\": value\n }\n }\n }\n }\n res = self.es.search(index=self.index_name, doc_type=self.doc_type, body=data)\n return res[\"hits\"]\n\n def combination(self, datas=None, page=None):\n \"\"\"\n 组合查询\n \"\"\"\n must = []\n for key, value in datas.items():\n must.append(self.switch[key](value, page)[\"query\"])\n fiter = must.pop()\n data = {\n \"query\": {\n \"bool\": {\n \"must\": must\n ,\n \"filter\": fiter\n }\n }\n }\n return data\n\n\n def delete(self, datas):\n pass\n\n\ndef main():\n es = ElastciSearch()\n # res = es.search(\"statecode:200\", 1)\n res = es.search(\"linux\", 1)\n print(res)\n\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"imjdl/SearchApp","sub_path":"esapi/ElasticSearch.py","file_name":"ElasticSearch.py","file_ext":"py","file_size_in_byte":7625,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"54"} +{"seq_id":"15176048026","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom scipy.fftpack import fft\nimport math\nimport timeit\nimport heapq\n\nfrom numpy import array, sign, zeros\nfrom scipy.interpolate import interp1d\nimport scipy.signal\nfrom scipy.stats import pearsonr,spearmanr,kendalltau\nN = 600\nT = 1.0 / 800.0\nx = np.linspace(0.0, N*T, N)\n\n\n# y1 = np.sin(50.0 * 2.0*np.pi*x) + 0.5*np.sin(80.0 * 2.0*np.pi*x)\n# y2 = np.sin(55.0 * 2.0*np.pi*x) + 0.5*np.sin(80.0 * 2.0*np.pi*x)\n\ny1 = np.array([0.309016989,0.587785244,0.809016985,0.95105651,1,0.951056526,\n0.809017016,0.587785287,0.30901704,5.35898E-08,0,0,\n0,0,0,0,0,0,\n0,0,0,0,0,0,\n0,0,0,0,0,0])+10\n\ny2 = np.array([\n0.343282816,0.686491368,0.874624132,0.99459642,1.008448609,\n1.014252458,0.884609221,0.677632906,0.378334666,0.077878732,\n0.050711886,0.066417083,0.088759401,0.005440732,0.04225661,\n0.035349939,0.0631196,0.007566056,0.053183895,0.073143706,\n0.080285063,0.030110227,0.044781145,0.01875573,0.08373928,\n0.04550342,0.038880858,0.040611891,0.046116826,0.087670453,\n])\n\ny3 = np.array([\n0.343282816,0.686491368,0.874624132,0.99459642,1.008448609,\n1.014252458,0.884609221,0.677632906,0.378334666,0.077878732,\n0.050711886,0.066417083,0.088759401,0.005440732,0.04225661,\n0.035349939,0.0631196,0.007566056,0.053183895,0.073143706,\n0.080285063,0.030110227,0.044781145,0.01875573,0.08373928,\n0.04550342,0.038880858,0.040611891,0.046116826,0.087670453,\n]).reshape(-1,5)\n\nprint(y3.shape)\nprint(y3)\n\ndef Modified_Z(data):\n c = 1.4826\n median = np.median(data)\n # print(median)\n # print(\"median.shape\",median.shape)\n dev_med = np.array(data) -median\n # print(\"dev_med.shape\",dev_med.shape)\n mad = 
np.median(np.abs(dev_med))\n if mad!=0:\n \n z_score = dev_med/(c*mad)\n else : \n df = pd.DataFrame(data)\n meanAD = df.mad().values\n z_score = dev_med/(1.253314*meanAD)\n \n return z_score\n \n\n\na = np.array([1,2])\nb = np.array([5,6])\n\n\ndef general_equation(first_x,first_y,second_x,second_y):\n # 斜截式 y = kx + b \n A = second_y-first_y\n B = first_x-second_x\n C = second_x * first_y - first_x * second_y\n k = -1 * A / B\n b = -1 * C / B\n return k, b\n \ndef cal(a,b,n):\n sum_s12 = 0 \n sum_s1s1 = 0\n sum_s2s2 = 0 \n sum_s1 = 0\n sum_s2 = 0 \n # temp1 = 0\n # temp2 = 0\n delta = 0.0001\n \n for i in range(0,n):\n sum_s12+=a[i]*b[i]\n sum_s1+=a[i]\n sum_s2+=b[i]\n sum_s2s2+=b[i]*b[i]\n sum_s1s1+=a[i]*a[i]\n \n temp1 = n*sum_s1s1-sum_s1*sum_s1\n temp2 = n*sum_s2s2-sum_s2*sum_s2 \n if( (temp1>-delta and temp1-delta and temp2=target):\r\n pass\r\n else:\r\n if(target-i in dict.keys()):\r\n print(max(dict[i],dict[target-i]),min(dict[i],dict[target-i]))\r\n return max(dict[i],dict[target-i]),min(dict[i],dict[target-i])\r\n print(-1)\r\n return -1\r\n\r\nmSolu=Solution()\r\nmSolu.TwoSum(A=[100, 4, 200, 6, 3, 2],target=102)\r\n'''\r\n\r\n#网上的哈希表,很简洁,边建hash表边判断\r\nclass Solution:\r\n # @return a tuple, (index1, index2)\r\n def twoSum(self, num, target):\r\n dict = {}\r\n for i in range(len(num)):\r\n x = num[i]\r\n if target-x in dict:\r\n return (dict[target-x]+1, i+1)\r\n dict[x] = i\r\nmSolu=Solution()\r\nB=mSolu.twoSum(num=[100, 4, 101, 6, 1, 2],target=102)\r\nprint(B)\r\n\r\n'''\r\n#左右夹逼O(n2)\r\n1,由于要找到符合题意的数组元素的下标,所以先要将原来的数组深拷贝一份,然后排序。\r\n\r\n2,然后在排序后的数组中找两个数使它们相加为target。这个思路比较明显:\r\n使用两个指针,一个指向头,一个指向尾,两个指针向中间移动并检查两个指针指向的数的和是否为target。如果找到了这两个数,再将这两个数在原数组中的位置找出来就可以了。\r\n\r\n3,要注意的一点是:在原来数组中找下标时,需要一个从头找,一个从尾找,要不无法通过。\r\n如这个例子:numbers=[0,1,2,0]; target=0。如果都从头开始找,就会有问题。\r\n\r\n***while i