diff --git "a/1878.jsonl" "b/1878.jsonl"
new file mode 100644
--- /dev/null
+++ "b/1878.jsonl"
@@ -0,0 +1,712 @@
+{"seq_id":"645101009","text":"import json\nimport requests\nimport utils\nimport logging\nlogger = logging.getLogger(__name__)\n\nAPI_HOST = 'https://webprod.plosjournals.org/api'\n\nclass Rhyno(object):\n def __init__(self, host=API_HOST, verify_ssl=False):\n self.host = host\n self.verify_ssl=verify_ssl\n\n '''EXCEPTIONS'''\n class Base400Error(Exception):\n def __init__(self, message):\n Exception.__init__(self, \"Server responded with a 400: %s\" % message)\n\n class Base405Error(Exception):\n def __init__(self, message):\n Exception.__init__(self, \"Server responded with a 405: %s\" % message)\n\n class Base404Error(Exception):\n def __init__(self, message):\n Exception.__init__(self, \"Server responded with a 404: %s\" % message)\n \n class Base500Error(Exception):\n def __init__(self, message):\n Exception.__init__(self, \"Server responded with a 500: %s\" % message) \n\n @staticmethod\n def handle_error_codes(r):\n if r.status_code == 400:\n raise Rhyno.Base400Error(r.content)\n if r.status_code == 405:\n raise Rhyno.Base405Error(r.content)\n if r.status_code == 404:\n raise Rhyno.Base404Error(r.content)\n if r.status_code == 500:\n raise Rhyno.Base500Error(r.content)\n\n def ingestibles(self, verbose=False):\n '''\n returns list of ingestible DOIs as unicode\n '''\n r = requests.get(self.host + '/ingestibles/', verify=self.verify_ssl)\n if verbose:\n print(utils.report(\"GET /ingestibles/\", r))\n return json.loads(r.content)\n\n def ingest(self, doi, force_reingest=None, verbose=False):\n '''\n attempts to ingest ingestible article by DOI\n returns article metadata dict if successful\n '''\n payload = {\n 'name': doi\n }\n if force_reingest:\n payload['force_reingest'] = True\n r = requests.post(self.host + '/ingestibles', data=payload, verify=self.verify_ssl)\n if verbose:\n print(utils.report(\"POST /ingestibles/ %s\" % pretty_dict_repr(payload), r))\n\n self.handle_error_codes(r)\n return r.content\n\n def ingest_zip(self, archive_name, force_reingest=False, verbose=False):\n raise NotImplementedError\n \"\"\"\n try:\n archive = open(archive_name, 'rb')\n except IOError as e:\n print(e)\n return -1\n files = {'archive': archive}\n payload = None\n if force_reingest:\n payload = {'force_reingest': True} \n r = requests.post(self.host + '/zip/', files=files, data=payload, verify=self.verify_ssl)\n if verbose:\n print(utils.report(\"POST /zip/ %s\"% utils.pretty_dict_repr(files), r))\n self.handle_error_codes(r)\n return json.loads(r.content)\n \"\"\"\n\n def get_metadata(self, doi, verbose=False):\n r = requests.get(self.host + '/articles/' + doi, verify=self.verify_ssl)\n if verbose:\n print(utils.report(\"GET /articles/%s\" % doi, r))\n self.handle_error_codes(r) \n return json.loads(r.content)\n\n def _get_state(self, doi, verbose=False):\n r = requests.get(self.host + '/articles/%s?state' % doi, verify=self.verify_ssl)\n if verbose:\n print(utils.report(\"GET /articles/%s?state\" % doi, r))\n self.handle_error_codes(r)\n return json.loads(r.content)\n \n def is_published(self, doi, verbose=False):\n return self._get_state(doi, verbose)['state'] == 'published'\n\n def get_crossref_syndication_state(self, doi, verbose=False):\n return self._get_state(doi, verbose)['crossRefSyndicationState']\n\n def get_pmc_syndication_state(self, doi, verbose=False):\n return self._get_state(doi, verbose)['pmcSyndicationState']\n\n def _base_publish(self, doi, publish=True, 
syndicate_all=False, verbose=False):\n if publish:\n state = 'published'\n else:\n state = 'ingested'\n payload = {\n 'state': state\n }\n\n if syndicate_all:\n syndications = {\n 'CROSSREF': {'status': 'IN_PROGRESS'},\n 'PMC': {'status': 'IN_PROGRESS'},\n 'PUBMED': {'status': 'IN_PROGRESS'}\n }\n payload['syndications'] = syndications\n\n r = requests.patch(self.host + '/articles/%s' % doi, data=json.dumps(payload), verify=self.verify_ssl)\n if verbose:\n print(utils.report(\"PATCH /articles/%s\" % doi, r))\n self.handle_error_codes(r) \n return json.loads(r.content)\n\n def publish(self, doi, verbose=False):\n self._base_publish(doi, publish=True, verbose=verbose)\n\n def production_publish(self, doi, verbose=False):\n self._base_publish(doi, publish=True, syndicate_all=True, verbose=verbose)\n\n def unpublish(self, doi, verbose=False):\n self._base_publish(doi, publish=False, verbose=verbose)\n\n def get_journals(self, verbose=False):\n r = requests.get(self.host + \"/journals\", verify=self.verify_ssl)\n if verbose:\n print(utils.report(\"GET /journals\", r))\n self.handle_error_codes(r)\n return json.loads(r.content)\n\n def read_journal(self, journal_key, verbose=False):\n r = requests.get(self.host + \"/journals/%s\" % journal_key, verify=self.verify_ssl)\n if verbose:\n print(utils.report(\"GET /journals/%s\" % journal_key, r))\n self.handle_error_codes(r)\n return json.loads(r.content)\n\n def create_volume(self, journal_key, volume_uri, display_name, image_uri, verbose=False):\n payload = {\n 'volumeUri': volume_uri,\n 'displayName': display_name,\n 'imageUri': image_uri,\n }\n r = requests.post(self.host + \"/journals/%s\" % journal_key, data=json.dumps(payload), verify=self.verify_ssl)\n if verbose:\n print(utils.report(\"POST /journals/%s\" % journal_key, r))\n self.handle_error_codes(r)\n return r.content\n\n def get_volume(self, volume_uri, verbose=False):\n r = requests.get(self.host + \"/volumes/%s\" % volume_uri, verify=self.verify_ssl)\n if verbose:\n print(utils.report(\"GET /volume/%s\" % volume_uri, r))\n self.handle_error_codes(r)\n return json.loads(r.content)\n\n def create_issue(self, volume_uri, issue_uri, display_name, image_uri, verbose=False):\n payload = {\n 'issueUri': issue_uri,\n 'displayName': display_name,\n 'imageUri': image_uri,\n 'respectOrder': True,\n }\n r = requests.post(self.host + \"/volumes/%s\" % volume_uri, data=json.dumps(payload), verify=self.verify_ssl)\n if verbose:\n print(utils.report(\"POST /volumes/%s\" % volume_uri, r))\n self.handle_error_codes(r)\n return r.content\n\n def modify_issue(self, issue_uri, display_name, image_uri, article_order, verbose=False):\n payload = {\n 'respectOrder': True,\n 'issueUri': issue_uri,\n 'displayName': display_name,\n 'imageUri': image_uri,\n 'articleOrder': article_order,\n }\n r = requests.patch(self.host + \"/issues/%s\" % issue_uri, data=json.dumps(payload), verify=self.verify_ssl)\n if verbose:\n print(utils.report(\"POST /issues/%s\" % issue_uri, r))\n self.handle_error_codes(r)\n return r.content\n","sub_path":"rhyno/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":7488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"308139071","text":"import pandas as pd\nimport numpy as np\nimport re\nimport glob\nimport math\nfrom math import log\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import AutoMinorLocator\nfrom matplotlib.offsetbox import AnchoredText\nfrom sklearn.linear_model import LinearRegression\nfrom scipy 
import stats\n\nfrom Josie17_Functions import Calc_average_profile\nfrom Josie17_Functions import Plot_compare_profile_plots\nfrom Josie17_Functions import Plot_compare_profile_plots_updated\nfrom Josie17_Functions import Calc_RDif\nfrom Josie17_Functions import meanfunction\nfrom Josie18_Functions import Calc_average_profile_PO3\n\ndef simpleplot(x0, y0, fit0, x1,y1,fit1, x2,y2,fit2, x3,y3,fit3, x4, y4, fit4, label_pl0, label_pl1, label_pl2, label_pl3, label_pl4, plname, pltitle ):\n\n fig,ax = plt.subplots()\n plt.xlim(0,30)\n plt.ylim(0,30)\n plt.xlabel('OPM (mPa)')\n plt.ylabel(r'PO3 (mPa)')\n plt.title(pltitle)\n plt.grid(True)\n \n ax.tick_params(axis='both', which='both', direction='in')\n ax.yaxis.set_ticks_position('both')\n ax.xaxis.set_ticks_position('both')\n\n ax.yaxis.set_minor_locator(AutoMinorLocator(10))\n ax.xaxis.set_minor_locator(AutoMinorLocator(10))\n\n x = []\n\n plt.scatter(x0, y0, s = 0.1)\n plt.plot( x0, fit0,'-.', label = label_pl0, linewidth = 3.0)\n plt.scatter(x1, y1, s = 0.1)\n plt.plot( x1, fit1,'-.', label = label_pl1, linewidth = 3.0)\n plt.scatter(x2, y2, s = 0.1)\n plt.plot( x2, fit2,'-.', label = label_pl2, linewidth = 3.0)\n plt.scatter(x3, y3, s = 0.1)\n plt.plot( x3, fit3,'-.', label = label_pl3, linewidth = 3.0)\n plt.plot( x4, fit4,'-.', color = 'black', label = label_pl4, linewidth = 2.0)\n ax.legend(loc='upper left', frameon=True, fontsize = 'small')\n\n plt.savefig('/home/poyraden/Josie17/Plots/Calibration/Scatter_PerSim' + plname +'.pdf')\n plt.savefig('/home/poyraden/Josie17/Plots/Calibration/Scatter_PerSim' + plname +'.eps')\n\n #plt.show()\n\n \n\ndef scatterfitplot_persim(category, simarray,dfm, pltitle, plname ):\n#scatterfitplot_persim(asp_1p01p0_sim, f_sp_1p01p0_sim,df,'SP 1.0% 1.0B', 'SP_1p01p0')\n\n df_c = {}\n p = [0]* 4; f_sp = [0]*4; filter_c = [0]*4\n\n #po3[sim,pressure]\n simlen = len(simarray)\n presize = 5\n \n p_0 = (dfm.Pair <= 999) & (dfm.Pair > 500) \n p_1 = (dfm.Pair <= 500) & (dfm.Pair > 100) \n p_2 = (dfm.Pair <= 100) & (dfm.Pair > 30) \n p_3 = (dfm.Pair <= 30) & (dfm.Pair > 1)\n p_4 = (dfm.Pair <= 999) & (dfm.Pair > 1)\n\n #dfm_p0 = dfm[category & p_0];dfm_p1 = dfm[category & p_1];dfm_p2 = dfm[category & p_2];dfm_p3 = dfm[category & p_3];dfm_p4 = dfm[category & p_4]; \n\n label_p0 = \"999>= Pair > 500 \"; label_p1 = \"500>= Pair > 100 \"; label_p2 = \"100>= Pair > 30 \"\n label_p3 = \"30>= Pair > 1 \"\n\n\n dfm_0 = {}; dfm_1 = {}; dfm_2 = {}; dfm_3 = {}; dfm_4 = {};\n\n opm_0 = [0]*simlen; opm_1 =[0]*simlen; opm_2 = [0]*simlen; opm_3 = [0]*simlen; opm_4 = [0]*simlen\n po3_0 = [0]*simlen; po3_1 =[0]*simlen; po3_2 = [0]*simlen; po3_3 = [0]*simlen; po3_4 = [0]*simlen\n dp_0 = [0]*simlen; dp_1 =[0]*simlen; dp_2 = [0]*simlen; dp_3 = [0]*simlen; dp_4 = [0]*simlen\n slope_0 = [0]*simlen; slope_1 =[0]*simlen; slope_2 = [0]*simlen; slope_3 = [0]*simlen; slope_4 = [0]*simlen\n intercept_0 = [0]*simlen; intercept_1 =[0]*simlen; intercept_2 = [0]*simlen; intercept_3 = [0]*simlen; intercept_4 = [0]*simlen\n r_value_0 = [0]*simlen; r_value_1 =[0]*simlen; r_value_2 = [0]*simlen; r_value_3 = [0]*simlen; r_value_4 = [0]*simlen\n p_value_0 = [0]*simlen; p_value_1 =[0]*simlen; p_value_2 = [0]*simlen; p_value_3 = [0]*simlen; p_value_4 = [0]*simlen\n std_err_0 = [0]*simlen; std_err_1 =[0]*simlen; std_err_2 = [0]*simlen; std_err_3 = [0]*simlen; std_err_4 = [0]*simlen\n po3_fit_0 = [0]*simlen; po3_fit_1 =[0]*simlen; po3_fit_2 = [0]*simlen; po3_fit_3 = [0]*simlen; po3_fit_4 = [0]*simlen\n \n label_pl_0 = ['']*simlen; label_pl_1 =['']*simlen; 
label_pl_2 = ['']*simlen; label_pl_3 = ['']*simlen; label_pl_4 = ['']*simlen\n plotname = ['']*simlen; plottitle = ['']*simlen\n\n slope_0_all = []; slope_1_all =[]; slope_2_all = []; slope_3_all =[]; slope_4_all = [];\n intercept_0_all = []; intercept_1_all =[]; intercept_2_all = []; intercept_3_all =[]; intercept_4_all = [];\n std_err_0_all = []; std_err_1_all =[]; std_err_2_all = []; std_err_3_all =[]; std_err_4_all = [];\n\n\n for j in range(simlen):\n print('simarray', simarray[j])\n dfm_0[j] = dfm[(dfm.Sim == simarray[j]) & p_0 & category]\n dfm_1[j] = dfm[(dfm.Sim == simarray[j]) & p_1 & category]\n dfm_2[j] = dfm[(dfm.Sim == simarray[j]) & p_2 & category]\n dfm_3[j] = dfm[(dfm.Sim == simarray[j]) & p_3 & category]\n dfm_4[j] = dfm[(dfm.Sim == simarray[j]) & p_4 & category]\n\n po3_0[j] = np.asarray(dfm_0[j].PO3.tolist())\n opm_0[j] = np.asarray(dfm_0[j].PO3_OPM.tolist())\n po3_1[j] = np.asarray(dfm_1[j].PO3.tolist())\n opm_1[j] = np.asarray(dfm_1[j].PO3_OPM.tolist())\n po3_2[j] = np.asarray(dfm_2[j].PO3.tolist())\n opm_2[j] = np.asarray(dfm_2[j].PO3_OPM.tolist())\n po3_3[j] = np.asarray(dfm_3[j].PO3.tolist())\n opm_3[j] = np.asarray(dfm_3[j].PO3_OPM.tolist())\n po3_4[j] = np.asarray(dfm_4[j].PO3.tolist())\n opm_4[j] = np.asarray(dfm_4[j].PO3_OPM.tolist())\n\n dp_0[j] = len(po3_0[j]); dp_1[j] = len(po3_1[j]); dp_2[j] = len(po3_2[j]); dp_3[j] = len(po3_3[j]); dp_4[j] = len(po3_4[j]);\n\n slope_0[j], intercept_0[j], r_value_0[j], p_value_0[j], std_err_0[j] = stats.linregress(opm_0[j],po3_0[j])\n slope_1[j], intercept_1[j], r_value_1[j], p_value_1[j], std_err_1[j] = stats.linregress(opm_1[j],po3_1[j])\n slope_2[j], intercept_2[j], r_value_2[j], p_value_2[j], std_err_2[j] = stats.linregress(opm_2[j],po3_2[j])\n slope_3[j], intercept_3[j], r_value_3[j], p_value_3[j], std_err_3[j] = stats.linregress(opm_3[j],po3_3[j])\n slope_4[j], intercept_4[j], r_value_4[j], p_value_4[j], std_err_4[j] = stats.linregress(opm_4[j],po3_4[j])\n\n slope_0_all.append(slope_0[j])\n intercept_0_all.append(intercept_0[j])\n std_err_0_all.append(std_err_0[j])\n\n slope_1_all.append(slope_1[j])\n intercept_1_all.append(intercept_1[j])\n std_err_1_all.append(std_err_1[j])\n\n slope_2_all.append(slope_2[j])\n intercept_2_all.append(intercept_2[j])\n std_err_2_all.append(std_err_2[j])\n\n slope_3_all.append(slope_3[j])\n intercept_3_all.append(intercept_3[j])\n std_err_3_all.append(std_err_3[j])\n\n slope_4_all.append(slope_4[j])\n intercept_4_all.append(intercept_4[j])\n std_err_4_all.append(std_err_4[j])\n\n \n\n po3_fit_0[j] = slope_0[j] * opm_0[j] + intercept_0[j]\n label_pl_0[j] = label_p0 + \",slope =\" + str(round(slope_0[j],2)) + \", int. =\" + str(round(intercept_0[j],2))+ \", DP= \" + str(dp_0[j])\n po3_fit_1[j] = slope_1[j] * opm_1[j] + intercept_1[j]\n label_pl_1[j] = label_p1 + \",slope =\" + str(round(slope_1[j],2)) + \", int. =\" + str(round(intercept_1[j],2))+ \", DP= \" + str(dp_1[j])\n po3_fit_2[j] = slope_2[j] * opm_2[j] + intercept_2[j]\n label_pl_2[j] = label_p2 + \",slope =\" + str(round(slope_2[j],2)) + \", int. =\" + str(round(intercept_2[j],2))+ \", DP= \" + str(dp_2[j])\n po3_fit_3[j] = slope_3[j] * opm_3[j] + intercept_3[j]\n label_pl_3[j] = label_p3 + \",slope =\" + str(round(slope_3[j],2)) + \", int. =\" + str(round(intercept_3[j],2))+ \", DP= \" + str(dp_3[j])\n po3_fit_4[j] = slope_4[j] * opm_4[j] + intercept_4[j]\n label_pl_4[j] = \"All, slope =\" + str(round(slope_4[j],2)) + \", int. =\" + str(round(intercept_4[j],2))\n\n #label_pl4 = \"All, slope =\" + str(round(slope,2)) + \" int. 
=\" + str(round(intercept[4],2))\n\n for p in range(simlen):\n\n plotname[p] = plname + ' Sim_' + str(simarray[p])\n plottitle[p] = pltitle + ' Sim_' + str(simarray[p])\n\n simpleplot(opm_0[p], po3_0[p], po3_fit_0[p], opm_1[p], po3_1[p], po3_fit_1[p], opm_2[p], po3_2[p], po3_fit_2[p], opm_3[p], po3_3[p], po3_fit_3[p], opm_4[p], po3_4[p], po3_fit_4[p], label_pl_0[p], label_pl_1[p], label_pl_2[p], label_pl_3[p], label_pl_4[p], plotname[p], plottitle[p] )\n\n \n print(plname, '0', np.nanmean(slope_0_all), '+/-' , np.nanmean(std_err_0_all), ' intercept:' , np.nanmean(intercept_0_all)) \n print(plname, '1', np.nanmean(slope_1_all), '+/-' , np.nanmean(std_err_1_all), ' intercept:' , np.nanmean(intercept_1_all)) \n print(plname, '2', np.nanmean(slope_2_all), '+/-' , np.nanmean(std_err_2_all), ' intercept:' , np.nanmean(intercept_2_all)) \n print(plname, '3', np.nanmean(slope_3_all), '+/-' , np.nanmean(std_err_3_all), ' intercept:' , np.nanmean(intercept_3_all)) \n print(plname, '4', np.nanmean(slope_4_all), '+/-' , np.nanmean(std_err_4_all), ' intercept:' , np.nanmean(intercept_4_all))\n\n slope = [ np.nanmean(slope_0_all), np.nanmean(slope_1_all), np.nanmean(slope_2_all), np.nanmean(slope_3_all), np.nanmean(slope_4_all)]\n #err = [ np.nanmean(std_err_0_all), np.nanmean(std_err_1_all), np.nanmean(std_err_2_all), np.nanmean(std_err_3_all), np.nanmean(std_err_4_all)]\n err = [ np.nanstd(slope_0_all), np.nanstd(slope_1_all), np.nanstd(slope_2_all), np.nanstd(slope_3_all), np.nanstd(slope_4_all)]\n\n intercept = [ np.nanmean(intercept_0_all), np.nanmean(intercept_1_all), np.nanmean(intercept_2_all), np.nanmean(intercept_3_all), np.nanmean(intercept_4_all)]\n\n return slope, err, intercept\n\ndef scatterfitplot(category,dfm, pltitle, plname ):\n\n dfm_c = {}\n p = [0]* 4; f_sp = [0]*4; filter_c = [0]*4\n po3 = [0]*5; opm = [0]*5\n\n slope = [0]*5; intercept = [0]*5; r_value = [0]*5; p_value = [0]*5; std_err = [0]*5\n po3_fit = [0]*5; dp = [0]*5\n\n label_p = [''] * 5\n label_pl = [''] * 5\n\n p[0] = (dfm.Pair <= 999) & (dfm.Pair > 500) \n p[1] = (dfm.Pair <= 500) & (dfm.Pair > 100) \n p[2] = (dfm.Pair <= 100) & (dfm.Pair > 30) \n p[3] = (dfm.Pair <= 30) & (dfm.Pair > 1)\n\n label_p[0] = \"999>= Pair > 500 \"; label_p[1] = \"500>= Pair > 100 \"; label_p[2] = \"100>= Pair > 30 \"\n label_p[3] = \"30>= Pair > 1 \"\n\n \n for i in range(4):\n filter_c[i] = category & p[i]\n dfm_c[i]= dfm[filter_c[i]]\n \n po3[i] = np.asarray(dfm_c[i].PO3.tolist())\n opm[i] = np.asarray(dfm_c[i].PO3_OPM.tolist())\n dp[i] = len(po3[i])\n slope[i], intercept[i], r_value[i], p_value[i], std_err[i] = stats.linregress(opm[i],po3[i]) \n\n po3_fit[i] = slope[i] * opm[i] + intercept[i]\n label_pl[i] = label_p[i] + \"slope =\" + str(round(slope[i],2)) + \" int. =\" + str(round(intercept[i],2))+ \" DP= \" + str(dp[i])\n \n #print(slope[i], intercept[i], r_value[i], p_value[i], std_err[i], dp[i])\n\n df_main = dfm[category]\n po3[4] = np.asarray(df_main.PO3.tolist())\n opm[4] = np.asarray(df_main.PO3_OPM.tolist())\n dp[4] = len(po3[4])\n \n slope[4], intercept[4], r_value[4], p_value[4], std_err[4] = stats.linregress(opm[4],po3[4])\n po3_fit[4] = slope[4] * opm[4] + intercept[4]\n\n label_pl[4] = \"All, slope =\" + str(round(slope[4],2)) + \" int. 
=\" + str(round(intercept[4],2))\n\n fig,ax = plt.subplots()\n plt.xlim(0,30)\n plt.ylim(0,30)\n plt.xlabel('OPM (mPa)')\n plt.ylabel(r'PO3 (mPa)')\n plt.title(pltitle)\n plt.grid(True)\n \n ax.tick_params(axis='both', which='both', direction='in')\n ax.yaxis.set_ticks_position('both')\n ax.xaxis.set_ticks_position('both')\n\n ax.yaxis.set_minor_locator(AutoMinorLocator(10))\n ax.xaxis.set_minor_locator(AutoMinorLocator(10))\n\n x = []\n\n for p in range(4):\n plt.scatter(opm[p],po3[p], s = 0.1)\n plt.plot( opm[p], po3_fit[p],'-.', label = label_pl[p], linewidth = 3.0)\n # plt.scatter(opm_sim[p],po3_sim[p], s=0.5)\n # plt.plot( opm_sim[p], po3_fit[p],'-.', label = label_sim[p], linewidth = 2.0)\n \n plt.plot( opm[4], po3_fit[4],'-.', color = 'black', label = label_pl[4], linewidth = 2.0)\n ax.legend(loc='upper left', frameon=True, fontsize = 'small')\n\n plt.savefig('/home/poyraden/Josie17/Plots/Calibration/Scatter_' + plname +'.pdf')\n plt.savefig('/home/poyraden/Josie17/Plots/Calibration/Scatter_' + plname +'.eps')\n\n #plt.show()\n\n return slope, std_err, intercept\n\n###############################################################################################\n\n\ndf = pd.read_csv(\"/home/poyraden/Josie17/Files/Josie17_DataAll.csv\")\n\ndf = df.drop(df[((df.Sim == 171) | (df.Sim == 172) | (df.Sim == 180) | (df.Sim == 185) )].index)\ndf = df.drop(df[(df.Sim == 179) & (df.Team == 4)].index)\ndf = df.drop(df[(df.Sim == 172) & (df.Team == 1)].index)\ndf = df.drop(df[(df.Sim == 178) & (df.Team == 3)].index)\n#df = df.drop(df[(df.Tsim > 7000)].index)\ndf = df.drop(df[(df.Sim == 175)].index)\n\n# remove -9999 entries\ndf = df.drop(df[(df.PO3 < 0)].index)\ndf = df.drop(df[(df.PO3_OPM < 0)].index)\n\n#SciPump\nsp_1p01p0 = (df.ENSCI == 0) & (df.Sol == 1.0) & (df.Buf == 1.0)\nsp_1p00p1 = (df.ENSCI == 0) & (df.Sol == 1.0) & (df.Buf == 0.1)\nsp_2p00p1 = (df.ENSCI == 0) & (df.Sol == 2.0) & (df.Buf == 0.1)\n\nslopem_sp_1p01p0, errm_sp_1p01p0, interceptm_sp_1p01p0 = scatterfitplot(sp_1p01p0,df, 'SP 1.0% 1.0B','SP_1p01p0')\nslopem_sp_1p00p1, errm_sp_1p00p1, interceptm_sp_1p00p1 = scatterfitplot(sp_1p00p1,df, 'SP 1.0% 0.1B', 'SP_1p00p1')\nslopem_sp_2p00p1, errm_sp_2p00p1, interceptm_sp_2p00p1 = scatterfitplot(sp_2p00p1,df, 'SP 2.0% 0.1B', 'SP_2p00p1')\n\n#ENSCI\nen_1p00p1 = (df.ENSCI == 1) & (df.Sol == 1.0) & (df.Buf == 0.1)\nen_2p00p1 = (df.ENSCI == 1) & (df.Sol == 2.0) & (df.Buf == 0.1)\nen_0p50p5 = (df.ENSCI == 1) & (df.Sol == 0.5) & (df.Buf == 0.5)\n\nslopem_en_1p00p1, errm_en_1p00p1, interceptm_en_1p00p1 = scatterfitplot(en_1p00p1,df, 'EN 1.0% 0.1B','ENSCI_1p00p1')\nslopem_en_2p00p1, errm_en_2p00p1, interceptm_en_2p00p1 = scatterfitplot(en_2p00p1,df, 'EN 2.0% 0.1B', 'ENSCI_2p00p1')\nslopem_en_0p50p5, errm_en_0p50p5, interceptm_en_0p50p5 = scatterfitplot(en_0p50p5,df, 'EN 0.5% 0.5B', 'ENSCI_0p50p5')\n\n\n##now for each simulation seperatley:\nasp_1p01p0_sim = np.asarray(df[sp_1p01p0].drop_duplicates(['Sim']) ['Sim'])\nasp_1p01p0_sim = np.sort(asp_1p01p0_sim)\n\nasp_1p00p1_sim = np.asarray(df[sp_1p00p1].drop_duplicates(['Sim']) ['Sim'])\nasp_1p00p1_sim = np.sort(asp_1p00p1_sim)\n\nasp_2p00p1_sim = np.asarray(df[sp_2p00p1].drop_duplicates(['Sim']) ['Sim'])\nasp_2p00p1_sim = np.sort(asp_2p00p1_sim)\n\n\naen_1p00p1_sim = np.asarray(df[en_1p00p1].drop_duplicates(['Sim']) ['Sim'])\naen_1p00p1_sim = np.sort(aen_1p00p1_sim)\n\naen_2p00p1_sim = np.asarray(df[en_2p00p1].drop_duplicates(['Sim']) ['Sim'])\naen_2p00p1_sim = np.sort(aen_2p00p1_sim)\n\naen_0p50p5_sim = 
np.asarray(df[en_0p50p5].drop_duplicates(['Sim']) ['Sim'])\naen_0p50p5_sim = np.sort(aen_0p50p5_sim)\n\n\n##scatterfitplot_sim(category, simarray, simfilter ,dfm, pltitle, plname ):\n\nslope_sp_1p01p0, err_sp_1p01p0, intercept_sp_1p01p0 = scatterfitplot_persim(sp_1p01p0, asp_1p01p0_sim ,df,'SP 1.0% 1.0B', 'SP_1p01p0')\nslope_sp_1p00p1, err_sp_1p00p1, intercept_sp_1p00p1 = scatterfitplot_persim(sp_1p00p1, asp_1p00p1_sim ,df,'SP 1.0% 0.1B', 'SP_1p00p1')\nslope_sp_2p00p1, err_sp_2p00p1, intercept_sp_2p00p1 = scatterfitplot_persim(sp_2p00p1, asp_2p00p1_sim ,df,'SP 2.0% 0.1B', 'SP_2p00p1')\n\nslope_en_1p00p1, err_en_1p00p1, intercept_en_1p00p1 = scatterfitplot_persim(en_1p00p1, aen_1p00p1_sim ,df,'EN 1.0% 1.0B', 'EN_1p00p1')\nslope_en_2p00p1, err_en_2p00p1, intercept_en_2p00p1 = scatterfitplot_persim(en_2p00p1, aen_2p00p1_sim ,df,'EN 2.0% 0.1B', 'EN_2p00p1')\nslope_en_0p50p5, err_en_0p50p5, intercept_en_0p50p5 = scatterfitplot_persim(en_0p50p5, aen_0p50p5_sim ,df,'EN 0.5% 0.5B', 'EN_0p50p5')\n\n\nfile = open(\"slope_Files.txt\", \"w\")\n\nfile.write('Main fit' + '\\n')\nfile.write('SP 1.0% 1.0B slope:' + str(slopem_sp_1p01p0) + '\\n')\nfile.write('SP 1.0% 1.0B err:' + str(errm_sp_1p01p0) + '\\n')\nfile.write('SP 1.0% 1.0B intercept:' + str(interceptm_sp_1p01p0) + '\\n')\nfile.write('Seperate simulation fit' + '\\n')\nfile.write('SP 1.0% 1.0B' + str(asp_1p01p0_sim) +'\\n')\nfile.write('SP 1.0% 1.0B slope:' + str(slope_sp_1p01p0) + '\\n')\nfile.write('SP 1.0% 1.0B err:' + str(err_sp_1p01p0) + '\\n')\nfile.write('SP 1.0% 1.0B intercept:' + str(intercept_sp_1p01p0) + '\\n')\nfile.write(' ' + '\\n')\nfile.write('Main fit' + '\\n')\nfile.write('SP 1.0% 0.1B slope:' + str(slopem_sp_1p00p1) + '\\n')\nfile.write('SP 1.0% 0.1B err:' + str(errm_sp_1p00p1) + '\\n')\nfile.write('SP 1.0% 0.1B intercept:' + str(interceptm_sp_1p00p1) + '\\n')\nfile.write('Seperate simulation fit' + '\\n')\nfile.write('SP 1.0% 0.1B' + str(asp_1p00p1_sim) +'\\n')\nfile.write('SP 1.0% 0.1B slope:' + str(slope_sp_1p00p1) + '\\n')\nfile.write('SP 1.0% 0.1B err:' + str(err_sp_1p00p1) + '\\n')\nfile.write('SP 1.0% 0.1B intercept:' + str(intercept_sp_1p00p1) + '\\n')\nfile.write(' ' + '\\n')\nfile.write('Main fit' + '\\n')\nfile.write('SP 2.0% 0.1B slope:' + str(slopem_sp_2p00p1) + '\\n')\nfile.write('SP 2.0% 0.1B err:' + str(errm_sp_2p00p1) + '\\n')\nfile.write('SP 2.0% 0.1B intercept:' + str(interceptm_sp_2p00p1) + '\\n')\nfile.write('Seperate simulation fit' + '\\n')\nfile.write('SP 2.0% 0.1B' + str(asp_2p00p1_sim) +'\\n')\nfile.write('SP 2.0% 0.1B slope:' + str(slope_sp_2p00p1) + '\\n')\nfile.write('SP 2.0% 0.1B err:' + str(err_sp_2p00p1) + '\\n')\nfile.write('SP 2.0% 0.1B intercept:' + str(intercept_sp_2p00p1) + '\\n')\nfile.write(' ' + '\\n')\nfile.write('Main fit' + '\\n')\nfile.write('EN 1.0% 0.1B slope:' + str(slopem_en_1p00p1) + '\\n')\nfile.write('EN 1.0% 0.1B err:' + str(errm_en_1p00p1) + '\\n')\nfile.write('EN 1.0% 0.1B intercept:' + str(interceptm_en_1p00p1) + '\\n')\nfile.write('Seperate simulation fit' + '\\n')\nfile.write('EN 1.0% 0.1B' + str(aen_1p00p1_sim) +'\\n')\nfile.write('EN 1.0% 0.1B slope:' + str(slope_en_1p00p1) + '\\n')\nfile.write('EN 1.0% 0.1B err:' + str(err_en_1p00p1) + '\\n')\nfile.write('EN 1.0% 0.1B intercept:' + str(intercept_en_1p00p1) + '\\n')\nfile.write(' ' + '\\n')\n\nfile.write('Main fit' + '\\n')\nfile.write('EN 2.0% 0.1B slope:' + str(slopem_en_2p00p1) + '\\n')\nfile.write('EN 2.0% 0.1B err:' + str(errm_en_2p00p1) + '\\n')\nfile.write('EN 2.0% 0.1B intercept:' + str(interceptm_en_2p00p1) + 
'\\n')\nfile.write('Seperate simulation fit' + '\\n')\nfile.write('EN 2.0% 0.1B' + str(aen_2p00p1_sim) +'\\n')\nfile.write('EN 2.0% 0.1B slope:' + str(slope_en_2p00p1) + '\\n')\nfile.write('EN 2.0% 0.1B err:' + str(err_en_2p00p1) + '\\n')\nfile.write('EN 2.0% 0.1B intercept:' + str(intercept_en_2p00p1) + '\\n')\nfile.write(' ' + '\\n')\nfile.write('Main fit' + '\\n')\nfile.write('EN 0.5% 0.5B slope:' + str(slopem_en_0p50p5) + '\\n')\nfile.write('EN 0.5% 0.5B err:' + str(errm_en_0p50p5) + '\\n')\nfile.write('EN 0.5% 0.5B intercept:' + str(interceptm_en_0p50p5) + '\\n')\nfile.write('Seperate simulation fit' + '\\n')\nfile.write('EN 0.5% 0.5B' + str(aen_0p50p5_sim) +'\\n')\nfile.write('EN 0.5% 0.5B slope:' + str(slope_en_0p50p5) + '\\n')\nfile.write('EN 0.5% 0.5B err:' + str(err_en_0p50p5) + '\\n')\nfile.write('EN 0.5% 0.5B intercept:' + str(intercept_en_0p50p5) + '\\n')\n\n\nprint('SP 1.0% 1.0B', asp_1p01p0_sim)\nprint('SP 1.0% 1.0B slope:', slope_sp_1p01p0)\n\n\n\n\n\n","sub_path":"ScatterPlot_PO3vsOPM.py","file_name":"ScatterPlot_PO3vsOPM.py","file_ext":"py","file_size_in_byte":19060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"349198967","text":"from demo.init.coursera.linearregression.simpleregression import common as com\nfrom demo.init.coursera.linearregression.simpleregression import linear_regression as lr\nfrom os.path import abspath\nimport random\n\ndata_path = abspath('coursera/linearregression/data/ex1data1.txt')\n\n\ndef compute_linear_regression():\n # loading exercise data\n data = com.load_data(data_path)\n\n # getting separated x and y data for descent and cost function\n x_data = data[:, 0]\n y_data = data[:, 1]\n\n # testing compute_cost function\n print('For theta [0, 0] expected cost function result: 32.0')\n print('For theta [0, 0] computed cost function result: %.4f' % lr.compute_cost_function(x_data, y_data, [0, 0]))\n print('For theta [-1, 2] expected cost function result: 54.5')\n print('For theta [-1, 2] computed cost function result: %.4f' % lr.compute_cost_function(x_data, y_data, [-1, 2]))\n\n # generating random start theta (or coefficients on linear regression)\n theta = [random.uniform(0, 1), random.uniform(0, 1)]\n\n # descent speed\n # alpha should be between 0.005 and 0.2, or cost function will not converge\n # if alpha > 0.2 cost function will increase instead of decrease\n # if alpha < 0.005 cost function will never converge faster then in 10.000 iteration\n alpha = 0.01\n\n # if cost function not converged, descent will stop after all iterations\n iterations = 10000\n\n # computing gradient descent\n print('Started gradient descent with theta zero: %.4f, theta one: %.4f' % (theta[0], theta[1]))\n print('Expected theta zero: %.4f, theta one: %.4f (approximately)' % (-3.6303, 1.1664))\n theta = lr.gradient_descent(x_data, y_data, theta, alpha, iterations)\n print('Computed theta zero: %.4f, theta one: %.4f' % (theta[0], theta[1]))\n\n # displaying results\n com.display_regression(data, [com.hyp_value(x, theta) for x in x_data])\n\ncompute_linear_regression()\n","sub_path":"demo/init/coursera/linearregression/simpleregression/regression_starter.py","file_name":"regression_starter.py","file_ext":"py","file_size_in_byte":1919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"186570712","text":"import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport pandas as pd\nimport numpy as np\nfrom dash.dependencies 
import Output, Input\nimport os\nimport plotly.express as px\n\nfrom assets import whoop\n\naccess_token = whoop.get_access_token(\"trevor.liggett@gmail.com\", os.getenv('WHOOP_PASSWORD'))\n\n\ndata = whoop.get_user_data_df(access_token,\n start_date='2000-01-01T00:00:00.000Z', \n end_date='2030-01-01T00:00:00.000Z',\n url='https://api-7.whoop.com/users/{}/cycles')\n\n\n\ndata[\"Date\"] = pd.to_datetime(data[\"date\"], format=\"%Y-%m-%d\")\ndata.sort_values(\"Date\", inplace=True)\n\n# Have to hard code stats at first\nstats = ['sleep.score', 'sleep.qualityDuration']\n\n\n\nexternal_stylesheets = [\n {\n \"href\": \"https://fonts.googleapis.com/css2?\"\n \"family=Lato:wght@400;700&display=swap\",\n \"rel\": \"stylesheet\",\n },\n]\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\nserver = app.server\napp.title = \"Sleep Analysis with WHOOP\"\n\napp.layout = html.Div(\n children=[\n html.Div(\n children=[\n html.Img(src=\"assets/sleep.png\", className=\"header-emoji\"),\n html.H1(\n children=\"Sleep Analysis: Powered by WHOOP\", className=\"header-title\"\n ),\n html.P(\n children=\"Monitoring and analyzing\"\n \" sleep patterns between 2018 and 2021\"\n \" using the WHOOP wearable.\",\n className=\"header-description\",\n ),\n ],\n className=\"header\",\n ),\n html.Div(\n children=[\n html.Div(\n children=[\n html.Div(\n children=\"Date Range\", className=\"menu-title\"\n ),\n dcc.DatePickerRange(\n id=\"date-range\",\n min_date_allowed=data.Date.min().date(),\n max_date_allowed=data.Date.max().date(),\n start_date=pd.to_datetime(\"2021-04-11\", format=\"%Y-%m-%d\"),\n end_date=data.Date.max().date(),\n ),\n ]\n ),\n ],\n className=\"menu\",\n ),\n html.Div(\n children=[\n html.Div(\n children=dcc.Graph(\n id=\"need-chart\",\n config={\"displayModeBar\": False},\n ),\n className=\"card\",\n ),\n html.Div(\n children=dcc.Graph(\n id=\"efficiency-chart\",\n config={\"displayModeBar\": False},\n ),\n className=\"card\",\n ),\n html.Div(\n children=dcc.Graph(\n id=\"consistency-chart\",\n config={\"displayModeBar\": False},\n ),\n className=\"card\",\n ),\n html.Div(\n children=dcc.Graph(\n id=\"pie-chart\",\n config={\"displayModeBar\": False},\n ),\n className=\"card\",\n ),\n ],\n className=\"wrapper\",\n ),\n ]\n)\n\n\n@app.callback(\n [Output(\"need-chart\", \"figure\"), Output(\"efficiency-chart\", \"figure\"), Output(\"consistency-chart\", \"figure\"), Output(\"pie-chart\", \"figure\")],\n [\n Input(\"date-range\", \"start_date\"),\n Input(\"date-range\", \"end_date\"),\n ],\n)\ndef update_charts(start_date, end_date):\n mask = (data['Date'] > start_date) & (data['Date'] <= end_date)\n filtered_data = data.loc[mask]\n # create pie data\n names = ['sws', 'rem', 'light', 'wake']\n values = [filtered_data['sleep.sws.duration'].mean() / 3600000, \n filtered_data['sleep.rem.duration'].mean() / 3600000,\n filtered_data['sleep.light.duration'].mean() / 3600000,\n filtered_data['sleep.wake.duration'].mean() / 3600000]\n \n pie_data = pd.DataFrame(list(zip(names, values)), columns =['names', 'values'])\n \n need_chart_figure = {\n \"data\": [\n {\n \"x\": filtered_data[\"Date\"],\n \"y\": filtered_data[\"sleep.qualityDuration\"] / 3600000,\n \"type\": \"lines\",\n \"name\": \"Sleep Duration\",\n },\n {\n \"x\": filtered_data[\"Date\"],\n \"y\": filtered_data[\"sleep.needBreakdown.total\"] / 3600000,\n \"type\": \"lines\",\n \"name\": \"Sleep Need\",\n },\n ],\n \"layout\": {\n \"title\": {\"text\": \"Hours of Sleep vs Sleep Need\", \"x\": 0.05, \"xanchor\": \"left\"},\n 
\"xaxis\": {\"fixedrange\": True},\n \"yaxis\": {\"fixedrange\": True},\n \"colorway\": [\"#52B2BF\", \"#0A1172\"],\n },\n }\n \n efficiency_chart_figure = {\n \"data\": [\n {\n \"x\": filtered_data[\"Date\"],\n \"y\": filtered_data[\"sleep.efficiency\"],\n \"type\": \"lines\",\n \"hovertemplate\": \"%{y:.2f} percent\",\n },\n ],\n \"layout\": {\n \"title\": {\n \"text\": \"Sleep Efficiency\",\n \"x\": 0.05,\n \"xanchor\": \"left\",\n },\n \"xaxis\": {\"fixedrange\": True},\n \"yaxis\": {\"tickprefix\": \"\", \"fixedrange\": True},\n \"colorway\": [\"#7A4988\"],\n },\n }\n\n consistency_chart_figure = {\n \"data\": [\n {\n \"x\": filtered_data[\"Date\"],\n \"y\": filtered_data[\"sleep.consistency\"],\n \"type\": \"lines\",\n \"hovertemplate\": \"%{y:.2f} percent\",\n },\n ],\n \"layout\": {\n \"title\": {\n \"text\": \"Sleep Consistency\",\n \"x\": 0.05,\n \"xanchor\": \"left\",\n },\n \"xaxis\": {\"fixedrange\": True},\n \"yaxis\": {\"tickprefix\": \"\", \"fixedrange\": True},\n \"colorway\": [\"#7A4988\"],\n },\n }\n \n pie_chart_figure = px.pie(pie_data, values=values, names=names, \n color_discrete_sequence=px.colors.sequential.Purp,\n hole=.3,\n title=\"Proportion of Sleep Spent in Major Stages\")\n \n return need_chart_figure, efficiency_chart_figure, consistency_chart_figure, pie_chart_figure\n\nif __name__ == \"__main__\":\n app.run_server(debug=True)\n","sub_path":"data_visualization__COSC322/final_project/app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"604671712","text":"import time\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.keys import Keys\nfrom functions import highlight\nchrome_driver = \"[...]/chromedriver.exe\"\n# chrome_driver = \"[...]/chromedriver\"\n\nchrome_options = Options()\nchrome_options.add_argument(\"--window-size=1200,800\")\ndriver = webdriver.Chrome(executable_path=chrome_driver, chrome_options=chrome_options)\nweb_link = 'https://www.imdb.com'\n\n\nclass ImdbObject:\n\n def __init__(self, tv_show):\n self.web_link = web_link\n self.driver = driver\n self.tv_show = tv_show\n\n def get_weblink(self):\n self.driver.get(self.web_link)\n\n def get_search(self):\n search_entry = self.driver.find_element_by_xpath('//*[@id=\"navbar-query\"]')\n highlight(search_entry)\n search_entry.send_keys(self.tv_show)\n search_entry.send_keys(Keys.ENTER)\n\n def get_result(self):\n search_result = self.driver.find_element_by_xpath('//*[@id=\"main\"]/div/div[2]/table/tbody/tr[1]/td[2]/a')\n highlight(search_result)\n search_result.click()\n\n def get_episode_guide(self):\n episode_guide = self.driver.find_element_by_xpath('//*[@id=\"title-overview-widget\"]/div[2]/div[3]/a')\n highlight(episode_guide)\n episode_guide.click()\n\n def get_latest_episode(self):\n ''' This method should return first episode ascending '''\n latest = self.driver.find_element_by_css_selector('#episodes_content > div.clear > div.list.detail.eplist > div:nth-child(1)')\n print(latest.text)\n","sub_path":"classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":1580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"288332454","text":"import time\n\ndef clock(func):\n def clocked(*args, **kwargs):\n t1 = time.time()\n print(121232133)\n result = func(*args, **kwargs)\n\n print('dfge5g')\n t2 = time.time()\n elapsed = t2 - t1\n 
print(\"This func cost: {}seconds\".format(elapsed))\n return result\n return clocked\n\n@clock\ndef test():\n print(123)\n\ntest()","sub_path":"test/202103/test_zhuangshiqi.py","file_name":"test_zhuangshiqi.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"412364809","text":"import sys\ndef endian_conv(value):\n left_most_byte = (value & 0x000000ff) >> 0\n left_mid_byte = (value & 0x0000ff00) >> 8\n right_mid_byte = (value & 0x00ff0000) >> 16\n right_most_byte= (value & 0xff000000) >> 24\n left_most_byte <<= 24\n left_mid_byte <<= 16\n right_mid_byte <<= 8\n right_most_byte <<= 0\n return (left_most_byte|left_mid_byte|right_mid_byte|right_most_byte)\n\nvalue = int (input(\"Enter the Hexadecimal Value for the type int (eg:0x12345678): \"), 16)\nconverted=(endian_conv(value))\nif sys.byteorder == \"little\":\n print(\"your system is little Endian \")\n print(\"The Little endian Byte Order: \", hex(endian_conv(value)))\n choice =input(\"would you like to convert to Big Endian? (y/n):\" )\n if (choice=='y' or choice=='Y' ):\n print(\"Converted Big endian Byteorder:\", hex(endian_conv(converted)))\nelse:\n print(\"your system is Big Endian\")\n print(\"The Big endian Byte Order: \", hex(endian_conv(converted)))\n choice = input(\"would you like to convert to Little Endian? (y/n):\" )\n if (choice=='y' or choice=='Y' ):\n print(\"converted Little endian order\", hex(endian_conv(value)))","sub_path":"endianconv.py","file_name":"endianconv.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"85628255","text":"# ----------------开发者信息----------------------------\n# 开发者:张涵毓\n# 开发日期:2020年6月3日\n# 内容:2.1 CNN-招聘信息文本分类\n# 修改内容:\n# 修改者:\n# ----------------开发者信息----------------------------\n# ---------------------- 代码布局: ----------------------\n# 1、导入 Keras, matplotlib, numpy, sklearn 和 panda的包\n# 2、招聘数据数据导入\n# 3、分词和提取关键词\n# 4、建立字典,并使用\n# 5、训练模型\n# 6、保存模型,显示运行结果\n# ---------------------- 代码布局: ----------------------\n\n# -------------------------- 1、导入需要包 -------------------------------\nimport pandas as pd\nimport jieba\nimport jieba.analyse as analyse\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing import sequence\nimport torch\nimport torch.nn as nn\nimport torchvision\nimport torchvision.transforms as transforms\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelEncoder\n\n# -------------------------- 1、导入需要包 -------------------------------\n\n\n# -------------------------- 2、招聘数据数据导入 -------------------------------\npath='D:\\\\研究生\\\\代码\\\\Keras代码\\\\1.Multi-Layer perceptron(MLP 多层感知器)\\\\job_detail_dataset.csv'\njob_detail_pd = pd.read_csv(path, encoding='UTF-8')\nprint(job_detail_pd.head(5))\nlabel = list(job_detail_pd['PositionType'].unique()) # 标签\nprint(label)\n\n\n# 为工作描述设置标签的id\ndef label_dataset(row):\n num_label = label.index(row) # 返回label列表对应值的索引\n return num_label\n\n\njob_detail_pd['label'] = job_detail_pd['PositionType'].apply(label_dataset)\njob_detail_pd = job_detail_pd.dropna() # 删除空行\njob_detail_pd.head(5)\n\n\n# -------------------------- 2、招聘数据数据导入 -------------------------------\n\n# -------------------------- 3、分词和提取关键词 -------------------------------\n# 中文分词\ndef chinese_word_cut(row):\n return \" 
\".join(jieba.cut(row))\n\njob_detail_pd['Job_Description_jieba_cut'] = job_detail_pd.Job_Description.apply(chinese_word_cut)\njob_detail_pd.head(5)\n\n# 提取关键词\ndef key_word_extract(texts):\n return \" \".join(analyse.extract_tags(texts, topK=50, withWeight=False, allowPOS=()))\n\n\njob_detail_pd['Job_Description_key_word'] = job_detail_pd.Job_Description.apply(key_word_extract)\n# -------------------------- 3、分词和提取关键词 -------------------------------\n\n# -------------------------- 4、建立字典,并使用 -------------------------------\n# 建立2000个词的字典\ntoken = Tokenizer(num_words=2000)\ntoken.fit_on_texts(job_detail_pd['Job_Description_key_word']) # 按单词出现次数排序,排序前2000的单词会列入词典中\n\n# 使用token字典将“文字”转化为“数字列表”\nJob_Description_Seq = token.texts_to_sequences(job_detail_pd['Job_Description_key_word'])\n\n# 截长补短让所有“数字列表”长度都是50\nJob_Description_Seq_Padding = sequence.pad_sequences(Job_Description_Seq, maxlen=50)\nx_train = Job_Description_Seq_Padding\ny_train = job_detail_pd['label'].tolist()\n# -------------------------- 4、建立字典,并使用 -------------------------------\n\n# -------------------------- 5、训练模型 -------------------------------\nclass CNNic(nn.module):\n def __init__(self):\n super(CNNic,self).__init__()\n self.dense=nn.Sequential(nn.Embedding(num_embeddings=2000,embedding_dim=32),\n nn.Conv1d(in_channels=50,out_channels=256,kernel_size=3,padding=1),\n nn.ReLU(),\n nn.MaxPool1d(kernel_size=3,padding=1),\n nn.Conv1d(in_channels=256,out_channels=32,kernel_size=3),\n nn.Flatten(),\n nn.Dropout(0.3),\n nn.BatchNorm1d(550),\n nn.Linear(550,out_features=256),\n nn.ReLU(),\n nn.Dropout(0.2),\n nn.Linear(256, out_features=10),\n nn.Softmax()\n )\n def forward(self, x):\n x = self.dense(x)\n return x\n\n\nmodel = CNNic() # 实例化招聘模型\nprint(model) # 打印模型结构\noptimizer = torch.optim.SGD(model.parameters(), lr=1e-3) # 定义优化器为SGD,学习率是1e-3\nloss_func = torch.nn.CrossEntropyLoss() # 定义损失函数为均方误差\n\nbatch_size = 256\nepochs = 5\n\noptimizer = torch.optim.Adam(model.parameters())\nloss_func = nn.CrossEntropyLoss()\n\nprint(\"-----------训练开始-----------\")\niteration = [] # list存放epoch数\nloss_total = [] # list存放损失\nfor epoch in range(epochs):\n # train_loss = 0.0\n model.train() # 训练模式\n predict = model(x_train) # output\n loss_epoch_train = loss_func(predict,y_train) # cross entropy loss\n iteration.append(epoch) # 将epoch放到list中\n loss_total.append(loss_epoch_train) # 将loss放到list中\n optimizer.zero_grad() # clear gradients for this training step\n loss_epoch_train.backward() # 误差反向传播, 计算参数更新值\n optimizer.step() # 将参数更新值施加到 net 的 parameters 上\n print('epoch %3d , loss %3d' % (epoch, loss_epoch_train))\nprint(\"-----------训练结束-----------\")\ntorch.save(model.state_dict(), \"job information.pkl\") # 保存模型参数\n# -------------------------------模型训练------------------------\n\n# -------------------------- 6、模型可视化 ------------------------------\nplt.plot(iteration,loss_total, label=\"Train loss\")\nplt.title('Model loss')\nplt.ylabel('Loss')\nplt.xlabel('Epoch')\nplt.legend(['Train'], loc='upper left') # loc:图例位置\nplt.show()\n","sub_path":"zhanghanyu/Pytorch/2 CNN/招聘信息文本分类/CNN 招聘信息文本分类.py","file_name":"CNN 招聘信息文本分类.py","file_ext":"py","file_size_in_byte":6049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"304086865","text":"## test_set1.py\n\nfrom collections import Counter\n\nimport pytest\nimport random\nimport secrets\nfrom hypothesis import given, note\nfrom hypothesis.strategies import binary, composite, integers\n\nfrom set1 import *\n\n\ndef test_ex1():\n \"\"\"in_text and 
out_text fro https://cryptopals.com/sets/1/challenges/1\"\"\"\n in_text = \"49276d206b696c6c696e6720796f757220627261696e206c696b65206120706f69736f6e6f7573206d757368726f6f6d\"\n # bytes.fromhex(in_text)\n out_text = \"SSdtIGtpbGxpbmcgeW91ciBicmFpbiBsaWtlIGEgcG9pc29ub3VzIG11c2hyb29t\"\n assert hex2b64(in_text) == out_text\n\n\ndef test_ex2():\n in_key = \"1c0111001f010100061a024b53535009181c\"\n in_text = \"686974207468652062756c6c277320657965\" # bytes.fromhex(in_text)\n out_text = \"746865206b696420646f6e277420706c6179\"\n assert fixed_xor(in_text, in_key) == out_text\n\n\n@composite\ndef same_length_hex_binaries(draw, number=2):\n l = draw(integers(min_value=1, max_value=1000))\n return [draw(binary(min_size=l, max_size=l)).hex() for _ in range(number)]\n\n\n@given(same_length_hex_binaries())\ndef test_fixed_xor_invertible(bs):\n b0 = bs[0]\n b1 = bs[1]\n assert b1 == fixed_xor(fixed_xor(b1, b0), b0)\n assert b1 == fixed_xor(b0, fixed_xor(b1, b0))\n\n\n@given(same_length_hex_binaries())\ndef test_fixed_xor_commutative(bs):\n b0 = bs[0]\n b1 = bs[1]\n assert fixed_xor(b0, b1) == fixed_xor(b1, b0)\n\n\n@given(same_length_hex_binaries(3))\ndef test_fixed_xor_associative(bs):\n b0 = bs[0]\n b1 = bs[1]\n b2 = bs[2]\n assert fixed_xor(fixed_xor(b0, b1), b2) == fixed_xor(b0, fixed_xor(b1, b2))\n\n\ndef test_fixed_xor_rejects_different_length():\n in1 = \"1c0111001f010100061a024b53535009181c\"\n in2 = \"686974207468652062756c6c277320657\"\n with pytest.raises(AssertionError):\n fixed_xor(in1, in2)\n\n\n@pytest.mark.parametrize(\n \"plaintext, short\",\n [\n (\"This is a normal English text to be encrypted with a one char key\", False),\n (\"Another perfectly normal English text to be tried\", False),\n (\"Does this work as well?\", False),\n (\"How about this?\", True),\n (\"Some more text to try\", False),\n (\"Cooking MC's like a pound of bacon\", True),\n ],\n)\ndef test_find_decryption(plaintext, short):\n \"\"\"Our scoring is not perfect, on some \"short\" texts the right\n decoding is not the best scoring result. Hence for some we only\n check that the candidate is among the first 3. 
We can filter more\n on \"special\" characters, but that doesn't feel right.\n\n \"\"\"\n plain = plaintext.encode().hex()\n c = 42\n key = one_char_key(c, len(plain) // 2)\n cipher = fixed_xor(plain, key)\n\n candidates = all_one_char_decodes(cipher)\n candidates_decoded = decode_all(candidates)\n assert plaintext in candidates_decoded\n ft = freq_tables(candidates_decoded)\n assert ft[plaintext] == Counter(plaintext.upper())\n candidates_ordered = order_ft(ft)\n assert plaintext in candidates_ordered[:3]\n if not short:\n assert plaintext == candidates_ordered[0]\n\n\ndef test_ex3():\n cipher = \"1b37373331363f78151b7f2b783431333d78397828372d363c78373e783a393b3736\"\n candidates = all_one_char_decodes(cipher)\n assert \"Cooking MC's like a pound of bacon\" in find_decode(cipher, 5)\n\n\n@pytest.mark.parametrize(\n \"plaintext\",\n [\n \"This is a normal English text to be encrypted with a one char key\",\n \"Another perfectly normal English text to be tried\",\n \"Does this work as well?\",\n \"How about this?\",\n \"Some more text to try\",\n \"Cooking MC's like a pound of bacon\",\n ],\n)\ndef test_develop_ex4(plaintext):\n plain = plaintext.encode().hex()\n c = 42\n key = one_char_key(c, len(plain) // 2)\n cipher = fixed_xor(plain, key)\n\n l = len(cipher.encode())\n N = 50\n fake_ciphers = [secrets.token_hex(l) for _ in range(N)]\n ciphers = fake_ciphers + [cipher]\n random.shuffle(ciphers)\n\n assert plaintext in [p[0] for ps in find_all_decodes(ciphers, 3, 2) for p in ps]\n\ndef test_ex4():\n with open(\"4.txt\") as f:\n data = [l.strip() for l in f.readlines()]\n assert 'Now that the party is jumping\\n' in [p[0] for ps in find_all_decodes(data, 4, 4) for p in ps]\n","sub_path":"test_set1.py","file_name":"test_set1.py","file_ext":"py","file_size_in_byte":4183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"450018219","text":"import copy\nimport json\nimport shutil\nimport xml\nimport os\nfrom statistics import mean\nfrom service.models.packing.svg_nest_packing.utils import packmap_from_etree_and_json\nfrom service.models.packing.utils.save import save_svgs, rm_svgs\nfrom service.models.packing.utils.errors import PackingError\n\nBASE_PATH = \"models/packing/\"\n\nclass SvgNestPacker:\n\n def _translate_shape(self, shape, dx):\n for i, point in enumerate(shape['points']):\n shape['points'][i]['x'] = float(shape['points'][i]['x'] + dx)\n shape['points'][i]['y'] = float(shape['points'][i]['y'])\n return shape\n\n def _rearrange_shapes(self, material_width, shapes):\n \"\"\"Orders shapes in one horizontal line without intersections. 
SvgNest needs\n details to be non-touching\"\"\"\n ordered_shapes = []\n curr_right_bound = material_width + 50\n for shape in shapes:\n minx, maxx = self._get_shape_xbounds(shape)\n shape = self._translate_shape(shape, curr_right_bound - minx)\n\n width = maxx - minx\n curr_right_bound += width + 50\n ordered_shapes.append(shape)\n return ordered_shapes\n\n def _get_shape_xbounds(self, shape):\n \"\"\"Returns min and max x coordinate of a shape\"\"\"\n minx = float('inf')\n maxx = 0\n for p in shape['points']:\n x = p['x']\n if x < minx:\n minx = x\n if x > maxx:\n maxx = x\n return minx, maxx\n\n def _divide_svg_per_packmaps(self, path):\n \"\"\"Returns n packmaps as a xml.dom.minidom objects\"\"\"\n dom = xml.dom.minidom.parse(path)\n\n gs = dom.childNodes[1].childNodes\n is_g = lambda elem: 'Element: g at' in str(elem)\n gs = list(filter(is_g, gs))\n for i in range(len(gs)):\n gs[i].setAttribute('transform', '(0, 0)')\n\n return gs\n\n def __call__(self, details, material, iterations, rotations, render=True):\n assert len(details) > 0\n shapes = []\n\n idx = 1\n for detail in details:\n w, h = detail.get_size()\n\n shape = detail.to_nest4j_format()\n for i in range(detail.quantity):\n shape_copy = copy.deepcopy(shape)\n shape_copy['id'] = idx + i\n shape_copy['type_id'] = detail.idx\n shapes.append(shape_copy)\n idx += detail.quantity\n\n shapes = self._rearrange_shapes(w, shapes)\n\n shapes_str = str(shapes).replace(\"'\", '\"')\n path = BASE_PATH +'files/tmp1.json'\n with open(path, 'w') as f:\n w = int(material['width'])\n h = int(material['height'])\n f.write('{\"container\": { \"width\": ' + str(w) + ', \"height\": ' + str(h) + ' },')\n f.write(' \"shapes\": ' + shapes_str)\n f.write('}')\n os.system(f\"java -cp {BASE_PATH}nest4J.jar UseCase.Main {path} {iterations} {rotations}\")\n try:\n shutil.move('res.svg', BASE_PATH + 'files/packing.svg')\n except FileNotFoundError:\n # java app crashed\n raise PackingError\n\n svgs = self._divide_svg_per_packmaps(BASE_PATH + 'files/packing.svg')\n kims = []\n ids_per_list = []\n for svg in svgs:\n packmap = packmap_from_etree_and_json(svg, shapes)\n kims.append(round(packmap.get_kim(), 2))\n ids_per_list.append(packmap.get_ids_per_list())\n if render:\n archive_path = save_svgs(svgs)\n else:\n pass#rm_svgs()\n return {'results': {'materials': {'n': len(kims)},\n 'kim': {'average': 'not implemented',\n 'all': kims},\n 'ids_per_list': ids_per_list},\n 'filepath': archive_path if render else \"Rendering disabled\"}\n","sub_path":"service/models/packing/svg_nest_packing/packing.py","file_name":"packing.py","file_ext":"py","file_size_in_byte":3869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"498343129","text":"class Solution:\n def nthUglyNumber(self, n: int) -> int:\n ans = [1]\n idx2 = idx3 = idx5 = 0\n while len(ans) < n:\n a = ans[idx2] * 2\n b = ans[idx3] * 3\n c = ans[idx5] * 5\n min_n = min(a, b, c)\n if min_n not in ans:\n ans.append(min_n)\n if min_n == a:\n idx2 += 1\n elif min_n == b:\n idx3 += 1\n else:\n idx5 += 1\n print(ans)\n return ans[n-1]\n\n\nif __name__ == '__main__':\n so = Solution()\n n = 10\n res = so.nthUglyNumber(n)\n print(res)","sub_path":"offer/ugly_number.py","file_name":"ugly_number.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"10600165","text":"import os\nfrom twilio.rest import Client\n\n\n# Your Account Sid and Auth Token from twilio.com/console\n# 
and set the environment variables. See http://twil.io/secure\naccount_sid = os.environ['AC4bd0ef5a7a777b65e0a698a8c4663865']\nauth_token = os.environ['52c192d46b062abde74f2da2719e632c']\nclient = Client(account_sid, auth_token)\n\nmessage = client.messages.create(\n body='Hi there! i am your sms bot !',\n from_='+17073294447',\n to='+918898779021'\n )\n\nprint(message.sid)","sub_path":"textme/sms.py","file_name":"sms.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"239641963","text":"\"\"\"\n 练习在终端中录入四个同学体重\n 打印最沉的体重\n 思路:\n 假设第一个是最大的\n 以此与后面的元素比较\n 发现更大的,则更换假设\n\"\"\"\n# weight = float(input(\"第一个同学体制\"))\n# weight1 = float(input(\"第二个同学体制\"))\n# weight2 = float(input(\"第三个同学体制\"))\n# weight3 = float(input(\"第四个同学体制\"))\n# if weight > weight1 and weight2 and weight3:\n# print(\"第一个同体重最重\")\n# if weight1 > weight and weight2 and weight3:\n# print(\"第二个同体重最重\")\n# if weight2 > weight and weight2 and weight3:\n# print(\"第三个同体重最重\")\n# if weight3 > weight1 and weight2 and weight:\n# print(\"第四个同体重最重\")\nweight = float(input(\"第一个同学体制\"))\nweight1 = float(input(\"第二个同学体制\"))\nweight2 = float(input(\"第三个同学体制\"))\nweight3 = float(input(\"第四个同学体制\"))\nmax_value = weight\nif max_value < weight1:\n max_value = weight1\nif max_value < weight2:\n max_value = weight2\nif max_value < weight3:\n max_value = weight3\nprint(max_value)\n","sub_path":"month01/day03/exercise04.py","file_name":"exercise04.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"141329795","text":"#!usr/bin/env python\n# (c) R. D. Scanlon 2016\n\nr\"\"\"\nmicroblog Forms\n\nLogin form constructor for the microblog web app.\n\"\"\"\n\n\n# Imports:\nfrom flask_wtf.form import Form\nfrom wtforms import StringField, BooleanField, TextAreaField\nfrom wtforms.validators import DataRequired, Length\nfrom app.models import User\n\n# Functions/Classes/etc.:\nclass LoginForm(Form):\n \"\"\"\n Generates login form for authentication of users.\n \"\"\"\n user_id = StringField('user_id', validators=[DataRequired()])\n remember_me = BooleanField('remember_me', default=False)\n\n\nclass EditForm(Form):\n \"\"\"\n Generates a form for editing user information.\n \"\"\"\n nickname = StringField('nickname', validators=[DataRequired()])\n about_me = TextAreaField('about_me', validators=[Length(min=0, max=140)])\n\n def __init__(self, original_nickname, *args, **kwargs):\n Form.__init__(self, *args, **kwargs)\n self.original_nickname = original_nickname\n\n def validate(self):\n \"\"\"\n Validates the user entered nickname to ensure that there are no collisions\n in the database.\n :return:\n \"\"\"\n if not Form.validate(self):\n return False\n if self.nickname.data == self.original_nickname:\n return True\n user = User.query.filter_by(nickname=self.nickname.data).first()\n if user is not None:\n self.nickname.errors.append('This nickname is in use. 
CHOOSE ANOTHER.')\n return False\n return True\n","sub_path":"app/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"650200200","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Aug 22 18:07:37 2018\n\n@author: mishr\n\"\"\"\n\n# Here we will write a custom classifier, so comment out the import of KNeighborsClassifier.\n# Create fn euc() to get the Euclidean distance.\nfrom scipy.spatial import distance\ndef euc(a,b):\n\treturn distance.euclidean(a,b)\n# Create class ScrapKNN() with fns: fit, predict and closest\nclass ScrapKNN():\n\tdef fit(self,X_train,y_train):\n\t\tself.X_train=X_train\n\t\tself.y_train=y_train\n\n\tdef predict(self,X_test):\n\t\tpredictions=[]\n\t\tfor row in X_test:\n\t\t\tlabel=self.closest(row)\n\t\t\tpredictions.append(label)\n\t\treturn predictions\n\t\n\tdef closest(self,row):\n\t\tbest_dist = euc(row,self.X_train[0])\n\t\tbest_index = 0\n\t\tfor i in range(1,len(self.X_train)):\n\t\t\tdist=euc(row,self.X_train[i])\n\t\t\tif dist/',updatepost,name='updatepost'),\n path('deletepost//',deletepost,name=\"deletepost\"),\n path('delc',deleting_cookie, name='delc'),\n\n\n]+ static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n","sub_path":"bloggpro/bloggpro/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"133621918","text":"import requests\r\n\r\nAPI_T = '3ff510558e3e70a5025c3e5410267d49'\r\nurlGet = 'http://challenge.code2040.org/api/reverse'\r\nurlValid = 'http://challenge.code2040.org/api/reverse/validate'\r\n# a method called reverseString is defined; it takes in any String that is provided \r\ndef reverseString(string):\r\n # creates an empty list called alphabet \r\n alphabet = []\r\n # loops through the string in reverse \r\n for i in range(len(string)-1,-1,-1):\r\n # adds each reversed character to the list\r\n alphabet.append(string[i])\r\n # the characters added to the list are separated, so the join method must be called to make them a word again \r\n reverse = ''.join(alphabet) \r\n # returns the word in its reverse form \r\n return reverse\r\n# posts to the given url using the API token that was provided to you to identify who you are \r\ns = requests.post(urlGet,data={'token':API_T})\r\n# stores the text in api_word: this is the word that was provided in its correct format \r\napi_word = s.text\r\n# reverse is assigned the reversed form of the word that was provided by the url \r\nreverse = reverseString(api_word)\r\n# the word is now posted to the url with the api token to id the person; however, the word is in its reverse form \r\np = requests.post(urlValid,data={'token':API_T,'string':reverse})\r\n# prints out that the step was completed \r\nprint(p.text)\r\n","sub_path":"code2040_2.py","file_name":"code2040_2.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"559203352","text":"import codecs\n# Build the zenit_pelar substitution table once. The original chain of str.replace()\n# calls was buggy: replacing sequentially re-encrypted letters that had already been\n# swapped (e.g. A->I and then I->A cancelled each other out).\nTABELA = str.maketrans(\"ABCDEFGHIJKLMNOPQRSTUVWXZ\", \"IDCBOHGFAJQNVLEZKTSRUMXWP\")\ndef zenit_pelar(x):\n with codecs.open(x, \"r+\") as abrir:\n for linha in abrir.readlines():\n\n texto = linha.upper().translate(TABELA)\n crip = texto\n print(crip, end=\" \")\n\n\nprint(\"Note: type the file path with forward slashes [/] \\nthis program only accepts .txt files\")\nnome = input(\"Enter the file: \")\nzenit_pelar(nome)","sub_path":"lista06/ex010.py","file_name":"ex010.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"348795471","text":"from vnpy.app.cta_strategy import (\n CtaTemplate,\n StopOrder,\n TickData,\n BarData,\n TradeData,\n OrderData,\n BarGenerator,\n ArrayManager,\n)\nimport numpy as np\nfrom functools import reduce\nfrom abu.UtilBu.ABuRegUtil import calc_regress_deg\nimport abu.UtilBu.ABuRegUtil as reg_util\nfrom vnpy.trader.object import Status\nfrom vnpy.trader.utility import IntervalGen\nfrom vnpy.trader.constant import Direction, Exchange, Interval, Offset, Status, Product, OptionType, OrderType, KlinePattern, KLINE_PATTERN_CHINESE\nfrom dataclasses import dataclass, field\nfrom enum import Enum\nimport math\nimport pandas as pd\nfrom functools import partial\n\nclass PatternRecord:\n data = {}\n expiry = {}\n def __init__(self):\n pass\n\n def add_pattern(self, pattern_list):\n for item in pattern_list:\n self.data[item[0]] =dict(count=0,value=item[1])\n\n\n def update(self):\n discard = []\n for i in self.data.keys():\n self.data[i][\"count\"] += 1\n if i in self.expiry and self.data[i][\"count\"] > self.expiry[i]:\n discard.append(i)\n \n for item in discard:\n self.data.pop(item)\n\n def set_expiry(self, pattern_list, count):\n for item in pattern_list:\n self.expiry[item] = count\n\n def __contains__(self, item):\n return item in self.data\n \n def __getitem__(self, i):\n return self.data[i]\n \n def keys(self):\n return self.data.keys()\n\n def items(self):\n return self.data.items()\n\n def values(self):\n return self.data.values()\nclass ClosePosType(Enum):\n SAFE_PRICE = 1\n TREND_CHANGE = 2\n\n\nclass Position:\n volumn: int = 0\n level: int = 0\n close_price: float = 0.0\n buy_price: float = 0\n safe_price: float = 0\n order_data = np.array([])\n # fix for wrong pattern predictions; later this should only run when level >= 3\n last_close_info = None\n guard = None\n\n def __init__(self, strategy):\n self.strategy: MaLevelTrackStrategy = strategy\n # self.am = self.strategy.am\n self.ma_tag = self.strategy.ma_tag\n self.close_process = [self.close1, self.close_ma120]\n\n def buy(self, price: float, volume: float, lock: bool = False, type: OrderType = OrderType.MARKET):\n \"\"\"\n Send buy order to open a long position.\n \"\"\"\n return self.strategy.send_order(Direction.LONG, Offset.OPEN, price, volume, lock, type)\n\n def sell(self, price: float, volume: float, lock: bool = False, type: OrderType = OrderType.MARKET):\n \"\"\"\n Send sell order to close a long position.\n \"\"\"\n return self.strategy.send_order(Direction.SHORT, Offset.CLOSE, price, volume, lock, type)\n\n def short(self, price: float, volume: float, lock: bool = False, type: OrderType = OrderType.MARKET):\n \"\"\"\n Send short order to open a short position.\n \"\"\"\n return self.strategy.send_order(Direction.SHORT, Offset.OPEN, price, volume, lock, type)\n\n def cover(self, price: float, volume: float, lock: bool = False, 
type: OrderType = OrderType.MARKET):\n \"\"\"\n Send cover order to close a short position.\n \"\"\"\n return self.strategy.send_order(Direction.LONG, Offset.CLOSE, price, volume, lock, type)\n\n def close1(self, bar:BarData, calc_data):\n if self.volumn < 0:\n if bar.close_price > self.close_price:\n return self.strategy.cover(self.close_price, abs(self.volumn), type=OrderType.MARKET,\n extra= { \"reason\":\"Close position: hit stop price {}\".format(self.close_price)})\n \n elif self.volumn > 0:\n if bar.close_price < self.close_price:\n return self.strategy.sell(self.close_price, abs(self.volumn), type=OrderType.MARKET,\n extra={\"reason\": \"Close position: hit stop price {}\".format(self.close_price)})\n\n def close_ma120(self, bar:BarData, calc_data):\n if not (self.volumn < 0 and bar.close_price < self.safe_price or \\\n self.volumn > 0 and bar.close_price > self.safe_price):\n return\n\n am = self.strategy.am\n rg = (bar.close_price / self.buy_price) - 1\n\n close_price = None\n if rg > 0.01 and self.volumn > 0:\n close_price = am.sma(120, array=False, length=120+1)\n if self.level < 5:\n self.level = 5\n return self.strategy.buy(bar.close_price, 50, type=OrderType.MARKET) \n elif rg < -0.01 and self.volumn < 0:\n close_price = am.sma(120, array=False, length=120+1)\n if self.level < 5:\n self.level = 5\n return self.strategy.short(bar.close_price, 50, type=OrderType.MARKET) \n \n\n for lvl in self.strategy.ma_level[-1:]:\n if len(self.order_data) < lvl:\n close_price = am.sma(lvl, array=False, length=lvl+1)\n break\n \n\n if close_price is None:\n lvl = self.strategy.ma_level[-1]\n close_price = am.sma(lvl, array=False, length=lvl+1)\n\n \n \n if self.volumn < 0:\n if bar.close_price > close_price:\n return self.strategy.cover(bar.close_price, abs(self.volumn), type=OrderType.MARKET,\n extra= { \"reason\":\"Close position: hit MA line price {}\".format(close_price)})\n \n elif self.volumn > 0:\n if bar.close_price < close_price:\n return self.strategy.sell(bar.close_price, abs(self.volumn), type=OrderType.MARKET,\n extra={\"reason\": \"Close position: hit MA line price {}\".format(close_price)})\n\n \n def close2(self, bar:BarData, calc_data):\n \n if self.level > 0:\n self.order_data = np.append(self.order_data, bar.close_price)\n order_id = None\n offset = -40\n offset_m = int(offset / 2)\n\n \n deg_full = calc_regress_deg(self.strategy.am.close[-10 :], False)\n \n if len(self.order_data) > abs(offset * 1.5):\n y_fit = reg_util.regress_y_polynomial(self.order_data, zoom=True)\n deg_order_short = calc_regress_deg(y_fit[:abs(offset)], False)\n\n if self.volumn > 0:\n if deg_full < -0.01:\n # if abs(deg_order_short) < abs(deg_full):\n return self.strategy.sell(bar.close_price, abs(self.volumn), type=OrderType.MARKET, \n extra={\"reason\":\"Close position: trend weakening, deg={}\".format(deg_full)})\n\n\n elif self.volumn < 0:\n if deg_full > 0.01:\n # if abs(deg_order_short) < abs(deg_full):\n return self.strategy.cover(bar.close_price, abs(self.volumn), type=OrderType.MARKET, \n extra={\"reason\": \"Close position: trend weakening, deg={}\".format(deg_full)})\n\n # print(\"pos<0\", deg_order_short, deg_full)\n\n\n def on_strategy(self, bar:BarData, calc_data):\n if self.volumn == 0:\n return \n \n if self.level == 0:\n if self.volumn > 0 and bar.close_price > self.safe_price:\n self.level += 1\n elif self.volumn < 0 and bar.close_price < self.safe_price:\n self.level += 1\n\n offset = -40\n calc_nums = np.array(self.ma_tag[-offset:-1])\n # var_val = np.var(calc_nums)\n std_val = np.std(calc_nums)\n mean_val = np.mean(calc_nums)\n \n if self.level == 1 and std_val < 0.8:\n # self.strategy.ma_tag[-1] > 3\n # level += 1\n 
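# level-2 promotion: ma_tag scores MA(5/10/20/30/120) alignment 0-5 per bar; a low std\n # over the last ~40 bars around a clearly bullish (> 3.8) or bearish (< 1.2) mean marks\n # a stable trend, and a level above 0 also gates the price history close2() accumulates\n 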
if self.volumn > 0 and mean_val > 3.8:\n self.level += 1\n elif self.volumn < 0 and mean_val < 1.2:\n self.level += 1\n\n order_id = None \n\n for close_process in self.close_process:\n order_id = close_process(bar, calc_data)\n if order_id is not None:\n break\n\n return order_id\n # # print(deg)\n # if abs(deg_order_short) < abs(deg_full):\n # order_id = self.strategy.cover(bar.close_price, 1) \n def on_trade(self, trade: TradeData):\n \"\"\"\n Callback of new trade data update.\n \"\"\"\n self.put_event()\n\n\n def on_order(self, order: OrderData):\n if order.status == Status.ALLTRADED:\n # pre_volumn = 0\n if order.direction == Direction.LONG:\n if self.volumn == 0:\n self.close_price = round(order.price * 0.998, 2)\n self.safe_price = order.price * 1.005\n self.buy_price = order.price\n self.order_data = np.array([])\n self.level = 0\n self.volumn += order.volume\n \n elif order.direction == Direction.SHORT:\n if self.volumn == 0:\n self.close_price = round(order.price * 1.002, 2)\n self.order_data = np.array([])\n self.buy_price = order.price\n self.safe_price = order.price * 0.995\n self.level = 0\n self.volumn -= order.volume\n\n elif order.direction == Direction.NET:\n self.volumn = order.volume\n \n \n''' \n TODO: add recognition of the time of day\n TODO: add red/green doji recognition\n TODO: after a doji, buy on a fast trend check\n TODO: add hanging-man and low-line candle recognition\n TODO: improve the strategy architecture so multiple strategies can coexist\n'''\nclass MaLevelTrackStrategy(CtaTemplate):\n author = \"Trader who uses Python\"\n\n ma_level = [5, 10, 20, 30, 120]\n ma_tag = []\n bd = []\n fast_ma0 = 0.0\n fast_ma1 = 0.0\n\n slow_ma0 = 0.0\n slow_ma1 = 0.0\n request_order = []\n bar_identify = []\n \n \n parameters = [\"ma_level\"]\n variables = [\"fast_ma0\", \"fast_ma1\", \"slow_ma0\", \"slow_ma1\"]\n\n def __init__(self, cta_engine, strategy_name, vt_symbol, setting):\n \"\"\"\"\"\"\n super(MaLevelTrackStrategy, self).__init__(\n cta_engine, strategy_name, vt_symbol, setting\n )\n self.bg = BarGenerator(self.on_bar, 15, self.on_1min_bar)\n self.am = ArrayManager(400)\n self.am3 = ArrayManager(150)\n self.bg3 = BarGenerator(self.on_bar, 3, self.on_3min_bar)\n self.am5 = ArrayManager(120)\n self.bg5 = BarGenerator(self.on_bar, 5, self.on_5min_bar)\n self.order_data = None\n self.positions = Position(self)\n self.std_range = IntervalGen(np.std,5)\n self.std_range3 = IntervalGen(np.std,5) \n self.std_range5 = IntervalGen(np.std,5)\n self.pattern_record = PatternRecord()\n # self.pattern_record.set_expiry([KlinePattern.CDLEVENINGSTAR], 3)\n self.pattern_record.set_expiry(list(KlinePattern), 1)\n \n five_min_open_5 = partial(self.reverse_shape_strategy, setting={\"atr\":10, \"atr_valve\":0.8, \"deg1\":(10,5),\"deg2\":5})\n self.open_strategy = {\n \"1\":[self.reverse_shape_strategy],\n \"5\":[five_min_open_5],\n }\n self.offset = 40\n self.ma120_track = None\n self.ma120_track_list = []\n def on_init(self):\n \"\"\"\n Callback when strategy is inited.\n \"\"\"\n self.write_log(\"Strategy initialized\")\n self.load_bar(10)\n\n def on_start(self):\n \"\"\"\n Callback when strategy is started.\n \"\"\"\n self.write_log(\"Strategy started\")\n self.put_event()\n\n def on_stop(self):\n \"\"\"\n Callback when strategy is stopped.\n \"\"\"\n self.write_log(\"Strategy stopped\")\n\n self.put_event()\n\n def on_tick(self, tick: TickData):\n \"\"\"\n Callback of new tick data update.\n \"\"\"\n self.bg.update_tick(tick)\n self.bg3.update_tick(tick)\n self.bg5.update_tick(tick)\n\n def on_3min_bar(self, bar: BarData):\n self.am3.update_bar(bar)\n self.std_range3.update(self.am3.range[-1])\n if not self.am.inited or not self.trading:\n return \n pattern = self.am3.pattern([KlinePattern.CDLEVENINGSTAR, KlinePattern.CDL2CROWS])\n \n if len(pattern) > 0:\n print(pattern)\n self.pattern_record.add_pattern(pattern)\n # deg = calc_regress_deg(self.am3.close[-20:])\n \n def wave(self, data, window = 0.0002):\n\n if len(data) <= 0:\n return \n # r = array[::-1]\n result = { \"value\":[], \"range\":[], \"pos\":[], \"length\":[]}\n r = data\n l = len(data) - 1\n now = r[0]\n # v_list.append(now)\n # p_list.append(0)\n pos = 1\n\n vol = 0\n u_tag = None\n d_tag = None\n end_tag = None\n start_pos = 0\n while pos < l:\n if math.isnan(now):\n now = r[pos]\n pos += 1\n continue\n else:\n start_pos = pos - 1\n break\n\n while pos < l:\n\n if now < r[pos]:\n u_tag = pos\n if d_tag:\n diff = r[start_pos] - r[d_tag]\n if abs(diff / r[start_pos]) > window and d_tag - start_pos > 1:\n end_tag = d_tag\n \n elif now > r[pos]:\n d_tag = pos\n if u_tag:\n diff = r[start_pos] - r[u_tag]\n if abs(diff / r[start_pos]) > window and d_tag - start_pos > 1:\n end_tag = u_tag\n\n if end_tag is not None:\n result[\"range\"].append(r[end_tag] / r[start_pos] - 1)\n result[\"length\"].append(end_tag - start_pos)\n start_pos = end_tag\n result[\"value\"].append(r[end_tag])\n result[\"pos\"].append(end_tag)\n end_tag = None\n\n vol += r[pos] - now\n now = r[pos]\n pos += 1\n return pd.DataFrame(result)\n\n \n def mode_identify(self, bar: BarData):\n self.bar_identify = []\n hl_scale = round(bar.high_price / bar.low_price - 1, 4)\n if hl_scale > 0.001:\n diff = bar.high_price - bar.low_price\n diff_up = bar.low_price + diff / 2 * 1.20\n diff_down = bar.low_price + diff / 2 * 0.80 \n close = bar.close_price\n if bar.open_price < diff_up and bar.open_price > diff_down and \\\n bar.close_price < diff_up and bar.close_price > diff_down:\n if bar.close_price > bar.open_price:\n print(\"green doji\",bar.datetime, bar.high_price,bar.low_price,diff,diff_up,diff_down, bar.open_price, bar.close_price)\n else:\n print(\"red doji\",bar.datetime, bar.high_price,bar.low_price,diff,diff_up,diff_down, bar.open_price, bar.close_price)\n \n\n def on_5min_bar(self, bar: BarData):\n self.std_range5.update(self.am5.range[-1])\n self.am5.update_bar(bar)\n if not self.am.inited or not self.trading:\n return \n \n self.on_strategy(self.am5, bar, self.open_strategy[\"5\"])\n # pattern_list = [KlinePattern.CDLEVENINGSTAR, KlinePattern.CDL2CROWS, KlinePattern.CDLCONCEALBABYSWALL, KlinePattern.CDLEVENINGDOJISTAR]\n # pattern = self.am5.pattern(list(KlinePattern))\n # if len(pattern) > 0:\n # print(list(map(lambda x: (KLINE_PATTERN_CHINESE[x[0]],x[1]), pattern)))\n # self.pattern_record.add_pattern(pattern)\n # deg_full = calc_regress_deg(self.am.close[-40 :], False)\n # print(\"deg:\",deg_full)\n \n # self.pattern_record.update()\n\n def open_v3(self, am:ArrayManager, bar:BarData):\n std_val2 = np.std(np.array(self.ma_tag[-10:-1]))\n mean_val2 = np.mean(np.array(self.ma_tag[-10:-1]))\n mean = np.mean(np.array(self.ma_tag[-30:-10]))\n\n if std_val2 < 0.2: \n if mean_val2 > 3:\n if mean_val2 >= (mean + 1):\n return self.buy(bar.close_price, 1, type=OrderType.MARKET)\n elif mean_val2 < 2:\n if mean_val2 <= (mean - 1):\n return self.short(bar.close_price, 1, type=OrderType.MARKET)\n\n def open_v1(self, am:ArrayManager, bar:BarData):\n offset = -40\n offset_m = int(offset / 2)\n calc_nums = np.array(self.ma_tag[-offset:-1])\n mean_val = np.mean(calc_nums)\n # var_val = np.var(calc_nums)\n std_val = np.std(calc_nums)\n if std_val < 1 and mean_val < 2 and self.ma_tag[-1] >= (mean_val + 2):\n return self.buy(bar.close_price, 1, type=OrderType.MARKET)\n elif std_val < 1 and mean_val > 3 and self.ma_tag[-1] <= (mean_val - 2):\n return self.short(bar.close_price, 1, type=OrderType.MARKET)\n \n def open_v2(self, am:ArrayManager, bar:BarData):\n std_val2 = np.std(np.array(self.ma_tag[-10:-1]))\n mean_val2 = np.mean(np.array(self.ma_tag[-10:-1]))\n mean = np.mean(np.array(self.ma_tag[-30:-10]))\n\n if std_val2 < 0.2:\n if mean_val2 > 2.5:\n if mean_val2 >= (mean + 1):\n return self.buy(bar.close_price, 1, type=OrderType.MARKET)\n elif mean_val2 < 2.5:\n if mean_val2 <= (mean - 1):\n return self.short(bar.close_price, 1, type=OrderType.MARKET)\n\n \n def open2(self, am:ArrayManager, bar:BarData, calc_data):\n deg = calc_data[\"deg20\"]\n ma = self.ma_tag[-1]\n if deg > 0.5 and ma > 3 and self.am5.range[-1] > -0.002:\n return self.buy(bar.close_price, 1, type=OrderType.MARKET)\n elif deg < -0.5 and ma < 2 and self.am5.range[-1] < 0.002:\n return self.short(bar.close_price, 1, type=OrderType.MARKET) \n\n def open1(self, am:ArrayManager, bar:BarData, calc_data):\n \n mean = calc_data[\"mean30_10\"]\n mean_val2 = calc_data[\"mean10\"]\n # if std_val2 < 0.2: \n if mean_val2 > 3.5 and mean_val2 >= (mean + 2):\n return self.buy(bar.close_price, 1, type=OrderType.MARKET)\n elif mean_val2 < 1.5 and mean_val2 <= (mean - 2):\n return self.short(bar.close_price, 1, type=OrderType.MARKET)\n\n # capture V-shaped reversals\n def reverse_shape_strategy(self, am:ArrayManager, bar:BarData, calc_data, setting={\"atr\":40, \"atr_valve\":0.8, \"deg1\":(40,20),\"deg2\":(20,0),}):\n \n deg1 = calc_data[\"deg40_20\"]\n deg2 = calc_data[\"deg20_0\"]\n kdj = calc_data[\"kdj\"]\n\n atr = self.am.atr(40)\n\n if atr < 0.08:\n return\n\n if deg1 > 0 and deg2 > 0 or \\\n deg1 < 0 and deg2 < 0:\n return\n \n if not (abs(deg1) > 0.15 and abs(deg2) > 0.1 and (abs(deg1) + abs(deg2)) > 0.3) :\n return\n\n close = am.close[-40:]\n min_val = np.min(close)\n max_val = np.max(close)\n mid_val = max_val if deg1 > 0 else min_val\n mid_pos = np.where(close == mid_val)[0][0]\n\n if mid_pos < 10 or mid_pos > 30:\n return\n\n start_val = np.min(close[:mid_pos]) if deg1 > 0 else np.max(close[:mid_pos])\n start_pos = np.where(close == start_val)[0][0]\n l = mid_pos - start_pos\n \n\n\n\n # pos2 = np.where(close == min_val)[0][0]\n \n x_fit = reg_util.regress_y_polynomial(close[:mid_pos], zoom=True)\n deg1_remake = calc_regress_deg(x_fit[:abs(mid_pos)], False)\n y_fit = reg_util.regress_y_polynomial(close[mid_pos:], zoom=True)\n deg2_remake = calc_regress_deg(y_fit[:abs(mid_pos)], False)\n print(start_pos, mid_pos, deg1, deg2, deg1_remake, deg2_remake, l, start_val, mid_val)\n if deg2 < 0:\n if kdj[0] < 20 and kdj[1] < 10 and kdj[2] < 10:\n # if kdj[2] < 10:\n return self.short(bar.close_price, 1, type=OrderType.MARKET)\n else:\n if kdj[0] > 80 and kdj[1] > 90 and kdj[2] > 90:\n # if kdj[2] > 90:\n return self.buy(bar.close_price, 1, type=OrderType.MARKET)\n\n # print(\"found a large V shape:\", deg1, deg2 )\n\n\n\n def open5(self, am:ArrayManager, bar:BarData, calc_data):\n \n ma = self.ma_tag[-1]\n mean = calc_data[\"mean30_10\"]\n atr = self.am.atr(10, array=True, length=20)\n tr = self.am.atr(1, array=True, length=11)\n # self.ma120_track\n ma120 = self.am.sma(120)\n # if std_val2 < 0.2: \n mean_std = calc_data[\"mean_std\"]\n if mean_std < 0.8 and tr[-1] > 0.1 and tr[-1] / tr[-10] > 3 and tr[-1] / atr[-1] >= 1.7 and tr[-10] / atr[-10] < 1:\n if np.sum(self.am.range[-10:]) > 0 and self.ma120_track > 0:\n return self.buy(bar.close_price, 1, type=OrderType.MARKET)\n elif 
self.ma120_track < 0:\n return self.short(bar.close_price, 1, type=OrderType.MARKET)\n\n def open_kline1(self, am:ArrayManager, bar:BarData, calc_data):\n \n if KlinePattern.CDLEVENINGSTAR not in self.pattern_record:\n return\n # if std_val2 < 0.2: \n deg = calc_regress_deg(self.am.close[-5:], False)\n print(\"kline_strategy\",deg)\n if deg < -0.1:\n return self.short(bar.close_price, 1, type=OrderType.MARKET)\n \n def generate_data(self, bar:BarData):\n offset = -self.offset\n offset_m = int(offset / 2)\n calc_nums = np.array(self.ma_tag[-offset:-1])\n # var_val = np.var(calc_nums)\n std_val = np.std(calc_nums)\n std_val2 = np.std(np.array(self.ma_tag[-10:-1]))\n std_val3 = np.std(np.array(self.am.range[-30:-10]))\n ma = self.ma_tag[-1]\n \n mean_val = np.mean(calc_nums)\n mean_val2 = np.mean(np.array(self.ma_tag[-5:-1]))\n mean_val3 = np.mean(np.array(self.ma_tag[-20:-1]))\n mean_val4 = np.mean(np.array(self.ma_tag[-30:-5]))\n kdj_val = self.am.kdj()\n\n deg1 = calc_regress_deg(self.am.close[offset : offset_m], False)\n deg2 = calc_regress_deg(self.am.close[offset_m :], False)\n deg3 = calc_regress_deg(self.am.close[-10 :], False)\n deg_full = calc_regress_deg(self.am.close[offset :], False)\n\n wave = self.wave(self.am.close[-30:])\n wave_r_sum = np.sum(wave[\"range\"])\n macd=self.am.macd(20,40, 16)\n calc_data = (dict(\n kdj=[round(kdj_val[\"k\"][-1],2),round(kdj_val[\"d\"][-1],2),round(kdj_val[\"j\"][-1],2)],\n cci_20=self.am.cci(20),rsi=self.am.rsi(20),adx=self.am.adx(20),boll=self.am.boll(20, 3.4),\n macd=[round(macd[0],2),round(macd[1],2),round(macd[2],2)],\n deg40_20=round(deg1,2), deg20_0=round(deg2,2), deg20_10=round(calc_regress_deg(self.am.close[-20:-10], False),2), deg10_0=round(deg3,2),\n deg30_15=round(calc_regress_deg(self.am.close[-30:-15], False),2), deg15_0=round(calc_regress_deg(self.am.close[-15:], False),2),deg_f=round(deg_full,2),\n atr=round(self.am.atr(10, length=15), 3), tr=round(self.am.atr(1, length=2), 3),atr_40=round(self.am.atr(40, length=42), 3),\n time=bar.datetime, price=bar.close_price, ma=round(ma, 2), \n std_40=round(std_val, 2),mean40=round(mean_val,2), mean_std=np.mean(self.std_range.data[-5:]),\n std_10=round(std_val2,2), mean30_10=round(mean_val4,2), mean10=round(mean_val2,2),\n vol=self.am.volume[-1], std_range=self.std_range.data[-1:-5:-1], range=self.am.range[-1:-5:-1].tolist(),\n range_sum=np.sum(self.am.range[-5:]), \n pattern=list(map(lambda x: KLINE_PATTERN_CHINESE[x], self.pattern_record.keys())),\n ma120t=self.ma120_track, \n ma120t_list=self.ma120_track_list[-1:-10:-1], \n ma120t_sort=sorted(self.ma120_track_list[-20:-1], key=abs),\n ma120t_sum=np.sum(self.ma120_track_list[-20:-1] + [self.ma120_track]), \n ma120t_mean=np.mean(self.ma120_track_list[-20:-1] + [self.ma120_track]),\n ma120t_std=np.std(self.ma120_track_list[-20:-1] + [self.ma120_track]),\n wave_cnt=len(wave), wave_r_sum=wave_r_sum, atr_mean=np.mean(self.am.atr(20, array=True,length=240)[-200:])\n ))\n\n return calc_data\n\n def on_strategy(self, am:ArrayManager, bar: BarData, strategy_list):\n calc_data = self.generate_data(bar)\n \n order_id = None\n if self.pos == 0:\n for open_strategy in strategy_list:\n if order_id is not None:\n break\n order_id = open_strategy(am, bar, calc_data)\n else:\n order_id = self.positions.on_strategy(bar, calc_data)\n\n \n if order_id is not None:\n offset = -self.offset\n offset_m = int(offset / 2)\n self.tracker[\"trade_info\"].append((\n self.am.time_array[offset], self.am.time_array[offset_m], bar.datetime, calc_data[\"deg40_20\"], 
calc_data[\"deg20_0\"]))\n self.request_order.extend(order_id)\n \n if self.tracker is not None:\n self.tracker[\"ma_tag_ls\"].append(calc_data)\n \n def on_1min_bar(self, bar: BarData):\n self.am.update_bar(bar)\n am = self.am\n max_len = self.ma_level[-1] + 20\n data = self.am.close[-max_len:-1]\n ma_lvl = []\n for i in self.ma_level:\n ma = self.am.sma(i, True)[-1]\n ma_lvl.append(ma)\n \n \n l = len(ma_lvl)\n ma_lvl_tag = []\n now = bar.close_price\n direction = 1 if now > ma_lvl[0] else 0\n ma_lvl_tag.append(direction)\n for i in range(l-1):\n val = 1 if ma_lvl[i] > ma_lvl[i+1] else 0\n ma_lvl_tag.append(val)\n bincount_val = np.bincount(np.array(ma_lvl_tag))\n tag_val = 0\n if len(bincount_val) == 2:\n tag_val = bincount_val[1]\n\n if len(self.ma_tag) < 200:\n self.ma_tag.append(tag_val)\n else:\n self.ma_tag[:-1] = self.ma_tag[1:]\n self.ma_tag[-1] = tag_val\n if self.tracker is not None:\n self.tracker[\"bar_data\"].append(bar)\n self.std_range.update(self.am.range[-1])\n\n ma120 = self.am.sma(120)\n \n if bar.close_price >= ma120:\n if self.ma120_track is None:\n self.ma120_track = 1\n elif self.ma120_track > 0:\n self.ma120_track += 1\n else:\n self.ma120_track_list.append(self.ma120_track)\n self.ma120_track = 1\n elif bar.close_price < ma120:\n if self.ma120_track is None:\n self.ma120_track = -1\n elif self.ma120_track < 0:\n self.ma120_track -= 1\n else:\n self.ma120_track_list.append(self.ma120_track)\n self.ma120_track = -1\n\n\n\n if not am.inited or not self.trading:\n return\n \n \n \n self.on_strategy(am, bar, self.open_strategy[\"1\"])\n # median_val = np.median(calc_nums)\n \n self.put_event()\n\n def on_bar(self, bar: BarData):\n \"\"\"\n Callback of new bar data update.\n \"\"\"\n self.bg3.update_bar(bar)\n self.bg5.update_bar(bar)\n self.bg.update_bar(bar)\n\n \n\n # def init_order_data(self):\n # self.order_data = np.array([])\n \n\n def on_order(self, order: OrderData):\n \"\"\"\n Callback of new order data update.\n \"\"\"\n print(\"{}产生了{},价格为{},笔数为{},交易{},pos={}\".format(order.datetime.strftime(\"%m/%d %H:%M:%S\"), order.offset.value + order.direction.value,order.price, order.volume, order.status.value, self.pos))\n \n if order.vt_orderid in self.request_order:\n self.positions.on_order(order)\n if order.status == Status.ALLTRADED or order.status == Status.CANCELLED or order.status == Status.REJECTED:\n self.request_order.remove(order.vt_orderid)\n # if order.status == Status.ALLTRADED or order.status == Status.PARTTRADED:\n # if order.direction == Direction.LONG:\n # if self.positions.volumn == 0:\n # self.positions.close_price = round(order.price * 0.995)\n # self.positions.volumn += order.volume\n # elif order.direction == Direction.SHORT:\n # self.positions.volumn -= order.volume\n # elif order.direction == Direction.NET:\n # self.positions.volumn = order.volume\n\n def on_trade(self, trade: TradeData):\n \"\"\"\n Callback of new trade data update.\n \"\"\"\n self.put_event()\n\n def on_stop_order(self, stop_order: StopOrder):\n \"\"\"\n Callback of stop order update.\n \"\"\"\n pass\n","sub_path":"vnpy/app/cta_strategy/strategies/ma_level_tracker_strategy.py","file_name":"ma_level_tracker_strategy.py","file_ext":"py","file_size_in_byte":29027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"612457469","text":"#!/usr/bin/env python \n# encoding: utf-8 \n\n\"\"\" \n@version: v1.0 \n@author: BaoChengCai \n@contact: baochengcai@lanjingren.com\n@site: http://www.meipian.cn \n@software: PyCharm \n@file: 
test.py \n@time: 2019/3/1 12:26 PM \n\"\"\"\nfrom action import InceptionClass\n\ndef demo():\n a = InceptionClass()\n print(a.CheckSql('rm-2ze76g4a66f5h4645747.mysql.rds.aliyuncs.com', '3306', 'zabbix', '', 0))\n\nif __name__ == '__main__':\n demo()","sub_path":"drf_api/app/instance/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"654180993","text":"import turtle as trtl\n\n\n# creating the box\ndef main():\n gary = trtl.Turtle()\n\n xPos = -500\n yPos = -300\n gary.speed(0)\n gary.penup()\n gary.goto(xPos, yPos)\n\n # creating the bottom right curve\n gary.pendown()\n gary.goto(xPos + 980, yPos)\n gary.goto(xPos + 980, yPos + 630)\n gary.goto(xPos, yPos + 630)\n gary.goto(xPos, yPos)\n gary.penup()\n x = 1\n x2Pos = 480\n y2Pos = -300\n while (x < 50):\n gary.goto(xPos + 19.6, yPos)\n gary.pendown()\n gary.goto(x2Pos, y2Pos + 12.6)\n gary.penup()\n xPos = xPos + 19.6\n y2Pos = y2Pos + 12.6\n x = x + 1\n\n # creating bottom left curve\n x3Pos = 480\n y3Pos = -300\n x4Pos = -500\n y4Pos = -300\n x = 0\n while (x < 50):\n gary.goto(x3Pos , y3Pos)\n gary.pendown()\n gary.goto(x4Pos, y4Pos)\n gary.penup()\n x3Pos = x3Pos - 19.6\n y4Pos = y4Pos + 12.6\n x = x + 1\n\n #creating top right curve\n x = 0\n x5Pos = -500\n y5Pos = 330\n x6Pos = 480\n y6Pos = 330\n while (x < 50):\n gary.goto(x5Pos, y5Pos)\n gary.pendown()\n gary.goto(x6Pos,y6Pos)\n gary.penup()\n x5Pos = x5Pos + 19.6\n y6Pos = y6Pos -12.6\n x = x + 1\n \n #creating top left curve\n x = 0 \n x7Pos = 480\n y7Pos = 330\n x8Pos = -500\n y8Pos = 330\n while (x < 50):\n gary.goto(x7Pos,y7Pos)\n gary.pendown()\n gary.goto(x8Pos,y8Pos)\n gary.penup()\n x7Pos = x7Pos - 19.6\n y8Pos = y8Pos -12.6\n x = x + 1\n\n\nif __name__ == \"__main__\":\n main()\n\n wn = trtl.Screen()\n wn.mainloop()\n\n\n def box(gary):\n xPos = -500\n yPos = -300\n gary.speed(0)\n gary.penup()\n gary.goto(xPos, yPos)\n gary.pendown()\n gary.goto(xPos + 980, yPos)\n gary.goto(xPos + 980, yPos + 630)\n gary.goto(xPos, yPos + 630)\n gary.goto(xPos, yPos)\n gary.penup()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"634812185","text":"# -*- coding:utf-8 -*-\nimport requests\nimport os\n\ndef download_file_from_google_drive(id, destination):\n URL = \"https://docs.google.com/uc?export=download\"\n\n session = requests.Session()\n\n response = session.get(URL, params = { 'id' : id }, stream = True)\n token = get_confirm_token(response)\n\n if token:\n params = { 'id' : id, 'confirm' : token }\n response = session.get(URL, params = params, stream = True)\n\n save_response_content(response, destination)\n\ndef get_confirm_token(response):\n for key, value in response.cookies.items():\n if key.startswith('download_warning'):\n return value\n\n return None\n\ndef save_response_content(response, destination):\n CHUNK_SIZE = 32768\n\n with open(destination, \"wb\") as f:\n for chunk in response.iter_content(CHUNK_SIZE):\n if chunk: # filter out keep-alive new chunks\n f.write(chunk)\n\n\n\nif __name__ == \"__main__\":\n\n # print('Dowloading Sony subset... (25GB)')\n # download_file_from_google_drive('10kpAcvldtcb9G2ze5hTcF1odzu4V_Zvh', 'dataset/Sony.zip')\n #\n # print('Dowloading Fuji subset... 
(52GB)')\n # download_file_from_google_drive('12hvKCjwuilKTZPe9EZ7ZTb-azOmUA3HT', 'dataset/Fuji.zip')\n #\n # os.system('unzip dataset/Sony.zip -d dataset')\n # os.system('unzip dataset/Fuji.zip -d dataset')\n\n # set the second argument to the file in Drive that needs to be downloaded\n # print('Downloading Iphone subset... ')\n # download_file_from_google_drive('101eXwidf9a5ZmvAt__sLWI9H9g_3KMp7', 'Iphone.zip')\n # print(os.path.abspath('.'))\n # os.system('unzip ./Iphone.zip -d dataset')\n print('Downloading UnderexposedImage subset... ')\n download_file_from_google_drive('1eCBEgnzmKO4zhdsq7G1IEap0G2r7cJHK', 'UnderexposedImage.zip')\n print(os.path.abspath('.'))\n os.system('unzip ./UnderexposedImage.zip -d dataset')\n","sub_path":"runInGoogleColab/download_dataset.py","file_name":"download_dataset.py","file_ext":"py","file_size_in_byte":1881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"140133766","text":"# -*- coding:utf-8 -*-\n\n\n#\n# The set S originally contains numbers from 1 to n. But unfortunately, due to the data error, one of the numbers in the set got duplicated to another number in the set, which results in repetition of one number and loss of another number. \r\n#\n#\n#\n# Given an array nums representing the data status of this set after the error. Your task is to first find the number that occurs twice and then find the number that is missing. Return them in the form of an array.\r\n#\n#\n#\n# Example 1:\r\n#\n# Input: nums = [1,2,2,4]\r\n# Output: [2,3]\r\n#\n#\n#\n# Note:\r\n#\n# The given array size will be in the range [2, 10000].\r\n# The given array's numbers won't have any order.\r\n#\n#\n\n\nclass Solution(object):\n def findErrorNums(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[int]\n \"\"\"\n \n # sum(nums) - \n lens = len(nums)\n old = list(set(range(1, lens+1)) - set(nums))[0]\n new = sum(nums) - (lens+1)*lens/2 + old\n return [new, old]\n \n \"\"\"\n N = len(nums)\n a = sum(nums) - (N+1)*N/2\n b = sum(num * num for num in nums) - N*(N+1)*(2*N+1)/6\n \n return [(b/a+a)/2, (b/a-a)/2]\n \"\"\"\n","sub_path":"645-set-mismatch/set-mismatch.py","file_name":"set-mismatch.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"576829915","text":"import sys\nsys.path.append('Z:\\\\xiangliang\\\\workspace\\\\PycharmProjects\\\\xlpyle\\\\simulation')\n\nimport sequencesim as seq\nimport numpy as np\nfrom scipy import integrate, interpolate\nfrom scipy import optimize\nfrom interpol import interp1d_cubic\nimport matplotlib.pyplot as plt\n# %matplotlib inline\namp = 1\nstart = 0\nstop = 1\ncutoff = 2.14\nintrange = (cutoff, 20*cutoff)\nrect = seq.rect(0, 1, amp)\n# power spectrum density is single side\ndef rectPSD(f):\n return 2*np.abs(rect.freqFunc(f))**2\n\n# series of hann functions need to be normalized to have unit area, that is\n# to have the same amplitude at point f = 0 i.e. 
the DC part\n# however, although they may have different power spectrum integration, we\n# still optimize with constraints to have the same DC part, where sum(Lm)=1\n\ndef hanns(amp, Lm, psd=False):\n hanns = seq.NOTHING\n for i in range(len(Lm)):\n hanns += seq.hann(start, stop, Lm[i]*amp, m=float(i+1))\n def powerspectrum(f):\n return 2*np.abs(hanns.freqFunc(f))**2\n if psd:\n return powerspectrum\n else:\n return hanns.timeFunc\n\ndef optfun(Lm):\n power, err = integrate.quad(hanns(amp=amp, Lm=Lm, psd=True), *intrange)\n return power, err\ndef optfun1(Lm):\n power, _ = optfun(Lm)\n return power\n\n# =============optimization==================\ndef optimizeLm(Lmidx):\n # cone = lambda Lm: Lm[0]**2+(Lm[1]-3)**2\n bnds = {\"L1\" : ((-2, 2),),\n \"L2\" : ((-2 ,2), (-1 ,1)),\n \"L3\" : ((-2 ,2), (-1 ,1), (-1 ,1)),\n \"L4\" : ((-2 ,2), (-1 ,1), (-1 ,1), (-1 ,1)),\n \"L10\" : ((-2 ,2), (-1 ,1), (-1 ,1), (-1 ,1), (-1 ,1),\n (-1 ,1), (-1 ,1), (-1 ,1), (-1 ,1), (-1 ,1))}\n\n x0 = {\"L1\" : (1,),\n \"L2\" : (1, 0),\n \"L3\" : (1, 0, 0),\n \"L4\" : (1, 0, 0, 0),\n \"L10\" : (1, 0, 0, 0, 0, 0, 0, 0, 0, 0)}\n cons = ({'type':'eq', 'fun': lambda x: sum(x)-1})\n keystr = str(int(Lmidx))\n result = optimize.minimize(optfun1, x0['L'+keystr],\n bounds=bnds['L'+keystr], constraints=cons)\n success, fun, x = result.success, result.fun, result.x\n return x\n# print optimizeLm(2)\n\nL = {\"L1\" : [1,],\n \"L2\" : [ 1.0863745, -0.0863745],\n \"L3\" : [ 1.08421251, -0.0855017, 0.00128919],\n \"L4\" : [1.07553534, -0.08183981, 0.00188607, 0.0044184],\n \"L10\": [1.04257146, -0.06815658, 0.00314485, 0.00447989, 0.00393446,\n 0.00340102, 0.00300808, 0.00272626, 0.00252138, 0.00236918]}\n\n# ==============check the optimized result=================\n# power, err = integrate.quad(rectPSD, *intrange)\n# power1, err1 = optfun(Lm=L[\"L1\"])\n# power2, err2 = optfun(Lm=L[\"L2\"])\n# power3, err3 = optfun(Lm=L[\"L3\"])\n# power4, err4 = optfun(Lm=L[\"L4\"])\n# power10, err10 = optfun(Lm=L[\"L10\"])\n# print power, err\n# print power1, err1\n# print power2, err2\n# print power3, err3\n# print power4, err4\n# print power10, err10\n\nf = np.linspace(0, 4, 500, endpoint=False)\n\n# hannpsd = lambda f, Lm : hanns(amp, Lm, psd=True)(f)\n\n# plt.figure(1)\n# ax = plt.subplot(111)\n# ax.semilogy(f, rectPSD(f)/2)\n# ax.semilogy(f, hannpsd(f, L['L1'])/2, label=\"L1\")\n# ax.semilogy(f, hannpsd(f, L['L2'])/2, label=\"L2\")\n# ax.semilogy(f, hannpsd(f, L['L3'])/2, label=\"L3\")\n# ax.semilogy(f, hannpsd(f, L['L4'])/2, label=\"L4\")\n# ax.semilogy(f, hannpsd(f, L['L10'])/2, label=\"L10\")\n# ax.semilogy([2.3, 2.3], [10**-7, 1], 'k--')\n# ax.semilogy([cutoff, cutoff], [10**-7, 1], 'b-')\n# plt.title(r'power spectrum density vs tp $\\omega$'+r'$\\_0$/2$\\pi$')\n# plt.ylim(10**-7, 1)\n# plt.legend()\n# # plt.plot(x, rectPSD(x))\n# # plt.plot(t2, np.real(y2), t4, np.real(y4))\n# plt.show()\n\n\n# ================================================\ndef NumericTau(Lm):\n N_interpl=100\n tausample = np.linspace(0, 1, N_interpl+1)\n\n # assume here theta is normalized to 1\n # DthetaDtau = lambda tau : rect(tau)\n DthetaDtau = lambda tau : hanns(amp, Lm)(tau)\n theta_tau = lambda tau : 1 - integrate.quad(DthetaDtau, 0, tau)[0]\n\n intfunc = lambda x : np.sin(np.pi*theta_tau(x)) # theta: 1~0\n t_tau = lambda tau : integrate.quad(intfunc, 0, tau)[0]\n\n thetas = []\n ts = []\n for tau in tausample:\n thetas.append(theta_tau(tau))\n ts.append(t_tau(tau))\n return tausample, ts, thetas\n\ndef interplfunc(ts, thetas):\n # calculate the new time 
function of theta\n theta_t = interpolate.interp1d(ts, thetas, kind='linear')\n\n def HzDHx(t):\n # let Hx = 1\n return 1.0/np.tan(np.pi*theta_t(t))\n return theta_t, HzDHx\n\n# ====================plot sequence======================\ntausample, ts, thetas = NumericTau(L[\"L2\"])\ntheta_t, HzDHx = interplfunc(ts, thetas)\ntrange = (ts[0], ts[-1])\n\nt = np.linspace(ts[0], ts[-1], 100)\nplt.figure()\nax1 = plt.subplot(221)\nax1.plot(tausample, thetas)\nplt.title(r'$\\Theta$ vs $\\tau$')\n\nax2 = plt.subplot(222)\nax2.plot(tausample, ts)\nplt.title(r't vs $\\tau$')\n\nt = np.linspace(0, ts[-1], 100)\nax3 = plt.subplot(223)\nax3.plot(t, theta_t(t), 'b')\nplt.title(r'$\\theta$ vs t')\n\nax4 = plt.subplot(224)\nax4.plot(t, -HzDHx(t), 'r')\nplt.title('Hz/Hx vs t')\nplt.xlim(ts[0], ts[-1])\nplt.ylim(-10, 10)\nplt.show()\n","sub_path":"xlpyle/simulation/fastadiabaticZ.py","file_name":"fastadiabaticZ.py","file_ext":"py","file_size_in_byte":5174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"645246369","text":"from __future__ import division, print_function, absolute_import\n\nimport sys\n\nfrom perf._cli import display_title, format_result_value\nfrom perf._utils import is_significant\nfrom perf._utils import is_verbose\n\n\ndef is_significant_benchs(bench1, bench2):\n values1 = bench1.get_values()\n values2 = bench2.get_values()\n\n if len(values1) == 1 and len(values2) == 1:\n # FIXME: is it ok to consider that comparison between two values\n # is significant?\n return (True, None)\n\n try:\n significant, t_score = is_significant(values1, values2)\n return (significant, t_score)\n except Exception:\n # FIXME: fix the root bug, don't work around it\n return (True, None)\n\n\nclass CompareData:\n def __init__(self, name, benchmark):\n self.name = name\n self.benchmark = benchmark\n\n def __repr__(self):\n return '<CompareData %s (%s values)>' % (self.name, self.benchmark.get_nvalue())\n\n\ndef compute_speed(ref, changed):\n ref_avg = ref.mean()\n changed_avg = changed.mean()\n # Note: means cannot be zero, it's a guarantee of the perf API\n speed = ref_avg / changed_avg\n percent = (changed_avg - ref_avg) * 100.0 / ref_avg\n return (speed, percent)\n\n\ndef format_speed(speed, percent):\n if speed == 1.0:\n return \"no change\"\n elif speed > 1.0:\n return \"%.2fx faster (%+.0f%%)\" % (speed, percent)\n else:\n return \"%.2fx slower (%+.0f%%)\" % (1.0 / speed, percent)\n\n\nclass CompareResult(object):\n def __init__(self, ref, changed):\n # CompareData object\n self.ref = ref\n # CompareData object\n self.changed = changed\n self._significant = None\n self._t_score = None\n self._speed = None\n self._percent = None\n\n def __repr__(self):\n return '<CompareResult ref=%s changed=%s>' % (self.ref, self.changed)\n\n def _set_significant(self):\n bench1 = self.ref.benchmark\n bench2 = self.changed.benchmark\n self._significant, self._t_score = is_significant_benchs(bench1, bench2)\n\n @property\n def significant(self):\n if self._significant is None:\n self._set_significant()\n return self._significant\n\n @property\n def t_score(self):\n if self._significant is None:\n self._set_significant()\n return self._t_score\n\n def _compute_speed(self):\n self._speed, self._percent = compute_speed(self.ref.benchmark,\n self.changed.benchmark)\n\n @property\n def speed(self):\n if self._speed is None:\n self._compute_speed()\n return self._speed\n\n @property\n def percent(self):\n if self._percent is None:\n self._compute_speed()\n return self._percent\n\n def oneliner(self, verbose=True, show_name=True, check_significant=True):\n if check_significant and not self.significant:\n return \"Not significant!\"\n\n ref_text = format_result_value(self.ref.benchmark)\n chg_text = format_result_value(self.changed.benchmark)\n if verbose:\n if show_name:\n ref_text = \"[%s] %s\" % (self.ref.name, ref_text)\n chg_text = \"[%s] %s\" % (self.changed.name, chg_text)\n if (self.ref.benchmark.get_nvalue() > 1\n or self.changed.benchmark.get_nvalue() > 1):\n text = \"Mean +- std dev: %s -> %s\" % (ref_text, chg_text)\n else:\n text = \"%s -> %s\" % (ref_text, chg_text)\n else:\n text = \"%s -> %s\" % (ref_text, chg_text)\n\n text = \"%s: %s\" % (text, format_speed(self.speed, self.percent))\n return text\n\n def format(self, verbose=True, show_name=True):\n text = self.oneliner(show_name=show_name, check_significant=False)\n lines = [text]\n\n # significant?\n if self.t_score is None:\n lines.append(\"ERROR when testing if values are significant\")\n\n if self.significant:\n if verbose:\n if self.t_score is not None:\n lines.append(\"Significant (t=%.2f)\" % self.t_score)\n else:\n lines.append(\"Significant\")\n else:\n lines.append(\"Not significant!\")\n return lines\n\n\nclass CompareResults(list):\n # list of CompareResult objects\n def __init__(self, name):\n self.name = name\n\n def __repr__(self):\n return '<CompareResults %r>' % (list(self),)\n\n\ndef compare_benchmarks(name, benchmarks):\n results = CompareResults(name)\n\n ref_item = benchmarks[0]\n ref = CompareData(ref_item.filename, ref_item.benchmark)\n\n for item in benchmarks[1:]:\n changed = CompareData(item.filename, item.benchmark)\n result = CompareResult(ref, changed)\n results.append(result)\n\n return results\n\n\nclass Table:\n def __init__(self, headers, rows):\n self.headers = headers\n self.rows = rows\n self.widths = [len(header) for header in self.headers]\n for row in self.rows:\n for column, cell in enumerate(row):\n self.widths[column] = max(self.widths[column], len(cell))\n\n def _render_line(self, char='-'):\n parts = ['']\n for width in self.widths:\n parts.append(char * (width + 2))\n parts.append('')\n return '+'.join(parts)\n\n def _render_row(self, row):\n parts = ['']\n for width, cell in zip(self.widths, row):\n parts.append(' %s ' % cell.ljust(width))\n parts.append('')\n return '|'.join(parts)\n\n def render(self, write_line):\n write_line(self._render_line('-'))\n write_line(self._render_row(self.headers))\n write_line(self._render_line('='))\n for row in self.rows:\n write_line(self._render_row(row))\n write_line(self._render_line('-'))\n\n\ndef compare_suites_table(grouped_by_name, by_speed, args):\n headers = ['Benchmark']\n for group in grouped_by_name:\n for item in group.benchmarks:\n headers.append(item.filename)\n break\n\n not_significant = []\n\n if by_speed:\n def sort_key(group):\n ref = group.benchmarks[0].benchmark\n bench = group.benchmarks[1].benchmark\n speed, percent = compute_speed(ref, bench)\n return -speed\n\n grouped_by_name.sort(key=sort_key)\n\n rows = []\n for group in grouped_by_name:\n all_significant = []\n row = [group.name]\n ref = group.benchmarks[0].benchmark\n for index, item in enumerate(group.benchmarks):\n bench = item.benchmark\n text = bench.format_value(bench.mean())\n if index != 0:\n speed, percent = compute_speed(ref, bench)\n if args.min_speed and abs(speed - 1.0) * 100 < args.min_speed:\n significant = False\n else:\n significant = is_significant_benchs(ref, bench)[0]\n if significant:\n if args.quiet:\n text = format_speed(speed, percent)\n else:\n text = \"%s: %s\" % (text, 
format_speed(speed, percent))\n else:\n text = \"not significant\"\n all_significant.append(significant)\n row.append(text)\n if any(all_significant):\n rows.append(row)\n else:\n not_significant.append(group.name)\n\n if rows:\n table = Table(headers, rows)\n table.render(print)\n\n if not_significant:\n if rows:\n print()\n print(\"Not significant (%s): %s\"\n % (len(not_significant), '; '.join(not_significant)))\n\n\ndef compare_suites_list(all_results, show_name, args):\n not_significant = []\n verbose = is_verbose() or args.verbose\n\n for index, results in enumerate(all_results):\n significant = any(result.significant for result in results)\n lines = []\n for result in results:\n lines.extend(result.format(verbose))\n\n if not(significant or verbose):\n not_significant.append(results.name)\n continue\n\n if len(lines) != 1:\n if show_name:\n display_title(results.name)\n for line in lines:\n print(line)\n if index != len(all_results) - 1:\n print()\n else:\n text = lines[0]\n if show_name:\n text = '%s: %s' % (results.name, text)\n print(text)\n\n if not args.quiet:\n if not_significant:\n print(\"Benchmark hidden because not significant (%s): %s\"\n % (len(not_significant), ', '.join(not_significant)))\n\n\ndef compare_suites_by_speed(all_results, show_name, args):\n not_significant = []\n slower = []\n faster = []\n same = []\n for results in all_results:\n result = results[0]\n if not result.significant:\n not_significant.append(results.name)\n continue\n\n speed = result.speed\n if args.min_speed and abs(speed - 1.0) * 100 < args.min_speed:\n not_significant.append(results.name)\n continue\n\n item = (results.name, result)\n if speed == 1.0:\n same.append(item)\n elif speed > 1.0:\n faster.append(item)\n else:\n slower.append(item)\n\n for title, results, sort_reverse in (\n ('Slower', slower, False),\n ('Faster', faster, True),\n ('Same speed', same, False),\n ):\n if not results:\n continue\n\n results.sort(key=lambda item: item[1].speed, reverse=sort_reverse)\n\n print(\"%s (%s):\" % (title, len(results)))\n for name, result in results:\n text = result.oneliner(verbose=False)\n print(\"- %s: %s\" % (name, text))\n print()\n\n if not args.quiet and not_significant:\n print(\"Benchmark hidden because not significant (%s): %s\"\n % (len(not_significant), ', '.join(not_significant)))\n\n\ndef compare_suites(benchmarks, args):\n grouped_by_name = benchmarks.group_by_name()\n if not grouped_by_name:\n print(\"ERROR: Benchmark suites have no benchmark in common\",\n file=sys.stderr)\n sys.exit(1)\n\n if args.table:\n compare_suites_table(grouped_by_name, args.group_by_speed, args)\n else:\n # List of CompareResults\n all_results = []\n for item in grouped_by_name:\n cmp_benchmarks = item.benchmarks\n results = compare_benchmarks(item.name, cmp_benchmarks)\n all_results.append(results)\n\n show_name = (len(grouped_by_name) > 1)\n if args.group_by_speed:\n compare_suites_by_speed(all_results, show_name, args)\n else:\n compare_suites_list(all_results, show_name, args)\n\n if not args.quiet:\n for suite, hidden in benchmarks.group_by_name_ignored():\n if not hidden:\n continue\n hidden_names = [bench.get_name() for bench in hidden]\n print(\"Ignored benchmarks (%s) of %s: %s\"\n % (len(hidden), suite.filename, ', '.join(sorted(hidden_names))))\n\n\ndef timeit_compare_benchs(name1, bench1, name2, bench2, args):\n data1 = CompareData(name1, bench1)\n data2 = CompareData(name2, bench2)\n compare = CompareResult(data1, data2)\n if not args.quiet:\n verb = is_verbose() or args.verbose\n 
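# compare.format() returns the one-liner plus, in verbose mode, the significance-test (t-score) lines\n 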
lines = compare.format(verbose=verb)\n for line in lines:\n print(line)\n else:\n line = compare.oneliner()\n print(line)\n","sub_path":"perf/_compare.py","file_name":"_compare.py","file_ext":"py","file_size_in_byte":11642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"604404521","text":"from django.shortcuts import render, redirect\nfrom django.core.mail import send_mail, BadHeaderError\nfrom django.http import HttpResponse\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom .forms import ContactForm\n\n\ndef index(request):\n if request.method == 'GET':\n form = ContactForm()\n else:\n form = ContactForm(request.POST)\n if form.is_valid():\n name = form.cleaned_data['name']\n from_email = form.cleaned_data['from_email']\n message = form.cleaned_data['message']\n\n message_with_sender = from_email + \"\\n\\n\" + message\n\n try:\n send_mail(name, message_with_sender, from_email, [settings.EMAIL_HOST_USER], fail_silently=True)\n messages.success(request, \"Success! Thanks for your message.\")\n return redirect('myprofile:index')\n except BadHeaderError:\n return HttpResponse('Invalid Header Found')\n return render(request, 'myprofile/index.html', {'form': form})\n","sub_path":"myprofile/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"474684307","text":"# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import (nested_scopes, generators, division, absolute_import, with_statement,\n print_function, unicode_literals)\n\nimport inspect\nimport optparse\nimport os\n\nfrom collections import defaultdict\n\nfrom pkg_resources import resource_string\nfrom twitter.common.dirutil import Fileset, safe_open\n\nfrom pants.base.build_environment import get_buildroot\nfrom pants.base.build_manual import get_builddict_info\nfrom pants.base.build_file_aliases import maven_layout\nfrom pants.base.build_file_parser import BuildFileParser\nfrom pants.base.config import ConfigOption\nfrom pants.base.exceptions import TaskError\nfrom pants.base.generator import Generator, TemplateData\nfrom pants.goal.option_helpers import add_global_options\nfrom pants.goal.phase import Phase\nfrom pants.tasks.task import Task\n\n\ndef indent_docstring_by_n(s, n=1):\n \"\"\"Given a non-empty docstring, return version indented N spaces.\n Given an empty thing, return the thing itself.\"\"\"\n # In reST, it's useful to have strings that are similarly-indented.\n # If we have a classdoc indented by 2 next to an __init__ funcdoc indented\n # by 4, reST doesn't format things nicely. 
Oh, totally-dedenting doesn't\n # format nicely either.\n\n # Docstring indentation: more gnarly than you'd think:\n # http://www.python.org/dev/peps/pep-0257/#handling-docstring-indentation\n if not s: return s\n # Convert tabs to spaces (following the normal Python rules)\n # and split into a list of lines:\n lines = s.expandtabs().splitlines()\n # Determine minimum indentation (first line doesn't count):\n indent = 999\n for line in lines[1:]:\n stripped = line.lstrip()\n if stripped:\n indent = min(indent, len(line) - len(stripped))\n # Remove indentation (first line is special):\n trimmed = [lines[0].strip()]\n if indent < 999:\n for line in lines[1:]:\n trimmed.append(line[indent:].rstrip())\n # Strip off trailing and leading blank lines:\n while trimmed and not trimmed[-1]:\n trimmed.pop()\n while trimmed and not trimmed[0]:\n trimmed.pop(0)\n # Return a single string:\n indent = n * \" \"\n return '\\n'.join([indent + t for t in trimmed])\n\n\ndef entry(nom, classdoc=None, msg_rst=None, argspec=None, funcdoc=None, methods=None, indent=1):\n \"\"\"Create a struct that our template expects to see.\n\n :param nom: Symbol name, e.g. python_binary\n :param classdoc: plain text appears above argspec\n :param msg_rst: reST. useful in hand-crafted entries\n :param argspec: arg string like (x, y=\"deflt\")\n :param funcdoc: function's __doc__, plain text\n :param methods: list of entries for class' methods\n \"\"\"\n\n return TemplateData(\n nom=nom.strip(),\n classdoc=indent_docstring_by_n(classdoc),\n msg_rst=indent_docstring_by_n(msg_rst, indent),\n argspec=argspec,\n funcdoc=indent_docstring_by_n(funcdoc, indent),\n methods=methods,\n showmethods=(methods and len(methods) > 0))\n\n\ndef msg_entry(nom, defn):\n \"\"\"For hard-wired entries a la \"See Instead\" or other simple stuff\"\"\"\n return entry(nom, msg_rst=defn)\n\n\ndef entry_for_one_func(nom, func):\n \"\"\"Generate a BUILD dictionary entry for a function\n nom: name like 'python_binary'\n func: function object\"\"\"\n args, varargs, varkw, defaults = inspect.getargspec(func)\n argspec = inspect.formatargspec(args, varargs, varkw, defaults)\n return entry(nom,\n argspec=argspec,\n funcdoc=func.__doc__)\n\n\ndef entry_for_one_method(nom, method):\n \"\"\"Generate a BUILD dictionary entry for a method\n nom: name like 'with_description'\n method: method object\"\"\"\n # TODO(lhosken) : This is darned similar to entry_for_one_func. Merge 'em?\n # (Punted so far since funcdoc indentation made my head hurt)\n assert inspect.ismethod(method)\n args, varargs, varkw, defaults = inspect.getargspec(method)\n # args[:1] instead of args to discard \"self\" arg\n argspec = inspect.formatargspec(args[1:], varargs, varkw, defaults)\n return entry(nom,\n argspec=argspec,\n funcdoc=(method.__doc__ or \"\"),\n indent=2)\n\n\ndef entry_for_one(nom, sym):\n if inspect.isclass(sym):\n return entry_for_one_class(nom, sym)\n if inspect.ismethod(sym) or inspect.isfunction(sym):\n return entry_for_one_func(nom, sym)\n return msg_entry(nom, \"TODO! 
no doc gen for %s %s\" % (\n str(type(sym)), str(sym)))\n\n\nPREDEFS = { # some hardwired entries\n \"Amount\": {\"defn\": msg_entry(\"Amount\", \"\"\"\n `Amount from twitter.commons.quantity `_\n E.g., ``Amount(2, Time.MINUTES)``.\"\"\")},\n \"fancy_pants\": {\"suppress\": True}, # unused alias for pants\n \"__file__\": {\"defn\": msg_entry(\"__file__\", \"Path to BUILD file (string).\")},\n \"globs\": {\"defn\": entry_for_one(\"globs\", Fileset.globs)},\n \"jar_library\": {\"defn\": msg_entry(\"jar_library\",\n \"\"\"Old name for `dependencies`_\"\"\")},\n \"java_tests\": {\"defn\": msg_entry(\"java_tests\",\n \"\"\"Old name for `junit_tests`_\"\"\")},\n \"pants\": {\"defn\": msg_entry(\"pants\",\n \"\"\"In old Pants versions, a reference to a Pants targets. (In new Pants versions, just use strings.)\"\"\"),\n \"tags\": [\"anylang\"]},\n \"maven_layout\": {\"defn\": entry_for_one(\"maven_layout\", maven_layout)},\n \"python_artifact\": {\"suppress\": True}, # unused alias for PythonArtifact\n\n \"rglobs\": {\"defn\": entry_for_one(\"rglobs\", Fileset.rglobs)},\n \"ROOT_DIR\": {\"defn\": msg_entry(\"ROOT_DIR\",\n \"Root directory of source code (string).\")},\n \"scala_tests\": {\"defn\": msg_entry(\"scala_tests\",\n \"\"\"Old name for `scala_specs`_\"\"\")},\n \"Time\": {\"defn\": msg_entry(\"Time\", \"\"\"\n `Amount from twitter.commons.quantity `_\n E.g., ``Amount(2, Time.MINUTES)``.\"\"\"), },\n}\n\n\n# Thingies like scala_library\n# Returns list of duples [(name, object), (name, object), (name, object),...]\ndef get_syms():\n r = {}\n vc = BuildFileParser.report_registered_context()\n for s in vc:\n if s in PREDEFS: continue\n o = vc[s]\n r[s] = o\n return r\n\n# Needed since x may be a str or a unicode, so we can't hard-code str.lower or unicode.lower.\n_lower = lambda x: x.lower()\n\n\ndef tocl(d):\n \"\"\"Generate TOC, in-page links to the IDs we're going to define below\"\"\"\n anchors = sorted(d.keys(), key=_lower)\n return TemplateData(t=\"All The Things\", e=[a for a in anchors])\n\n\ndef tags_tocl(d, tag_list, title):\n \"\"\"Generate specialized TOC.\n E.g., tags_tocl(d, [\"python\", \"anylang\"], \"Python\")\n tag_list: if an entry's tags contains any of these, use it\n title: pretty title\n \"\"\"\n filtered_anchors = []\n for anc in sorted(d.keys(), key=_lower):\n entry = d[anc]\n if not \"tags\" in entry: continue\n found = [t for t in tag_list if t in entry[\"tags\"]]\n if not found: continue\n filtered_anchors.append(anc)\n return TemplateData(t=title, e=filtered_anchors)\n\n\ndef entry_for_one_class(nom, klas):\n \"\"\" Generate a BUILD dictionary entry for a class.\n nom: name like 'python_binary'\n klas: class like pants.python_binary\"\"\"\n try:\n args, varargs, varkw, defaults = inspect.getargspec(klas.__init__)\n argspec = inspect.formatargspec(args[1:], varargs, varkw, defaults)\n funcdoc = klas.__init__.__doc__\n\n methods = []\n for attrname in dir(klas):\n attr = getattr(klas, attrname)\n attr_bdi = get_builddict_info(attr)\n if not attr_bdi: continue\n if inspect.ismethod(attr):\n methods.append(entry_for_one_method(attrname, attr))\n continue\n raise TaskError('@manual.builddict on non-method %s within class %s '\n 'but I only know what to do with methods' %\n (attrname, nom))\n\n except TypeError: # __init__ might not be a Python function\n argspec = None\n funcdoc = None\n methods = None\n\n return entry(nom,\n classdoc=klas.__doc__,\n argspec=argspec,\n funcdoc=funcdoc,\n methods=methods)\n\n\ndef gen_goals_glopts_reference_data():\n 
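\"\"\"Collect pants' global command-line options as TemplateData rows for the Goals Reference doc.\"\"\"\n 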
global_option_parser = optparse.OptionParser(add_help_option=False)\n add_global_options(global_option_parser)\n glopts = []\n for o in global_option_parser.option_list:\n hlp = None\n if o.help:\n hlp = indent_docstring_by_n(o.help.replace(\"[%default]\", \"\").strip(), 2)\n glopts.append(TemplateData(st=str(o), hlp=hlp))\n return glopts\n\n\ndef gref_template_data_from_options(og):\n \"\"\"Get data for the Goals Reference from an optparse.OptionGroup\"\"\"\n if not og: return None\n title = og.title or \"\"\n xref = \"\".join([c for c in title if c.isalnum()])\n option_l = []\n for o in og.option_list:\n default = None\n if o.default and not str(o.default).startswith(\"('NO',\"):\n default = o.default\n hlp = None\n if o.help:\n hlp = indent_docstring_by_n(o.help.replace(\"[%default]\", \"\").strip(), 6)\n option_l.append(TemplateData(\n st=str(o),\n default=default,\n hlp=hlp,\n typ=o.type))\n return TemplateData(\n title=title,\n options=option_l,\n xref=xref)\n\n\ndef gen_goals_phases_reference_data():\n \"\"\"Generate the goals reference rst doc.\"\"\"\n phase_dict = {}\n phase_names = []\n for phase, raw_goals in Phase.all():\n parser = optparse.OptionParser(add_help_option=False)\n phase.setup_parser(parser, [], [phase])\n options_by_title = defaultdict(lambda: None)\n for group in parser.option_groups:\n options_by_title[group.title] = group\n found_option_groups = set()\n goals = []\n for goal in sorted(raw_goals, key=(lambda x: x.name.lower())):\n doc = indent_docstring_by_n(goal.task_type.__doc__ or \"\", 2)\n options_title = goal.title_for_option_group(phase)\n og = options_by_title[options_title]\n if og:\n found_option_groups.add(options_title)\n goals.append(TemplateData(\n name=goal.task_type.__name__,\n doc=doc,\n ogroup=gref_template_data_from_options(og)))\n\n leftover_option_groups = []\n for group in parser.option_groups:\n if group.title in found_option_groups: continue\n leftover_option_groups.append(gref_template_data_from_options(group))\n leftover_options = []\n for option in parser.option_list:\n leftover_options.append(TemplateData(st=str(option)))\n phase_dict[phase.name] = TemplateData(phase=phase,\n goals=goals,\n leftover_opts=leftover_options,\n leftover_ogs=leftover_option_groups)\n phase_names.append(phase.name)\n\n phases = [phase_dict[name] for name in sorted(phase_names, key=_lower)]\n return phases\n\n\ndef assemble(predefs=PREDEFS, symbol_hash=None):\n \"\"\"Assemble big hash of entries suitable for smushing into a template.\n\n predefs: Hash of \"hard-wired\" predefined entries.\n symbol_hash: Python syms from which to generate more entries. 
Default: get from BUILD context\"\"\"\n d = {}\n for k in PREDEFS:\n v = PREDEFS[k]\n if \"suppress\" in v and v[\"suppress\"]: continue\n d[k] = v\n if symbol_hash is None:\n symbol_hash = get_syms()\n for k in symbol_hash:\n bdi = get_builddict_info(symbol_hash[k])\n if bdi is None: continue\n d[k] = bdi.copy()\n if not \"defn\" in d[k]:\n d[k][\"defn\"] = entry_for_one(k, symbol_hash[k])\n return d\n\n\nclass BuildBuildDictionary(Task):\n \"\"\"Generate documentation for the Sphinx site.\"\"\"\n\n def __init__(self, context, workdir):\n super(BuildBuildDictionary, self).__init__(context, workdir)\n self._templates_dir = os.path.join('templates', 'builddictionary')\n self._outdir = os.path.join(self.context.config.getdefault(\"pants_distdir\"), \"builddict\")\n\n def execute(self, targets):\n self._gen_goals_reference()\n self._gen_config_reference()\n self._gen_build_dictionary()\n\n def _gen_build_dictionary(self):\n \"\"\"Generate the BUILD dictionary reference rst doc.\"\"\"\n d = assemble()\n template = resource_string(__name__, os.path.join(self._templates_dir, 'page.mustache'))\n tocs = [tocl(d),\n tags_tocl(d, [\"java\", \"scala\", \"jvm\", \"anylang\"], \"JVM\"),\n tags_tocl(d, [\"python\", \"anylang\"], \"Python\")]\n defns = [d[t][\"defn\"] for t in sorted(d.keys(), key=_lower)]\n filename = os.path.join(self._outdir, 'build_dictionary.rst')\n self.context.log.info('Generating %s' % filename)\n with safe_open(filename, 'w') as outfile:\n generator = Generator(template,\n tocs=tocs,\n defns=defns)\n generator.write(outfile)\n\n def _gen_goals_reference(self):\n \"\"\"Generate the goals reference rst doc.\"\"\"\n phases = gen_goals_phases_reference_data()\n glopts = gen_goals_glopts_reference_data()\n\n template = resource_string(__name__,\n os.path.join(self._templates_dir, 'goals_reference.mustache'))\n filename = os.path.join(self._outdir, 'goals_reference.rst')\n self.context.log.info('Generating %s' % filename)\n with safe_open(filename, 'w') as outfile:\n generator = Generator(template, phases=phases, glopts=glopts)\n generator.write(outfile)\n\n def _gen_config_reference(self):\n options_by_section = defaultdict(list)\n for option in ConfigOption.all():\n if isinstance(option.default, unicode):\n option.default = option.default.replace(get_buildroot(), '%(buildroot)s')\n options_by_section[option.section].append(option)\n sections = list()\n for section, options in options_by_section.items():\n sections.append(TemplateData(section=section, options=options))\n template = resource_string(__name__,\n os.path.join(self._templates_dir, 'pants_ini_reference.mustache'))\n filename = os.path.join(self._outdir, 'pants_ini_reference.rst')\n self.context.log.info('Generating %s' % filename)\n with safe_open(filename, 'w') as outfile:\n generator = Generator(template, sections=sections)\n generator.write(outfile)\n","sub_path":"src/python/pants/tasks/builddictionary.py","file_name":"builddictionary.py","file_ext":"py","file_size_in_byte":14257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"283398717","text":"\"\"\"Now is deprecated. 
Not supported since 2020.\"\"\"\n\nimport numpy as np\n\nfrom surfaces.surface import Surface\nfrom ray.abstract_ray import ARay\n\n\nclass Ray(ARay):\n EPS0 = 1 / (4 * np.pi * 9 * 10 ** 9)\n nu0 = 4 * np.pi * 10 ** -7\n SQRT_EPS0_ON_NU0 = (EPS0 / nu0) ** 0.5\n\n # Ray class constructor: builds a ray object from two lists of real numbers,\n # the ray origin (start) and its direction (direction)\n def __init__(self, start: list, direction: list, amplitude: float = 1, brightness: float = 1):\n if len(direction) == len(start):\n if not all(isinstance(i, (float, int)) and isinstance(j, (float, int)) for i, j in zip(start, direction)):\n raise AttributeError(\n \"\"\"Some element in %s or %s is not a number.\"\"\" % (str(start), str(direction)))\n if amplitude <= 0:\n raise AttributeError(\"Amplitude must be a positive number (%s)\" % (str(amplitude)))\n if brightness <= 0 or brightness > 1:\n raise AttributeError(\n \"Brightness must be a number greater than zero and at most one (%s)\" % (str(brightness)))\n\n self.__dir = direction.copy()\n norm_val = np.linalg.norm(self.dir)\n if abs(norm_val - 1.0) > np.finfo(float).eps:\n self.__dir = np.dot(1 / norm_val, self.dir)\n self.__dim = len(direction)\n self.__start = start.copy()\n self.__amplitude = amplitude\n self.__brightness = brightness\n self.__path_of_ray = []\n self.__t1 = -1\n else:\n raise AttributeError(\"\"\"Iterable objects have different lengths. \n len(start): %d,\n len(direction): %d\"\"\" % (len(start), len(direction)))\n\n # getter and setter====================================================================\n\n @property\n def dim(self):\n return self.__dim\n\n @property\n def dir(self) -> list:\n return self.__dir.copy()\n\n @property\n def start(self) -> list:\n return self.__start.copy()\n\n @property\n def t1(self) -> float:\n return self.__t1\n\n @property\n def A(self) -> float:\n return self.__amplitude\n\n @property\n def bright(self) -> float:\n return self.__brightness\n\n @t1.setter\n def t1(self, t1_: (float, int)):\n if t1_ > 0:\n self.__t1 = t1_\n\n @A.setter\n def A(self, amplitude: (float, int)):\n if amplitude <= 0:\n raise AttributeError(\"Amplitude must be a positive number (%s)\" % (str(amplitude)))\n self.__amplitude = amplitude\n\n @bright.setter\n def bright(self, brightness):\n if brightness <= 0 or brightness > 1:\n raise AttributeError(\n \"Brightness must be a number greater than zero and at most one (%s)\" % (str(brightness)))\n self.__brightness = brightness\n\n # methods of object ray===========================================================================================\n\n def __str__(self) -> str:\n return \"ray:{ start: %s, direction: %s, A: %s, B:%s}\" % (\n self.__start.__str__(), self.__dir.__str__(), str(self.A), str(self.bright))\n\n def reflect(self, surface: Surface):\n if self.__dim != surface.dim:\n raise AttributeError(\"Different dimension of ray(%d) and of surface(%d)\" % (self.__dim, surface.dim))\n point, e, t_1 = ARay.reflect_(e=self.dir, r=self.start, surface=surface)\n if len(point) == 0 or len(e) == 0 or t_1 is None:\n return None\n self.t1 = t_1\n return Ray(point, e)\n\n def refract(self, surface: Surface):\n if self.__dim != surface.dim:\n raise AttributeError(\"Different dimension of ray(%d) and of surface(%d)\" % (self.__dim, surface.dim))\n # list, list, float\n point, e, t_1 = ARay.refract_(e=self.dir, r=self.start, surface=surface)\n if len(point) == 0 or len(e) == 0 or t_1 is None:\n return None\n self.t1 = t_1\n return Ray(point, e)\n\n def 
calc_point_of_ray(self, t: float) -> list:\n if not t > 10 * np.finfo(float).eps:\n return []\n return ARay.calc_point_of_ray_(self.dir, self.start, t)\n\n def calc_I(self, n: float):\n return Ray.SQRT_EPS0_ON_NU0 * self.A ** 2\n","sub_path":"ray/ray.py","file_name":"ray.py","file_ext":"py","file_size_in_byte":4398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"466461468","text":"from django import forms\nfrom django.conf import settings\n\nfrom .models import Example\n\n\nclass ExampleImportForm(forms.ModelForm):\n class Meta:\n model = Example\n fields = ['morpheme', 'content', 'used_in', 'prefix', 'suffix', 'category']\n \n\nclass FlashcardForm(forms.ModelForm):\n category = forms.ChoiceField(widget=forms.RadioSelect)\n \n class Meta:\n model = Example\n fields = ['category', 'prefix', 'suffix',]\n widgets = {\n 'prefix': forms.TextInput(attrs={'class': 'form-control'}),\n 'suffix': forms.TextInput(attrs={'class': 'form-control'}),\n }\n \n def __init__(self, *args, **kwargs):\n super(FlashcardForm, self).__init__(*args, **kwargs)\n categories = ((None, '0. Not set'), )\n for index, option in enumerate(Example.CATEGORY_CHOICES):\n new_option = ((option[0], str(index + 1) + '. ' + option[1]), )\n categories += new_option\n self.fields['category'].choices = categories","sub_path":"kkma/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"337986580","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport wiimote\nimport time\nimport sys\nimport csv\n\nwm = wiimote.connect(sys.argv[1])\n\nfilename = raw_input(\"Testfile: \")\nxp = yp = zp = 0\ndata = []\n\n\ndef writeLogData(data):\n csv.register_dialect('logging', delimiter=';', quoting=csv.QUOTE_ALL)\n logfile = open(filename+\".csv\", \"wb\")\n writer = csv.DictWriter(logfile, [\"x\", \"y\", \"z\"], 'logging')\n writer.writeheader()\n writer.writerows(data)\n logfile.close()\n\nwhile True:\n if wm.buttons[\"A\"]:\n x, y, z = wm.accelerometer\n if (x != xp) or (y != yp) or (z != zp):\n print(\"%d,%d,%d\") % (x, y, z)\n data.append({\"x\": x, \"y\": y, \"z\": z})\n xp, yp, zp = x, y, z\n time.sleep(0.01)\n\n if wm.buttons[\"B\"]:\n print\n writeLogData(data)\n break\n\nwm.disconnect()\ntime.sleep(1)\n","sub_path":"blatt9/ml-test/grab_values.py","file_name":"grab_values.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"489363839","text":"import paddle\nfrom tqdm import tqdm\n\nfrom utils.utils import get_lr\n\n\ndef fit_one_epoch(model_train, model, yolo_loss, loss_history, optimizer, epoch, epoch_step, epoch_step_val, gen,\n gen_val, Epoch, cuda):\n loss = 0\n val_loss = 0\n\n model_train.train()\n print('Start Train')\n with tqdm(total=epoch_step, desc=f'Epoch {epoch + 1}/{Epoch}', postfix=dict, mininterval=0.3) as pbar:\n for iteration, batch in enumerate(gen):\n if iteration >= epoch_step:\n break\n\n images, targets = batch[0], batch[1]\n with paddle.no_grad():\n if cuda:\n images = paddle.to_tensor(images).type(paddle.to_tensor([], dtype='float32')).cuda()\n targets = [paddle.to_tensor(ann).type(paddle.to_tensor([], dtype='float32')).cuda() for ann in targets]\n else:\n # images = paddle.to_tensor(images).type(paddle.to_tensor([], dtype='float32'))\n # targets = [paddle.to_tensor(ann).type(paddle.to_tensor([], dtype='float32')) for ann in targets]\n images 
= paddle.to_tensor(images).astype(dtype='float32')\n targets = [paddle.to_tensor(ann).astype(dtype='float32') for ann in targets]\n # ----------------------#\n # Zero the gradients\n # ----------------------#\n optimizer.clear_grad()\n # ----------------------#\n # Forward pass\n # ----------------------#\n outputs = model_train(images)\n\n # ----------------------#\n # Compute the loss\n # ----------------------#\n loss_value = yolo_loss(outputs, targets)\n\n # ----------------------#\n # Backward pass\n # ----------------------#\n loss_value.backward()\n optimizer.step()\n\n loss += loss_value.item()\n\n pbar.set_postfix(**{'loss': loss / (iteration + 1),\n 'lr': get_lr(optimizer)})\n pbar.update(1)\n\n print('Finish Train')\n\n model_train.eval()\n print('Start Validation')\n with tqdm(total=epoch_step_val, desc=f'Epoch {epoch + 1}/{Epoch}', postfix=dict, mininterval=0.3) as pbar:\n for iteration, batch in enumerate(gen_val):\n if iteration >= epoch_step_val:\n break\n images, targets = batch[0], batch[1]\n with paddle.no_grad():\n if cuda:\n images = paddle.to_tensor(images).astype(dtype='float32').cuda()\n targets = [paddle.to_tensor(ann).astype(dtype='float32').cuda() for ann in targets]\n else:\n images = paddle.to_tensor(images).astype(dtype='float32')\n targets = [paddle.to_tensor(ann).astype(dtype='float32') for ann in targets]\n # ----------------------#\n # Zero the gradients\n # ----------------------#\n optimizer.clear_grad()\n # ----------------------#\n # Forward pass\n # ----------------------#\n outputs = model_train(images)\n\n # ----------------------#\n # Compute the loss\n # ----------------------#\n loss_value = yolo_loss(outputs, targets)\n\n val_loss += loss_value.item()\n pbar.set_postfix(**{'val_loss': val_loss / (iteration + 1)})\n pbar.update(1)\n\n print('Finish Validation')\n\n loss_history.append_loss(loss / epoch_step, val_loss / epoch_step_val)\n print('Epoch:' + str(epoch + 1) + '/' + str(Epoch))\n print('Total Loss: %.3f || Val Loss: %.3f ' % (loss / epoch_step, val_loss / epoch_step_val))\n paddle.save(model.state_dict(),\n 'logs/ep%03d-loss%.3f-val_loss%.3f.pth' % (epoch + 1, loss / epoch_step, val_loss / epoch_step_val))\n","sub_path":"utils/utils_fit.py","file_name":"utils_fit.py","file_ext":"py","file_size_in_byte":3964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"43143590","text":"import math\r\n\r\nimport pyorama\r\nfrom pyorama import app\r\nfrom pyorama.asset import *\r\nfrom pyorama.core import *\r\nfrom pyorama.event import *\r\nfrom pyorama.graphics import *\r\nfrom pyorama.math import *\r\n\r\ndef on_window_event(event, *args, **kwargs):\r\n if event[\"sub_type\"] == WINDOW_EVENT_TYPE_CLOSE:\r\n app.trigger_quit()\r\n\r\nwidth = 800\r\nheight = 600\r\ntitle = b\"Cubes\"\r\n\r\nbase_path = b\"./examples/005_texture/\"\r\nvs_source_path = base_path + b\"shaders/vs_mesh.sc\"\r\nfs_source_path = base_path + b\"shaders/fs_mesh.sc\"\r\nimage_path = base_path + b\"textures/capsule.jpg\"\r\n\r\ncounter = 0\r\nat = Vec3(0.0, 0.0, 0.0)\r\neye = Vec3()\r\nup = Vec3(0.0, 1.0, 0.0)\r\nview_mat = Mat4()\r\nproj_mat = Mat4()\r\nmodel_mat = Mat4()\r\n\r\napp.init()\r\n\r\nvertex_format = BufferFormat([\r\n (b\"a_position\", 3, BUFFER_FIELD_TYPE_F32),\r\n (b\"a_texcoord0\", 2, BUFFER_FIELD_TYPE_F32),\r\n])\r\nvertex_layout = VertexLayout.init_create(\r\n vertex_format, \r\n normalize={b\"a_color0\",},\r\n)\r\nvertices = Buffer(vertex_format)\r\nvertices.init_from_list([\r\n (-400, -300, 0.0, 0.0, 0.0),\r\n (+400, -300, 0.0, 1.0, 0.0),\r\n (+400, 
+300, 0.0, 1.0, 1.0),\r\n (-400, +300, 0.0, 0.0, 1.0),\r\n])\r\nvertex_buffer = VertexBuffer.init_create(vertex_layout, vertices)\r\n\r\nindex_format = BufferFormat([\r\n (b\"a_indices\", 1, BUFFER_FIELD_TYPE_U16),\r\n])\r\nindices = Buffer(index_format)\r\nindices.init_from_list([\r\n 0, 1, 2, 0, 2, 3\r\n], is_flat=True)\r\nindex_layout = INDEX_LAYOUT_U16\r\nindex_buffer = IndexBuffer.init_create(index_layout, indices)\r\n\r\nqueue = AssetQueue.init_create()\r\nqueue.add_asset(ASSET_TYPE_IMAGE, b\"capsule\", image_path)\r\nqueue.load()\r\n\r\nasset_manager = app.get_asset_system()\r\nimage = Image()\r\nasset_manager.get_asset(b\"capsule\", image)\r\ntexture = Texture.init_create_2d_from_image(image)\r\nsampler = Uniform.init_create(b\"s_tex0\", UNIFORM_TYPE_SAMPLER)\r\n\r\nvertex_shader = Shader.init_create_from_source_file(SHADER_TYPE_VERTEX, vs_source_path)\r\nfragment_shader = Shader.init_create_from_source_file(SHADER_TYPE_FRAGMENT, fs_source_path)\r\nprogram = Program.init_create(vertex_shader, fragment_shader)\r\n\r\n\r\nwindow = Window.init_create(width, height, title)\r\nframe_buffer = FrameBuffer.init_create_from_window(window)\r\nview = View.init_create()\r\non_window_listener = Listener.init_create(EventType._WINDOW, on_window_event, None, None)\r\n\r\n\r\nVec3.set_data(eye, 0, 0, 1000)\r\nMat4.look_at(view_mat, eye, at, up)\r\nMat4.orthographic(proj_mat, 1.0/2.0, 1.0/2.0, 0.01, 1000.0)\r\n\r\n\"\"\"\r\nvoid mtxOrtho(float* _result, float _left, float _right, float _bottom, float _top, float _near, float _far, float _offset, bool _homogeneousNdc, Handness::Enum _handness)\r\n\t{\r\n\t\tconst float aa = 2.0f/(_right - _left);\r\n\t\tconst float bb = 2.0f/(_top - _bottom);\r\n\t\tconst float cc = (_homogeneousNdc ? 2.0f : 1.0f) / (_far - _near);\r\n\t\tconst float dd = (_left + _right )/(_left - _right);\r\n\t\tconst float ee = (_top + _bottom)/(_bottom - _top );\r\n\t\tconst float ff = _homogeneousNdc\r\n\t\t\t? 
(_near + _far)/(_near - _far)\r\n\t\t\t: _near /(_near - _far)\r\n\t\t\t;\r\n\r\n\t\tmemSet(_result, 0, sizeof(float)*16);\r\n\t\t_result[ 0] = aa;\r\n\t\t_result[ 5] = bb;\r\n\t\t_result[10] = cc;\r\n\t\t_result[12] = dd + _offset;\r\n\t\t_result[13] = ee;\r\n\t\t_result[14] = ff;\r\n\t\t_result[15] = 1.0f;\r\n\t}\r\n\r\n@staticmethod\r\ncdef void c_orthographic(Mat4C *out, float x_mag, float y_mag, float z_near, float z_far) nogil:\r\n out.m00 = 1.0/x_mag\r\n out.m01 = 0\r\n out.m02 = 0\r\n out.m03 = 0\r\n out.m10 = 0\r\n out.m11 = 1.0/y_mag\r\n out.m12 = 0\r\n out.m13 = 0\r\n out.m20 = 0\r\n out.m21 = 0\r\n out.m22 = 2.0/(z_near - z_far)\r\n out.m23 = 0\r\n out.m30 = 0\r\n out.m31 = 0\r\n out.m32 = (z_far + z_near) / (z_near - z_far)\r\n out.m33 = 1\r\n\"\"\"\r\n\r\n#print(proj_mat.data)\r\n#Mat4.orthographic_alt(proj_mat, 0, width, 0, height, 0.01, 1000.0)\r\n#print(proj_mat.data)\r\nMat4.perspective(proj_mat, math.radians(60.0), float(width) / float(height), 0.01, 1000.0)\r\nprint(proj_mat.data)\r\n#print(\"\")\r\n\r\nclear_flags = VIEW_CLEAR_COLOR | VIEW_CLEAR_DEPTH\r\nview.set_clear(clear_flags, 0x443355FF, 1.0, 0)\r\nview.set_rect(0, 0, width, height)\r\nview.set_frame_buffer(frame_buffer)\r\nview.set_vertex_buffer(vertex_buffer)\r\nview.set_index_buffer(index_buffer)\r\nview.set_program(program)\r\nview.set_texture(sampler, texture, 0)\r\nview.set_transform_model(model_mat)\r\nview.set_transform_view(view_mat)\r\nview.set_transform_projection(proj_mat)\r\nview.submit()\r\n\r\napp.run()\r\n\r\non_window_listener.delete()\r\nprogram.delete()\r\nfragment_shader.delete()\r\nvertex_shader.delete()\r\nvertex_buffer.delete(); vertex_layout.delete(); vertices.free()\r\nindex_buffer.delete(); indices.free()\r\nview.delete()\r\nframe_buffer.delete()\r\nwindow.delete()\r\napp.quit()","sub_path":"examples/broken/005_texture/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"151099567","text":"import requests\nimport random\nimport hashlib\nimport string\n\ndef get_random_string(length):\n letters = string.ascii_letters + string.digits\n result_str = ''.join(random.choice(letters) for i in range(length))\n return result_str\n\ndef PoW(target, start, end):\n while True:\n s = get_random_string(20)\n h = hashlib.sha1(s).hexdigest()\n if h[start:end] == target:\n return s\n\n\ns = \"01237IJLMNQSWXYadeghilnopqrtuyz\"\n\n#print PoW(\"0ca6d\",0,5)\n\n\nurl = \"http://3.35.121.198:40831/vaccine.php\"\ncoo = {\"PHPSESSID\":\"tt13ib28spldb6o8o56cml8lk\"}\n\n\nr = requests.get(url, cookies=coo)\nprint(r.text)\n\nfor i in range(31):\n c = s[i]\n ss = s[:i] + c + s[i:]\n print(ss)\n\n\n t = r.text\n t = t[t.index(\"===\"):]\n t = t[:t.index(\"\")]\n t = t[4:]\n print(t)\n\n pow = PoW(t, 0, 6)\n print(pow)\n\n r = requests.post(url, cookies=coo, data={\"code\": ss, \"pow\": pow})\n print(r.text)\n\n print(\"=\"*40)\n","sub_path":"vaccine.py","file_name":"vaccine.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"394079849","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Dec 19 11:36:22 2020\n\n@author: matt hanson\n\"\"\"\nfrom bs4 import BeautifulSoup\nimport requests\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n#get the names of the country to find the csv files already 
made\ncountryMap={}\nsource=requests.get('https://www.beeradvocate.com/beer/top-rated/').text\nsoup=BeautifulSoup(source,'lxml')\ncountries=soup.find('select').find_all('option')\n\n\n#get data from csv files\nfor ite in countries:\n acr=ite['value']\n country=ite.text\n if (country=='Countries:'):\n country='World'\n countryMap[country]=pd.read_csv(country+'.csv')\n #print(countryMap[country].tail())\n \nnewMap={}\n\n#get the alc percentage to be graphed\nfor country in countryMap.keys():\n newMap[country]=countryMap[country]['Alcohol Percent'].mean()\n #print(newMap[country])\n\n\n#set values to be graphed x is the country and y is alc percent\nx=[] \ny=[]\nfor val in newMap.keys():\n x.append(val)\n y.append(newMap[val])\n\n\n\nfigure, axis = plt.subplots(figsize=(20, 10)) \n \n#set width and where the country labels are\nwidth = 0.8 \nindent = np.arange(len(y))\naxis.barh(indent, y, width, color=\"blue\")\n\n#set where the ticks are\naxis.set_yticks(indent+width/2-0.4)\naxis.set_yticklabels(x, minor=False)\n\n#set titles\nplt.title('Average Alcohol Percentage of Favorite Beer by Country')\nplt.xlabel('Alcohol Percentage')\nplt.ylabel('Country') \n\n\nplt.show()","sub_path":"graph data found with web.py","file_name":"graph data found with web.py","file_ext":"py","file_size_in_byte":1444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"23141515","text":"from random import choice\ndata=open(\"degree.txt\").readlines()\nresult=open(\"result.txt\",'w')\ndic={}\nfor line in data:\n line=line.strip()\n line=line.split(':')\n dic[line[0]]=int(line[1])\nCopy=dic.copy()\ndef degree(lis,node1,node2):\n tmp=[node1,node2]\n link=','.join(tmp)\n link2=','.join([node2,node1])\n if(node1!=node2)&(lis.count(link)==0)&(lis.count(link2)==0):\n lis.append(link)\n return lis\n else:\n edge=choice(lis)\n [node3,node4]=str(edge).split(',')\n lis.remove(edge)\n degree(lis,node1,node3)\n degree(lis,node2,node4)\n return lis\ni=0\nlis=[]\nlink=0\nj=0\nwhile(j<1000):\n while(i<58374):\n a=choice(dic.keys())\n b=choice(dic.keys())\n if(a==b)&(i==0):\n continue\n elif(dic[a]==0):\n del dic[a]\n elif(dic[b]==0):\n del dic[b]\n elif(a==b)&(dic[a]==1):\n continue\n else:\n lis=degree(lis,a,b)\n i+=1\n dic[a]-=1\n dic[b]-=1\n print>>result,lis,'\\n','\\n'\n lis=[]\n dic=Copy.copy()\n i=0\n j+=1\n\n","sub_path":"random_PPI.py","file_name":"random_PPI.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"580770974","text":"from kivy.app import App\nfrom kivy.uix.floatlayout import FloatLayout\nfrom kivy.utils import get_color_from_hex\nfrom kivy.graphics import Line, Color\nfrom kivy.core.text import LabelBase\nfrom kivy.uix.togglebutton import ToggleButton\nfrom kivy.uix.behaviors import ToggleButtonBehavior\n\n\nclass RadioButton(ToggleButton):\n def _do_press(self):\n if self.state == 'normal':\n ToggleButtonBehavior._do_press(self)\n\n\nclass CanvasFloat(FloatLayout):\n undo_list = []\n current_color = get_color_from_hex('#2980b9')\n\n def on_touch_down(self, touch):\n for child in self.children[:]:\n if child.dispatch('on_touch_down', touch):\n return True\n\n if child.collide_point(touch.x, touch.y):\n return True\n\n with self.canvas:\n Color(*self.line_color)\n touch.ud['current_line'] = Line(points=(touch.x, touch.y), width=self.line_width)\n\n def on_touch_move(self, touch):\n for child in self.children[:]:\n if child.collide_point(touch.x, touch.y):\n return 
True\n\n with self.canvas:\n Color(*self.line_color)\n if 'current_line' in touch.ud:\n touch.ud['current_line'].points += (touch.x, touch.y)\n\n def on_touch_up(self, touch):\n if 'current_line' in touch.ud:\n self.undo_list.append(touch.ud['current_line'])\n\n def set_line_color(self, color):\n self.line_color = color\n\n def clear_canvas(self):\n saved = self.children[:]\n self.clear_widgets()\n self.canvas.clear()\n for child in saved:\n self.add_widget(child)\n\n def undo_canvas(self):\n try:\n self.canvas.remove(self.undo_list.pop())\n except IndexError:\n pass\n\n def erase_canvas(self):\n white_color = get_color_from_hex('#ffffff')\n\n return self.set_line_color(white_color)\n\n def set_line_width(self, line_width):\n self.line_width = line_width\n\n\nclass PaintApp(App):\n _default_line_color = get_color_from_hex('#2980b9')\n _normal_line_width = 2\n\n def build(self):\n self.canvas_float = CanvasFloat()\n self.canvas_float.set_line_color(self._default_line_color)\n self.canvas_float.set_line_width(self._normal_line_width)\n\n return self.canvas_float\n\n\nif __name__ == '__main__':\n LabelBase.register(name='Modern Pictograms',\n fn_regular='./fonts/modernpics.ttf')\n\n LabelBase.register(name='Heydings',\n fn_regular='./fonts/heydings_controls.ttf')\n\n from kivy.core.window import Window\n\n Window.clearcolor = get_color_from_hex('#ffffff')\n\n PaintApp().run()\n","sub_path":"paint/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"76235458","text":"\"\"\"\r\nc) Modificar las funciones anteriores para que tengan en cuenta el género del\r\ndestinatario, para ello, deberán recibir una tupla de tuplas, conteniendo el\r\nnombre y el género.\r\n\"\"\"\r\n\r\ndef campaign_3a(names):\r\n messages = []\r\n for name in names:\r\n messages.append(\"Estimad\" + (\"o\" if name[0] == \"H\" else \"a\") +\r\n \" %s, vote por mí\" % name[1])\r\n return messages\r\n\r\n\r\ndef campaign_3b(names, pos, num):\r\n messages = []\r\n for i in range(pos, pos + num):\r\n messages.append(\"Estimad\" + (\"o\" if names[i][0] == \"H\" else \"a\") +\r\n \" %s, vote por mí\" % names[i][1])\r\n return messages","sub_path":"1.UMDC/UMDC 04/03c.py","file_name":"03c.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"224693657","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.5 (3351)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/ljf/superset/superset18/superset/migrations/versions/1226819ee0e3_fix_wrong_constraint_on_table_columns.py\n# Compiled at: 2017-10-30 08:27:50\n# Size of source mod 2**32: 1681 bytes\n\"\"\"Fix wrong constraint on table columns\n\nRevision ID: 1226819ee0e3\nRevises: 956a063c52b3\nCreate Date: 2016-05-27 15:03:32.980343\n\n\"\"\"\nrevision = '1226819ee0e3'\ndown_revision = '956a063c52b3'\nfrom alembic import op\nfrom superset import db\nfrom superset.utils import generic_find_constraint_name\nimport logging\nnaming_convention = {'fk': 'fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s'}\n\ndef find_constraint_name(upgrade=True):\n cols = {'column_name'} if upgrade else {'datasource_name'}\n return generic_find_constraint_name(table='columns', columns=cols, referenced='datasources', db=db)\n\n\ndef upgrade():\n try:\n constraint = find_constraint_name() or 
'fk_columns_column_name_datasources'\n with op.batch_alter_table('columns', naming_convention=naming_convention) as (batch_op):\n batch_op.drop_constraint(constraint, type_='foreignkey')\n batch_op.create_foreign_key('fk_columns_datasource_name_datasources', 'datasources', [\n 'datasource_name'], ['datasource_name'])\n except:\n logging.warning('Could not find or drop constraint on `columns`')\n\n\ndef downgrade():\n constraint = find_constraint_name(False) or 'fk_columns_datasource_name_datasources'\n with op.batch_alter_table('columns', naming_convention=naming_convention) as (batch_op):\n batch_op.drop_constraint(constraint, type_='foreignkey')\n batch_op.create_foreign_key('fk_columns_column_name_datasources', 'datasources', [\n 'column_name'], ['datasource_name'])","sub_path":"pycfiles/superset-hand-china-1.3.8.tar/1226819ee0e3_fix_wrong_constraint_on_table_columns.cpython-35.py","file_name":"1226819ee0e3_fix_wrong_constraint_on_table_columns.cpython-35.py","file_ext":"py","file_size_in_byte":1870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"567096187","text":"import copy\nimport json\nimport logging\nimport os\nimport re\nimport shutil\nfrom collections import defaultdict\nfrom dataclasses import dataclass, field\nfrom datetime import datetime\nfrom json import JSONDecodeError\nfrom pathlib import Path\nfrom typing import Any, Dict, Iterable, List, Optional, Set, TextIO, Tuple, Type, Union, cast, Iterator\n\nimport click\nimport htmlmin\nimport jinja2\nimport markdown2\nimport requests\nimport yaml\nfrom dataclasses_json import dataclass_json\nfrom jinja2 import FileSystemLoader\nfrom pygments import highlight\nfrom pygments.formatters.html import HtmlFormatter\nfrom pygments.lexers.javascript import JavascriptLexer\nfrom pytz import reference\n\nTEMPLATE_FILE_NAME = \"base.html\"\nCSS_FILE_NAME = \"schema_doc.css\"\nJS_FILE_NAME = \"schema_doc.min.js\"\n\nDEFAULT_PATTERN = r\"(\\[Default - `([^`]+)`\\])\"\nDEPRECATED_PATTERN = r\"\\[Deprecated\"\n\nTYPE_ARRAY = \"array\"\nTYPE_BOOLEAN = \"boolean\"\nTYPE_CONST = \"const\"\nTYPE_ENUM = \"enum\"\nTYPE_INTEGER = \"integer\"\nTYPE_NUMBER = \"number\"\nTYPE_OBJECT = \"object\"\nTYPE_STRING = \"string\"\n\nKW_REQUIRED = \"required\"\nKW_TITLE = \"title\"\nKW_CONTAINS = \"contains\"\nKW_ITEMS = \"items\"\nKW_UNIQUE_ITEMS = \"uniqueItems\"\nKW_MAX_ITEMS = \"maxItems\"\nKW_MIN_ITEMS = \"minItems\"\nKW_MAX_LENGTH = \"maxLength\"\nKW_MIN_LENGTH = \"minLength\"\nKW_PATTERN = \"pattern\"\nKW_CONST = \"const\"\nKW_ENUM = \"enum\"\nKW_ELSE = \"else\"\nKW_THEN = \"then\"\nKW_IF = \"if\"\nKW_NOT = \"not\"\nKW_ONE_OF = \"oneOf\"\nKW_ANY_OF = \"anyOf\"\nKW_ALL_OF = \"allOf\"\nKW_PROPERTIES = \"properties\"\nKW_PATTERN_PROPERTIES = \"patternProperties\"\nKW_ADDITIONAL_PROPERTIES = \"additionalProperties\"\n\nDESCRIPTION = \"description\"\nDEFAULT = \"default\"\nEXAMPLES = \"examples\"\nITEMS = \"items\"\nTYPE = \"type\"\nREF = \"$ref\"\n\nMULTIPLE_OF = \"multipleOf\"\nMAXIMUM = \"maximum\"\nEXCLUSIVE_MAXIMUM = \"exclusiveMaximum\"\nMINIMUM = \"minimum\"\nEXCLUSIVE_MINIMUM = \"exclusiveMinimum\"\n\n\nSHORT_DESCRIPTION_NUMBER_OF_LINES = 8\n\n\nCONFIG_DEPRECATION_MESSAGE = (\n \"JSON Schema for humans: Please supply a GenerationConfiguration object instead of individual options\"\n)\n\n\ncircular_references: Dict[\"SchemaNode\", bool] = {}\n\n\n@dataclass_json\n@dataclass\nclass GenerationConfiguration:\n \"\"\"Configuration for generating documentation for a schema\"\"\"\n\n minify: bool = 
True\n description_is_markdown: bool = True\n deprecated_from_description: bool = False\n show_breadcrumbs: bool = True\n collapse_long_descriptions: bool = True\n default_from_description: bool = False\n expand_buttons: bool = False\n copy_css: bool = True\n copy_js: bool = True\n link_to_reused_ref: bool = True\n recursive_detection_depth: int = 25\n templates_directory: str = os.path.join(os.path.dirname(__file__), \"templates\")\n template_name: str = \"js\"\n # markdown2 extra parameters can be added here: https://github.com/trentm/python-markdown2/wiki/Extras\n markdown_options: Any = field(\n default_factory=lambda: {\n \"break-on-newline\": True,\n \"fenced-code-blocks\": {\"cssclass\": \"highlight jumbotron\"},\n \"tables\": None,\n }\n )\n\n\nclass SchemaNode:\n \"\"\"\n Represents a part of a JSON schema with additional metadata to help with documentation\n \"\"\"\n\n def __init__(\n self,\n depth: int,\n file: str,\n path_to_element: List[Union[str, int]],\n html_id: str,\n breadcrumb_name: str = \"\",\n ref_path=\"\",\n parent: \"SchemaNode\" = None,\n parent_key: str = None,\n literal: Union[str, int, bool] = None,\n keywords: Dict[str, Union[\"SchemaNode\", str, List[str]]] = None,\n array_items: List[\"SchemaNode\"] = None,\n links_to: \"SchemaNode\" = None,\n refers_to: \"SchemaNode\" = None,\n is_displayed: bool = True,\n ):\n \"\"\"\n\n :param depth: Number of levels from the root of the schema to this node.\n :param file: Real path to the schema file\n :param path_to_element: Path from the root of the schema to the current element\n :param html_id: HTML ID for the current element. Used for anchor links.\n :param parent: The parent node of which the current node is an array item or keyword\n :param parent_key: If the node is under a keyword of the parent node, that keyword\n Example:\n In the following context\n {\n \"patternProperties\": {\n \".*\": {\n \"type\": string\n }\n }\n }\n\n For the node\n {\n \"type\": string\n }\n The parent_key is \".*\"\n\n For the node\n {\n \".*\": {\n \"type\": string\n }\n }\n The parent key is \"patternProperties\"\n\n :param ref_path: Path of a reference to this element, if any (usually \"#/definitions/A name\")\n :param literal: If the schema is neither a dict nor an array, it will be kept here\n Useful for things like description, types, const, enum, etc.\n :param keywords: If the schema is a dict, this will be filled. Otherwise, this stays empty\n :param array_items: If the schema is an array, this will be filled. 
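(It holds one child SchemaNode per array element.) 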
Otherwise, this stays empty\n :param links_to: If the same node is documented elsewhere, the other SchemaNode that documents it\n :param refers_to: If there is a $ref, this should contain the SchemaNode object for it\n :param is_displayed: Instructs the templates if this part should be fully documented.\n If false, the description and a link to the referenced element will be generated instead.\n If false, refers_to needs to be set\n \"\"\"\n self.depth = depth\n self.file = file\n self.path_to_element = path_to_element\n self.html_id = html_id or \"_\".join(path_to_element) or \"root\"\n self.breadcrumb_name = breadcrumb_name\n self.parent = parent\n self.parent_key = parent_key\n self.ref_path = ref_path\n self.literal = literal\n self.keywords = keywords or {}\n self.array_items = array_items or []\n self.links_to = links_to\n self.refers_to = refers_to\n self.is_displayed = is_displayed\n self._refers_to_merged = None\n self.properties: Dict[str, \"SchemaNode\"] = {}\n self.additional_properties: Optional[\"SchemaNode\"] = None\n # If True, it means additionalProperties is there and false. If False, additionalProperties is either not set\n # or is set but is not false (depends on self.additional_properties)\n self.no_additional_properties: bool = False\n self.pattern_properties: Dict[str, \"SchemaNode\"] = {}\n\n @property\n def explicit_no_additional_properties(self) -> bool:\n \"\"\"Return True if additionalProperties is set and false (to differentiate from not set)\"\"\"\n return bool(\n (self.properties or self.pattern_properties)\n and self.no_additional_properties\n and not self.additional_properties\n )\n\n @property\n def definition_name(self) -> str:\n \"\"\"The text to display when this node is the title of a section or tab\"\"\"\n if self.is_property and self.property_name:\n return self.property_name\n if self.title:\n return self.title\n if self.ref_path:\n return self.ref_path.split(\"/\")[-1]\n return \"\"\n\n @property\n def link_name(self) -> str:\n \"\"\"The text to display when linking to this node from somewhere else in the schema\"\"\"\n return self.definition_name or self.html_id\n\n @property\n def name_for_breadcrumbs(self) -> str:\n return self.definition_name or self.breadcrumb_name\n\n @property\n def is_property(self) -> bool:\n return bool(self.parent and self.property_name in self.parent.properties.keys())\n\n @property\n def is_pattern_property(self) -> bool:\n return bool(self.parent and self.property_name in self.parent.pattern_properties.keys())\n\n @property\n def is_additional_properties(self) -> bool:\n return self.parent_key == KW_ADDITIONAL_PROPERTIES\n\n @property\n def is_a_property_node(self) -> bool:\n return self.is_property or self.is_pattern_property or self.is_additional_properties\n\n @property\n def is_additional_properties_schema(self) -> bool:\n return self.is_additional_properties and self.literal is not True\n\n @property\n def iterate_properties(self) -> Iterable[\"SchemaNode\"]:\n if self.properties:\n yield from self.properties.values()\n\n if self.pattern_properties:\n yield from self.pattern_properties.values()\n\n if self.additional_properties:\n yield self.additional_properties\n\n @property\n def required_properties(self) -> List[str]:\n \"\"\"The required properties for this node\"\"\"\n required_properties = self.kw_required\n if not required_properties:\n return []\n\n return [r.literal for r in required_properties.array_items]\n\n @property\n def is_required_property(self) -> bool:\n \"\"\"Check if the current node 
represents a property and that this property is required by its parent\"\"\"\n return self.parent and self.property_name in self.parent.required_properties\n\n @property\n def nodes_from_root(self) -> Iterator[\"SchemaNode\"]:\n \"\"\"The list of nodes to reach this node\"\"\"\n nodes: List[\"SchemaNode\"] = [self]\n current_node = self\n while current_node.parent:\n nodes.append(current_node.parent)\n current_node = current_node.parent\n\n if len(nodes) == 1:\n # Don't want to display \"root\" alone at the root\n return []\n\n return reversed(nodes)\n\n @property\n def path_to_property(self) -> str:\n \"\"\"Human-readable representation of the path from the root of the schema to this node\"\"\"\n path_without_properties = [p for p in self.path_to_element if p not in [KW_PROPERTIES, KW_PATTERN_PROPERTIES]]\n return \" -> \".join([p if isinstance(p, str) else f\"Item {p}\" for p in path_without_properties])\n\n @property\n def flat_path(self) -> str:\n \"\"\"String representation of the path to this node from the root of the current schema\"\"\"\n return \"/\".join(str(part) for part in self.path_to_element)\n\n @property\n def default_value(self) -> Optional[Any]:\n def _default_value(node: SchemaNode) -> Optional[Any]:\n default = node.keywords.get(DEFAULT)\n if isinstance(default, SchemaNode) and default.is_a_property_node:\n return None\n return default\n\n seen = set()\n current_node = self\n possible_default = _default_value(current_node)\n while not possible_default and current_node.refers_to:\n if current_node in seen:\n break\n seen.add(current_node)\n current_node = current_node.refers_to\n possible_default = _default_value(current_node)\n\n return possible_default\n\n @property\n def examples(self) -> List[str]:\n possible_examples = self.keywords.get(EXAMPLES)\n if not possible_examples:\n return []\n\n if isinstance(possible_examples, SchemaNode) and possible_examples.is_a_property_node:\n return []\n\n return possible_examples\n\n @property\n def refers_to_merged(self) -> Optional[\"SchemaNode\"]:\n \"\"\"The referenced node, with values from the current node merged in\"\"\"\n if self._refers_to_merged:\n return self._refers_to_merged\n\n if not self.refers_to:\n return None\n\n merged_node = copy.copy(self.refers_to)\n merged_node.keywords = {k: copy.copy(v) for k, v in self.refers_to.keywords.items()}\n merged_node.array_items = [copy.copy(i) for i in self.refers_to.array_items]\n\n merged_node.keywords.update({k: copy.copy(v) for k, v in self.keywords.items()})\n merged_node.array_items += [copy.copy(i) for i in self.array_items]\n\n return merged_node\n\n # self._refers_to_merged = merged_node\n\n # return self._refers_to_merged\n\n def get_keyword(self, keyword: str) -> Optional[\"SchemaNode\"]:\n \"\"\"Get the value of a keyword if present and it is not a property (to avoid conflicts with properties being\n named like a keyword, e.g. 
a property named \"if\")\n \"\"\"\n possible_keyword = self.keywords.get(keyword)\n if possible_keyword and isinstance(possible_keyword, SchemaNode) and not possible_keyword.is_property:\n return possible_keyword\n\n return None\n\n @property\n def kw_all_of(self) -> Optional[\"SchemaNode\"]:\n return self.get_keyword(KW_ALL_OF)\n\n @property\n def kw_any_of(self) -> Optional[\"SchemaNode\"]:\n return self.get_keyword(KW_ANY_OF)\n\n @property\n def kw_one_of(self) -> Optional[\"SchemaNode\"]:\n return self.get_keyword(KW_ONE_OF)\n\n @property\n def kw_not(self) -> Optional[\"SchemaNode\"]:\n return self.get_keyword(KW_NOT)\n\n @property\n def has_conditional(self) -> bool:\n return self.kw_if is not None and (self.kw_then is not None or self.kw_else is not None)\n\n @property\n def kw_if(self) -> Optional[\"SchemaNode\"]:\n return self.get_keyword(KW_IF)\n\n @property\n def kw_then(self) -> Optional[\"SchemaNode\"]:\n return self.get_keyword(KW_THEN)\n\n @property\n def kw_else(self) -> Optional[\"SchemaNode\"]:\n return self.get_keyword(KW_ELSE)\n\n @property\n def kw_enum(self) -> Optional[\"SchemaNode\"]:\n return self.get_keyword(KW_ENUM)\n\n @property\n def kw_const(self) -> Optional[\"SchemaNode\"]:\n return self.get_keyword(KW_CONST)\n\n @property\n def kw_pattern(self) -> Optional[\"SchemaNode\"]:\n return self.get_keyword(KW_PATTERN)\n\n @property\n def kw_properties(self) -> Optional[\"SchemaNode\"]:\n return self.get_keyword(KW_PROPERTIES)\n\n @property\n def kw_pattern_properties(self) -> Optional[\"SchemaNode\"]:\n return self.get_keyword(KW_PATTERN_PROPERTIES)\n\n @property\n def kw_additional_properties(self) -> Optional[\"SchemaNode\"]:\n return self.get_keyword(KW_ADDITIONAL_PROPERTIES)\n\n @property\n def kw_min_length(self) -> Optional[\"SchemaNode\"]:\n return self.get_keyword(KW_MIN_LENGTH)\n\n @property\n def kw_max_length(self) -> Optional[\"SchemaNode\"]:\n return self.get_keyword(KW_MAX_LENGTH)\n\n @property\n def kw_items(self) -> Optional[\"SchemaNode\"]:\n return self.get_keyword(KW_ITEMS)\n\n @property\n def kw_min_items(self) -> Optional[\"SchemaNode\"]:\n return self.get_keyword(KW_MIN_ITEMS)\n\n @property\n def kw_max_items(self) -> Optional[\"SchemaNode\"]:\n return self.get_keyword(KW_MAX_ITEMS)\n\n @property\n def kw_unique_items(self) -> Optional[\"SchemaNode\"]:\n return self.get_keyword(KW_UNIQUE_ITEMS)\n\n @property\n def kw_contains(self) -> Optional[\"SchemaNode\"]:\n return self.get_keyword(KW_CONTAINS)\n\n @property\n def kw_required(self) -> Optional[\"SchemaNode\"]:\n return self.get_keyword(KW_REQUIRED)\n\n @property\n def title(self) -> Optional[str]:\n title_kw = self.get_keyword(KW_TITLE)\n if not title_kw:\n return None\n title = title_kw.literal\n title = cast(str, title)\n return title\n\n @property\n def property_name(self) -> Optional[str]:\n return self.parent_key\n\n @property\n def property_display_name(self) -> Optional[str]:\n \"\"\"The name to display in documentation for this property.\n\n This is simply the property name unless it is under \"patternProperties\" and it has a title,\n in which case it is that title\n \"\"\"\n if self.is_pattern_property:\n return self.title or self.parent_key\n if self.is_additional_properties:\n return \"Additional Properties\"\n return self.parent_key\n\n @property\n def type_name(self) -> str:\n name = get_type_name(self)\n\n if name:\n return name\n\n seen = set()\n current_node = self\n while not name and current_node.refers_to:\n if current_node in seen:\n break\n seen.add(current_node)\n 
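# follow one $ref hop; the seen set guards against circular references\n 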
referenced_schema = current_node.refers_to\n name = get_type_name(referenced_schema)\n current_node = referenced_schema\n\n return name or TYPE_OBJECT\n\n def should_be_a_link(self, config: GenerationConfiguration) -> bool:\n \"\"\"Check if this node should be displayed as a link to another section of the schema in the context of\n the provided configuration.\n \"\"\"\n if not self.links_to or self.is_displayed:\n return False\n\n if config.link_to_reused_ref:\n return True\n\n return self.has_circular_reference(config)\n\n def node_is_parent(self, node_to_check: \"SchemaNode\") -> bool:\n \"\"\"Check if the provided node is a parent of the current node\"\"\"\n if self.file != node_to_check.file:\n return False\n\n for i, path_part in enumerate(node_to_check.path_to_element):\n if len(self.path_to_element) <= i:\n return False\n if self.path_to_element[i] != path_part:\n return False\n return True\n\n def has_circular_reference(self, config: GenerationConfiguration) -> bool:\n \"\"\"Check if the current schema is a reference to another section that references the current schema.\n\n The check is recursive up to config.recursive_detection_depth levels, meaning that if the node refers to another\n node that refers to another node that refers to a parent of itself, this will still return True if, and only if,\n it takes less than config.recursive_detection_depth steps to get to the parent.\n \"\"\"\n if self in circular_references:\n return circular_references[self]\n\n if not self.links_to:\n circular_references[self] = False\n return False\n\n iteration_count = 0\n to_check = {self.links_to}\n while to_check and iteration_count < config.recursive_detection_depth:\n for node_to_check in to_check:\n # If the node reached via reference, keywords, or array items is the node itself, we have a circular\n # reference.\n # We also check if the path is for a parent to save on cycles\n if node_to_check == self or self.node_is_parent(node_to_check):\n circular_references[self] = True\n return True\n\n new_to_check: Set[SchemaNode] = set()\n for node_to_check in to_check:\n if node_to_check.links_to:\n new_to_check.add(node_to_check.links_to)\n new_to_check = new_to_check.union(\n set(n for n in node_to_check.keywords.values() if isinstance(n, SchemaNode))\n )\n new_to_check = new_to_check.union(set(node_to_check.array_items))\n to_check = new_to_check\n iteration_count += 1\n\n circular_references[self] = False\n return False\n\n def __eq__(self, other: object) -> bool:\n \"\"\"For two schema nodes to be considered equals they must represent the same element in the same file\"\"\"\n if other is None:\n return False\n\n if not isinstance(other, SchemaNode):\n return NotImplemented\n\n return self.file == other.file and self.path_to_element == other.path_to_element\n\n def __hash__(self) -> int:\n return hash(self.file + self.flat_path)\n\n def __str__(self) -> str:\n return self.flat_path\n\n\ndef build_intermediate_representation(\n schema_path: Union[str, TextIO],\n config: GenerationConfiguration,\n loaded_schemas: Optional[Dict[str, Any]] = None,\n) -> SchemaNode:\n \"\"\"Build a SchemaNode object representing a JSON schema with added metadata to help rendering as a documentation.\n\n The representation will resolve references and generate HTML ids for elements\n \"\"\"\n resolved_references: Dict[str, Dict[str, SchemaNode]] = defaultdict(dict)\n\n def defaultdict_list() -> Dict[Any, List]:\n return defaultdict(list)\n\n reference_users: Dict[str, Dict[str, List[SchemaNode]]] = 
defaultdict(defaultdict_list)\n _loaded_schemas: Dict[str, Any]\n if loaded_schemas is None:\n _loaded_schemas = {}\n else:\n assert isinstance(loaded_schemas, dict) and all(\n isinstance(k, str) for k in loaded_schemas.keys()\n ), \"loaded_schemas must be Dict[str, Any]\"\n _loaded_schemas = loaded_schemas\n\n # Make sure schema_path is absolute, all symlinks are resolved\n if isinstance(schema_path, Path):\n schema_path = str(schema_path.resolve())\n elif isinstance(schema_path, str):\n schema_path = os.path.realpath(schema_path)\n else:\n # Assuming schema_path is a file object (TextIO)\n schema_path = os.path.realpath(schema_path.name)\n\n def _record_ref(schema_real_path: str, path_to_element: List[Union[str, int]], current_node: SchemaNode) -> None:\n \"\"\"Record that the node is describing the schema at the provided path\"\"\"\n resolved_references[schema_real_path][\"/\".join(str(e) for e in path_to_element)] = current_node\n\n def _resolve_ref(\n current_node: SchemaNode, schema: Union[Dict, List, int, str]\n ) -> Tuple[Optional[SchemaNode], Optional[SchemaNode]]:\n \"\"\"Resolve the $ref keyword\n\n 2 values are returned:\n - The \"links_to\" value, which is the node to which the current node should point to. This is used when several\n nodes have the same reference.\n\n This value cannot be under #/definitions, since those are not displayed.\n\n If properties a and b both references #/definitions/common, only a will be documented and b will link to a.\n In that case, the method would return the tuple (a, common).\n\n This method makes sure that the final element to be fully documented is the one that is the less nested so\n that the information is closer to the user.\n If properties a/b and c both references #/definitions/common, then a/b will link to c (c, common) and c will\n refer to common directly (None, common)\n - The \"refers_to\" value which is where the definition is in the schema.\n\n In general:\n - If there is no $ref, return (None, None).\n - If there is a referenced element that was never encountered before, build that element and return it for both\n \"links_to\" and \"refers_to\".\n - If there is a referenced element that was already encountered:\n - Check for circular references, if there are, return (None, None)\n - Check if another built node references the same one. If that node is closer to the user, \"links_to\" will be\n that node. Otherwise \"links_to\" is the same as \"refers_to\". 
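(\"Closer to the user\" means a smaller node depth from the schema root.) 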
\"refers_to\" is the reference that was found.\n \"\"\"\n if not isinstance(schema, Dict) or REF not in schema:\n return None, None\n\n reference_path = schema.get(REF)\n if not reference_path:\n return None, None\n\n # Reference found, resolve the path (format \"#/a/b/c\", \"file.json#/a/b/c\", or \"file.json\")\n if \"#\" not in reference_path:\n uri_part = reference_path\n anchor_part = \"\"\n else:\n uri_part, anchor_part = reference_path.split(\"#\", maxsplit=1)\n anchor_part = anchor_part.strip(\"/\")\n\n # Resolve file path portion of reference\n if uri_part:\n if uri_part.startswith(\"http\"):\n referenced_schema_path = uri_part\n else:\n referenced_schema_path = os.path.realpath(os.path.join(os.path.dirname(current_node.file), uri_part))\n elif current_node.file.startswith(\"http\"):\n referenced_schema_path = current_node.file\n else:\n referenced_schema_path = os.path.realpath(current_node.file)\n\n def _find_reference(path: str, anchor_path: str) -> Optional[SchemaNode]:\n resolved_references_for_this_schema = resolved_references[path]\n return resolved_references_for_this_schema.get(anchor_path)\n\n # Check if already loaded\n found_reference = _find_reference(referenced_schema_path, anchor_part)\n\n if found_reference == current_node:\n found_reference = None\n\n if found_reference:\n reference_users_for_this_schema = reference_users[found_reference.file][anchor_part]\n reference_users[referenced_schema_path][anchor_part].append(current_node)\n\n # Detect infinite loop\n ref_by_file = current_node.file\n ref_by_path = current_node.flat_path\n found_users = reference_users.get(ref_by_file, {}).get(ref_by_path)\n while found_users:\n new_found_users = []\n for found_user in found_users:\n if found_user == current_node:\n # Huh oh, this node refers to the current node, let's break the cycle!\n return None, None\n ref_by_file = found_user.file\n ref_by_path = found_user.flat_path\n found_users_for_this = reference_users.get(ref_by_file, {}).get(ref_by_path)\n if found_users_for_this:\n new_found_users += found_users_for_this\n found_users = new_found_users\n\n # Find the first displayed node following the references\n while not found_reference.is_displayed and found_reference.refers_to:\n if found_reference.refers_to == current_node:\n break\n found_reference = found_reference.refers_to\n\n # Is someone else using the reference?\n if reference_users_for_this_schema:\n other_user = None\n other_is_better = False\n i_am_better = False\n for user in reference_users_for_this_schema:\n if user == current_node or not user.is_displayed:\n continue\n\n if not other_user:\n other_user = user\n\n if user.depth < other_user.depth:\n other_user = user\n\n if other_user.depth < current_node.depth:\n other_user = user\n other_is_better = True\n i_am_better = False\n elif other_user.depth > current_node.depth:\n other_is_better = False\n i_am_better = True\n\n # There is at least one other node having the same reference as the current node.\n if other_is_better:\n # The other referencing node is nearer to the user, so it will now be displayed\n # We mark the current node as being hidden and linking to the other one\n other_user.is_displayed = True\n current_node.is_displayed = False\n return other_user, found_reference\n elif i_am_better:\n # The other referencing node is more nested, it should be hidden and link to the current node\n # The current node will documented the element referenced by both\n other_user.is_displayed = False\n other_user.links_to = current_node\n 
current_node.is_displayed = True\n return found_reference, found_reference\n elif other_user and other_user.refers_to:\n # Both nodes are the same depth. The other having been seen first,\n # this node will be hidden and linked to the other node\n current_node.is_displayed = False\n return other_user, found_reference\n\n return found_reference, found_reference\n else:\n reference_users[referenced_schema_path][anchor_part].append(current_node)\n\n # Not an existing reference, so it shall be built\n referenced_schema_path_to_element = anchor_part.split(\"/\")\n new_reference = _build_node(\n current_node.depth,\n current_node.html_id,\n current_node.breadcrumb_name,\n referenced_schema_path,\n referenced_schema_path_to_element,\n _load_schema(referenced_schema_path, referenced_schema_path_to_element),\n current_node.parent,\n current_node.parent_key,\n )\n return new_reference, new_reference\n\n def _load_schema(schema_uri: str, path_to_element: List[Union[str, int]]) -> Union[Dict, List, int, str]:\n \"\"\"Load the schema at the provided path or URL.\n\n If the URI is for a local file, it must be a \"realpath\", meaning absolute and with symlinks resolved.\n\n Loaded paths are kept in memory as to ensure never loading the same file twice\n \"\"\"\n if schema_uri in _loaded_schemas:\n loaded_schema = _loaded_schemas[schema_uri]\n else:\n if schema_uri.startswith(\"http\"):\n if schema_uri.endswith(\".yaml\"):\n loaded_schema = yaml.safe_load(requests.get(schema_uri).text)\n else:\n loaded_schema = requests.get(schema_uri).json()\n else:\n with open(schema_uri, encoding=\"utf-8\") as schema_fp:\n _, extension = os.path.splitext(schema_uri)\n if extension == \".json\":\n loaded_schema = json.load(schema_fp)\n else:\n loaded_schema = yaml.safe_load(schema_fp)\n _loaded_schemas[schema_uri] = loaded_schema\n\n if path_to_element:\n for path_part in path_to_element:\n if not path_part:\n # Empty string\n continue\n if isinstance(path_part, str):\n loaded_schema = loaded_schema[path_part]\n elif isinstance(path_part, int):\n loaded_schema = loaded_schema[path_part]\n\n return loaded_schema\n\n def _get_node_ref(schema: Union[int, str, List, Dict]) -> str:\n if isinstance(schema, dict) and REF in schema:\n return schema[REF]\n return \"\"\n\n def _build_node(\n depth: int,\n html_id: str,\n breadcrumb_name: str,\n schema_file_path: str,\n path_to_element: List[Union[str, int]],\n schema: Union[Dict, List, int, str],\n parent: Optional[SchemaNode] = None,\n parent_key: Optional[str] = None,\n ) -> SchemaNode:\n \"\"\"Recursively build a schema representation\n\n :param depth: Number of levels from the root of the schema to this node. Used when there are references to\n figure out the less nested one in order to display it.\n :param html_id: HTML ID for the current element. 
Used for anchor links.\n :param breadcrumb_name: Name of the node in the breadcrumbs\n :param schema_file_path: Real path to the schema (absolute path with symlinks resolved)\n :param path_to_element: Path from the root of the schema to the current element\n :param schema: The JSON schema part being represented\n :return: A representation of the schema\n \"\"\"\n if not schema_file_path.startswith(\"http\"):\n schema_file_path = os.path.realpath(schema_file_path)\n\n new_node = SchemaNode(\n depth,\n file=schema_file_path,\n path_to_element=path_to_element,\n html_id=html_id,\n breadcrumb_name=breadcrumb_name,\n parent=parent,\n parent_key=parent_key,\n ref_path=_get_node_ref(schema),\n )\n if html_id == \"root\":\n html_id = \"\"\n\n _record_ref(schema_file_path, path_to_element, new_node)\n\n if isinstance(schema, dict):\n keywords = {}\n pattern_id = 1\n for schema_key, schema_value in schema.items():\n # These won't be needed to render the documentation.\n # The definitions will be reached from references, otherwise they are useless\n if schema_key in [\"$id\", \"$ref\", \"$schema\", \"definitions\"]:\n continue\n\n # Examples are rendered in JSON because they will be represented that way in the documentation,\n # no need for a SchemaNode object\n if schema_key == \"examples\":\n keywords[schema_key] = [\n json.dumps(example, indent=4, separators=(\",\", \": \"), ensure_ascii=False)\n for example in schema_value\n ]\n continue\n\n # The default value will be printed as-is, no need for a SchemaNode object\n if schema_key == \"default\":\n keywords[schema_key] = json.dumps(schema_value, ensure_ascii=False)\n continue\n\n if schema_key in KW_PROPERTIES:\n for new_property_name, new_property_schema in schema_value.items():\n new_html_id = html_id\n new_html_id += \"_\" if html_id else \"\"\n new_html_id += escape_property_name_for_id(new_property_name)\n new_node.properties[new_property_name] = _build_node(\n depth + 1,\n new_html_id,\n new_property_name,\n schema_file_path,\n copy.deepcopy(path_to_element) + [new_property_name],\n new_property_schema,\n new_node,\n new_property_name,\n )\n elif schema_key == KW_ADDITIONAL_PROPERTIES:\n if schema_value == False:\n new_node.no_additional_properties = True\n else:\n new_html_id = html_id\n new_html_id += \"_\" if html_id else \"\"\n new_html_id += KW_ADDITIONAL_PROPERTIES\n new_node.additional_properties = _build_node(\n depth + 1,\n new_html_id,\n KW_ADDITIONAL_PROPERTIES,\n schema_file_path,\n copy.deepcopy(path_to_element) + [KW_ADDITIONAL_PROPERTIES],\n schema_value,\n new_node,\n KW_ADDITIONAL_PROPERTIES,\n )\n elif schema_key == KW_PATTERN_PROPERTIES:\n for new_property_name, new_property_schema in schema_value.items():\n new_html_id = html_id\n new_html_id += \"_\" if html_id else \"\"\n new_html_id += f\"pattern{pattern_id}\"\n pattern_id += 1\n new_node.pattern_properties[new_property_name] = _build_node(\n depth + 1,\n new_html_id,\n new_property_name,\n schema_file_path,\n copy.deepcopy(path_to_element) + [new_property_name],\n new_property_schema,\n new_node,\n new_property_name,\n )\n else:\n # Add the property name (correctly escaped) to the ID\n new_html_id = html_id\n new_depth = depth\n if schema_key not in [KW_PROPERTIES, KW_PATTERN_PROPERTIES]:\n new_depth += 1\n new_html_id += \"_\" if html_id else \"\"\n if not parent_key == KW_PATTERN_PROPERTIES:\n new_html_id += escape_property_name_for_id(schema_key)\n else:\n new_html_id += f\"pattern{pattern_id}\"\n pattern_id += 1\n\n keywords[schema_key] = _build_node(\n 
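# recurse into the keyword's value as a child schema node\n 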
new_depth,\n new_html_id,\n schema_key,\n schema_file_path,\n copy.deepcopy(path_to_element) + [schema_key],\n schema_value,\n parent=new_node,\n parent_key=schema_key,\n )\n new_node.keywords = keywords\n elif isinstance(schema, list):\n array_items = []\n for i, element in enumerate(schema):\n # Add the property name (correctly escaped) to the ID\n new_html_id = html_id + (\"_\" if html_id else \"\") + \"i\" + str(i)\n\n array_items.append(\n _build_node(\n depth + 1,\n new_html_id,\n f\"item {i}\",\n schema_file_path,\n path_to_element + [i],\n element,\n parent=new_node,\n )\n )\n new_node.array_items = array_items\n\n else:\n new_node.literal = schema\n\n new_node.links_to, new_node.refers_to = _resolve_ref(new_node, schema)\n\n return new_node\n\n intermediate_representation = _build_node(0, \"\", \"root\", schema_path, [], _load_schema(schema_path, []))\n\n return intermediate_representation\n\n\ndef is_combining(schema_node: SchemaNode) -> bool:\n \"\"\"Test if a schema is one of the combining schema keyword\"\"\"\n return bool({\"anyOf\", \"allOf\", \"oneOf\", \"not\"}.intersection(schema_node.keywords.keys()))\n\n\ndef is_text_short(text: str) -> bool:\n \"\"\"Check if a string is short so that we can decide whether to make the section containing it expandable or not.\n The heuristic is counting 1 for each line + 1 for each group of 80 characters a line has\n \"\"\"\n return sum((len(line) / 80 + 1) for line in str(text).splitlines()) < SHORT_DESCRIPTION_NUMBER_OF_LINES\n\n\ndef is_deprecated(_property_dict: Dict[str, Any]) -> bool:\n \"\"\"Test. Check if a property is deprecated without looking in description\"\"\"\n return False\n\n\ndef is_deprecated_look_in_description(schema_node: SchemaNode) -> bool:\n \"\"\"Test. Check if a property is deprecated looking in description\"\"\"\n if DESCRIPTION not in schema_node.keywords:\n return False\n\n return bool(re.match(DEPRECATED_PATTERN, schema_node.keywords[DESCRIPTION].literal))\n\n\ndef get_required_properties(schema_node: SchemaNode) -> List[str]:\n required_properties = schema_node.keywords.get(\"required\") or []\n if required_properties:\n required_properties = [p.literal for p in required_properties.array_items]\n\n return required_properties\n\n\ndef get_undocumented_required_properties(schema_node: SchemaNode) -> List[str]:\n return list(set(get_required_properties(schema_node)).difference(schema_node.properties.keys()))\n\n\ndef python_to_json(value: Any) -> Any:\n \"\"\"Filter. Return the value as it needs to be displayed in JSON\n\n Used to display a string literals more explicitly for default and const values.\n \"\"\"\n if value is None:\n return \"null\"\n if value is True:\n return \"true\"\n if value is False:\n return \"false\"\n\n if isinstance(value, str) and not value.startswith('\"'):\n return f'\"{value}\"'\n\n return value\n\n\ndef get_type_name(schema_node: SchemaNode) -> Optional[str]:\n \"\"\"Filter. 
Return the type of a property taking into account the type of items for array and enum\"\"\"\n\n def _python_type_to_json_type(python_type: Type[Union[str, int, float, bool, list, dict]]) -> str:\n return {\n str: TYPE_STRING,\n int: TYPE_INTEGER,\n float: TYPE_NUMBER,\n bool: TYPE_BOOLEAN,\n list: TYPE_ARRAY,\n dict: TYPE_OBJECT,\n }.get(python_type, TYPE_STRING)\n\n def _enum_type(enum_values: List[SchemaNode]) -> str:\n enum_type_names = [\n _python_type_to_json_type(python_type_name)\n for python_type_name in set(type(v.literal) for v in enum_values)\n ]\n if enum_type_names:\n return f\"{TYPE_ENUM} (of {' or '.join(enum_type_names)})\"\n\n return TYPE_ENUM\n\n def _add_subtype_if_array(type_name: str):\n if type_name == TYPE_ARRAY:\n items = schema_node.keywords.get(ITEMS, None)\n if not items:\n return type_name\n\n subtype = items.keywords.get(TYPE)\n if subtype:\n subtype = subtype.literal\n if TYPE_ENUM in items.keywords:\n subtype = _enum_type(items.keywords[TYPE_ENUM].array_items)\n\n if not subtype:\n # Too complex to guess items\n return type_name\n\n type_name = f\"{type_name} of {subtype}\"\n\n return type_name\n\n if TYPE_CONST in schema_node.keywords:\n return TYPE_CONST\n if TYPE_ENUM in schema_node.keywords:\n return _enum_type(schema_node.keywords[TYPE_ENUM].array_items)\n\n type_node = schema_node.keywords.get(TYPE)\n if type_node:\n if type_node.array_items:\n type_names = [node.literal for node in type_node.array_items]\n else:\n type_names = [type_node.literal]\n else:\n return None\n\n type_names = [_add_subtype_if_array(type_name) for type_name in type_names]\n\n return \", \".join(type_names[:-1]) + (\" or \" if len(type_names) > 1 else \"\") + type_names[-1]\n\n\ndef _get_description(schema_node: SchemaNode) -> str:\n description = \"\"\n description_node = schema_node.keywords.get(DESCRIPTION)\n if description_node:\n description = description_node.literal\n\n seen = set()\n current_node = schema_node\n while not description and current_node.refers_to:\n if current_node in seen:\n break\n seen.add(current_node)\n referenced_schema = current_node.refers_to\n referenced_description_node = referenced_schema.keywords.get(DESCRIPTION)\n if referenced_description_node:\n description = referenced_description_node.literal\n current_node = referenced_schema\n\n return description\n\n\ndef get_description(schema_node: SchemaNode) -> str:\n \"\"\"Filter. Get the description of a property or an empty string\"\"\"\n return _get_description(schema_node)\n\n\ndef get_description_remove_default(schema_node: SchemaNode) -> str:\n \"\"\"Filter. From the description attribute of a property, return the description without any default values in it.\n Will also convert None to an empty string.\n \"\"\"\n description = _get_description(schema_node)\n if not description:\n return \"\"\n\n match = re.match(DEFAULT_PATTERN, description)\n if not match:\n return description\n\n return description[match.span(1)[1] :].lstrip()\n\n\ndef get_default(schema_node: SchemaNode) -> str:\n \"\"\"Filter. Return the default value for a property\"\"\"\n return schema_node.default_value\n\n\ndef get_default_look_in_description(schema_node: SchemaNode) -> str:\n \"\"\"Filter. Get the default value of a JSON Schema property. 
If not set, look for it in the description.\"\"\"\n default_value = schema_node.default_value\n if default_value:\n return default_value\n\n description = schema_node.keywords.get(DESCRIPTION)\n if not description:\n return \"\"\n description = description.literal\n\n match = re.match(DEFAULT_PATTERN, description)\n if not match:\n return \"\"\n\n return match.group(2)\n\n\ndef get_numeric_restrictions_text(schema_node: SchemaNode, before_value: str = \"\", after_value: str = \"\") -> str:\n \"\"\"Filter. Get the text to display about restrictions on a numeric type(integer or number)\"\"\"\n multiple_of = schema_node.keywords.get(MULTIPLE_OF)\n if multiple_of:\n multiple_of = multiple_of.literal\n maximum = schema_node.keywords.get(MAXIMUM)\n if maximum:\n maximum = maximum.literal\n exclusive_maximum = schema_node.keywords.get(EXCLUSIVE_MAXIMUM)\n if exclusive_maximum:\n exclusive_maximum = exclusive_maximum.literal\n minimum = schema_node.keywords.get(MINIMUM)\n if minimum:\n minimum = minimum.literal\n exclusive_minimum = schema_node.keywords.get(EXCLUSIVE_MINIMUM)\n if exclusive_minimum:\n exclusive_minimum = exclusive_minimum.literal\n\n # Fix minimum and exclusive_minimum both there\n if minimum is not None and exclusive_minimum is not None:\n if minimum <= exclusive_minimum:\n exclusive_minimum = None\n else:\n minimum = None\n\n minimum_fragment = \"\"\n if minimum is not None:\n minimum_fragment += f\"greater or equal to {before_value}{minimum}{after_value}\"\n if exclusive_minimum is not None:\n minimum_fragment += f\"strictly greater than {before_value}{exclusive_minimum}{after_value}\"\n\n # Fix maximum and exclusive_maximum both there\n if maximum is not None and exclusive_maximum is not None:\n if maximum > exclusive_maximum:\n exclusive_maximum = None\n else:\n maximum = None\n\n maximum_fragment = \"\"\n if maximum is not None:\n maximum_fragment += f\"lesser or equal to {before_value}{maximum}{after_value}\"\n if exclusive_maximum is not None:\n maximum_fragment += f\"strictly lesser than {before_value}{exclusive_maximum}{after_value}\"\n\n result = \"Value must be \"\n touched = False\n if minimum_fragment:\n touched = True\n result += minimum_fragment\n if maximum_fragment:\n if touched:\n result += \" and \"\n touched = True\n result += maximum_fragment\n if multiple_of:\n if touched:\n result += \" and \"\n result += f\"a multiple of {before_value}{multiple_of}{after_value}\"\n\n return result if touched else \"\"\n\n\ndef escape_property_name_for_id(property_name: str) -> str:\n \"\"\"Filter. Escape unsafe characters in a property name so that it can be used in a HTML id\"\"\"\n\n escaped = re.sub(\"[^0-9a-zA-Z_-]\", \"_\", str(property_name))\n if not escaped[0].isalpha():\n escaped = \"a\" + escaped\n return escaped\n\n\ndef highlight_json_example(example_text: str) -> str:\n \"\"\"Filter. 
Return an highlighted version of the provided JSON text\"\"\"\n return highlight(example_text, JavascriptLexer(), HtmlFormatter())\n\n\ndef get_local_time() -> str:\n return datetime.now(tz=reference.LocalTimezone()).strftime(\"%Y-%m-%d at %H:%M:%S %z\")\n\n\ndef generate_from_schema(\n schema_file: Union[str, Path, TextIO],\n loaded_schemas: Optional[Dict[str, Any]] = None,\n minify: bool = True,\n deprecated_from_description: bool = False,\n default_from_description: bool = False,\n expand_buttons: bool = False,\n link_to_reused_ref: bool = True,\n config: GenerationConfiguration = None,\n) -> str:\n config = config or _get_final_config(\n minify=minify,\n deprecated_from_description=deprecated_from_description,\n default_from_description=default_from_description,\n expand_buttons=expand_buttons,\n copy_css=False,\n copy_js=False,\n link_to_reused_ref=link_to_reused_ref,\n )\n\n templates_directory = os.path.join(config.templates_directory, config.template_name)\n base_template_path = os.path.join(templates_directory, TEMPLATE_FILE_NAME)\n\n md = markdown2.Markdown(extras=config.markdown_options)\n loader = FileSystemLoader(templates_directory)\n env = jinja2.Environment(loader=loader)\n env.filters[\"markdown\"] = (\n lambda text: jinja2.Markup(md.convert(text)) if config.description_is_markdown else lambda t: t\n )\n env.filters[\"python_to_json\"] = python_to_json\n env.filters[\"get_default\"] = get_default_look_in_description if config.default_from_description else get_default\n env.filters[\"get_type_name\"] = get_type_name\n env.filters[\"get_description\"] = (\n get_description_remove_default if config.default_from_description else get_description\n )\n env.filters[\"get_numeric_restrictions_text\"] = get_numeric_restrictions_text\n env.filters[\"get_required_properties\"] = get_required_properties\n env.filters[\"get_undocumented_required_properties\"] = get_undocumented_required_properties\n env.filters[\"highlight_json_example\"] = highlight_json_example\n env.tests[\"combining\"] = is_combining\n env.tests[\"description_short\"] = is_text_short\n env.tests[\"deprecated\"] = is_deprecated_look_in_description if config.deprecated_from_description else is_deprecated\n env.globals[\"get_local_time\"] = get_local_time\n\n with open(base_template_path, \"r\") as template_fp:\n template = env.from_string(template_fp.read())\n\n if isinstance(schema_file, list):\n # Backward compatibility\n schema_file = os.path.sep.join(schema_file)\n\n intermediate_schema = build_intermediate_representation(schema_file, config, loaded_schemas)\n\n rendered = template.render(schema=intermediate_schema, config=config)\n\n if minify:\n rendered = htmlmin.minify(rendered)\n\n return rendered\n\n\ndef generate_from_filename(\n schema_file_name: Union[str, Path],\n result_file_name: str,\n minify: bool = True,\n deprecated_from_description: bool = False,\n default_from_description: bool = False,\n expand_buttons: bool = False,\n copy_css: bool = True,\n copy_js: bool = True,\n link_to_reused_ref: bool = True,\n config: GenerationConfiguration = None,\n) -> None:\n \"\"\"Generate the schema documentation from a filename\"\"\"\n config = config or _get_final_config(\n minify=minify,\n deprecated_from_description=deprecated_from_description,\n default_from_description=default_from_description,\n expand_buttons=expand_buttons,\n copy_css=copy_css,\n copy_js=copy_js,\n link_to_reused_ref=link_to_reused_ref,\n )\n\n if isinstance(schema_file_name, str):\n schema_file_name = os.path.realpath(schema_file_name)\n 
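# Note: str and pathlib.Path inputs are both normalised to an absolute real\n    # path before rendering. Commented usage sketch (paths are illustrative,\n    # not taken from this repository):\n    #   generate_from_filename(\"schemas/config.schema.json\", \"schema_doc.html\")\n    #   generate_from_filename(Path(\"schemas\") / \"config.schema.json\", \"schema_doc.html\")\n    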
elif isinstance(schema_file_name, Path):\n schema_file_name = str(schema_file_name.resolve())\n\n rendered_schema_doc = generate_from_schema(\n schema_file_name,\n minify=minify,\n deprecated_from_description=deprecated_from_description,\n default_from_description=default_from_description,\n expand_buttons=expand_buttons,\n link_to_reused_ref=link_to_reused_ref,\n config=config,\n )\n\n copy_css_and_js_to_target(result_file_name, config)\n\n with open(result_file_name, \"w\", encoding=\"utf-8\") as result_schema_doc:\n result_schema_doc.write(rendered_schema_doc)\n\n\ndef generate_from_file_object(\n schema_file: TextIO,\n result_file: TextIO,\n minify: bool = True,\n deprecated_from_description: bool = False,\n default_from_description: bool = False,\n expand_buttons: bool = False,\n copy_css: bool = True,\n copy_js: bool = True,\n link_to_reused_ref: bool = True,\n config: GenerationConfiguration = None,\n) -> None:\n \"\"\"Generate the JSON schema documentation from opened file objects for both input and output files. The\n result_file should be opened in write mode.\n \"\"\"\n config = config or _get_final_config(\n minify=minify,\n deprecated_from_description=deprecated_from_description,\n default_from_description=default_from_description,\n expand_buttons=expand_buttons,\n copy_css=copy_css,\n copy_js=copy_js,\n link_to_reused_ref=link_to_reused_ref,\n )\n\n result = generate_from_schema(schema_file, config=config)\n\n copy_css_and_js_to_target(result_file.name, config)\n\n result_file.write(result)\n\n\ndef copy_css_and_js_to_target(result_file_path: str, config: GenerationConfiguration) -> None:\n \"\"\"Copy the CSS and JS files needed to display the resulting page to the directory containing the result file\"\"\"\n files_to_copy = []\n if config.copy_css:\n files_to_copy.append(CSS_FILE_NAME)\n if config.copy_js:\n files_to_copy.append(JS_FILE_NAME)\n if not files_to_copy:\n return\n\n target_directory = os.path.dirname(result_file_path)\n source_directory = os.path.join(config.templates_directory, config.template_name)\n if target_directory == source_directory:\n return\n\n for file_to_copy in files_to_copy:\n source_file_path = os.path.join(source_directory, file_to_copy)\n if not os.path.exists(source_file_path):\n continue\n try:\n shutil.copy(source_file_path, os.path.join(target_directory, file_to_copy))\n except shutil.SameFileError:\n print(f\"Not copying {file_to_copy} to {os.path.abspath(target_directory)}, file already exists\")\n\n\ndef _get_final_config(\n minify: bool,\n deprecated_from_description: bool,\n default_from_description: bool,\n expand_buttons: bool,\n copy_css: bool,\n copy_js: bool,\n link_to_reused_ref: bool,\n config: Union[str, Path, TextIO, Dict[str, Any], GenerationConfiguration] = None,\n config_parameters: List[str] = None,\n) -> GenerationConfiguration:\n if config:\n final_config = _load_config(config)\n else:\n final_config = GenerationConfiguration(\n minify=minify,\n deprecated_from_description=deprecated_from_description,\n default_from_description=default_from_description,\n expand_buttons=expand_buttons,\n link_to_reused_ref=link_to_reused_ref,\n copy_css=copy_css,\n copy_js=copy_js,\n )\n if (\n not minify\n or deprecated_from_description\n or default_from_description\n or expand_buttons\n or not link_to_reused_ref\n ):\n logging.info(CONFIG_DEPRECATION_MESSAGE)\n\n if config_parameters:\n final_config = _apply_config_cli_parameters(final_config, config_parameters)\n\n return final_config\n\n\ndef _load_config(\n config_parameter: 
Optional[Union[str, Path, TextIO, Dict[str, Any], GenerationConfiguration]]\n) -> GenerationConfiguration:\n \"\"\"Load the configuration from either the path (as str or Path) to a config file, the open config file object,\n The loaded config as a dict or the GenerateConfiguration object directly.\n \"\"\"\n if config_parameter is None:\n return GenerationConfiguration()\n\n if isinstance(config_parameter, GenerationConfiguration):\n return config_parameter\n\n if isinstance(config_parameter, dict):\n config_dict = config_parameter\n elif isinstance(config_parameter, (str, Path)):\n if isinstance(config_parameter, str):\n real_path = os.path.realpath(config_parameter)\n else:\n real_path = str(config_parameter.resolve())\n with open(os.path.realpath(real_path), encoding=\"utf-8\") as config_fp:\n config_dict = yaml.safe_load(config_fp.read())\n else:\n config_dict = yaml.safe_load(config_parameter.read())\n\n return GenerationConfiguration.from_dict(config_dict)\n\n\ndef _apply_config_cli_parameters(\n current_configuration: GenerationConfiguration, config_cli_parameters: List[str]\n) -> GenerationConfiguration:\n if not config_cli_parameters:\n return current_configuration\n\n current_configuration_as_dict = current_configuration.to_dict()\n for parameter in config_cli_parameters:\n if \"=\" in parameter:\n parameter_name, parameter_value = parameter.split(\"=\")\n try:\n parameter_value = json.loads(parameter_value)\n except JSONDecodeError:\n pass\n else:\n parameter_name = parameter\n if parameter_name.startswith(\"no_\") or parameter_name.startswith(\"no-\"):\n parameter_value = False\n parameter_name = parameter_name[3:] # Strip the `no_`/`no-`\n else:\n parameter_value = True\n current_configuration_as_dict[parameter_name] = parameter_value\n\n return GenerationConfiguration.from_dict(current_configuration_as_dict)\n\n\n@click.command()\n@click.argument(\"schema_file\", nargs=1, type=click.File(\"r\", encoding=\"utf-8\"))\n@click.argument(\"result_file\", nargs=1, type=click.File(\"w+\", encoding=\"utf-8\"), default=\"schema_doc.html\")\n@click.option(\n \"--config-file\", type=click.File(\"r\", encoding=\"utf-8\"), help=\"JSON or YAML file containing generation parameters\"\n)\n@click.option(\n \"--config\",\n multiple=True,\n help=\"Override generation parameters from the configuration file. \"\n \"Format is parameter_name=parameter_value. For example: --config minify=false. 
Can be repeated.\",\n)\n@click.option(\"--minify/--no-minify\", default=True, help=\"Run minification on the HTML result\")\n@click.option(\n \"--deprecated-from-description\", is_flag=True, help=\"Look in the description to find if an attribute is deprecated\"\n)\n@click.option(\n \"--default-from-description\", is_flag=True, help=\"Look in the description to find an attribute default value\"\n)\n@click.option(\"--expand-buttons\", is_flag=True, help=\"Add 'Expand all' and 'Collapse all' buttons at the top\")\n@click.option(\"--copy-css/--no-copy-css\", default=True, help=f\"Copy {CSS_FILE_NAME} to the folder of the result_file\")\n@click.option(\"--copy-js/--no-copy-js\", default=True, help=f\"Copy {JS_FILE_NAME} to the folder of the result_file\")\n@click.option(\n \"--link-to-reused-ref/--no-link-to-reused-ref\",\n default=True,\n help=\"If set and 2 parts of the schema refer to the same definition, the definition will only be rendered once \"\n \"and all other references will be replaced by a link.\",\n)\ndef main(\n schema_file: TextIO,\n result_file: TextIO,\n config_file: TextIO,\n config: List[str],\n minify: bool,\n deprecated_from_description: bool,\n default_from_description: bool,\n expand_buttons: bool,\n copy_css: bool,\n copy_js: bool,\n link_to_reused_ref: bool,\n) -> None:\n start = datetime.now()\n config = _get_final_config(\n minify=minify,\n deprecated_from_description=deprecated_from_description,\n default_from_description=default_from_description,\n expand_buttons=expand_buttons,\n copy_css=copy_css,\n copy_js=copy_js,\n link_to_reused_ref=link_to_reused_ref,\n config=config_file,\n config_parameters=config,\n )\n\n generate_from_file_object(schema_file, result_file, config=config)\n duration = datetime.now() - start\n print(f\"Generated {result_file.name} in {duration}\")\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"json_schema_for_humans/generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":59019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"203835243","text":"from flask import Flask, app, jsonify, request\r\n\r\napp=Flask(__name__)\r\n\r\n\r\ncontacts= [\r\n {\r\n \"Contact\": \"860508918\",\r\n \"id\":1\r\n },\r\n {\r\n \"Contact\": \"869008918\",\r\n \"id\":2\r\n },\r\n {\r\n \"Contact\": \"98765421\",\r\n \"id\":3\r\n },\r\n {\r\n \"Contact\": \"1391293\",\r\n \"id\":4\r\n },\r\n {\r\n \"Contact\": \"123456789\",\r\n \"id\":5\r\n },\r\n {\r\n \"Contact\": \"987654\",\r\n \"id\":6\r\n },\r\n {\r\n \"Contact\": \"987654\",\r\n \"id\":7\r\n },\r\n]\r\n\r\n@ app.route('/')\r\ndef home():\r\n return \"Welcome to the home page. 
Let's take a look at some contacts.\"\r\n\r\n@app.route('/sendinfo', methods=['POST'])\r\ndef sendinfo():\r\n if not request.json:\r\n return jsonify({'status':'Error', 'Message':'Invalid Data'}, 400)\r\n temp = {\r\n 'id': contacts[-1]['id'] + 1,\r\n 'Contact': request.json['Contact'],\r\n }\r\n contacts.append(temp)\r\n return jsonify({'status':'Success', 'Message':'Data Added'}, 201)\r\n\r\n@app.route('/getinfo', methods=['GET'])\r\ndef getinfo():\r\n return jsonify({'data': contacts})\r\n\r\n\r\n\r\n#If the main process is running, then it will run the app.\r\nif __name__ == '__main__':\r\n app.run(debug=True)","sub_path":"hw1.py","file_name":"hw1.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"207971987","text":"from gensim.models import word2vec\n\nmodel = word2vec.Word2Vec.load(\"./chapter10/word_vec.bin\")\nwith open(\"./chapter10/knock94_out.txt\", \"w\") as f:\n for line in open(\"./chapter10/combined.tab\", \"r\").readlines():\n words = line.strip().split()\n try:\n sim = model.similarity(words[0], words[1])\n except:\n sim = 0\n print(line.strip()+\"\\t\"+str(sim), file=f)\n","sub_path":"wanghongfei/chapter10/knock94.py","file_name":"knock94.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"220489785","text":"from tensorflow import keras\r\nimport tensorflow as tf\r\nimport numpy as np\r\nimport cv2\r\nimport os\r\n\r\ndef preprocess_image(image):\r\n image = np.array(image)\r\n img_reshaped = tf.reshape(image, [1, image.shape[0], image.shape[1], image.shape[2]])\r\n image = tf.image.convert_image_dtype(img_reshaped, tf.float32, name=\"images\") \r\n return image\r\n\r\ndef mask_detect(mask_points, net, frame):\r\n wearing_mask = \"unknown\"\r\n \r\n (x, y, w, h) = mask_points #Retrieve mask points\r\n roi_face = frame[int(y - 2 * w / 5):int(y + 3 * w / 5), x:(x + w)] #Cropping face\r\n\r\n HEIGHT, WIDTH = roi_face.shape[:2]\r\n if HEIGHT >= 32 and WIDTH >= 32: #Only detecting face images 32X32 or larger\r\n save_loc = os.path.join(\"./mask_images\", '_head_shot.jpg')\r\n cv2.imwrite(save_loc, roi_face)\r\n\r\n pic = cv2.imread(save_loc)\r\n pic = cv2.cvtColor(pic,cv2.COLOR_BGR2RGB)\r\n pic = cv2.resize(pic,(128,128))\r\n\r\n pic = preprocess_image(pic)\r\n prediction = np.argmax(net.predict(pic))\r\n\r\n if prediction == 0:\r\n wearing_mask = \"nomask\"\r\n elif prediction == 1:\r\n wearing_mask = \"mask\"\r\n else:\r\n wearing_mask = \"unknown\"\r\n \r\n\r\n return wearing_mask","sub_path":"Detection/mask_detection.py","file_name":"mask_detection.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"331925376","text":"# Dependancies\nimport os\nimport csv\n\nbudgetFileInput = input(\"What is the name of your file? 
(include extension) : \")\n\n# Define file path\ncsvPath = os.path.join('raw_data', budgetFileInput)\n#csvPath = os.path.join('raw_data', 'budget_data_2.csv')\n\n# Open file\nwith open(csvPath, newline = '') as csvFile:\n\n# Define reader\n csvReader = csv.reader(csvFile, delimiter = ',')\n#Skip the header\n next(csvReader, None)\n\n# Define variables\n totalMonths = 0\n totalRevenue = 0\n priorRevenue = 0\n monthChange = 0\n revenueChange = 0\n avgRevenue = 0\n highChange = 0\n highDate = ''\n lowChange = 0\n lowDate = ''\n\n# Start looping through the data\n for budgetData in csvReader:\n\n # Set up variable on first loop\n if totalMonths == 0:\n priorRevenue = int(budgetData[1])\n highChange = int(budgetData[1])\n highDate = budgetData[0]\n lowChange = int(budgetData[1])\n lowDate = budgetData[0]\n\n \n\n # Calc month change then test for highs and lows\n monthChange = int(budgetData[1]) - priorRevenue\n\n if monthChange > highChange:\n highChange = monthChange\n highDate = budgetData[0]\n\n if monthChange < lowChange:\n lowChange = monthChange\n lowDate = budgetData[0]\n \n # aggregate revenue and revnue changes\n totalRevenue = totalRevenue + int(budgetData[1])\n revenueChange = revenueChange + monthChange\n \n # reset priorRevenue and increment month counter\n priorRevenue = int(budgetData[1])\n totalMonths += 1 \n\n\n # Calculate average revenue\n avgRevenue = int(revenueChange/totalMonths)\n\n # Print to terminal\n print(\" \")\n print(\"----------------------------------------------------\")\n print(\" Financial Analysis \")\n print(\"----------------------------------------------------\")\n print(f\"Total Months: {totalMonths}\")\n print(f\"Total Revenue: ${totalRevenue} \")\n print(f\"Average Revenue Change: ${avgRevenue}\")\n print(f\"Greatest Increase in Revenue: {highDate} - ${highChange}\")\n print(f\"Greatest Decrease in Revenue: {lowDate} - ${lowChange}\")\n\n\n # Setup Output file name\n parseFileName = budgetFileInput.split(\".\")\n budgetFileOutput = parseFileName[0] + \".txt\"\n #print(budgetFileOutput)\n\n\n # Write to file\n file = open(budgetFileOutput,\"w\") \n file.write(\"\\n \")\n file.write(\"\\n----------------------------------------------------\")\n file.write(\"\\n Financial Analysis \")\n file.write(\"\\n----------------------------------------------------\")\n file.write(f\"\\nTotal Months: {totalMonths}\")\n file.write(f\"\\nTotal Revenue: ${totalRevenue} \")\n file.write(f\"\\nAverage Revenue Change: ${avgRevenue}\")\n file.write(f\"\\nGreatest Increase in Revenue: {highDate} - ${highChange}\")\n file.write(f\"\\nGreatest Decrease in Revenue: {lowDate} - ${lowChange}\")\n \n file.close() ","sub_path":"PyBank/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"401262473","text":"class User:\r\n def __init__(self, first, last, age, gender=\"male\"):\r\n self.first_name = first\r\n self.last_name = last\r\n self.age = age\r\n self.gender = gender\r\n\r\n def profile(self):\r\n profile = \"First name: \" + self.first_name.title() + \"\\nLast name: \" + self.last_name.title() + \"\\nGender: \" + self.gender + \\\r\n '\\nAge: ' + str(self.age) + \"\\n\"\r\n return profile\r\n\r\n def greet_user(self):\r\n print(\"Hello \" + self.first_name.title() + \" \" + self.last_name.title() + \" !\")\r\n\r\n\r\nuser1 = User('chou', 'jay', 30)\r\nuser2 = User('helen', 'keller', 25, 'female')\r\nuser3 = User('he', 'yu', 
20)\r\n\r\nuser1.greet_user()\r\nuser2.greet_user()\r\nuser3.greet_user()\r\nprint(\"\\n\")\r\nprint(user1.profile())\r\nprint(user2.profile())\r\nprint(user3.profile())","sub_path":"9-3 用户.py","file_name":"9-3 用户.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"131459395","text":"# 3. Реализовать функцию my_func(), которая принимает три позиционных аргумента, и возвращает сумму\n# наибольших двух аргументов.\ndef my_func(number1: float, number2: float, number3: float) -> float:\n result = [ number1, number2, number3 ]\n result.sort()\n print(result[1]+result[2])\n\nnumber1 = float(input('Enter the first number: '))\nnumber2 = float(input('Enter the second number: '))\nnumber3 = float(input('Enter the third number: '))\n\nmy_func(number1, number2, number3)","sub_path":"task3_les3.py","file_name":"task3_les3.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"80099364","text":"from django.shortcuts import render\r\nfrom studapp.models import Student\r\nfrom django.http import HttpResponse\r\n\r\n# Create your views here.\r\n\r\ndef display_student_info(request):\r\n qs=Student.objects.all()\r\n\r\n #context = {'qs':qs}\r\n print(\"Inside Views Display all Method\")\r\n #return render(request, 'studapp/studresults.html',context)\r\n return render(request, 'studapp/stdresultsbase.html',{'qs':qs})\r\n\r\n\r\ndef delete(request):\r\n\r\n sid1=int(request.GET[\"sid\"])\r\n #qs=Student.objects.all().delete()[0]\r\n\r\n \r\n qs = Student.objects.filter(sid=sid1).delete()\r\n\r\n srec = Student.objects.get(sid=sid1)\r\n \r\n sname=request.GET[\"sname\"]\r\n sm1=int(request.GET[\"sm1\"])\r\n sm2=int(request.GET[\"sm2\"])\r\n sm3=int(request.GET[\"sm3\"])\r\n\r\n srec.sname = sname\r\n srec.sm1 = sm1\r\n srec.sm2 = sm2\r\n srec.sm3 = sm3\r\n srec.tot = sm1+sm2+sm3\r\n\r\n srec.save()\r\n\r\n\r\n\r\n html = \"Student record is deleted \"\r\n return HttpResponse(html)\r\n\r\ndef create_student_info(request):\r\n sid=int(request.GET[\"sid\"])\r\n sname=request.GET[\"sname\"]\r\n sm1=int(request.GET[\"sm1\"])\r\n sm2=int(request.GET[\"sm2\"])\r\n sm3=int(request.GET[\"sm3\"])\r\n tot=sm1+sm2+sm3\r\n s=Student(sid,sname,sm1,sm2,sm3,tot)\r\n s.save()\r\n print(\"Inside Views Create Method\")\r\n html = \"Student record is saved \"\r\n return HttpResponse(html)\r\n \r\n\"\"\"\r\n\r\ndef add_student_info(request):\r\n qs=Student.student.getorcreate()\r\n return render(response, \"studapp/studresults.html\",{\"qs\":qs})\r\n\"\"\"\r\n","sub_path":"studapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"115139574","text":"#!/usr/bin/env python\n\n\"\"\"\nEntry point for python server.\n\"\"\"\nimport sys\nimport logging\nimport argparse\n\n\nfrom app import app_config\nfrom app.server import initialize_app\nfrom app.logging_config import set_log_level\n\nlog = logging.getLogger(__name__)\n\nAPPLICATION_MODES = {\n 'prod': app_config.Config,\n 'dev': app_config.DevelopmentConfig,\n 'test': app_config.TestingConfig\n}\n\n\ndef setup_argparser():\n parser = argparse.ArgumentParser(prog='Python-Server', description='Python Server Component')\n parser.add_argument(\n '--mode', choices=APPLICATION_MODES.keys(), default='prod', help='Application run mode'\n )\n return parser\n\n\ndef main():\n 
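# Added note (illustrative, not in the original): main() parses --mode, maps\n    # it to a config class, sets the log level, then runs Flask with the\n    # reloader enabled only outside prod. Example invocation:\n    #   python main.py --mode dev\n    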
\"\"\"\n Run the flask app\n \"\"\"\n parser = setup_argparser()\n args = parser.parse_args()\n config = APPLICATION_MODES[args.mode]\n\n set_log_level(args.mode)\n flask_app = initialize_app(config)\n\n if args.mode == 'prod':\n use_reloader = False\n else:\n use_reloader = True\n\n flask_app.run(debug=config.FLASK_DEBUG, host=config.SERVER_HOST, port=config.SERVER_PORT, use_reloader=use_reloader)\n return 0\n\nif __name__ == '__main__':\n status = main()\n sys.exit(status)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"265134023","text":"from flask import render_template, flash, redirect, request, url_for, g, session\nfrom micro import app, db, lm\nfrom .forms import LoginForm, RegForm, EditForm, PostForm\nfrom .models import User, Post\nfrom passlib.hash import sha256_crypt\nfrom flask_login import login_user, logout_user, current_user, login_required\nfrom datetime import datetime\nfrom config import POSTS_PER_PAGE\n\n# searching to validate user exists u = models.User.query.filter_by(nickname='Arron')\n\n@app.route('/', methods=['GET', 'POST'])\n@app.route('/index/', methods=['GET', 'POST'])\n@app.route('/index/', methods=['GET', 'POST'])\n@login_required\ndef index(page=1):\n \"\"\" Checks if user is logged in and if not is kicked to the sign in page \"\"\"\n user = g.user\n form = PostForm(request.form)\n if form.validate_on_submit():\n post = Post(body=form.post.data, timestamp=datetime.utcnow(), author=g.user)\n db.session.add(post)\n db.session.commit()\n return redirect(url_for('index'))\n\n posts = g.user.followed_posts().paginate(page, POSTS_PER_PAGE, False)\n\n # return the index template and pass the template the keword arguments\n return render_template('index.html', title='Home',form=form, user=user, posts=posts)\n\n# Flask-Login user loader\n@lm.user_loader\ndef load_user(id):\n \"\"\"Required function for Flask-Login to retrieve the user from the database\"\"\"\n return User.query.get(int(id))\n\n@app.before_request\ndef before_request():\n \"\"\"Assigns the current_user global from Flask-Login to Flask's g global object\"\"\"\n g.user = current_user\n if g.user.is_authenticated:\n g.user.last_seen = datetime.utcnow()\n db.session.add(g.user)\n db.session.commit()\n\n@app.route('/login/', methods=['GET', 'POST'])\ndef login():\n \"\"\"Checks to see if user is already logged in and if so kicks them back to index\n if not the form will be validated and if the user checks out will be kicked over to index\"\"\"\n if g.user is not None and g.user.is_authenticated:\n return redirect(url_for('index'))\n\n form = LoginForm(request.form)\n if form.validate_on_submit():\n\n user = User.query.filter_by(nickname=form.username.data).first()\n remember_me = form.remember_me.data\n if user and sha256_crypt.verify(form.password.data, user.pwd):\n\n login_user(user, remember = remember_me)\n return redirect(request.args.get('next') or url_for('index'))\n else:\n if user:\n flash('password is incorrect')\n else:\n flash('Username is incorrect')\n return render_template('login.html', title='Sign In', form=form)\n\n return render_template('login.html', title='Sign In', form=form)\n\n@app.route('/logout/')\ndef logout():\n \"\"\"Uses Flask-Login's logout function to clear the user's session\"\"\"\n logout_user()\n return redirect(url_for('index'))\n\n@app.route('/register/', methods=['GET', 'POST'])\ndef register():\n \"\"\"Checks to see if the user 
is logged in and if so kicks them to index\n if not checks if the user or email already exists if they do not it loads them\n as users into the database. Then requests for a login\"\"\"\n if g.user is not None and g.user.is_authenticated:\n return redirect(url_for('index'))\n\n form = RegForm(request.form)\n\n if form.validate_on_submit():\n username = User.query.filter_by(nickname=form.username.data).first()\n address = User.query.filter_by(email=form.email.data).first()\n if username:\n flash('Username already exists.')\n elif address:\n flash('Email address already exists.')\n else:\n hashed = sha256_crypt.encrypt((form.password.data))\n new_user = User(nickname=form.username.data, email=form.email.data, pwd=hashed)\n db.session.add(new_user)\n db.session.commit()\n user = User.query.filter_by(nickname=form.username.data).first()\n login_user(user)\n db.session.add(user.follow(user))\n db.session.commit()\n return redirect(url_for('index'))\n return render_template('register.html', title='Register', form=form)\n\n else:\n return render_template('register.html', title='Register', form=form)\n\n@app.route('/user//')\n@app.route('/user///')\n@login_required\ndef user(nickname, page=1):\n \"\"\"This function takes in an object nickname to dynamically create a profile page.\n If the nickname does not exist the user is redirected to the index page otherwise\n the profile page is built\"\"\"\n user = User.query.filter_by(nickname=nickname).first()\n if user == None:\n flash('User %s not found.' % nickname)\n return redirect(url_for('index'))\n posts = user.sorted_posts().paginate(page, POSTS_PER_PAGE, False)\n return render_template('user.html', user=user, posts=posts, title=user.nickname)\n\n@app.route('/edit/', methods=['GET', 'POST'])\n@login_required\ndef edit():\n form = EditForm(request.form)\n\n if form.validate_on_submit():\n g.user.nickname = form.nickname.data\n g.user.about_me = form.about_me.data\n db.session.add(g.user)\n db.session.commit()\n return redirect(url_for('edit'))\n else:\n form.nickname.data = g.user.nickname\n form.about_me.data = g.user.about_me\n return render_template('edit.html', title=g.user.nickname, form=form)\n\n@app.route('/follow//')\n@login_required\ndef follow(nickname):\n user = User.query.filter_by(nickname=nickname).first()\n if user is None:\n flash('User %s not found' % nickname)\n return redirect(url_for('index'))\n if user == g.user:\n flash('You cannot follow yourself!')\n return redirect(url_for('user', nickname=nickname))\n u = g.user.follow(user)\n if u is None:\n flash('Cannot follow' + nickname + '.')\n return redirect(url_for('user', nickname=nickname))\n db.session.add(u)\n db.session.commit()\n flash('You are now following ' + nickname + '.')\n return redirect(url_for('user', nickname=nickname))\n\n@app.route('/unfollow//')\n@login_required\ndef unfollow(nickname):\n user = User.query.filter_by(nickname=nickname).first()\n if user is None:\n flash('User %s not found' % nickname)\n return redirect(url_for('index'))\n if user == g.user:\n flash('You cannot unfollow yourself!')\n return redirect(url_for('user', nickname=nickname))\n u = g.user.unfollow(user)\n if u is None:\n flash('Cannot unfollow' + nickname + '.')\n return redirect(url_for('user', nickname=nickname))\n db.session.add(u)\n db.session.commit()\n flash('You are no longer following ' + nickname + '.')\n return redirect(url_for('user', nickname=nickname))\n\n# error 404 handeling\n@app.errorhandler(404)\ndef page_not_found(errors):\n return render_template('404.html', 
title=g.user.nickname)\n\n@app.errorhandler(500)\ndef internal_error(error):\n \"\"\"This manages 500 errors. It use rollback for SQL management\"\"\"\n db.session.rollback()\n return render_template('500.html', title=g.user.nickname), 500\n","sub_path":"micro/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"482902115","text":"import os\nimport contextlib\n\nfrom termcolor import colored, cprint\n\n\nROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\ndef print_block_message(message, colour=\"green\"):\n terminal_columns = 80\n\n def print_edge(is_top):\n if is_top:\n left_corner = u\"┏\"\n right_corner = u\"┓\"\n else:\n left_corner = u\"┗\"\n right_corner = u\"┛\"\n cprint(\n left_corner + u\"━\" * (terminal_columns-2) +\n right_corner, 'green', attrs=['bold'])\n\n def print_text():\n chunk_size = terminal_columns - 4\n string_chunks = [\n message[idx:idx + chunk_size] for\n idx in range(0, len(message), chunk_size)\n ]\n for string_chunk in string_chunks:\n line_string = \"┃ \" + string_chunk\n line_string += \" \" * (terminal_columns - len(line_string) - 1) + \"┃\"\n cprint(line_string, colour, attrs=[\"bold\"])\n\n print_edge(is_top=True)\n print_text()\n print_edge(is_top=False)\n\n\n@contextlib.contextmanager\ndef change_dir(dir_name):\n try:\n os.chdir(os.path.join(ROOT_DIR, dir_name))\n yield\n finally:\n os.chdir(ROOT_DIR)\n","sub_path":"tasks/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"268451659","text":"#-*- coding : utf-8 -*-\n#导入模块\n#这是python模块\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import Select\nimport time , random , string , binascii , csv , json ,sys \n#下边是自己的模块\nimport read_config\n\n#这里是全局变量\ndriver = \"\" #这是浏览器对象\n\nprofile = \"\"#设置浏览器基本设置\n\nbaiduTitle = \"\"#设置百度标题,用于判断是否页面已经载入\n\n#这是初始化浏览器的办法\ndef setHttpsProxy(ip,port,useragent = \"\"):#参数 ip地址 端口 这是useragent\n global driver , profile#全局变量\n profile = webdriver.FirefoxProfile(read_config.value(\"firfox_default_file\"))#使用本地的firfox的配置文件\n #profile.set_preference('network.proxy.type', 1)#设置浏览器上完方式为手动\n if ip !='':\n profile.set_preference('network.proxy.ssl', ip)#这里设置代理ip\n profile.set_preference('network.proxy.ssl_port', port)#这是设置代理的端口\n if useragent != \"\" :#判断usergaent是否为空\n profile.set_preference('general.useragent.override', useragent)#这是里设置useragent\n profile.update_preferences()\n driver = webdriver.Firefox(profile)#设置浏览器\n driver.maximize_window()#浏览器最大化(这个可选)\n\n\ndef pageReader(times = 10, stoptimes = 3):#页面停留\n global driver#全局变量\n time.sleep(stoptimes*read_config.value(\"page_stop_time\"))#页面停留时间默认3分钟,因为百度会根据时间判断页面的重要性\n num = 180 #固定值是180根据百度搜多搜索页面高度1879\n for x in range(1,int(random.uniform(4,times))): #循环 设置循环表示要跳转几次. 
上线 的话 可以多添加 几次\n time.sleep(3) #每次循环之后添加停留时间增加容错率\n num = abs(num + int(random.sample([180,-180],1)[0]))\n driver.execute_script(\"window.scrollBy(0,%s)\" % num,\"\") #最后了 开始 跳转 就酱\n\n\n\ndef baidu_list_page_reader():#这个页面 是百度 搜索 列表的阅读 页面 不知带 有用没 先 用用吧\n global driver#全局变量\n time.sleep(5) #先休息5秒防止出错\n num = 180 #固定值是180根据百度搜多搜索页面高度1879\n #先要 跳转到 页面 最下边\n for x in xrange(1,11):\n driver.execute_script(\"window.scrollBy(0,%s)\" %(x*num),\"\") #最后了 开始 跳转 就酱\n #之后开始 随机跳转\n pageReader(10,0.2)\n\n\ndef noClick():#这个办法主要是 放一些 排除 的网站,然后 其他 的网站 都要 点击\n for x in read_config.r_no_click():\n driver.execute_script(x,\"\")\n\ndef baiduNextPage():#这个方法主要实现的是跳转到百度下一页 的页面.时间 的话 可以 根据 电脑 适当 的调节\n global baiduTitle , driver\n time.sleep(5)#等待页面载入\n assert baiduTitle in driver.title#确定页面是百度搜索页面\n driver.implicitly_wait(10)#隐试等待\n driver.execute_script(\"window.scrollBy(0,document.body.scrollHeight)\",\"\")#滚动到最下边.没什么卵用\n driver.find_element_by_link_text(\"下一页>\").click()\n\n\n#下边是两套规则\ndef type1():#规则1 除了过滤器其他的随机点\n time.sleep(5) #时间停留5秒 增加系统容错率\n for x in range(read_config.value(\"baidupagenumber1\")):\n time.sleep(3)\n noClick()#过滤掉不要的链接\n content_list_num = driver.find_elements_by_tag_name(\"h3\")\n for x in content_list_num:\n try:\n if suiji():\n x.find_element_by_tag_name(\"a\").click()\n time.sleep(5)\n driver.switch_to_window(driver.window_handles[-1])\n pageReader()\n driver.close()\n driver.switch_to_window(driver.window_handles[0])\n time.sleep(5)\n except:\n pass\n baiduNextPage()\n\ndef type2(link):#规则2 点击特定的页面 其他的不点\n time.sleep(5) #时间停留5秒 增加系统容错率\n for x in range(read_config.value(\"baidupagenumber2\")):#这是翻页页数在配置文件设置\n try:\n driver.find_element_by_partial_link_text(link).click()\n time.sleep(5)\n driver.switch_to_window(driver.window_handles[-1])\n pageReader()\n driver.close()\n driver.switch_to_window(driver.window_handles[0])\n time.sleep(5)\n except:\n pass\n baiduNextPage()\n\n\ndef suiji(sum = 7):#提��随机数 直接返回布尔值 默认为70%\n if int(random.uniform(0,10))>man()>>except')\n return False\n\n\n\n\ndef main():#用于测试种种\n search_inf = {'ip':'','port':'','useragent':'','search_type':True,'click_link':'www.51yam.com','keyword':'富硒六味地黄丸'}\n request = man(search_inf)\n print(request)\n \n\nif __name__ == '__main__':\n sys.exit(int(main() or 0))\n","sub_path":"baidu_click/pc_search.py","file_name":"pc_search.py","file_ext":"py","file_size_in_byte":5866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"348036508","text":"import numpy\r\nfrom random import randint\r\nfrom customized_class.csv_init import CSV_init\r\nfrom scipy.ndimage import filters,interpolation\r\nfrom scipy.misc import imresize\r\nimport scipy.spatial as spatialdist\r\nfrom random import randint\r\nX_train=numpy.load('traindata_3.npy')\r\ny=numpy.load('y_csv.npy')\r\n\r\ncounter=0\r\nfor img in X_train[0:len(X_train)]:\r\n # if y[counter]!=3:\r\n temp_img=numpy.reshape(img,(96,96,3))\r\n img2 = interpolation.rotate(temp_img, randint(-60, 0))\r\n img2 = img2[randint(0, 15):96 - randint(0, 10), randint(0, 15):96 - randint(0, 10)]\r\n img2 = imresize(img2, (96, 96, 3))\r\n img2=numpy.reshape(img2,(1,96*96*3))\r\n X_train=numpy.append(X_train,img2,axis=0)\r\n y=numpy.append(y,[y[counter]],axis=0)\r\n img2 = interpolation.rotate(temp_img, randint(0, 60))\r\n img2 = img2[randint(0, 15):96 - randint(0, 10), randint(0, 15):96 - randint(0, 10)]\r\n img2 = imresize(img2, (96, 96, 3))\r\n img2=numpy.reshape(img2,(1,96*96*3))\r\n X_train=numpy.append(X_train,img2,axis=0)\r\n 
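# The matching label is appended right below so X_train and y stay aligned.\r\n    # Caveat: numpy.append copies the whole array on every call, making this\r\n    # loop quadratic. A cheaper (hypothetical) sketch collects rows in Python\r\n    # lists and stacks once after the loop:\r\n    #   aug_rows.append(img2); aug_labels.append(y[counter])\r\n    #   X_train = numpy.vstack([X_train] + aug_rows)\r\n    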
y=numpy.append(y,[y[counter]],axis=0)\r\n counter += 1\r\n\r\n\r\nnumpy.save('X_3_augmented2',X_train)\r\nnumpy.save('y_3_augmented2',y)\r\n\r\n\r\n\r\n","sub_path":"face_rec/final_project_301/image_augmentation.py","file_name":"image_augmentation.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"302647608","text":"from urllib2 import urlopen\nfrom datetime import date, datetime\nfrom urllib import urlencode\n#from pylab import plot, show\nimport urllib2\nimport json\nfrom pprint import pprint\nfrom StringIO import StringIO\nfrom numpy import genfromtxt\n\ndef extract_station_data(code):\n url = \"http://data.hisparc.nl/api/station/%d/\"%(code)\n try:\n response = json.loads(urllib2.urlopen(url).read())\n except urllib2.HTTPError:\n return None\n else:\n return response\n\ndef get_stations():\n data = []\n url = 'http://data.hisparc.nl/api/stations'\n response = json.loads(urllib2.urlopen(url).read())\n for station in response:\n data.append(station['number'])\n return data\n\ndef get_events(station, start, end):\n url = 'http://data.hisparc.nl/data/'+str(station)+'/events'\n query = urlencode({'download': False, 'start': start,'end': end})\n full_url = url + '?' + query\n data = urlopen(full_url).read()\n format = [('date', 'datetime64[D]'), ('time', '|S8'),\n ('timestamp', 'uint32'), ('nanoseconds', 'uint32'),\n ('pulseheights', '4int16'), ('integrals', '4int32'),\n ('n1', 'float32'), ('n2', 'float32'),\n ('n3', 'float32'), ('n4', 'float32'),\n ('t1', 'float32'), ('t2', 'float32'),\n ('t3', 'float32'), ('t4', 'float32'),\n ('t_trigger', 'float32')]\n a = genfromtxt(StringIO(data), delimiter=\"\\t\", dtype=format)\n return a\n\ndef main():\n station_ids = [3] #get_stations()\n for code in station_ids:\n out = extract_station_data(code)\n if out is not None:\n pprint(out)\n out = get_events(code,datetime(2013, 7, 2, 11, 0),datetime(2013, 7, 2, 12, 0))\n print(out)\n\nif __name__ == '__main__':\n main()","sub_path":"station_data.py","file_name":"station_data.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"569970622","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Aug 19 17:40:13 2021\r\n\r\n@author:Claudio Duran-Alarcon\r\n\"\"\"\r\n# Script to select and read ghcnm date by area and station code.\r\n\r\ndef select_stations(station_code = None, \r\n area = [-90,-180,-60,180], \r\n metadata_fname = \"ghcnm.tavg.v4.0.1.20210818.qcu.inv\",\r\n data_fname = \"ghcnm.tavg.v4.0.1.20210818.qcu.data\",\r\n output_list = 'ghcnm_selected_stations.csv',\r\n output_path = \"/data\"):\r\n\r\n import pandas as pd\r\n\r\n txt = open(metadata_fname)\r\n stns = txt.readlines()\r\n\r\n code = []\r\n latitude = []\r\n longitude = []\r\n elevation = []\r\n name = []\r\n\r\n for s in stns:\r\n line = s.split()\r\n if station_code == None:\r\n lat = float(line[1])\r\n lon = float(line[2])\r\n if (lat >= area[0]) & (lat <= area[2]) & (lon >= area[1]) & (lon <= area[3]): \r\n code.append(line[0])\r\n latitude.append(lat)\r\n longitude.append(lon)\r\n elevation.append(line[3])\r\n name.append(line[4])\r\n else:\r\n stn_code = line[0]\r\n if stn_code in station_code:\r\n code.append(stn_code)\r\n latitude.append(float(line[1]))\r\n longitude.append(float(line[2]))\r\n elevation.append(line[3])\r\n name.append(line[4]) \r\n\r\n dic = {\r\n 'Code' : code,\r\n 'Latitude' : 
latitude,\r\n 'Longitude' : longitude,\r\n 'Elevation' : elevation,\r\n 'Name' : name,\r\n }\r\n\r\n stations = pd.DataFrame(dic)\r\n\r\n stations.to_csv(output_list,index = False, columns = ['Code', 'Latitude', 'Longitude','Elevation','Name'])\r\n \r\n get_stations(output_list,data_fname = data_fname, path_out = output_path)\r\n \r\n return(stations)\r\n \r\ndef get_stations(selected_stns_fname,\r\n data_fname = \"ghcnm.tavg.v4.0.1.20210818.qcu.dat\",\r\n path_out = \"data/\"):\r\n \r\n #print(selected_stns_fname,data_fname,path_out)\r\n\r\n import pandas as pd\r\n import numpy as np\r\n import os\r\n\r\n if os.path.exists(path_out) == False: os.mkdir(path_out)\r\n \r\n stations = pd.read_csv(selected_stns_fname)\r\n\r\n txt = open(data_fname)\r\n lines = txt.readlines()\r\n station_codes = np.array([l[:11] for l in lines])\r\n\r\n for stn in range(np.size(stations['Code'])):\r\n \r\n fname_out = path_out+stations['Code'][stn]+'.csv'\r\n \r\n if os.path.isfile(fname_out) == False:\r\n \r\n pix = np.squeeze(np.where(station_codes == stations['Code'][stn]))\r\n\r\n nyears = np.size(pix)\r\n date = []\r\n T = []\r\n QC = []\r\n \r\n for l in np.array(lines)[pix]:\r\n for m in range(12):\r\n date.append(l[11:15]+'-'+str(m+1).zfill(2))\r\n T.append(float(l[19+(m)*8:19+5+(m)*8])) \r\n QC.append(l[26+(m)*8:26+1+(m)*8])\r\n T = np.array(T) \r\n T = np.ma.masked_where(T == -9999, T)/100.\r\n\r\n dic = {\r\n 'Date [YYYY-MM]' : date,\r\n 'Temperature [°C]' : T,\r\n 'QCFLAG' : QC,\r\n }\r\n\r\n data = pd.DataFrame(dic)\r\n\r\n data.to_csv(fname_out,index = False, columns = ['Date [YYYY-MM]', 'Temperature [°C]', 'QCFLAG'])\r\n \r\ndef read_date(fname):\r\n import pandas as pd\r\n\r\n df = pd.read_csv(fname, index_col=[0])\r\n df.index = pd.to_datetime(df.index, format=\"%Y-%m\")\r\n \r\n return(df)\r\n\r\nimport argparse, os\r\nimport numpy as np\r\n\r\nif __name__ == \"__main__\":\r\n parser = argparse.ArgumentParser(description=\"Tool to select and read ghcnm datasets by area OR station code\")\r\n parser.add_argument(\"-d\",\"--data_path\", metavar='\\b', type=str, help=\"Filepath to ghcnm data (ghcnm.tavg.vn.y.z.YYYMMDD.qcu.dat)\", required=True)\r\n parser.add_argument(\"-m\",\"--metadata_path\", metavar='\\b', type=str, help=\"Filepath to ghcnm metadata (ghcnm.tavg.vn.y.z.YYYMMDD.qcu.inv). If not given, --data_path with '.inv' extension is used as default\")\r\n parser.add_argument(\"-o\",\"--output_path\", metavar='\\b', type=str, help=\"Output filepath (default = 'ghcnm_out/')\", default = 'ghcnm_out/')\r\n parser.add_argument(\"-s\",\"--stations_list\", metavar='\\b', type=str, help=\"Name of the file containing the list selected stations (default = 'stations.csv')\", default = 'stations.csv')\r\n parser.add_argument(\"-c\",\"--codes\", metavar='\\b', type=str, nargs='+', help=\"List of station codes within the ghcnm dataset [code1 code2 code3 code4 ...]. 
When --codes is provides, --area is not used\")\r\n parser.add_argument(\"-a\",\"--area\", metavar='\\b', type=float, nargs='+', help=\"Geographic coordinates of the opposite vertices of a rectangle [minlat minlon maxlat maxlon]\", default = [9999,9999,9999,9999])\r\n args = parser.parse_args()\r\n\r\n missing_args = False\r\n\r\n if (9999 in args.area) and (args.codes == None): \r\n print( ' ')\r\n parser.error('No action requested, add --area or --codes')\r\n missing_args = True\r\n\r\n if missing_args == False:\r\n ### Retrieve stations\r\n ## Selected by station code OR area\r\n\r\n data_path_abs = os.path.abspath(os.path.normpath(args.data_path))\r\n \r\n if args.metadata_path != None:\r\n metadata_path_abs = os.path.abspath(os.path.normpath(args.metadata_path))\r\n else:\r\n metadata_path_abs = data_path_abs[:-3] + 'inv'\r\n\r\n if os.path.exists(os.path.normpath(args.output_path)) == False: os.mkdir(os.path.normpath(args.output_path))\r\n if os.path.exists(os.path.normpath(args.output_path) + '/stations/') == False: os.mkdir(os.path.normpath(args.output_path) + '/stations/')\r\n if os.path.exists(os.path.normpath(args.output_path) + '/stations/') == False: os.mkdir(os.path.normpath(args.output_path) + '/data/')\r\n\r\n os.chdir(os.path.normpath(args.output_path))\r\n\r\n stations = select_stations(\r\n station_code = args.codes, \r\n area = args.area, \r\n output_list = 'stations/' + args.stations_list,\r\n data_fname = data_path_abs,\r\n metadata_fname = metadata_path_abs,\r\n output_path = 'data/',\r\n ) \r\n print(\"###############################################################\")\r\n print(\" \")\r\n if np.size(stations.Code) == 1:\r\n print(np.size(stations.Code), \"station was found.\")\r\n else:\r\n print(np.size(stations.Code), \"stations were found.\")\r\n if np.size(stations.Code) > 0:\r\n print(\" \")\r\n print(stations)\r\n print(\" \")\r\n print(\"Output files were saved in \",\"'\" + os.path.abspath(os.getcwd())+ \"'\")\r\n print(\" \")\r\n print(\"###############################################################\")\r\n","sub_path":"ghcnm_search.py","file_name":"ghcnm_search.py","file_ext":"py","file_size_in_byte":7248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"202061907","text":"import enchant\nimport sys\n\n#get the input name from the command line arguments\ninput_name = sys.argv[1]\ninput_name = input_name.lower()\n\n#load the list of name from the text file\nname_list = enchant.PyPWL(\"name_list.txt\")\n\n#check if the name exist in the text file\nname_exist = name_list.check(input_name)\n\nprint(\"Name exist: \", name_exist)\n\nif not name_exist:\n\t#get the correction/suggestion for the input word\n\tsuggestion = name_list.suggest(input_name)\n\n\tprint(\"Input: \", input_name)\n\tprint(\"Suggestion: \", suggestion)","sub_path":"spell_checker.py","file_name":"spell_checker.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"199134372","text":"\"\"\"runs job foreign key\n\nRevision ID: 1a2a9e299a4c\nRevises: 5a583b3c6089\nCreate Date: 2015-07-15 15:51:24.750456\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '1a2a9e299a4c'\ndown_revision = '5a583b3c6089'\nbranch_labels = None\ndepends_on = None\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n \n ### Upgrade to version that contains job_id in Runs directly. 
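\n    # Added note (illustrative): the pattern here is add a nullable column,\n    # create the foreign key, then backfill it via UPDATE ... FROM; a follow-up\n    # migration could make the column NOT NULL once the backfill is verified.\n    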
###\n\n op.add_column('test_runs', sa.Column('job_id', sa.Integer(), nullable=True), schema='jsil')\n op.create_foreign_key('test_runs_job_id_fkey', 'test_runs', 'test_jobs', ['job_id'], ['id'], source_schema='jsil', referent_schema='jsil')\n # Run an update to import values of job_id into new foregin key column.\n op.execute('update jsil.test_runs set job_id = jsil.test_batches.job_id from jsil.test_batches where jsil.test_batches.id = batch_id;')\n ### end Alembic commands ###\n\n\ndef downgrade():\n op.drop_constraint('test_runs_job_id_fkey', 'test_runs', schema='jsil', type_='foreignkey')\n op.drop_column('test_runs', 'job_id')\n ### end Alembic commands ###\n","sub_path":"testing/alembic/versions/1a2a9e299a4c_runs_job_foreign_key.py","file_name":"1a2a9e299a4c_runs_job_foreign_key.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"166438046","text":"\"\"\" Reto semana 7\n Pablo Andres Mayorga\n Junio 18-2021 \"\"\"\n\n\n#importo el modulo de Funciones \nimport funciones as fun\n\n#pregunto la cantidad de turnos que se asignaran en el dia\ncantidad=int(input(\"Por favor ingrese la cantidad deturnos a asignar: \"))\n\n#envia la cantidad y llama a la funcion asignar turnos\nturno=fun.asignar_turno(cantidad)\n\nfun.datos_registros(turno)# Felicitaciones hemos llegado al final de este ciclo. Hasta pronto\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"259727433","text":"import wx\r\n\r\nclass ColorDialog(wx.Dialog):\r\n\r\n def __init__(self, mediator, parent, ID, title, size=wx.DefaultSize, pos=wx.DefaultPosition,\r\n style=wx.DEFAULT_DIALOG_STYLE):\r\n wx.Dialog.__init__(self,parent,ID, title, [200,200])\r\n \r\n self.mediator = mediator\r\n \r\n sizer = wx.BoxSizer(wx.VERTICAL)\r\n\r\n for addItem in range(mediator.size()):\r\n box = mediator.WidgetForItem(self, addItem)\r\n if box:\r\n sizer.Add(box, 0, wx.GROW|wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5)\r\n \r\n btnSizer = wx.StdDialogButtonSizer()\r\n okBtn = wx.Button(self, wx.ID_OK)\r\n btnSizer.AddButton(okBtn)\r\n cancelBtn = wx.Button(self, wx.ID_CANCEL)\r\n btnSizer.AddButton(cancelBtn)\r\n applyBtn = wx.Button(self, wx.ID_APPLY)\r\n applyBtn.SetDefault()\r\n btnSizer.AddButton(applyBtn)\r\n btnSizer.Realize()\r\n sizer.Add(btnSizer, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5)\r\n \r\n #self.Bind(wx.EVT_BUTTON, self.OnApply, okBtn)\r\n self.Bind(wx.EVT_BUTTON, self.OnApply, applyBtn)\r\n \r\n self.SetSizer(sizer)\r\n sizer.Fit(self)\r\n \r\n def OnApply(self, event,):\r\n self.mediator.Apply(event)\r\n","sub_path":"wxVTK/src/ColorDialog.py","file_name":"ColorDialog.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"156156253","text":"import bibtexparser\nimport requests\nimport sys\nimport urllib.parse as url\nimport gender_guesser.detector as gender\nfrom collections import Counter\nfrom nameparser import HumanName\n\ndef load_email():\n with open('EMAIL') as f:\n return url.quote(f.readline().strip())\n\ndef output_formatter(category, count):\n header = category.capitalize().replace('_', ' ').replace('Andy', 'Androgynous')\n return '{}: {}'.format(header, count)\n\ndef load_bibtex(filename):\n try:\n with open(filename) as bibtex_file:\n return bibtexparser.load(bibtex_file)\n except FileNotFoundError:\n 
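# sys.exit(msg) with a string argument prints msg to stderr and exits with\n        # status 1, so these handlers double as user-facing error reporting.\n        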
sys.exit('ERROR: Bibtex file, {}, not found.'.format(filename))\n    except KeyError:\n        sys.exit('ERROR: Bibtex file appears to be incorrectly formatted.\\n'\n                 'Check for missing brackets around dates or mismatched brackets.')\n\n\nrefs = load_bibtex(sys.argv[1])\nheaders = {'User-Agent': 'GenderCheck/0.1 (https://swdg.io; mailto:{})'.format(load_email())}\n\nd = gender.Detector(case_sensitive=False)\ngenders = []\n\nprint('\\n\\n\\tFound {} references.'.format(len(refs.entries)))\nprint('\\tReferences without a DOI cannot be processed.\\n\\n')\n\nfor ref in refs.entries:\n    try:\n        doi = url.quote(ref['doi'])\n        query = 'https://api.crossref.org/works/{}'.format(doi)\n        r = requests.get(query, headers=headers)\n        try:\n            for author in r.json()['message']['author']:\n                name = author['given']\n                name = HumanName(name)\n\n                # Check for people with a first initial rather than a first name\n                if name.is_an_initial(name.first):\n                    # These people may use a middle name as their given name\n                    if len(name.middle) > 1:\n                        genders.append(d.get_gender(name.middle))\n                else:\n                    # Seems like a first name we can process\n                    genders.append(d.get_gender(name.first))\n\n        except:\n            # No author record - possibly citing an organisation, or invalid DOI\n            pass\n\n    except:\n        # Call to Crossref has failed in a bad way\n        pass\n\ncounts = Counter(genders)\ntotal_count = 0\n\nfor c in counts.items():\n    print(output_formatter(*c))\n    total_count += c[1]\n\nprint('Total authors processed: {}\\n\\n'.format(total_count))\n","sub_path":"gender-refs.py","file_name":"gender-refs.py","file_ext":"py","file_size_in_byte":2340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"544348102","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[7]:\n\n\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nimport keras\nimport matplotlib.pyplot as plt\n\n\n# # Reading the data\n\n# In[106]:\n\n\nd=pd.read_csv('Wine.csv',sep=',',encoding='latin')\n\n\n# In[107]:\n\n\nd.head()\n\n\n# In[108]:\n\n\nd.isnull().sum()\n\n\n# In[109]:\n\n\nt={1:0,2:1,3:2}\n\n\n# In[110]:\n\n\nd['Customer_Segment']=d['Customer_Segment'].map(t)\n\n\n# In[205]:\n\n\nx=d.drop(columns=['Customer_Segment'])\ny=d['Customer_Segment']\n\n\n# # Splitting the data\n\n# In[206]:\n\n\nfrom sklearn.model_selection import train_test_split\nx_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.2,random_state=100)\n\n\n# In[207]:\n\n\nfrom sklearn.preprocessing import StandardScaler\ns=StandardScaler()\n\n\n# In[208]:\n\n\nx_train=s.fit_transform(x_train)\nx_test=s.transform(x_test)  # reuse the training-set statistics; do not refit the scaler on test data\n\n\n# In[209]:\n\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense\n\n\n# In[210]:\n\n\nx_train.shape\n\n\n# In[211]:\n\n\nfrom keras.utils import np_utils\nfrom sklearn.preprocessing import OneHotEncoder\nct=OneHotEncoder()\n\n\n# In[212]:\n\n\ny_train=np_utils.to_categorical(y_train)\ny_test=np_utils.to_categorical(y_test)\n\n\n# In[213]:\n\n\ny_train\n\n\n# # Building the Model\n\n# In[214]:\n\n\nclf=Sequential()\nclf.add(Dense(units=6,kernel_initializer='uniform',activation='relu',input_dim=13))\nclf.add(Dense(units=6,kernel_initializer='uniform',activation='relu'))\nclf.add(Dense(units=3,kernel_initializer='uniform',activation='softmax'))  # softmax for three mutually exclusive classes\n\n\n# In[215]:\n\n\nclf.compile(optimizer='adam',loss='categorical_crossentropy',metrics=['accuracy'])\nclf.fit(x_train,y_train,batch_size=10,epochs=40)\n\n\n# In[216]:\n\n\nclf.summary()\n\n\n# In[223]:\n\n\ny_pred=clf.predict(x_test)\n\n\n# In[227]:\n\n\nl=[]\nfor i in range(0,len(y_test)):\n    
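    # each row of y_pred holds one score per class, so argmax recovers the
    # predicted class index from the one-hot style output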
k=np.argmax(y_pred[i])\n    l.append(k)\n    \n\n\n# In[229]:\n\n\ny_pred=np.array(l)\n\n\n# # Using pytorch\n\n# In[132]:\n\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data import Dataset,DataLoader\n\n\n# In[161]:\n\n\nx=d.drop(columns=['Customer_Segment']).values\ny=d['Customer_Segment'].values\n\n\n# In[162]:\n\n\nx_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.3,random_state=100)\n\n\n# In[163]:\n\n\nx_train=s.fit_transform(x_train)\nx_test=s.transform(x_test)  # scale test data with the statistics fitted on the training set\n\n\n# In[164]:\n\n\nx_train=torch.FloatTensor(x_train)\nx_test=torch.FloatTensor(x_test)\n\n\n# In[165]:\n\n\ny_train=torch.LongTensor(y_train)\ny_test=torch.LongTensor(y_test)\n\n\n# In[166]:\n\n\ntrainloader=DataLoader(x_train,batch_size=60,shuffle=True)\ntestloader=DataLoader(x_test,batch_size=60,shuffle=False)\n\n\n# In[195]:\n\n\nclass Model(nn.Module):\n    def __init__(self,in_features=13,h1=10,h2=10,out_features=3):\n        super().__init__()\n        self.fc1=nn.Linear(in_features,h1)\n        self.fc2=nn.Linear(h1,h2)\n        self.out=nn.Linear(h2,out_features)\n        \n    def forward(self,x):\n        x=F.relu(self.fc1(x))\n        x=F.relu(self.fc2(x))\n        x=self.out(x)\n        \n        return x\n\n\n# In[196]:\n\n\nmodel=Model()\n\n\n# In[197]:\n\n\ncriterion=nn.CrossEntropyLoss()\noptimizer=torch.optim.Adam(model.parameters(),lr=0.01)\n\n\n# In[198]:\n\n\nepochs=100\nlosses=[]\n\n\nfor i in range(epochs):\n    i=i+1\n    y_pred=model.forward(x_train)\n    loss=criterion(y_pred,y_train)\n    losses.append(loss.item())  # keep a plain float so the curve can be plotted below\n    \n    \n    if i%10==1:\n        print(f'epoch:{i:2} loss:{loss.item():10.8f}')\n    optimizer.zero_grad()\n    loss.backward()\n    optimizer.step()\n    \n\n\n# In[199]:\n\n\nplt.plot(range(epochs),losses)\nplt.ylabel('Loss')\nplt.xlabel('epoch')\n\n\n# In[200]:\n\n\nwith torch.no_grad():\n    y_val=model.forward(x_test)\n    loss=criterion(y_val,y_test)\nprint(f'{loss:.8f}')\n\n\n# In[201]:\n\n\n# compare each test sample's predicted class against its own label\ncorrect=0\nwith torch.no_grad():\n    y_val=model.forward(x_test)\n    for i in range(len(y_test)):\n        if y_val[i].argmax().item()==y_test[i]:\n            correct=correct+1\nprint(f'\\n{correct} out of {len(y_test)} = {100*correct/len(y_test):.2f}% correct')\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"edureka mlp project.py","file_name":"edureka mlp project.py","file_ext":"py","file_size_in_byte":3843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"375421405","text":"# -*- coding: utf-8 -*- \nimport numpy as np \n\n\"\"\"\nkneighbors_graph(X, n_neighbors, metric)\nInput: \n    the dataset 'X'; each row is an observation\n    IMPORTANT parameter 'n_neighbors'\n    the distance function 'metric'\nOutput:\n    the graph\n    the incidence matrix G\n\"\"\"\ndef kneighbors_graph(X, n_neighbors, metric):\n    nrows = X.shape[0]\n    dist = metric\n    \n    #Generate the graph structure\n    graph = {}\n    for i in range(nrows): \n        xi = X[i, :] \n        distance_to_xi = dist(xi, X)\n        nearest_k_points = np.argsort(distance_to_xi)[1:(n_neighbors+1)] \n        xi_connect_to = []\n        \n        for point in nearest_k_points:\n            if point in graph.keys(): \n                if i not in graph[point]:\n                    xi_connect_to.append(point)\n            else:\n                xi_connect_to.append(point)\n        graph[i] = xi_connect_to\n        \n    #Find the incidence matrix\n    G = np.zeros((n_neighbors*nrows, nrows)) \n    for i in graph:\n        fill_row = i*n_neighbors\n        for j in graph[i]:\n            if j > i:\n                G[fill_row,i] = 1\n                G[fill_row,j] = -1\n            else:\n                G[fill_row,i] = -1\n                G[fill_row,j] = 1 \n            fill_row += 1 \n    G = G.astype('float32')\n    return (graph, G)\n\n\"\"\"\nInput: \n    the dataset 'X'; each row is an observation\n    the distance function 'dist'\nOutput:\n    the 
graph\n the incidence matrix G \n\"\"\"\n#def epsilon_neighbors_graph(X, dist, epsilon):\n# pass","sub_path":"K-NN-FL/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"336197297","text":"# SPDX-License-Identifier: MIT\n# SPDX-FileCopyrightText: Copyright 2019-2021 Heal Research\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom scipy import stats\nimport random, time, sys, os, json\n\nimport operon as Operon\n\nds = Operon.Dataset('../../data/Poly-10.csv', has_header=True)\ntraining_range = Operon.Range(0, ds.Rows // 2)\ntest_range = Operon.Range(ds.Rows // 2, ds.Rows)\ntarget = ds.GetVariable('Y')\ninputs = Operon.VariableCollection(v for v in ds.Variables if v.Name != target.Name)\n\ny_train = ds.Values[training_range.Start:training_range.End, target.Index]\n\nrng = Operon.RomuTrio(random.randint(1, 1000000))\n\nproblem = Operon.Problem(ds, inputs, target.Name, training_range, test_range)\nconfig = Operon.GeneticAlgorithmConfig(generations=1000, max_evaluations=1000000, local_iterations=0, population_size=1000, pool_size=1000, p_crossover=1.0, p_mutation=0.25, seed=1)\n\nselector = Operon.TournamentSelector(objective_index=0)\nselector.TournamentSize = 5\n\npset = Operon.PrimitiveSet()\npset.SetConfig(Operon.PrimitiveSet.Arithmetic | Operon.NodeType.Exp | Operon.NodeType.Log | Operon.NodeType.Sin | Operon.NodeType.Cos)\n\nminL, maxL = 1, 50\nmaxD = 10\nbtc = Operon.BalancedTreeCreator(pset, inputs, bias=0.0)\ninitializer = Operon.UniformInitializer(btc, maxD, maxL)\nmut_onepoint = Operon.OnePointMutation()\nmut_changeVar = Operon.ChangeVariableMutation(inputs)\nmut_changeFunc = Operon.ChangeFunctionMutation(pset)\nmut_replace = Operon.ReplaceSubtreeMutation(btc, maxD, maxL)\nmutation = Operon.MultiMutation()\nmutation.Add(mut_onepoint, 1)\nmutation.Add(mut_changeVar, 1)\nmutation.Add(mut_changeFunc, 1)\nmutation.Add(mut_replace, 1)\ncrossover = Operon.SubtreeCrossover(0.9, maxD, maxL)\n\nevaluator = Operon.RSquaredEvaluator(problem)\nevaluator.Budget = 1000 * 1000\nevaluator.LocalOptimizationIterations = 0\n\ngenerator = Operon.BasicOffspringGenerator(evaluator, crossover, mutation, selector, selector)\nreinserter = Operon.ReplaceWorstReinserter(0)\ngp = Operon.GeneticProgrammingAlgorithm(problem, config, initializer, generator, reinserter)\n\ngen = 0\nmax_ticks = 50\ninterval = 1 if config.Generations < max_ticks else int(np.round(config.Generations / max_ticks, 0))\ncomp = Operon.SingleObjectiveComparison(0)\nt0 = time.time()\n\ndef report():\n global gen\n best = gp.BestModel(comp)\n bestfit = best.GetFitness(0)\n sys.stdout.write('\\r')\n cursor = int(np.round(gen / config.Generations * max_ticks))\n for i in range(cursor):\n sys.stdout.write('\\u2588')\n sys.stdout.write(' ' * (max_ticks-cursor))\n sys.stdout.write(f'{100 * gen/config.Generations:.1f}%, generation {gen}/{config.Generations}, train quality: {1-bestfit:.6f}, elapsed: {time.time()-t0:.2f}s')\n sys.stdout.flush()\n gen += 1\n\ngp.Run(rng, report, threads=4)\nbest = gp.BestModel(comp)\nmodel_string = Operon.TreeFormatter.Format(best.Genotype, ds, 6)\n#print(f'\\n{model_string}')\n","sub_path":"python/examples/gp-cpp-threaded.py","file_name":"gp-cpp-threaded.py","file_ext":"py","file_size_in_byte":3082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"274129405","text":"def 
flipcheck(content):\n \"\"\"Checks a string for anger and soothes said anger\n\n Args:\n content (str): The message to be flipchecked\n\n Returns:\n putitback (str): The righted table or text\n \"\"\"\n\n # Prevent tampering with flip\n punct = \"\"\"!\"#$%&'*+,-./:;<=>?@[\\]^_`{|}~ ━─\"\"\"\n tamperdict = str.maketrans('', '', punct)\n tamperproof = content.translate(tamperdict)\n\n # Unflip\n if \"(╯°□°)╯︵\" in tamperproof:\n # For tables\n if \"┻┻\" in tamperproof:\n # Calculate table length\n length = 0\n for letter in content:\n if letter == \"━\":\n length += 1.36\n elif letter == \"─\":\n length += 1\n elif letter == \"-\":\n length += 0.50\n\n # Construct table\n putitback = \"┬\"\n\n for i in range(int(length)):\n putitback += \"─\"\n\n putitback += \"┬ ノ( ゜-゜ノ)\"\n\n return putitback\n\n # For text\n else:\n # Create dictionary for flipping text\n flipdict = str.maketrans(\n 'abcdefghijklmnopqrstuvwxyzɐqɔpǝɟbɥıظʞןɯuodbɹsʇnʌʍxʎz😅🙃😞😟😠😡☹🙁😱😨😰😦😧😢😓😥😭',\n 'ɐqɔpǝɟbɥıظʞןɯuodbɹsʇnʌʍxʎzabcdefghijklmnopqrstuvwxyz😄🙂🙂🙂🙂🙂🙂😀😀🙂😄🙂🙂😄😄😄😁'\n )\n\n # Construct flipped text\n flipstart = content.index('︵')\n flipped = content[flipstart+1:]\n flipped = str.lower(flipped).translate(flipdict)\n\n putitback = ''.join(list(reversed(list(flipped))))\n\n putitback += \"ノ( ゜-゜ノ)\"\n\n return putitback\n else:\n return False\n","sub_path":"venv/Lib/site-packages/modis/discord_modis/modules/tableflip/api_flipcheck.py","file_name":"api_flipcheck.py","file_ext":"py","file_size_in_byte":1848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"559339225","text":"#! /usr/bin/env python\n# coding=utf-8\nimport time\nimport libpy3auboi5\nimport logging\nfrom logging.handlers import RotatingFileHandler\nimport os\nfrom math import pi\n\n# 创建一个logger\n#logger = logging.getLogger()\n\nlogger = logging.getLogger('main.robotcontrol')\n\n\ndef logger_init():\n # Log等级总开关\n logger.setLevel(logging.INFO)\n\n # 创建log目录\n if not os.path.exists('./logfiles'):\n os.mkdir('./logfiles')\n\n # 创建一个handler,用于写入日志文件\n logfile = './logfiles/robot-ctl-python.log'\n\n # 以append模式打开日志文件\n # fh = logging.FileHandler(logfile, mode='a')\n fh = RotatingFileHandler(logfile, mode='a', maxBytes=1024*1024*50, backupCount=30)\n\n # 输出到file的log等级的开关\n fh.setLevel(logging.INFO)\n\n # 再创建一个handler,用于输出到控制台\n ch = logging.StreamHandler()\n\n # 输出到console的log等级的开关\n ch.setLevel(logging.INFO)\n\n # 定义handler的输出格式\n # formatter = logging.Formatter(\"%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s\")\n formatter = logging.Formatter(\"%(asctime)s [%(thread)u] %(levelname)s: %(message)s\")\n\n # 为文件输出设定格式\n fh.setFormatter(formatter)\n\n # 控制台输出设定格式\n ch.setFormatter(formatter)\n\n # 设置文件输出到logger\n logger.addHandler(fh)\n\n # 设置控制台输出到logger\n logger.addHandler(ch)\n\n\nclass RobotEventType:\n RobotEvent_armCanbusError = 0 # 机械臂CAN总线错误\n RobotEvent_remoteHalt = 1 # 机械臂停止\n RobotEvent_remoteEmergencyStop = 2 # 机械臂远程急停\n RobotEvent_jointError = 3 # 关节错误\n RobotEvent_forceControl = 4 # 力控制\n RobotEvent_exitForceControl = 5 # 退出力控制\n RobotEvent_softEmergency = 6 # 软急停\n RobotEvent_exitSoftEmergency = 7 # 退出软急停\n RobotEvent_collision = 8 # 碰撞\n RobotEvent_collisionStatusChanged = 9 # 碰撞状态改变\n RobotEvent_tcpParametersSucc = 10 # 工具动力学参数设置成功\n RobotEvent_powerChanged = 11 # 机械臂电源开关状态改变\n RobotEvent_ArmPowerOff = 12 # 机械臂电源关闭\n RobotEvent_mountingPoseChanged = 13 # 安装位置发生改变\n RobotEvent_encoderError = 14 # 编码器错误\n RobotEvent_encoderLinesError = 15 # 编码器线数不一致\n 
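    # The codes below cover runtime alarms, controller lifecycle notifications and
    # external board/safety IO signals; robot_event_callback treats every event type
    # outside the NoError tuple as a fault.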
RobotEvent_singularityOverspeed = 16 # 奇异点超速\n RobotEvent_currentAlarm = 17 # 机械臂电流异常\n RobotEvent_toolioError = 18 # 机械臂工具端错误\n RobotEvent_robotStartupPhase = 19 # 机械臂启动阶段\n RobotEvent_robotStartupDoneResult = 20 # 机械臂启动完成结果\n RobotEvent_robotShutdownDone = 21 # 机械臂关机结果\n RobotEvent_atTrackTargetPos = 22 # 机械臂轨迹运动到位信号通知\n RobotEvent_SetPowerOnDone = 23 # 设置电源状态完成\n RobotEvent_ReleaseBrakeDone = 24 # 机械臂刹车释放完成\n RobotEvent_robotControllerStateChaned = 25 # 机械臂控制状态改变\n RobotEvent_robotControllerError = 26 # 机械臂控制错误----一般是算法规划出现问题时返回\n RobotEvent_socketDisconnected = 27 # socket断开连接\n RobotEvent_overSpeed = 28 # 超速\n RobotEvent_algorithmException = 29 # 机械臂算法异常\n RobotEvent_boardIoPoweron = 30 # 外部上电信号\n RobotEvent_boardIoRunmode = 31 # 联动/手动\n RobotEvent_boardIoPause = 32 # 外部暂停信号\n RobotEvent_boardIoStop = 33 # 外部停止信号\n RobotEvent_boardIoHalt = 34 # 外部关机信号\n RobotEvent_boardIoEmergency = 35 # 外部急停信号\n RobotEvent_boardIoRelease_alarm = 36 # 外部报警解除信号\n RobotEvent_boardIoOrigin_pose = 37 # 外部回原点信号\n RobotEvent_boardIoAutorun = 38 # 外部自动运行信号\n RobotEvent_safetyIoExternalEmergencyStope = 39 # 外部急停输入01\n RobotEvent_safetyIoExternalSafeguardStope = 40 # 外部保护停止输入02\n RobotEvent_safetyIoReduced_mode = 41 # 缩减模式输入\n RobotEvent_safetyIoSafeguard_reset = 42 # 防护重置\n RobotEvent_safetyIo3PositionSwitch = 43 # 三态开关1\n RobotEvent_safetyIoOperationalMode = 44 # 操作模式\n RobotEvent_safetyIoManualEmergencyStop = 45 # 示教器急停01\n RobotEvent_safetyIoSystemStop = 46 # 系统停止输入\n RobotEvent_alreadySuspended = 47 # 机械臂暂停\n RobotEvent_alreadyStopped = 48 # 机械臂停止\n RobotEvent_alreadyRunning = 49 # 机械臂运行\n RobotEvent_None = 999999\n\n # 非错误事件\n NoError = (RobotEvent_forceControl,\n RobotEvent_exitForceControl,\n RobotEvent_tcpParametersSucc,\n RobotEvent_powerChanged,\n RobotEvent_mountingPoseChanged,\n RobotEvent_robotStartupPhase,\n RobotEvent_robotStartupDoneResult,\n RobotEvent_robotShutdownDone,\n RobotEvent_SetPowerOnDone,\n RobotEvent_ReleaseBrakeDone,\n RobotEvent_atTrackTargetPos,\n RobotEvent_robotControllerStateChaned,\n RobotEvent_robotControllerError,\n RobotEvent_algorithmException,\n RobotEvent_alreadyStopped,\n RobotEvent_alreadyRunning,\n RobotEvent_boardIoPoweron,\n RobotEvent_boardIoRunmode,\n RobotEvent_boardIoPause,\n RobotEvent_boardIoStop,\n RobotEvent_boardIoHalt,\n RobotEvent_boardIoRelease_alarm,\n RobotEvent_boardIoOrigin_pose,\n RobotEvent_boardIoAutorun,\n RobotEvent_safetyIoExternalEmergencyStope,\n RobotEvent_safetyIoExternalSafeguardStope,\n RobotEvent_safetyIoReduced_mode,\n RobotEvent_safetyIoSafeguard_reset,\n RobotEvent_safetyIo3PositionSwitch,\n RobotEvent_safetyIoOperationalMode,\n RobotEvent_safetyIoManualEmergencyStop,\n RobotEvent_safetyIoSystemStop,\n RobotEvent_alreadySuspended,\n RobotEvent_alreadyStopped,\n RobotEvent_alreadyRunning\n )\n\n UserPostEvent = (RobotEvent_robotControllerError,\n RobotEvent_safetyIoExternalSafeguardStope,\n RobotEvent_safetyIoSystemStop\n )\n ClearErrorEvent = (RobotEvent_armCanbusError,\n RobotEvent_remoteEmergencyStop,\n RobotEvent_jointError,\n RobotEvent_collision,\n RobotEvent_collisionStatusChanged,\n RobotEvent_encoderError,\n RobotEvent_encoderLinesError,\n RobotEvent_currentAlarm,\n RobotEvent_softEmergency,\n RobotEvent_exitSoftEmergency\n )\n\n def __init__(self):\n pass\n\n\nclass RobotErrorType:\n RobotError_SUCC = 0 # 无错误\n RobotError_Base = 2000\n RobotError_RSHD_INIT_FAILED = RobotError_Base + 1 # 库初始化失败\n RobotError_RSHD_UNINIT = RobotError_Base + 2 # 库未初始化\n RobotError_NoLink = RobotError_Base + 3 # 无链接\n RobotError_Move = 
RobotError_Base + 4  # arm motion error\n    RobotError_ControlError = RobotError_Base + RobotEventType.RobotEvent_robotControllerError\n    RobotError_LOGIN_FAILED = RobotError_Base + 5  # login to the arm failed\n    RobotError_NotLogin = RobotError_Base + 6  # not logged in to the arm\n    RobotError_ERROR_ARGS = RobotError_Base + 7  # invalid arguments\n\n    def __init__(self):\n        pass\n\n\nclass RobotEvent:\n    def __init__(self, event_type=RobotEventType.RobotEvent_None, event_code=0, event_msg=''):\n        self.event_type = event_type\n        self.event_code = event_code\n        self.event_msg = event_msg\n\n\nclass RobotError(Exception):\n    # Subclasses Exception so that raise_error() and check_event() can raise instances.\n    def __init__(self, error_type=RobotErrorType.RobotError_SUCC, error_code=0, error_msg=''):\n        Exception.__init__(self, error_msg)\n        self.error_type = error_type\n        self.error_code = error_code\n        self.error_msg = error_msg\n\n    def __str__(self):\n        return \"RobotError type{0} code={1} msg={2}\".format(self.error_type, self.error_code, self.error_msg)\n\n\nclass RobotDefaultParameters:\n    # default tool dynamics parameters\n    tool_dynamics = {\"position\": (0.0, 0.0, 0.0), \"payload\": 1.0, \"inertia\": (0.0, 0.0, 0.0, 0.0, 0.0, 0.0)}\n\n    # default collision grade\n    collision_grade = 6\n\n    def __init__(self):\n        pass\n\n    def __str__(self):\n        return \"Robot Default parameters, tool_dynamics:{0}, collision_grade:{1}\".format(self.tool_dynamics,\n                                                                                        self.collision_grade)\n\n\nclass RobotMoveTrackType:\n    # arc\n    ARC_CIR = 2\n    # cartesian track\n    CARTESIAN_MOVEP = 3\n    # The four cubic-spline track types below all have discontinuous acceleration at the\n    # start and end points, so they are not suitable for the new joint drive version.\n    # Cubic spline through the control points; run time is optimised automatically.\n    # Orientation changes are not supported yet.\n    CARTESIAN_CUBICSPLINE = 4\n    # Uniform cubic B-spline through the control points; the time interval must be set.\n    # Orientation changes are not supported yet.\n    CARTESIAN_UBSPLINEINTP = 5\n    # cubic spline curve in joint space\n    JIONT_CUBICSPLINE = 6\n    # usable for trajectory playback\n    JOINT_UBSPLINEINTP = 7\n\n    def __init__(self):\n        pass\n\n\nclass RobotIOType:\n    # control-box IO\n    ControlBox_DI = 0\n    ControlBox_DO = 1\n    ControlBox_AI = 2\n    ControlBox_AO = 3\n    # user IO\n    User_DI = 4\n    User_DO = 5\n    User_AI = 6\n    User_AO = 7\n\n    def __init__(self):\n        pass\n\n\nclass RobotToolIoName:\n    tool_io_0 = \"T_DI/O_00\"\n    tool_io_1 = \"T_DI/O_01\"\n    tool_io_2 = \"T_DI/O_02\"\n    tool_io_3 = \"T_DI/O_03\"\n\n    tool_ai_0 = \"T_AI_00\"\n    tool_ai_1 = \"T_AI_01\"\n\n    def __init__(self):\n        pass\n\n\nclass RobotUserIoName:\n    # control-box user DI\n    user_di_00 = \"U_DI_00\"\n    user_di_01 = \"U_DI_01\"\n    user_di_02 = \"U_DI_02\"\n    user_di_03 = \"U_DI_03\"\n    user_di_04 = \"U_DI_04\"\n    user_di_05 = \"U_DI_05\"\n    user_di_06 = \"U_DI_06\"\n    user_di_07 = \"U_DI_07\"\n    user_di_10 = \"U_DI_10\"\n    user_di_11 = \"U_DI_11\"\n    user_di_12 = \"U_DI_12\"\n    user_di_13 = \"U_DI_13\"\n    user_di_14 = \"U_DI_14\"\n    user_di_15 = \"U_DI_15\"\n    user_di_16 = \"U_DI_16\"\n    user_di_17 = \"U_DI_17\"\n\n    # control-box user DO\n    user_do_00 = \"U_DO_00\"\n    user_do_01 = \"U_DO_01\"\n    user_do_02 = \"U_DO_02\"\n    user_do_03 = \"U_DO_03\"\n    user_do_04 = \"U_DO_04\"\n    user_do_05 = \"U_DO_05\"\n    user_do_06 = \"U_DO_06\"\n    user_do_07 = \"U_DO_07\"\n    user_do_10 = \"U_DO_10\"\n    user_do_11 = \"U_DO_11\"\n    user_do_12 = \"U_DO_12\"\n    user_do_13 = \"U_DO_13\"\n    user_do_14 = \"U_DO_14\"\n    user_do_15 = \"U_DO_15\"\n    user_do_16 = \"U_DO_16\"\n    user_do_17 = \"U_DO_17\"\n\n    # control-box analog IO\n    user_ai_00 = \"VI0\"\n    user_ai_01 = \"VI1\"\n    user_ai_02 = \"VI2\"\n    user_ai_03 = \"VI3\"\n\n    user_ao_00 = \"VO0\"\n    user_ao_01 = \"VO1\"\n    user_ao_02 = \"VO2\"\n    user_ao_03 = \"VO3\"\n\n    def __init__(self):\n        pass\n\n\nclass RobotStatus:\n    # the arm is stopped\n    Stopped = 0\n    # the arm is running\n    Running = 1\n    # the arm is paused\n    Paused = 2\n    # the arm has resumed\n    Resumed = 3\n\n    def __init__(self):\n        pass\n\n\nclass RobotRunningMode:\n    # simulated arm mode\n    RobotModeSimulator = 0\n    # real arm mode\n    RobotModeReal = 1\n\n    def __init__(self):\n        pass\n\n\nclass RobotToolPowerType:\n    
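    # Tool-flange power supply options used with set_tool_power_type() and
    # get_tool_power_type(): 0 V (off), 12 V, or 24 V.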
OUT_0V = 0\n OUT_12V = 1\n OUT_24V = 2\n\n def __init__(self):\n pass\n\n\nclass RobotToolIoAddr:\n TOOL_DIGITAL_IO_0 = 0\n TOOL_DIGITAL_IO_1 = 1\n TOOL_DIGITAL_IO_2 = 2\n TOOL_DIGITAL_IO_3 = 3\n\n def __init__(self):\n pass\n\n\nclass RobotCoordType:\n # 基座坐标系\n Robot_Base_Coordinate = 0\n # 末端坐标系\n Robot_End_Coordinate = 1\n # 用户坐标系\n Robot_World_Coordinate = 2\n\n def __init__(self):\n pass\n\n\nclass RobotCoordCalMethod:\n CoordCalMethod_xOy = 0\n CoordCalMethod_yOz = 1\n CoordCalMethod_zOx = 2\n CoordCalMethod_xOxy = 3\n CoordCalMethod_xOxz = 4\n CoordCalMethod_yOyx = 5\n CoordCalMethod_yOyz = 6\n CoordCalMethod_zOzx = 7\n CoordCalMethod_zOzy = 8\n\n def __init__(self):\n pass\n\n\nclass RobotToolDigitalIoDir:\n # 输入\n IO_IN = 0\n # 输出\n IO_OUT = 1\n\n def __init__(self):\n pass\n\n\nclass Auboi5Robot:\n # 客户端个数\n __client_count = 0\n\n def __init__(self):\n self.rshd = -1\n self.connected = False\n self.last_error = RobotError()\n self.last_event = RobotEvent()\n Auboi5Robot.__client_count += 1\n\n def __del__(self):\n Auboi5Robot.__client_count -= 1\n self.uninitialize()\n logger.info(\"client_count={0}\".format(Auboi5Robot.__client_count))\n\n def __str__(self):\n return \"RSHD={0}, connected={1}\".format(self.rshd, self.connected)\n\n @staticmethod\n def get_local_time():\n \"\"\"\"\n * FUNCTION: get_local_time\n * DESCRIPTION: 获取系统当前时间\n * INPUTS: 无输入\n * OUTPUTS:\n * RETURNS: 输出系统当前时间字符串\n * NOTES:\n \"\"\"\n return time.strftime(\"%b %d %Y %H:%M:%S\", time.localtime(time.time()))\n\n def robot_event_callback(self, event):\n \"\"\"\"\n * FUNCTION: robot_event_callback\n * DESCRIPTION: 机械臂事件\n * INPUTS: 无输入\n * OUTPUTS:\n * RETURNS: 系统事件回调函数\n * NOTES:\n \"\"\"\n print(\"event={0}\".format(event))\n if event['type'] not in RobotEventType.NoError:\n self.last_error = RobotError(event['type'], event['code'], event['content'])\n else:\n self.last_event = RobotEvent(event['type'], event['code'], event['content'])\n\n @staticmethod\n def raise_error(error_type, error_code, error_msg):\n \"\"\"\"\n * FUNCTION: raise_error\n * DESCRIPTION: 抛出异常事件\n * INPUTS: 无输入\n * OUTPUTS:\n * RETURNS: 无\n * NOTES:\n \"\"\"\n raise RobotError(error_type, error_code, error_msg)\n\n def check_event(self):\n \"\"\"\"\n * FUNCTION: check_event\n * DESCRIPTION: 检查机械臂是否发生异常事件\n * INPUTS: input\n * OUTPUTS: output\n * RETURNS: void\n * NOTES: 如果接收到的是异常事件,则函数抛出异常事件\n \"\"\"\n if self.last_error.error_type != RobotErrorType.RobotError_SUCC:\n raise self.last_error\n if self.rshd == -1 or not self.connected:\n self.raise_error(RobotErrorType.RobotError_NoLink, 0, \"no socket link\")\n\n @staticmethod\n def initialize():\n \"\"\"\"\n * FUNCTION: initialize\n * DESCRIPTION: 初始化机械臂控制库\n * INPUTS:\n * OUTPUTS:\n * RETURNS: 成功返回: RobotError.RobotError_SUCC\n * 失败返回: 其他\n * NOTES:\n \"\"\"\n result = libpy3auboi5.initialize()\n if result == RobotErrorType.RobotError_SUCC:\n return RobotErrorType.RobotError_SUCC\n else:\n return RobotErrorType.RobotError_RSHD_INIT_FAILED\n\n @staticmethod\n def uninitialize():\n \"\"\"\"\n * FUNCTION: uninitialize\n * DESCRIPTION: 反初始化机械臂控制库\n * INPUTS: input\n * OUTPUTS: output\n * RETURNS: 成功返回: RobotError.RobotError_SUCC\n * 失败返回: 其他\n * NOTES:\n \"\"\"\n return libpy3auboi5.uninitialize()\n\n def create_context(self):\n \"\"\"\"\n * FUNCTION: create_context\n * DESCRIPTION: 创建机械臂控制上下文句柄\n * INPUTS:\n * OUTPUTS:\n * RETURNS: 成功返回: RSHD\n * NOTES:\n \"\"\"\n self.rshd = libpy3auboi5.create_context()\n return self.rshd\n\n def get_context(self):\n \"\"\"\"\n * FUNCTION: get_context\n 
* DESCRIPTION: 获取机械臂当前控制上下文\n * INPUTS:\n * OUTPUTS:\n * RETURNS: 上下文句柄RSHD\n * NOTES:\n \"\"\"\n return self.rshd\n\n def connect(self, ip='localhost', port=8899):\n \"\"\"\"\n * FUNCTION: connect\n * DESCRIPTION: 链接机械臂服务器\n * INPUTS: ip 机械臂服务器地址\n * port 端口号\n * OUTPUTS:\n * RETURNS: 成功返回: RobotError.RobotError_SUCC\n * 失败返回: 其他\n *\n * NOTES:\n \"\"\"\n logger.info(\"ip={0}, port={1}\".format(ip, port))\n if self.rshd >= 0:\n if not self.connected:\n if libpy3auboi5.login(self.rshd, ip, port) == 0:\n self.connected = True\n time.sleep(0.5)\n return RobotErrorType.RobotError_SUCC\n else:\n logger.error(\"login failed!\")\n return RobotErrorType.RobotError_LOGIN_FAILED\n else:\n logger.info(\"already connected.\")\n return RobotErrorType.RobotError_SUCC\n else:\n logger.error(\"RSHD uninitialized!!!\")\n return RobotErrorType.RobotError_RSHD_UNINIT\n\n def disconnect(self):\n \"\"\"\"\n * FUNCTION: disconnect\n * DESCRIPTION: 断开机械臂服务器链接\n * INPUTS:\n * OUTPUTS:\n * RETURNS: 成功返回: RobotError.RobotError_SUCC\n * 失败返回: 其他\n * NOTES:\n \"\"\"\n if self.rshd >= 0 and self.connected:\n libpy3auboi5.logout(self.rshd)\n self.connected = False\n time.sleep(0.5)\n return RobotErrorType.RobotError_SUCC\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return RobotErrorType.RobotError_NotLogin\n\n def robot_startup(self, collision=RobotDefaultParameters.collision_grade,\n tool_dynamics=RobotDefaultParameters.tool_dynamics):\n \"\"\"\n * FUNCTION: robot_startup\n * DESCRIPTION: 启动机械臂\n * INPUTS: collision:碰撞等级范围(0~10) 缺省:6\n * tool_dynamics:运动学参数\n * tool_dynamics = 位置,单位(m) :{\"position\": (0.0, 0.0, 0.0),\n * 负载,单位(kg): \"payload\": 1.0,\n * 惯量: \"inertia\": (0.0, 0.0, 0.0, 0.0, 0.0, 0.0)}\n *\n * OUTPUTS:\n * RETURNS: 成功返回: RobotError.RobotError_SUCC\n * 失败返回: 其他\n * NOTES:\n \"\"\"\n #self.check_event()\n if self.rshd >= 0 and self.connected:\n result = libpy3auboi5.robot_startup(self.rshd, collision, tool_dynamics)\n time.sleep(0.5)\n self.set_robot_event_callback(self.robot_event_callback)\n return result\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return RobotErrorType.RobotError_NotLogin\n\n def robot_shutdown(self):\n \"\"\"\n * FUNCTION: robot_shutdown\n * DESCRIPTION: 关闭机械臂\n * INPUTS:\n * OUTPUTS:\n * RETURNS: 成功返回: RobotError.RobotError_SUCC\n * 失败返回: 其他\n * NOTES:\n \"\"\"\n if self.rshd >= 0 and self.connected:\n result = libpy3auboi5.robot_shutdown(self.rshd)\n time.sleep(0.5)\n return result\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return RobotErrorType.RobotError_NotLogin\n\n def enable_robot_event(self):\n self.check_event()\n if self.rshd >= 0 and self.connected:\n self.set_robot_event_callback(self.robot_event_callback)\n return RobotErrorType.RobotError_SUCC\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return RobotErrorType.RobotError_NotLogin\n\n def init_profile(self):\n \"\"\"\"\n * FUNCTION: init_profile\n * DESCRIPTION: 初始化机械臂控制全局属性\n * INPUTS:\n * OUTPUTS:\n * RETURNS: 成功返回: RobotError.RobotError_SUCC\n * 失败返回: 其他\n *\n * NOTES: 调用成功后,系统会自动清理掉之前设置的用户坐标系,\n * 速度,加速度等等属性\n \"\"\"\n if self.rshd >= 0 and self.connected:\n return libpy3auboi5.init_global_move_profile(self.rshd)\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return RobotErrorType.RobotError_NotLogin\n\n def set_joint_maxacc(self, joint_maxacc=(1.0, 1.0, 1.0, 1.0, 1.0, 1.0)):\n \"\"\"\n * FUNCTION: set_joint_maxacc\n * DESCRIPTION: 设置六个关节的最大加速度\n * INPUTS: joint_maxacc:六个关节的最大加速度,单位(rad/s)\n * OUTPUTS:\n * RETURNS: 成功返回: 
RobotError.RobotError_SUCC\n * 失败返回: 其他\n * NOTES:\n \"\"\"\n #self.check_event()\n if self.rshd >= 0 and self.connected:\n return libpy3auboi5.set_joint_maxacc(self.rshd, joint_maxacc)\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return RobotErrorType.RobotError_NotLogin\n\n def get_joint_maxacc(self):\n \"\"\"U_DO_00\n * FUNCTION: get_joint_maxacc\n * DESCRIPTION: 获取六个关节的最大加速度\n * INPUTS:\n * OUTPUTS:\n * RETURNS: 成功返回: 六个关节的最大加速度单位(rad/s^2)\n * 失败返回: None\n * NOTES:\n \"\"\"\n #self.check_event()\n if self.rshd >= 0 and self.connected:\n return libpy3auboi5.get_joint_maxacc(self.rshd)\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return None\n\n def set_joint_maxvelc(self, joint_maxvelc=(1.0, 1.0, 1.0, 1.0, 1.0, 1.0)):\n \"\"\"\n * FUNCTION: set_joint_maxvelc\n * DESCRIPTION: 设置六个关节的最大速度\n * INPUTS: joint_maxvelc:六个关节的最大速度,单位(rad/s)\n * OUTPUTS:\n * RETURNS: 成功返回: RobotError.RobotError_SUCC\n * 失败返回: 其他\n * NOTES:\n \"\"\"\n #self.check_event()\n if self.rshd >= 0 and self.connected:\n return libpy3auboi5.set_joint_maxvelc(self.rshd, joint_maxvelc)\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return RobotErrorType.RobotError_NotLogin\n\n def get_joint_maxvelc(self):\n \"\"\"\n * FUNCTION: get_joint_maxvelc\n * DESCRIPTION: 获取六个关节的最大速度\n * INPUTS:\n * OUTPUTS:\n * RETURNS: 成功返回: 六个关节的最大速度(rad/s)\n * 失败返回: None\n * NOTES:\n \"\"\"\n #self.check_event()\n if self.rshd >= 0 and self.connected:\n return libpy3auboi5.get_joint_maxvelc(self.rshd)\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return None\n\n def set_end_max_line_acc(self, end_maxacc=0.1):\n \"\"\"\n * FUNCTION: set_end_max_line_acc\n * DESCRIPTION: 设置机械臂末端最大线加速度\n * INPUTS: end_maxacc:末端最大加线速度,单位(m/s^2)\n * OUTPUTS:\n * RETURNS: 成功返回: RobotError.RobotError_SUCC\n * 失败返回: 其他\n * NOTES:\n \"\"\"\n #self.check_event()\n if self.rshd >= 0 and self.connected:\n return libpy3auboi5.set_end_max_line_acc(self.rshd, end_maxacc)\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return RobotErrorType.RobotError_NotLogin\n\n def get_end_max_line_acc(self):\n \"\"\"\n * FUNCTION: get_end_max_line_acc\n * DESCRIPTION: 获取机械臂末端最大线加速度\n * INPUTS:\n * OUTPUTS:\n * RETURNS: 成功返回: 机械臂末端最大加速度,单位(m/s^2)\n * 失败返回: None\n * NOTES:\n \"\"\"\n #self.check_event()\n if self.rshd >= 0 and self.connected:\n return libpy3auboi5.get_end_max_line_acc(self.rshd)\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return None\n\n def set_end_max_line_velc(self, end_maxvelc=0.1):\n \"\"\"\n * FUNCTION: set_end_max_line_velc\n * DESCRIPTION: 设置机械臂末端最大线速度\n * INPUTS: end_maxacc:末端最大线速度,单位(m/s)\n * OUTPUTS:\n * RETURNS: 成功返回: RobotError.RobotError_SUCC\n * 失败返回: 其他\n * NOTES:\n \"\"\"\n #self.check_event()\n if self.rshd >= 0 and self.connected:\n return libpy3auboi5.set_end_max_line_velc(self.rshd, end_maxvelc)\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return RobotErrorType.RobotError_NotLogin\n\n def get_end_max_line_velc(self):\n \"\"\"\n * FUNCTION: get_end_max_line_velc\n * DESCRIPTION: 获取机械臂末端最大线速度\n * INPUTS:\n * OUTPUTS:\n * RETURNS: 成功返回: 机械臂末端最大速度,单位(m/s)\n * 失败返回: None\n * NOTES:\n \"\"\"\n #self.check_event()\n if self.rshd >= 0 and self.connected:\n return libpy3auboi5.get_end_max_line_velc(self.rshd)\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return None\n\n def set_end_max_angle_acc(self, end_maxacc=0.1):\n \"\"\"\n * FUNCTION: set_end_max_angle_acc\n * DESCRIPTION: 设置机械臂末端最大角加速度\n * INPUTS: 
end_maxacc: maximum end angular acceleration, unit (rad/s^2)\n        * OUTPUTS:\n        * RETURNS: on success: RobotError.RobotError_SUCC\n        *          on failure: other values\n        * NOTES:\n        \"\"\"\n        #self.check_event()\n        if self.rshd >= 0 and self.connected:\n            return libpy3auboi5.set_end_max_angle_acc(self.rshd, end_maxacc)\n        else:\n            logger.warn(\"RSHD uninitialized or not login!!!\")\n            return RobotErrorType.RobotError_NotLogin\n\n    def get_end_max_angle_acc(self):\n        \"\"\"\n        * FUNCTION: get_end_max_angle_acc\n        * DESCRIPTION: get the maximum angular acceleration of the arm end\n        * INPUTS:\n        * OUTPUTS:\n        * RETURNS: on success: the maximum end angular acceleration, unit (rad/s^2)\n        *          on failure: None\n        * NOTES:\n        \"\"\"\n        #self.check_event()\n        if self.rshd >= 0 and self.connected:\n            return libpy3auboi5.get_end_max_angle_acc(self.rshd)\n        else:\n            logger.warn(\"RSHD uninitialized or not login!!!\")\n            return None\n\n    def set_end_max_angle_velc(self, end_maxvelc=0.1):\n        \"\"\"\n        * FUNCTION: set_end_max_angle_velc\n        * DESCRIPTION: set the maximum angular velocity of the arm end\n        * INPUTS: end_maxvelc: maximum end angular velocity, unit (rad/s)\n        * OUTPUTS:\n        * RETURNS: on success: RobotError.RobotError_SUCC\n        *          on failure: other values\n        * NOTES:\n        \"\"\"\n        #self.check_event()\n        if self.rshd >= 0 and self.connected:\n            # assuming libpy3auboi5 exposes the angle variant here, mirroring\n            # set_end_max_angle_acc above\n            return libpy3auboi5.set_end_max_angle_velc(self.rshd, end_maxvelc)\n        else:\n            logger.warn(\"RSHD uninitialized or not login!!!\")\n            return RobotErrorType.RobotError_NotLogin\n\n    def get_end_max_angle_velc(self):\n        \"\"\"\n        * FUNCTION: get_end_max_angle_velc\n        * DESCRIPTION: get the maximum angular velocity of the arm end\n        * INPUTS:\n        * OUTPUTS:\n        * RETURNS: on success: the maximum end angular velocity, unit (rad/s)\n        *          on failure: None\n        * NOTES:\n        \"\"\"\n        #self.check_event()\n        if self.rshd >= 0 and self.connected:\n            # assuming libpy3auboi5 exposes the angle variant here, mirroring\n            # get_end_max_angle_acc above\n            return libpy3auboi5.get_end_max_angle_velc(self.rshd)\n        else:\n            logger.warn(\"RSHD uninitialized or not login!!!\")\n            return None\n\n    def move_to_target_in_cartesian(self, pos, rpy_xyz):\n        \"\"\"\n        * FUNCTION: move_to_target_in_cartesian\n        * DESCRIPTION: joint-move the arm to the target position and orientation,\n        *              given Cartesian coordinates and Euler angles\n        * INPUTS: pos: position (x, y, z), unit (m)\n        *         rpy: Euler angles (rx, ry, rz), unit (degrees)\n        * OUTPUTS:\n        * RETURNS: on success: RobotError.RobotError_SUCC\n        *          on failure: other values\n        * NOTES:\n        \"\"\"\n        #self.check_event()\n        if self.rshd >= 0 and self.connected:\n            # degrees -> radians\n            rpy_xyz = [i / 180.0 * pi for i in rpy_xyz]\n            # Euler angles to quaternion\n            ori = libpy3auboi5.rpy_to_quaternion(self.rshd, rpy_xyz)\n\n            #logger.info(\"rpy=========={0}\".format(rpy_xyz))\n            #logger.info(\"rpy--->ori=========={0}\".format(ori))\n\n            #ppp = libpy3auboi5.quaternion_to_rpy(self.rshd, ori)\n            #logger.info(\"ori---->rpy=========={0}\".format(ppp))\n\n            # inverse kinematics from the current waypoint to get the joint angles\n            joint_radian = libpy3auboi5.get_current_waypoint(self.rshd)\n            # joint_radian = [0, 0, 0, 0, 0, 0]\n            #logger.info(joint_radian)\n            ik_result = libpy3auboi5.inverse_kin(self.rshd, joint_radian['joint'], pos, ori)\n\n            logger.info(\"ik_result====>{0}\".format(ik_result))\n\n            # joint move to the target position\n            result = libpy3auboi5.move_joint(self.rshd, ik_result[\"joint\"])\n            if result != RobotErrorType.RobotError_SUCC:\n                self.raise_error(RobotErrorType.RobotError_Move, result, \"move error\")\n            else:\n                return RobotErrorType.RobotError_SUCC\n        else:\n            logger.warn(\"RSHD uninitialized or not login!!!\")\n            return RobotErrorType.RobotError_NotLogin\n\n    def move_joint(self, joint_radian=(0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000)):\n        \"\"\"\n        * FUNCTION: move_joint\n        * DESCRIPTION: joint move\n        * INPUTS: joint_radian: the six joint angles, unit (rad)\n        * OUTPUTS:\n        * RETURNS: on success: RobotError.RobotError_SUCC\n        *          on failure: other values\n        * NOTES:\n        \"\"\"\n        #self.check_event()\n        if self.rshd >= 0 and self.connected:\n            result = libpy3auboi5.move_joint(self.rshd, joint_radian)\n            if result != RobotErrorType.RobotError_SUCC:\n                self.raise_error(RobotErrorType.RobotError_Move, result, 
\"move error\")\n else:\n return RobotErrorType.RobotError_SUCC\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return RobotErrorType.RobotError_NotLogin\n\n def move_line(self, joint_radian=(0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000)):\n \"\"\"\n * FUNCTION: move_line\n * DESCRIPTION: 机械臂保持当前姿态直线运动\n * INPUTS: joint_radian:六个关节的关节角,单位(rad)\n * OUTPUTS:\n * RETURNS: 成功返回: RobotError.RobotError_SUCC\n * 失败返回: 其他\n * NOTES:\n \"\"\"\n #self.check_event()\n if self.rshd >= 0 and self.connected:\n result = libpy3auboi5.move_line(self.rshd, joint_radian)\n if result != RobotErrorType.RobotError_SUCC:\n self.raise_error(RobotErrorType.RobotError_Move, result, \"move error\")\n else:\n return RobotErrorType.RobotError_SUCC\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return RobotErrorType.RobotError_NotLogin\n\n def move_rotate(self, user_coord, rotate_axis, rotate_angle):\n \"\"\"\n * FUNCTION: move_rotate\n * DESCRIPTION: 保持当前位置变换姿态做旋转运动\n * INPUTS: user_coord:用户坐标系\n * user_coord = {'coord_type': 2,\n * 'calibrate_method': 0,\n * 'calibrate_points':\n * {\"point1\": (0.0, 0.0, 0.0, 0.0, 0.0, 0.0),\n * \"point2\": (0.0, 0.0, 0.0, 0.0, 0.0, 0.0),\n * \"point3\": (0.0, 0.0, 0.0, 0.0, 0.0, 0.0)},\n * 'tool_desc':\n * {\"pos\": (0.0, 0.0, 0.0),\n * \"ori\": (1.0, 0.0, 0.0, 0.0)}\n * }\n * rotate_axis:转轴(x,y,z) 例如:(1,0,0)表示沿Y轴转动\n * rotate_angle:旋转角度 单位(rad)\n * OUTPUTS:\n * RETURNS: 成功返回: RobotError.RobotError_SUCC\n * 失败返回: 其他\n * NOTES:\n \"\"\"\n #self.check_event()\n if self.rshd >= 0 and self.connected:\n return libpy3auboi5.move_rotate(self.rshd, user_coord, rotate_axis, rotate_angle)\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return RobotErrorType.RobotError_NotLogin\n\n def clear_offline_track(self):\n \"\"\"\n * FUNCTION: clear_offline_track\n * DESCRIPTION: 清理服务器上的非在线轨迹运动数据\n * INPUTS:\n * OUTPUTS:\n * RETURNS: 成功返回: RobotError.RobotError_SUCC\n * 失败返回: 其他\n * NOTES:\n \"\"\"\n #self.check_event()\n if self.rshd >= 0 and self.connected:\n return libpy3auboi5.clear_offline_track(self.rshd)\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return RobotErrorType.RobotError_NotLogin\n\n def append_offline_track_waypoint(self, waypoints):\n \"\"\"\n * FUNCTION: append_offline_track_waypoint\n * DESCRIPTION: 向服务器添加非在线轨迹运动路点\n * INPUTS: waypoints 非在线轨迹运动路点元祖(可包含小于3000个路点), 单位:弧度\n * 例如:((0.0, 0.0, 0.0, 0.0, 0.0, 0.0),\n * (0.0,-0.000001,-0.000001,0.000001,-0.000001, 0.0))\n * OUTPUTS:\n * RETURNS: 成功返回: RobotError.RobotError_SUCC\n * 失败返回: 其他\n * NOTES:\n \"\"\"\n #self.check_event()\n if self.rshd >= 0 and self.connected:\n return libpy3auboi5.append_offline_track_waypoint(self.rshd, waypoints)\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return RobotErrorType.RobotError_NotLogin\n\n def append_offline_track_file(self, track_file):\n \"\"\"\n * FUNCTION: append_offline_track_file\n * DESCRIPTION: 向服务器添加非在线轨迹运动路点文件\n * INPUTS: 路点文件全路径,路点文件的每一行包含六个关节的关节角(弧度),用逗号隔开\n * OUTPUTS:\n * RETURNS: 成功返回: RobotError.RobotError_SUCC\n * 失败返回: 其他\n * NOTES:\n \"\"\"\n #self.check_event()\n if self.rshd >= 0 and self.connected:\n return libpy3auboi5.append_offline_track_file(self.rshd, track_file)\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return RobotErrorType.RobotError_NotLogin\n\n def startup_offline_track(self):\n \"\"\"\n * FUNCTION: startup_offline_track\n * DESCRIPTION: 通知服务器启动非在线轨迹运动\n * INPUTS:\n * OUTPUTS:\n * RETURNS: 成功返回: RobotError.RobotError_SUCC\n * 失败返回: 其他\n * 
NOTES:\n        \"\"\"\n        #self.check_event()\n        if self.rshd >= 0 and self.connected:\n            return libpy3auboi5.startup_offline_track(self.rshd)\n        else:\n            logger.warn(\"RSHD uninitialized or not login!!!\")\n            return RobotErrorType.RobotError_NotLogin\n\n    def stop_offline_track(self):\n        \"\"\"\n        * FUNCTION: stop_offline_track\n        * DESCRIPTION: notify the server to stop offline track motion\n        * INPUTS:\n        * OUTPUTS:\n        * RETURNS: on success: RobotError.RobotError_SUCC\n        *          on failure: other values\n        * NOTES:\n        \"\"\"\n        #self.check_event()\n        if self.rshd >= 0 and self.connected:\n            return libpy3auboi5.stop_offline_track(self.rshd)\n        else:\n            logger.warn(\"RSHD uninitialized or not login!!!\")\n            return RobotErrorType.RobotError_NotLogin\n\n    def enter_tcp2canbus_mode(self):\n        \"\"\"\n        * FUNCTION: enter_tcp2canbus_mode\n        * DESCRIPTION: notify the server to enter TCP2CANBUS pass-through mode\n        * INPUTS:\n        * OUTPUTS:\n        * RETURNS: on success: RobotError.RobotError_SUCC\n        *          on failure: other values\n        * NOTES:\n        \"\"\"\n        self.check_event()\n        if self.rshd >= 0 and self.connected:\n            return libpy3auboi5.enter_tcp2canbus_mode(self.rshd)\n        else:\n            logger.warn(\"RSHD uninitialized or not login!!!\")\n            return RobotErrorType.RobotError_NotLogin\n\n    def leave_tcp2canbus_mode(self):\n        \"\"\"\n        * FUNCTION: leave_tcp2canbus_mode\n        * DESCRIPTION: notify the server to leave TCP2CANBUS pass-through mode\n        * INPUTS:\n        * OUTPUTS:\n        * RETURNS: on success: RobotError.RobotError_SUCC\n        *          on failure: other values\n        * NOTES:\n        \"\"\"\n        self.check_event()\n        if self.rshd >= 0 and self.connected:\n            return libpy3auboi5.leave_tcp2canbus_mode(self.rshd)\n        else:\n            logger.warn(\"RSHD uninitialized or not login!!!\")\n            return RobotErrorType.RobotError_NotLogin\n\n    def set_waypoint_to_canbus(self, joint_radian=(0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000)):\n        \"\"\"\n        * FUNCTION: set_waypoint_to_canbus\n        * DESCRIPTION: pass a motion waypoint through to CANBUS\n        * INPUTS: joint_radian: the six joint angles, unit (rad)\n        * OUTPUTS:\n        * RETURNS: on success: RobotError.RobotError_SUCC\n        *          on failure: other values\n        * NOTES:\n        \"\"\"\n        #self.check_event()\n        if self.rshd >= 0 and self.connected:\n            return libpy3auboi5.set_waypoint_to_canbus(self.rshd, joint_radian)\n        else:\n            logger.warn(\"RSHD uninitialized or not login!!!\")\n            return RobotErrorType.RobotError_NotLogin\n\n    def remove_all_waypoint(self):\n        \"\"\"\n        * FUNCTION: remove_all_waypoint\n        * DESCRIPTION: remove all global waypoints that have been set\n        * INPUTS:\n        * OUTPUTS:\n        * RETURNS: on success: RobotError.RobotError_SUCC\n        *          on failure: other values\n        * NOTES:\n        \"\"\"\n        #self.check_event()\n        if self.rshd >= 0 and self.connected:\n            return libpy3auboi5.remove_all_waypoint(self.rshd)\n        else:\n            logger.warn(\"RSHD uninitialized or not login!!!\")\n            return RobotErrorType.RobotError_NotLogin\n\n    def add_waypoint(self, joint_radian=(0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000)):\n        \"\"\"\n        * FUNCTION: add_waypoint\n        * DESCRIPTION: add a global waypoint for track motion\n        * INPUTS: joint_radian: the six joint angles, unit (rad)\n        * OUTPUTS:\n        * RETURNS: on success: RobotError.RobotError_SUCC\n        *          on failure: other values\n        * NOTES:\n        \"\"\"\n        #self.check_event()\n        if self.rshd >= 0 and self.connected:\n            return libpy3auboi5.add_waypoint(self.rshd, joint_radian)\n        else:\n            logger.warn(\"RSHD uninitialized or not login!!!\")\n            return RobotErrorType.RobotError_NotLogin\n\n    def set_blend_radius(self, blend_radius=0.01):\n        \"\"\"\n        * FUNCTION: set_blend_radius\n        * DESCRIPTION: set the blend radius\n        * INPUTS: blend_radius: blend radius, unit (m), valid range 0.01~0.05\n        * OUTPUTS:\n        * RETURNS: on success: RobotError.RobotError_SUCC\n        *          on failure: other values\n        * NOTES:\n        \"\"\"\n        #self.check_event()\n        if self.rshd >= 0 and self.connected:\n            if 0.01 <= blend_radius <= 0.05:\n                return libpy3auboi5.set_blend_radius(self.rshd, blend_radius)\n            else:\n                logger.warn(\"blend radius value range must be 
0.01~0.05\")\n return RobotErrorType.RobotError_ERROR_ARGS\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return RobotErrorType.RobotError_NotLogin\n\n def set_circular_loop_times(self, circular_count=1):\n \"\"\"\n * FUNCTION: set_circular_loop_times\n * DESCRIPTION: 设置圆运动圈数\n * INPUTS: circular_count:圆的运动圈数\n * OUTPUTS:\n * RETURNS: 成功返回: RobotError.RobotError_SUCC\n * 失败返回: 其他\n *\n * NOTES: 当circular_count大于0时,机械臂进行圆运动circular_count次\n * 当circular_count等于0时,机械臂进行圆弧轨迹运动\n \"\"\"\n self.check_event()\n if self.rshd >= 0 and self.connected:\n return libpy3auboi5.set_circular_loop_times(self.rshd, circular_count)\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return RobotErrorType.RobotError_NotLogin\n\n def set_user_coord(self, user_coord):\n \"\"\"\n * FUNCTION: set_user_coord\n * DESCRIPTION: 设置用户坐标系\n * INPUTS: user_coord:用户坐标系\n * user_coord = {'coord_type': RobotCoordType.Robot_World_Coordinate,\n * 'calibrate_method': RobotCoordCalMethod.CoordCalMethod_xOy,\n * 'calibrate_points':\n * {\"point1\": (0.0, 0.0, 0.0, 0.0, 0.0, 0.0),\n * \"point2\": (0.0, 0.0, 0.0, 0.0, 0.0, 0.0),\n * \"point3\": (0.0, 0.0, 0.0, 0.0, 0.0, 0.0)},\n * 'tool_desc':\n * {\"pos\": (0.0, 0.0, 0.0),\n * \"ori\": (1.0, 0.0, 0.0, 0.0)}\n * }\n * OUTPUTS:\n * RETURNS: 成功返回: RobotError.RobotError_SUCC\n * 失败返回: 其他\n * NOTES:\n \"\"\"\n #self.check_event()\n if self.rshd >= 0 and self.connected:\n return libpy3auboi5.set_user_coord(self.rshd, user_coord)\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return RobotErrorType.RobotError_NotLogin\n\n def set_base_coord(self):\n \"\"\"\n * FUNCTION: set_base_coord\n * DESCRIPTION: 设置基座坐标系\n * INPUTS:\n * OUTPUTS:\n * RETURNS: 成功返回: RobotError.RobotError_SUCC\n * 失败返回: 其他\n * NOTES:\n \"\"\"\n #self.check_event()\n if self.rshd >= 0 and self.connected:\n return libpy3auboi5.set_base_coord(self.rshd)\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return RobotErrorType.RobotError_NotLogin\n\n def check_user_coord(self, user_coord):\n \"\"\"\n * FUNCTION: check_user_coord\n * DESCRIPTION: 检查用户坐标系参数设置是否合理\n * INPUTS: user_coord:用户坐标系\n * user_coord = {'coord_type': 2,\n * 'calibrate_method': 0,\n * 'calibrate_points':\n * {\"point1\": (0.0, 0.0, 0.0, 0.0, 0.0, 0.0),\n * \"point2\": (0.0, 0.0, 0.0, 0.0, 0.0, 0.0),\n * \"point3\": (0.0, 0.0, 0.0, 0.0, 0.0, 0.0)},\n * 'tool_desc':\n * {\"pos\": (0.0, 0.0, 0.0),\n * \"ori\": (1.0, 0.0, 0.0, 0.0)}\n * }\n * OUTPUTS:\n * RETURNS: 合理返回: RobotError.RobotError_SUCC\n * 不合理返回: 其他\n * NOTES:\n \"\"\"\n return libpy3auboi5.check_user_coord(self.rshd, user_coord)\n\n def set_relative_offset_on_base(self, relative_pos, relative_ori):\n \"\"\"\n * FUNCTION: set_relative_offset_on_base\n * DESCRIPTION: 设置基于基座标系运动偏移量\n * INPUTS: relative_pos=(x, y, z) 相对位移,单位(m)\n * relative_ori=(w,x,y,z) 相对姿态\n * OUTPUTS:\n * RETURNS: 成功返回: RobotError.RobotError_SUCC\n * 失败返回: 其他\n * NOTES:\n \"\"\"\n #self.check_event()\n if self.rshd >= 0 and self.connected:\n return libpy3auboi5.set_relative_offset_on_base(self.rshd, relative_pos, relative_ori)\n\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return RobotErrorType.RobotError_NotLogin\n\n def set_relative_offset_on_user(self, relative_pos, relative_ori, user_coord):\n \"\"\"\n * FUNCTION: set_relative_offset_on_user\n * DESCRIPTION: 设置基于用户标系运动偏移量\n * INPUTS: relative_pos=(x, y, z) 相对位移,单位(m)\n * relative_ori=(w,x,y,z) 目标姿态\n * user_coord:用户坐标系\n * user_coord = {'coord_type': 2,\n * 'calibrate_method': 0,\n * 
'calibrate_points':\n * {\"point1\": (0.0, 0.0, 0.0, 0.0, 0.0, 0.0),\n * \"point2\": (0.0, 0.0, 0.0, 0.0, 0.0, 0.0),\n * \"point3\": (0.0, 0.0, 0.0, 0.0, 0.0, 0.0)},\n * 'tool_desc':\n * {\"pos\": (0.0, 0.0, 0.0),\n * \"ori\": (1.0, 0.0, 0.0, 0.0)}\n * }\n * OUTPUTS:\n * RETURNS: 成功返回: RobotError.RobotError_SUCC\n * 失败返回: 其他\n * NOTES:\n \"\"\"\n #self.check_event()\n if self.rshd >= 0 and self.connected:\n return libpy3auboi5.set_relative_offset_on_user(self.rshd, relative_pos, relative_ori, user_coord)\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return RobotErrorType.RobotError_NotLogin\n\n def set_no_arrival_ahead(self):\n \"\"\"\n * FUNCTION: set_no_arrival_ahead\n * DESCRIPTION: 取消提前到位设置\n * INPUTS:\n *\n * OUTPUTS:\n * RETURNS: 成功返回: RobotError.RobotError_SUCC\n * 失败返回: 其他\n * NOTES:\n \"\"\"\n #self.check_event()\n if self.rshd >= 0 and self.connected:\n result = libpy3auboi5.set_no_arrival_ahead(self.rshd)\n if result != 0:\n self.raise_error(RobotErrorType.RobotError_Move, result, \"set no arrival ahead error\")\n else:\n return RobotErrorType.RobotError_SUCC\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return RobotErrorType.RobotError_NotLogin\n\n def set_arrival_ahead_distance(self, distance=0.0):\n \"\"\"\n * FUNCTION: set_arrival_ahead_distance\n * DESCRIPTION: 设置距离模式下的提前到位距离\n * INPUTS: distance 提前到位距离 单位(米)\n *\n * OUTPUTS:\n * RETURNS: 成功返回: RobotError.RobotError_SUCC\n * 失败返回: 其他\n * NOTES:\n \"\"\"\n #self.check_event()\n if self.rshd >= 0 and self.connected:\n result = libpy3auboi5.set_arrival_ahead_distance(self.rshd, distance)\n if result != 0:\n self.raise_error(RobotErrorType.RobotError_Move, result, \"set arrival ahead distance error\")\n else:\n return RobotErrorType.RobotError_SUCC\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return RobotErrorType.RobotError_NotLogin\n\n def set_arrival_ahead_time(self, sec=0.0):\n \"\"\"\n * FUNCTION: set_arrival_ahead_time\n * DESCRIPTION: 设置时间模式下的提前到位时间\n * INPUTS: sec 提前到位时间 单位(秒)\n *\n * OUTPUTS:\n * RETURNS: 成功返回: RobotError.RobotError_SUCC\n * 失败返回: 其他\n * NOTES:\n \"\"\"\n #self.check_event()\n if self.rshd >= 0 and self.connected:\n result = libpy3auboi5.set_arrival_ahead_time(self.rshd, sec)\n if result != 0:\n self.raise_error(RobotErrorType.RobotError_Move, result, \"set arrival ahead time error\")\n else:\n return RobotErrorType.RobotError_SUCC\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return RobotErrorType.RobotError_NotLogin\n\n def set_arrival_ahead_blend(self, distance=0.0):\n \"\"\"\n * FUNCTION: set_arrival_ahead_blend\n * DESCRIPTION: 设置距离模式下交融半径距离\n * INPUTS: blend 交融半径 单位(米)\n *\n * OUTPUTS:\n * RETURNS: 成功返回: RobotError.RobotError_SUCC\n * 失败返回: 其他\n * NOTES:\n \"\"\"\n #self.check_event()\n if self.rshd >= 0 and self.connected:\n result = libpy3auboi5.set_arrival_ahead_blend(self.rshd, distance)\n if result != 0:\n self.raise_error(RobotErrorType.RobotError_Move, result, \"set arrival ahead blend error\")\n else:\n return RobotErrorType.RobotError_SUCC\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return RobotErrorType.RobotError_NotLogin\n\n def move_track(self, track):\n \"\"\"\n * FUNCTION: move_track\n * DESCRIPTION: 轨迹运动\n * INPUTS: track 轨迹类型,包括如下:\n * 圆弧运动RobotMoveTrackType.ARC_CIR\n * 轨迹运动RobotMoveTrackType.CARTESIAN_MOVEP\n *\n * OUTPUTS:\n * RETURNS: 成功返回: RobotError.RobotError_SUCC\n * 失败返回: 其他\n * NOTES:\n \"\"\"\n #self.check_event()\n if self.rshd >= 0 and self.connected:\n result = 
libpy3auboi5.move_track(self.rshd, track)\n if result != 0:\n self.raise_error(RobotErrorType.RobotError_Move, result, \"move error\")\n else:\n return RobotErrorType.RobotError_SUCC\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return RobotErrorType.RobotError_NotLogin\n\n def forward_kin(self, joint_radian=(0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000)):\n \"\"\"\n * FUNCTION: forward_kin\n * DESCRIPTION: 正解\n * INPUTS: joint_radian:六个关节的关节角,单位(rad)\n * OUTPUTS:\n * RETURNS: 成功返回: 关节正解结果,结果为详见NOTES\n * 失败返回: None\n *\n * NOTES: 六个关节角 {'joint': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n * 位置 'pos': [-0.06403157614989634, -0.4185973810159096, 0.816883228463401],\n * 姿态 'ori': [-0.11863209307193756, 0.3820514380931854, 0.0, 0.9164950251579285]}\n \"\"\"\n if self.rshd >= 0:\n return libpy3auboi5.forward_kin(self.rshd, joint_radian)\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return None\n\n def inverse_kin(self, joint_radian=(0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000),\n pos=(0.0, 0.0, 0.0), ori=(1.0, 0.0, 0.0, 0.0)):\n \"\"\"\n * FUNCTION: forward_kin\n * DESCRIPTION: 逆解\n * INPUTS: joint_radian:起始点六个关节的关节角,单位(rad)\n * pos位置(x, y, z)单位(m)\n * ori位姿(w, x, y, z)\n * OUTPUTS:\n * RETURNS: 成功返回: 关节正解结果,结果为详见NOTES\n * 失败返回: None\n *\n * NOTES: 六个关节角 {'joint': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n * 位置 'pos': [-0.06403157614989634, -0.4185973810159096, 0.816883228463401],\n * 姿态 'ori': [-0.11863209307193756, 0.3820514380931854, 0.0, 0.9164950251579285]}\n \"\"\"\n if self.rshd >= 0:\n return libpy3auboi5.inverse_kin(self.rshd, joint_radian, pos, ori)\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return None\n\n def base_to_user(self, pos, ori, user_coord, user_tool):\n \"\"\"\n * FUNCTION: base_to_user\n * DESCRIPTION: 用户坐标系转基座坐标系\n * INPUTS: pos:基座标系下的位置(x, y, z)单位(m)\n * ori:基座标系下的姿态(w, x, y, z)\n * user_coord:用户坐标系\n * user_coord = {'coord_type': 2,\n * 'calibrate_method': 0,\n * 'calibrate_points':\n * {\"point1\": (0.0, 0.0, 0.0, 0.0, 0.0, 0.0),\n * \"point2\": (0.0, 0.0, 0.0, 0.0, 0.0, 0.0),\n * \"point3\": (0.0, 0.0, 0.0, 0.0, 0.0, 0.0)},\n * 'tool_desc':\n * {\"pos\": (0.0, 0.0, 0.0),\n * \"ori\": (1.0, 0.0, 0.0, 0.0)}\n * }\n * user_tool用户工具描述\n * user_tool={\"pos\": (x, y, z), \"ori\": (w, x, y, z)}\n * OUTPUTS:\n * RETURNS: 成功返回: 返回位置和姿态{\"pos\": (x, y, z), \"ori\": (w, x, y, z)}\n * 失败返回: None\n *\n * NOTES:\n \"\"\"\n return libpy3auboi5.base_to_user(self.rshd, pos, ori, user_coord, user_tool)\n\n def user_to_base(self, pos, ori, user_coord, user_tool):\n \"\"\"\n * FUNCTION: user_to_base\n * DESCRIPTION: 用户坐标系转基座标系\n * INPUTS: pos:用户标系下的位置(x, y, z)单位(m)\n * ori:用户标系下的姿态(w, x, y, z)\n * user_coord:用户坐标系\n * user_coord = {'coord_type': 2,\n * 'calibrate_method': 0,\n * 'calibrate_points':\n * {\"point1\": (0.0, 0.0, 0.0, 0.0, 0.0, 0.0),\n * \"point2\": (0.0, 0.0, 0.0, 0.0, 0.0, 0.0),\n * \"point3\": (0.0, 0.0, 0.0, 0.0, 0.0, 0.0)},\n * 'tool_desc':\n * {\"pos\": (0.0, 0.0, 0.0),\n * \"ori\": (1.0, 0.0, 0.0, 0.0)}\n * }\n * user_tool用户工具描述\n * user_tool={\"pos\": (x, y, z), \"ori\": (w, x, y, z)}\n * OUTPUTS:\n * RETURNS: 成功返回: 返回位置和姿态{\"pos\": (x, y, z), \"ori\": (w, x, y, z)}\n * 失败返回: None\n *\n * NOTES:\n \"\"\"\n return libpy3auboi5.user_to_base(self.rshd, pos, ori, user_coord, user_tool)\n\n def base_to_base_additional_tool(self, flange_pos, flange_ori, user_tool):\n \"\"\"\n * FUNCTION: base_to_base_additional_tool\n * DESCRIPTION: 基坐标系转基座标得到工具末端点的位置和姿态\n * INPUTS: pos:基于基座标系的法兰盘中心位置信息(x, y, z)单位(m)\n * 
ori:基于基座标系的姿态信息(w, x, y, z)\n * user_tool用户工具描述\n * user_tool={\"pos\": (x, y, z), \"ori\": (w, x, y, z)}\n * OUTPUTS:\n * RETURNS: 成功返回: 返回基于基座标系的工具末端位置位置和姿态信息{\"pos\": (x, y, z), \"ori\": (w, x, y, z)}\n * 失败返回: None\n *\n * NOTES:\n \"\"\"\n return libpy3auboi5.base_to_base_additional_tool(self.rshd, flange_pos, flange_ori, user_tool)\n\n def rpy_to_quaternion(self, rpy):\n \"\"\"\n * FUNCTION: rpy_to_quaternion\n * DESCRIPTION: 欧拉角转四元数\n * INPUTS: rpy:欧拉角(rx, ry, rz),单位(m)\n * OUTPUTS:\n * RETURNS: 成功返回: 四元数结果,结果为详见NOTES\n * 失败返回: None\n *\n * NOTES: 四元素(w, x, y, z)\n \"\"\"\n if self.rshd >= 0:\n return libpy3auboi5.rpy_to_quaternion(self.rshd, rpy)\n else:\n logger.warn(\"RSHD uninitialized !!!\")\n return None\n\n def quaternion_to_rpy(self, ori):\n \"\"\"\n * FUNCTION: quaternion_to_rpy\n * DESCRIPTION: 四元数转欧拉角\n * INPUTS: 四元数(w, x, y, z)\n * OUTPUTS:\n * RETURNS: 成功返回: 欧拉角结果,结果为详见NOTES\n * 失败返回: None\n *\n * NOTES: rpy:欧拉角(rx, ry, rz),单位(m)\n \"\"\"\n if self.rshd >= 0:\n return libpy3auboi5.quaternion_to_rpy(self.rshd, ori)\n else:\n logger.warn(\"RSHD uninitialized !!!\")\n return None\n\n def set_tool_end_param(self, tool_end_param):\n \"\"\"\n * FUNCTION: set_tool_end_param\n * DESCRIPTION: 设置末端工具参数\n * INPUTS: 末端工具参数: tool_end_param={\"pos\": (x, y, z), \"ori\": (w, x, y, z)}\n * OUTPUTS:\n * RETURNS: 成功返回: RobotError.RobotError_SUCC\n * 失败返回: 其他\n *\n * NOTES:\n \"\"\"\n #self.check_event()\n if self.rshd >= 0 and self.connected:\n return libpy3auboi5.set_tool_end_param(self.rshd, tool_end_param)\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return None\n\n def set_none_tool_dynamics_param(self):\n \"\"\"\n * FUNCTION: set_none_tool_dynamics_param\n * DESCRIPTION: 设置无工具的动力学参数\n * INPUTS:\n * OUTPUTS:\n * RETURNS: 成功返回: RobotError.RobotError_SUCC\n * 失败返回: 其他\n *\n * NOTES:\n \"\"\"\n #self.check_event()\n if self.rshd >= 0 and self.connected:\n return libpy3auboi5.set_none_tool_dynamics_param(self.rshd)\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return None\n\n def set_tool_dynamics_param(self, tool_dynamics):\n \"\"\"\n * FUNCTION: set_tool_end_param\n * DESCRIPTION: 设置工具的动力学参数\n * INPUTS: tool_dynamics:运动学参数\n * tool_dynamics = 位置,单位(m) :{\"position\": (0.0, 0.0, 0.0),\n * 负载,单位(kg): \"payload\": 1.0,\n * 惯量: \"inertia\": (0.0, 0.0, 0.0, 0.0, 0.0, 0.0)}\n * OUTPUTS:\n * RETURNS: 成功返回: RobotError.RobotError_SUCC\n * 失败返回: 其他\n *\n * NOTES:\n \"\"\"\n #self.check_event()\n if self.rshd >= 0 and self.connected:\n return libpy3auboi5.set_tool_dynamics_param(self.rshd, tool_dynamics)\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return None\n\n def get_tool_dynamics_param(self):\n \"\"\"\n * FUNCTION: get_tool_dynamics_param\n * DESCRIPTION: 获取末端工具参数\n * INPUTS:\n * OUTPUTS:\n * RETURNS: 成功返回: 运动学参数\n * tool_dynamics = 位置,单位(m) :{\"position\": (0.0, 0.0, 0.0),\n * 负载,单位(kg): \"payload\": 1.0,\n * 惯量: \"inertia\": (0.0, 0.0, 0.0, 0.0, 0.0, 0.0)}\n *\n * 失败返回: None\n *\n * NOTES:\n \"\"\"\n #self.check_event()\n if self.rshd >= 0 and self.connected:\n return libpy3auboi5.get_tool_dynamics_param(self.rshd)\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return None\n\n def set_none_tool_kinematics_param(self):\n \"\"\"\n * FUNCTION: set_none_tool_kinematics_param\n * DESCRIPTION: 设置无工具运动学参数 \n * INPUTS:\n * OUTPUTS:\n * RETURNS: 成功返回: RobotError.RobotError_SUCC\n * 失败返回: 其他\n *\n * NOTES:\n \"\"\"\n #self.check_event()\n if self.rshd >= 0 and self.connected:\n return 
libpy3auboi5.set_none_tool_kinematics_param(self.rshd)\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return None\n\n def set_tool_kinematics_param(self, tool_end_param):\n \"\"\"\n * FUNCTION: set_tool_kinematics_param\n * DESCRIPTION: 设置工具的运动学参数 \n * INPUTS: 末端工具参数: tool_end_param={\"pos\": (x, y, z), \"ori\": (w, x, y, z)}\n * OUTPUTS:\n * RETURNS: 成功返回: RobotError.RobotError_SUCC\n * 失败返回: 其他\n *\n * NOTES:\n \"\"\"\n #self.check_event()\n if self.rshd >= 0 and self.connected:\n return libpy3auboi5.set_tool_kinematics_param(self.rshd, tool_end_param)\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return None\n\n def get_tool_kinematics_param(self):\n \"\"\"\n * FUNCTION: set_tool_kinematics_param\n * DESCRIPTION: 设置工具的运动学参数 \n * INPUTS:\n * OUTPUTS:\n * RETURNS: 成功返回: 工具的运动学参数\n * tool_end_param={\"pos\": (x, y, z), \"ori\": (w, x, y, z)}\n *\n * 失败返回: None\n * NOTES:\n \"\"\"\n #self.check_event()\n if self.rshd >= 0 and self.connected:\n return libpy3auboi5.get_tool_kinematics_param(self.rshd)\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return None\n\n def move_stop(self):\n \"\"\"\n * FUNCTION: move_stop\n * DESCRIPTION: 停止机械臂运动\n * INPUTS:\n * OUTPUTS:\n * RETURNS: 成功返回: RobotError.RobotError_SUCC\n * 失败返回: 其他\n * NOTES:\n \"\"\"\n #self.check_event()\n if self.rshd >= 0 and self.connected:\n return libpy3auboi5.move_stop(self.rshd)\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return RobotErrorType.RobotError_NotLogin\n\n def move_pause(self):\n \"\"\"\n * FUNCTION: move_pause\n * DESCRIPTION: 暂停机械臂运动\n * INPUTS:\n * OUTPUTS:\n * RETURNS: 成功返回: RobotError.RobotError_SUCC\n * 失败返回: 其他\n * NOTES:\n \"\"\"\n #self.check_event()\n if self.rshd >= 0 and self.connected:\n return libpy3auboi5.move_pause(self.rshd)\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return RobotErrorType.RobotError_NotLogin\n\n def move_continue(self):\n \"\"\"\n * FUNCTION: move_continue\n * DESCRIPTION: 暂停后回复机械臂运动\n * INPUTS:\n * OUTPUTS:\n * RETURNS: 成功返回: RobotError.RobotError_SUCC\n * 失败返回: 其他\n * NOTES:\n \"\"\"\n #self.check_event()\n if self.rshd >= 0 and self.connected:\n return libpy3auboi5.move_continue(self.rshd)\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return RobotErrorType.RobotError_NotLogin\n\n def collision_recover(self):\n \"\"\"\n * FUNCTION: collision_recover\n * DESCRIPTION: 机械臂碰撞后恢复\n * INPUTS:\n * OUTPUTS:\n * RETURNS: 成功返回: RobotError.RobotError_SUCC\n * 失败返回: 其他\n * NOTES:\n \"\"\"\n #self.check_event()\n if self.rshd >= 0 and self.connected:\n return libpy3auboi5.collision_recover(self.rshd)\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return RobotErrorType.RobotError_NotLogin\n\n def get_robot_state(self):\n \"\"\"\n * FUNCTION: get_robot_state\n * DESCRIPTION: 获取机械臂当前状态\n * INPUTS:\n * OUTPUTS:\n * RETURNS: 成功返回: 机械臂当前状态\n * 机械臂当前停止:RobotStatus.Stopped\n * 机械臂当前运行:RobotStatus.Running\n * 机械臂当前暂停:RobotStatus.Paused\n * 机械臂当前恢复:RobotStatus.Resumed\n *\n * 失败返回: None\n * NOTES:\n \"\"\"\n #self.check_event()\n if self.rshd >= 0 and self.connected:\n return libpy3auboi5.get_robot_state(self.rshd)\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return None\n\n def set_work_mode(self, mode=0):\n \"\"\"\n * FUNCTION: set_work_mode\n * DESCRIPTION: 设置机械臂服务器工作模式\n * INPUTS: mode:服务器工作模式\n * 机械臂仿真模式:RobotRunningMode.RobotModeSimulator\n * 机械臂真实模式:RobotRunningMode.RobotModeReal\n * OUTPUTS:\n * RETURNS: 成功返回: RobotError.RobotError_SUCC\n * 失败返回: 
\n def set_work_mode(self, mode=0):\n \"\"\"\n * FUNCTION: set_work_mode\n * DESCRIPTION: 设置机械臂服务器工作模式\n * INPUTS: mode:服务器工作模式\n * 机械臂仿真模式:RobotRunningMode.RobotModeSimulator\n * 机械臂真实模式:RobotRunningMode.RobotModeReal\n * OUTPUTS:\n * RETURNS: 成功返回: RobotError.RobotError_SUCC\n * 失败返回: 其他\n * NOTES:\n \"\"\"\n #self.check_event()\n if self.rshd >= 0 and self.connected:\n return libpy3auboi5.set_work_mode(self.rshd, mode)\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return RobotErrorType.RobotError_LOGIN_FAILED\n\n def get_work_mode(self):\n \"\"\"\n * FUNCTION: get_work_mode\n * DESCRIPTION: 获取机械臂服务器当前工作模式\n * INPUTS:\n * OUTPUTS:\n * RETURNS: 成功返回: 服务器工作模式\n * 机械臂仿真模式:RobotRunningMode.RobotModeSimulator\n * 机械臂真实模式:RobotRunningMode.RobotModeReal\n *\n * 失败返回: None\n * NOTES:\n \"\"\"\n #self.check_event()\n if self.rshd >= 0 and self.connected:\n return libpy3auboi5.get_work_mode(self.rshd)\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return None\n\n def set_collision_class(self, grade=6):\n \"\"\"\n * FUNCTION: set_collision_class\n * DESCRIPTION: 设置机械臂碰撞等级\n * INPUTS: grade:碰撞等级,范围(0~10)\n * OUTPUTS:\n * RETURNS: 成功返回: RobotError.RobotError_SUCC\n * 失败返回: 其他\n * NOTES:\n \"\"\"\n #self.check_event()\n if self.rshd >= 0 and self.connected:\n return libpy3auboi5.set_collision_class(self.rshd, grade)\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return RobotErrorType.RobotError_LOGIN_FAILED\n\n def is_have_real_robot(self):\n \"\"\"\n * FUNCTION: is_have_real_robot\n * DESCRIPTION: 获取当前是否已经链接真实机械臂\n * INPUTS:\n * OUTPUTS:\n * RETURNS: 成功返回: 1:存在 0:不存在\n * 失败返回: 其他\n * NOTES:\n \"\"\"\n #self.check_event()\n if self.rshd >= 0 and self.connected:\n return libpy3auboi5.is_have_real_robot(self.rshd)\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return None\n\n def is_online_mode(self):\n \"\"\"\n * FUNCTION: is_online_mode\n * DESCRIPTION: 当前机械臂是否运行在联机模式\n * INPUTS:\n * OUTPUTS:\n * RETURNS: 成功返回: 1:在 0:不在\n * 失败返回: 其他\n * NOTES:\n \"\"\"\n #self.check_event()\n if self.rshd >= 0 and self.connected:\n return libpy3auboi5.is_online_mode(self.rshd)\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return None\n\n def is_online_master_mode(self):\n \"\"\"\n * FUNCTION: is_online_master_mode\n * DESCRIPTION: 当前机械臂是否运行在联机主模式\n * INPUTS:\n * OUTPUTS:\n * RETURNS: 成功返回: 1:主模式 0:从模式\n * 失败返回: 其他\n * NOTES:\n \"\"\"\n #self.check_event()\n if self.rshd >= 0 and self.connected:\n return libpy3auboi5.is_online_master_mode(self.rshd)\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return None\n\n def get_joint_status(self):\n \"\"\"\n * FUNCTION: get_joint_status\n * DESCRIPTION: 获取机械臂当前状态信息\n * INPUTS:\n * OUTPUTS:\n * RETURNS: 成功返回: 返回六个关节状态,包括:电流,电压,温度\n * {'joint1': {'current': 电流(毫安), 'voltage': 电压(伏特), 'temperature': 温度(摄氏度)},\n * 'joint2': {'current': 0, 'voltage': 0.0, 'temperature': 0},\n * 'joint3': {'current': 0, 'voltage': 0.0, 'temperature': 0},\n * 'joint4': {'current': 0, 'voltage': 0.0, 'temperature': 0},\n * 'joint5': {'current': 0, 'voltage': 0.0, 'temperature': 0},\n * 'joint6': {'current': 0, 'voltage': 0.0, 'temperature': 0}}\n *\n * 失败返回: None\n * NOTES:\n \"\"\"\n #self.check_event()\n if self.rshd >= 0 and self.connected:\n return libpy3auboi5.get_joint_status(self.rshd)\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return None\n\n
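 # Usage sketch (added comment, illustrative only): the per-joint dict returned\n # by get_joint_status() can be scanned, e.g. for joints running hot (60 C is\n # an arbitrary example threshold, not a vendor limit):\n # status = robot.get_joint_status()\n # hot = [name for name, s in status.items() if s['temperature'] > 60]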
\n def get_current_waypoint(self):\n \"\"\"\n * FUNCTION: get_current_waypoint\n * DESCRIPTION: 获取机械臂当前位置信息\n * INPUTS:\n * OUTPUTS:\n * RETURNS: 成功返回: 关节位置信息,结果详见NOTES\n * 失败返回: None\n *\n * NOTES: 六个关节角 {'joint': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n * 位置 'pos': [-0.06403157614989634, -0.4185973810159096, 0.816883228463401],\n * 姿态 'ori': [-0.11863209307193756, 0.3820514380931854, 0.0, 0.9164950251579285]}\n \"\"\"\n #self.check_event()\n if self.rshd >= 0 and self.connected:\n return libpy3auboi5.get_current_waypoint(self.rshd)\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return None\n\n def get_diagnosis_info(self):\n \"\"\"\n * FUNCTION: get_diagnosis_info\n * DESCRIPTION: 获取机械臂诊断信息\n * INPUTS:\n * OUTPUTS:\n * RETURNS: 成功返回: 机械臂诊断信息\n * 失败返回: None\n *\n * NOTES:\n \"\"\"\n #self.check_event()\n if self.rshd >= 0 and self.connected:\n return libpy3auboi5.get_diagnosis_info(self.rshd)\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return None\n\n def get_board_io_config(self, io_type=RobotIOType.User_DO):\n \"\"\"\n * FUNCTION: get_board_io_config\n * DESCRIPTION: 获取控制柜IO配置\n * INPUTS: io_type:IO类型:RobotIOType\n * OUTPUTS:\n * RETURNS: 成功返回: IO配置\n * [{\"id\": ID\n * \"name\": \"IO名字\"\n * \"addr\": IO地址\n * \"type\": IO类型\n * \"value\": IO当前值},]\n *\n * 失败返回: None\n * NOTES: RobotIOType详见class RobotIOType\n \"\"\"\n #self.check_event()\n if self.rshd >= 0 and self.connected:\n return libpy3auboi5.get_board_io_config(self.rshd, io_type)\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return None\n\n def get_board_io_status(self, io_type, io_name):\n \"\"\"\n * FUNCTION: get_board_io_status\n * DESCRIPTION: 获取IO状态\n * INPUTS: io_type:类型\n * io_name:名称 RobotUserIoName.user_dx_xx\n * OUTPUTS:\n * RETURNS: 成功返回: IO状态 double数值(数字IO,返回0或1,模拟IO返回浮点数)\n * 失败返回: None\n * NOTES:\n \"\"\"\n #self.check_event()\n if self.rshd >= 0 and self.connected:\n return libpy3auboi5.get_board_io_status(self.rshd, io_type, io_name)\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return None\n\n def set_board_io_status(self, io_type, io_name, io_value):\n \"\"\"\n * FUNCTION: set_board_io_status\n * DESCRIPTION: 设置IO状态\n * INPUTS: io_type:类型\n * io_name:名称 RobotUserIoName.user_dx_xx\n * io_value:状态数值(数字IO取值0或1,模拟IO为浮点数)\n * OUTPUTS:\n * RETURNS: 成功返回: RobotError.RobotError_SUCC\n * 失败返回: 其他\n * NOTES:\n \"\"\"\n #self.check_event()\n if self.rshd >= 0 and self.connected:\n return libpy3auboi5.set_board_io_status(self.rshd, io_type, io_name, io_value)\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return RobotErrorType.RobotError_LOGIN_FAILED\n\n def set_tool_power_type(self, power_type=RobotToolPowerType.OUT_0V):\n \"\"\"\n * FUNCTION: set_tool_power_type\n * DESCRIPTION: 设置工具端电源类型\n * INPUTS: power_type:电源类型\n * RobotToolPowerType.OUT_0V\n * RobotToolPowerType.OUT_12V\n * RobotToolPowerType.OUT_24V\n * OUTPUTS:\n * RETURNS: 成功返回: RobotError.RobotError_SUCC\n * 失败返回: 其他\n * NOTES:\n \"\"\"\n #self.check_event()\n if self.rshd >= 0 and self.connected:\n return libpy3auboi5.set_tool_power_type(self.rshd, power_type)\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return RobotErrorType.RobotError_LOGIN_FAILED\n\n def get_tool_power_type(self):\n \"\"\"\n * FUNCTION: get_tool_power_type\n * DESCRIPTION: 获取工具端电源类型\n * INPUTS:\n * OUTPUTS:\n * RETURNS: 成功返回: 电源类型,包括如下:\n * RobotToolPowerType.OUT_0V\n * RobotToolPowerType.OUT_12V\n * RobotToolPowerType.OUT_24V\n *\n * 失败返回: None\n * NOTES:\n \"\"\"\n #self.check_event()\n if self.rshd >= 0 and 
self.connected:\n return libpy3auboi5.get_tool_power_type(self.rshd)\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return None\n\n def set_tool_io_type(self, io_addr=RobotToolIoAddr.TOOL_DIGITAL_IO_0,\n io_type=RobotToolDigitalIoDir.IO_OUT):\n \"\"\"\n * FUNCTION: set_tool_io_type\n * DESCRIPTION: 设置工具端数字IO类型\n * INPUTS: io_addr:工具端IO地址 详见class RobotToolIoAddr\n * io_type:工具端IO类型 详见class RobotToolDigitalIoDir\n\n * OUTPUTS:\n * RETURNS: 成功返回: IO类型,包括如下:\n * RobotToolDigitalIoDir.IO_IN\n * RobotToolDigitalIoDir.IO_OUT\n *\n * 失败返回: None\n * NOTES:\n \"\"\"\n #self.check_event()\n if self.rshd >= 0 and self.connected:\n return libpy3auboi5.set_tool_io_type(self.rshd, io_addr, io_type)\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return RobotErrorType.RobotError_LOGIN_FAILED\n\n def get_tool_power_voltage(self):\n \"\"\"\n * FUNCTION: get_tool_power_voltage\n * DESCRIPTION: 获取工具端电压数值\n * INPUTS:\n * OUTPUTS:\n * RETURNS: 成功返回: 返回电压数值,单位(伏特)\n * 失败返回: None\n * NOTES:\n \"\"\"\n #self.check_event()\n if self.rshd >= 0 and self.connected:\n return libpy3auboi5.get_tool_power_voltage(self.rshd)\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return None\n\n def get_tool_io_status(self, io_name):\n \"\"\"\n * FUNCTION: get_tool_io_status\n * DESCRIPTION: 获取工具端IO状态\n * INPUTS: io_name:IO名称\n\n * OUTPUTS:\n * RETURNS: 成功返回: 返回工具端IO状态\n *\n * 失败返回: None\n * NOTES:\n \"\"\"\n #self.check_event()\n if self.rshd >= 0 and self.connected:\n return libpy3auboi5.get_tool_io_status(self.rshd, io_name)\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return None\n\n def set_tool_io_status(self, io_name, io_status):\n \"\"\"\n * FUNCTION: set_tool_io_status\n * DESCRIPTION: 设置工具端IO状态\n * INPUTS: io_name:工具端IO名称\n * io_status:工具端IO状态: 取值范围(0或1)\n\n * OUTPUTS:\n * RETURNS: 成功返回: RobotError.RobotError_SUCC\n * 失败返回: 其他\n * NOTES:\n \"\"\"\n #self.check_event()\n if self.rshd >= 0 and self.connected:\n return libpy3auboi5.set_tool_do_status(self.rshd, io_name, io_status)\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return RobotErrorType.RobotError_LOGIN_FAILED\n\n def startup_excit_traj_track(self, track_file='', track_type=0, subtype=0):\n \"\"\"\n * FUNCTION: startup_excit_traj_track\n * DESCRIPTION: 通知服务器启动辨识轨迹运动\n * INPUTS:\n * OUTPUTS:\n * RETURNS: 成功返回: RobotError.RobotError_SUCC\n * 失败返回: 其他\n * NOTES:\n \"\"\"\n self.check_event()\n if self.rshd >= 0 and self.connected:\n return libpy3auboi5.startup_excit_traj_track(self.rshd, track_file, track_type, subtype)\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return RobotErrorType.RobotError_NotLogin\n\n def get_dynidentify_results(self):\n \"\"\"\n * FUNCTION: get_dynidentify_results\n * DESCRIPTION: 获取辨识结果\n * INPUTS:\n * OUTPUTS:\n * RETURNS: 成功返回: 辨识结果数组\n * 失败返回: None\n * NOTES:\n \"\"\"\n self.check_event()\n if self.rshd >= 0 and self.connected:\n return libpy3auboi5.get_dynidentify_results(self.rshd)\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return None\n\n def set_robot_event_callback(self, callback):\n \"\"\"\n * FUNCTION: set_robot_event_callback\n * DESCRIPTION: 设置机械臂事件回调函数\n * INPUTS: callback:回调函数名称\n * OUTPUTS:\n * RETURNS: 成功返回: RobotError.RobotError_SUCC\n * 失败返回: 其他\n * NOTES:\n \"\"\"\n if self.rshd >= 0 and self.connected:\n return libpy3auboi5.set_robot_event_callback(self.rshd, callback)\n else:\n logger.warn(\"RSHD uninitialized or not login!!!\")\n return RobotErrorType.RobotError_LOGIN_FAILED\n\n\n# 测试函数\ndef 
test(test_count):\n # 初始化logger\n logger_init()\n\n # 启动测试\n logger.info(\"{0} test beginning...\".format(Auboi5Robot.get_local_time()))\n\n # 系统初始化\n Auboi5Robot.initialize()\n\n # 创建机械臂控制类\n robot = Auboi5Robot()\n\n # 创建上下文\n handle = robot.create_context()\n\n # 打印上下文\n logger.info(\"robot.rshd={0}\".format(handle))\n\n try:\n\n # 链接服务器\n #ip = 'localhost'\n ip = '192.168.0.100'\n\n port = 8899\n result = robot.connect(ip, port)\n\n if result != RobotErrorType.RobotError_SUCC:\n logger.info(\"connect server{0}:{1} failed.\".format(ip, port))\n else:\n # # 重新上电\n #robot.robot_shutdown()\n #\n # # 上电\n robot.robot_startup()\n #\n # # 设置碰撞等级\n robot.set_collision_class(7)\n\n # 设置工具端电源为12v\n # robot.set_tool_power_type(RobotToolPowerType.OUT_12V)\n\n # 设置工具端IO_0为输出\n #robot.set_tool_io_type(RobotToolIoAddr.TOOL_DIGITAL_IO_0, RobotToolDigitalIoDir.IO_OUT)\n\n # 获取工具端IO_0当前状态\n #tool_io_status = robot.get_tool_io_status(RobotToolIoName.tool_io_0)\n #logger.info(\"tool_io_0={0}\".format(tool_io_status))\n\n # 设置工具端IO_0状态\n #robot.set_tool_io_status(RobotToolIoName.tool_io_0, 1)\n\n\n # 获取控制柜用户DO\n #io_config = robot.get_board_io_config(RobotIOType.User_DO)\n\n # 输出DO配置\n #logger.info(io_config)\n\n # 当前机械臂是否运行在联机模式\n #logger.info(\"robot online mode is {0}\".format(robot.is_online_mode()))\n\n # 循环测试\n while test_count > 0:\n test_count -= 1\n\n joint_status = robot.get_joint_status()\n logger.info(\"joint_status={0}\".format(joint_status))\n\n # 初始化全局配置文件\n robot.init_profile()\n\n # 设置关节最大加速度\n robot.set_joint_maxacc((1.5, 1.5, 1.5, 1.5, 1.5, 1.5))\n\n # 设置关节最大速度\n robot.set_joint_maxvelc((1.5, 1.5, 1.5, 1.5, 1.5, 1.5))\n\n joint_radian = (0.541678, 0.225068, -0.948709, 0.397018, -1.570800, 0.541673)\n logger.info(\"move joint to {0}\".format(joint_radian))\n logger.info(\"1111111111111111111111111111111111111111111111111111111\")\n\n robot.move_joint(joint_radian)\n logger.info(\"222222222222222222222222222222222222222222222222222222222\")\n\n\n # 获取关节最大加速度\n logger.info(robot.get_joint_maxacc())\n\n # 正解测试\n fk_ret = robot.forward_kin((-0.000003, -0.127267, -1.321122, 0.376934, -1.570796, -0.000008))\n logger.info(fk_ret)\n\n # 逆解\n joint_radian = (0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000)\n ik_result = robot.inverse_kin(joint_radian, fk_ret['pos'], fk_ret['ori'])\n logger.info(ik_result)\n\n # 轴动1\n joint_radian = (0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000)\n logger.info(\"move joint to {0}\".format(joint_radian))\n robot.move_joint(joint_radian)\n\n # 轴动2\n joint_radian = (0.541678, 0.225068, -0.948709, 0.397018, -1.570800, 0.541673)\n logger.info(\"move joint to {0}\".format(joint_radian))\n robot.move_joint(joint_radian)\n\n # 轴动3\n joint_radian = (-0.000003, -0.127267, -1.321122, 0.376934, -1.570796, -0.000008)\n logger.info(\"move joint to {0}\".format(joint_radian))\n robot.move_joint(joint_radian)\n\n # 设置机械臂末端最大线加速度(m/s^2)\n robot.set_end_max_line_acc(0.5)\n\n # 设置机械臂末端最大线速度(m/s)\n robot.set_end_max_line_velc(0.2)\n\n # 清除所有已经设置的全局路点\n robot.remove_all_waypoint()\n\n # 添加全局路点1,用于轨迹运动\n joint_radian = (-0.000003, -0.127267, -1.321122, 0.376934, -1.570796, -0.000008)\n robot.add_waypoint(joint_radian)\n\n # 添加全局路点2,用于轨迹运动\n joint_radian = (-0.211675, -0.325189, -1.466753, 0.429232, -1.570794, -0.211680)\n robot.add_waypoint(joint_radian)\n\n # 添加全局路点3,用于轨迹运动\n joint_radian = (-0.037186, -0.224307, -1.398285, 0.396819, -1.570796, -0.037191)\n robot.add_waypoint(joint_radian)\n\n # 设置圆运动圈数\n robot.set_circular_loop_times(3)\n
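\n # Added note (not vendor documentation): move_track(RobotMoveTrackType.ARC_CIR)\n # below appears to fit a circle through the three waypoints added above, and\n # set_circular_loop_times(3) should make the arm traverse it three times.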
logger.info(\"move_track ARC_CIR\")\n robot.move_track(RobotMoveTrackType.ARC_CIR)\n\n # 清除所有已经设置的全局路点\n robot.remove_all_waypoint()\n\n # 机械臂轴动 回到0位\n joint_radian = (0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000)\n logger.info(\"move joint to {0}\".format(joint_radian))\n robot.move_joint(joint_radian)\n\n # 断开服务器链接\n robot.disconnect()\n\n except RobotError as e:\n logger.error(\"{0} robot Event:{1}\".format(robot.get_local_time(), e))\n\n\n finally:\n # 断开服务器链接\n if robot.connected:\n # 关闭机械臂\n robot.robot_shutdown()\n # 断开机械臂链接\n robot.disconnect()\n # 释放库资源\n Auboi5Robot.uninitialize()\n logger.info(\"{0} test completed.\".format(Auboi5Robot.get_local_time()))\n\n\ndef step_test():\n # 初始化logger\n logger_init()\n\n # 启动测试\n logger.info(\"{0} test beginning...\".format(Auboi5Robot.get_local_time()))\n\n # 系统初始化\n Auboi5Robot.initialize()\n\n # 创建机械臂控制类\n robot = Auboi5Robot()\n\n # 创建上下文\n handle = robot.create_context()\n\n # 打印上下文\n logger.info(\"robot.rshd={0}\".format(handle))\n\n try:\n\n # 链接服务器\n ip = 'localhost'\n port = 8899\n result = robot.connect(ip, port)\n\n if result != RobotErrorType.RobotError_SUCC:\n logger.info(\"connect server{0}:{1} failed.\".format(ip, port))\n else:\n # 重新上电\n robot.robot_shutdown()\n\n # 上电\n robot.robot_startup()\n\n # 设置碰撞等级\n robot.set_collision_class(7)\n\n # # 初始化全局配置文件\n # robot.init_profile()\n #\n # # logger.info(robot.get_board_io_config(RobotIOType.User_DI))\n #\n # # 获取当前位置\n # logger.info(robot.get_current_waypoint())\n #\n # joint_radian = (0, 0, 0, 0, 0, 0)\n # # 轴动到初始位置\n # robot.move_joint(joint_radian)\n #\n # # 沿Z轴运动0.1毫米\n # current_pos = robot.get_current_waypoint()\n #\n # current_pos['pos'][2] -= 0.001\n #\n # ik_result = robot.inverse_kin(current_pos['joint'], current_pos['pos'], current_pos['ori'])\n # logger.info(ik_result)\n #\n # # joint_radian = (0.541678, 0.225068, -0.948709, 0.397018, -1.570800, 0.541673)\n # # logger.info(\"move joint to {0}\".format(joint_radian))\n # # robot.move_joint(joint_radian)\n #\n # robot.move_line(ik_result['joint'])\n\n # 断开服务器链接\n robot.disconnect()\n\n except RobotError as e:\n logger.error(\"robot Event:{0}\".format(e))\n\n finally:\n # 断开服务器链接\n if robot.connected:\n # 断开机械臂链接\n robot.disconnect()\n # 释放库资源\n Auboi5Robot.uninitialize()\n logger.info(\"{0} test completed.\".format(Auboi5Robot.get_local_time()))\n\n\ndef excit_traj_track_test():\n # 初始化logger\n logger_init()\n\n # 启动测试\n logger.info(\"{0} test beginning...\".format(Auboi5Robot.get_local_time()))\n\n # 系统初始化\n Auboi5Robot.initialize()\n\n # 创建机械臂控制类\n robot = Auboi5Robot()\n\n # 创建上下文\n handle = robot.create_context()\n\n # 打印上下文\n logger.info(\"robot.rshd={0}\".format(handle))\n\n try:\n\n # 链接服务器\n ip = 'localhost'\n port = 8899\n result = robot.connect(ip, port)\n\n if result != RobotErrorType.RobotError_SUCC:\n logger.info(\"connect server{0}:{1} failed.\".format(ip, port))\n else:\n\n # 重新上电\n # robot.robot_shutdown()\n\n # 上电\n # robot.robot_startup()\n\n # 设置碰撞等级\n # robot.set_collision_class(7)\n\n joint_radian = (0, 0, 0, 0, 0, 0)\n # 轴动到初始位置\n robot.move_joint(joint_radian)\n\n logger.info(\"starup excit traj track....\")\n\n # 启动辨识轨迹\n #robot.startup_excit_traj_track(\"dynamics_exciting_trajectories/excitTraj1.offt\", 1, 0)\n\n # 延时两秒等待辨识结果\n #time.sleep(5)\n\n # 获取辨识结果\n dynidentify_ret = robot.get_dynidentify_results()\n logger.info(\"dynidentify result={0}\".format(dynidentify_ret))\n for i in range(0,54):\n\t dynidentify_ret[i] = dynidentify_ret[i]/1024.0\n logger.info(\"dynidentify 
result={0}\".format(dynidentify_ret))\n\n # 断开服务器链接\n robot.disconnect()\n\n except RobotError as e:\n logger.error(\"robot Event:{0}\".format(e))\n\n\n finally:\n # 断开服务器链接\n if robot.connected:\n # 断开机械臂链接\n robot.disconnect()\n # 释放库资源\n Auboi5Robot.uninitialize()\n\n\ndef move_rotate_test():\n # 初始化logger\n logger_init()\n\n # 启动测试\n logger.info(\"{0} test beginning...\".format(Auboi5Robot.get_local_time()))\n\n # 系统初始化\n Auboi5Robot.initialize()\n\n # 创建机械臂控制类\n robot = Auboi5Robot()\n\n # 创建上下文\n handle = robot.create_context()\n\n # 打印上下文\n logger.info(\"robot.rshd={0}\".format(handle))\n\n try:\n\n # 链接服务器\n ip = 'localhost'\n port = 8899\n result = robot.connect(ip, port)\n\n if result != RobotErrorType.RobotError_SUCC:\n logger.info(\"connect server{0}:{1} failed.\".format(ip, port))\n else:\n\n # 重新上电\n # robot.robot_shutdown()\n\n # 上电\n # robot.robot_startup()\n\n # 设置碰撞等级\n # robot.set_collision_class(7)\n\n # joint_radian = (1, 0, 0, 0, 0, 0)\n # # 轴动到初始位置\n # robot.move_joint(joint_radian)\n\n joint_radian = (0.541678, 0.225068, -0.948709, 0.397018, -1.570800, 0.541673)\n logger.info(\"move joint to {0}\".format(joint_radian))\n robot.move_joint(joint_radian)\n\n # 获取当前位置\n current_pos = robot.get_current_waypoint()\n\n # 工具转轴的向量(相对于法兰盘,这样需要测量得到x,y,z本测试样例默认以x=0,y=0,z轴为0.1米)\n tool_pos_on_end = (0, 0, 0.10)\n\n # 工具姿态(w,x,y,z 相对于法兰盘,不知道的情况下,默认填写如下信息)\n tool_ori_on_end = (1, 0, 0, 0)\n\n tool_desc = {\"pos\": tool_pos_on_end, \"ori\": tool_ori_on_end}\n\n # 得到法兰盘工具末端点相对于基座坐标系中的位置\n tool_pos_on_base = robot.base_to_base_additional_tool(current_pos['pos'],\n current_pos['ori'],\n tool_desc)\n\n logger.info(\"current_pos={0}\".format(current_pos['pos'][0]))\n\n logger.info(\"tool_pos_on_base={0}\".format(tool_pos_on_base['pos'][0]))\n\n # 讲工具转轴向量平移到基座坐标系下(旋转方向符合右手准则)\n rotate_axis = map(lambda a, b: a - b, tool_pos_on_base['pos'], current_pos['pos'])\n\n logger.info(\"rotate_axis={0}\".format(rotate_axis))\n\n # 坐标系默认使用基座坐标系(默认填写下面的值就可以了)\n user_coord = {'coord_type': RobotCoordType.Robot_Base_Coordinate,\n 'calibrate_method': 0,\n 'calibrate_points':\n {\"point1\": (0.0, 0.0, 0.0, 0.0, 0.0, 0.0),\n \"point2\": (0.0, 0.0, 0.0, 0.0, 0.0, 0.0),\n \"point3\": (0.0, 0.0, 0.0, 0.0, 0.0, 0.0)},\n 'tool_desc':\n {\"pos\": (0.0, 0.0, 0.0),\n \"ori\": (1.0, 0.0, 0.0, 0.0)}\n }\n\n # 调用转轴旋转接口,最后一个参数为旋转角度(弧度)\n robot.move_rotate(user_coord, rotate_axis, 1)\n\n # 断开服务器链接\n robot.disconnect()\n\n except RobotError as e:\n logger.error(\"robot Event:{0}\".format(e))\n\n finally:\n # 断开服务器链接\n if robot.connected:\n # 断开机械臂链接\n robot.disconnect()\n # 释放库资源\n Auboi5Robot.uninitialize()\n\n\ndef test_rsm():\n # 初始化logger\n logger_init()\n\n # 启动测试\n logger.info(\"{0} test beginning...\".format(Auboi5Robot.get_local_time()))\n\n # 系统初始化\n Auboi5Robot.initialize()\n\n # 创建机械臂控制类\n robot = Auboi5Robot()\n\n # 创建上下文\n handle = robot.create_context()\n\n # 打印上下文\n logger.info(\"robot.rshd={0}\".format(handle))\n\n print(\"---------------------robot control----------------------\")\n\n try:\n\n # 链接服务器\n ip = 'localhost'\n port = 8899\n result = robot.connect(ip, port)\n\n robot.enable_robot_event()\n\n if result != RobotErrorType.RobotError_SUCC:\n logger.info(\"connect server{0}:{1} failed.\".format(ip, port))\n else:\n\n # robot.move_pause()\n\n #joint_radian = (0, 0, 0, 0, 0, 0)\n # 轴动到初始位置\n #robot.move_joint(joint_radian)\n\n while True:\n time.sleep(0.05)\n\n # 断开服务器链接\n robot.disconnect()\n\n except RobotError as e:\n logger.error(\"robot Event:{0}\".format(e))\n\n\n finally:\n # 断开服务器链接\n if 
robot.connected:\n # 断开机械臂链接\n robot.disconnect()\n # 释放库资源\n Auboi5Robot.uninitialize()\n\nif __name__ == '__main__':\n test(10)\n #test_rsm()\n logger.info(\"test completed\")\n","sub_path":"deepclaw/driver/arms/aubo/robotcontrol.py","file_name":"robotcontrol.py","file_ext":"py","file_size_in_byte":96990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"645557189","text":"import numpy as np\nimport keras\n\n\"\"\"\n\n\"\"\"\n\n\nclass DataGenerator(keras.utils.Sequence):\n \"\"\"\n\n \"\"\"\n\n def __init__(self, input_indices: np.ndarray, input_shape: np.ndarray, samples_path: list, labels_path: list,\n batch_size: int = 32, n_classes=2, shuffle=True, signals_in: int = 1, op_mode: str = 'point',\n label: str = 'binary', signal_type: int = 0):\n\n \"\"\"\n\n :param input_indices:\n :param input_shape:\n :param samples_path:\n :param labels_path:\n :param batch_size:\n :param n_classes:\n :param shuffle:\n :param signals_in:\n :param op_mode:\n :param label:\n :param signal_type:\n \"\"\"\n\n # Save direct paths for the signals\n self.signals_in = signals_in\n\n if self.signals_in == 1:\n self.ecg_path = samples_path[0]\n\n elif self.signals_in == 2:\n self.ecg_path = samples_path[0]\n self.r_peaks_path = samples_path[1]\n\n elif self.signals_in == 3:\n self.ecg_path = samples_path[0]\n self.r_peaks_path = samples_path[1]\n self.entropy_path = samples_path[2]\n\n # Save direct paths for the lables\n self.dont_care_path = labels_path[0]\n self.nsr_beats_path = labels_path[1]\n self.af_predict_path = labels_path[2]\n self.af_beats_path = labels_path[3]\n\n self.signal_type = signal_type\n self.label = label\n self.samples_path = samples_path\n self.labels_path = labels_path\n self.input_shape = input_shape\n self.batch_size = batch_size\n self.input_indices = input_indices\n self.n_classes = n_classes\n self.shuffle = shuffle\n self.indexes = 0\n self.op_mode = op_mode\n self.on_epoch_end()\n\n def __len__(self):\n \"\"\"\n\n :return:\n \"\"\"\n\n return int(np.floor(len(self.input_indices) / self.batch_size))\n\n def __getitem__(self, index):\n \"\"\"\n\n :param index:\n :return:\n \"\"\"\n\n # Generate indexes of the batch\n indexes = self.indexes[(index * self.batch_size):((index + 1) * self.batch_size)]\n\n # Find list of indices\n list_indices_temp = [self.input_indices[k] for k in indexes]\n\n # Generate data\n x, y = self.__data_generation(list_indices_temp)\n\n return x, y\n\n def on_epoch_end(self):\n \"\"\"\n\n :return:\n \"\"\"\n\n self.indexes = np.arange(len(self.input_indices))\n\n if self.shuffle:\n np.random.shuffle(self.indexes)\n\n def __data_generation(self, list_indices_temp):\n \"\"\"\n\n :param list_indices_temp:\n :return:\n \"\"\"\n\n # x : (n_samples, *dim, n_channels)\n # Initialization\n x = np.empty((self.batch_size, *self.input_shape))\n\n if self.op_mode == 'segment':\n if self.label == 'binary':\n y = np.empty((self.batch_size, 1, 1), dtype=int)\n else:\n y = np.empty((self.batch_size, 1, 4), dtype=int)\n\n else:\n if self.label == 'binary':\n y = np.empty((self.batch_size, *self.input_shape), dtype=int)\n\n else:\n y = np.empty((self.batch_size, self.input_shape[0], 4), dtype=int)\n\n # Generate data\n max_ind = np.max(list_indices_temp) + self.input_shape[0]\n min_ind = np.min(list_indices_temp)\n\n ecg = np.load(self.ecg_path)[min_ind:max_ind]\n\n if self.signals_in == 2:\n r_peaks = np.load(self.r_peaks_path)[min_ind:max_ind]\n\n elif self.signals_in == 3:\n r_peaks = 
np.load(self.r_peaks_path)[min_ind:max_ind]\n entropy = np.load(self.entropy_path)[min_ind:max_ind]\n\n for i, ind in enumerate(list_indices_temp):\n # Store sample\n # if self.signals_in == 1:\n # x[i, :] = np.load(self.ecg_path)[ind:(ind + self.input_shape[0])]\n #\n # elif self.signals_in == 2:\n # x[i, :] = np.array([ecg[ind:(ind + self.input_shape[0])], r_peaks[ind:(ind + self.input_shape[0])]]).T\n #\n # elif self.signals_in == 3:\n # x[i, :] = np.array([ecg[ind:(ind + self.input_shape[0])], r_peaks[ind:(ind + self.input_shape[0])],\n # entropy[ind:(ind + self.input_shape[0])]]).T\n\n # Normalize index\n ind -= min_ind\n if self.signal_type == 0:\n x[i, :] = ecg[ind:(ind + self.input_shape[0])]\n\n elif self.signal_type == 1:\n x[i, :] = r_peaks[ind:(ind + self.input_shape[0])]\n\n else:\n x[i, :] = entropy[ind:(ind + self.input_shape[0])]\n\n del ecg\n\n if self.label == 'catagorical':\n dont_care = np.load(self.dont_care_path)[min_ind:max_ind]\n norm_sin = np.load(self.nsr_beats_path)[min_ind:max_ind]\n af_predict = np.load(self.af_predict_path)[min_ind:max_ind]\n af_beat = np.load(self.af_beats_path)[min_ind:max_ind]\n\n else:\n af_predict = np.load(self.af_predict_path)[min_ind:max_ind]\n\n for i, ind in enumerate(list_indices_temp):\n # Load labels\n # Normalize index\n ind -= min_ind\n if self.label == 'catagorical':\n dc = dont_care[ind:(ind + self.input_shape[0])]\n nsr = norm_sin[ind:(ind + self.input_shape[0])]\n afp = af_predict[ind:(ind + self.input_shape[0])]\n afb = af_beat[ind:(ind + self.input_shape[0])]\n\n if self.op_mode == 'segment':\n tmp_y = np.array([dc, nsr, afp, afb]).T\n\n labels_sum = np.sum(tmp_y, axis=0)\n label = np.argmax(labels_sum)\n\n if label.size > 1:\n label = label[0]\n\n y_place_holder = np.zeros([1, 4])\n y_place_holder[0, label] = 1\n y[i, :] = y_place_holder\n\n else:\n y[i, :] = np.array([dc, nsr, afp, afb]).T\n\n else:\n if self.op_mode == 'segment':\n tmp_y = af_predict[ind:(ind + self.input_shape[0])]\n\n labels_sum = np.mean(tmp_y, axis=0)\n\n if labels_sum > 0.5:\n label = 1\n\n else:\n label = 0\n\n y[i, 0, 0] = label\n\n else:\n y[i, :] = af_predict[ind:(ind + self.input_shape[0]), :]\n\n return x, y\n","sub_path":"ML/Utilities/SignalsGenerator.py","file_name":"SignalsGenerator.py","file_ext":"py","file_size_in_byte":6633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"143424081","text":"import numpy as np\n\n\n\ndef remove_line(img_bw, axis=0, tickness=0):\n \"\"\" Remove the central line from a properly formatted figure. The image\n should have only black and white pixel, the line is expected to be white\n with a black background. If 'axis' == 0, it removes vertical lines, if\n 'axis' == 1, it removes orizontal lines (if any). 
All the pixels whose\n distance from the line is less than or equal to 'tickness' are removed as well.\n \"\"\"\n # fraction of active pixels above which a column (or row) is considered a full line\n threshold = 0.5*img_bw.shape[axis]\n # indices of the lines (columns for axis=0, rows for axis=1) to be cleaned\n sums = img_bw.sum(axis=axis)\n ind = [i for i in range(img_bw.shape[1-axis]) if sums[i] > threshold]\n # widen each detected line by 'tickness' pixels on both sides; build a\n # separate set rather than appending to 'ind' while iterating over it\n widened = set(ind)\n for i in ind:\n for n_pix in range(1, tickness + 1):\n widened.add(i + n_pix)\n widened.add(i - n_pix)\n # keep only indices inside the image and remove duplicates\n ind = sorted(j for j in widened if 0 <= j < img_bw.shape[1-axis])\n if axis == 0:\n img_bw[:, ind] = 0\n else:\n img_bw[ind, :] = 0\n return img_bw\n\n\n\ndef select_coord(img_bw):\n \"\"\" Stores the coordinates of all the white points contained\n in an array.\n \"\"\"\n return np.array([[i, j] for i in range(img_bw.shape[0])\n for j in range(img_bw.shape[1]) if img_bw[i,j] == 1])\n\n\n\ndef select_points(img_bw, img_gray):\n \"\"\" Stores the coordinates of all the white points contained\n in an array plus the gray level in the associated gray scale\n image.\n \"\"\"\n return np.array([[i, j, img_gray[i,j]] for i in range(img_bw.shape[0])\n for j in range(img_bw.shape[1]) if img_bw[i,j] == 1])\n\n\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n######## Denoising ########\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n# def denoisingCV(img_name):\n# ''' Takes as input the name of the file where the image is stored\n# '''\n# import cv2\n#\n# img = cv2.imread(img_name)\n# b,g,r = cv2.split(img) # get b,g,r\n# rgb_img = cv2.merge([r,g,b]) # switch it to rgb\n#\n# dst = cv2.fastNlMeansDenoisingColored(img, None, 5, 5, 21, 50)\n# b,g,r = cv2.split(dst) # get b,g,r\n# rgb_dst = cv2.merge([r,g,b]) # switch it to rgb\n# return rgb_dst\n#\n#\n#\n# def denoisingPCA(img, n):\n# ''' Takes as input an image and an integer that represents the number\n# of principal components to consider and returns the denoised image.\n# '''\n# from sklearn.decomposition import PCA\n#\n# #img = mpimg.imread(img_name)\n# dim = img.shape\n# #img_r = np.reshape(img, (dim[0], dim[1]*4))\n#\n# img_r = img\n# ipca = PCA(n, svd_solver='randomized').fit(img_r)\n# img_c = ipca.transform(img_r)\n#\n# print('Explained variance ratio: ',np.sum(ipca.explained_variance_ratio_))\n# # To visualize how PCA has performed this compression, let's inverse\n# # transform the PCA output and reshape for visualization using imshow\n# temp = ipca.inverse_transform(img_c)\n# #reshaping to original size\n# temp = np.reshape(temp, dim)\n#\n# return temp\n#\n\n# %%\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n######## Pre-Clustering ########\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n#\n# def rgb2gray(rgb):\n# \"\"\" Convert a colored image into a gray scale image\n# \"\"\"\n# return np.dot(rgb[...,:3], [0.2989, 0.5870, 0.1140])\n#\n#\n#\n# def rgb2bw(rgb):\n# \"\"\" Convert a colored image into an image containing only black and white\n# pixels, all the gray shades are removed. 
Note that the function doesn't\n# work taking in input a grayscale image.\n# \"\"\"\n# bw = rgb2gray(rgb)\n# m = bw.mean()\n# bw[bw >= m] = 1; bw[bw < m] = 0\n# return bw\n#\n\n\n# %%\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n######## Plotting ########\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n# def plotdenoised(img1, img2):\n# \"\"\" Plot 2 images side by side\n# \"\"\"\n# fig = plt.figure(figsize=[8,6])\n# ax = fig.add_subplot(121)\n# ax.imshow(img1)\n# ax.axes.get_xaxis().set_visible(False)\n# ax.axes.get_yaxis().set_visible(False)\n# ax = fig.add_subplot(122)\n# ax.imshow(img2)\n# ax.axes.get_xaxis().set_visible(False)\n# ax.axes.get_yaxis().set_visible(False)\n# ax.set_frame_on(False)\n# plt.show()\n\n\n#\n# def plot_noaxbw(img):\n# \"\"\" plot an image without white borders and axis\n# \"\"\"\n# fig = plt.figure(figsize=[6,6])\n# ax = fig.add_subplot(111)\n# ax.imshow(img, cmap=plt.get_cmap('gray'), vmin=0, vmax=1)\n# ax.axes.get_xaxis().set_visible(False)\n# ax.axes.get_yaxis().set_visible(False)\n# ax.set_frame_on(False)\n#\n","sub_path":"denoising.py","file_name":"denoising.py","file_ext":"py","file_size_in_byte":4593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"19413615","text":"from pyimagesearch.shapedetector import ShapeDetector\nfrom pyimagesearch.colordetector import ColorDetector\n\nimport imutils as imutils\nimport keyboard\nimport mouse\nimport numpy as np\nfrom PIL import ImageGrab\nimport cv2\nimport playsound\nimport time\n\nanimals = []\n\n\ndef apply_brightness_contrast(input_img, brightness=0, contrast=0):\n if brightness != 0:\n if brightness > 0:\n shadow = brightness\n highlight = 255\n else:\n shadow = 0\n highlight = 255 + brightness\n alpha_b = (highlight - shadow) / 255\n gamma_b = shadow\n\n buf = cv2.addWeighted(input_img, alpha_b, input_img, 0, gamma_b)\n else:\n buf = input_img.copy()\n\n if contrast != 0:\n f = 131 * (contrast + 127) / (127 * (131 - contrast))\n alpha_c = f\n gamma_c = 127 * (1 - f)\n\n buf = cv2.addWeighted(buf, alpha_c, buf, 0, gamma_c)\n\n return buf\n\n\ndef process_img(image):\n final = apply_brightness_contrast(image, 12, 50)\n\n sd = ShapeDetector()\n cd = ColorDetector()\n # convert to gray\n # processed_img = imutils.resize(image, width=1500)\n # ratio = image.shape[0]/float(processed_img.shape[0])\n processed_img = cv2.cvtColor(final, cv2.COLOR_BGR2GRAY)\n processed_img = cv2.equalizeHist(processed_img)\n # processed_img = cv2.GaussianBlur(processed_img, (5, 5), 0)\n\n # cv2.imshow(\"Image\", processed_img)\n # cv2.waitKey(0)\n\n # edge detection\n processed_img = cv2.Canny(processed_img, threshold1=100, threshold2=200)\n # cv2.imshow(\"Image\", processed_img)\n # cv2.waitKey(0)\n #\n # kernel = np.ones((7, 7), np.uint8)\n # processed_img = cv2.dilate(processed_img, kernel, iterations=1)\n # processed_img = cv2.erode(processed_img, kernel, iterations=1)\n #\n\n # gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n # blurred = cv2.GaussianBlur(gray, (5, 5), 0)\n # thresh = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY)[1]\n\n cnts = cv2.findContours(processed_img.copy(), cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)\n cnts = imutils.grab_contours(cnts)\n\n game_board = []\n x_coordinates = []\n y_coordinates = []\n lowestY = 999999\n # for c in cnts:\n # # compute the center of the contour\n # M = cv2.moments(c)\n # if M[\"m00\"] == 0:\n # M[\"m00\"] = 1\n # cX = int(M[\"m10\"] / M[\"m00\"])\n # cY = int(M[\"m01\"] / M[\"m00\"])\n #\n 
# shape = sd.detect(c)\n # if shape == \"hexagon\":\n # print(str(cX) + \", \" + str(cY))\n # # draw the contour and center of the shape on the image\n # color = cd.determine_color(image, c)\n # cv2.drawContours(image, [c], -1, (0, 255, 0), 2)\n # cv2.circle(image, (cX, cY), 7, (255, 255, 255), -1)\n # cv2.putText(image, color, (cX - 20, cY - 20),\n # cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)\n # if cY < lowestY:\n # lowestY = cY\n # game_board.append([color])\n # x_coordinates.append([cX])\n # y_coordinates.append(cY)\n #\n # else:\n # for i in range(0, len(y_coordinates)):\n # if cY == y_coordinates[i]:\n # x_coordinates[i].append(cX)\n # game_board[i].append(color)\n # for i in range(0, len(x_coordinates)):\n # swapped = True\n # while swapped:\n # swapped = False\n # for j in range(len(x_coordinates[i]) - 1):\n # if x_coordinates[i][j] > x_coordinates[i][j + 1]:\n # # Swap the elements\n # x_coordinates[i][j], x_coordinates[i][j + 1] = x_coordinates[i][j + 1], x_coordinates[i][j]\n # game_board[i][j], game_board[i][j + 1] = game_board[i][j + 1], game_board[i][j]\n # # Set the flag to True so we'll loop again\n # swapped = True\n # game_board = game_board[::-1]\n # x_coordinates = x_coordinates[::-1]\n # y_coordinates = y_coordinates[::-1]\n\n # solve_game(game_board, x_coordinates, y_coordinates)\n # show the image\n # cv2.imshow(\"Image\", image)\n # cv2.waitKey(0)\n\n cv2.imwrite(\"./Gray_Image.jpg\", image)\n\n return image, cnts\n\n\ndef change_state(color):\n if color == \"black\":\n color = \"white\"\n else:\n color = \"black\"\n return color\n\n\ndef move(game_board, x, y):\n # print(\"X: \" + str(x) + \", Y: \" + str(y))\n # change self\n game_board[x][y] = change_state(game_board[x][y])\n\n # left and right\n if y > 0: # if column greater than 0\n game_board[x][y - 1] = change_state(game_board[x][y - 1]) # color to the left is changed\n if y < len(game_board[x]) - 1: # if column less than row length\n game_board[x][y + 1] = change_state(game_board[x][y + 1]) # color to the right is changed\n\n # top and bottom\n # if not last row\n if x < len(game_board) - 1:\n # if row below is bigger\n if len(game_board[x]) < len(game_board[x + 1]):\n game_board[x + 1][y] = change_state(game_board[x + 1][y]) # change left state\n game_board[x + 1][y + 1] = change_state(game_board[x + 1][y + 1]) # change right state\n\n # row below is smaller\n else:\n if y != len(game_board[x]) - 1: # if not last col\n game_board[x + 1][y] = change_state(game_board[x + 1][y]) # change right state\n if y != 0: # if not first col\n game_board[x + 1][y - 1] = change_state(game_board[x + 1][y - 1]) # change left state\n\n # if not first row\n if x != 0:\n # if row above is bigger\n # print(game_board)\n # print(len(game_board[x]))\n # print(len(game_board[x - 1]))\n if len(game_board[x]) < len(game_board[x - 1]):\n game_board[x - 1][y] = change_state(game_board[x - 1][y]) # change left state\n game_board[x - 1][y + 1] = change_state(game_board[x - 1][y + 1]) # change right state\n\n # row above is smaller\n else:\n # print(\"THIS \" + str(y) + \"LENGTH \" + str(len(game_board[x]) - 1))\n if y != len(game_board[x]) - 1: # if not last col\n game_board[x - 1][y] = change_state(game_board[x - 1][y]) # change right state\n if y != 0: # if not first col\n game_board[x - 1][y - 1] = change_state(game_board[x - 1][y - 1]) # change left state\n # print(game_board)\n return game_board\n\n\n# top and bottom\n\n\ndef start_game(image, cnts):\n game_board = []\n x_coordinates = []\n y_coordinates = []\n lowestY = 
999999\n sd = ShapeDetector()\n cd = ColorDetector()\n\n for c in cnts:\n # compute the center of the contour\n M = cv2.moments(c)\n if M[\"m00\"] == 0:\n M[\"m00\"] = 1\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n\n shape = sd.detect(c)\n if shape == \"hexagon\":\n # print(str(cX) + \", \" + str(cY))\n # draw the contour and center of the shape on the image\n color = cd.determine_color(image, c)\n cv2.drawContours(image, [c], -1, (0, 255, 0), 2)\n cv2.circle(image, (cX, cY), 7, (255, 255, 255), -1)\n cv2.putText(image, color, (cX - 20, cY - 20),\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)\n if cY < lowestY:\n lowestY = cY\n game_board.append([color])\n x_coordinates.append([cX])\n y_coordinates.append(cY)\n\n else:\n for i in range(0, len(y_coordinates)):\n if cY == y_coordinates[i]:\n x_coordinates[i].append(cX)\n game_board[i].append(color)\n if len(game_board) == 0:\n return\n for i in range(0, len(x_coordinates)):\n swapped = True\n while swapped:\n swapped = False\n for j in range(len(x_coordinates[i]) - 1):\n if x_coordinates[i][j] > x_coordinates[i][j + 1]:\n # Swap the elements\n x_coordinates[i][j], x_coordinates[i][j + 1] = x_coordinates[i][j + 1], x_coordinates[i][j]\n game_board[i][j], game_board[i][j + 1] = game_board[i][j + 1], game_board[i][j]\n # Set the flag to True so we'll loop again\n swapped = True\n game_board = game_board[::-1]\n x_coordinates = x_coordinates[::-1]\n y_coordinates = y_coordinates[::-1]\n solve_game(game_board, x_coordinates, y_coordinates)\n\n\n# TODO: Create a more efficient algorithm\ndef solving_down(game_board, row, color):\n instructions = []\n i = 0\n # if (row == 0) or (len(game_board[row]) < len(game_board[row + 1])): # if row below is smaller\n if row < 3:\n # print(row)\n # print(game_board)\n while i < len(game_board[row]):\n tile = game_board[row][i]\n if tile == color:\n if i < len(game_board[row]) - 1 and game_board[row][i + 1] == color:\n instructions.append(i + 1)\n i += 2\n else:\n x = i\n if x + 1 <= len(game_board[row]) / 2:\n while x > -1:\n instructions.append(x)\n x -= 1\n else:\n # walk right to the end of the row, mirroring the leftward\n # walk above; append before incrementing so the last index\n # stays inside the row (the original appended after the\n # increment and could run one past the end)\n while x < len(game_board[row]):\n instructions.append(x)\n x += 1\n\n i += 1\n else:\n i += 1\n return row + 1, instructions\n else:\n counter = 0\n for tile in game_board[row]:\n if tile == color:\n counter += 1\n if counter % 2 == 0:\n while i < len(game_board[row]):\n tile = game_board[row][i]\n if tile == color:\n if i < len(game_board[row]) - 1 and game_board[row][i + 1] == color:\n instructions.append(i)\n i += 2\n else:\n x = i - 1\n while x > -1:\n instructions.append(x)\n x -= 1\n i += 1\n else:\n i += 1\n return row + 1, instructions\n else:\n if row == 3:\n x = len(game_board[3]) - 1\n while x > - 1:\n instructions.append(x)\n x -= 1\n return row, instructions\n\n if row == 4:\n x = len(game_board[1]) - 1\n while x > -1:\n instructions.append(x)\n x -= 1\n return row - 3, instructions\n\n if row == 5:\n x = len(game_board[2]) - 1\n while x > -1:\n instructions.append(x)\n x -= 1\n return row - 3, instructions\n # print(instructions)\n # fallback: always return the same (row, instructions) tuple shape so\n # callers can unpack the result safely\n return row, instructions\n\n\n# executes instructions one row at a time\ndef execute_instructions(instructions, row, x_coords, y_coords, game_board):\n y = y_coords[row]\n for instruction in instructions:\n x = x_coords[row][instruction]\n mouse.move(x, y, absolute=True, duration=0.25)\n game_board = move(game_board, row, instruction)\n mouse.click()\n return game_board\n\n\ndef test_instructions(instructions, row, game_board):\n for instruction in instructions:\n game_board 
= move(game_board=game_board, x=row, y=instruction)\n return game_board\n\n\ndef solve_game(game_board, x_coords, y_coords):\n # test for black\n # temp_game_board = game_board\n # row = 0\n # maxiter = 0\n # black_counter = 0\n # black_all_instructions = []\n # while row < 6:\n # row, instructions = solving_down(game_board=temp_game_board, row=row, color=\"black\")\n # temp_game_board = test_instructions(instructions=instructions, row=row, game_board=temp_game_board)\n # black_counter += len(instructions)\n # if maxiter == 30:\n # break\n # else:\n # maxiter += 1\n # black_all_instructions.append((row, instructions))\n #\n # # test for white\n # temp_game_board = game_board\n # row = 0\n # maxiter = 0\n # white_counter = 0\n # white_all_instructions = []\n # while row < 6:\n # row, instructions = solving_down(game_board=temp_game_board, row=row, color=\"white\")\n # temp_game_board = test_instructions(instructions=instructions, row=row, game_board=temp_game_board)\n # white_counter += len(instructions)\n # if maxiter == 30:\n # break\n # else:\n # maxiter += 1\n # white_all_instructions.append((row, instructions))\n #\n # print(\"White: \" + str(white_counter) + \", Black: \" + str(black_counter))\n\n # test for black\n row = 0\n maxiter = 0\n while row < 6:\n row, instructions = solving_down(game_board=game_board, row=row, color=\"black\")\n game_board = execute_instructions(instructions, row, x_coords, y_coords, game_board)\n # print(row)\n # print(instructions)\n if maxiter == 30:\n break\n else:\n maxiter += 1\n\n # if white_counter < black_counter:\n # for row, instructions in white_all_instructions:\n # execute_instructions(instructions, row, x_coords, y_coords)\n # else:\n # for row, instructions in black_all_instructions:\n # execute_instructions(instructions, row, x_coords, y_coords)\n\n\ndef press_next(image, cnts):\n sd = ShapeDetector()\n cd = ColorDetector()\n\n pressed = False\n\n for c in cnts:\n M = cv2.moments(c)\n if M[\"m00\"] == 0:\n M[\"m00\"] = 1\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n\n shape = sd.detect(c)\n if shape == \"rectangle\":\n print(str(cX) + \", \" + str(cY))\n mouse.move(cX, cY, absolute=True, duration=0.05)\n mouse.click()\n pressed = True\n\n\ndef screen_grab():\n screengrab = ImageGrab.grab()\n screengrab_raw = np.array(screengrab, dtype='uint8') \\\n .reshape((screengrab.size[1], screengrab.size[0], 3))\n\n image, cnts = process_img(screengrab_raw)\n return image, cnts\n\n\nlimit = 0\nkeyboard.wait('`')\nwhile True:\n # try: # used try so that if user pressed other than the given key error will not be shown\n try:\n image, cnts = screen_grab()\n start_game(image, cnts)\n except:\n print()\n # playsound.playsound('Card Shuffle sound effect.mp3')\n\n time.sleep(0.1)\n try:\n image, cnts = screen_grab()\n start_game(image, cnts)\n except:\n print()\n # playsound.playsound('Card Shuffle sound effect.mp3')\n time.sleep(0.1)\n try:\n image, cnts = screen_grab()\n start_game(image, cnts)\n except:\n print()\n # playsound.playsound('Card Shuffle sound effect.mp3')\n time.sleep(0.1)\n try:\n image, cnts = screen_grab()\n start_game(image, cnts)\n except:\n # playsound.playsound('Card Shuffle sound effect.mp3')\n time.sleep(0.3)\n time.sleep(0.1)\n try:\n image, cnts = screen_grab()\n start_game(image, cnts)\n except:\n playsound.playsound('Wrong Buzzer Sound effect.mp3')\n print(\"Hexagon\")\n keyboard.wait('`')\n time.sleep(0.5)\n\n press_flag = False\n try:\n image, cnts = screen_grab()\n press_next(image, cnts)\n 
press_flag = True\n except:\n playsound.playsound('Card Shuffle sound effect.mp3')\n time.sleep(0.25)\n\n if not press_flag:\n time.sleep(0.25)\n try:\n image, cnts = screen_grab()\n press_next(image, cnts)\n press_flag = True\n except:\n print(\"PressNext\")\n if not press_flag:\n time.sleep(0.25)\n try:\n image, cnts = screen_grab()\n press_next(image, cnts)\n press_flag = True\n except:\n playsound.playsound('Wrong Buzzer Sound effect.mp3')\n print(\"PressNext\")\n keyboard.wait('`')\n\n\n time.sleep(0.5)\n\n if limit > 49:\n limit = 0\n playsound.playsound('Sparkle-sound-effect.mp3')\n keyboard.wait('`')\n else:\n limit += 1\n # except:\n # print(\"ERROR\")\n # # break # if user pressed a key other than the given key the loop will break\n","sub_path":"main_saved.py","file_name":"main_saved.py","file_ext":"py","file_size_in_byte":16583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"236816424","text":"def merge(height):\n if len(height) == 0:\n return 0\n if len(height) == 1:\n return height[0]\n m = min(height)\n m_idx = height.index(m)\n height = [h - m for h in height]\n left_height = height[:m_idx]\n right_height = height[m_idx + 1:]\n return merge(left_height) + merge(right_height) + m\n\nif __name__ == '__main__':\n N = int(input())\n h = list(map(int, input().split()))\n print(merge(h))\n","sub_path":"abc116_c.py","file_name":"abc116_c.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"292206945","text":"# coding:utf-8\n\nimport json\nfrom tornado.web import RequestHandler\nfrom Common.CommonVar import Session, logger,AvnData,strip,port,USBI_CMD_DICT\nfrom DB.avn_table import AvnSimu as avn\nfrom API.USBI import USBIAgree\n\nfrom concurrent.futures import ThreadPoolExecutor\nfrom tornado.concurrent import run_on_executor\nfrom tornado import gen\nimport tornado\nfrom Common.CommonFun import *\nimport Common.CommonVar\nfrom binascii import hexlify, unhexlify\n\nfrom contextlib import closing\nimport datetime\n\n\nclass Executor(ThreadPoolExecutor):\n \"\"\" 创建多线程的线程池,线程池的大小为10\n 创建多线程时使用了单例模式,如果Executor的_instance实例已经被创建,\n 则不再创建,单例模式的好处在此不做讲解\n \"\"\"\n _instance = None\n\n def __new__(cls, *args, **kwargs):\n if not getattr(cls, '_instance', None):\n cls._instance = ThreadPoolExecutor(max_workers=2000)\n return cls._instance\n\nclass BaseHandler(RequestHandler):\n # executor为RequestHandler中的一个属性,在使用run_on_executor时,必须要用,不然会报错\n # executor在此设计中为设计模式中的享元模式,所有的对象共享executor的值\n executor = Executor()\n def initialize(self):\n # self.request.method = 'POST'\n # print(self.request.method, type(self.request.method))\n self.logger = Common.CommonVar.logger\n self.set_default_header()\n\n def set_default_header(self):\n self.set_header('Access-Control-Allow-Origin', '*')\n # self.set_header('Access-Control-Allow-Origin', 'http://localhost:8080')\n self.set_header('Access-Control-Allow-Headers', 'X-Requested-With')\n self.set_header('Access-Control-Allow-Headers', '*')\n self.set_header('Access-Control-Allow-Methods', 'GET, POST, PUT, DELETE, PATCH, OPTIONS')\n self.set_header('Content-Type', 'application/json; charset=UTF-8')\n self.set_header('Access-Control-Allow-Headers', 'Content-Type')\n\n\nclass GetAvnData(BaseHandler):\n\n @gen.coroutine\n def post(self):\n result = self.response_msg()\n return_data = json.dumps(result, ensure_ascii=False)\n\n self.write(return_data)\n\n def response_msg(self):\n session = Session()\n avn_data = 
session.query(avn).all()\n avn_new_data = self.db_2_vue(avn_data)\n result = {'avn_data': avn_new_data, 'child_data':{}}\n return result\n\n def db_2_vue(self, avn_data):\n result = []\n\n for pid, vue_line in enumerate(avn_data, start=1):\n line = eval(str(vue_line))\n try:\n count_str = eval(line.get('content').replace('null','\"\"')) if line.get('content') != '' else ''\n if len(count_str) >2:\n result.append(self.add_dict_child(line, pid))\n elif pid ==2:\n result.append(self.add_dict_child(line, pid))\n else:\n result.append(line)\n except:\n pass\n return result\n\n def add_dict_child(self, line,pid):\n result = line\n content_dict = eval(line.get('content').replace('null','\"\"'))\n result['children'] = []\n for sonid,son in enumerate(content_dict, start=1):\n # childDict = {}\n # childDict['id'] = str(pid) + '.' + str(sonid)\n # childDict['type'] = son\n # childDict['content'] = str(content_dict.get(son))\n # childDict['count'] = ''\n # childDict['current'] = ''\n childDictid,childDict = self.set_table(son,str(content_dict.get(son)), pid, sonid)\n if type(content_dict.get(son)) is list:\n childDict['children'] = self.add_list_child(content_dict.get(son), childDictid)\n if type(content_dict.get(son)) is dict:\n childDict['children'] = self.add_list_child(content_dict.get(son).values(), childDictid,title='基站')\n result['children'].append(childDict)\n return result\n\n\n def add_list_child(self, childlist,pid,title='卫星'):\n result = []\n for childid, child in enumerate(childlist, start=1):\n child_result_id, child_result = self.set_table('%s%d'%(title, childid), '详细...', pid, childid)\n son_list = []\n for sonid, son in enumerate(child, start=1):\n id,son_result = self.set_table(son,child.get(son),child_result_id, sonid)\n son_list.append(son_result)\n child_result['children'] =son_list\n result.append(child_result)\n return result\n\n\n def set_table(self, key,value, pid, sonid):\n childDict = {}\n childDict['id'] = str(pid) + '.' + str(sonid)\n childDict['type'] = key\n childDict['content'] = value\n childDict['count'] = ''\n childDict['current'] = ''\n return childDict['id'],childDict\n\n def db_2_dict(self, avn_data):\n result = []\n for pid,r in enumerate(avn_data,start=1):\n #result[avn_data.index(r)] = eval(str(r))\n para_dict = eval(str(r))\n child = []\n\n try:\n content_dict = eval(para_dict.get('content')) if para_dict.get('content')!='' else {}\n except:\n pass\n for id,content_key in enumerate(content_dict, start=1):\n childDict = {}\n iter_index = 10\n childDict['id'] = str(pid) + '.' 
+str(id)\n childDict['type'] = content_key\n childDict['count'] = r.count\n childDict['current'] = r.current.strftime('%Y-%m-%d %H:%M:%S')\n if type(content_dict.get(content_key)) is dict:\n\n children = self.set_child(content_dict.get(content_key), childDict['id'], iter_index*10)\n if children:\n para_dict['children'] = children\n elif type(content_dict.get(content_key)) is list:\n children = self.set_childlist(content_dict.get(content_key), childDict['id'])\n if children:\n para_dict['children'] = children\n\n childDict['content'] = content_dict.get(content_key)\n if 'creatime' in para_dict:\n del para_dict['creatime']\n del para_dict['_locked']\n result.append(para_dict)\n return result\n\n def set_childlist(self,child,pid):\n child_list = []\n if child:\n for id, son in enumerate(child, start=1):\n pass\n\n def set_child(self, child,pid,iter_index):\n child_list = []\n if child:\n for id,son in enumerate(child, start=1):\n if type(child.get(son)) is not dict:\n childDict = {}\n childDict['id'] = str(pid) + '.' + str(id)\n childDict['type'] = son\n childDict['content'] = child.get(son)\n childDict['count'] = ''\n childDict['current'] = ''\n child_list.append(childDict)\n else:\n childDict = {}\n childDict['id'] = pid + '.' + str(id)\n childDict['type'] = '详细信息'\n childDict['content'] = '...'\n childDict['count'] = ''\n childDict['current'] = ''\n child_son = self.set_child(child.get(son),childDict['id'], iter_index*10)\n childDict['children'] = child_son\n\n child_list.append(childDict)\n\n return child_list\n\n\n\n\nclass IboxLogin(BaseHandler):\n\n def post(self):\n try:\n host = self.get_argument('serverip')\n port = self.get_argument('serverport')\n USBI = USBIAgree.get_instances(host, port)\n USBI.connect_socket()\n session = USBI.session\n\n #with closing(session):\n if session:\n USBI.sendMsg('0001')\n\n result = {'result': 0, 'Conn': 1}\n return_data = json.dumps(result, ensure_ascii=False)\n self.write(return_data)\n\n except Exception as e:\n result = {'result': 1, 'Conn': 1, 'error': e}\n return_data = json.dumps(result, ensure_ascii=False)\n self.write(return_data)\n\n def put(self):\n try:\n result = {'result': 0, 'Conn': 0}\n host = self.get_argument('serverip')\n port = self.get_argument('serverport')\n return_data = json.dumps(result, ensure_ascii=False)\n usbi = USBIAgree.get_instances(host, port)\n if usbi.session:\n usbi.handle_close()\n self.write(return_data)\n except Exception as e:\n result = {'result': -1, 'Conn': 1, 'error': e}\n return_data = json.dumps(result, ensure_ascii=False)\n self.write(return_data)\n\n\n def options(self):\n result = {'result': 0}\n return_data = json.dumps(result, ensure_ascii=False)\n self.write(return_data)\n\n\nclass CheckAVN(BaseHandler):\n\n def post(self):\n try:\n return_data = self.response_msg()\n self.write(return_data)\n except Exception as e:\n result = {'result': -1, 'Conn': 1, 'error': e}\n return_data = json.dumps(result, ensure_ascii=False)\n self.write(return_data)\n\n def response_msg(self):\n strip = self.get_argument('serverip')\n port = self.get_argument('serverport')\n index = self.get_argument('index')\n cmd = USBI_CMD_DICT.get(index)\n if strip and port:\n USBI = USBIAgree.get_instances(strip, port)\n session = USBI.session\n if session:\n result_return = USBI.sendMsg(cmd)\n result = {'result': 0, 'Conn': 1, 'result_return': str(result_return)}\n return_data = json.dumps(result, ensure_ascii=False)\n return return_data\n else:\n result = {'result': -1, 'Conn': 1, 'error': 'connect tcp error'}\n return_data = 
json.dumps(result, ensure_ascii=False)\n return return_data\n def put(self):\n try:\n strip = self.get_argument('serverip')\n port = self.get_argument('serverport')\n index = self.get_argument('index')\n cmd = index.lower()\n if strip and port:\n USBI = USBIAgree.get_instances(strip, port)\n session = USBI.session\n if session:\n USBI.sendMsg(cmd)\n result = {'result': 0, 'Conn': 1}\n return_data = json.dumps(result, ensure_ascii=False)\n self.write(return_data)\n else:\n result = {'result': -1, 'Conn': 1, 'error': 'connect tcp error'}\n return_data = json.dumps(result, ensure_ascii=False)\n self.write(return_data)\n\n except Exception as e:\n result = {'result': -1, 'Conn': 1, 'error': e}\n return_data = json.dumps(result, ensure_ascii=False)\n self.write(return_data)\n\n def options(self):\n result = {'result': 0}\n return_data = json.dumps(result, ensure_ascii=False)\n self.write(return_data)\n\n\nclass telematics_Call(BaseHandler):\n\n def post(self):\n try:\n strip = self.get_argument('serverip')\n port = self.get_argument('serverport')\n CallType = self.get_argument('CallType')\n CallCenterID = self.get_argument('CallCenterID')\n cmd = '0203'\n if strip and port:\n USBI = USBIAgree.get_instances(strip, port)\n session = USBI.session\n if session:\n USBI.sendMsg(cmd, CallType=CallType, CallCenterID=CallCenterID)\n result = {'result': 0, 'Conn': 1}\n return_data = json.dumps(result, ensure_ascii=False)\n self.write(return_data)\n else:\n result = {'result': -1, 'Conn': 1, 'error': 'connect tcp error'}\n return_data = json.dumps(result, ensure_ascii=False)\n self.write(return_data)\n\n except Exception as e:\n result = {'result': -1, 'Conn': 1, 'error': e}\n return_data = json.dumps(result, ensure_ascii=False)\n self.write(return_data)\n\nclass RemoteReflash_Check(BaseHandler):\n\n def post(self):\n try:\n strip = self.get_argument('serverip')\n port = self.get_argument('serverport')\n DataValue = self.get_argument('DataValue')\n cmd = '0207'\n if strip and port:\n USBI = USBIAgree.get_instances(strip, port)\n session = USBI.session\n if session:\n USBI.sendMsg(cmd, DataValue=DataValue)\n result = {'result': 0, 'Conn': 1}\n return_data = json.dumps(result, ensure_ascii=False)\n self.write(return_data)\n else:\n result = {'result': -1, 'Conn': 1, 'error': 'connect tcp error'}\n return_data = json.dumps(result, ensure_ascii=False)\n self.write(return_data)\n\n except Exception as e:\n result = {'result': -1, 'Conn': 1, 'error': e}\n return_data = json.dumps(result, ensure_ascii=False)\n self.write(return_data)\n\nclass RemoteReflash_Perform(BaseHandler):\n\n def post(self):\n try:\n strip = self.get_argument('serverip')\n port = self.get_argument('serverport')\n ex_ECU_ID = self.get_argument('ex_ECU_ID')\n ReflashReqInfo = self.get_argument('ReflashReqInfo')\n cmd = '0208'\n if strip and port:\n USBI = USBIAgree.get_instances(strip, port)\n session = USBI.session\n if session:\n USBI.sendMsg(cmd, ex_ECU_ID=ex_ECU_ID, ReflashReqInfo=ReflashReqInfo)\n result = {'result': 0, 'Conn': 1}\n return_data = json.dumps(result, ensure_ascii=False)\n self.write(return_data)\n else:\n result = {'result': -1, 'Conn': 1, 'error': 'connect tcp error'}\n return_data = json.dumps(result, ensure_ascii=False)\n self.write(return_data)\n\n except Exception as e:\n result = {'result': -1, 'Conn': 1, 'error': e}\n return_data = json.dumps(result, ensure_ascii=False)\n self.write(return_data)\n\nclass Booking_UpdateTime(BaseHandler):\n\n def post(self):\n try:\n strip = self.get_argument('serverip')\n port = 
self.get_argument('serverport')\n ECU_ID = self.get_argument('ECU_ID')\n DATE = self.get_argument('DATE')\n cmd = '020d'\n if strip and port:\n USBI = USBIAgree.get_instances(strip, port)\n session = USBI.session\n if session:\n USBI.sendMsg(cmd, ECU_ID=ECU_ID, DATE=DATE)\n result = {'result': 0, 'Conn': 1}\n return_data = json.dumps(result, ensure_ascii=False)\n self.write(return_data)\n else:\n result = {'result': -1, 'Conn': 1, 'error': 'connect tcp error'}\n return_data = json.dumps(result, ensure_ascii=False)\n self.write(return_data)\n\n except Exception as e:\n result = {'result': -1, 'Conn': 1, 'error': e}\n return_data = json.dumps(result, ensure_ascii=False)\n self.write(return_data)\n\nclass resetbox(BaseHandler):\n\n def post(self):\n strip = self.get_argument('serverip')\n port = self.get_argument('serverport')\n reset_value = self.get_argument('reset_value')\n if strip and port:\n USBI = USBIAgree.get_instances(strip, port)\n session = USBI.session\n if session:\n USBI.sendMsg('0209',reset_value=reset_value)\n result = {'result': 0}\n return_data = json.dumps(result, ensure_ascii=False)\n self.write(return_data)\n\nclass setFault(BaseHandler):\n\n def post(self):\n strip = self.get_argument('serverip')\n port = self.get_argument('serverport')\n if strip and port:\n USBI = USBIAgree.get_instances(strip, port)\n session = USBI.session\n if session:\n USBI.nosendlist.append('0301')\n USBI.nosendlist.append('0304')\n result = {'result': 0}\n return_data = json.dumps(result, ensure_ascii=False)\n self.write(return_data)\n\n\n\n","sub_path":"Simu/avn_simu.py","file_name":"avn_simu.py","file_ext":"py","file_size_in_byte":16953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"302154951","text":"import json\nimport random\nimport datetime\nfrom datetime import datetime\n\nclass Result():\n def __init__(self, attempts, name, date):\n self.attempts = attempts\n self.name = name\n self.date = date\n\nmyattempts = 0\nmyname=input(\"Bitte Namen eingeben: \")\n\ndef display_highscores(x=0, scores = []):\n print(\"-------------------------------------------------------------\")\n print(\"High Scores:\")\n with open(\"scores.json\",\"r\") as file:\n scores = json.loads(file.read())\n sort_scores = sorted(scores, key=lambda i: i['attempts'])\n while x < 3:\n entries = sort_scores[x]\n t=entries['date']\n t=datetime.strptime(t, '%Y-%m-%d %H:%M:%S.%f')\n print(f\"{entries['name']}'s Score am {t.day}.{t.month}.{t.year}: {entries['attempts']}\")\n x += 1\n print(f\"Schaffst du es die Zahl mit weniger Versuchen zu erraten, {myname}?!\")\n print(\"-------------------------------------------------------------\")\n return scores\n\ndef write_scores_to_sheet(scores,attempts,current_time = datetime.now()):\n with open(\"scores.json\", \"w\") as file:\n this_games_score = Result(attempts=attempts, date=str(current_time), name=myname)\n scores.append(this_games_score.__dict__)\n file.write(json.dumps(scores))\n\n\ndef play_game(scores, secret=random.randint(1,30), attempts=0):\n print(\"-------------------------------------------------------------\")\n guess = int(input(f\"Bitte gib eine Zahl zwischen 1 und 30 ein, {myname}: \"))\n lower_limit = 1\n upper_limit = 30\n while True:\n attempts += 1\n if guess == secret:\n print(f\"Gewonnen, {guess} war die richtige Zahl!\")\n print(f\"Du hast {attempts} Versuche gebraucht, {myname}!\")\n write_scores_to_sheet(attempts=attempts, scores=scores)\n break\n elif guess > secret:\n print(f\"Die gesuchte 
Zahl ist kleiner als {guess}...\")\n upper_limit=guess\n guess = int(input(f\"Bitte gib eine Zahl, zwischen {lower_limit} und {guess} ein: \"))\n\n elif guess < secret:\n print(f\"Die gesuchte Zahl ist größer als {guess}...\")\n lower_limit=guess\n guess = int(input(f\"Bitte gib eine Zahl, zwischen {guess} und {upper_limit} ein: \"))\n\n\nplay_game(scores=display_highscores())\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"2951061","text":"import numpy as np\nimport copy\nfrom datetime import date\n\n__author__ = 'Gregory'\n\n\nclass DataPackage:\n def __init__(self, input_settings):\n self.training_start_date_str = input_settings['training_start_date']\n self.training_end_date_str = input_settings['training_end_date']\n self.validation_start_date_str = input_settings['validation_start_date']\n self.validation_end_date_str = input_settings['validation_end_data']\n\n self.training_start_date = date(int(self.training_start_date_str[6:10]),\n int(self.training_start_date_str[3:5]), int(self.training_start_date_str[:2]))\n self.training_end_date = date(int(self.training_end_date_str[6:10]),\n int(self.training_end_date_str[3:5]), int(self.training_end_date_str[:2]))\n self.validation_start_date = date(int(self.validation_start_date_str[6:10]),\n int(self.validation_start_date_str[3:5]),\n int(self.validation_start_date_str[:2]))\n self.validation_end_date = date(int(self.validation_end_date_str[6:10]),\n int(self.validation_end_date_str[3:5]), int(self.validation_end_date_str[:2]))\n\n self.training_input = []\n self.training_target = []\n self.validation_input = []\n self.validation_target = []\n\n # Format: id, name, units, [max val, min val], time_spot\n self.training_input_meta = [[], [], [], [], []]\n self.training_target_meta = [[], [], [], [], []]\n self.validation_input_meta = [[], [], [], [], []]\n self.validation_target_meta = [[], [], [], [], []]\n\n def get_min_max_list(self):\n max_min_list = [self.training_input_meta[3], self.training_target_meta[3], self.validation_input_meta[3],\n self.validation_target_meta[3]]\n return max_min_list\n\n\nclass DataPacker:\n def __init__(self, meter_groups=None):\n if meter_groups is not None:\n self.meter_groups = meter_groups\n else:\n self.meter_groups = []\n\n def add_meter_group(self, meter_groups):\n self.meter_groups.extend(meter_groups)\n\n def get_groups_data(self, settings, start_date, end_date, adv_override=None):\n data = []\n meta = [[], [], [], [], []]\n for group in self.meter_groups:\n for meter in group.meters_in_group:\n if str(meter.id) in settings:\n window, adv, precision = settings[str(meter.id)].split(',')\n if adv_override is not None:\n adv = adv_override\n data.extend(meter.get_meter_data(start_date, end_date, int(window), int(adv)))\n\n for i in range(int(window)):\n meta[0].append('({}).({})#t_({})'.format(group.group_name, str(meter.id), str(-i + int(adv))))\n meta[1].append(meter.name)\n meta[2].append(meter.units)\n meta[3].append([meter.min_value, meter.max_value])\n meta[4].append(-i + int(adv))\n\n if str(meter.id) + '_max_values' in settings:\n max_val_params = settings[str(meter.id) + '_max_values'].split(',')\n for i in range(int(max_val_params[0])):\n data.append(meter.get_meter_max_value(\n start_date, end_date, int(max_val_params[1 + i * 3]), int(max_val_params[2 + i * 3])))\n\n meta[0].append('({}).({})#duration_({})'.format(\n group.group_name, str(meter.id) 
+ '_max_values', str(max_val_params[1 + i * 3])))\n meta[1].append(meter.name + '_max_values')\n meta[2].append(meter.units)\n meta[3].append([meter.min_value, meter.max_value])\n meta[4].append(str(max_val_params[1 + i * 3]))\n\n if str(meter.id) + '_eroding_max' in settings:\n max_val_params = settings[str(meter.id) + '_eroding_max'].split(',')\n for i in range(int(max_val_params[0])):\n data.append(meter.get_meter_eroding_max(\n start_date, end_date, float(max_val_params[1 + i * 3]), int(max_val_params[2 + i * 3])))\n\n meta[0].append('({}).({})#duration_({})'.format(\n group.group_name, str(meter.id) + '_eroding_max', str(max_val_params[1 + i * 3])))\n meta[1].append(meter.name + '_eroding_max')\n meta[2].append(meter.units)\n meta[3].append([meter.min_value, meter.max_value])\n meta[4].append(str(max_val_params[1 + i * 3]))\n\n return np.array(data).round(2).T, meta\n\n # if 'Day' in settings:\n # day_counter = settings['Day']\n # data_destination.append()\n # # TODO: Finish adding counter\n\n def package_data(self, input_settings, target_settings, forecast_horizon_override=None):\n\n package = DataPackage(input_settings)\n\n input_data, input_meta = self.get_groups_data(input_settings, package.training_start_date,\n package.validation_end_date)\n\n target_data, target_meta = self.get_groups_data(target_settings, package.training_start_date,\n package.validation_end_date,\n adv_override=forecast_horizon_override)\n\n training_end_value = (package.training_end_date - package.training_start_date).days * 96\n validation_start_value = (package.validation_start_date - package.training_start_date).days * 96\n\n package.training_input = copy.deepcopy(input_data[0:training_end_value])\n package.training_input_meta = copy.deepcopy(input_meta)\n\n package.validation_input = copy.deepcopy(input_data[validation_start_value:])\n package.validation_input_meta = copy.deepcopy(input_meta)\n\n package.training_target = copy.deepcopy(target_data[0:training_end_value])\n package.training_target_meta = copy.deepcopy(target_meta)\n\n package.validation_target = copy.deepcopy(target_data[validation_start_value:])\n package.validation_target_meta = copy.deepcopy(target_meta)\n\n return package\n","sub_path":"turbidityforecastingeann/datapacker.py","file_name":"datapacker.py","file_ext":"py","file_size_in_byte":6523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"647356187","text":"from .gamestate import GameState\nimport pygame as pg\nfrom settings import *\nfrom os import path\nimport json\n\ngame_folder = path.dirname(path.dirname(__file__))\nsave_folder = path.join(game_folder, \"saves\")\n\nclass ScoreScreen(GameState):\n def __init__(self):\n super(ScoreScreen, self).__init__()\n self.title = self.font.render(\"SCORE SCREEN\", True, pg.Color(\"dodgerblue\"))\n self.title_rect = self.title.get_rect(center=self.screen_rect.center)\n self.persist = {}\n\n def startup(self, persistent):\n self.persist = persistent\n\n def get_event(self, event):\n if event.type == pg.QUIT:\n self.quit = True\n keystate = pg.key.get_pressed()\n if keystate[pg.K_RETURN]:\n player_name = self.persist['player']\n with open(path.join(save_folder, player_name+'-save.txt'), 'w') as file:\n file.write(json.dumps(self.persist))\n\n\n self.next_state = \"OUTRO_LEVEL_1\"\n self.done = True\n\n def draw(self, surface):\n cleared_level = self.persist['cleared_level']\n score = self.persist['cleared_level_score']\n score_string = \"Score: \" + str(score)\n score_text = 
self.font.render(score_string, True, pg.Color(\"dodgerblue\"))\n score_rect = score_text.get_rect(center=(WIDTH/2,HEIGHT / 4))\n surface.blit(self.title, self.title_rect)\n surface.blit(score_text, score_rect)\n","sub_path":"states/scorescreen.py","file_name":"scorescreen.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"183014790","text":"import random\nimport pygame.time\nfrom nlc_dino_runner.components.obstacles.cactus import Cactus\nfrom nlc_dino_runner.components.obstacles.bird import Bird\nfrom nlc_dino_runner.utils.constants import SMALL_CACTUS, LARGE_CACTUS, BIRD #HIT_SOUND, GAME_OVER_SOUND #AÑADIDO\n\n\n\nclass ObstaclesManager:\n def __init__(self):\n self.obstacles_list = []\n\n def update(self, game):\n obstacles_type = [Cactus(SMALL_CACTUS), Cactus(LARGE_CACTUS, 290), Bird(BIRD, random.randint(180, 260))]\n if len(self.obstacles_list) == 0 :\n self.obstacles_list.append(random.choice(obstacles_type))\n\n for obstacle in self.obstacles_list:\n obstacle.update(game.game_speed, self.obstacles_list)\n\n if game.player.hammer and game.player.hammer.rect.colliderect(obstacle.rect):\n self.obstacles_list.remove(obstacle)\n\n if game.player.dino_rect.colliderect(obstacle.rect):\n if game.player.shield:\n self.obstacles_list.remove(obstacle)\n elif game.live_manager.lives > 1:\n #HIT_SOUND.play() # AÑADIDO\n game.live_manager.reduce_lives()\n game.player.shield = True\n start_time = pygame.time.get_ticks()\n game.player.shield_time_up = start_time + 1000\n else:\n game.player.draw_dead(game.screen)\n if game.points > game.highest_score:\n game.player.draw_dead(game.screen)\n game.highest_score = game.points\n pygame.time.delay(1500)\n game.playing = False\n game.death_count += 1\n #GAME_OVER_SOUND.play()\n break\n\n def draw(self, screen):\n for obstacles in self.obstacles_list:\n obstacles.draw(screen)\n\n def reset_obstacles(self):\n self.obstacles_list = []\n","sub_path":"nlc_dino_runner/components/obstacles/obtaclesManager.py","file_name":"obtaclesManager.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"81781801","text":"#!/usr/bin/env python3\nimport sys\nfrom pathlib import Path\nfrom typing import List, Set\n\nimport PyQt5.QtWidgets as Widgets\nfrom PyQt5 import uic, QtCore\n\nMAIN_WINDOW_PATH = Path('./ui/MainWindow.ui')\nERROR_WINDOW_PATH = Path('./ui/ErrorWindow.ui')\nINPUT_WINDOW_PATH = Path('./ui/InputWindow.ui')\nSEARCH_WINDOW_PATH = Path('./ui/SearchWindow.ui')\n\n\ndef sltext(text: str) -> str:\n return text.lower().strip()\n\n\nclass Point:\n def __init__(self,\n word: str = '',\n pages: List[int] = None) -> None:\n self._word = word.strip()\n self._pages = set(pages) or set()\n\n @property\n def word(self) -> str:\n return self._word\n\n @property\n def pages(self) -> List[int]:\n return list(sorted(self._pages))\n\n def update(self,\n pages: List[int] or Set[int]) -> None:\n self._pages = self._pages.union(pages)\n\n def __contains__(self,\n page: int) -> bool:\n return page in self.pages\n\n def __str__(self) -> str:\n num = f\"Слово '{self.word}'\"\n pages = ', '.join(str(page) for page in self.pages)\n pages = f\"На страницах: {pages}\"\n\n return f\"{num}\\n{pages}\"\n\n\nclass Pointer:\n def __init__(self,\n points: List[Point] = None) -> None:\n self._points = points or []\n\n @property\n def points(self) -> List[Point]:\n return self._points\n\n def search(self,\n 
word: str) -> Point or None:\n word = sltext(word)\n for point in self.points:\n if word == sltext(point.word):\n return point\n\n def add(self,\n point: Point) -> None:\n if point.word not in self:\n self._points += [point]\n return\n\n for i in range(len(self)):\n if self._points[i].word == point.word:\n self._points[i].update(point.pages)\n\n def __contains__(self,\n word: str) -> bool:\n return any(word.strip() == point.word for point in self)\n\n def __getitem__(self,\n word: str) -> Point or None:\n word = word.strip()\n if word not in self:\n return\n for point in self:\n if word == point.word:\n return point\n\n def __iter__(self) -> iter:\n return iter(self.points)\n\n def __str__(self) -> str:\n if len(self) == 0:\n return ''\n\n corner = '-' * 25\n inside = '\\n' + '-' * 20 + '\\n'\n\n points = inside.join(str(point) for point in self.points)\n return f\"{corner}\\n{points}\\n{corner}\"\n\n def __len__(self) -> int:\n return len(self.points)\n\n\npointer = Pointer()\n\n\nclass MainWindow(Widgets.QMainWindow):\n def __init__(self, *args) -> None:\n super().__init__()\n uic.loadUi(MAIN_WINDOW_PATH, self)\n\n self.initUI()\n\n def initUI(self) -> None:\n self.InputWindow = InputWindow(self, [])\n self.ErrorWindow = ErrorWindow(self, [])\n self.SearchWindow = SearchWindow(self, [])\n self.setWindowTitle(\"Предметный указатель\")\n\n self.SearchButton.clicked.connect(self.search)\n self.AddButton.clicked.connect(self.add)\n self.ExitButton.clicked.connect(self.close)\n\n self.checkThreadTimer = QtCore.QTimer(self)\n self.checkThreadTimer.setInterval(500)\n self.checkThreadTimer.start()\n\n self.checkThreadTimer.timeout.connect(self.show)\n\n def show(self) -> None:\n self.PointsBrowser.setText(str(pointer))\n super().show()\n\n def add(self) -> None:\n self.InputWindow.close()\n self.InputWindow.show()\n\n def search(self) -> None:\n self.SearchWindow.close()\n\n if len(pointer) == 0:\n self.error(\"Предметный указатель пуст, негде искать\")\n else:\n self.SearchWindow.show()\n\n def error(self, msg: str) -> None:\n self.ErrorWindow.close()\n self.ErrorWindow.display(msg)\n\n def close(self) -> None:\n self.ErrorWindow.close()\n self.InputWindow.close()\n self.SearchWindow.close()\n\n super().close()\n\n\nclass InputWindow(Widgets.QWidget):\n def __init__(self, *args) -> None:\n super().__init__()\n uic.loadUi(INPUT_WINDOW_PATH, self)\n\n self.initUI()\n\n def initUI(self) -> None:\n self.EnterButton.clicked.connect(self.input)\n self.ClearButton.clicked.connect(self.clear)\n self.setWindowTitle(\"Ввод компонентов указателя\")\n\n def stext(self, obj) -> str:\n return obj.text().strip()\n\n def clear(self) -> None:\n self.WordInput.clear()\n self.PagesInput.clear()\n\n def input(self) -> None:\n if len(self.stext(self.WordInput)) == 0 or \\\n len(self.stext(self.PagesInput)) == 0:\n self.clear()\n return\n word = self.stext(self.WordInput)\n pages = self.stext(self.PagesInput)\n if ',' in pages:\n pages = pages.split(',')\n else:\n pages = pages.split()\n\n try:\n pages = [int(page) for page in pages]\n except ValueError:\n pass\n else:\n pointer.add(Point(word, pages))\n finally:\n self.clear()\n\n\nclass ErrorWindow(Widgets.QWidget):\n def __init__(self, *args) -> None:\n super().__init__()\n uic.loadUi(ERROR_WINDOW_PATH, self)\n\n self.initUI()\n\n def initUI(self) -> None:\n self.Box.clicked.connect(self.close)\n self.setWindowTitle(\"Ошибка\")\n\n def display(self, message: str) -> None:\n error = \"
Ошибка!
\"\n self.ErrorBrowser.setText(f\"{error}\\n{message}\")\n self.show()\n\n\nclass SearchWindow(Widgets.QWidget):\n def __init__(self, *args) -> None:\n super().__init__()\n uic.loadUi(SEARCH_WINDOW_PATH, self)\n\n self.initUI()\n\n def initUI(self) -> None:\n self.SearchButton.clicked.connect(self.show_results)\n self.ExitButton.clicked.connect(self.close)\n self.setWindowTitle(\"Поиск слова\")\n\n def show_results(self) -> None:\n if len(self.QueryInput.text().strip()) == 0:\n self.ResultsBrowser.setText(\"
Введите запрос
\")\n return\n\n query = self.QueryInput.text().strip()\n self.clear()\n\n point = pointer.search(query)\n\n if point is None:\n self.ResultsBrowser.setText(\"Слово не найдено\")\n else:\n msg = f\"Слово '{query}' встречается на: \"\n pages = ', '.join(str(page) for page in point.pages)\n self.ResultsBrowser.setText(f\"{msg} {pages} страницах\")\n\n def clear(self) -> None:\n self.ResultsBrowser.clear()\n self.QueryInput.clear()\n\n def close(self) -> None:\n self.clear()\n super().close()\n\n\ndef main() -> None:\n app = Widgets.QApplication(sys.argv)\n RoutesWindow = MainWindow()\n RoutesWindow.show()\n exit(app.exec_())\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Programming-basics/Labs/Lab7/Task2/task_2.py","file_name":"task_2.py","file_ext":"py","file_size_in_byte":7153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"121309281","text":"import FBKlasa as FB\r\n\r\nclass api:\r\n def check(word, list):\r\n if word in list:\r\n return True\r\n else:\r\n return False\r\n \r\n opsionet = ['LINK', 'LIKES', 'BIRTHDAY', 'HOMETOWN', 'LOCATION', 'FRIENDS', 'POSTS', 'GENDER', 'EMAIL']\r\n \r\n FB.Pershendetje()\r\n while(True):\r\n print('Shtypni: Show dhe njerin nga opsionet')\r\n print(*opsionet)\r\n hyrja = input().split(' ',2)\r\n hyrja1 = hyrja[0]\r\n if(hyrja1.upper()=='SHOW'):\r\n hyrja2 = hyrja[1]\r\n if check(hyrja2.upper(),opsionet):\r\n FB.showOption(hyrja[1])\r\n else:\r\n print(\"Gabim Sintaksa\")\r\n else:\r\n print(\"Gabim Sintaksa\")","sub_path":"Graph-API FB/Facebook.py","file_name":"Facebook.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"121339793","text":"\r\ndef convert_prediction_to_mask(prediction, thr: float=0):\r\n mask = prediction > thr\r\n return mask\r\n\r\n# INTERSECTION OVER UNION\r\ndef get_iou(prediction, target):\r\n\r\n if target.shape != prediction.shape:\r\n raise Exception('A target shape doesn`t match with a prediction shape')\r\n\r\n if target.dim() != 3:\r\n raise Exception(f'A target dim is {target.dim()}. Must be 3.')\r\n\r\n pred_copy = prediction.clone()\r\n pred_copy = convert_prediction_to_mask(pred_copy)\r\n \r\n target_copy = target.clone()\r\n target_copy = convert_prediction_to_mask(target_copy)\r\n\r\n intersection = torch.bitwise_and(target_copy, pred_copy).sum().item()\r\n union = torch.bitwise_or(target_copy, pred_copy).sum().item()\r\n \r\n if (target_copy.sum().item() == 0) and (pred_copy.sum().item() == 0):\r\n return 1\r\n elif union == 0:\r\n return 0\r\n\r\n return intersection / union\r\n\r\ndef get_mean_iou(predictions, targets):\r\n\r\n with torch.no_grad():\r\n if targets.shape != predictions.shape:\r\n raise Exception('A targets shape doesn`t match with a predictions shape')\r\n\r\n if targets.dim() != 4:\r\n raise Exception(f'A target dim is {targets.dim()}. Must be 4.')\r\n\r\n iou_sum = 0\r\n for i in range(targets.shape[0]):\r\n iou = get_iou(targets[i], predictions[i])\r\n iou_sum += iou\r\n mean_iou = iou_sum / targets.shape[0]\r\n return mean_iou\r\n\r\n# PIXEL ACCURACY\r\ndef get_pixel_acc(prediction, target):\r\n\r\n if target.shape != prediction.shape:\r\n raise Exception('A target shape doesn`t match with a prediction shape')\r\n\r\n if target.dim() != 3:\r\n raise Exception(f'A target dim is {target.dim()}. 
Must be 3.')\r\n\r\n pred_copy = prediction.clone()\r\n pred_copy = convert_prediction_to_mask(pred_copy)\r\n\r\n target_copy = target.clone()\r\n target_copy = convert_prediction_to_mask(target_copy)\r\n\r\n same = (target_copy == pred_copy).sum().item()\r\n channels, height, width = target.shape\r\n area = height * width * channels\r\n acc = same / area\r\n return acc\r\n\r\ndef get_mean_pixel_acc(predictions, targets):\r\n\r\n with torch.no_grad():\r\n if targets.shape != predictions.shape:\r\n raise Exception('A targets shape doesn`t match with a predictions shape')\r\n\r\n if targets.dim() != 4:\r\n raise Exception(f'A target dim is {targets.dim()}. Must be 4.')\r\n\r\n acc_sum = 0\r\n for i in range(targets.shape[0]):\r\n acc = get_pixel_acc(targets[i], predictions[i])\r\n acc_sum += acc\r\n mean_acc = acc_sum / targets.shape[0]\r\n return mean_acc\r\n","sub_path":"train/metric.py","file_name":"metric.py","file_ext":"py","file_size_in_byte":2645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"629464999","text":"#-------------------------------------------------------------------------------\n# Copyright (c) 2012 Michael Hull.\n# All rights reserved.\n# \n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# \n# - Redistributions of source code must retain the above copyright notice, \n# this list of conditions and the following disclaimer.\n# - Redistributions in binary form must reproduce the above copyright notice, \n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n# \n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n#-------------------------------------------------------------------------------\nimport logging\nimport os\nimport inspect\n\n\nclass LogMgrState(object):\n Ready = \"Ready\"\n Configuring = \"Configuring\"\n Uninitialised = \"Uninitalised\"\n\n\nclass LogMgr(object):\n \n initState = LogMgrState.Uninitialised \n loggers = {}\n \n \n @classmethod\n def config(cls):\n from locmgr import LocMgr\n \n if cls.initState == LogMgrState.Configuring: return\n if cls.initState == LogMgrState.Ready: return\n \n \n cls.initState = LogMgrState.Configuring\n \n logfilename = os.path.join(LocMgr.getLogPath(), \"log.html\")\n logging.basicConfig(filename=logfilename, level=logging.INFO)\n \n cls.initState = LogMgrState.Ready\n \n cls.infoFromLogger(\"Logger Started OK\")\n\n\n\n @classmethod\n def PyfileToClass(cls, filename):\n localPath = filename\n morphforgeLib = False\n if \"morphforge\" in filename:\n localPath = \"morphforge\" + filename.split(\"morphforge\")[-1]\n morphforgeLib = True\n localPath = localPath.replace(\".py\", \"\")\n localPath = localPath.replace(\"/\", \".\")\n return localPath, morphforgeLib\n \n \n @classmethod\n def getCaller(cls):\n currentFrame = inspect.currentframe()\n outerFrames = inspect.getouterframes(currentFrame)\n outFramesNotThisClass = [f for f in outerFrames if not f[1].endswith(\"logmgr.py\") ]\n\n prevCallFrame = outFramesNotThisClass[0]\n caller = cls.PyfileToClass(prevCallFrame[1])\n return caller, prevCallFrame[2]\n \n @classmethod\n def infoFromLogger(cls, msg):\n packageName = \"morphforge.core.logmgr\"\n if not packageName in cls.loggers:\n cls.loggers[packageName] = cls.createLogger(packageName)\n cls.loggers[packageName].info(msg)\n \n \n \n \n \n @classmethod\n def _isLoggingActiveAndReady(cls):\n \n\n if cls.initState == LogMgrState.Ready: \n from settingsmgr import SettingsMgr\n if not SettingsMgr.isLogging(): return False \n return True\n elif cls.initState == LogMgrState.Configuring: return False\n elif cls.initState == LogMgrState.Uninitialised:\n cls.config()\n return True\n else:\n raise ValueError()\n \n \n \n @classmethod\n def info(cls, msg):\n if not cls._isLoggingActiveAndReady(): return \n cls.getLogger().info(msg)\n \n\n @classmethod\n def debug(cls, msg):\n if not cls._isLoggingActiveAndReady(): return \n cls.getLogger().debug(msg)\n \n @classmethod\n def warning(cls, msg):\n if not cls._isLoggingActiveAndReady(): return \n cls.getLogger().warning(msg)\n\n \n \n \n \n @classmethod\n def createLogger(cls, logName):\n logger = logging.getLogger(logName)\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n # create formatter\n formatter = logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\n # add formatter to ch\n ch.setFormatter(formatter)\n # add ch to logger\n logger.addHandler(ch)\n return logger\n \n \n @classmethod\n def getLogger(cls):\n \n # Find Who called us:\n callMod = \"DISABLEDLOGGING\" \n #(callMod, isMorphforgeLib), lineNum = cls.getCaller()\n \n \n if not callMod 
in cls.loggers:\n cls.loggers[callMod] = cls.createLogger(callMod)\n return cls.loggers[callMod]\n\n \n","sub_path":"src/morphforge/core/mgrs/logmgr.py","file_name":"logmgr.py","file_ext":"py","file_size_in_byte":5079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"427904486","text":"from operator import itemgetter\n \nfrom .models import AllContents\n\ndef search_func(user_terms): \n \"\"\"\n # Notes:\n 1) to get an \"and\" condition instead of \"or\", just add one filter after another.\n 2) But what I want is an \"or\" condition which I will later rank in order of number of search hits for each post.\n 3) values() would have produced a dictionary. Instead I used values_list\n 4) I couldn't used the or char with just one filter, as that would incur loss of which search terms were found\n 5) So instead I am doing multiple queries, one for each search term. Thus the search terms are in a loop \n # Known bugs: \n 1) If I type in sugar snap peas as input, it doesn't come up in bold\n 2) Type in \"pie\" and you get too many results with \"piece\" or \"bed\" produces \"cubed\", \"tart\" leads to \"start\" and \"starting\"\n \"\"\"\n num_terms = len(user_terms) # How many search terms did the user input \n \n q_converted=[None] * num_terms # q_converted is for when we convert from list of tuples to list of lists \n\n # See if the user has requested one or more ingredients to be excluded. They would do this with a minus sign.\n '''\n OLD CODE\n unwanted_ingredients = []\n for term in user_terms:\n if term[0]==\"-\": # If the first character is a minus sign, this means the user wants no posts with this term in it\n unwanted_ingredients.append(term[1:]) \n '''\n # NEW CODE\n unwanted_ingredients = [term[1:] for term in user_terms if term[0]==\"-\" ] \n\n # Now get all the posts that match all the remaining user search terms \n queryset=[None] * num_terms # Initialize queryset list with None \n for i, term in enumerate(user_terms):\n queryset[i] = AllContents.objects.filter(fullpost__icontains=term)\\\n .values_list('hyperlink', 'title') # We now have a list of querysets \n \n\n \n # Loop through any unwanted ingredients and exclude them \n for neg_term in unwanted_ingredients:\n for j in range(0,num_terms):\n queryset[j] = queryset[j].exclude(fullpost__icontains=neg_term) \n\n # So now we have one or more querysets (one queryset for each search term)\n # each of which each contains a list of tuples. We need to convert the list(s) of tuples to list(s) of lists. \n for j in range(0, num_terms): # convert to a list of lists\n q_converted[j]=list(map(list, queryset[j])) \n\n # Now stuff the search term(s) we found into each query result so that we can later show the user all the terms\n # satisfied by each post. We're putting them at position zero.\n for i, term in enumerate(user_terms): # this shows the search terms in the user's order\n for techpost in q_converted[i]:\n techpost.insert(0, term) \n\n # We currently have one query result for each search term. 
So next, combine all the query results into one list\n combined_list=[] \n for i in range(0,num_terms):\n combined_list = combined_list + q_converted[i] \n\n # If the combined list is empty, then we can go ahead and return now, and tell user there are no results \n if not combined_list: \n count = 0 \n trimmed_list = [['None']]\n context={'count': count, 'trimmed_list': trimmed_list} \n return(context) \n \n # Now sort the query results list by url so that the duplicates are grouped together \n #combined_list.sort(key=itemgetter(1)) # sort the list by the url \n \n # This next code snippet will remove all the duplicate post urls, starting with some setup, and then a for loop \n trimmed_list=[] \n trimmed_list.append(combined_list[0]) # put the first entire post into trimmed_list \n previous_post=trimmed_list[0] \n post_counter = 1\n # Now remove duplicate posts, while preserving the search hits found for each post.\n # I designed my for loop to leverage the sortedness (done above) which grouped the duplicate posts together\n for next_post in combined_list[1:]: # we need to start at the second element; that's the url\n if next_post[1] == previous_post[1]: # compare the urls\n post_counter += 1 # we are counting duplicates here \n new_string = next_post[0] + \", \" + previous_post[0] # We preserve the seach terms associated with each post \n trimmed_list[-1][0]= new_string # replace the search term string in the trimmed_list \n else: # We land here when there are no more dupes in the current grouping of dupes\n # put the post_counter at the end of the previous record\n previous_post.append(str(post_counter)) \n post_counter = 1 # reset the post counter so we can count the next set of dupes\n trimmed_list.append(next_post) \n previous_post = trimmed_list[-1] # now advance previous_post for the next time thru the loop \n previous_post.append(str(post_counter)) # The last post needs its counter \n \n\n for term_str in trimmed_list: \n post_title = term_str[2] # this is a more user-friendly name \n term_lis = term_str[0].split(',') \n for one_term in term_lis: \n \n one_term_stripped = one_term.strip() \n if one_term_stripped[-1] == \"s\":\n one_term_stripped = one_term_stripped[:-1] \n if (one_term_stripped.lower() in post_title.lower()): \n # if title and term (sadly, I also have to use a module called title. Two different things.)\n # Note: sugar snap peas is not meeting the first if. Instead it's meeting the last else \n post_title = post_title.lower().replace(one_term_stripped.lower(), \"\" + one_term_stripped.title() + \"\") \n post_title = post_title.title() # when I add this, then I get the Capital S problem back\n # The next ifs are bandaid code. What I'm trying to write is a very simple search engine, which is actually beyond\n # my ability. So I'm leaving the bandaid code for now, as at least it makes the results look better. 
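\n # Sketch of a whole-word alternative to the substring checks below, not part of the original module; the <b> tags are an assumption about the emphasis markup that was stripped from this record:\n # import re\n # pattern = re.compile(r'\\b' + re.escape(one_term_stripped) + r'\\b', re.IGNORECASE)\n # post_title = pattern.sub(lambda m: '<b>' + m.group(0) + '</b>', post_title)\n # Word boundaries keep \"pie\" from matching inside \"piece\", and each match keeps its original casing, which would make the capitalization repairs below unnecessary.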
\n if \"S\" in post_title: \n post_title=post_title.replace(\"S\", \"s\") \n if \"'S\" in post_title: \n post_title=post_title.replace(\"'S\",\"'s\") \n if \"'s\" in post_title:\n post_title=post_title.replace(\"'s\", \"'s\") \n if \"A\" in post_title:\n post_title=post_title.replace(\"A\", \"a\")\n if \"And\" in post_title:\n post_title=post_title.replace(\"And\", \"and\")\n if \"For\" in post_title:\n post_title=post_title.replace(\"For\", \"for\")\n if \"With\" in post_title:\n post_title=post_title.replace(\"With\", \"with\") \n if \"The\" in post_title:\n post_title=post_title.replace(\"The\", \"the\") \n if \"In\" in post_title:\n post_title=post_title.replace(\"In\", \"in\") \n if \"Or \" in post_title:\n post_title=post_title.replace(\"Or \", \"or \") \n if \"From\" in post_title:\n post_title=post_title.replace(\"From\", \"from\") \n term_str[2]=post_title # restore the less user-friendly name\n # Now get the context ready for returning to the view. Sort the results by relepvancy, which is how many terms found\n count=len(trimmed_list) \n \n trimmed_list.sort(key=itemgetter(-1), reverse=True) # Order and reverse the list title\n trimmed_list.sort(key=itemgetter(0)) # Sort by secondary key which will alphabetize the search terms\n trimmed_list.sort(key=itemgetter(-1), reverse=True) # Order and reverse the list \n context={'count': count, 'trimmed_list': trimmed_list} \n return(context)\n\n \n ","sub_path":"techposts/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"209278387","text":"import configparser\nfrom urllib.request import urlopen\nimport json\nimport pandas as pd\nimport os\nimport time\nimport requests\nimport zipfile\n\nKEY_CODE = '代號'\nKEY_NAME = '股名'\nKEY_EPS_THIS_SEASON = 'Eps_this_season'\n\nif __name__ == '__main__':\n config = configparser.ConfigParser()\n config.read('config.ini')\n\n stock_list = config['Target']['Number']\n stocks = json.loads(config.get(\"Target\",\"Number\"))\n stock_str_list = [str(s) for s in stocks]\n print(\"stocks: \")\n print(stock_str_list)\n\n start_time = config['FinancialReport']['start_season']\n end_time = config['FinancialReport']['end_season']\n\n [start_year, start_season] = start_time.split('/')\n [end_year, end_season] = end_time.split('/')\n\n season_str_list = [\"Q1\", \"Q2\", \"Q3\", \"Q4\"]\n season_query_strings = []\n\n if end_year < start_year:\n print(\"End year cannot be less than start year, please modify your config.\")\n exit()\n\n if end_year == start_year:\n if season_str_list.index(end_season) < season_str_list.index(start_season):\n print(\"End season cannot be less than start season, please modify your config.\")\n exit()\n else :\n if season_str_list.index(end_season) == season_str_list.index(start_season):\n season_query_strings.append(start_year+start_season)\n else :\n for i in range(season_str_list.index(start_season), season_str_list.index(end_season)+1):\n season_query_strings.append(start_year+season_str_list[i])\n else :\n for i in range(season_str_list.index(start_season), season_str_list.index(\"Q4\")+1):\n season_query_strings.append(start_year+season_str_list[i])\n\n for year in range(int(start_year)+1, int(end_year)):\n for season in season_str_list:\n season_query_strings.append(str(year)+season)\n\n for i in range(season_str_list.index(\"Q1\"), season_str_list.index(end_season)+1):\n season_query_strings.append(end_year+season_str_list[i])\n\n df_all = 
pd.DataFrame([], columns=[KEY_NAME] + season_query_strings, index=stock_str_list)\n\n for season_str in season_query_strings:\n # check whether the financial info zipfile of this season exists\n filename = './' + season_str + '_C05001.zip'\n exist = os.path.isfile(filename)\n if not exist:\n url = 'https://www.twse.com.tw/statistics/count?url=/staticFiles/inspection/inspection/05/001/{}.zip'.format(season_str+'_C05001')\n resp = requests.get(url)\n if resp.status_code == requests.codes.ok:\n print(season_str + '_C05001.zip' + ' downloaded.')\n with open(filename, 'wb') as f:\n f.write(resp.content)\n time.sleep(2)\n else :\n print(season_str + '_C05001.zip' + ' download failed.')\n else:\n print(season_str + '_C05001.zip' + ' already exists.')\n\n for season_str in season_query_strings:\n # check whether the financial zip file is extracted\n filename = './' + season_str + '.xls'\n exist = os.path.isfile(filename)\n if not exist:\n zip_filename = './' + season_str + '_C05001.zip'\n with zipfile.ZipFile(zip_filename, 'r') as zip_ref:\n print('extract file ' + zip_filename)\n zip_ref.extractall('./')\n else : \n print('excel file ' + filename + 'already exits')\n \n excel_filename = './' + season_str + '.xls'\n df = pd.read_excel (excel_filename)\n df = df.rename(columns={'Unnamed: 0':KEY_CODE, 'Unnamed: 1':KEY_NAME, 'Unnamed: 13':KEY_EPS_THIS_SEASON})\n print(\"xls read: \" + excel_filename)\n results = df.loc[df[KEY_CODE].isin(stock_str_list)]\n# print(results.loc[:, [KEY_CODE, KEY_NAME, KEY_EPS_THIS_SEASON]])\n for index, row in results.iterrows():\n df_all.loc[row[KEY_CODE]][season_str] = row[KEY_EPS_THIS_SEASON]\n df_all.loc[row[KEY_CODE]][KEY_NAME] = row[KEY_NAME]\n\n# print(df_all)\n df_all.to_csv('{}-{}歷年EPS(元-每股).csv'.format(season_query_strings[0], season_query_strings[-1]))\n# exit()\n","sub_path":"src/eps_crawler.py","file_name":"eps_crawler.py","file_ext":"py","file_size_in_byte":4215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"236657340","text":"import numpy as np\nimport stock\n\ndef corr(name1, name2):\n\tst1 = stock.load(name1)\n\tst2 = stock.load(name2)\n\t\n\tx = []\n\ty = []\n\tfor i in st1.date:\n\t\tif i in st2.date:\n\t\t\t# scan for index in st1 and st2\n\t\t\tfor j in range(len(st1.date)):\n\t\t\t\tif i == st1.date[j]:\n\t\t\t\t\tx.append(st1.close[j])\n\t\t\tfor j in range(len(st2.date)):\n\t\t\t\tif i == st2.date[j]:\n\t\t\t\t\ty.append(st2.close[j])\n\t\n\tretval = None\n\tif len(x) == len(y) and len(x) > 10:\n\t\tretval = np.corrcoef(x, y)[0][1] * 100\n\treturn retval\n","sub_path":"stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"154499892","text":"\"\"\"Set up batch file shortcuts for simplified virtual environment workflow.\n\nFor more information, see this blog post:\nhttp://mahugh.com/2017/04/02/python-virtual-environments/\n\nCopyright (C) 2017 Doug Mahugh. All Rights Reserved. 
MIT License.\n\"\"\"\nimport datetime\nimport os\nimport sys\n\nimport pip\n\ndef installed_not_in_req(): #------------------------------------------------<<<\n \"\"\"Return list of installed packages that are not in the current\n requirements.txt file.\"\"\"\n if not os.path.isfile('requirements.txt'):\n return [] # no requirements.txt file\n\n installed = []\n for distro in pip.get_installed_distributions():\n requirement = str(distro.as_requirement()).lower()\n nameonly = requirement.split('=')[0]\n if not nameonly in ['pip', 'setuptools']:\n installed.append(requirement)\n\n reqs = []\n for line in open('requirements.txt', 'r').readlines():\n this_req = line.lower().strip()\n if 'git+https://github.com' in this_req:\n # special handling here for packages installed from github, since\n # they don't appear with samy syntax in requirements.txt and pip\n nameonly = this_req.split('=')[-1].lower()\n # remove from installed any same-named package ...\n installed = [_ for _ in installed if not _.split('=')[0] == nameonly]\n else:\n reqs.append(this_req)\n\n missing = []\n for pkg in installed:\n if not pkg in reqs:\n missing.append(pkg)\n\n return missing\n\ndef same_env(folder, venv): #------------------------------------------------<<<\n \"\"\"Returns True if specified folder is the home folder for (above) the\n specified venv folder, or a subfolder under it.\"\"\"\n home_folder = venv[:-4] # remove '\\env' from end of venv folder\n return folder.startswith(home_folder)\n\ndef setup_script(): #--------------------------------------------------------<<<\n \"\"\"Create batch files for simple virtual environment workflow at the Windows\n command prompt.\"\"\"\n print(' creating virtual environment batch files '.center(80, '-'))\n\n # Batch files are created in the Scripts subfolder under the current Python\n # executable's file location. 
This *should* be in the Windows path, and\n # a warning message is displayed if this is not true.\n python_interpreter = sys.executable # full path to Python interpreter\n python_folder = os.path.split(python_interpreter)[0]\n scripts_folder = os.path.join(python_folder, 'Scripts')\n print(' folder: ' + scripts_folder)\n verify_path(scripts_folder) # verify destination folder is in search path\n\n # NE.bat - New Environment\n # creates new virtual environment in env subfolder of current folder\n write_ne_bat(scripts_folder)\n\n # AE.bat - Activate Environment\n # activates virtual environment from current env subfolder, after\n # deactivating any currently active virtual environment\n write_ae_bat(scripts_folder)\n\n # DE.bat - Deactivate Environment\n # deactivates currently activated virtual environment, if any\n write_de_bat(scripts_folder)\n\n # SE.bat - Show Environment\n # shows current virtual environment status and current folder's relationship\n write_se_bat(scripts_folder)\n\ndef show_status(): #---------------------------------------------------------<<<\n \"\"\"Display current virtual environment status.\"\"\"\n print(' VIRTUAL ENVIRONMENT STATUS '.center(80, '-'))\n\n active_ve = venv_folder()\n if active_ve:\n print('Active environment: ' + active_ve)\n pydetails = sys.version.split(' ')[0] + ' ' + \\\n '(32-bit)' if '32 bit' in sys.version else '' + \\\n '(64-bit)' if '64 bit' in sys.version else ''\n print(' Python version: ' + pydetails)\n else:\n print('Active environment: NONE')\n\n print(80*'-')\n current_folder = os.getcwd()\n print(' Current folder: ' + current_folder)\n\n if active_ve:\n # if an environment is active, check whether we're under it\n if same_env(current_folder, active_ve):\n print(20*' ' + 'current folder matches active virtual environment')\n if os.path.isfile('requirements.txt'):\n # there is a requirements.txt in current folder, check accuracy\n missing_reqs = installed_not_in_req()\n if missing_reqs:\n print(80*'-')\n print('*** WARNING *** ' + \\\n 'installed packages not found in requirements.txt:')\n print(','.join(missing_reqs))\n else:\n print(20*' ' + 'CURRENT FOLDER NOT IN ACTIVE VIRTUAL ENVIRONMENT')\n\n if active_ve == current_folder + '\\\\env':\n return # we're in home folder of active environment\n # If we're in a different folder, check whether it has a virtual\n # environment under it, and display a message if so\n if os.path.isfile(current_folder + '\\\\env\\\\Scripts\\\\activate.bat'):\n print(20*' ' +\\\n 'current folder has a virtual environment, AE=activate')\n\ndef venv_folder(): #---------------------------------------------------------<<<\n \"\"\"Returns the location of the currently active virtual environment, or\n None if no virtual environment is active.\"\"\"\n # sys.prefix is the active virtual environment's home directory\n # sys.base_prefix is the location of the global Python interpreter\n if sys.prefix == sys.base_prefix:\n return None\n else:\n return sys.prefix\n\ndef verify_path(python_scripts_folder): #------------------------------------<<<\n \"\"\"Verifies that the specified folder is in the current search path.\n Displays a confirmation message if true, WARNING message if false.\n \"\"\"\n # note that we ignore case, and we also strip a trailing backslash off\n # the search paths (because that is irrelevant, and the existence of the\n # trailing slash is inconsistent across Python versions)\n windows_search_path = os.environ['PATH']\n search_paths = windows_search_path.split(';') # assume Windows ; separator\n 
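# e.g. a PATH entry of 'c:\\python36\\scripts\\' still matches 'C:\\Python36\\Scripts' after this normalization\n 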
if not python_scripts_folder.lower().rstrip('\\\\') in \\\n [_.lower().rstrip('\\\\') for _ in search_paths]:\n print('*WARNING*: ' +\n 'Scripts folder is not in current Windows search path!',\n file=sys.stderr)\n else:\n print('confirmed: ' +\n 'Scripts folder is in current Windows search path')\n\ndef write_ae_bat(folder): #--------------------------------------------------<<<\n \"\"\"Writes the ae.bat \"activate environment\" batch file.\"\"\"\n print(' writing: ae.bat (Activate Environment)')\n timestamp = str(datetime.datetime.now())[:19]\n content = '\\n'.join( \\\n ['@REM batch file created by venv_setup.py ' + timestamp,\n '@call deactivate >nul 2>&1',\n '@env\\\\Scripts\\\\activate'])\n open(os.path.join(folder, 'ae.bat'), 'w').write(content)\n\ndef write_de_bat(folder): #--------------------------------------------------<<<\n \"\"\"Writes the de.bat \"deactivate environment\" batch file.\"\"\"\n print(' writing: de.bat (Deactivate Environment)')\n timestamp = str(datetime.datetime.now())[:19]\n content = '\\n'.join( \\\n ['@REM batch file created by venv_setup.py ' + timestamp,\n '@deactivate'])\n open(os.path.join(folder, 'de.bat'), 'w').write(content)\n\ndef write_ne_bat(folder): #--------------------------------------------------<<<\n \"\"\"Writes the ne.bat \"new environment\" batch file.\"\"\"\n print(' writing: ne.bat (New Environment)')\n timestamp = str(datetime.datetime.now())[:19]\n content = '\\n'.join( \\\n ['@REM batch file created by venv_setup.py ' + timestamp,\n '@ECHO ' + ' creating Python virtual environment '.center(54, '-'),\n '@ECHO project home folder: %cd%',\n '@ECHO virtual environment: %cd%\\\\env',\n '@ECHO copying files ...',\n '@python -m venv env',\n '@ECHO *** COMPLETED *** ' + 'use AE to activate, DE to de-activate'])\n open(os.path.join(folder, 'ne.bat'), 'w').write(content)\n\ndef write_se_bat(folder): #--------------------------------------------------<<<\n \"\"\"Writes the se.bat \"show environment\" batch file.\"\"\"\n print(' writing: se.bat (Show Environment)')\n timestamp = str(datetime.datetime.now())[:19]\n # we include the full path to venv_setup.py, to avoid launching a program\n # of the same name that happens to be in the current folder at runtime\n filename = os.path.realpath(sys.argv[0])\n # note the 'status' argument on the venv_setup.py command line, which\n # causes execution of show_status() instead of setup_script() at runtime\n content = '\\n'.join( \\\n ['@REM batch file created by venv_setup.py ' + timestamp,\n '@python ' + filename + ' status'])\n open(os.path.join(folder, 'se.bat'), 'w').write(content)\n\nif __name__ == '__main__':\n if 'status' in [_.lower() for _ in sys.argv]:\n show_status()\n else:\n setup_script()\n","sub_path":"venv_setup.py","file_name":"venv_setup.py","file_ext":"py","file_size_in_byte":8973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"12562097","text":"from sympy import isprime, nextprime\nfrom collections import Counter\nfrom operator import mul\nfrom functools import reduce\nfrom itertools import combinations\n\ndef prime_decomposition(n) :\n l_p = []\n p = nextprime(1)\n while p <= n :\n while n % p == 0 :\n l_p.append(p)\n n //= p\n p = nextprime(p)\n return l_p\n \ndef s_divisors(n) :\n l_p = [1] + prime_decomposition(n)\n l_d = []\n for r in range(1, len(l_p) +1) :\n for c in combinations(l_p, r) :\n l_d.append( reduce(mul, (i for i in c )))\n return set(l_d)\n \ndef test (n) :\n return all(isprime(d + n//d) for d in 
s_divisors(n))\n\nprint(len([ 1 for n in range(1, 100 * 1000 * 1000) if test(n)])) \n","sub_path":"WIP_projectEuler_p357.py","file_name":"WIP_projectEuler_p357.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"265066076","text":"# -*- coding: utf-8 -*-\n# Copyright (c) St. Anne's University Hospital in Brno. International Clinical\n# Research Center, Biomedical Engineering;\n# Institute of Scientific Instruments of the CAS, v. v. i., Medical signals -\n# Computational neuroscience. All Rights Reserved.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n\n# Std imports\n\n# Third pary imports\nimport pytest\n\nimport numpy as np\n\n# Local imports\n\n\n@pytest.fixture(scope=\"module\")\ndef create_testing_artifact_data():\n \"\"\"\n Creates testing data\n \"\"\"\n\n freqs = [2.5, 6.0, 10.0, 16.0, 32.5, 67.5, 165.0, 425.0, 800.0, 1500.0]\n amps = [1, 0.8, 0.6, 0.4, 0.2, 0.1, 0.01, 0.001, 0.0005, 0.0001]\n \n fs = 5000\n n = fs*10\n data = np.zeros(n)\n\n x = np.arange(n)\n for i,freq in enumerate(freqs):\n a = amps[i]\n y = a * np.sin(2 * np.pi * freq * x / fs)\n data += y\n \n data[int(n/10):int(2*n/10)] = 0\n data[int(3*n/10):int(4*n/10)] = max(data)\n data[int(5*n/10)] = max(data)\n \n hf_noise = np.zeros(int(n/10))\n x = np.arange(int(n/10))\n amps = [0.3, 0.1, 0.01]\n for i,freq in enumerate([650.0, 800.0, 950.0]):\n a = amps[i]\n y = a * np.sin(2 * np.pi * freq * x / fs)\n hf_noise += y\n data[int(6*n/10):int(7*n/10)] = data[int(6*n/10):int(7*n/10)]+hf_noise\n \n x = np.arange(int(n/10))\n line_noise = 1 * np.sin(2 * np.pi * 50.0 * x / fs)\n data[int(8*n/10):int(9*n/10)] = data[int(8*n/10):int(9*n/10)]+line_noise\n\n return data\n","sub_path":"epycom/artifact_detection/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"640047604","text":"# -*- encoding: utf-8 -*-\n'''\n@File : 04_allkinddec.py\n@Time : 2020/04/11 21:40:58\n@Author : xdbcb8 \n@Version : 1.0\n@Contact : xdbcb8@qq.com\n@WebSite : www.xdbcb8.com\n'''\n\n# here put the import lib\n\nimport functools\n\ndef npnr(func):\n @functools.wraps(func)\n def wrapper():\n print('npnr,没有参数,没有返回值的装饰器')\n func()\n print('调用结束')\n return\n return wrapper\n\ndef nphr(func):\n @functools.wraps(func)\n def wrapper():\n print('nphr,没有参数,有返回值的装饰器')\n result = func()\n print('调用结束')\n return result\n return wrapper\n\ndef hphr(func):\n @functools.wraps(func)\n def wrapper(*args, **kws):\n print('hphr,有参数,有返回值的装饰器')\n result = func(*args, **kws)\n print('调用结束')\n return result \n return wrapper\n\ndef isprime(n):\n '''\n 判断输入是否为质数\n '''\n for i in range(2, n // 2 + 1):\n if n % i == 0:\n return False\n else:\n return True\n\n@npnr\ndef p2000():\n for i in range(1,2001):\n if isprime(i):\n print('{}'.format(i), end = '\\t')\n else:\n print('\\n')\n\n@nphr\ndef count():\n count = 0\n for i in range(2, 10001):\n if isprime(i):\n count += 1\n return count\n\n@hphr\ndef countp(m):\n count = 0\n for i in range(2, m + 1):\n if isprime(i):\n count += 1\n return count\n\np2000()\nprint(count())\nprint(countp(20))","sub_path":"homework5/04_allkinddec.py","file_name":"04_allkinddec.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"565034913","text":"# -*- coding: utf-8 -*-\n# @Time : 5/20/19 9:19 
AM\n# @Author : dbchan\n# @Software: PyCharm\nfrom math import sqrt, sin, atan2, cos\n\n\ndef convert(lat, lon):\n x_pi = 3.14159265358979324\n z = sqrt(lat * lat + lon * lon) + 0.00002 * sin(lat * x_pi)\n theta = atan2(lat, lon) + 0.000003 * cos(lon * x_pi)\n db_lon = z * cos(theta) + 0.0065\n bd_lat = z * sin(theta) + 0.006\n return bd_lat, db_lon\n\n\nif __name__ == '__main__':\n print(convert(121.429627, 31.204399))\n","sub_path":"api/common_func/tx_to_bd.py","file_name":"tx_to_bd.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"226812724","text":"from airflow import DAG\n#from airflow.operators import python_operator\nfrom airflow.operators.python_operator import PythonOperator\nfrom datetime import datetime, timedelta\n\nimport pymssql\nimport requests,json,os,time\nimport xml\nimport smtplib\nfrom xml.etree import ElementTree as ET\nfrom xml.etree.ElementTree import Element as EL\nfrom email.mime.application import MIMEApplication\nfrom email.mime.multipart import MIMEMultipart\nconn = pymssql.connect(host='192.168.3.40', user='sysdba', password='e$1s_s', database='ESIDB')\ncur = conn.cursor()\n\n\ndef GenerateAndSend(INVOICE_ID):\n se = requests.Session()\n se.auth = ('jasperadmin', 'jasperadmin')\n se.headers={'content-type':'application/text'}\n bas_ULR='http://192.168.3.146:8051/jasperserver/rest/'\n response = se.get(bas_ULR+'resource/reports/Invoice')\n ECUDU_DESCRIPTIV=response.text\n mystr=ECUDU_DESCRIPTIV.split('')\n myparam=''+ str(INVOICE_ID)+''\n print(myparam+mystr[1])\n response = se.put(bas_ULR+'report/Invoice',data=myparam+mystr[1])\n tree = ET.fromstring(response.content)\n UUID = tree.findall('uuid')[0].text\n print(UUID)\n url=bas_ULR+'report/'+UUID+'?file=ecudureport'\n response = se.get(bas_ULR+'report/'+UUID+'?file=report')\n\n try:\n\n msg = MIMEMultipart()\n msg['Subject'] = '*********TEST********* Review and Confirm Invoice'\n msg['From'] = \"mehrdadn@integenx.com\"\n msg['To'] = \"mehrdadn@integenx.com\" #leilae@integenx.com;\n img1 = MIMEApplication(response.content, 'pdf')\n img1['Content-Disposition'] = 'attachment; filename=\"'+str(INVOICE_ID)+'.pdf\"'\n msg.attach(img1)\n s = smtplib.SMTP('IXI-EXCH.microchipbiotech.com')\n s.send_message(msg)\n s.quit()\n except Exception as e:\n print(e)\ndef Invoice_JOB():\n\n try:\n s='select INVOICE_NUMBER from ARFIM where INVOICE_DATE= DATEADD(day, -10, CONVERT (date, SYSDATETIME())) '\n #s='select INVOICE_NUMBER from ARFIM where INVOICE_DATE= cast(getdate() as date)'\n cur.execute(s)\n while True:\n row = cur.fetchone()\n print(row[0])\n GenerateAndSend(row[0])\n if row is None:\n #return 'Done'\n break\n #yield row\n #results = cur.fetchall()\n except Exception as e :\n print(e)\\\n\n\ndefault_args = {\n 'owner': 'airflow',\n 'depends_on_past': False,\n 'start_date': datetime(2018, 6, 1),\n 'email': ['airflow@airflow.com'],\n 'email_on_failure': False,\n 'email_on_retry': False,\n 'retries': 1,\n 'retry_delay': timedelta(minutes=5),\n 'schedule_interval': '@hourly',\n # 'queue': 'bash_queue',\n # 'pool': 'backfill',\n # 'priority_weight': 10,\n # 'end_date': datetime(2016, 1, 1),\n}\n\ndag = DAG('send_invoice', default_args=default_args)\nsend_invoice_task = \\\n PythonOperator(task_id='send_invoice',\n provide_context=True,\n python_callable=Invoice_JOB,\n 
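# caution: provide_context=True makes Airflow pass the task context as keyword arguments, but Invoice_JOB() is defined with no parameters, so it needs a **kwargs catch-all (or this flag removed) to run without a TypeError\n 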
dag=dag)\n\n\n\n\n\n\n","sub_path":"AIRFLOW/OrderBacklogSaleDAG.py","file_name":"OrderBacklogSaleDAG.py","file_ext":"py","file_size_in_byte":3270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"89798656","text":"import json\nimport pathlib\nfrom urllib.parse import urlencode\n\nimport hug\nfrom marshmallow import fields\nfrom sqlalchemy import and_, case, func\n\nimport walshify\nfrom notify import notify_user\nfrom walshify.db.db_io import IOManager, Session\nfrom walshify.db.models import Email, Generated, Vote\nfrom walshify.headlines import Headlines, load_headlines\n\nload_headlines()\nheadline_engine = Headlines()\n\n\nwith open(\"walshify/assets/email_template.html\") as tmplt:\n email_template = tmplt.read()\n\napi = hug.API(__name__)\napi.http.add_middleware(hug.middleware.CORSMiddleware(api, max_age=10))\n\nif walshify.security.require_https:\n from falcon_require_https import RequireHTTPS\n\n hug.API(__name__).http.add_middleware(RequireHTTPS())\n\n\n@hug.directive()\ndef SqlalchemySession(**kwargs):\n return IOManager().session\n\n\n@hug.directive()\nclass URLFactory:\n def __init__(self, default=None, request=None, **kwargs):\n self.base_url = f\"{request.prefix}{api.http.base_url}\".strip(\n \"/\"\n ) # Remove any trailing slashes\n self.path = default if default is not None else \"\"\n\n def __call__(self, path=None, params=None):\n if path is None:\n path = self.path\n print(\"Path is\")\n print(path)\n path = path.strip(\"/\")\n path = \"/\" + path if path else \"\"\n query = \"?\" + urlencode(params) if params else \"\"\n return f\"{self.base_url}{path}{query}\"\n\n\n@hug.get(\"/headline\")\ndef headline(\n session: SqlalchemySession,\n email: fields.Email() = None,\n count: fields.Integer() = 1,\n):\n \"\"\"Returns a new generated headline.\"\"\"\n _headlines = []\n if email is not None: # Try to get a generated headline not yet seen\n _email = get_email(email=email, session=session)\n if _email and _email[\"last_seen\"] is not None:\n _headlines = (\n (\n session.query(Generated)\n .filter(Generated.id > _email[\"last_seen\"])\n .order_by(Generated.id)\n )\n .limit(count)\n .all()\n )\n if len(_headlines) < count: # Generate a headline if none was retrieved\n new_headlines = [\n Generated(text=headline_engine.generated)\n for i in range(0, count - len(_headlines))\n ]\n session.add_all(new_headlines)\n _headlines.extend(new_headlines)\n if email is not None: # Update Last seen with headline id\n _email = session.query(Email).filter(Email.email == email).first()\n _email.last_seen = max([headline.id for headline in _headlines])\n session.commit()\n return [\n {\"text\": _headline.text, \"id\": int(_headline.id), \"vote\": \"none\"}\n for _headline in _headlines\n ]\n\n\n@hug.get(\"/email\", examples=\"email=example@professionaltruthgroomer.com\")\n@hug.local()\ndef get_email(email: fields.Email(), session: SqlalchemySession):\n \"\"\"Returns the status of an email address from the database\"\"\"\n subquery = (\n session.query(Vote.email_id, func.count(Vote.generated_id).label(\"unverified\"))\n .filter(Vote.verified == False)\n .group_by(Vote.email_id)\n .subquery()\n )\n\n email_hash = walshify.security.simple_hash(email.lower())\n result = (\n session.query(Email, func.coalesce(subquery.c.unverified, 0))\n .filter(Email.email_hash == email_hash)\n .outerjoin(subquery, Email.id == subquery.c.email_id)\n .first()\n )\n\n if result is None:\n _email = Email(**add_email(email=email, session=session))\n unverified = 
0\n else:\n _email, unverified = result\n if unverified is None:\n unverified = 0\n if _email.last_seen is None:\n _email.last_seen = None\n if _email.unsubscribe == True:\n return {\"unsubscribe\": True, \"id\": email.id}\n else:\n return {\n \"unsubscribe\": False,\n \"id\": _email.id,\n \"last_seen\": _email.last_seen,\n \"unverified\": unverified,\n }\n\n\n@hug.local()\ndef add_email(\n email: fields.Email(),\n session: SqlalchemySession,\n hash_only: fields.Boolean() = False,\n):\n \"\"\"Add the supplied email to the database\"\"\"\n _email = Email(email_hash=walshify.security.simple_hash(email))\n if not hash_only:\n _email.email = email.lower()\n session.add(_email)\n session.commit()\n return {\n \"id\": _email.id,\n \"email\": email,\n \"unsubscribe\": False,\n \"last_seen\": _email.last_seen,\n }\n\n\n@hug.post(\"/unsubscribe\", examples=\"email=example@professionaltruthgroomer.com\")\ndef unsubscribe(email: fields.Email(), session: SqlalchemySession):\n \"\"\"Removes the supplied email from the database and prevents future emails based on a hash.\"\"\"\n status = get_email(email, session)\n if status[\"unsubscribe\"]:\n return {\"status\": \"failed\", \"error\": f\"{email} already unsubscribed\"}\n elif not status[\"exists\"]:\n add_email(email=email, session=session, hash_only=True)\n\n email = session.query(Email).filter_by(email=email).first()\n email.email = None\n email.unsubscribe = True\n session.commit()\n return {\"status\": \"success\"}\n\n\n@hug.post(\"/vote\", examples=\"email=example@professionaltruthgroomer.com&headline_id=26\")\ndef vote(\n email: fields.Email(),\n headline_id: fields.Integer(),\n session: SqlalchemySession,\n up: fields.Boolean() = True,\n):\n \"\"\"Initiate a vote for the provided email and headline_id. up=False removes a previous vote.\"\"\"\n _email = get_email(email=email, session=session)\n if _email[\"unsubscribe\"]:\n return {\"status\": \"failed\", \"error\": f\"{email} is unsubscribed\"}\n else:\n email_id = _email[\"id\"]\n if up:\n vote = Vote(email_id=email_id, generated_id=headline_id)\n session.add(vote)\n activity = \"new vote\"\n elif not up:\n vote = (\n session.query(Vote)\n .filter(Vote.email_id == email_id)\n .filter(Vote.generated_id == headline_id)\n .first()\n )\n if vote:\n session.delete(vote)\n activity = \"deleted vote\"\n else:\n activity = \"none\"\n session.commit()\n return {\"status\": \"success\", \"activity\": activity}\n\n\n@hug.get(\"/top\")\n@hug.local()\ndef top(session: SqlalchemySession):\n subquery = (\n session.query(\n Vote.generated_id,\n func.sum(case([(Vote.verified == True, 1)], else_=0)).label(\n \"verified_vote_count\"\n ),\n func.count(Vote.generated_id).label(\"all_vote_count\"),\n )\n .group_by(Vote.generated_id)\n .subquery()\n )\n result = (\n session.query(\n Generated, subquery.c.verified_vote_count, subquery.c.all_vote_count\n )\n .join(subquery, Generated.id == subquery.c.generated_id)\n .order_by(\n subquery.c.verified_vote_count.desc(), subquery.c.all_vote_count.desc()\n )\n .limit(10)\n .all()\n )\n return [\n {\"headline\": thing[0].text, \"verified_votes\": thing[1], \"all_votes\": thing[2]}\n for thing in result\n ]\n\n\n@hug.get(\"/unverified\", examples=\"email=example@professionaltruthgroomer.com\")\ndef unverified(email: fields.Email(), session: SqlalchemySession):\n _email = get_email(email=email, session=session)\n if _email[\"unsubscribe\"]:\n return {\"status\": \"failed\", \"error\": f\"{email} is unsubscribed\"}\n\n result = (\n session.query(Generated)\n .join(Vote, 
Generated.id == Vote.generated_id)\n .order_by(Generated.id)\n .filter(Vote.email_id == _email[\"id\"])\n .all()\n )\n return [{\"text\": v.text, \"headline_id\": v.id} for v in result]\n\n\n@hug.post(\"/verify\", examples=\"email=example@professionaltruthgroomer.com\")\ndef finalize_verify(\n action_url: str,\n email: fields.Email(),\n session: SqlalchemySession,\n url_factory: URLFactory,\n unsubscribe_url: str = None,\n):\n _email = get_email(email=email, session=session)\n if _email[\"unsubscribe\"]:\n return {\"status\": \"failed\", \"error\": f\"{email} is unsubscribed\"}\n else:\n result = (\n session.query(Generated)\n .join(Vote, Generated.id == Vote.generated_id)\n .order_by(Generated.id)\n .filter(Vote.email_id == _email[\"id\"])\n .all()\n )\n\n jwt = walshify.security.jwt_encode(\n {\"type\": \"confirm\", \"email\": _email, \"ids\": [g.id for g in result]}\n )\n\n link = url_factory(path=\"/external\", action=jwt)\n unsubscribe = \"This should really be an unsubcribe link\"\n html = email_template.replace(\"{{vote_url}}\", link).replace(\n \"{{unsubscribe_url}}\", unsubscribe\n )\n\n post = notify_user(email, \"hi\", html=html)\n print(post.text)\n\n return {\"status\": \"success\", \"action\": \"email\"}\n\n\n@hug.get(\"/external\", examples=\"email=example@professionaltruthgroomer.com\")\ndef external(action: str, session: SqlalchemySession):\n\n jwt = walshify.security.jwt_decode(action)\n\n session.query(Vote).join(Email, Vote.email_id == Email.id).filter(\n Email.email == jwt[\"email\"]\n ).filter(Email.unsubscribe == False).filter(Vote.verified == False).update(\n {Vote.verified: True}, synchronize_session=False\n )\n\n return {\"status\": \"success\"}\n","sub_path":"walshify/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":9300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"307503362","text":"import json\nimport re\n\nfrom itertools import chain\n\nfrom django.views import generic\nfrom django.views.generic.base import TemplateView\nfrom django.db.models import Count\nfrom django.http import HttpResponse\nfrom django.conf import settings\nfrom django.shortcuts import render, get_object_or_404\nfrom django.db.models import Q\n\nfrom conditions.models import ConditionType, ConditionSet, Medium\nfrom datasets.models import Dataset, Data\n\nfrom conditions.forms import SearchForm\n\nfrom libchebipy import ChebiEntity\n\n\ndef index(request):\n\n if 'q' in request.GET:\n\n form = SearchForm(request.GET)\n q = request.GET['q'].strip()\n\n f = Q(systematic_name__icontains=q) | \\\n Q(common_name__icontains=q) | \\\n Q(display_name__icontains=q) | \\\n Q(conditions__type__name__icontains=q) | \\\n Q(conditions__type__other_names__icontains=q) | \\\n Q(conditions__type__chebi_name__icontains=q) | \\\n Q(conditions__type__pubchem_name__icontains=q)\n\n g = Count('dataset', filter=~Q(paper__latest_data_status__status__name='not relevant'))\n\n queryset1 = ConditionSet.objects.all()\n queryset1 = queryset1.filter(f).annotate(num_datasets=g).filter(num_datasets__gte=0).distinct()\n\n queryset2 = Medium.objects.all()\n queryset2 = queryset2.filter(f).annotate(num_datasets=g).filter(num_datasets__gte=0).distinct()\n\n queryset = list(chain(queryset1, queryset2))\n\n return render(request, 'conditions/index.html', {\n 'queryset': queryset,\n 'form': form,\n 'q': q,\n })\n else:\n form = SearchForm()\n\n return render(request, 'conditions/index.html', {\n 'form': form,\n })\n\n\nclass 
ConditiontypeDetailView(generic.DetailView):\n model = ConditionType\n template_name = 'conditions/detail.html'\n\n def get_context_data(self, **kwargs):\n context = super(ConditiontypeDetailView, self).get_context_data(**kwargs)\n context['DOWNLOAD_PREFIX'] = settings.DOWNLOAD_PREFIX\n context['USER_AUTH'] = self.request.user.is_authenticated()\n context['papers'] = context['object'].datasets\n context['id'] = context['object'].id\n return context\n\n\ndef conditionclass(request, class_id):\n class_entity = ChebiEntity('CHEBI:' + str(class_id))\n class_name = class_entity.get_name()\n children = []\n for relation in class_entity.get_incomings():\n if relation.get_type() == 'has_role':\n tid = relation.get_target_chebi_id()\n tid = re.search('(?<=CHEBI:)(\\d)*', tid)\n tid = int(tid.group(0))\n children.append(tid)\n\n conditiontypes = ConditionType.objects.filter(chebi_id__in=children)\n datasets = Dataset.objects.filter(conditionset__conditions__type__in=conditiontypes)\\\n .exclude(paper__latest_data_status__status__name='not relevant').distinct()\n return render(request, 'conditions/class.html', {\n 'id': class_id,\n 'class_name': class_name,\n 'conditiontypes': conditiontypes,\n 'papers': datasets,\n 'DOWNLOAD_PREFIX': settings.DOWNLOAD_PREFIX,\n 'USER_AUTH': request.user.is_authenticated()\n })\n\n\nclass MediumDetailView(generic.DetailView):\n model = Medium\n template_name = 'conditions/conditionset_medium_detail.html'\n\n def get_context_data(self, **kwargs):\n context = super(MediumDetailView, self).get_context_data(**kwargs)\n context['DOWNLOAD_PREFIX'] = settings.DOWNLOAD_PREFIX\n context['USER_AUTH'] = self.request.user.is_authenticated()\n context['papers'] = context['object'].datasets\n context['id'] = context['object'].id\n return context\n\n\nclass ConditionSetDetailView(generic.DetailView):\n model = ConditionSet\n template_name = 'conditions/conditionset_medium_detail.html'\n\n def get_context_data(self, **kwargs):\n context = super(ConditionSetDetailView, self).get_context_data(**kwargs)\n context['DOWNLOAD_PREFIX'] = settings.DOWNLOAD_PREFIX\n context['USER_AUTH'] = self.request.user.is_authenticated()\n context['papers'] = context['object'].datasets\n context['id'] = context['object'].id\n return context\n\n\n","sub_path":"conditions/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"470278625","text":"def word_count(prompt):\n\twords = prompt.split()\n\tl = list()\n\tfor i in words:\n\t\tk = list()\n\t\tc = 0\n\t\tfor x in words:\n\t\t\tif i == x:\n\t\t\t\t\tc = c + 1\t\t\n\t\tk.append(i)\n\t\tk.append(c)\n\t\tif k not in l:\n\t\t\tl.append(k)\n\tres = dict(l)\n\treturn res\n","sub_path":"assignments/python/wc/src/429.py","file_name":"429.py","file_ext":"py","file_size_in_byte":234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"592018319","text":"import sys\nimport re\nimport csv\nimport argparse\nimport logging\n\"\"\"\n\"\"\"\n\n\ndef circle_name(s):\n\n strUrl = \"\"\n\n oCRe = re.search(\"(.*?)\", s, re.I)\n if oCRe != None:\n strUrl = oCRe.group(1)\n else:\n strUrl = \"\"\n\n if strUrl.find(\"http://www.creation.gr.jp/jump/?\") == 0:\n strUrl = strUrl[len(\"http://www.creation.gr.jp/jump/?\"):]\n\n return strUrl\n\n\n#\ndef circle_info(s):\n\n strUrl = \"\"\n strCircle = \"\"\n\n oCRe = re.search(\"(.*?)\", s, re.I)\n\n if oCRe is not None:\n strUrl = oCRe.group(1)\n strCircle = 
oCRe.group(2)\n    else:\n        strCircle = s\n\n    return strCircle, strUrl\n\n\n#\ndef main():\n\n    o_parser = argparse.ArgumentParser()\n    o_parser.add_argument(\"-v\",\n                          \"--verbose\",\n                          action=\"store_true\",\n                          required=False,\n                          default=False)\n\n    o_parser.add_argument(\"filename\")\n\n    o_argv = o_parser.parse_args()\n\n    if o_argv.verbose is True:\n        log_level = logging.DEBUG\n    else:\n        log_level = logging.INFO\n\n    o_log = logging.getLogger(__name__)\n    o_handler = logging.StreamHandler()\n    o_handler.setLevel(log_level)\n    o_log.setLevel(log_level)\n    o_log.addHandler(o_handler)\n    o_log.propagate = False\n\n    with open(o_argv.filename, \"r\", encoding=\"utf-8\") as f:\n        text_html = f.read()\n\n    list_circle = re.findall(\n        '.*?.*?.*?',\n        text_html, re.DOTALL)\n\n    list_record = []\n    for html_circle in list_circle:\n\n        o_re = re.search(\n            '.*?'\n            '.*?'\n            '(.*?)'\n            '.*?'\n            '.*?'\n            '.*?'\n            '(.*?)'\n            '.*?'\n            '.*?'\n            '.*?'\n            '(.*?)'\n            '.*?'\n            '(.*?)
', html_circle, re.DOTALL)\n\n if o_re is not None:\n list_value = [v.strip() for v in o_re.groups()]\n\n list_href = [\n v.strip() for v in re.findall('= 0:\n url_pixiv = href\n elif href.find(\"twitter\") >= 0:\n url_twitter = href\n else:\n url = href\n\n list_record.append(list_value[0:3] + [url, url_twitter, url_pixiv])\n\n with open(o_argv.filename + \".csv\", \"w\", encoding=\"utf-8\") as f:\n csv_w = csv.writer(f)\n csv_w.writerows(list_record)\n \"\"\"\n for strTableBody in listTable:\n listCircle = re.findall(\"(.*?)\", strTableBody, re.I)\n\n for strCircle in listCircle:\n\n oCRe = re.search(\n \"(.*?).*?\"\n \"(.*?).*?\"\n \"(.*?).*?\"\n \"(.*?).*?\"\n \"\", strCircle, re.I)\n\n if oCRe != None:\n\n strWriter = \"\"\n strPixiv = \"\"\n\n strCircle, strUrl = circle_info(oCRe.group(1))\n strName, strTwitter = circle_info(oCRe.group(3))\n strSpace = oCRe.group(4)\n\n if strUrl.find(\"http://www.pixiv\") >= 0:\n strPixiv = strUrl\n strUrl = \"\"\n if strUrl.find(\"https://twitter\") >= 0:\n strTwitter = strUrl\n strUrl = \"\"\n if strUrl.find(\"http://twitter\") >= 0:\n strTwitter = strUrl\n strUrl = \"\"\n\n print \"\\t\".join([\n strSpace, strCircle, strWriter, strUrl, strTwitter,\n strPixiv\n ])\n\n\n# print oCRe.group(1)\n \"\"\"\n\n\nif __name__ == \"__main__\":\n main()\n\n# [EOF]\n","sub_path":"util/ht2c_comic_v_market.py","file_name":"ht2c_comic_v_market.py","file_ext":"py","file_size_in_byte":4293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"253058180","text":"'''\r\nCreated on Feb 22, 2019\r\n\r\n@author: vijay.pal01\r\n'''\r\n#PF-Tryout\r\n\r\n#debug the below code\r\n\r\n\r\ncounter1=0\r\ncounter2=0\r\nrows =5\r\n\r\nwhile(counter1= len(w1):\n return len(w2)-j\n if j >= len(w2):\n return len(w1)-i\n\n # if already calculated\n if self.dp[i][j] != -1:\n return self.dp[i][j]\n\n if w1[i] == w2[j]:\n # no need to perform any kind of operation\n self.dp[i][j] = 0 + self.minDisRecursive(w1, w2, i+1, j+1)\n else:\n # need to perform any 1 operation\n # replace - i +1, j +1, because we replace 1 char in w1 and go ahead in both\n # delete - i + 1, j - deleting in the word1, to get to word2\n # insert - i, j +1, inserting some value that is related to j, so that's why increaing j, not i because i is already incremented\n self.dp[i][j] = 1 + min(self.minDisRecursive(w1, w2, i, j+1),\n self.minDisRecursive(w1, w2, i+1, j), self.minDisRecursive(w1, w2, i+1, j+1))\n return self.dp[i][j]\n\n def safeIndex(self, i, j):\n return i >= 0 and j >= 0 and i < len(self.dp) and j < len(self.dp[0])\n\n def getValue(self, i, j):\n return self.dp[i][j] if self.safeIndex(i, j) else 0\n\n def minDisTabulation(self, w1, w2):\n for i in range(len(self.dp)):\n for j in range(len(self.dp[0])):\n if i == 0:\n self.dp[i][j] = j\n elif j == 0:\n self.dp[i][j] = i\n elif w1[i-1] == w2[j-1]:\n self.dp[i][j] = self.dp[i-1][j-1]\n else:\n self.dp[i][j] = 1 + min(self.dp[i-1][j],\n self.dp[i][j-1], self.dp[i-1][j-1])\n\n def minDistance(self, word1: str, word2: str) -> int:\n if len(word1) == 0 or len(word2) == 0:\n return max(len(word1), len(word2))\n\n self.dp = [[0 for j in range(len(word2)+1)]\n for i in range(len(word1)+1)]\n\n # self.minDisRecursive(word1, word2, 0, 0)\n # return self.dp[0][0]\n self.minDisTabulation(word1, word2)\n # print(self.dp)\n return self.dp[-1][-1]\n","sub_path":"LeetCode/72. Edit Distance.py","file_name":"72. 
Edit Distance.py","file_ext":"py","file_size_in_byte":2219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"55586920","text":"import csv\nimport json\nimport os\nimport urllib\nimport urllib.request\n\n\nclass InstagramCommentsScraper:\n '''\n Scraper for Instagram Comments.\n This implementation is broken due to recent GraphQL changes and only around 25 comments can be scraped as of now\n '''\n\n def _extract_get_comments_data(self, json_response):\n comments_list, usernames_list, timestamps_list = [], [], []\n for node in json_response['graphql']['shortcode_media']['edge_media_to_parent_comment']['edges']:\n comments_list.append(node['node']['text'].encode('utf-8', 'replace').decode())\n usernames_list.append(node['node']['owner']['username'])\n timestamps_list.append(node['node']['created_at'])\n\n return comments_list, usernames_list, timestamps_list\n\n def _extract_post_json(self, url, urllib_proxies=None):\n url = f\"https://www.instagram.com/p/{url.split('/')[-2]}/?__a=1\"\n\n req = urllib.request.Request(url, None, {\"User-Agent\": \"Mozilla/5.0\"})\n\n if urllib_proxies:\n handler = urllib.request.ProxyHandler(urllib_proxies)\n opener = urllib.request.build_opener(handler)\n urllib.request.install_opener(opener)\n\n response = urllib.request.urlopen(req)\n json_response = json.load(response)\n\n texts, usernames, timestamp = self._extract_get_comments_data(json_response)\n\n return timestamp, texts, usernames\n\n def scrape(self, url, out_path=None, urllib_proxies=None):\n '''\n Scraper function for scraping comments related to a specific Instagarm post\n url: str, URL for the Instagram post to be scraped\n out_path: [Optional] str, Path to output directory. If unspecified, current directory will be used\n urllib_proxies: [Optional] dict, Proxy information for urllib requests\n '''\n data = self._extract_post_json(url, urllib_proxies)\n\n with open('ScrapedComments.csv' if out_path is None else os.path.join(out_path, 'ScrapedComments.csv'), 'a+',\n encoding='utf-8', newline='') as f:\n writer = csv.writer(f, delimiter=',')\n writer.writerow(['Timestamp', 'Text', 'Username'])\n for row in zip(*data):\n writer.writerow(row)\n","sub_path":"scrapera/text/instagram.py","file_name":"instagram.py","file_ext":"py","file_size_in_byte":2265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"305497935","text":"import socket\nimport serial\nimport queue as q\nfrom threading import Timer,Thread,Event\nimport time\nimport csv\nimport random\n\n#from RFML2 import RFMLmain\n#from cnn import cnn_main\n\n#from Crypto.Util.Padding import pad\nfrom Crypto import Random\nfrom Crypto.Cipher import AES\nfrom Crypto.Util.py3compat import *\nimport base64\n\n# Crypto.Util.Padding pad function\ndef pad(data_to_pad, block_size, style='pkcs7'):\n\tpadding_len = block_size - len(data_to_pad) % block_size\n\tif style == 'pkcs7':\n\t\tpadding = bchr(padding_len) * padding_len\n\telif style == 'x923':\n\t\tpadding = bchr(0) * (padding_len - 1) + bchr(padding_len)\n\telif style == 'iso7816':\n\t\tpadding = bchr(128) + bchr(0) * (padding_len - 1)\n\telse:\n\t\traise ValueError(\"Unknown padding style\")\n\treturn data_to_pad + padding\n\n# global variable decleration\n#dataQueue = q.Queue()\nvoltage = 0\ncurrent = 0\npower = 0\ncumPower = 0\n\nHELLO = ('H').encode()\nACK = ('A').encode()\nNACK = ('N').encode()\nREADY = ('R').encode()\n\nclass SerClass:\n\n\tdef setup(self):\n\t\tself.ser = 
serial.Serial(\"/dev/serial0\", 115200)\n\t\tself.ser.reset_input_buffer()\n\t\tself.ser.reset_output_buffer()\n\n\tdef handshake(self):\n\t\tprint(\"InitiateHandshake\")\n\t\tself.ser.write(HELLO)\n\t\ttime.sleep(1)\n\t\tif self.ser.in_waiting > 0:\n\t\t\treply = self.ser.read().decode()\n\t\t\tif(reply == 'A'):\n\t\t\t\tself.ser.write(ACK)\n\t\t\t\tprint('Handshake Complete')\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\tprint('pending')\n\t\treturn False\n\n\n\tdef run(self):\n\t\tglobal dataQueue\n\n\t\tself.setup()\n\t\twhile self.handshake() is False:\n\t\t\tcontinue\n\t\tdataThread = DataReceiveClass(self.ser)\n\t\tdataThread.start()\n\nclass SocketClass():\n\tcurrMove = None\n\tmessage = None\n\tRFMLmove = None\n\tslidingMove = None\n\n\tdef createMsg(self):\n\t\tglobal voltage\n\t\tglobal current\n\t\tglobal power\n\t\tglobal cumPower\n\n\t\tself.actions = ['handmotor', 'bunny', 'tapshoulder', 'rocket', 'cowboy', 'hunchback', 'jamesbond', 'chicken', 'movingsalute', 'whip', 'logout']\n\n\t\tif self.currMove == -1:\n\t\t\tself.message = None\n\t\telse:\n\t\t\tself.message = (\"#\" + self.actions[self.currMove] + \"|\" + str(format(voltage, '.2f')) + \"|\" + str(format(current, '.2f')) + \"|\" + str(format(power, '.2f')) + \"|\" + str(format(cumPower, '.2f')) + \"|\").encode('utf8').strip()\n\n\tdef machine(self):\n\t\t# ML code that will return an index\n\t\tself.currMove=0\n\t\tprint(\"Running ML code\")\n#\t\tRFMLmain()\n#\t\tcnn_main()\n\n\tdef run(self):\n\t\tSECRET_KEY = bytes(\"dancedancedance!\", 'utf8')\n\t\t# setup connection\n\t\tprint('Connecting to server')\n\t\tself.ipaddress = '192.168.137.167'\n\t\tself.port = 8080\n\t\tself.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\t\tself.s.connect((self.ipaddress, self.port))\n\t\tprint(\"Connected to server \" +self.ipaddress+ \", port: \" +str(self.port))\n\n\t\twhile True:\n\t\t\tself.machine()\n\t\t\tself.createMsg()\n\t\t\tprint(self.message)\n\n\t\t\tiv = Random.new().read(AES.block_size)\n\t\t\tcipher = AES.new(SECRET_KEY, AES.MODE_CBC, iv)\n\t\t\tpadMessage = pad(self.message, AES.block_size)\n\t\t\tencryptMsg = cipher.encrypt(padMessage)\n\t\t\tencodedMsg = base64.b64encode(iv + encryptMsg)\n\n\t\t\ttime.sleep(5)\n\t\t\tself.s.send(encodedMsg)\n\n\t\tself.s.close()\n\nclass DataReceiveClass(Thread):\n\t#global dataQueue\n\n\tdef __init__(self, ser):\n\t\tThread.__init__(self)\n\t\tself.ser = ser\n\n\tdef run(self):\n\t\tself.readData()\n\n\tdef readData(self):\n\t\tglobal voltage\n\t\tglobal current\n\t\tglobal power\n\t\tglobal cumPower\n\n\t\tpacket = self.ser.readline().decode()\n\t\tpacket = packet.strip()\n\t\tprint(packet)\n\n\t\tcheckSum = packet.rsplit(\",\", 1)[1]\n\t\tpacket = packet.rsplit(\",\", 1)[0]\n\n\t\tcheckList = bytearray(packet.encode())\n\t\ttestSum = 0\n\n\t\tfor x in range(len(packet)):\n\t\t\ttestSum ^= checkList[x]\n#\t\tprint(\"values:\")\n#\t\tprint(checkSum)\n#\t\tprint(testSum)\n\n\t\tif(testSum == int(checkSum)):\n\t\t\tself.ser.write(NACK)\n\t\telse:\n#\t\t\tprint(\"populate\")\n\t\t\tself.ser.write(ACK)\n\t\t\twith open('/home/pi/Desktop/CG3002/RPI/dataset/dataset_val/hunchback_redo_val.csv', 'a') as csvfile:\n\t\t\t\tfilewriter = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_NONE)\n\t\t\t\tdataList = []\n\t\t\t\tfor x in range (0, 18):\n\t\t\t\t\tif x==0 or x==7:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\telif x==14:\n\t\t\t\t\t\tvoltage = float(packet.split(',', 18)[x])\n\t\t\t\t\telif x==15:\n\t\t\t\t\t\tcurrent = float(packet.split(',', 18)[x])\n\t\t\t\t\telif 
x==16:\n\t\t\t\t\t\tpower = float(packet.split(',', 18)[x])\n\t\t\t\t\telif x==17:\n\t\t\t\t\t\tcumPower = float(packet.split(',', 18)[x])\n\t\t\t\t\telse:\n\t\t\t\t\t\tval = float(packet.split(',', 18)[x])\n\t\t\t\t\t\tdataList.append(val)\n\t\t\t\tdataList.append(4) # append fixed action\n#\t\t\t\tprint(dataList)\n#\t\t\t\tprint(voltage)\n#\t\t\t\tprint(current)\n#\t\t\t\tprint(power)\n#\t\t\t\tprint(cumPower)\n\t\t\t\tfilewriter.writerow(dataList)\n\t\tTimer(0.001, self.readData).start()\n\n\nif __name__ == '__main__':\n\tSerComm = SerClass()\n\t#SerComm.run()\n\tserThread = Thread(target=SerComm.run)\n\n\tSocketComm = SocketClass()\n\t#SocketComm.run()\n\tsocketThread = Thread(target=SocketComm.run)\n\n\tserThread.start()\n\t#socketThread.start()\n","sub_path":"RPI/piClient.py","file_name":"piClient.py","file_ext":"py","file_size_in_byte":4803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"370770531","text":"#!/usr/bin/env python3\nimport goslate\nimport os\nimport sqlite3\n\n\nclass Translate:\n dbname = ''\n\n def __init__(self, db):\n self.dbname = db\n self.firstTime()\n\n def connect(self):\n conn = sqlite3.connect(self.dbname)\n return conn\n\n def firstTime(self):\n if not os.path.exists(self.dbname):\n conn = self.connect()\n c = conn.cursor()\n # Create table\n c.execute('''CREATE TABLE translate\n (english text, spanish text, searchTimes integer)''')\n conn.commit()\n conn.close()\n\n def find(self, text):\n values = [(text)]\n conn = self.connect()\n c = conn.cursor()\n c.execute('SELECT spanish, searchTimes \\\n FROM translate WHERE english=?', values)\n res = c.fetchone()\n print(res[0])\n if res[1] >= 1:\n cont = self.getSearchTimes(text) + 1\n values = [(cont, text)]\n c.execute('UPDATE translate SET searchTimes=? 
\\\n WHERE english=?', (cont, text))\n conn.commit()\n print(\"Consulta numero \" + str(self.getSearchTimes(text))\n + \" a esta palabra ¬¬\")\n conn.close()\n\n def getSearchTimes(self, text):\n values = [(text)]\n conn = self.connect()\n c = conn.cursor()\n c.execute('SELECT searchTimes \\\n FROM translate WHERE english=?', values)\n res = c.fetchone()[0]\n conn.close()\n return res\n\n def exists(self, text):\n values = [(text)]\n conn = self.connect()\n c = conn.cursor()\n c.execute('SELECT count(english) \\\n FROM translate WHERE english=?', values)\n res = c.fetchone()[0]\n conn.close()\n return res\n\n def insert(self, txt, res):\n values = [(txt, res, 1)]\n conn = self.connect()\n c = conn.cursor()\n c.executemany('INSERT INTO translate VALUES(?, ?, ?)', values)\n conn.commit()\n conn.close()\n\n def translateTo(self, text, language='es'):\n if self.exists(text) >= 1:\n self.find(text)\n else:\n gs = goslate.Goslate()\n result = gs.translate(text, language)\n print(result)\n if (text != result):\n self.insert(text, result)\n return result\n","sub_path":"pyTranslate REST/translate.py","file_name":"translate.py","file_ext":"py","file_size_in_byte":2390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"553760383","text":"#!/usr/bin/env python\n\n'''\nCopyright (c) 2020 RIKEN\nAll Rights Reserved\nSee file LICENSE for details.\n'''\n\n\nimport os,sys\nimport log,traceback\n\n\ncigar_op={'M', 'I', 'D', 'N', 'S', 'H', 'P', '=', 'X'}\ncigar_ref_retain={'M', 'D', 'N', '=', 'X'}\ncigar_read_retain={'M', 'I', '=', 'X'}\n\n\ndef sam_to_kmer(args, params, filenames):\n log.logger.debug('started')\n try:\n def complement(string):\n seq_c=string.translate(str.maketrans('ATGC', 'TACG'))[::-1]\n return seq_c\n \n def count_clip(cigar):\n length=0\n tmp=''\n for c in cigar:\n if not c in cigar_op:\n tmp += c\n elif c == 'S':\n length += int(tmp)\n tmp=''\n else:\n tmp=''\n return length\n \n def calc_ref_len(cigar):\n length=0\n tmp=''\n for c in cigar:\n if not c in cigar_op:\n tmp += c\n elif c in cigar_ref_retain:\n length += int(tmp)\n tmp=''\n else:\n tmp=''\n return length\n \n def parse_seq_to_kmers(seq, cigar, genome_start, chr):\n # parse cigar\n parsed_cigar=[]\n tmp=''\n for c in cigar:\n if not c in cigar_op:\n tmp += c\n else:\n parsed_cigar.append([c, int(tmp)])\n tmp=''\n # clip seq\n left,right=0,0\n if 'S' in cigar:\n if parsed_cigar[0][0] == 'S':\n left=parsed_cigar[0][1]\n if parsed_cigar[-1][0] == 'S':\n right=parsed_cigar[-1][1]\n ref_pos=genome_start\n read_pos=left\n pos_d={}\n for c,l in parsed_cigar:\n if c == 'M':\n for _ in range(l):\n pos_d[read_pos]=ref_pos\n read_pos += 1\n ref_pos += 1\n elif c == 'I':\n for _ in range(l):\n pos_d[read_pos]=ref_pos\n read_pos += 1\n elif c == 'D':\n for _ in range(l):\n ref_pos += 1\n pos_d[read_pos]= ref_pos\n tmp=[]\n if (len(seq) - (left + right)) >= params.k:\n for i in range(left, len(seq) - right - params.k + 1, params.slide_bin):\n kmer_seq=seq[i:i + params.k]\n ref='%s:%d-%d' % (chr, pos_d[i], pos_d[i + params.k]) # 0-based start; 1-based end\n tmp.append([kmer_seq, ref])\n return tmp\n\n import pysam\n if args.c is None:\n infile=pysam.AlignmentFile(args.b, 'rb')\n else:\n infile=pysam.AlignmentFile(args.c, 'rc', reference_filename=args.fa)\n \n kmers_set=set()\n kmers_ls=[]\n for line in infile:\n line=line.tostring()\n ls=line.strip().split('\\t')\n if ls[2] == args.refseq_id: # only specified refseq\n ref_len=calc_ref_len(ls[5])\n map_start= int(ls[3]) - 1 # 0-based\n 
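# SAM's POS field (ls[3]) is 1-based, so the subtraction above yields a 0-based start;\n            # together with map_end below this forms a half-open interval [map_start, map_end)\n            # covering the reference bases consumed by the CIGAR (see calc_ref_len).\n            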
map_end= map_start + ref_len\n if args.refseq_start <= map_start and map_end <= args.refseq_end:\n if int(ls[1]) < 256: # only use primary alignment, discard supplementary and duplicate\n b=bin(int(ls[1]))\n if b[-2] == '1': # properly paired\n for i in ls[::-1]:\n if 'NM:i:' in i:\n nm= int(i.replace('NM:i:', ''))\n if 'S' in ls[5]:\n soft_clip_len=count_clip(ls[5])\n else:\n soft_clip_len=0\n if nm <= params.max_mut and soft_clip_len <= params.max_clip_len:\n if b[-5] == '1':\n seq=complement(ls[9])\n else:\n seq=ls[9]\n kmers_l=parse_seq_to_kmers(seq, ls[5], int(ls[3]) - 1, ls[2]) # 0-based\n kmers_ls.extend(kmers_l)\n for kmer,_ in kmers_l:\n kmers_set.add(kmer)\n kmers_set=sorted(list(kmers_set))\n kmers_d={}\n for kmer in kmers_set:\n kmers_d[kmer]=[]\n for kmer,pos in kmers_ls:\n kmers_d[kmer].append(pos)\n out=[]\n for kmer in kmers_d:\n pos_set=sorted(list(set(kmers_d[kmer])))\n attr=[]\n for pos in pos_set:\n attr.append('%s=%d' % (pos, kmers_d[kmer].count(pos)))\n out.append('%s\\t%d\\t%s\\n' % (kmer, len(kmers_d[kmer]), ';'.join(attr)))\n with open(filenames.summary, 'w') as outfile:\n outfile.write(''.join(out))\n \n except:\n log.logger.error('\\n'+ traceback.format_exc())\n exit(1)\n","sub_path":"scripts/kmer_count_hervk.py","file_name":"kmer_count_hervk.py","file_ext":"py","file_size_in_byte":5259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"315967456","text":"import time\nfrom unittest.mock import patch, call, ANY\n\nfrom django.test import TestCase, override_settings\n\nfrom algoliasearch_django import algolia_engine\nfrom algoliasearch_django import get_adapter\nfrom algoliasearch_django import raw_search\nfrom algoliasearch_django import clear_objects\nfrom algoliasearch_django import update_records\n\nfrom .factories import WebsiteFactory\nfrom .models import Website\n\n\nclass SignalTestCase(TestCase):\n\n def test_save_signal(self):\n with patch.object(algolia_engine, 'save_record') as mock_save_record:\n websites = WebsiteFactory.create_batch(3)\n\n mock_save_record.assert_has_calls(\n [\n call(\n website,\n created=True,\n raw=False,\n sender=ANY,\n signal=ANY,\n update_fields=None,\n using=ANY\n )\n for website in websites\n ]\n )\n\n def test_delete_signal(self):\n with patch.object(algolia_engine, 'save_record'):\n websites = WebsiteFactory.create_batch(3)\n\n with patch.object(algolia_engine, 'delete_record') as mock_delete_record:\n websites[0].delete()\n websites[1].delete()\n\n mock_delete_record.assert_has_calls(\n [\n call(websites[0]),\n call(websites[1])\n ]\n )\n\n def test_unregistered_save_signal(self):\n algolia_engine.unregister(Website)\n\n with patch.object(algolia_engine, 'save_record') as mock_save_record:\n websites = WebsiteFactory.create_batch(3)\n mock_save_record.assert_not_called()\n\n algolia_engine.register(Website)\n\n def test_unregistered_delete_signal(self):\n algolia_engine.unregister(Website)\n\n websites = WebsiteFactory.create_batch(3)\n with patch.object(algolia_engine, 'delete_record') as mock_delete_record:\n websites[0].delete()\n websites[1].delete()\n mock_delete_record.assert_not_called()\n\n algolia_engine.register(Website)\n","sub_path":"tests/test_signal.py","file_name":"test_signal.py","file_ext":"py","file_size_in_byte":2142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"312071733","text":"from vehicle import Vehicle\n\nclass Airlock:\n airlock_id = -1\n inner_door_open = False\n outer_door_open = 
False\n    vehicles_inside = []\n\n    def __init__(self,id):\n        self.airlock_id = id\n        self.inner_door_open = False\n        self.outer_door_open = False\n        self.vehicles_inside = []\n\n    def add_vehicle_from_outer(self,vehicle):\n        if len(self.vehicles_inside) < 3:\n            self.outer_door_open = True\n            self.vehicles_inside.append(vehicle)\n            self.outer_door_open = False\n        else:\n            print(\"Capacity of airlock \" + str(self.airlock_id) + \" full.\\n\")\n\n    def add_vehicle_from_inner(self,vehicle):\n        if len(self.vehicles_inside) < 3:\n            self.inner_door_open = True\n            self.vehicles_inside.append(vehicle)\n            self.inner_door_open = False\n        else:\n            print(\"Capacity of airlock \" + str(self.airlock_id) + \" full.\\n\")\n\n    def remove_vehicles(self):\n        self.vehicles_inside = []","sub_path":"airlock.py","file_name":"airlock.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} {"seq_id":"420940591","text":"'''\n107 - Create a module called MOEDA.PY containing the built-in functions AUMENTAR() and DIMINUIR().\nAlso write a program that imports this module and uses some of these functions.\n\n108 - Adapt the code from DESAFIO 107, creating an additional function called REAL() that can display the values as a\nformatted monetary value.\n\n109 - Modify the functions created in DESAFIO 107 so that they accept one extra parameter, indicating whether or not the\nvalue they return will be formatted by the MOEDA() function developed in DESAFIO 108.\n\n110 - Add to the MOEDA.PY module created in the previous challenges a function called RESUMO(), which shows on screen some\ninformation generated by the functions the module already has.\n\n===> Create a package called utilidadesCeV with two internal modules called MOEDA and DADO. Move all the functions\nused in DESAFIOS 107, 108 and 109 into the first package and keep everything working.\n'''\nfrom ex111.utilidadesCeV.funcao import resumo\n\nn = float(input('Enter the product price » '))\ncredito = int(input('Enter the interest rate for installments » '))\ndebito = int(input('Enter the interest rate for discount » '))\n\nwhile True:\n    conversao = str(input('Do you want to convert the result to currency format? ')).strip().upper()[0]\n\n    if conversao == 'S':\n        brasao = str(input('Enter your currency symbol » ')).upper()\n        resumo(n, credito, debito, brasao, True)\n        break\n\n    elif conversao == 'N':\n        resumo(n, credito, debito)\n        break\n\n    else:\n        print(\n            f'\\033[7;31m Invalid input!!! \\033[m Type only \\033[36m S[YES] \\033[m or \\033[31m N[NO] \\033[m ')\n","sub_path":"Exercícios/ex111 - Transformando Módulos em Pacotes.py","file_name":"ex111 - Transformando Módulos em Pacotes.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} {"seq_id":"314865365","text":"# -*- coding: utf-8 -*-\n\n## VERSION\nCONFIG_VERSION = 'config_version'\n\n## analyzer configuration string\nANALYZER = 'analyzer'\nCONDITION_GROUP = 'condition_group'\nCONDITION_NAME = 'condition_name'\n\nCOLUMN_NAME = 'column_name'\nCOLUMN_TYPE = 'column_type'\nROW_STARTLINE = 'row_startline'\nCONDITION = 'condition'\nCONDITION_VALUE = 'value'\n\n## action configuration string\nACTION = 'action'\nACTION_TYPE = 'type'\n\n## email\nACTION_EMAIL = 'email'\nEMAIL_CONFIG = 'email_config'\nEMAIL_SUBJECT = 'subject'\nEMAIL_FROM = 'from'\nEMAIL_TO = 'to'\nEMAIL_SMTP = 'smtp'\nEMAIL_SMTP_USER = 'smtp_account'\nEMAIL_SMTP_PASSWD = 'smtp_password'\nEMAIL_MSG = 'msg'\nEMAIL_IMPORT_DATA = 'import_data'\n","sub_path":"exceltp/config_define.py","file_name":"config_define.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} {"seq_id":"528540518","text":"# Adapted from https://github.com/painor/randomkickbot\nimport asyncio\nimport datetime\nimport html\nimport logging\nimport random\nimport time\n\nfrom telethon import events, utils\nfrom telethon.tl.custom import Button\nfrom telethon.tl.functions.channels import EditBannedRequest\nfrom telethon.tl.types import ChatBannedRights\n\nlogging.basicConfig(level=logging.INFO)\n\n\nGROUP = 'telethonofftopic'\nDELAY = 24 * 60 * 60\n\nclicked = asyncio.Event()\nchosen = None\nlast_talked = {}\n\n\nasync def init(bot):\n    global last_talked\n\n    @bot.on(events.NewMessage(GROUP))\n    async def h(e):\n        last_talked[e.sender_id] = time.time()\n\n    async def kick_users():\n        global chosen\n        while True:\n            clicked.clear()\n            users = await bot.get_participants(GROUP)\n\n            # Delete people who talked before but have left the group\n            left = last_talked.keys() - {x.id for x in users}\n            for x in left:\n                del last_talked[x]\n\n            lo = min(last_talked.values(), default=0)\n            hi = time.time()\n            delta = hi - lo\n            if delta <= 0.0:\n                chosen = random.choice(users)\n            else:\n                weights = (1 - ((last_talked.get(x.id, lo) - lo) / delta) for x in users)\n                chosen = random.choices(users, weights)[0]\n\n            chosen.name = html.escape(utils.get_display_name(chosen))\n            start = time.time()\n            try:\n                await kick_user()\n            except Exception:\n                logging.exception('exception on kick user')\n\n            took = time.time() - start\n            wait_after_clicked = 8 * 60 * 60 - took\n            if wait_after_clicked > 0:\n                await asyncio.sleep(DELAY - took)\n\n    async def kick_user():\n        await bot.send_message(\n            GROUP,\n            '{}: you have 1 day to click this button or'\n            ' you will be automatically kicked'.format(chosen.id, chosen.name),\n            buttons=Button.inline('click me to stay', b'alive'), parse_mode='html'\n        )\n\n        try:\n            await asyncio.wait_for(clicked.wait(), DELAY)\n        except asyncio.TimeoutError:\n            await bot.send_message(\n                GROUP,\n                f'{chosen.name} '\n                f'was kicked for being inactive', parse_mode='html')\n\n            await bot(EditBannedRequest(GROUP, chosen.id, ChatBannedRights(\n                until_date=datetime.timedelta(minutes=1),\n                view_messages=True\n            )))\n\n    @bot.on(events.CallbackQuery)\n    async def save_him(event: events.CallbackQuery.Event):\n        if event.data != b'alive':\n            return\n\n        if event.sender_id != 
chosen.id:\n await event.answer('Who are you again?')\n return\n\n clicked.set()\n await event.answer('Congrats you are saved')\n await event.edit(\n f'Congrats '\n f'{chosen.name} you made it!', parse_mode='html')\n\n # TODO This task is not properly terminated on disconnect\n bot.loop.create_task(kick_users())\n","sub_path":"randomkick.py","file_name":"randomkick.py","file_ext":"py","file_size_in_byte":3176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"104269426","text":"import nltk\nimport pandas as pd\nimport numpy as np\n\nfrom nltk.corpus import stopwords\nfrom collections import Counter\n\n\ndef excerpt_counter(excerpt):\n\n excerpt = excerpt.encode('ascii','ignore')\n stop = stopwords.words('english')\n tokens = nltk.word_tokenize(excerpt)\n new_tokens = [x.lower().strip() for x in tokens]\n count_dict = Counter(tokens)\n for word in stop:\n count_dict.pop(word, None)\n return count_dict\n\n\ndef list_parser(tdb):\n totals = Counter({})\n i = 0\n for item in tdb:\n e = ''\n if 'excerpt' in item:\n e += item['excerpt']\n if 'given_title' in item:\n e += ' ' + item['given_title']\n totals += excerpt_counter(e)\n return totals\n\n\nif __name__ == '__main__':\n\n list_parser(tdb)\n\n\n","sub_path":"hack_frame.py","file_name":"hack_frame.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"282907848","text":"import logging\nfrom decimal import Decimal\n\nfrom main.models import AccountModel\nfrom main.models import TransactionModel\nfrom main.models import CurrencyTypeModel\nfrom main.models import CurrencyQuotesModel\n\n\n\nlogger = logging.getLogger('main')\n\n\ndef get_balance(user, requested_currency=None):\n account = AccountModel.objects.get(user=user)\n\n # Получим все транзакции для счета\n all_trns = TransactionModel.objects.filter(account=account)\n\n for t in all_trns:\n if not t.type == TransactionModel.DEBIT:\n continue\n\n # Сумма дебита в USD\n sum_debit = sum([t.amount * t.currency_rate.usd_exchange_rate\n for t in all_trns\n if t.type == TransactionModel.DEBIT])\n\n # Сумма кредита в USD\n sum_credit = sum([t.amount * t.currency_rate.usd_exchange_rate\n for t in all_trns\n if t.type == TransactionModel.CREDIT])\n\n logger.debug(sum_debit)\n logger.debug(sum_credit)\n\n # Получим актуальный рейт запрошенной или родной валюты\n currency = account.currency\n if requested_currency:\n currency = requested_currency\n\n rate = CurrencyQuotesModel.objects.filter(currency_type=currency).latest('datetime')\n\n return currency.alias, ((sum_debit - sum_credit) / rate.usd_exchange_rate).quantize(Decimal(\"1.000000\"))\n","sub_path":"backend/main/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"304826712","text":"import json\nimport logging\nimport os\nimport shutil\n\nfrom typing import NoReturn\n\nfrom sqlalchemy import not_\nfrom sqlalchemy.orm import joinedload\nfrom tornado.web import RequestHandler\n\nfrom apms.lib.config import config\nfrom apms.lib.db.database import Photo, User\nfrom apms.server.api_schemas import ApiResult, DeletePhotosRequest, PeopleTagRequest\n\nlog = logging.getLogger(__name__) # pylint: disable=invalid-name\n\n\nclass PhotosHandler(RequestHandler):\n \"\"\"\n Handler for API methods of photos functions\n \"\"\"\n\n def get(self): # pylint: disable=too-many-locals\n 
\"\"\"Get photos\n ---\n summary: Get photos\n tags:\n - \"Photos\"\n parameters:\n - in: query\n name: page\n schema:\n type: integer\n description: Return photos from specified page\n - in: query\n name: elements_per_page\n schema:\n type: integer\n description: Number of photos on a page\n - in: query\n name: owner_id\n schema:\n type: integer\n description: Return photos of specified owner\n - in: query\n name: photos_of\n schema:\n type: integer\n description: Return photos of specified person\n - in: query\n name: photos_by\n schema:\n type: integer\n description: Return photos made by a specified author\n - in: query\n name: sort_by\n schema:\n type: string\n enum: ['date-downloaded', 'date-taken', 'rating']\n description: Sorting order\n - in: query\n name: photo_text\n schema:\n type: string\n description: Search specified pattern in photo text\n responses:\n 200:\n description: List of photos\n schema: GetPhotosResponseSchema\n \"\"\"\n page = int(self.get_query_argument(\"page\", 1))\n limit = int(self.get_query_argument(\"elements_per_page\", 100))\n offset = (page - 1) * limit\n owner_id = self.get_query_argument(\"owner_id\", None)\n photos_of = self.get_query_argument(\"photos_of\", None)\n photos_by = self.get_query_argument(\"photos_by\", None)\n sort_by = self.get_query_argument(\"sort_by\", \"date-downloaded\")\n missing = self.get_query_argument(\"missing\", None)\n to_delete = self.get_query_argument(\"to_delete\", None)\n # foreign = self.get_query_argument('foreign', None)\n small = self.get_query_argument(\"small\", None)\n photo_text = self.get_query_argument(\"photo_text\", None)\n\n session = self.application.settings[\"session_maker\"]\n with session() as session:\n if to_delete is not None:\n count, result = PhotosHandler.get_photos_to_delete(session)\n elif missing is not None:\n count, result = PhotosHandler.get_missing_photos(session)\n else:\n query = (\n session.query(Photo)\n .options(joinedload(Photo.owner))\n .filter(not_(Photo.deleted_by_me))\n )\n if owner_id:\n query = query.filter_by(owner_id=owner_id)\n if small:\n query = query.filter(Photo.width < 450)\n if photos_of is not None:\n query = query.filter(Photo.people.any(User.id == photos_of))\n if photos_by is not None:\n query = query.filter(Photo.authors.any(User.id == photos_by))\n if photo_text is not None:\n query = query.filter(Photo.text.ilike(f\"%{photo_text}%\"))\n\n if sort_by == \"date-downloaded\":\n query = query.order_by(Photo.date_downloaded.desc())\n elif sort_by == \"date-taken\":\n query = query.order_by(Photo.date_added.desc())\n elif sort_by == \"rating\":\n query = query.order_by(Photo.rating.desc())\n\n count = query.count()\n result = query.offset(offset).limit(limit).all()\n\n photos = {\n \"count\": count,\n \"page\": page,\n \"photos\": list(map(lambda photo: photo.to_json(), result)),\n }\n self.set_header(\"Content-Type\", \"application/json\")\n self.write(json.dumps(photos))\n\n @staticmethod\n def get_missing_photos(session):\n query = (\n session.query(Photo)\n .options(joinedload(Photo.owner))\n .filter_by(deleted_by_me=False)\n )\n result = list(\n filter(\n lambda photo: not os.path.exists(\n os.path.join(config.photos_dir, photo.dir_name, photo.file_name)\n ),\n query.order_by(Photo.date_added.desc()).all(),\n )\n )\n return len(result), result[0:200]\n\n @staticmethod\n def get_photos_to_delete(session):\n query = (\n session.query(Photo)\n .options(joinedload(Photo.owner))\n .filter_by(deleted_by_me=True)\n )\n result = list(\n filter(\n lambda photo: 
os.path.exists(\n os.path.join(config.photos_dir, photo.dir_name, photo.file_name)\n ),\n query.order_by(Photo.date_downloaded.desc()).all(),\n )\n )\n return len(result), result\n\n def delete(self):\n \"\"\"Delete photos\n ---\n summary: Delete photos\n tags:\n - \"Photos\"\n requestBody:\n content:\n application/json:\n description: List of photos to delete\n schema: DeletePhotosRequest\n\n responses:\n 200:\n schema: ApiResult\n 400:\n schema: ApiResult\n \"\"\"\n try:\n data = self.request.body.decode()\n log.info(data)\n photos_to_delete = DeletePhotosRequest.Schema().loads(data).photos\n except Exception as ex: # pylint: disable=broad-except\n log.info(ex)\n resp = ApiResult(\"error\", str(ex))\n self.set_status(400)\n self.write(ApiResult.Schema().dumps(resp))\n return\n\n log.info(f\"Going to remove {photos_to_delete}\")\n session = self.application.settings[\"session_maker\"]\n with session() as session:\n query = session.query(Photo).filter(Photo.id.in_(photos_to_delete))\n count = query.count()\n if not count:\n self.set_status(404)\n self.write(json.dumps({\"result\": \"Error\", \"cause\": \"Not Found\"}))\n return\n for photo in query.all():\n if not os.path.isdir(config.trash_dir):\n os.makedirs(config.trash_dir)\n\n photo.deleted_by_me = True\n fname = os.path.join(config.photos_dir, photo.dir_name, photo.file_name)\n new_fname = os.path.join(config.trash_dir, photo.file_name)\n log.info(\"Moving {} -> {}\".format(fname, new_fname))\n try:\n shutil.move(fname, new_fname)\n except OSError:\n log.exception(\"Cannot move {} -> {}\".format(fname, new_fname))\n log.info(\"{} marked as deleted by me\".format(photo.id))\n session.commit()\n\n resp = ApiResult(\"success\", f\"deleted: {photos_to_delete}\")\n self.write(ApiResult.Schema().dumps(resp))\n\n\nclass PeopleTagHandler(RequestHandler):\n async def put(self)-> NoReturn:\n \"\"\"Tag people\n ---\n summary: Tag people on photo\n tags:\n - \"Photos\"\n requestBody:\n content:\n application/json:\n description: List of photos to tad along with list of people to add to each photo\n schema: PeopleTagRequest\n\n responses:\n 200:\n schema: ApiResult\n 400:\n schema: ApiResult\n \"\"\"\n try:\n data = self.request.body.decode()\n log.info(data)\n params = PeopleTagRequest.Schema().loads(data)\n\n session = self.application.settings[\"session_maker\"]\n with session() as session:\n people = session.query(User).filter(User.id.in_(params.people)).all()\n authors = session.query(User).filter(User.id.in_(params.authors)).all()\n for photo in (\n session.query(Photo).filter(Photo.id.in_(params.photos)).all()\n ):\n photo.people = (\n people\n if params.overwrite_people_tags\n else list(set(people + photo.people))\n )\n photo.authors = (\n authors\n if params.overwrite_authors_tags\n else list(set(authors + photo.authors))\n )\n session.commit()\n\n except Exception as ex: # pylint: disable=broad-except\n log.info(ex)\n resp = ApiResult(\"error\", str(ex))\n self.set_status(400)\n self.write(ApiResult.Schema().dumps(resp))\n return\n\n resp = ApiResult(\"success\", \"Tags have been added\")\n self.write(ApiResult.Schema().dumps(resp))\n","sub_path":"apms/server/handlers/photos.py","file_name":"photos.py","file_ext":"py","file_size_in_byte":9474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"262818737","text":"import streamlit as st\nimport pandas as pd\nimport plotly.express as px\nimport json\n\n#functions\nDATA = ('data.csv')\nDATE_COLUMN = 'date'\n@st.cache\ndef load_data():\n 
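# @st.cache above memoizes this loader: Streamlit re-reads data.csv only when the\n    # function's code or arguments change, not on every widget interaction rerun\n    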
df = pd.read_csv(DATA, parse_dates=[DATE_COLUMN])\n return df\n\nwith open('countries.geo.json') as json_file:\n json_locations = json.load(json_file)\n\ndef draw_map_cases():\n fig = px.choropleth_mapbox(df, geojson=json_locations, locations='iso_code', color='total_cases_per_million',\n color_continuous_scale=\"Reds\",\n mapbox_style=\"carto-positron\",\n title = \"COVID 19 cases per million\",\n zoom=1,\n opacity=0.5,\n )\n fig.update_layout(margin={\"r\": 0, \"t\": 0, \"l\": 0, \"b\": 0})\n return fig\n\ndef draw_map_deaths():\n fig = px.choropleth_mapbox(df, geojson=json_locations, locations='iso_code', color='total_deaths_per_million',\n color_continuous_scale=\"Greys\",\n mapbox_style=\"carto-positron\",\n title = \"Deaths from COVID 19 per million\",\n zoom=1,\n opacity=0.5,\n )\n fig.update_layout(margin={\"r\": 0, \"t\": 0, \"l\": 0, \"b\": 0})\n return fig\n\ndef draw_map_vaccine():\n fig = px.choropleth_mapbox(df, geojson=json_locations, locations='iso_code', color='people_vaccinated_per_hundred',\n color_continuous_scale=\"BuGn\",\n mapbox_style=\"carto-positron\",\n title = \"Vaccinations from COVID 19 per hundred\",\n zoom=1,\n opacity=0.5,\n )\n fig.update_layout(margin={\"r\": 0, \"t\": 0, \"l\": 0, \"b\": 0})\n return fig\n\nst.title(\"COVID 19 IN THE WORLD DASHBOARD\")\nst.write(\"\"\"This dashboard will present the spread of COVID-19 in the world by visualizing the timeline of the total cases and deaths. As well as the total number of vaccinated people.\"\"\")\n\n#Titles and Mode selections\nst.sidebar.title(\"About\")\nst.sidebar.info(\n \"\"\"\n This app is Open Source dashboard.\n \"\"\"\n)\nst.sidebar.info(\"Feel free to collaborate and comment on the work. The github link can be found \"\n \"[here](https://github.com/yuliianikolaenko/COVID_dashboard_proglib).\")\n\n# Load data\ndf = load_data()\n\nshow_data = st.sidebar.checkbox('Show raw data')\nif show_data == True:\n st.subheader('Raw data')\n st.markdown(\n \"#### Data on COVID-19 (coronavirus) by Our World in Data could be found [here](https://github.com/owid/covid-19-data/tree/master/public/data).\")\n st.write(df)\n\n\n\n##### SIDEBAR\n#slider to chose date\nshow_timerange = st.sidebar.checkbox(\"Show date range\")\nif show_timerange == True:\n # Calculate the timerange for the slider\n min_ts = min(df[DATE_COLUMN]).to_pydatetime()\n max_ts = max(df[DATE_COLUMN]).to_pydatetime()\n day_date = pd.to_datetime(st.sidebar.slider(\"Date to chose\", min_value=min_ts, max_value=max_ts, value=max_ts))\n st.write(f\"Data for {day_date.date()}\")\n df = df[(df['date'] == day_date)]\n\n#selectbox to chose between cases, deaths or total_vaccinations\nselect_event = st.sidebar.selectbox('Show map', ('cases per million', 'deaths per million', 'vaccinated per hundred'))\nif select_event == 'cases per million':\n st.plotly_chart(draw_map_cases(), use_container_width=True)\n\nif select_event == 'deaths per million':\n st.plotly_chart(draw_map_deaths(), use_container_width=True)\n\nif select_event == 'vaccinated per hundred':\n st.plotly_chart(draw_map_vaccine(), use_container_width=True)\n\n\nst.sidebar.info(\"Author of the project [Linkedin](https://www.linkedin.com/in/yuliia-nikolaenko/)\")\n\n","sub_path":"streamlit_app.py","file_name":"streamlit_app.py","file_ext":"py","file_size_in_byte":3864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"567812169","text":"from __future__ import absolute_import, division, 
print_function\n\n#-------------------------------------------------------------------------------\n# Name: module1\n# Purpose:\n#\n# Author: oeffner\n#\n# Created: 09/05/2019\n# Copyright: (c) oeffner 2019\n# Licence: \n#-------------------------------------------------------------------------------\n\nfrom PyQt5.QtCore import Qt, QTimer\nfrom PyQt5.QtWidgets import ( QApplication, QCheckBox, QComboBox,\n QDial, QDialog, QFileDialog, QGridLayout, QGroupBox, QHBoxLayout, QLabel, QLineEdit,\n QProgressBar, QPushButton, QRadioButton, QScrollBar, QSizePolicy,\n QSlider, QDoubleSpinBox, QSpinBox, QStyleFactory, QTableWidget,\n QTableWidgetItem, QTabWidget, QTextEdit, QVBoxLayout, QWidget )\n\nimport sys, zmq, subprocess, time\n\nclass NGL_HKLViewer(QDialog):\n def __init__(self, parent=None):\n super(NGL_HKLViewer, self).__init__(parent)\n self.context = None\n\n self.originalPalette = QApplication.palette()\n\n self.openFileNameButton = QPushButton(\"Load reflection file\")\n self.openFileNameButton.setDefault(True)\n self.openFileNameButton.clicked.connect(self.OpenReflectionsFile)\n\n self.flatPushButton = QPushButton(\"Flat Push Button\")\n self.flatPushButton.setFlat(True)\n self.flatPushButton.clicked.connect(self.DoSomething)\n\n self.MillerComboBox = QComboBox()\n self.MillerComboBox.activated.connect(self.MillerComboSelchange)\n\n self.MillerLabel = QLabel()\n self.MillerLabel.setText(\"Selected Miller Array\")\n\n self.FOMComboBox = QComboBox()\n self.FOMComboBox.activated.connect(self.FOMComboSelchange)\n\n self.FOMLabel = QLabel()\n self.FOMLabel.setText(\"Use Figure of Merits\")\n\n self.SpaceGroupComboBox = QComboBox()\n self.SpaceGroupComboBox.activated.connect(self.SpacegroupSelchange)\n\n self.SpacegroupLabel = QLabel()\n self.SpacegroupLabel.setText(\"Space Subgroups\")\n\n self.mergecheckbox = QCheckBox()\n self.mergecheckbox.setText(\"Merge data\")\n #self.mergecheckbox.setTristate (True)\n self.mergecheckbox.clicked.connect(self.MergeData)\n\n self.expandP1checkbox = QCheckBox()\n self.expandP1checkbox.setText(\"Expand to P1\")\n self.expandP1checkbox.clicked.connect(self.ExpandToP1)\n\n self.expandAnomalouscheckbox = QCheckBox()\n self.expandAnomalouscheckbox.setText(\"Show Friedel pairs\")\n self.expandAnomalouscheckbox.clicked.connect(self.ExpandAnomalous)\n\n self.sysabsentcheckbox = QCheckBox()\n self.sysabsentcheckbox.setText(\"Show Systematic Absences\")\n self.sysabsentcheckbox.clicked.connect(self.showSysAbsent)\n\n self.missingcheckbox = QCheckBox()\n self.missingcheckbox.setText(\"Show Missing\")\n self.missingcheckbox.clicked.connect(self.showMissing)\n\n self.onlymissingcheckbox = QCheckBox()\n self.onlymissingcheckbox.setText(\"Only Show Missing\")\n self.onlymissingcheckbox.clicked.connect(self.showOnlyMissing)\n\n self.showslicecheckbox = QCheckBox()\n self.showslicecheckbox.setText(\"Show Slice\")\n self.showslicecheckbox.clicked.connect(self.showSlice)\n\n self.sliceindexspinBox = QDoubleSpinBox()\n self.sliceindex = 0\n self.sliceindexspinBox.setValue(self.sliceindex)\n self.sliceindexspinBox.setDecimals(0)\n self.sliceindexspinBox.setSingleStep(1)\n self.sliceindexspinBox.setRange(0, 20)\n self.sliceindexspinBox.valueChanged.connect(self.onSliceIndexChanged)\n\n self.SliceLabelComboBox = QComboBox()\n self.SliceLabelComboBox.activated.connect(self.onSliceComboSelchange)\n self.sliceaxis = [ \"h\", \"k\", \"l\" ]\n self.SliceLabelComboBox.addItems( self.sliceaxis )\n\n self.HKLnameedit = QLineEdit('')\n self.HKLnameedit.setReadOnly(True)\n self.textInfo 
= QTextEdit()\n self.textInfo.setReadOnly(True)\n\n self.RadiiScaleGroupBox = QGroupBox(\"Radii Size of HKL Spheres\")\n\n #self.PowerScaleGroupBox = QGroupBox(\"Manual Power Scaling of Sphere Radii\")\n\n self.ManualPowerScalecheckbox = QCheckBox()\n self.ManualPowerScalecheckbox.setText(\"Manual Power Scaling of Sphere Radii\")\n self.ManualPowerScalecheckbox.clicked.connect(self.onManualPowerScale)\n\n self.power_scale_spinBox = QDoubleSpinBox(self.RadiiScaleGroupBox)\n self.nth_power_scale = 0.5\n self.power_scale_spinBox.setValue(self.nth_power_scale)\n self.power_scale_spinBox.setDecimals(2)\n self.power_scale_spinBox.setSingleStep(0.05)\n self.power_scale_spinBox.setRange(0.0, 1.0)\n self.power_scale_spinBox.valueChanged.connect(self.onPowerScaleChanged)\n self.powerscaleLabel = QLabel()\n self.powerscaleLabel.setText(\"Power scale Factor\")\n\n self.radii_scale_spinBox = QDoubleSpinBox(self.RadiiScaleGroupBox)\n self.radii_scale = 1.0\n self.radii_scale_spinBox.setValue(self.radii_scale)\n self.radii_scale_spinBox.setDecimals(1)\n self.radii_scale_spinBox.setSingleStep(0.1)\n self.radii_scale_spinBox.setRange(0.2, 2.0)\n self.radii_scale_spinBox.valueChanged.connect(self.onRadiiScaleChanged)\n self.radiiscaleLabel = QLabel()\n self.radiiscaleLabel.setText(\"Linear Scale Factor\")\n\n self.millertable = QTableWidget(0, 8)\n labels = [\"label\", \"type\", \"no. of HKLs\", \"span of HKLs\",\n \"min max data\", \"min max sigmas\", \"d_min, d_max\", \"symmetry unique\"]\n self.millertable.setHorizontalHeaderLabels(labels)\n # don't allow editing the miller array info\n self.millertable.setEditTriggers(QTableWidget.NoEditTriggers)\n\n self.createTopLeftGroupBox()\n #self.createTopRightGroupBox()\n self.createBottomLeftTabWidget()\n self.createRadiiScaleGroupBox()\n\n #topLayout = QHBoxLayout()\n #topLayout.addWidget(self.openFileNameButton)\n #topLayout.addStretch(1)\n\n mainLayout = QGridLayout()\n mainLayout.addWidget(self.openFileNameButton, 0, 0, 1, 1)\n mainLayout.addWidget(self.HKLnameedit, 1, 0, 1, 1)\n\n mainLayout.addWidget(self.topLeftGroupBox, 2, 0)\n #mainLayout.addWidget(self.topRightGroupBox, 1, 1)\n mainLayout.addWidget(self.RadiiScaleGroupBox, 3, 0)\n mainLayout.addWidget(self.bottomLeftGroupBox, 4, 0)\n mainLayout.setRowStretch(0, 0)\n mainLayout.setRowStretch(1, 0)\n mainLayout.setRowStretch(2, 0)\n mainLayout.setRowStretch(3, 0)\n mainLayout.setRowStretch(4, 1)\n #mainLayout.setColumnStretch(0, 1)\n #mainLayout.setColumnStretch(1, 0)\n self.setLayout(mainLayout)\n\n self.setWindowTitle(\"NGL-HKL-viewer\")\n self.cctbxproc = None\n self.LaunchCCTBXPython()\n self.out = None\n self.err = None\n self.miller_arrays = None\n self.matching_arrays = None\n self.bin_info = None\n self.html_url = None\n self.spacegroups = None\n self.info = None\n self.infostr = \"\"\n self.fileisvalid = False\n self.NewFileLoaded = False\n\n #self.msgqueuethrd = threading.Thread(target = self.update )\n #self.msgqueuethrd.daemon = True\n #self.msgqueuethrd.start()\n\n self.show()\n\n\n def update(self):\n #while 1:\n # time.sleep(1)\n if self.cctbxproc:\n if self.cctbxproc.stdout:\n print(self.cctbxproc.stdout.read().decode(\"utf-8\"))\n if self.cctbxproc.stderr:\n print(self.cctbxproc.stderr.read().decode(\"utf-8\"))\n if self.out:\n print(self.out.decode(\"utf-8\"))\n if self.err:\n print(self.err.decode(\"utf-8\"))\n #print(\"in update\\n\")\n if self.context:\n try:\n msg = self.socket.recv(flags=zmq.NOBLOCK) #To empty the socket from previous messages\n #msg = self.socket.recv()\n 
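# note: eval() executes whatever expression arrives on the ZMQ socket; if the peer\n          # only ever sends a repr()'d dict (as this code assumes), then\n          # ast.literal_eval(msg.decode()) would be a safer drop-in alternative\n          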
self.info = eval(msg.decode())\n\n ngl_hkl_infodict = self.info\n if ngl_hkl_infodict:\n self.miller_arrays = ngl_hkl_infodict[\"miller_arrays\"]\n self.matching_arrays = ngl_hkl_infodict[\"matching_arrays\"]\n self.bin_info = ngl_hkl_infodict[\"bin_info\"]\n self.html_url = ngl_hkl_infodict[\"html_url\"]\n self.spacegroups = ngl_hkl_infodict[\"spacegroups\"]\n self.mergedata = ngl_hkl_infodict[\"mergedata\"]\n self.infostr = ngl_hkl_infodict[\"info\"]\n self.NewFileLoaded = ngl_hkl_infodict[\"NewFileLoaded\"]\n self.fileisvalid = True\n\n if self.infostr:\n print(self.infostr)\n self.textInfo.setPlainText(self.infostr)\n #self.mergecheckbox.setEnabled(True)\n else:\n self.textInfo.setPlainText(\"\")\n #self.mergecheckbox.setEnabled(False)\n if self.NewFileLoaded:\n if self.mergedata == True : val = 2\n if self.mergedata == None : val = 1\n if self.mergedata == False : val = 0\n self.mergecheckbox.setCheckState(val )\n\n self.MillerComboBox.clear()\n self.MillerComboBox.addItems( [ (str(e[0]) + \" (\" + str(e[1]) +\")\" )\n for e in self.miller_arrays ] )\n self.FOMComboBox.clear()\n self.FOMComboBox.addItems( [ (str(e[0]) + \" (\" + str(e[1]) +\")\" )\n for e in self.miller_arrays ] )\n self.SpaceGroupComboBox.clear()\n self.SpaceGroupComboBox.addItems( self.spacegroups )\n\n self.millertable.setRowCount(len(self.miller_arrays))\n #self.millertable.setColumnCount(8)\n for n,millarr in enumerate(self.miller_arrays):\n for m,elm in enumerate(millarr):\n self.millertable.setItem(n, m, QTableWidgetItem(str(elm)))\n\n except Exception as e:\n #print( str(e) )\n pass\n\n\n\n\n def MergeData(self):\n if self.mergecheckbox.checkState()== 2:\n self.NGL_HKL_command('NGL_HKLviewer.mergedata = True')\n if self.mergecheckbox.checkState()== 1:\n self.NGL_HKL_command('NGL_HKLviewer.mergedata = None')\n if self.mergecheckbox.checkState()== 0:\n self.NGL_HKL_command('NGL_HKLviewer.mergedata = False')\n\n\n def ExpandToP1(self):\n if self.expandP1checkbox.isChecked():\n self.NGL_HKL_command('NGL_HKLviewer.viewer.expand_to_p1 = True')\n else:\n self.NGL_HKL_command('NGL_HKLviewer.viewer.expand_to_p1 = False')\n\n\n def ExpandAnomalous(self):\n if self.expandAnomalouscheckbox.isChecked():\n self.NGL_HKL_command('NGL_HKLviewer.viewer.expand_anomalous = True')\n else:\n self.NGL_HKL_command('NGL_HKLviewer.viewer.expand_anomalous = False')\n\n\n def showSysAbsent(self):\n if self.sysabsentcheckbox.isChecked():\n self.NGL_HKL_command('NGL_HKLviewer.viewer.show_systematic_absences = True')\n else:\n self.NGL_HKL_command('NGL_HKLviewer.viewer.show_systematic_absences = False')\n\n\n def showMissing(self):\n if self.missingcheckbox.isChecked():\n self.NGL_HKL_command('NGL_HKLviewer.viewer.show_missing = True')\n else:\n self.NGL_HKL_command('NGL_HKLviewer.viewer.show_missing = False')\n\n\n def showOnlyMissing(self):\n if self.onlymissingcheckbox.isChecked():\n self.NGL_HKL_command('NGL_HKLviewer.viewer.show_only_missing = True')\n else:\n self.NGL_HKL_command('NGL_HKLviewer.viewer.show_only_missing = False')\n\n\n def showSlice(self):\n if self.showslicecheckbox.isChecked():\n self.NGL_HKL_command('NGL_HKLviewer.viewer.slice_mode = True')\n else:\n self.NGL_HKL_command('NGL_HKLviewer.viewer.slice_mode = False')\n\n\n def onSliceComboSelchange(self,i):\n rmin = self.miller_arrays[self.MillerComboBox.currentIndex()][3][0][i]\n rmax = self.miller_arrays[self.MillerComboBox.currentIndex()][3][1][i]\n self.sliceindexspinBox.setRange(rmin, rmax)\n self.NGL_HKL_command(\"NGL_HKLviewer.viewer.slice_axis = %s\" % 
self.sliceaxis[i] )\n\n\n def onSliceIndexChanged(self, val):\n self.sliceindex = val\n self.NGL_HKL_command(\"NGL_HKLviewer.viewer.slice_index = %d\" %self.sliceindex)\n\n\n def onRadiiScaleChanged(self, val):\n self.radii_scale = val\n self.NGL_HKL_command(\"\"\"\n NGL_HKLviewer.viewer {\n nth_power_scale_radii = %f\n scale = %f\n }\n \"\"\" %(self.nth_power_scale, self.radii_scale)\n )\n\n\n def onPowerScaleChanged(self, val):\n self.nth_power_scale = val\n self.NGL_HKL_command(\"\"\"\n NGL_HKLviewer.viewer {\n nth_power_scale_radii = %f\n scale = %f\n }\n \"\"\" %(self.nth_power_scale, self.radii_scale)\n )\n\n\n def onManualPowerScale(self):\n if self.ManualPowerScalecheckbox.isChecked():\n self.NGL_HKL_command('NGL_HKLviewer.viewer.nth_power_scale_radii = %f' %self.nth_power_scale)\n self.power_scale_spinBox.setEnabled(True)\n else:\n self.NGL_HKL_command('NGL_HKLviewer.viewer.nth_power_scale_radii = -1.0')\n self.power_scale_spinBox.setEnabled(False)\n\n\n def OpenReflectionsFile(self):\n options = QFileDialog.Options()\n fileName, filtr = QFileDialog.getOpenFileName(self,\n \"Load reflections file\",\n \"\",\n \"All Files (*);;MTZ Files (*.mtz);;CIF (*.cif)\", \"\", options)\n if fileName:\n self.HKLnameedit.setText(fileName)\n self.fileisvalid = False\n self.NGL_HKL_command('NGL_HKLviewer.filename = \"%s\"' %fileName )\n self.MillerComboBox.clear()\n self.FOMComboBox.clear()\n #while not self.fileisvalid:\n # time.sleep(1)\n #print(\"file not valid\")\n\n\n def createTopLeftGroupBox(self):\n self.topLeftGroupBox = QGroupBox(\"Group 1\")\n\n layout = QGridLayout()\n layout.addWidget(self.MillerComboBox, 1, 1, 1, 1)\n layout.addWidget(self.MillerLabel, 1, 0, 1, 1)\n layout.addWidget(self.FOMComboBox, 2, 1, 1, 1)\n layout.addWidget(self.FOMLabel, 2, 0, 1, 1)\n layout.addWidget(self.SpaceGroupComboBox, 3, 1, 1, 1)\n layout.addWidget(self.SpacegroupLabel, 3, 0, 1, 1)\n layout.addWidget(self.mergecheckbox, 4, 0, 1, 1)\n layout.addWidget(self.expandP1checkbox, 4, 1, 1, 1)\n layout.addWidget(self.expandAnomalouscheckbox, 5, 0, 1, 1)\n layout.addWidget(self.sysabsentcheckbox, 5, 1, 1, 1)\n layout.addWidget(self.missingcheckbox, 6, 0, 1, 1)\n layout.addWidget(self.onlymissingcheckbox, 6, 1, 1, 1)\n\n layout.addWidget(self.showslicecheckbox, 7, 0, 1, 1)\n layout.addWidget(self.SliceLabelComboBox, 7, 1, 1, 1)\n layout.addWidget(self.sliceindexspinBox, 7, 2, 1, 1)\n #layout.addStretch(1)\n self.topLeftGroupBox.setLayout(layout)\n\n\n def createTopRightGroupBox(self):\n self.topRightGroupBox = QGroupBox(\"Group 2\")\n togglePushButton = QPushButton(\"Toggle Push Button\")\n togglePushButton.setCheckable(True)\n togglePushButton.setChecked(True)\n\n slider = QSlider(Qt.Horizontal, self.RadiiScaleGroupBox)\n slider.setValue(40)\n\n scrollBar = QScrollBar(Qt.Horizontal, self.RadiiScaleGroupBox)\n scrollBar.setValue(60)\n\n dial = QDial(self.RadiiScaleGroupBox)\n dial.setValue(30)\n dial.setNotchesVisible(True)\n\n layout = QVBoxLayout()\n layout.addWidget(self.openFileNameButton)\n layout.addWidget(togglePushButton)\n layout.addWidget(self.flatPushButton)\n\n layout.addWidget(slider)\n layout.addWidget(scrollBar)\n layout.addWidget(dial)\n\n layout.addStretch(1)\n self.topRightGroupBox.setLayout(layout)\n\n\n def DoSomething(self):\n print( self.miller_arrays )\n print( self.matching_arrays )\n print( self.bin_info )\n print( self.html_url )\n print( self.spacegroups )\n print(self.info)\n import code, traceback; code.interact(local=locals(), banner=\"\".join( traceback.format_stack(limit=10) ) 
)\n\n\n\n def createBottomLeftTabWidget(self):\n self.bottomLeftGroupBox = QGroupBox(\"Group 3\")\n layout = QGridLayout()\n\n \"\"\"\n self.bottomLeftTabWidget = QTabWidget()\n self.bottomLeftTabWidget.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Ignored)\n tab1 = QWidget()\n tab1hbox = QHBoxLayout()\n tab1hbox.setContentsMargins(5, 5, 5, 5)\n tab1hbox.addWidget(self.millertable)\n tab1.setLayout(tab1hbox)\n tab2 = QWidget()\n\n tab2hbox = QHBoxLayout()\n tab2hbox.setContentsMargins(5, 5, 5, 5)\n tab2hbox.addWidget(self.textInfo)\n tab2.setLayout(tab2hbox)\n self.bottomLeftTabWidget.addTab(tab1, \"&Miller Arrays\")\n self.bottomLeftTabWidget.addTab(tab2, \"Information\")\n \"\"\"\n\n layout.addWidget(self.millertable, 0, 0, 1, 1)\n layout.addWidget(self.textInfo, 1, 0, 1, 1)\n layout.setRowStretch (0, 1)\n layout.setRowStretch (1 ,0)\n self.bottomLeftGroupBox.setLayout(layout)\n\n\n def MillerComboSelchange(self,i):\n self.NGL_HKL_command(\"NGL_HKLviewer.column = %d\" %i)\n if self.miller_arrays[ i ][1] == 'Map coeffs':\n self.FOMComboBox.setEnabled(True)\n else:\n self.FOMComboBox.setEnabled(False)\n\n self.SpaceGroupComboBox.clear()\n self.SpaceGroupComboBox.addItems( self.spacegroups )\n\n if self.miller_arrays[ i ][7] == False:\n self.mergecheckbox.setEnabled(True)\n else:\n self.mergecheckbox.setEnabled(False)\n\n\n def FOMComboSelchange(self,i):\n self.NGL_HKL_command(\"NGL_HKLviewer.fomcolumn = %d\" %i)\n\n\n def SpacegroupSelchange(self,i):\n self.NGL_HKL_command(\"NGL_HKLviewer.spacegroupchoice = %d\" %i)\n\n\n def createRadiiScaleGroupBox(self):\n layout = QGridLayout()\n layout.addWidget(self.ManualPowerScalecheckbox, 1, 0, 1, 2)\n layout.addWidget(self.powerscaleLabel, 2, 0, 1, 2)\n layout.addWidget(self.power_scale_spinBox, 2, 1, 1, 2)\n layout.addWidget(self.radiiscaleLabel, 3, 0, 1, 2)\n layout.addWidget(self.radii_scale_spinBox, 3, 1, 1, 2)\n layout.setColumnStretch (0, 1)\n layout.setColumnStretch (1 ,0)\n self.RadiiScaleGroupBox.setLayout(layout)\n\n\n\n def LaunchCCTBXPython(self):\n self.context = zmq.Context()\n self.socket = self.context.socket(zmq.PAIR)\n self.socket.bind(\"tcp://127.0.0.1:7895\")\n try: msg = self.socket.recv(flags=zmq.NOBLOCK) #To empty the socket from previous messages\n except Exception: pass\n\n cmdargs = 'cctbx.python.bat -i -c \"from crys3d.hklview import cmdlineframes; myHKLview = cmdlineframes.HKLViewFrame(useSocket=True, verbose=False)\"\\n'\n #self.cctbxproc = subprocess.Popen( cmdargs, shell=True, stdout=sys.stdout, stderr=sys.stderr)\n self.cctbxproc = subprocess.Popen( cmdargs, shell=True, stdin=subprocess.PIPE, stdout=sys.stdout, stderr=sys.stderr)\n #self.cctbxproc = subprocess.Popen( cmdargs, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n #import code, traceback; code.interact(local=locals(), banner=\"\".join( traceback.format_stack(limit=10) ) )\n time.sleep(1)\n \"\"\"\n self.NGL_HKL_command('''\n NGL_HKLviewer {\n filename = \"C:\\\\Users\\\\oeffner\\\\Buser\\\\Tests\\LLGperResidue\\\\map_1six.1.mtz\"\n column=0\n viewer.expand_anomalous=True\n }\n ''')\n \"\"\"\n\n def NGL_HKL_command(self, cmdstr):\n #stdinstr = \"myHKLview.ExecutePhilString(\"+ cmdstr + \")\\n\"\n #self.cctbxproc.stdin.write( stdinstr.encode() )\n print(\"sending:\\n\" + cmdstr)\n self.socket.send(bytes(cmdstr,\"utf-8\"))\n print(\"stuff sent\")\n\n #( self.out, self.err ) = self.cctbxproc.communicate(input = bytes(cmdstr, 'utf-8') )\n\n #print(self.err)\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n 
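# Note: QTimer.setInterval() expects an integer number of milliseconds, so\n  # depending on the PyQt5 version the 0.1 below is either rejected or\n  # truncated; timer.start(500) sets the interval to 500 ms regardless.\n  # Note also that cctbxproc.terminate() runs before app.exec_() enters the\n  # event loop, so the helper process is stopped before the GUI starts.\n  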
guiobj = NGL_HKLViewer()\n\n timer = QTimer()\n timer.setInterval(0.1)\n timer.timeout.connect(guiobj.update)\n timer.start(500)\n\n if guiobj.cctbxproc:\n guiobj.cctbxproc.terminate()\n sys.exit(app.exec_())\n","sub_path":"crys3d/hklview/NGL_HKLviewerGui.py","file_name":"NGL_HKLviewerGui.py","file_ext":"py","file_size_in_byte":19212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"521892737","text":"\r\nclass Baseball_Game:\r\n \r\n # Constructor\r\n def __init__(self): \r\n\r\n self.end_of_game = False\r\n self.inning = 1\r\n self.top_of_inning = True\r\n self.outs = 0\r\n self.balls = 0\r\n self.strikes = 0\r\n self.runner_on_first = False\r\n self.runner_on_second = False\r\n self.runner_on_third = False\r\n self.home_score = 0\r\n self.away_score = 0\r\n \r\n \r\n # for printing the game state\r\n def __str__(self):\r\n \r\n if self.top_of_inning:\r\n top_bottom_inning_name = \"top\"\r\n else:\r\n top_bottom_inning_name = \"bottom\"\r\n \r\n if self.inning == 1:\r\n inning_suffix = \"st\"\r\n elif self.inning == 2:\r\n inning_suffix = \"nd\"\r\n elif self.inning == 3:\r\n inning_suffix = \"rd\"\r\n else:\r\n inning_suffix = \"th\"\r\n \r\n inning_string = \"{} of the {}{}\\n\".format(top_bottom_inning_name, self.inning, inning_suffix)\r\n \r\n score_string = \"Away: {} Home: {}\\n\".format(self.away_score, self.home_score)\r\n \r\n # should fix the plural on outs, balls and strikes\r\n outs_balls_strikes_string = \"{} outs, {} balls, and {} strikes\\n\".format(self.outs, self.balls, self.strikes)\r\n \r\n running_string = \"\"\r\n if self.runner_on_first:\r\n running_string = \"runner on 1st\\n\"\r\n \r\n if self.runner_on_second:\r\n running_string = running_string + \"runner on 2nd\\n\"\r\n\r\n if self.runner_on_third:\r\n running_string = running_string + \"runner on 3rd\\n\"\r\n\r\n if running_string == \"\":\r\n running_string = \"bases empty\\n\"\r\n \r\n if self.end_of_game:\r\n end_of_game_string = \"end of game\"\r\n else:\r\n end_of_game_string = \"\"\r\n \r\n #return inning_string + \"\\n\" + score_string + \"\\n\" + \"\\n\" + running_string + \"\\n\" + outs_balls_strikes_string + \"\\n\" + end_of_game_string \r\n return inning_string + score_string + running_string + outs_balls_strikes_string + end_of_game_string + \"\\n\"\r\n \r\n \r\n \r\n def update_half_inning(self): \r\n \r\n if self.end_of_game:\r\n return\r\n \r\n # check if it is the end of the game and the home team won\r\n if (self.inning >= 9) and (self.home_score > self.away_score):\r\n self.end_of_game = True\r\n return\r\n \r\n # check if it is the end of the visiting team won\r\n if (self.inning >= 9) and (self.top_of_inning == False) and (self.home_score < self.away_score):\r\n self.end_of_game = True\r\n return\r\n \r\n \r\n # otherwise it's not the end of the game so update the half inning\r\n if self.top_of_inning == True:\r\n self.top_of_inning = False\r\n else:\r\n self.top_of_inning = True\r\n self.inning = self.inning + 1\r\n\r\n # reset the balls, strikes and outs if a runner reaches base\r\n self.outs = 0\r\n self.balls = 0\r\n self.strikes = 0\r\n\r\n self.runner_on_first = self.runner_on_second = self.runner_on_third = False\r\n \r\n\r\n\r\n def add_runs(self, num_runs = 1):\r\n \r\n if self.top_of_inning:\r\n self.away_score = self.away_score + num_runs\r\n else:\r\n self.home_score = self.home_score + num_runs\r\n \r\n\r\n # add an out\r\n def add_outs(self, num_outs = 1): \r\n \r\n self.outs = self.outs + num_outs\r\n self.balls = 0\r\n 
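# finish resetting the count for the next batter; if the total has reached\r\n        # three outs, update_half_inning() below rolls over the half inning\r\n        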
self.strikes = 0\r\n \r\n if self.outs > 2:\r\n self.update_half_inning()\r\n \r\n # add strike\r\n def add_strike(self): \r\n \r\n self.strikes = self.strikes + 1\r\n\r\n if self.strikes == 3:\r\n self.add_outs(1)\r\n\r\n \r\n def add_runner_on_first(self):\r\n \r\n if self.runner_on_first and self.runner_on_second and self.runner_on_third: \r\n self.add_runs(1)\r\n \r\n elif self.runner_on_first and self.runner_on_second:\r\n self.runner_on_third = True\r\n \r\n elif self.runner_on_first:\r\n self.runner_on_second = True \r\n \r\n self.runner_on_first = True\r\n \r\n # reset the balls and strikes if a runner reaches base\r\n self.balls = 0\r\n self.strikes = 0\r\n \r\n \r\n # add ball\r\n def add_ball(self): \r\n \r\n self.balls = self.balls + 1\r\n\r\n if self.balls == 4:\r\n self.add_runner_on_first()\r\n\r\n \r\n \r\n def add_single(self):\r\n \r\n # runnings from second and third score on a single\r\n if self.runner_on_third:\r\n self.add_runs(1)\r\n self.runner_on_third = False\r\n \r\n if self.runner_on_second:\r\n self.add_runs(1)\r\n self.runner_on_second = False\r\n\r\n if self.runner_on_first:\r\n self.runner_on_second = True\r\n \r\n self.runner_on_first = True\r\n \r\n # reset the balls and strikes if a runner reaches base\r\n self.balls = 0\r\n self.strikes = 0\r\n \r\n \r\n \r\n\r\n def add_double(self):\r\n self.add_runs(self.runner_on_first + self.runner_on_second + self.runner_on_third)\r\n self.runner_on_first = self.runner_on_third = False\r\n self.runner_on_second = True\r\n \r\n # reset the balls and strikes if a runner reaches base \r\n self.balls = 0\r\n self.strikes = 0\r\n \r\n \r\n \r\n def add_triple(self):\r\n self.add_runs(self.runner_on_first + self.runner_on_second + self.runner_on_third)\r\n self.runner_on_first = self.runner_on_second = False\r\n self.runner_on_third = True\r\n \r\n # reset the balls and strikes if a runner reaches base \r\n self.balls = 0\r\n self.strikes = 0\r\n \r\n \r\n \r\n def add_home_run(self):\r\n self.add_runs(self.runner_on_first + self.runner_on_second + self.runner_on_third + 1)\r\n self.runner_on_first = self.runner_on_second = self.runner_on_third = False\r\n \r\n # reset the balls and strikes after a home run \r\n self.balls = 0\r\n self.strikes = 0\r\n \r\n \r\n \r\n \r\n def display_game(self, batter_name = \"\", center_string = \"\"):\r\n \r\n import matplotlib.pyplot as plt\r\n \r\n # draw the baseball diamond\r\n plt.plot([0, 1], [0, 1], color = \"black\");\r\n plt.plot([0, -1], [2, 1], color = \"black\");\r\n\r\n plt.axis(\"square\")\r\n\r\n \r\n \r\n # put the runners on base\r\n plt.plot([1], [1], color = \"red\", marker=\".\", markersize=25);\r\n \r\n \r\n \r\n\r\n # add the balls, strikes and outs\r\n plt.text(.7, .3, \"Balls: \" + str(self.balls));\r\n\r\n\r\n \r\n # add the top and bottom of the innning \r\n\r\n \r\n \r\n # add the scores\r\n\r\n\r\n \r\n # add the center string and batter string\r\n \r\n \r\n \r\n # turn off the axes to make the plot look better\r\n plt.axis(\"off\");\r\n\r\n ","sub_path":"labs/lab_05/baseball_game_with_graphics.py","file_name":"baseball_game_with_graphics.py","file_ext":"py","file_size_in_byte":7161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"339393413","text":"\n\nimport argparse\nfrom env_wrapper import SkillWrapper\nimport gym\nfrom stable_baselines.common.policies import MlpPolicy\nfrom stable_baselines.common.vec_env import DummyVecEnv, VecVideoRecorder, SubprocVecEnv, VecFrameStack\nfrom stable_baselines 
import PPO2, A2C\nfrom manager import AtariPolicyManager\nfrom env_wrapper import ActionRemapWrapper\nfrom stable_baselines.common import set_global_seeds\nimport os\nimport glob\nimport re\nimport time\nfrom cmd_util import make_atari_env\nimport errno\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--env_id\", type=str)\nparser.add_argument(\"--skills\", type=str)\nparser.add_argument(\"--logdir\", type=str)\nparser.add_argument(\"--rl_model\", type=str, default=None, help=\"ppo or a2c\")\nparser.add_argument(\"--record\", action=\"store_true\")\nparser.add_argument(\"--log_action\", action=\"store_true\")\nparser.add_argument(\"--load_model\", type=str, default=None)\nparser.add_argument(\"--seed\", type=int, default=2000)\nparser.add_argument(\"--train_total_timesteps\", type=int, default=10000000)\nparser.add_argument(\"--eval_max_steps\", type=int, default=int(10e6))\nargs = parser.parse_args()\n\nenv_id = args.env_id\nskills = args.skills\nlogdir = args.logdir\ntrain_total_timesteps=args.train_total_timesteps\n\nMAX_VIDEO_LENGTH = 1000000\ndef str_to_skills(str_skills):\n str_skills = str_skills.replace(\" \", '')\n \n skills = []\n temp_idx = 0\n # print(str_skills[1:-1])\n str_skills = str_skills[1:-1]\n for idx, ch in enumerate(str_skills):\n if ch==\"[\":\n temp_idx = idx\n elif ch==\"]\":\n \n act_seq = str_skills[temp_idx+1:idx].split(\",\")\n skill = []\n for act in act_seq:\n skill.append(int(act))\n skills.append(skill)\n return skills\n\ndef record_():\n model_path = args.load_model\n os.path.isfile(model_path)\n # print(args.load_model)\n \n # search skills\n skills=None\n if args.skills is None:\n m=re.search(\"\\[[0-9\\, \\[\\]]*\\]\", model_path)\n if m is None:\n raise ValueError(\"load_model: {} does not contain skills\".format(model_path))\n # print(m.group(0))\n # exit(0)\n skills = str_to_skills(m.group(0))\n else:\n skills = str_to_skills(args.skills)\n # print(skills)\n \n # search env-id\n env_id_list = [\"Alien\", \"Seaquest\", \"BeamRider\", \"Breakout\", \"SpaceInvaders\", \"Qbert\", \"Pong\", \"Enduro\", \"KungFuMaster\"]\n env_id=None\n if args.env_id is None:\n searched = False\n # m = re.search(\"[A-Z][a-z]*-ramDeterministic-v4\", model_path)\n m = re.search(\"[A-Z][a-z]*NoFrameskip-v4\", model_path)\n if m is not None:\n searched = True\n env_id = m.group(0)\n \n if searched is not True:\n for id_ in env_id_list:\n if id_.lower() in model_path.lower():\n searched = True\n env_id = id_ + \"NoFrameskip-v4\"\n\n if searched is not True:\n raise ValueError(\"load_model: {} does not contain env id\".format(model_path))\n else:\n env_id=args.env_id\n save_path = args.logdir\n if save_path is None:\n save_path = os.path.dirname(model_path)\n \n print(\"ENV:{} \\nskills:{} \\nmodel_path:{} \\nsave_path:{}\\n\".format(env_id, skills, model_path, save_path))\n time.sleep(3)\n\n\n env_creator_ = lambda env:ActionRemapWrapper(env)\n env_creator = lambda env:SkillWrapper(env_creator_(env), skills=skills)\n env = VecFrameStack(make_atari_env(env_id, 1, args.seed, extra_wrapper_func=env_creator, logdir=save_path, wrapper_kwargs={\"episode_life\":False, \"clip_rewards\":False}), 4)\n # env_creator = make_env(1000, 'atari', env_id='SeaquestNoFrameskip-v4', rank=0, action_set=action_set, clip_rewards=False, video_recording_dir=args.logdir)\n # env = DummyVecEnv([lambda: env_creator()])\n # env = SubprocVecEnv([make_env(env_creator, i, skills) for i in range(num_cpu)])\n # env = SubprocVecEnv([make_env(1000, \n # 'atari', \n # env_id='SeaquestNoFrameskip-v4', 
\n # rank=0, \n # action_set=action_set)\n # for rank in range(15)])\n # env = DummyVecEnv([lambda:env_creator()])\n\n \n if args.load_model is None:\n raise NotImplementedError\n assert os.path.isfile(args.load_model)\n\n if args.rl_model == \"ppo\":\n model = PPO2.load(args.load_model)\n elif args.rl_model == \"a2c\":\n model = A2C.load(args.load_model)\n elif args.rl_model is None:\n if \"ppo\" in model_path:\n model = PPO2.load(model_path) \n elif \"a2c\" in model_path:\n model = A2C.load(model_path)\n else:\n raise ValueError(\"please specify rl_model\")\n else:\n raise ValueError(\"{} rl_model not recognize\".format(args.rl_model))\n\n # DEBUG\n set_global_seeds(args.seed)\n # env.seed(args.seed)\n \n obs = env.reset()\n if args.record:\n env = VecVideoRecorder(env, save_path, record_video_trigger=lambda x: x == 0, video_length=MAX_VIDEO_LENGTH)\n env.reset()\n total_rewards = 0 \n\n \n action_save_path=os.path.join(save_path, \"history_action.txt\")\n if args.log_action:\n try:\n os.remove(action_save_path)\n except OSError as e:\n if e.errno != errno.ENOENT: # errno.ENOENT = no such file or directory\n raise # re-raise exception if a different error occurred\n print(\"start evaluate\")\n with open(action_save_path, 'a') as f:\n for steps in range(args.eval_max_steps):\n action, _states = model.predict(obs)\n if args.log_action:\n # print(\"{}\".format(action[0]), sep=\" \", file=f)\n f.write(\"{} \".format(action[0]))\n obs, rewards, dones, info = env.step(action)\n total_rewards += rewards\n # if args.render:\n # env.render()\n # time.sleep(0.01)\n if bool(dones[0]) is True:\n break \n # if steps==args.eval_max_steps-1:\n # print(\"reach eval_max_steps({})\".format(args.eval_max_steps))\n print(\"steps: {}/{}\".format(steps+1, args.eval_max_steps))\n print(\"total_rewards: {}\".format(total_rewards))\n env.close()\n\ndef evaluate():\n skills = args.skills\n skills = str_to_skills(skills) \n\n empty_action=-1\n\n\n\n # if args.load_model is None:\n\n env_creator_ = lambda env:ActionRemapWrapper(env)\n env_creator = lambda env:SkillWrapper(env_creator_(env), skills=skills)\n\n \n #TODO change Mlppolicy to CnnPolicy\n # atari_manager = AtariPolicyManager(env_id=args.env_id, env_creator=env_creator, model=PPO2, policy=MlpPolicy, save_path = logdir, verbose=0, num_cpu=15)\n raise NotImplementedError\n\n # degenerate the skills e.g [[1,2,-1],[2,-1,3],[-1,-1,-1]] => [[1, 2], [2, 3]]\n skills = list(map(lambda skill:list(filter(lambda x: x!=empty_action, skill)), skills))\n skills = list(filter(lambda skill:len(skill)>1, skills))\n ave_score, ave_action_reward = atari_manager.get_rewards(skills, train_total_timesteps=train_total_timesteps)\n return ave_score\n # else:\n # assert os.path.exists(args.load_model)\n # model = PPO2.load(args.load_model)\n\n\n # if args.record:\n # # env_creator = lambda:ActionRemapWrapper(gym.make(env_id))\n # env = gym.make(env_id)\n # env = ActionRemapWrapper(env)\n # env = SkillWrapper(env,skills)\n # if args.load_model is None:\n # raise NotImplementedError\n # env = VecVideoRecorder(env, args.logdir, record_video_trigger=lambda x: x == 0)\n # obs = env.reset()\n # while True:\n # action, _states = model.predict(obs)\n # obs, rewards, dones, info = env.step(action)\n # # if args.render:\n # # env.render()\n # # time.sleep(0.01)\n # if bool(dones[0]) is True:\n # break\n\n# def record():\n\n# ## search in name\n# model_path = args.load_model\n# os.path.isfile(model_path)\n# # print(args.load_model)\n \n# # search skills\n# skills=None\n# if args.skills is 
None:\n# m=re.search(\"\\[[0-9\\, \\[\\]]*\\]\", model_path)\n# if m is None:\n# raise ValueError(\"load_model: {} does not contain skills\".format(model_path))\n# skills = str_to_skills(m.group(0))\n# else:\n# skills = str_to_skills(args.skills)\n# # print(skills)\n \n# # search env-id\n# env_id=None\n# if args.env_id is None:\n# m = re.search(\"[A-Z][a-z]*-ramDeterministic-v4\", model_path)\n# if m is None:\n# raise ValueError(\"load_model: {} does not contain env id\".format(model_path))\n# env_id = m.group(0)\n# else:\n# env_id=args.env_id\n\n# save_path = args.logdir\n# if save_path is None:\n# save_path = os.path.dirname(model_path)\n \n# print(\"ENV:{} \\nskills:{} \\nmodel_path:{} \\nsave_path:{}\\n\".format(env_id, skills, model_path, save_path))\n# time.sleep(3)\n \n# env = gym.make(env_id)\n# env = ActionRemapWrapper(env)\n# env = SkillWrapper(env,skills)\n# env = DummyVecEnv([lambda: env])\n# # env = DummyVecEnv([lambda: gym.make(env_id)])\n \n \n# if args.load_model is None:\n# raise NotImplementedError\n# assert os.path.isfile(args.load_model)\n# model = PPO2.load(args.load_model)\n \n# obs = env.reset()\n# env = VecVideoRecorder(env, save_path, record_video_trigger=lambda x: x == 0, video_length=MAX_VIDEO_LENGTH)\n# env.reset()\n# total_rewards = 0 \n# while True:\n# action, _states = model.predict(obs)\n# obs, rewards, dones, info = env.step(action)\n# total_rewards += rewards\n# # if args.render:\n# # env.render()\n# # time.sleep(0.01)\n# if bool(dones[0]) is True:\n# break \n# print(total_rewards)\n# env.close()\n \n\n\n# def test_search():\n# print(args.logdir)\n\n# search = os.path.join(args.logdir,\"*.txt\")\n# print(os.path.exists(args.logdir))\n# path = glob.glob(search)\n# # print(search)\n# print(path)\n\n\n\n\nif __name__ == \"__main__\":\n # test_search()\n if args.record and args.load_model is None:\n raise NotImplementedError(\"shold use specify load_model to record\")\n if args.record or args.log_action:\n record_()\n else:\n evaluate()","sub_path":"lib/evaluate_macros.py","file_name":"evaluate_macros.py","file_ext":"py","file_size_in_byte":10364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"273704723","text":"\nimport json\nimport time\nimport os\nimport sys\nimport re\nimport pprint\nfrom os.path import join, basename, exists\n\nimport models\nimport losses\nimport readers\nimport eval_util\nimport utils\n\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\nfrom tensorflow import app\nfrom tensorflow import flags\nfrom tensorflow import gfile\nfrom tensorflow import logging\nfrom tensorflow.python.client import device_lib\nfrom tensorflow.python.lib.io import file_io\n\nfrom config import YParams\nfrom config import hparams as FLAGS\n\ndef find_class_by_name(name, modules):\n \"\"\"Searches the provided modules for the named class and returns it.\"\"\"\n modules = [getattr(module, name, None) for module in modules]\n return next(a for a in modules if a)\n\n\nclass Evaluate:\n\n def __init__(self):\n\n self.wait = 20\n\n def build_graph(self):\n \"\"\"Creates the Tensorflow graph for evaluation.\n \"\"\"\n global_step = tf.train.get_or_create_global_step()\n\n local_device_protos = device_lib.list_local_devices()\n gpus = [x.name for x in local_device_protos if x.device_type == 'GPU']\n gpus = gpus[:FLAGS.eval_num_gpu]\n num_gpus = len(gpus)\n\n if num_gpus > 0:\n logging.info(\"Using the {} GPUs\".format(num_gpus))\n num_towers = num_gpus\n device_string = '/gpu:{}'\n 
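# the global batch produced by the reader is split evenly across the towers\n      # (one tower per GPU) by tf.split further down in this method\n      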
logging.info(\"Using total batch size of {} for evaluation \"\n \"over {} GPUs: batch size of {} per GPUs.\".format(\n self.batch_size, num_towers, self.batch_size // num_towers))\n else:\n logging.info(\"No GPUs found. Eval on CPU.\")\n num_towers = 1\n device_string = '/cpu:{}'\n logging.info(\"Using total batch size of {} for evalauton \"\n \"on CPU.\".format(self.batch_size))\n\n with tf.name_scope(\"train_input\"):\n images_batch, labels_batch = self.reader.input_fn()\n\n tower_inputs = tf.split(images_batch, num_towers)\n tower_labels = tf.split(labels_batch, num_towers)\n tower_logits, tower_label_losses = [], []\n for i in range(num_towers):\n # For some reason these 'with' statements can't be combined onto the same\n # line. They have to be nested.\n with tf.device(device_string.format(i)):\n with (tf.variable_scope(\"tower\", reuse=True if i > 0 else None)):\n with (slim.arg_scope([slim.model_variable, slim.variable],\n device=\"/cpu:0\" if num_gpus!=1 else \"/gpu:0\")):\n logits = self.model.create_model(tower_inputs[i],\n n_classes=self.reader.n_classes, is_training=False)\n tower_logits.append(logits)\n label_loss = self.loss_fn.calculate_loss(\n logits=logits, labels=tower_labels[i])\n tower_label_losses.append(label_loss)\n\n self.logits = tf.concat(tower_logits, 0)\n self.labels = tf.cast(labels_batch, tf.float32)\n self.labels_losses = tf.stack(tower_label_losses)\n self.summary_op = tf.summary.merge_all()\n\n def _get_global_step_from_ckpt(self, filename):\n regex = \"(?<=ckpt-)[0-9]+\"\n return int(re.findall(regex, filename)[-1])\n\n def get_checkpoint(self, last_global_step_val):\n if FLAGS.start_eval_from_ckpt:\n files = file_io.get_matching_files(\n join(self.train_dir, 'model.ckpt-*.index'))\n # No files\n if not files:\n return None, None\n files = sorted(files, key=self._get_global_step_from_ckpt)\n start_at = FLAGS.start_eval_from_ckpt\n if str(start_at).isdigit():\n start_at = int(start_at)\n files = list(filter(lambda x: self._get_global_step_from_ckpt(x) > start_at, files))\n for filename in files:\n filname_global_step = self._get_global_step_from_ckpt(filename)\n if last_global_step_val < filname_global_step:\n return filename[:-6], filname_global_step\n return None, None\n else:\n latest_checkpoint = tf.train.latest_checkpoint(self.train_dir)\n if latest_checkpoint is None:\n return None, None\n global_step = self._get_global_step_from_ckpt(latest_checkpoint)\n return latest_checkpoint, global_step\n\n\n def eval_loop(self, last_global_step_val, evl_metrics):\n \"\"\"Run the evaluation loop once.\n\n Args:\n last_global_step_val: the global step used in the previous evaluation.\n\n Returns:\n The global_step used in the latest model.\n \"\"\"\n latest_checkpoint, global_step_val = self.get_checkpoint(\n last_global_step_val)\n logging.info(\"latest_checkpoint: {}\".format(latest_checkpoint))\n\n if latest_checkpoint is None or global_step_val == last_global_step_val:\n time.sleep(self.wait)\n return last_global_step_val\n\n config = tf.ConfigProto(allow_soft_placement=True)\n with tf.Session(config=config) as sess:\n logging.info(\"Loading checkpoint for eval: {}\".format(latest_checkpoint))\n\n # Restores from checkpoint\n self.saver.restore(sess, latest_checkpoint)\n sess.run(tf.local_variables_initializer())\n\n evl_metrics.clear()\n\n train_gpu = FLAGS.train_num_gpu\n train_batch_size = FLAGS.train_batch_size\n n_train_files = self.reader.n_train_files\n if train_gpu:\n epoch = ((global_step_val*train_batch_size*train_gpu) / n_train_files)\n else:\n 
epoch = ((global_step_val*train_batch_size) / n_train_files)\n\n examples_processed = 0\n while True:\n try:\n batch_start_time = time.time()\n\n fetches = [self.logits, self.labels, self.labels_losses,\n self.summary_op]\n logits_val, labels_val, loss_val, summary_val = sess.run(fetches)\n seconds_per_batch = time.time() - batch_start_time\n examples_per_second = self.batch_size / seconds_per_batch\n examples_processed += self.batch_size\n\n iteration_info_dict = evl_metrics.accumulate(logits_val, labels_val, loss_val)\n iteration_info_dict[\"examples_per_second\"] = examples_per_second\n\n iterinfo = utils.AddGlobalStepSummary(\n self.summary_writer,\n global_step_val,\n iteration_info_dict,\n summary_scope=\"Eval\")\n logging.info(\"examples_processed: %d | %s\", examples_processed,\n iterinfo)\n\n except tf.errors.OutOfRangeError as e:\n logging.info(\n \"Done with batched inference. Now calculating global performance \"\n \"metrics.\")\n # calculate the metrics for the entire epoch\n epoch_info_dict = evl_metrics.get()\n epoch_info_dict[\"epoch_id\"] = global_step_val\n\n self.summary_writer.add_summary(summary_val, global_step_val)\n epochinfo = utils.AddEpochSummary(\n self.summary_writer,\n global_step_val,\n epoch_info_dict,\n summary_scope=\"Eval\")\n logging.info(epochinfo)\n evl_metrics.clear()\n\n if FLAGS.stopped_at_n:\n self.counter += 1\n break\n\n except Exception as e:\n logging.info(\"Unexpected exception: {}\".format(e))\n sys.exit(0)\n\n return global_step_val\n\n def load_last_train_dir(self):\n while True:\n folders = tf.gfile.Glob(join(FLAGS.path, \"*\"))\n folders = list(filter(lambda x: \"logs\" not in x, folders))\n folders = sorted(folders, key=lambda x: basename(x))\n if folders:\n break\n return folders[-1]\n\n def load_config(self, train_dir):\n # Write json of flags\n model_flags_path = join(\"{}_logs\".format(train_dir), \"model_flags.yaml\")\n if not exists(model_flags_path):\n raise IOError(\"Cannot find file {}. Did you run train.py on the same \"\n \"--train_dir?\".format(model_flags_path))\n flags_dict = YParams(model_flags_path, \"eval\")\n return flags_dict\n\n def run(self):\n\n tf.set_random_seed(0) # for reproducibility\n\n # Setup logging & log the version.\n tf.set_random_seed(0) # for reproducibility\n\n # Setup logging & log the version.\n tf.logging.set_verbosity(logging.INFO)\n logging.info(\"Tensorflow version: {}.\".format(tf.__version__))\n\n if os.environ.get('CUDA_VISIBLE_DEVICES') is None:\n if FLAGS.eval_num_gpu == 0:\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1'\n else:\n os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(\n map(str, range(FLAGS.eval_num_gpu)))\n\n # self.train_dir = join(FLAGS.path, FLAGS.train_dir)\n self.train_dir = FLAGS.train_dir\n\n pp = pprint.PrettyPrinter(indent=2, compact=True)\n logging.info(pp.pformat(FLAGS.values()))\n\n with tf.Graph().as_default():\n if FLAGS.eval_num_gpu:\n self.batch_size = \\\n FLAGS.eval_batch_size * FLAGS.eval_num_gpu\n else:\n self.batch_size = FLAGS.eval_batch_size\n\n self.reader = find_class_by_name(FLAGS.reader, [readers])(\n self.batch_size, is_training=False)\n self.model = find_class_by_name(FLAGS.model, [models])()\n self.loss_fn = find_class_by_name(FLAGS.loss, [losses])()\n\n data_pattern = FLAGS.data_pattern\n if data_pattern is \"\":\n raise IOError(\"'data_pattern' was not specified. 
\"\n \"Nothing to evaluate.\")\n\n self.build_graph()\n logging.info(\"Built evaluation graph\")\n\n self.saver = tf.train.Saver(tf.global_variables())\n filename_suffix= \"_{}_{}\".format(\"eval\",\n re.findall(\"[a-z0-9]+\", data_pattern.lower())[0])\n self.summary_writer = tf.summary.FileWriter(\n self.train_dir,\n filename_suffix=filename_suffix,\n graph=tf.get_default_graph())\n\n evl_metrics = eval_util.EvaluationMetrics(self.reader.n_classes, 20)\n\n self.counter = 0\n last_global_step_val = 0\n while self.counter < FLAGS.stopped_at_n:\n last_global_step_val = self.eval_loop(last_global_step_val, evl_metrics)\n logging.info(\"Done evaluation -- number of eval reached.\")\n\n\n\nif __name__ == '__main__':\n evaluate = Evaluate()\n evaluate.run()\n","sub_path":"code/eval_youtube.py","file_name":"eval_youtube.py","file_ext":"py","file_size_in_byte":9828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"73434709","text":"import math\n\ndef ulength(lists):\n ulist = []\n # for i in range(len(lists)):\n # for j in range(i+1,len(lists)):\n # ux = math.pow(lists[i][1]-lists[j][1],2)\n # uy = math.pow(lists[i][2]-lists[j][2],2)\n # u = math.sqrt(ux+uy)\n # ulist.append((lists[i][0]+','+lists[j][0],u))\n\n for i in range(len(lists)):\n for j in range(i+1,len(lists)):\n u = math.hypot(lists[i][1]-lists[j][1],lists[i][2]-lists[j][2])\n ulist.append((lists[i][0] + ',' + lists[j][0], u))\n return ulist\n\nmylist=[('짜장면',2,5),('짬뽕',2,4),('라면',4,5)]\nulist = ulength(mylist)\nprint(ulist)","sub_path":"module/math/uclidian.py","file_name":"uclidian.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"464170914","text":"class Solution:\n # @param A: a list of non-negative integers.\n # return: an integer\n def houseRobber(self, A):\n # write your code here\n if not A or len(A)==0:\n return 0\n n=len(A)\n dp=[0 for i in range(n+1)]\n for j in range(n-1,-1,-1):\n dp[j]=A[j]\n if j j:\n return True\n else:\n return False\n\n\nlicznik1 = 0\nprzez2 = 0\nprzez8 = 0\nmax3 = 0\nmin3 = 100000000000000000000000000000000000000000000000\nmax_i = 0\nmin_i = 0\ni = 1\nwith open(\"liczby.txt\", \"r\") as plik:\n for L in plik:\n L = L.strip()\n # 4.1\n if wiecej_zer(L):\n licznik1 += 1\n # 4.2\n if int(L) % 2 == 0:\n przez2 += 1\n if int(L) % 8 == 0:\n przez8 += 1\n # 4.3\n if int(L, 2) > max3:\n max3 = int(L, 2)\n max_i = i\n if int(L, 2) < min3:\n min3 = int(L, 2)\n min_i = i\n i += 1\n\n\nwith open(\"wyniki4.txt\", \"w\") as odp:\n odp.write(\"4.1\\n\" + str(licznik1))\n odp.write(\"\\n\\n4.2\\n\" + \"Przez 2: \" + str(przez2) + \"\\n\" +\n \"Przez 8: \" + str(przez8))\n odp.write(\"\\n\\n4.3\\n\" + \"Najmniejsza: \" + str(min_i) + \"\\n\" +\n \"Największa: \" + str(max_i))","sub_path":"2015 pr/zadanie4/zadanie4.py","file_name":"zadanie4.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"272179551","text":"import numpy as np\r\nimport cv2\r\nfrom scipy.spatial import distance as dist\r\nimport imutils\r\nimport os \r\n\r\nMIN_CONF = 0.3\r\nNMS_THRESH = 0.3\r\nSERIOUS_DISTANCE = 50\r\nALERT_DISTANCE = 80\r\n\r\ndef detect_people(frame, net, ln):\r\n\t\r\n\t(H, W) = frame.shape[:2]\r\n\tresults = []\r\n\r\n \t# construct a blob from the input frame \r\n\t# input dimension is (393, 700, 3) and output dimension is (1, 3, 416, 416) \r\n\t# cv2.dnn.blobFromImage returna 
4-dimensional Mat with NCHW dimensions order.\r\n\tblob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416),swapRB=True, crop=False)\r\n\t\r\n\t#sets the new input for the network\r\n\tnet.setInput(blob)\r\n\r\n\t# Runs forward pass to compute output of layer with name outputName here ln.\r\n\t# Returns list of 3 outputs each are numpy.ndarrays and the each output shape is\r\n\t# 507 x 85 ---> 13 x 13 x 3 x 85\r\n\t# 2028 x 85 ---> 26 x 26 x 3 x 85\r\n\t# 8112 x 85 ---> 52 x 52 x 3 x 85\r\n\tlayerOutputs = net.forward(ln)\r\n\r\n\tboxes = []\r\n\tcentroids = []\r\n\tconfidences = []\r\n\r\n \t# loop over each of the layer outputs (3)\r\n\tfor output in layerOutputs:\r\n\t\r\n # loop over each of the detections output shape will be 507 or 2028 or 8112,\r\n\t# detection shape will be 85 \r\n\t\tfor detection in output:\r\n \t\t\r\n\t\t\t# In the detection first 4 are box coordinates and last 80 are class probabilities\r\n\t\t\tscores = detection[5:]\r\n\t\t\tclassID = np.argmax(scores)\r\n\t\t\tconfidence = scores[classID]\r\n\r\n\t\t\tbox = detection[0:4] * np.array([W, H, W, H])\r\n\r\n \t\t# filter detections by ensuring that the object detected was a person and\r\n\t\t\t# that the minimum confidence is met\r\n\t\t\tif classID == 0 and confidence > MIN_CONF:\r\n\t\t\t\t\r\n\t\t\t\t# scale the bounding box coordinates back relative to\r\n\t\t\t\t# the size of the image\r\n\t\t\t\tbox = detection[0:4] * np.array([W, H, W, H])\r\n\t\t\t\t(centerX, centerY, width, height) = box.astype(\"int\")\r\n\r\n\t\t\t\t# use the center (x, y)-coordinates to derive the top left corner of bounding box\r\n\t\t\t\tx = int(centerX - (width / 2))\r\n\t\t\t\ty = int(centerY - (height / 2))\r\n\r\n\t\t\t\t# update our list of bounding box coordinates, centroids, and confidences\r\n\t\t\t\tboxes.append([x, y, int(width), int(height)])\r\n\t\t\t\tcentroids.append((centerX, centerY))\r\n\t\t\t\tconfidences.append(float(confidence))\r\n\r\n \t# apply non-maxima suppression to suppress weak, overlapping bounding boxes\r\n\tidxs = cv2.dnn.NMSBoxes(boxes, confidences, MIN_CONF, NMS_THRESH)\r\n\t\r\n \t# ensure at least one detection exists\r\n\tif len(idxs) > 0:\r\n\r\n\t\t# idxs.flatten() returns the index of bounding boxes after non max supression.\r\n\t\tfor i in idxs.flatten():\r\n\r\n\t\t\t# extract the bounding box coordinates\r\n\t\t\t(x, y) = (boxes[i][0], boxes[i][1])\r\n\t\t\t(w, h) = (boxes[i][2], boxes[i][3])\r\n\t\t\t\r\n \t\t# update our results list to consist of the person\r\n\t\t\t# prediction probability, bounding box coordinates, and the centroid\r\n\t\t\tr = (confidences[i], (x, y, x + w, y + h), centroids[i])\r\n\t\t\tresults.append(r)\r\n\r\n\t# return the list of results\r\n\treturn results\r\n\r\nLABELS = open(\"yolov3_data/coco_classes.txt\").read().strip().split(\"\\n\")\r\n\r\nweightsPath = \"yolov3_data/yolov3.weights\"\r\nconfigPath = \"yolov3_data/yolov3.cfg\"\r\n\r\n#Reads a network model stored in Darknet model file.\r\nnet = cv2.dnn.readNetFromDarknet(configPath, weightsPath)\r\n\r\n# determine only the *output* layer names that we need from YOLO\r\n# Unlike YOLO and YOLO2, which predict the output at the last layer, \r\n# YOLOv3 predicts boxes at 3 different scales as illustrated in the below image.\r\nln = net.getUnconnectedOutLayersNames()\r\n\r\nvs = cv2.VideoCapture(\"demovideo/Test_video_1.mp4\")\r\n\r\nwhile True:\r\n\r\n\t(grabbed, frame) = vs.read()\r\n\tif not grabbed:\r\n\t\tbreak\r\n\t\r\n\tframe = imutils.resize(frame, width=700)\r\n\tresults = detect_people(frame, net, 
ln)\r\n\t\r\n\t# initialize the set of indexes that violate the minimum social\r\n\t# distance\r\n\talert = set()\r\n\tserious = set()\r\n\ta_lines=list()\r\n\ts_lines=list()\r\n\r\n\t# ensure there are *at least* two people detections (required in\r\n\t# order to compute our pairwise distance maps)\r\n\tif len(results) >= 2:\r\n \r\n\t\t# extract all centroids from the results and compute the\r\n # Euclidean distances between all pairs of the centroids\r\n\t\tcentroids = np.array([r[2] for r in results])\r\n \r\n\t\t#calculate the distance matrix for finding euclidean distance between each centroids\r\n\t\tD = dist.cdist(centroids, centroids, metric=\"euclidean\")\r\n \r\n # loop over the upper triangular of the distance matrix\r\n\t\tfor i in range(0, D.shape[0]):\r\n \r\n\t\t\tfor j in range(i + 1, D.shape[1]):\r\n \r\n\t\t\t\t# check to see if the distance between any two\r\n # centroid pairs is less than the configured number of pixels\r\n\t\t\t\tif D[i, j] <= ALERT_DISTANCE:\r\n\t\t\t\t\tif(D[i,j])<= SERIOUS_DISTANCE:\r\n\t\t\t\t\t\ts_lines.append([centroids[i],centroids[j]])\r\n\t\t\t\t\t\tserious.add(i)\r\n\t\t\t\t\t\tserious.add(j)\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\ta_lines.append([centroids[i],centroids[j]])\r\n\t\t\t\t\t\talert.add(i)\r\n\t\t\t\t\t\talert.add(j)\r\n\r\n # loop over the results\r\n\tfor (i, (prob, bbox, centroid)) in enumerate(results):\r\n\t\t\r\n\t\t# extract the bounding box and centroid coordinates, then\r\n\t\t# initialize the color of the annotation\r\n\t\t(startX, startY, endX, endY) = bbox\r\n\t\t(cX, cY) = centroid\r\n\t\tcolor = (0, 255, 0)\r\n\r\n\t\t# if the index pair exists within the violation set, then\r\n\t\t# update the color\r\n\t\tif i in alert:\r\n\t\t\tcolor = (0,255,255)\r\n\t\telif i in serious:\r\n\t\t\tcolor = (0,0,255)\r\n\t\t\r\n\t\t# draw a bounding box around the person and the\r\n\t\t# centroid coordinates of the person,\r\n\t\tcv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)\r\n\t\r\n\t# draw connecting lines for violating alert people\r\n\tfor start,end in a_lines:\r\n\t\tcv2.line(frame,start,end,(0,255,255),1)\r\n\r\n\t# draw connecting lines for violating serious people\r\n\tfor start,end in s_lines:\r\n\t\tcv2.line(frame,start,end,(0,0,255),1)\r\n \r\n\t# draw the total number of social distancing violations on the\r\n\t# output frame\r\n\ttext = \"Social Distancing Violations: \"+str(len(serious)+len(alert))\r\n\tcv2.putText(frame,text, (10, frame.shape[0] - 25),cv2.FONT_HERSHEY_SIMPLEX, 0.85, (0, 0, 255), 3)\r\n\tcv2.imshow(\"Frame\",frame)\r\n \r\n\tkey = cv2.waitKey(1) & 0xFF\r\n\tif key == ord(\"q\"): \r\n\t\tbreak\r\n ","sub_path":"detection.py","file_name":"detection.py","file_ext":"py","file_size_in_byte":6126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"104772274","text":"from tensorflow.keras.layers import Conv2D,Flatten, Dense, MaxPool2D, BatchNormalization, GlobalAveragePooling2D\nfrom tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img\nfrom tensorflow.keras.applications.resnet50 import ResNet50\nfrom tensorflow.keras.preprocessing import image\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.models import load_model\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport pandas as pd\nimport tensorflow as tf\n\n\n\n\n\n\n#dimensions\nimg_height, img_width 
= (224, 224)\nbatch_size = 32\n\n#pre-processing images\ntrain_data_dir = r\"DataSet\\processed_data\\train\"\nvalid_data_dir = r\"DataSet\\processed_data\\val\"\ntest_data_dir = r\"DataSet\\processed_data\\test\"\n\ntrain_datagen = ImageDataGenerator(preprocessing_function=preprocess_input,\n shear_range=.2,\n zoom_range=.2,\n horizontal_flip=True,\n validation_split=.4)\n\ntrain_generator = train_datagen.flow_from_directory( train_data_dir,\n target_size=(img_height, img_width),\n batch_size=batch_size,\n class_mode='categorical',\n subset='training')\nvalid_generator = train_datagen.flow_from_directory( valid_data_dir,\n target_size=(img_height, img_width),\n batch_size=batch_size,\n class_mode='categorical',\n subset='validation')\n\n\n\ntest_generator = train_datagen.flow_from_directory( valid_data_dir,\n target_size=(img_height, img_width),\n batch_size=1,\n class_mode='categorical',\n subset='validation')\n\n\ndef train():\n x, y = test_generator.next()\n print(x.shape)\n\n base_model = ResNet50(include_top=False, weights='imagenet')\n x = base_model.output\n x = GlobalAveragePooling2D()(x)\n x = Dense(1024, activation='relu')(x)\n\n # Number of classes V\n predictions = Dense(train_generator.num_classes, activation='softmax')(x)\n model = Model(inputs=base_model.input, outputs=predictions)\n\n for layer in base_model.layers:\n layer.trainable = False\n\n # Finale layer\n model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\n\n model.fit(train_generator, epochs=1)\n\n model.save(b'Model\\RestNet_Epoch_Test')\n\n # model = load_model(os.path.join(save_path, b'ResNet_50Flowers'))\n\n test_loss, test_acc = model.evaluate(test_generator, verbose=2)\n print('\\nTest Accuarcy', test_acc)\n\n\ntrain()\n","sub_path":"resnet.py","file_name":"resnet.py","file_ext":"py","file_size_in_byte":3175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"177176147","text":"#!/usr/bin/env python3\n#-*- coding: utf-8 -*-\n\n\nfrom bs4 import BeautifulSoup\nimport re\nfrom urllib import request\nfrom collections import deque\nfrom urllib.request import urlopen\nimport time\nimport random\n\ndef get_pic (url = \"\",path = \"\",i=0):\n pic_deque = deque()\n url = request.Request(url,headers={'User-agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:47.0) Gecko/20100101 Firefox/47.0'})\n data = BeautifulSoup(urlopen(url),'html.parser')\n for x in data.findAll('img', {'src' : re.compile(\"^(http://(.*)\\.sinaimg\\.(.*)\\.jpg$)\")}):\n x = x.attrs[\"src\"] #attribute\n pic_deque.append(x)\n print(\"start\")\n while len(pic_deque) > 0:\n try:\n f = open(path + '/' + str(i) + '.jpg','wb')\n img = pic_deque.popleft()\n img = request.Request(img,headers={'User-agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:46.0) Gecko/20100101 Firefox/47.0'})\n img = urlopen(img)\n a = img.read()\n f.write(a)\n print('get------>pic'+str(i))\n i+=1\n time.sleep(random.randint(3,15))\n except:\n continue;\n\n\n\nget_pic('http://jandan.net/ooxx/page-2039#comments','/Users/zhangzhichao/Documents/pic')\n\n\n\n\n#http://jandan.net/ooxx/page-2024#comments\n\n","sub_path":"python_crawler/getPicForJiandan.py","file_name":"getPicForJiandan.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"154128486","text":"# -*- coding: utf-8 -*-\n\n\nfrom odoo import models, fields, api, _\nfrom odoo.exceptions import ValidationError\n\n\nclass 
Hospital2(models.Model):\n _name = 'hospital.hospital'\n _description = 'An Erp for Hospital Management'\n\n d_name = fields.Char('Doctor', copy=True)\n appointment_num = fields.Integer('Appointment Number')\n note = fields.Text('Appointment Description')\n\n @api.constrains('appointment_num')\n def _check_doctor_appointment_days(self):\n for check in self:\n if check.appointment_num > 30:\n raise ValidationError(_('doctor must not have more than 30 appointment request in same day.'))\n","sub_path":"hospital2/models/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"39816699","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom Components.config import config\r\nfrom Components.config import ConfigYesNo\r\nfrom Components.config import ConfigSubsection\r\n\r\nfrom Plugins.Extensions.ProjectValerie.__common__ import printl2 as printl, isInetAvailable\r\nfrom Plugins.Extensions.ProjectValerie.__plugin__ import Plugin, registerPlugin\r\n\r\nfrom Components.Language import language\r\nimport gettext\r\nimport os\r\nfrom Tools.Directories import resolveFilename, SCOPE_PLUGINS, SCOPE_LANGUAGE\r\n\r\ndef localeInit():\r\n\tlang = language.getLanguage()\r\n\tos.environ[\"LANGUAGE\"] = lang[:2]\r\n\tgettext.bindtextdomain(\"enigma2\", resolveFilename(SCOPE_LANGUAGE))\r\n\tgettext.textdomain(\"enigma2\")\r\n\tgettext.bindtextdomain(\"ProjectValerie\", \"%s%s\" % (resolveFilename(SCOPE_PLUGINS), \"Extensions/ProjectValerie/locale/\"))\r\n\r\ndef _(txt):\r\n\tt = gettext.dgettext(\"ProjectValerie\", txt)\r\n\tif t == txt:\r\n\t\tt = gettext.gettext(txt)\r\n\treturn t\r\n\r\n\r\nlocaleInit()\r\nlanguage.addCallback(localeInit)\r\n\r\n#------------------------------------------------------------------------------------------\r\n\r\nconfig.plugins.pvmc.plugins.sync = ConfigSubsection()\r\nconfig.plugins.pvmc.plugins.sync.fastsynconautostart = ConfigYesNo(default=False)\r\n\r\ndef settings():\r\n\ts = []\r\n\ts.append((_(\"Fast Sync on autostart\"), config.plugins.pvmc.plugins.sync.fastsynconautostart, ))\r\n\treturn s\r\n\r\ndef autostartPlugin(session):\r\n\tif isInetAvailable():\r\n\t\tfrom Plugins.Extensions.ProjectValerie.DMC_Plugins.DMC_SyncExtras.plugin import autostart\r\n\t\tautostart(session)\r\n\telse:\r\n\t\tprintl(\"Can not sync as no internet connection available!\", __name__, \"W\")\r\n\r\ndef startPlugin(session):\r\n\tif isInetAvailable():\r\n\t\tfrom Plugins.Extensions.ProjectValerie.DMC_Plugins.DMC_SyncExtras.plugin import ProjectValerieSync\r\n\t\tsession.open(ProjectValerieSync)\r\n\telse:\r\n\t\tfrom Screens.MessageBox import MessageBox\r\n\t\tsession.open(MessageBox,_(\"No internet connection available!\"), MessageBox.TYPE_INFO, timeout=10)\r\n\r\nregisterPlugin(Plugin(id=\"sync\", name=_(\"Synchronize\"), fnc=settings, where=Plugin.SETTINGS))\r\nregisterPlugin(Plugin(id=\"sync\", name=_(\"Synchronize\"), fnc=startPlugin, where=Plugin.MENU_SYSTEM, supportStillPicture=True, weight=10))\r\nif config.plugins.pvmc.plugins.sync.fastsynconautostart.value is True:\r\n\tregisterPlugin(Plugin(id=\"sync\", name=_(\"Synchronize\"), fnc=autostartPlugin, where=Plugin.AUTOSTART))\r\n","sub_path":"ValerieMediaCenter/DMC_Plugins/DMC_Sync.py","file_name":"DMC_Sync.py","file_ext":"py","file_size_in_byte":2347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"358350713","text":"import psycopg2\nfrom pandas import Series, 
DataFrame\nimport pandas as pd\n\nlocation = r'sepactivitySandbox_associations.csv'\naccount_location = r'accounts.csv'\n\nconn = psycopg2.connect(\"host=10.2.2.8 dbname='legacy_migration' user='postgres' password='postgre'\")\ncur = conn.cursor()\n\nactivity_associations = []\ndf = pd.read_csv(location)\ndf2 = pd.read_csv(account_location)\n\nzuora_associations = df2['Channel']\nzuora_associations = [str(x).split(':')[0] for x in zuora_associations if str(x) != 'nan']\nzuora_associations.sort()\nprint(zuora_associations)\n\nfor association in df['association'].values:\n activity_associations.append(association)\nactivity_associations = list(set(activity_associations))\nactivity_associations = [x for x in activity_associations if str(x) != 'nan']\nactivity_associations.sort()\nprint(activity_associations)\n\nassociation_query = \"\"\"select * from association order by association_name;\"\"\"\n\ncur.execute(association_query)\ndb_associations = cur.fetchall()\ndb_associations = [x[1].strip() for x in db_associations if x is not None]\n# print(db_associations)\n\ndifferences = list(set(activity_associations) - set(zuora_associations))\nprint(differences)\nprint(len(differences))\n\noutput_file = open('missing_associations.txt', 'w')\nfor association in differences:\n output_file.write(association + '\\n')\n\noutput_file.close()\n\n","sub_path":"association_check/association_check.py","file_name":"association_check.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"447560116","text":"# Copyright 2021 Jared Hendrickson\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unit_test_framework\n\nclass APIUnitTestFirewallVirtualIP(unit_test_framework.APIUnitTest):\n url = \"/api/v1/firewall/virtual_ip\"\n get_payloads = [{}]\n post_payloads = [\n {\n \"mode\": \"carp\",\n \"interface\": \"wan\",\n \"subnet\": \"172.16.77.239/32\",\n \"password\": \"testpass\",\n \"descr\": \"Unit Test\"\n },\n {\n \"mode\": \"proxyarp\",\n \"interface\": \"wan\",\n \"subnet\": \"172.16.77.240/32\",\n \"descr\": \"Unit Test\"\n }\n ]\n put_payloads = [\n {\n \"id\": 0,\n \"mode\": \"carp\",\n \"interface\": \"wan\",\n \"subnet\": \"172.16.77.229/32\",\n \"password\": \"newtestpass\",\n \"vhid\": 25,\n \"descr\": \"Updated unit Test\"\n },\n {\n \"id\": 1,\n \"mode\": \"proxyarp\",\n \"interface\": \"wan\",\n \"subnet\": \"172.16.77.230/32\",\n \"descr\": \"Updated unit Test\"\n }\n ]\n delete_payloads = [\n {\"id\": 0},\n {\"id\": 0}\n ]\n\nAPIUnitTestFirewallVirtualIP()","sub_path":"tests/test_api_v1_firewall_virtual_ip.py","file_name":"test_api_v1_firewall_virtual_ip.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"411015602","text":"import psycopg2\nimport textwrap\n\nfrom settings import DestDBCredentials\n\nclass SetupDBService:\n \"\"\" Service class that wraps rules to create tables and setup\n the 
necessary indexes and trigger to the destination database.\n\n It will skip the creation of resources that are already created.\n\n The creation of tables and triggers and be done separately if necessary.\n \"\"\"\n\n def run(self):\n \"\"\" Basic interface to the service to make the entire setup\n at once.\n \"\"\"\n self.create_tables()\n self.create_indexes()\n self.create_triggers()\n\n def _get_connection(self):\n return psycopg2.connect(dbname = DestDBCredentials.db_name, user = DestDBCredentials.user, password = DestDBCredentials.password, host = DestDBCredentials.host, port = DestDBCredentials.port)\n\n def _table_exists(self, table):\n result = True\n\n try:\n with self._get_connection() as connection:\n with connection.cursor() as cursor:\n cursor.execute('SELECT * FROM {} limit 1'.format(table))\n except psycopg2.errors.UndefinedTable as e:\n result = False\n\n return result\n\n def create_tables(self):\n \"\"\" Public interface to create the destination tables\n idempotently.\n \"\"\"\n sqlStatement = \"\"\n\n if not self._table_exists(\"stage_transactions\"):\n sqlStatement += \"\"\"\n CREATE TABLE stage_transactions (\n id UUID,\n user_id UUID NOT NULL,\n certified_by_user bigint,\n amount numeric(18,2) NOT NULL,\n status text,\n created bigint NOT NULL,\n updated bigint NOT NULL\n );\n \"\"\"\n\n if not self._table_exists(\"transactions\"):\n sqlStatement += \"\"\"\n CREATE TABLE transactions (\n id UUID PRIMARY KEY,\n user_id UUID NOT NULL,\n certified_by_user bigint,\n amount numeric(18,2) NOT NULL,\n status text,\n created bigint NOT NULL,\n updated bigint NOT NULL\n );\n \"\"\"\n\n if not self._table_exists(\"user_balances\"):\n sqlStatement += \"\"\"\n CREATE TABLE user_balances (\n user_id UUID PRIMARY KEY,\n balance numeric(18,2) NOT NULL,\n created bigint NOT NULL,\n updated bigint NOT NULL\n );\n \"\"\"\n\n if sqlStatement:\n with self._get_connection() as connection:\n with connection.cursor() as cursor:\n cursor.execute(textwrap.dedent(sqlStatement))\n\n def create_indexes(self):\n \"\"\" Public interface to create the necessary indexes in the destination database.\"\"\"\n\n sqlStatement = \"\"\"\n CREATE INDEX transactions_status_idx ON transactions USING btree (status);\n CREATE INDEX transactions_updated_idx ON transactions USING btree (updated);\n CREATE INDEX transactions_created_idx ON transactions USING brin (certified_by_user, to_timestamp(created/1000));\n CREATE INDEX transactions_user_certified_idx ON transactions USING btree (user_id, certified_by_user DESC);\n \"\"\"\n\n if sqlStatement:\n with self._get_connection() as connection:\n with connection.cursor() as cursor:\n try:\n cursor.execute(textwrap.dedent(sqlStatement))\n except psycopg2.errors.DuplicateTable as e:\n pass\n\n\n def create_triggers(self):\n \"\"\" Public interface to create the destination functions and trigger idempotently.\"\"\"\n\n sqlStatement = \"\"\"\n CREATE OR REPLACE FUNCTION commit_transaction()\n RETURNS trigger AS\n $BODY$\n BEGIN\n -- Upsert transaction\n INSERT INTO transactions (id, user_id, certified_by_user, amount, status, created, updated)\n VALUES (uuid(NEW.id), uuid(NEW.user_id), NEW.certified_by_user, NEW.amount, NEW.status, NEW.created, NEW.updated)\n ON CONFLICT ON CONSTRAINT transactions_pkey DO\n UPDATE\n SET certified_by_user=NEW.certified_by_user, amount=NEW.amount, status=NEW.status, updated=NEW.updated;\n\n -- Update balance\n IF NEW.certified_by_user IS NOT NULL AND NEW.status <> 'BLOCKED' THEN\n INSERT INTO user_balances (user_id, balance, created, 
updated)\n VALUES (uuid(NEW.user_id), NEW.amount, NEW.created, NEW.updated)\n ON CONFLICT ON CONSTRAINT user_balances_pkey DO\n UPDATE\n SET balance=(user_balances.balance + NEW.amount), updated=NEW.updated;\n END IF;\n\n RETURN NEW;\n END;\n $BODY$\n LANGUAGE plpgsql;\n\n CREATE TRIGGER commit_transactions\n AFTER INSERT OR UPDATE ON stage_transactions\n FOR EACH ROW\n EXECUTE PROCEDURE commit_transaction();\n \"\"\"\n\n if sqlStatement:\n with self._get_connection() as connection:\n with connection.cursor() as cursor:\n try:\n cursor.execute(textwrap.dedent(sqlStatement))\n except psycopg2.errors.DuplicateObject as e:\n pass\n","sub_path":"services/setup_db_service.py","file_name":"setup_db_service.py","file_ext":"py","file_size_in_byte":5607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"168241119","text":"import os\nfrom itertools import islice\nfrom django.views.generic import TemplateView\nfrom django.conf import settings\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils.decorators import method_decorator\nfrom django.contrib.admin.utils import quote, unquote\n\nfrom .utils import readlines_reverse\n\n\nclass LogViewerView(TemplateView):\n \"\"\"\n LogViewerView class\n\n :cvar template_name: Name of the HTML template used to render the log files\n\n \"\"\"\n template_name = \"log_viewer/logfile_viewer.html\"\n\n @method_decorator(login_required)\n def dispatch(self, *args, **kwargs):\n return super(LogViewerView, self).dispatch(*args, **kwargs)\n\n def get_context_data(self, file_name=None, page=1, **kwargs):\n \"\"\"\n Read and return log files to be showed in admin page\n\n :param file_name: log file name\n :param page: log viewer page\n \"\"\"\n context = super(LogViewerView, self).get_context_data(**kwargs)\n # Clean the `file_name` to avoid relative paths.\n file_name = unquote(file_name).replace('/..', '').replace('..', '')\n file_urls = []\n file_names = []\n file_display = []\n page = int(page)\n lines_per_page = getattr(settings, 'LOG_ITEMS_PER_PAGE', 50)\n current_file = file_name\n context['custom_file_list_title'] = getattr(\n settings, 'LOG_VIEWER_FILE_LIST_TITLE', False\n )\n context['original_file_name'] = file_name\n context['is_django_jet'] = getattr(\n settings, 'LOG_VIEWER_IS_DJANGO_JET', False\n )\n context['custom_style_file'] = getattr(\n settings, 'LOG_VIEWER_FILE_LIST_STYLES', False\n )\n\n context['log_files'] = []\n context['next_page'] = page + 1\n len_logs_dir = len(settings.LOGS_DIR)\n for root, _, files in os.walk(settings.LOGS_DIR):\n tmp_names = list(filter(lambda x: x.find('~') == -1, files))\n # if LOG_VIEWER_FILES is not set in settings\n # then all the files with '.log' extension are listed\n if hasattr(settings, 'LOG_VIEWER_FILES'):\n tmp_names = list(\n filter(\n lambda x: x in settings.LOG_VIEWER_FILES,\n tmp_names))\n else:\n tmp_names = [\n name for name in tmp_names if (\n name.split('.')[-1]) == 'log']\n file_names += tmp_names\n file_display += [('%s/%s' % (\n root[len_logs_dir:], name))[1:] for name in tmp_names]\n file_urls += list(map(lambda x: '%s/%s' % (root, x), tmp_names))\n for i, element in enumerate(file_display):\n context['log_files'].append({\n quote(element): {\n 'uri': file_urls[i],\n 'display': element,\n }\n })\n if file_name:\n try:\n with open(os.path.join(settings.LOGS_DIR, file_name)) as file:\n next_lines = list(\n islice(readlines_reverse(file, exclude='Not Found'),\n (page - 1) * lines_per_page,\n page * lines_per_page))\n if 
len(next_lines) < lines_per_page:\n context['last'] = True\n else:\n context['last'] = False\n context['logs'] = next_lines\n context['current_file'] = current_file\n context['file'] = file\n except (IOError, ValueError):\n pass\n else:\n context['last'] = True\n return context\n\nlog_viewer = LogViewerView.as_view()\n","sub_path":"log_viewer/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"10748121","text":"import os\nimport re\nimport time\nimport sys\nimport socket\nfrom datetime import datetime\nimport pandas as pd\nimport logging\n\nfrom .history import histo_day, histo_hour, histo_minute\nfrom .price import coin_list\n\n\nCSV_HEADER = ['time', 'open', 'high', 'low', 'close', 'volumefrom', 'volumeto']\nHISTO_LIMIT = 2000\nDATE_FORMAT = '%Y-%m-%d %H:%M:%S'\nCOINMARKETCAP_TICKER_URL = \"https://api.coinmarketcap.com/v1/ticker/?limit=0\"\n# Note that the CoinMarketCap API v1 will be deprecated on November 30th, 2018\nCOINMARKETCAP_TO_CRYPTOCOMPARE = {\"MIOTA\": \"IOT\",\n \"NANO\": \"XRB\",\n \"ETHOS\": \"BQX\"}\nCRYPTOCOMPARE_EXPECTED_ERROR = r\"(.*)only available for the last 7 days(.*)\"\nCRYPTOCOMPARE_NO_DATA_ERROR = r\"Cryptocompare API Error: There is no data for the symbol(.*)\"\nREMOTE_SERVER = \"www.google.com\" # to check internet connection\nINTERNET_CHECK_RATE = 30 * 60 # 30 minutes\n\n\nclass Scraper():\n \"\"\"\n Scraper to dump easily the CryptoCompare Histo data (day, hour and minutes)\n into csv files.\n \"\"\"\n def __init__(self, path_root, logger=None):\n \"\"\"\n :param path_root: path where the csv files will be saved\n \"\"\"\n # Set the storing paths\n self.path_root = path_root\n self.path_day = os.path.join(path_root, \"day\")\n self.path_hour = os.path.join(path_root, \"hour\")\n self.path_minute = os.path.join(path_root, \"minute\")\n self.path_coin_ignore = os.path.join(path_root, \"coin_ignore_list.csv\")\n\n # Create a stdout logger if None\n if logger is None:\n log = logging.getLogger()\n log.setLevel(logging.INFO)\n\n handler = logging.StreamHandler(sys.stdout)\n handler.setLevel(logging.INFO)\n formatter = logging.Formatter('%(asctime)s - %(levelname)s - '\n '%(message)s')\n handler.setFormatter(formatter)\n log.addHandler(handler)\n self.log = log\n else:\n self.log = logger\n\n # Create missing directory\n for directory in [self.path_day, self.path_hour, self.path_minute]:\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n # Scrap all method\n def scrap(self, rate, to_curr=\"BTC\", update=True, verbose=1):\n \"\"\"\n Scrap all the data of active coins.\n\n :param rate: minute/hour/day\n :param update: if set to True, the Scraper will try to append\n existing data.\n \"\"\"\n scrap_coin_func = {\n \"minute\": self.scrap_coin_minute,\n \"hour\": self.scrap_coin_hour,\n \"day\": self.scrap_coin_day\n }\n\n coinlist = [coin for coin in self.get_active_coin_list(verbose=verbose)\n if coin != to_curr]\n self.log.info(\"Scrapping %s %s data for %d coins...\"\n % (to_curr, rate, len(coinlist)))\n success = []\n for c in coinlist:\n try:\n scrap_coin_func[rate](c, to_curr, update=update,\n verbose=verbose)\n success.append(c)\n except KeyboardInterrupt:\n break\n except Exception as e:\n # If no data error, add the coin ignore list\n if re.match(CRYPTOCOMPARE_NO_DATA_ERROR, str(e)):\n with open(self.path_coin_ignore, \"a\") as f:\n f.write(c + \"\\n\")\n self.log.warning(\"No data for the symbol %s, \"\n \"coin added 
to ignore list.\" % c)\n else:\n self.log.error(\"Failed to scrap coin %s: %s\" % (c, str(e)))\n self.log.info(\"Successfully scraped %s %s data for %d coins\"\n % (to_curr, rate, len(success)))\n\n # Individual coin scraping methods\n def scrap_coin_day(self, from_curr, to_curr=\"BTC\", update=True, verbose=1):\n \"\"\"\n Dump CryptoCompare HistoDay data into a csv.\n \"\"\"\n if verbose:\n self.log.info(\"Scraping daily data of market %s-%s...\"\n % (from_curr, to_curr))\n self.wait_for_internet_connection(INTERNET_CHECK_RATE)\n filename = from_curr + \"-\" + to_curr + \".csv\"\n csv_path = os.path.join(self.path_day, filename)\n\n data = histo_day(from_curr, to_curr, all_data=True)\n df = pd.DataFrame(data, columns=CSV_HEADER)\n df[\"time\"] = pd.to_datetime(df[\"time\"], unit='s')\n # TOFIX: case when volumeto is 0 (last row for instance)\n\n df.to_csv(csv_path, index=False)\n\n def scrap_coin_hour(self, from_curr, to_curr=\"BTC\",\n update=True, verbose=1):\n \"\"\"\n Dump CryptoCompare HistoHour data into a csv.\n\n If update = True, the scraper will first try to load\n existing data in the root path, and retrieve only the missing\n data from CryptoCompare.\n \"\"\"\n if verbose:\n self.log.info(\"Scraping hourly data of market %s-%s...\"\n % (from_curr, to_curr))\n self.wait_for_internet_connection(INTERNET_CHECK_RATE)\n filename = from_curr + \"-\" + to_curr + \".csv\"\n csv_path = os.path.join(self.path_hour, filename)\n\n # If csv already exist, load it and retrieve last timestamp\n df_existing = None\n ts_end = 0\n if update and os.path.isfile(csv_path):\n df_existing = pd.read_csv(csv_path)\n ts_end = str_to_ts(df_existing.iloc[-1, 0], DATE_FORMAT)\n\n # Retrieve first chunk of data from CryptoCompare\n ts = int(time.time())\n data = histo_hour(from_curr, to_curr, limit=HISTO_LIMIT, to_ts=ts)\n df = pd.DataFrame(data, columns=CSV_HEADER)\n\n # Retrieve data from CryptoCompare until enough data have been fetched\n # i.e. 
no more data is available (high price = 0), or remaining data\n # is already in the existing csv\n while df.loc[0, \"high\"] > 0 and df.loc[0, \"time\"] > ts_end:\n ts = int(df.head(1)[\"time\"])\n\n data = histo_hour(from_curr, to_curr,\n limit=HISTO_LIMIT, to_ts=ts - 1)\n df2 = pd.DataFrame(data, columns=CSV_HEADER)\n df = pd.concat([df2, df], axis=0, ignore_index=True)\n\n # Format data: clean zeros and convert to datetime\n for idx, row in df.iterrows():\n if not row[2] == row[3] == 0:\n df = df.drop(df.index[:idx])\n break\n df[\"time\"] = pd.to_datetime(df[\"time\"], unit='s')\n\n # Merge with existing csv data\n if df_existing is not None:\n for idx, row in df.iterrows():\n if str(row[0]) == str(df_existing.iloc[-1, 0]):\n df = df.drop(df.index[:idx + 1])\n df = pd.concat([df_existing, df],\n axis=0, ignore_index=True)\n break\n\n df.to_csv(csv_path, index=False)\n\n def scrap_coin_minute(self, from_curr, to_curr=\"BTC\",\n update=True, verbose=1):\n \"\"\"\n Dump CryptoCompare HistoMinute data into a csv.\n\n If update = True, the scraper will first try to load\n existing data in the root path, and retrieve only the missing\n data from CryptoCompare.\n \"\"\"\n if verbose:\n self.log.info(\"Scraping minute data of market %s-%s...\"\n % (from_curr, to_curr))\n self.wait_for_internet_connection(INTERNET_CHECK_RATE)\n filename = from_curr + \"-\" + to_curr + \".csv\"\n csv_path = os.path.join(self.path_minute, filename)\n\n # If csv already exist, load it and retrieve last timestamp\n df_existing = None\n ts_end = 0\n if update and os.path.isfile(csv_path):\n df_existing = pd.read_csv(csv_path)\n ts_end = str_to_ts(df_existing.iloc[-1, 0], DATE_FORMAT)\n\n # Retrieve first chunk of data from CryptoCompare\n ts = int(time.time())\n data = histo_minute(from_curr, to_curr, limit=HISTO_LIMIT, to_ts=ts)\n df = pd.DataFrame(data, columns=CSV_HEADER)\n\n # Retrieve data from CryptoCompare until enough data have been fetched\n # i.e. 
no more data is available (high price = 0), or remaining data\n # is already in the existing csv\n while df.loc[0, \"high\"] > 0 and df.loc[0, \"time\"] > ts_end:\n ts = int(df.head(1)[\"time\"])\n\n try:\n data = histo_minute(from_curr, to_curr,\n limit=HISTO_LIMIT, to_ts=ts - 1)\n except ValueError as e:\n if re.match(CRYPTOCOMPARE_EXPECTED_ERROR, str(e)):\n break\n raise e\n\n df2 = pd.DataFrame(data, columns=CSV_HEADER)\n df = pd.concat([df2, df], axis=0, ignore_index=True)\n\n # Format data: clean zeros and convert to datetime\n for idx, row in df.iterrows():\n if not row[2] == row[3] == 0:\n df = df.drop(df.index[:idx])\n break\n df[\"time\"] = pd.to_datetime(df[\"time\"], unit='s')\n\n # Merge with existing csv data\n if df_existing is not None:\n for idx, row in df.iterrows():\n if str(row[0]) == str(df_existing.iloc[-1, 0]):\n df = df.drop(df.index[:idx + 1])\n df = pd.concat([df_existing, df],\n axis=0, ignore_index=True)\n break\n\n df.to_csv(csv_path, index=False)\n\n def get_active_coin_list(self, verbose=1):\n \"\"\"\n Return a list of active coins, sorted by market cap.\n To be eligible, the coin need to be referenced on both\n Cryptocompare and Coinmarketcap, and not be referenced\n in coin_ignore_list.csv.\n\n :return: list\n \"\"\"\n self.wait_for_internet_connection(INTERNET_CHECK_RATE)\n\n # Retrieve coins to ignore\n ignore_list = pd.read_csv(self.path_coin_ignore, header=None)\n\n # Retrieve active coin list from CoinMarketCap,\n # sorted by marketcap, and remove ignore list\n df = pd.read_json(COINMARKETCAP_TICKER_URL)\n len_CMC = len(df.symbol)\n df = df[~df.symbol.isin(ignore_list[0].values)]\n active_list = df.symbol.tolist()\n\n # Retrieve coin list from CryptoCompare\n cc_list = list(coin_list()[\"Data\"].keys())\n\n # Solve different naming issues\n # (for instance: CryptoCompare IOT = CoinMarketCap MIOTA)\n for idx, crypto in enumerate(active_list):\n if crypto in list(COINMARKETCAP_TO_CRYPTOCOMPARE.keys()):\n active_list[idx] = COINMARKETCAP_TO_CRYPTOCOMPARE[crypto]\n\n # Intersection of CoinMarketCap and CryptoCompare list\n inter_list = [k for k in active_list if k in cc_list]\n\n if verbose:\n self.log.info(\"%d coins available on CoinMarketCap\" % (len_CMC))\n self.log.info(\"%d coins available on CryptoCompare\"\n % (len(cc_list)))\n self.log.info(\"%d coins on ignore list\" % (len(ignore_list)))\n self.log.info(\"%d active coins available for scraping\"\n % len(inter_list))\n\n return inter_list\n\n def check_for_updates(self, rate, to_curr, max_timedelta):\n return True\n\n def wait_for_internet_connection(self, check_rate):\n while not is_connected(REMOTE_SERVER):\n self.log.info(\"No internet connection. 
Trying again in %d min...\"\n % int(check_rate / 60))\n time.sleep(check_rate)\n\n\n# Utils\ndef ts_to_str(ts):\n \"\"\"Convert timestamp to datetime string\"\"\"\n return datetime.fromtimestamp(int(ts)).strftime(DATE_FORMAT)\n\n\ndef str_to_ts(date_str, str_format=DATE_FORMAT):\n \"\"\"Convert datetime string to timestamp\"\"\"\n dt = datetime.strptime(date_str, str_format)\n return time.mktime(dt.timetuple())\n\n\ndef is_connected(hostname):\n \"\"\"Check if internet connection is available\"\"\"\n try:\n # see if we can resolve the host name -- tells us if there is\n # a DNS listening\n host = socket.gethostbyname(hostname)\n # connect to the host -- tells us if the host is actually\n # reachable\n socket.create_connection((host, 80), 2)\n return True\n except:\n return False\n","sub_path":"cryptoscrap/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":12442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"561704462","text":"from django.http import HttpResponse, HttpResponseRedirect, Http404\nfrom django.shortcuts import render\n\nfrom corehq.apps.domain.decorators import require_superuser, domain_admin_required\nfrom corehq.apps.domain.models import Domain\nfrom corehq.apps.commtrack.management.commands import bootstrap_psi\nfrom corehq.apps.commtrack.models import Product\nfrom corehq.apps.commtrack.forms import ProductForm\nfrom soil.util import expose_download\nimport uuid\nfrom django.core.urlresolvers import reverse\nfrom dimagi.utils.web import get_url_base\nfrom django.contrib import messages\nfrom corehq.apps.commtrack.tasks import import_locations_async,\\\n import_stock_reports_async\nimport json\nfrom couchdbkit import ResourceNotFound\n\nDEFAULT_PRODUCT_LIST_LIMIT = 10\n\n@domain_admin_required # TODO: will probably want less restrictive permission\ndef product_list(request, domain, template=\"commtrack/manage/products.html\"):\n page = request.GET.get('page', 1)\n limit = request.GET.get('limit', DEFAULT_PRODUCT_LIST_LIMIT)\n\n show_inactive = json.loads(request.GET.get('show_inactive', 'false'))\n\n total = len(Product.by_domain(domain))\n\n context = {\n 'domain': domain,\n }\n context.update(\n product_list=dict(\n page=page,\n limit=limit,\n total=total,\n ),\n show_inactive=show_inactive,\n pagination_limit_options=range(DEFAULT_PRODUCT_LIST_LIMIT, 51, DEFAULT_PRODUCT_LIST_LIMIT)\n )\n return render(request, template, context)\n\n@domain_admin_required # TODO: will probably want less restrictive permission\ndef product_fetch(request, domain):\n page = int(request.GET.get('page', 1))\n limit = int(request.GET.get('limit', DEFAULT_PRODUCT_LIST_LIMIT))\n skip = (page-1)*limit\n\n sort_by = request.GET.get('sortBy', 'abc')\n\n show_inactive = json.loads(request.GET.get('show_inactive', 'false'))\n\n products = Product.by_domain(domain) #limit=limit, skip=skip)\n def product_data(p):\n info = p._doc\n info['edit_url'] = reverse('commtrack_product_edit', kwargs={'domain': domain, 'prod_id': p._id})\n return info\n\n return HttpResponse(json.dumps(dict(\n success=True,\n current_page=page,\n product_list=[product_data(p) for p in products],\n )), 'text/json')\n\n@domain_admin_required # TODO: will probably want less restrictive permission\ndef product_edit(request, domain, prod_id=None): \n if prod_id:\n try:\n product = Product.get(prod_id)\n except ResourceNotFound:\n raise Http404\n else:\n product = Product(domain=domain)\n\n if request.method == \"POST\":\n form = ProductForm(product, 
request.POST)\n if form.is_valid():\n form.save()\n messages.success(request, 'Product saved!')\n return HttpResponseRedirect(reverse('commtrack_product_list', kwargs={'domain': domain}))\n else:\n form = ProductForm(product)\n\n context = {\n 'domain': domain,\n 'product': product,\n 'form': form,\n }\n\n template=\"commtrack/manage/product.html\"\n return render(request, template, context)\n\n@require_superuser\ndef bootstrap(request, domain):\n if request.method == \"POST\":\n D = Domain.get_by_name(domain)\n if D.commtrack_enabled:\n return HttpResponse('already configured', 'text/plain')\n else:\n bootstrap_psi.one_time_setup(D)\n return HttpResponse('set up successfully', 'text/plain')\n\n return HttpResponse('
<form method=\"post\"><input type=\"submit\" value=\"bootstrap\" /></form>')\n\n@require_superuser\ndef location_import(request, domain):\n if request.method == \"POST\":\n upload = request.FILES.get('locs')\n if not upload:\n return HttpResponse('no file uploaded')\n update_existing = bool(request.POST.get('update'))\n\n # stash this in soil to make it easier to pass to celery\n file_ref = expose_download(upload.read(),\n expiry=1*60*60)\n download_id = uuid.uuid4().hex\n import_locations_async.delay(download_id, domain, file_ref.download_id, update_existing)\n return _async_in_progress(request, domain, download_id)\n\n return HttpResponse(\"\"\"\n
<form method=\"post\" enctype=\"multipart/form-data\">\n
<input type=\"file\" name=\"locs\" />\n
<input type=\"checkbox\" name=\"update\" /> update existing?\n
<input type=\"submit\" value=\"Import\" />\n
</form>
\n\"\"\")\n\n@require_superuser\ndef historical_import(request, domain):\n if request.method == \"POST\":\n file_ref = expose_download(request.FILES['history'].read(),\n expiry=1*60*60)\n download_id = uuid.uuid4().hex\n import_stock_reports_async.delay(download_id, domain, file_ref.download_id)\n return _async_in_progress(request, domain, download_id)\n\n return HttpResponse(\"\"\"\n
<form method=\"post\" enctype=\"multipart/form-data\">\n
<input type=\"file\" name=\"history\" />\n
<input type=\"submit\" value=\"Import\" />\n
</form>
\n\"\"\")\n\ndef _async_in_progress(request, domain, download_id):\n messages.success(request,\n 'Your upload is in progress. You can check the progress here.' %\\\n (reverse('hq_soil_download', kwargs={'domain': domain, 'download_id': download_id})),\n extra_tags=\"html\")\n return HttpResponseRedirect(reverse('domain_homepage', args=[domain]))\n","sub_path":"corehq/apps/commtrack/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"450974708","text":"import datetime\r\n\r\nfrom .base import Field\r\nfrom biwako import args\r\n\r\n\r\nclass Date(Field):\r\n \"\"\"\r\n A field that contains data in the form of dates,\r\n represented in Python by datetime.date.\r\n\r\n format\r\n A strptime()-style format string.\r\n See http://docs.python.org/library/datetime.html for details\r\n \"\"\"\r\n format = args.Argument(default='%Y-%m-%d')\r\n\r\n def decode(self, value):\r\n \"\"\"\r\n Parse a string value according to self.format\r\n and return only the date portion.\r\n \"\"\"\r\n if isinstance(value, datetime.date):\r\n return value\r\n return datetime.datetime.strptime(value, self.format).date()\r\n\r\n def encode(self, value):\r\n \"\"\"\r\n Format a date according to self.format and return that as a string.\r\n \"\"\"\r\n return value.strftime(self.format)\r\n\r\n\r\n","sub_path":"biwako/csv/fields/dates.py","file_name":"dates.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"186724015","text":"import csv\nimport random\n\nteams = [\n {\"name\": \"Dragons\", \"players\": []},\n {\"name\": \"Sharks\", \"players\": []},\n {\"name\": \"Raptors\", \"players\": []}\n]\n\n\ndef set_rosters():\n # Open CSV file and store players as a list of dicts\n with open('soccer_players.csv', newline='') as csvfile:\n player_reader = csv.DictReader(csvfile, delimiter=',')\n all_players = list(player_reader)\n\n # Shuffle the players and separate them based on experience\n random.shuffle(all_players)\n experienced_players = [\n player for player in all_players\n if player[\"Soccer Experience\"] == \"YES\"\n ]\n inexperienced_players = [\n player for player in all_players if player[\"Soccer Experience\"] == \"NO\"\n ]\n\n # Loop through the experienced & inexperienced players\n # to add them to each team\n while len(experienced_players) > 0:\n for team in teams:\n team[\"players\"].append(experienced_players.pop())\n\n while len(inexperienced_players) > 0:\n for team in teams:\n team[\"players\"].append(inexperienced_players.pop())\n\n # Write the rosters into a text file called teams.txt\n with open(\"teams.txt\", \"w\") as roster_file:\n for team in teams:\n roster_file.write(team[\"name\"] + \"\\n\")\n roster_file.write(\"-\" * 7 + \"\\n\")\n for player in team[\"players\"]:\n roster_file.write(\n \"{}, {}, {}\".format(\n player[\"Name\"],\n player[\"Soccer Experience\"],\n player[\"Guardian Name(s)\"]) + \"\\n\"\n )\n roster_file.write(\"\\n\\n\")\n\n print(\"Team rosters created!\")\n write_letters()\n\n\ndef write_letters():\n # Write a welcome letter to each parent\n for team in teams:\n for player in team[\"players\"]:\n with open(\n \"{}.txt\".format(\n player[\"Name\"].lower().replace(\" \", \"_\")\n ), \"w\") as letter_file:\n letter_file.write(\n \"Dear {},\".format(player[\"Guardian Name(s)\"]) + \"\\n\\n\")\n letter_file.write(\n \"{} has been selected to be a part of the {}. 
\".format(\n player[\"Name\"], team[\"name\"]))\n letter_file.write(\n \"Please bring them to the first practice \\\n on November 15th at 2pm. Thanks!\")\n print(\"Welcome letters sent!\")\n\nif __name__ == '__main__':\n # Kick off the program by calling the set_rosters function.\n set_rosters()\n","sub_path":"league_builder.py","file_name":"league_builder.py","file_ext":"py","file_size_in_byte":2584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"372435311","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Answer',\n fields=[\n ('id', models.AutoField(serialize=False, verbose_name='ID', primary_key=True, auto_created=True)),\n ('text', models.TextField(max_length=10000)),\n ('desc', models.TextField(max_length=50, blank=True)),\n ('timestamp', models.DateTimeField(auto_now_add=True)),\n ('by', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.CreateModel(\n name='Profile',\n fields=[\n ('id', models.AutoField(serialize=False, verbose_name='ID', primary_key=True, auto_created=True)),\n ('first_name', models.CharField(max_length=30)),\n ('last_name', models.CharField(max_length=30, blank=True)),\n ('Email', models.EmailField(max_length=253)),\n ('Profile_Pic', models.ImageField(blank=True, upload_to='User_ProfilePics/')),\n ('followers', models.ManyToManyField(to=settings.AUTH_USER_MODEL, related_name='following')),\n ],\n ),\n migrations.CreateModel(\n name='Question',\n fields=[\n ('id', models.AutoField(serialize=False, verbose_name='ID', primary_key=True, auto_created=True)),\n ('heading', models.TextField(max_length=500)),\n ('desc', models.TextField(max_length=1000, blank=True)),\n ('timestamp', models.DateTimeField(auto_now_add=True)),\n ('by', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ('views', models.ForeignKey(to=settings.AUTH_USER_MODEL, related_name='q_user')),\n ],\n ),\n migrations.CreateModel(\n name='Topic',\n fields=[\n ('id', models.AutoField(serialize=False, verbose_name='ID', primary_key=True, auto_created=True)),\n ('name', models.CharField(max_length=50)),\n ('description', models.CharField(max_length=200, blank=True)),\n ('related_pic', models.ImageField(blank=True, upload_to='Topic_Pics/')),\n ],\n ),\n migrations.AddField(\n model_name='answer',\n name='ques',\n field=models.ForeignKey(to='forum.Question'),\n ),\n migrations.AddField(\n model_name='answer',\n name='views',\n field=models.ForeignKey(to=settings.AUTH_USER_MODEL, related_name='a_user'),\n ),\n ]\n","sub_path":"quora/forum/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"348969274","text":"import sys, os\nimport numpy as np\nimport h5py as h5\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nimport matplotlib as mpl\nimport palettable\nimport pylab\nfrom scipy.interpolate import interp1d\n\ncosmo_dir = os.path.dirname(os.path.dirname(os.getcwd())) + '/'\nsubDirectories = [x[0] for x in os.walk(cosmo_dir)]\nsys.path.extend(subDirectories)\nfrom tools import *\nfrom constants_cgs import *\nfrom spectra_functions import *\nfrom statistics_functions import 
get_highest_probability_interval\nfrom load_tabulated_data import load_power_spectrum_table, load_tabulated_data_boera, load_tabulated_data_viel\nfrom parameters_ewald import *\n\noutputs_file = '../../scale_outputs/outputs_cosmo_2048.txt'\noutputs = np.loadtxt( outputs_file )\n\ntransparent = True\n\n\n\n#Cosmological Parameters \ncosmo_h = data_ewald['h']\nH0 = cosmo_h * 100\nOmega_M = data_ewald['Omega_0']\nOmega_L = data_ewald['Omega_L']\n\n\n#Box parameters\nLbox = data_ewald['BoxSize'] #Mpc/h\nnPoints = 512\nnx = nPoints\nny = nPoints\nnz = nPoints\nncells = nx * ny * nz\n\ndataDir = '/data/groups/comp-astro/bruno/'\n# dataDir = '/home/bruno/Desktop/ssd_0/data/'\n\nuvb = 'pchw18'\n\n# input_dir = dataDir + 'cosmo_sims/ewald_512/grid_files/'\n# output_dir = dataDir + 'cosmo_sims/ewald_512/optical_depth/'\ninput_dir = dataDir + 'cosmo_sims/2048_hydro_50Mpc/output_files_{0}/'.format(uvb)\noutput_dir = dataDir + 'cosmo_sims/2048_hydro_50Mpc/optical_depth_{0}/'.format(uvb)\ncreate_directory( output_dir )\n\n\nuse_mpi = True\n\nif use_mpi :\n from mpi4py import MPI\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n nprocs = comm.Get_size()\nelse:\n rank = 0\n nprocs = 1\n\n\nnSnap = rank\nprint(\"nSnap: {0}\".format(nSnap))\n\n\n\n\noutputFileName = output_dir + 'optical_depth_grid_{0}.h5'.format(nSnap)\noutFile = h5.File( outputFileName, 'w')\n\n\n\n\n\n\ndata_out = {}\ndata_out['F_vals'] = []\n\n\nn_boxes = 512\n\nfor n_box in range(n_boxes):\n\n inFileName = input_dir + '{0}.h5.{1}'.format( nSnap, n_box)\n print(\"Loading File:\", inFileName)\n\n inFile = h5.File( inFileName, 'r')\n current_z = inFile.attrs['Current_z']\n data_in = inFile\n\n HI_density = data_in['HI_density'][...]\n\n Lya_lambda = 1.21567e-5 #cm Rest wave length of the Lyman Alpha Transition\n f_12 = 0.416 #Oscillator strength\n Lya_sigma = np.pi * cgs.e_charge**2 / cgs.M_e / cgs.c * f_12\n \n \n #Hubble parameter\n current_a = 1./(current_z + 1)\n a_dot = np.sqrt( Omega_M/current_a + Omega_L*current_a**2 ) * H0 \n H = a_dot / current_a\n H_cgs = H * 1e5 / cgs.Mpc \n \n \n dens_HI = HI_density / (current_a)**3\n\n #Convert to CGS Units\n dens_HI *= cgs.Msun / cgs.kpc**3 * cosmo_h**2\n n_HI = dens_HI / cgs.M_p\n \n \n tau_vals = Lya_sigma * Lya_lambda / H_cgs * n_HI\n \n F_vals = np.exp( - tau_vals )\n F_val = F_vals.mean()\n data_out['F_vals'].append( F_val )\n\n \nF_vals = np.array(data_out['F_vals'])\n\n#Save Optical Depth data\n# group_kernel.create_dataset( 'tau_vals', data=tau_vals)\noutFile.create_dataset( 'F_vals', data=F_vals)\n\n\noutFile.attrs['current_z'] = current_z\noutFile.close()\nprint(\"\\nSaved File: \", outputFileName)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"analysis/transmited_flux/old/get_optical_depth_grid_2048.py","file_name":"get_optical_depth_grid_2048.py","file_ext":"py","file_size_in_byte":3069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"422193957","text":"import cv2\n\nimg_color = cv2.imread('OrgImg/y_apple.jpg', cv2.IMREAD_COLOR) # 컬러로 이미지 입력\nimg_gray = cv2.cvtColor(img_color, cv2.COLOR_BGR2GRAY) # 컬러 이미지를 그레이스케일으로 변환\n\n# src(gray), 기준, 기준보다 픽셀값이 클때 변경할 값, THRESH_BINARY일 떄 기준보다 픽셀값이 낮으면 0(INV:반전)\nret, img_binary = cv2.threshold(img_gray, 136, 255, cv2.THRESH_BINARY_INV)\ncv2.imwrite('RstImg/img_binary_inv.jpg', img_binary)\n\n\nimg_result = cv2.bitwise_and(img_color, img_color, mask=img_binary)\n\ncv2.imwrite('RstImg/img_binary_rst.jpg', 
img_result)\n","sub_path":"binarization.py","file_name":"binarization.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"466313256","text":"import logging\nfrom persistent import Persistent\nfrom persistent.mapping import PersistentMapping\nfrom zope.component import adapts, getUtility, queryUtility\nfrom zope.formlib.form import FormFields\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.CMFCore.interfaces import ISiteRoot\nfrom Products.CMFDefault.formlib.schema import SchemaAdapterBase\nfrom Products.CMFPlone.interfaces import IPloneSiteRoot\nfrom zope.interface import implements\nfrom plone.app.controlpanel.form import ControlPanelForm\nfrom Products.MailHost.interfaces import IMailHost\nfrom email.parser import HeaderParser\nfrom email.mime.text import MIMEText\nfrom .interfaces import INotifierSchema, INotifier, ISubscriptions\nfrom zope.i18nmessageid import MessageFactory\n_ = MessageFactory('hl.plone.boardnotifications')\n\nlog = logging.getLogger('hl.plone.boardnotifications.notify')\n\n\nclass NotifierControlPanelAdapter(SchemaAdapterBase):\n\n adapts(IPloneSiteRoot)\n implements(INotifierSchema)\n\n def get_subject(self):\n util = queryUtility(INotifier)\n return getattr(util, 'subject', '')\n\n def set_subject(self, value):\n util = queryUtility(INotifier)\n if util is not None:\n util.subject = value\n\n subject = property(get_subject, set_subject)\n\n def get_signature(self):\n util = queryUtility(INotifier)\n return getattr(util, 'signature', '')\n\n def set_signature(self, value):\n util = queryUtility(INotifier)\n if util is not None:\n util.signature = value\n\n signature = property(get_signature, set_signature)\n\n def get_salutations(self):\n util = queryUtility(INotifier)\n salutations = getattr(util, 'salutations', '')\n if salutations:\n salutations = '\\n'.join([':'.join(item) for item in salutations.items()])\n return salutations\n\n def set_salutations(self, value):\n util = queryUtility(INotifier)\n if util is not None:\n di = {}\n if value in (None, ''):\n di[''] = ''\n else:\n di.update([kv.split(':') for kv in value.split('\\n')])\n util.salutations = di\n\n salutations = property(get_salutations, set_salutations)\n\n def get_comment_edited_text(self):\n util = queryUtility(INotifier)\n return getattr(util, 'comment_edited_text', '')\n\n def set_comment_edited_text(self, value):\n util = queryUtility(INotifier)\n if util is not None:\n util.comment_edited_text = value\n\n comment_edited_text = property(get_comment_edited_text, set_comment_edited_text)\n\n def get_thread_moved_text(self):\n util = queryUtility(INotifier)\n return getattr(util, 'thread_moved_text', '')\n\n def set_thread_moved_text(self, value):\n util = queryUtility(INotifier)\n if util is not None:\n util.thread_moved_text = value\n\n thread_moved_text = property(get_thread_moved_text, set_thread_moved_text)\n\n def get_comment_deleted_text(self):\n util = queryUtility(INotifier)\n return getattr(util, 'comment_deleted_text', '')\n\n def set_comment_deleted_text(self, value):\n util = queryUtility(INotifier)\n if util is not None:\n util.comment_deleted_text = value\n\n comment_deleted_text = property(get_comment_deleted_text, set_comment_deleted_text)\n\n def get_subscription_comment_added_text(self):\n util = queryUtility(INotifier)\n return getattr(util, 'subscription_comment_added_text', '')\n\n def set_subscription_comment_added_text(self, value):\n util = 
queryUtility(INotifier)\n if util is not None:\n util.subscription_comment_added_text = value\n\n subscription_comment_added_text = property(get_subscription_comment_added_text, set_subscription_comment_added_text)\n\n def get_subscription_comment_edited_text(self):\n util = queryUtility(INotifier)\n return getattr(util, 'subscription_comment_edited_text', '')\n\n def set_subscription_comment_edited_text(self, value):\n util = queryUtility(INotifier)\n if util is not None:\n util.subscription_comment_edited_text = value\n\n subscription_comment_edited_text = property(get_subscription_comment_edited_text, set_subscription_comment_edited_text)\n\n\n\nclass NotifierControlPanel(ControlPanelForm):\n\n form_fields = FormFields(INotifierSchema)\n\n label = _(u'Settings for board notification mails')\n description = _(u'Here you can configure the mail texts for mails that are send out to creators or subscribers of board content when it changes. You can use the following keywords to replace them using content coming from the thread: %(threadtitle)s %(threadurl)s, %(boardtitle)s, %(mailsignature)s %(salutation)s and, when appropriate, %(commenturl)s')\n form_name = u' Notifier Settings'\n\n def updateWidgets(self):\n super(NotifierControlPanel, self).updateWidgets()\n self.widgets['subject'].size = 30\n\n\nclass Notifier(Persistent):\n\n implements(INotifier)\n\n valid_headers = ('Subject', 'From', 'To')\n\n def __init__(self):\n self.subject = None\n self.signature = None\n self.comment_edited_text = None\n self.comment_deleted_text = None\n self.subscription_comment_added_text = None\n self.subscription_comment_edited_text = None\n self.thread_moved_text = None\n self._salutations = PersistentMapping()\n\n def get_salutations(self):\n return self._salutations\n\n def set_salutations(self, mapping):\n if isinstance(mapping, PersistentMapping):\n self._salutations = mapping\n else:\n self._salutations = PersistentMapping()\n self._salutations.update(mapping)\n\n salutations = property(get_salutations, set_salutations)\n\n def _salutation_for_member(self, mdata):\n \"\"\"\n answer an appropriate salutation\n \"\"\"\n key = mdata.get('salutation', '')\n return self.salutations.get(key, '') % mdata\n\n @staticmethod\n def _encoding():\n return getUtility(ISiteRoot).getProperty('email_charset', 'utf-8')\n\n def _notify(self, mdata, text):\n headers = {}\n headers.update([tp for tp in HeaderParser().parsestr(text.encode(self._encoding())).items() if tp[0] in self.valid_headers])\n if headers.keys():\n text = '\\n\\n'.join(text.split('\\n\\n')[1:])\n msg = MIMEText(text, _charset=self._encoding())\n msg['Subject'] = self.subject\n msg['From'] = getUtility(ISiteRoot).email_from_address\n msg['To'] = mdata.get('email')\n for k, v in headers.items():\n msg.replace_header(k, v)\n mh = getUtility(IMailHost)\n mh.send(msg)\n\n def _thread_info(self, thread):\n di = {}\n di['threadtitle'] = thread.Title().decode(self._encoding())\n di['threadurl'] = thread.absolute_url()\n di['boardtitle'] = thread.getForum().Title().decode(self._encoding())\n di['mailsignature'] = self.signature\n return di\n\n @classmethod\n def _memberdata_for_content(cls, content):\n return cls._memberdata_for(content.Creator())\n\n @classmethod\n def _memberdata_for(cls, memberid):\n site = getUtility(ISiteRoot)\n mtool = getToolByName(site, 'portal_membership')\n mdtool = getToolByName(site, 'portal_memberdata')\n keys = mdtool.propertyIds()\n mdata = mtool.getMemberById(memberid)\n if mdata is None: # no memberdata, most likely the user 
has been deleted\n return\n result = {}\n result.update([(k, str(mdata.getProperty(k)).decode(cls._encoding())) for k in keys])\n return result\n\n def comment_edited(self, comment):\n \"\"\"\n a comment has been edited. Notify the creator of the comment.\n \"\"\"\n # do not notify the creator if she has edited the comment herself\n mtool = getToolByName(comment, 'portal_membership')\n member = mtool.getAuthenticatedMember()\n creator = mtool.getMemberById(comment.Creator())\n if (member == creator) or creator is None or not self.comment_edited_text or not self.comment_edited_text.strip():\n return\n thread = comment.getConversation()\n di = self._thread_info(thread)\n di.update(self._memberdata_for_content(comment))\n di['salutation'] = self._salutation_for_member(di)\n di['commenturl'] = comment.absolute_url()\n self._notify(di, self.comment_edited_text % di)\n log.info('comment %s has been edited, notified owner %s' % (di['commenturl'], di.get('email')))\n\n def thread_moved(self, thread):\n \"\"\"\n a thread has been moved to a new board. Notify all contributors.\n \"\"\"\n if not self.thread_moved_text or not self.thread_moved_text.strip():\n return\n di = self._thread_info(thread)\n memberids = set([comment.Creator() for comment in thread.getComments()])\n for memberid in memberids:\n md = self._memberdata_for(memberid)\n if md is None:\n log.info('member with id %s could not be found, unable to send notification for %s' % (memberid, di['threadurl']))\n continue\n di.update(md)\n di['salutation'] = self._salutation_for_member(di)\n self._notify(di, self.thread_moved_text % di)\n log.info('thread %s has been moved, notified contributor %s' % (di['threadurl'], di.get('email')))\n\n def comment_deleted(self, comment):\n \"\"\"\n a comment has been deleted. Notify its creator.\n \"\"\"\n if not self.comment_deleted_text or not self.comment_deleted_text.strip():\n return\n thread = comment.getConversation()\n di = self._thread_info(thread)\n di['commenturl'] = comment.absolute_url()\n md = self._memberdata_for_content(comment)\n if md is None:\n log.info('member with id %s could not be found, unable to send notification for %s' % (comment.Creator(), di['commenturl']))\n return\n di.update(self._memberdata_for_content(comment))\n di['salutation'] = self._salutation_for_member(di)\n self._notify(di, self.comment_deleted_text % di)\n log.info('comment %s has been deleted, notified owner %s' % (di['commenturl'], di.get('email')))\n\n def subscription_comment_edited(self, comment):\n \"\"\"\n a comment has been edited. Notify thread subsribers.\n \"\"\"\n if not self.subscription_comment_edited_text or not self.subscription_comment_edited_text.strip():\n return\n thread = comment.getConversation()\n di = self._thread_info(thread)\n di['commenturl'] = comment.absolute_url()\n subscriptions = getUtility(ISubscriptions)\n subscribers = subscriptions.subscribers_for(thread)\n mdtool = getToolByName(comment, 'portal_memberdata')\n keys = mdtool.propertyIds()\n for mdata in subscribers:\n if mdata.getId() == comment.Creator():\n continue\n di.update([(k, str(mdata.getProperty(k)).decode(self._encoding())) for k in keys])\n di['salutation'] = self._salutation_for_member(di)\n self._notify(di, self.subscription_comment_edited_text % di)\n log.info('comment %s has been edited, notified subscriber %s' % (di['commenturl'], di.get('email')))\n\n def subscription_comment_added(self, comment):\n \"\"\"\n a comment has been added to a thread. 
Notify thread subscribers.\n \"\"\"\n if not self.subscription_comment_added_text or not self.subscription_comment_added_text.strip():\n return\n thread = comment.getConversation()\n di = self._thread_info(thread)\n di['commenturl'] = comment.absolute_url()\n subscriptions = getUtility(ISubscriptions)\n subscribers = subscriptions.subscribers_for(thread)\n mdtool = getToolByName(comment, 'portal_memberdata')\n keys = mdtool.propertyIds()\n for mdata in subscribers:\n if mdata.getId() == comment.Creator():\n continue\n di.update([(k, str(mdata.getProperty(k)).decode(self._encoding())) for k in keys])\n di['salutation'] = self._salutation_for_member(di)\n self._notify(di, self.subscription_comment_added_text % di)\n log.info('comment %s has been added, notified subscriber %s' % (di['commenturl'], di.get('email')))\n\n","sub_path":"hl/plone/boardnotifications/notify.py","file_name":"notify.py","file_ext":"py","file_size_in_byte":12472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"167777728","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Sep 19 16:45:24 2018\n\n@author: Samuel\n\"\"\"\n\nimport pandas as pd\n\ndf = pd.read_csv(\"stroopdata.csv\")\ndf.head()\n\ndf.columns\n\ndf.boxplot(column=['Congruent', 'Incongruent'], fontsize=15)\n\ndf.describe()\n\nfrom scipy import stats\n\ncong = df['Congruent']\nincon = df['Incongruent']\n\nTtest_indResult = stats.ttest_ind(incon, cong, equal_var=False, axis=0)\nTtest_indResult.statistic\nreal_t_score=Ttest_indResult.statistic\nreal_pvalue=1-Ttest_indResult.pvalue/2\n\nfrom math import sqrt\nfrom scipy.stats import t\nN1 = 24\nN2 = 24\ndf = (N1 + N2 - 2)\nstd1 = cong.std()\nstd2 = incon.std()\nstd_N1N2 = sqrt( ((N1 - 1)*(std1)**2 + (N2 - 1)*(std2)**2) / df) \n\ndiff_mean = incon.mean() - cong.mean()\nMoE = t.ppf(0.95, df) * std_N1N2 * sqrt(1/N1 + 1/N2)\nprint ('\\nThe difference between groups is {:3.1f} [{:3.1f} to {:3.1f}] (mean [95% CI])'.format(diff_mean, diff_mean - MoE, diff_mean + MoE))\n","sub_path":"stroop.py","file_name":"stroop.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"377746801","text":"# coding=utf-8\n\nimport urllib\nimport csv\nimport json\nfrom lxml import html\nimport os\nimport sys\n\nsys.path.append('..')\nimport crawl_util\n\ncatId_brand_map = {}\nids = []\n\nheaders = {\n 'cookie': 'everywhere_tool_welcome=true; cookie2=149567e2b2700e39dbf181102ace8093; t=3f5c8aca80cc8921bb51bc3d045ea196; _tb_token_=7836e18bed1bf; _samesite_flag_=true; xlly_s=1; _m_h5_tk=50ec79008d2321b01829af658c358162_1605503375565; _m_h5_tk_enc=04bf66032e0ca7782ebf9dfbe113bafb; unb=2393890925; sn=%E5%B0%8F%E5%86%9B%E4%BA%8C%3Azhe; csg=59f48849; skt=ba78363b5c6eee68; _cc_=UtASsssmfA%3D%3D; cna=EF7KF1epo1gCAX0jZcrb8N/W; uc1=cookie14=Uoe0aDgwMULZOw%3D%3D&cookie21=VFC%2FuZ9ajQ%3D%3D; v=0; isg=BI2N2AD67t1d8ErRef9bKGBHnK8HasE8zgYPQs8SyiSTxq14l75PDQZENlqgHdn0; l=eBTmL36cOoygoTpBBOfanurza77OSIRYYuPzaNbMiOCPO61B5TsVWZ7bEqY6C3GVhs_XR38TDWZWBeYBc3xonxvtPv2ARwMmn; tfstk=crxlBog8IsNss4bD50sWBoQYFLnOwpVdse8D0nz_q6xUmo5mprzNsoGcNQxLR',\n}\n\n\ndef brand_trademark():\n f = 'taobao_brand_trademark.csv'\n brand_trade_mark = set()\n if os.path.exists(f):\n with open(f, 'r') as r:\n r = csv.reader(r)\n next(r)\n for row in r:\n brand_trade_mark.add(row[0])\n else:\n with open(f, 'w') as w:\n w = csv.writer(w)\n w.writerow(['brand', 'nums', 'catId', '品牌名', '类目', '商标注册人', '商标注册号', '类目上的品牌状态', 'url'])\n urls = 
set()\n with open('urls.txt', 'r') as r:\n for url in r.readlines():\n urls.add(url.strip())\n with open(f, 'a') as w:\n w = csv.writer(w)\n with open('urls.txt', 'a') as url_w:\n with open('taobao_brand.csv', 'r') as r:\n r = csv.reader(r)\n next(r)\n cnt = 0\n for row in r:\n cnt += 1\n name = row[0]\n brand = name\n if name in brand_trade_mark or '测试' in name:\n continue\n catIds = json.loads(row[1])\n find = False\n name = name.replace('•', ' ')\n name = name.replace('▪', ' ')\n try:\n name = urllib.parse.quote(name, encoding='gb18030')\n except:\n print('failed, %s' % name)\n for i, catId in enumerate(catIds):\n url = 'https://baike.taobao.com/brandCategoryApply.htm?actionType=searchAppliableBrandCategories&categoryId=%s&brandName=%s' % (\n catId, name)\n if url in urls:\n continue\n if i > 0:\n print('%s \\033[4;32;40m%s, %s/%s, %s\\033[0m' % (\n ' ' * (len(str(cnt)) + 1), brand, i + 1, len(catIds), url))\n else:\n print('%s, %s, %s/%s, %s' % (cnt, brand, i + 1, len(catIds), url))\n\n rows = get_brand_trademark(url)\n\n if rows is False:\n return\n for row in rows:\n row = [brand, len(catIds), catId] + row + [url]\n w.writerow(row)\n if len(rows) > 0:\n find = True\n break\n else:\n url_w.write(url + '\\n')\n if not find:\n w.writerow([brand, len(catIds), catId] + [''] * 5 + [url])\n\n\ndef get_brand_trademark(url):\n page = crawl_util.crawl(url, headers=headers)\n content = page.content.decode('GB18030')\n if '品牌申请' not in content:\n print('Fail, need change cookie.')\n return False\n sel = html.document_fromstring(content, parser=html.HTMLParser(encoding='GB18030'))\n trs = sel.xpath('//table/tr')\n rows = []\n for tr in trs[1:]:\n row = []\n for td in tr.getchildren()[0:5]:\n txt = td.text.strip()\n if not txt:\n txt = td.getchildren()[0].text\n txt = txt.strip() if txt else ''\n row.append(txt)\n rows.append(row)\n return rows\n\n\nif __name__ == '__main__':\n brand_trademark()\n","sub_path":"work/crawl/taobao/taobao_brand.py","file_name":"taobao_brand.py","file_ext":"py","file_size_in_byte":4342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"484585681","text":"\"\"\"\nExample inference instances for Assignment 05.\n\n2018-03-16: Revised by Tom Loredo for BDA18\n2020-03-05: Revised for BDA20\n\"\"\"\n\nimport numpy as np\nimport scipy\nimport matplotlib as mpl\nfrom matplotlib.pyplot import *\nfrom numpy import *\nfrom scipy import stats\n\nfrom poisson_binomial_cauchy import PoissonRateInference, BinomialInference, \\\n CauchyLocationInference\n\n\nion() # for interactive use in a terminal session\n\n\ndef g_mean(params):\n \"\"\"\n Return the function whose expectation gives the posterior mean, i.e.,\n just return the values of the params.\n \"\"\"\n params = asarray(params)\n return params\n\n\nhave_laplace = False\n\n\n#-------------------------------------------------------------------------------\n# 1st case: Poisson, const prior, (n,T) = (16, 2)\n\nr_u = 20. # upper limit for PDF calculation and plotting\n\n# Create a PRI instance and plot the PDF.\nprior_l, prior_u = 0., 1e5\nflat_pdf = 1./(prior_u - prior_l)\nn, T = 16, 2\npri = PoissonRateInference(T, n, flat_pdf, r_u)\npri.plot(alpha=.5)\n\nxlabel(r'Rate (s$^{-1}$)')\nylabel('PDF (s)')\ntitle('Poisson case')\n\nif have_laplace:\n # Laplace approx for the marg. like. 
and the mean:\n ampl, locn, sig, ml = pri.laplace()\n laplace_mean = pri.laplace(g_mean)\n post_mean_l = laplace_mean[3]/ml\n\n # Use results to plot a Gaussian PDF here.\n\n # Print using string formatting:\n print('Poisson case:')\n print('Marg. like.: {:.4e} (quad), {:.4e} (Laplace)'.format(pri.mlike, ml))\n print('Posterior mean: {:4.2f} (quad), {:4.2f} (Laplace)'.format(pri.post_mean, post_mean_l))\n print()\n\n#-------------------------------------------------------------------------------\n# 2nd case: Binomial, const prior, (n, n_trials) = (8, 12)\n\n# Define the data.\nn, n_trials = 8, 12\n\nbi = BinomialInference(n, n_trials)\nbfig = figure() # separate figure for binomial case\nbi.plot(alpha=.5)\n\nxlabel(r'$\\alpha$')\nylabel('Posterior PDF')\ntitle('Binomial case')\n\nif have_laplace:\n # Laplace approx for the marg. like. and the mean:\n laplace_ml = bi.laplace()\n laplace_mean = bi.laplace(g_mean)\n post_mean_l = laplace_mean[3]/laplace_ml[3]\n\n # Use results to plot a Gaussian PDF here.\n\n # Print using string formatting:\n print('Beta case:')\n print('Marg. like.: {:10.4e} (quad), {:10.4e} (Laplace)'.format(bi.mlike, laplace_ml[3]))\n print('Posterior mean: {:4.2f} (quad), {:4.2f} (Laplace)'.format(bi.post_mean, post_mean_l))\n print()\n\n#-------------------------------------------------------------------------------\n# 3rd case: Cauchy, const prior\n\n\nx0, d = 5., 3.\ndata = stats.cauchy(x0, d).rvs(5)\nflat_pdf = .001 # e.g., for prior range 1e3\n\ncli = CauchyLocationInference(d, data, flat_pdf, (-15., 25.))\ncfig = figure()\ncli.plot(alpha=.5)\n# xlim(-10, 15.)\n\nxlabel('$x_0$')\nylabel('Posterior PDF')\ntitle('Cauchy case; CDF method')\n\nsamps = []\nfor i in range(10000):\n samps.append(cli.samp_cdf())\nsamps = array(samps)\n\nhist(samps, 50, density=True, color='g', alpha=.5)\n\nif have_laplace:\n # Laplace approx for the marg. like. and the mean:\n ampl, locn, sig, ml = cli.laplace()\n laplace_mean = cli.laplace(g_mean)\n post_mean_l = laplace_mean[3]/ml\n\n # Use results to plot a Gaussian PDF here.\n\n # Print using string formatting:\n print('Cauchy case:')\n print('Marg. like.: {:.4e} (quad), {:.4e} (Laplace)'.format(cli.mlike, ml))\n print('Posterior mean: {:4.2f} (quad), {:4.2f} (Laplace)'.format(cli.post_mean, post_mean_l))\n","sub_path":"Lab07-Assignment05/Assignment05/Examples.py","file_name":"Examples.py","file_ext":"py","file_size_in_byte":3468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"578006579","text":"import math\nfrom itertools import filterfalse\n\n\ndef num_buses(n):\n \"\"\" (int) -> int\n\n Precondition: n >= 0\n\n Return the minimum number of buses required to transport n people.\n Each bus can hold 50 people.\n\n >>> num_buses(75)\n 2\n >>> num_buses(0)\n 0\n >>> num_buses(10)\n 1\n >>> num_buses(50)\n 1\n >>> num_buses(150)\n 3\n \"\"\"\n return math.ceil(n / 50)\n\n\ndef stock_price_summary(price_changes):\n \"\"\" (list of number) -> (number, number) tuple\n\n price_changes contains a list of stock price changes. 
Return a 2-item\n tuple where the first item is the sum of the gains in price_changes and\n the second is the sum of the losses in price_changes.\n\n >>> stock_price_summary([0.01, 0.03, -0.02, -0.14, 0, 0, 0.10, -0.01])\n (0.14, -0.17)\n >>> stock_price_summary([-0.03, -0.14, -0.01])\n (0.0, -0.18)\n >>> stock_price_summary([0.03, 0.14, 0.01, 0.05])\n (0.23, 0.0)\n >>> stock_price_summary([])\n (0.0, 0.0)\n \"\"\"\n gain = sum(filterfalse(lambda x: x < 0, price_changes))\n loss = sum(filterfalse(lambda x: x > 0, price_changes))\n\n return (float(\"{:.2f}\".format(gain)), float(\"{:.2f}\".format(loss)))\n\n\ndef swap_k(L, k):\n \"\"\" (list, int) -> NoneType\n\n Precondtion: 0 <= k <= len(L) // 2\n\n Swap the first k items of L with the last k items of L.\n\n >>> nums = [1, 2, 3, 4, 5, 6]\n >>> swap_k(nums, 2)\n >>> nums\n [5, 6, 3, 4, 1, 2]\n >>> nums = [1, 2, 3]\n >>> swap_k(nums, 1)\n >>> nums\n [3, 2, 1]\n >>> swap_k(nums, 2)\n >>> nums\n [2, 3, 1, 2]\n >>> nums = [1, 2, 3]\n >>> swap_k(nums, 3)\n >>> nums\n [1, 2, 3, 1, 2, 3]\n >>> nums = [1, 2, 3]\n >>> swap_k(nums, 4)\n >>> nums\n [1, 2, 3]\n \"\"\"\n if k > len(L):\n return\n\n first_nums = L[:k]\n last_nums = L[-k:]\n middle = L[k:-k]\n L.clear()\n L.extend(last_nums + middle + first_nums)\n\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod()\n","sub_path":"Python/Coursera/UnitTests/a1.py","file_name":"a1.py","file_ext":"py","file_size_in_byte":1957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"219199341","text":"# Task\n# Given a base-10 integer, n, convert it to binary (base-2). Then find and print the base-10 integer denoting the maximum number of consecutive 1's in n's binary representation.\n\n# Input Format\n# A single integer, n.\n\n# Constraints\n# 1 <= n <= 10e6\n\n# Output Format\n# Print a single base-10 integer denoting the maximum number of consecutive 1's in the binary representation of n.\n\n# Solution 1\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n\n\nif __name__ == '__main__':\n n = int(input())\n n = str(bin(n))[2:]\n\n lst = []\n\n for i in range(len(n)):\n count = 0\n while n[i] == '1':\n count += 1\n if i 0 and len(mem['data']['list']) > 0 and len(disk['data']['list']):\n business.append({\n 'biz_name' : biz['bk_biz_name'],\n 'cpu' : round(cpu['data']['list'][0]['cpu'], 2),\n 'mem' : round(mem['data']['list'][0]['mem'], 2),\n 'disk' : round(disk['data']['list'][0]['disk'], 2),\n })\n else:\n business.append({\n 'biz_name': biz['bk_biz_name'],\n 'cpu': 0,\n 'mem': 0,\n 'disk':0,\n })\n result = dict()\n\n else:\n break\n if type:\n business.sort(key=lambda k: (k.get(type, 0)), reverse=True)\n else:\n #排序 先排CPU,CPU相同排mem,都相同排disk\n business.sort(key=lambda k: (k.get('cpu', 0), k.get('mem', 0),k.get('disk', 0)), reverse=True)\n result['data'] = business\n result['code'] = 200\n result['message'] = \"Success\"\n return HttpResponse(json.dumps(result), content_type='application/json')\n\n\n@login_exempt\ndef disk_use(request):\n \"\"\"\n 获取业务下磁盘使用\n :param request:\n :return:\n \"\"\"\n user = 'admin'\n client = get_client_by_user(user)\n bizs = client.cc.search_business()\n business = []\n for biz in bizs['data']['info']:\n if biz['bk_biz_id'] == 2:\n continue\n kwargs = {\n 'sql': 'select max(in_use) as disk from ' + str(\n biz['bk_biz_id']) + '_system_disk order by time desc limit 1'\n }\n disk = client.monitor.query_data(kwargs)\n if disk['result'] != False and disk['code'] == '0':\n if len(disk['data']['list']) > 
0:\n business.append({\n 'biz_name': biz['bk_biz_name'],\n 'disk_use': round(disk['data']['list'][0]['disk'], 2),\n })\n else:\n business.append({\n 'biz_name': biz['bk_biz_name'],\n 'disk_use': 0,\n })\n else:\n break\n result = dict()\n fun = operator.attrgetter('disk_use')\n business.sort(key = lambda x:x[\"disk_use\"], reverse=True)\n result['data'] = business[0:3]\n result['code'] = 200\n result['message'] = \"Success\"\n\n return HttpResponse(json.dumps(result), content_type='application/json')\n\n@login_exempt\ndef char_data(request):\n \"\"\"\n 图表数据\n :param request:\n :return:\n \"\"\"\n\n type = int(request.GET.get('type'))\n biz_id = request.GET.get('id')\n if biz_id is None or biz_id == '':\n biz_id = 2\n data_list = []\n if type == 1:\n kwargs = {\n 'sql': 'select max(usage) as usage from ' + str(biz_id) + '_system_cpu_detail where time >= \"1h\" '\n 'group by ip,minute10 order by time desc limit 10'\n }\n if type == 2:\n kwargs = {\n 'sql': 'select max(pct_used) as usage from ' + str(biz_id) + '_system_mem where time >= \"1h\"'\n ' group by ip,minute10 order by time desc limit 10'\n }\n if type == 3:\n kwargs = {\n 'sql': 'select max(in_use) as usage from ' + str(biz_id) + '_system_disk where time >= \"1h\" '\n 'group by ip,minute10 order by time desc limit 10'\n }\n user = 'admin'\n client = get_client_by_user(user)\n date_list = client.monitor.query_data(kwargs)\n datalist = []\n datelist = []\n if date_list['code'] == '0' and date_list['result'] != False:\n for date in date_list['data']['list']:\n datelist.append(datetime.fromtimestamp(date['time']/1000).strftime('%H:%M'))\n if date['usage'] is None:\n datalist.append(0)\n else:\n datalist.append( round(date['usage'],2))\n result = dict(data={\n 'data_list':datalist,\n 'date_list':datelist\n })\n\n result['code'] = 200\n result['message'] = \"Success\"\n else:\n result = dict()\n result['code'] = 500\n result['message'] = \"数据获取异常\"\n return HttpResponse(json.dumps(result), content_type='application/json')\n\n\n@login_exempt\ndef get_crux(request):\n \"\"\"\n 获取CPU使用最大业务id\n :param request:\n :return:\n \"\"\"\n user = 'admin'\n client = get_client_by_user(user)\n bizs = client.cc.search_business()\n business = []\n for biz in bizs['data']['info']:\n cpu_kwargs = {\n 'sql' : 'select max(usage) as cpu from ' +str(biz['bk_biz_id'])+ '_system_cpu_detail where time >= \"1m\" group by ip order by time desc limit 1'\n }\n\n cpu = client.monitor.query_data(cpu_kwargs)\n if cpu['result'] != False and cpu['code'] == '0':\n if len(cpu['data']['list']) > 0:\n business.append({\n 'biz_id': biz['bk_biz_id'],\n 'biz_name': biz['bk_biz_name'],\n 'cpu': round(cpu['data']['list'][0]['cpu'], 2),\n })\n else:\n business.append({\n 'biz_id': biz['bk_biz_id'],\n 'biz_name': biz['bk_biz_name'],\n 'cpu': 0,\n })\n result = dict()\n\n else:\n break\n business.sort(key=lambda k: (k.get('cpu', 0)), reverse=True)\n result['data'] = business[0]\n result['code'] = 200\n result['message'] = \"Success\"\n return HttpResponse(json.dumps(result), content_type='application/json')\n\n\n@login_exempt\ndef get_proc(request):\n \"\"\"\n 获取进程使用资源情况\n :param request:\n :return:\n \"\"\"\n user = 'admin'\n client = get_client_by_user(user)\n biz_id = 3\n para = {'bk_biz_id': biz_id}\n host = client.cc.search_host(para)\n\n kwargs = {\n 'sql' : 'select cpu_usage_pct,mem_usage_pct,mem_res,mem_virt,fd_num from '+str(biz_id)+'_system_proc group by proc_name order by time desc limit 1'\n }\n res = client.monitor.query_data(kwargs)\n data = 
dict(list=res['data']['list'])\n data['host'] = host['data']['info'][0]['host']['bk_host_innerip']\n if res['result'] != False and res['code'] == '0':\n if len(res['data']['list']) > 0:\n result=dict(data=data)\n result['code'] = 200\n result['message'] = \"Success\"\n\n else:\n result = dict()\n result['code'] = 500\n result['message'] = \"fail\"\n\n return HttpResponse(json.dumps(result), content_type='application/json')\n","sub_path":"home_application/api_views.py","file_name":"api_views.py","file_ext":"py","file_size_in_byte":13138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"480267674","text":"def main():\n H, W, K = map(int, input().split())\n \n if W == 1:\n print(1)\n exit()\n\n MOD = 10 ** 9 + 7\n dp = [[0] * W for _ in range(H+1)]\n dp[0][0] = 1\n\n for h in range(H):\n # 1: 線あり、 0: 線なし\n for i in range(2 ** (W-1)):\n # 連続した1がある→隣接する列に線が引かれている\n if bin(i).count('11') >= 1:\n continue\n perm = [k for k in range(W)]\n for j in range(W-1):\n if (i >> j) & 1:\n perm[j], perm[j+1] = perm[j+1], perm[j]\n\n for j in range(W):\n dp[h+1][perm[j]] += dp[h][j]\n dp[h+1][perm[j]] %= MOD\n\n print(dp[H][K-1])\n\n\n\nmain()","sub_path":"Python_codes/p03222/s472993569.py","file_name":"s472993569.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"559570892","text":"\n\n#calss header\nclass _INTELLIGENCE():\n\tdef __init__(self,): \n\t\tself.name = \"INTELLIGENCE\"\n\t\tself.definitions = [u'the ability to learn, understand, and make judgments or have opinions that are based on reason: ', u'secret information about the governments of other countries, especially enemy governments, or a group of people who collect and deal with this information: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_intelligence.py","file_name":"_intelligence.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"598702932","text":"#!/usr/bin/env python\n# coding=utf-8\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\n# sopel imports\nimport sopel.module\n\n\n# imports for system and OS access, directories\nimport os\nimport sys\n\n# imports based on THIS file\nmoduledir = os.path.dirname(__file__)\nshareddir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\nsys.path.append(shareddir)\nfrom BotShared import *\n\n\n\n\n\n\n\"\"\"\nbot.nick do this\n@module.rule('^\\.(.*)')\n@module.rule('(.*)')\n\"\"\"\n\n\n# TODO make sure restart and update save database\n@module.rule('^\\.|!(.*)')\n@module.thread(True)\ndef mainfunction(bot, trigger):\n\n botcom = botcom_symbol_trigger(bot, trigger)\n\n # Bots block\n if bot_check_inlist(bot, botcom.instigator, [bot.nick]):\n return\n\n # does not apply to bots\n if \"altbots\" in bot.memory:\n if bot_check_inlist(bot, botcom.instigator, bot.memory[\"altbots\"].keys()):\n return\n\n execute_main(bot, trigger, botcom)\n botdict_save(bot)\n\n\ndef execute_main(bot, trigger, botcom):\n\n # command issued, check if valid\n botcom.dotcommand = spicemanip.main(botcom.triggerargsarray, 1).lower()[1:]\n if botcom.dotcommand in bot.memory['dict_commands'].keys() and botcom.dotcommand not in 
bot.memory['module_commands'].keys():\n bot_dictcom_handle(bot, botcom)\n\n\ndef bot_dictcom_handle(bot, botcom):\n\n # command aliases\n if \"aliasfor\" in bot.memory['dict_commands'][botcom.dotcommand].keys():\n botcom.dotcommand = bot.memory['dict_commands'][botcom.dotcommand][\"aliasfor\"]\n\n # simplify usage of the bot command going forward\n # copy dict to not overwrite\n botcom.dotcommand_dict = copy.deepcopy(bot.memory['dict_commands'][botcom.dotcommand])\n\n # remainder, if any is the new arg list\n botcom.triggerargsarray = spicemanip.main(botcom.triggerargsarray, '2+')\n\n # patch for people typing \"...\", maybe other stuff, but this verifies that there is still a command here\n if not botcom.dotcommand:\n return\n\n botcom.maincom = botcom.dotcommand_dict[\"validcoms\"][0]\n\n # execute function based on command type\n botcom.commandtype = botcom.dotcommand_dict[\"type\"].lower()\n\n # allow && splitting\n botcom.multiruns = True\n if not botcom.channel_priv:\n if botcom.maincom in bot.memory[\"botdict\"]['servers_list'][botcom.server]['channels_list'][str(botcom.channel_current)][\"multirun_disabled_commands\"].keys():\n botcom.multiruns = False\n\n if not botcom.multiruns:\n bot_dictcom_process(bot, botcom)\n else:\n # IF \"&&\" is in the full input, it is treated as multiple commands, and is split\n commands_array = spicemanip.main(botcom.triggerargsarray, \"split_&&\")\n if commands_array == []:\n commands_array = [[]]\n for command_split_partial in commands_array:\n botcom.triggerargsarray = spicemanip.main(command_split_partial, 'create')\n\n # bot_dictcom_simple(bot, botcom) # TODO rename\n botcom.completestring = spicemanip.main(botcom.triggerargsarray, 0)\n\n bot_dictcom_process(bot, botcom)\n\n\ndef bot_dictcom_process(bot, botcom):\n\n # use the default key, unless otherwise specified\n botcom.responsekey = \"?default\"\n\n # handling for special cases\n posscom = spicemanip.main(botcom.triggerargsarray, 1)\n if posscom.lower() in [command.lower() for command in botcom.dotcommand_dict.keys()]:\n for command in botcom.dotcommand_dict.keys():\n if command.lower() == posscom.lower():\n posscom = command\n botcom.responsekey = posscom\n botcom.triggerargsarray = spicemanip.main(botcom.triggerargsarray, '2+', 'list')\n botcom.commandtype = botcom.dotcommand_dict[botcom.responsekey][\"type\"]\n\n botcom.nonstockoptions = []\n for command in botcom.dotcommand_dict.keys():\n if command not in [\"?default\", \"validcoms\", \"contributors\", \"author\", \"type\", \"filepath\", \"filename\", \"hardcoded_channel_block\", \"description\", \"exampleresponse\", \"example\", \"privs\"]:\n botcom.nonstockoptions.append(command)\n\n # This allows users to specify which reply by number by using an ! 
and a digit (first or last in string)\n validspecifides = ['block', 'unblock', 'last', 'random', 'count', 'view', 'add', 'del', 'remove', 'special', 'contribs', 'contrib', \"contributors\", 'author', \"alias\", \"filepath\", \"filename\", \"enable\", \"disable\", \"multiruns\", \"description\", \"exampleresponse\", \"example\", \"usage\", \"privs\"]\n botcom.specified = None\n argone = spicemanip.main(botcom.triggerargsarray, 1)\n if str(argone).startswith(\"--\") and len(str(argone)) > 2:\n if str(argone[2:]).isdigit():\n botcom.specified = int(argone[2:])\n elif bot_check_inlist(bot, str(argone[2:]), validspecifides):\n botcom.specified = str(argone[2:]).lower()\n elif bot_check_inlist(bot, str(argone[2:]), botcom.nonstockoptions):\n botcom.specified = str(argone[2:]).lower()\n botcom.responsekey = botcom.specified\n else:\n try:\n botcom.specified = w2n.word_to_num(str(argone[1:]))\n botcom.specified = int(botcom.specified)\n except ValueError:\n botcom.specified = None\n if botcom.specified:\n botcom.triggerargsarray = spicemanip.main(botcom.triggerargsarray, '2+', 'list')\n\n # commands that can be updated\n if botcom.dotcommand_dict[botcom.responsekey][\"updates_enabled\"]:\n if botcom.dotcommand_dict[botcom.responsekey][\"updates_enabled\"] == \"shared\":\n adjust_nick_array(bot, str(bot.nick), 'long', 'sayings', botcom.maincom + \"_\" + str(botcom.responsekey), botcom.dotcommand_dict[botcom.responsekey][\"responses\"], 'startup')\n botcom.dotcommand_dict[botcom.responsekey][\"responses\"] = get_nick_value(bot, str(bot.nick), 'long', 'sayings', botcom.dotcommand_dict[\"validcoms\"][0] + \"_\" + str(botcom.responsekey)) or []\n elif botcom.dotcommand_dict[botcom.responsekey][\"updates_enabled\"] == \"user\":\n adjust_nick_array(bot, str(botcom.instigator), 'long', 'sayings', botcom.maincom + \"_\" + str(botcom.responsekey), botcom.dotcommand_dict[botcom.responsekey][\"responses\"], 'startup')\n botcom.dotcommand_dict[botcom.responsekey][\"responses\"] = get_nick_value(bot, str(botcom.instigator), 'long', 'sayings', botcom.dotcommand_dict[\"validcoms\"][0] + \"_\" + str(botcom.responsekey)) or []\n\n # Hardcoded commands Below\n if botcom.specified == 'enable':\n\n if botcom.channel_priv:\n return osd(bot, botcom.instigator, 'notice', \"This command must be run in the channel you which to \" + botcom.specified + \" it in.\")\n\n if botcom.maincom not in bot.memory[\"botdict\"]['servers_list'][botcom.server]['channels_list'][str(botcom.channel_current)][\"disabled_commands\"].keys():\n return osd(bot, botcom.channel_current, 'say', botcom.maincom + \" is already \" + botcom.specified + \"d in \" + str(botcom.channel_current))\n\n if not bot_command_modding_auth(bot, botcom):\n return osd(bot, botcom.channel_current, 'say', \"You are not authorized to \" + botcom.specified + \" \" + botcom.maincom + \" in \" + str(botcom.channel_current))\n\n del bot.memory[\"botdict\"]['servers_list'][botcom.server]['channels_list'][str(botcom.channel_current)][\"disabled_commands\"][botcom.maincom]\n osd(bot, botcom.channel_current, 'say', botcom.maincom + \" is now \" + botcom.specified + \"d in \" + str(botcom.channel_current))\n botdict_save(bot)\n return\n\n elif botcom.specified == 'disable':\n\n if botcom.channel_priv:\n return osd(bot, botcom.instigator, 'notice', \"This command must be run in the channel you which to \" + botcom.specified + \" it in.\")\n\n if botcom.maincom in 
bot.memory[\"botdict\"]['servers_list'][botcom.server]['channels_list'][str(botcom.channel_current)][\"disabled_commands\"].keys():\n return osd(bot, botcom.channel_current, 'say', botcom.maincom + \" is already \" + botcom.specified + \"d in \" + str(botcom.channel_current))\n\n if not bot_command_modding_auth(bot, botcom):\n return osd(bot, botcom.channel_current, 'say', \"You are not authorized to \" + botcom.specified + \" \" + botcom.maincom + \" in \" + str(botcom.channel_current))\n\n trailingmessage = spicemanip.main(botcom.triggerargsarray, 0) or \"No reason given.\"\n timestamp = str(datetime.datetime.utcnow())\n bot.memory[\"botdict\"]['servers_list'][botcom.server]['channels_list'][str(botcom.channel_current)][\"disabled_commands\"][botcom.maincom] = {\"reason\": trailingmessage, \"timestamp\": timestamp, \"disabledby\": botcom.instigator}\n osd(bot, botcom.channel_current, 'say', botcom.maincom + \" is now \" + botcom.specified + \"d in \" + str(botcom.channel_current) + \" at \" + str(timestamp) + \" for the following reason: \" + trailingmessage)\n botdict_save(bot)\n return\n\n elif botcom.specified == 'multiruns':\n\n if botcom.channel_priv:\n osd(bot, botcom.instigator, 'notice', \"This argument must be run in channel.\")\n return\n\n if not bot_command_modding_auth(bot, botcom):\n osd(bot, botcom.channel_current, 'say', \"You are not authorized to turn \" + botcom.specified + \" multicom usage in \" + str(botcom.channel_current))\n return\n\n onoff = spicemanip.main(botcom.triggerargsarray, 1)\n if onoff == 'on':\n if botcom.maincom not in bot.memory[\"botdict\"]['servers_list'][botcom.server]['channels_list'][str(botcom.channel_current)][\"multirun_disabled_commands\"].keys():\n osd(bot, botcom.channel_current, 'say', botcom.maincom + \" already has multicom usage \" + onoff + \" in \" + str(botcom.channel_current))\n else:\n del bot.memory[\"botdict\"]['servers_list'][botcom.server]['channels_list'][str(botcom.channel_current)][\"multirun_disabled_commands\"][botcom.maincom]\n osd(bot, botcom.channel_current, 'say', botcom.maincom + \" now has multicom usage \" + onoff + \" in \" + str(botcom.channel_current))\n elif onoff == 'off':\n if botcom.maincom in bot.memory[\"botdict\"]['servers_list'][botcom.server]['channels_list'][str(botcom.channel_current)][\"multirun_disabled_commands\"].keys():\n osd(bot, botcom.channel_current, 'say', botcom.maincom + \" already has multicom usage \" + onoff + \" in \" + str(botcom.channel_current))\n else:\n trailingmessage = spicemanip.main(botcom.triggerargsarray, \"2+\") or \"No reason given.\"\n timestamp = str(datetime.datetime.utcnow())\n bot.memory[\"botdict\"]['servers_list'][botcom.server]['channels_list'][str(botcom.channel_current)][\"multirun_disabled_commands\"][botcom.maincom] = {\"reason\": trailingmessage, \"timestamp\": timestamp, \"multi_disabledby\": botcom.instigator}\n osd(bot, botcom.channel_current, 'say', botcom.maincom + \" now has multicom usage \" + onoff + \" in \" + str(botcom.channel_current))\n else:\n if botcom.maincom not in bot.memory[\"botdict\"]['servers_list'][botcom.server]['channels_list'][str(botcom.channel_current)][\"multirun_disabled_commands\"].keys():\n osd(bot, botcom.channel_current, 'say', botcom.maincom + \" allows multicom use in \" + str(botcom.channel_current))\n else:\n osd(bot, botcom.channel_current, 'say', botcom.maincom + \" does not allow multicom use in \" + str(botcom.channel_current))\n\n botdict_save(bot)\n return\n\n elif botcom.specified == 'block':\n botcom.modulerun = 
False\n\n if not bot_command_modding_auth(bot, botcom):\n return osd(bot, botcom.channel_current, 'say', \"You are not authorized to enable/disable command usage.\")\n\n posstarget = spicemanip.main(botcom.triggerargsarray, 1) or 0\n if not posstarget:\n return osd(bot, botcom.channel_current, 'say', \"Who am I blocking from \" + str(botcom.maincom) + \" usage?\")\n\n if posstarget not in bot.memory[\"botdict\"][\"users\"].keys():\n return osd(bot, botcom.channel_current, 'say', \"I don't know who \" + str(posstarget) + \" is.\")\n\n currentblocks = get_nick_value(bot, posstarget, \"long\", 'commands', \"unallowed\") or []\n if botcom.maincom in currentblocks:\n return osd(bot, botcom.channel_current, 'say', str(posstarget) + \" is already blocked from using \" + botcom.maincom + \".\")\n\n adjust_nick_array(bot, posstarget, \"long\", 'commands', \"unallowed\", [botcom.maincom], 'add')\n botdict_save(bot)\n\n return osd(bot, botcom.channel_current, 'say', str(posstarget) + \" has been blocked from using \" + botcom.maincom + \".\")\n\n elif botcom.specified == 'unblock':\n botcom.modulerun = False\n\n if not bot_command_modding_auth(bot, botcom):\n return osd(bot, botcom.channel_current, 'say', \"You are not authorized to enable/disable command usage.\")\n\n posstarget = spicemanip.main(botcom.triggerargsarray, 1) or 0\n if not posstarget:\n return osd(bot, botcom.channel_current, 'say', \"Who am I unblocking from \" + str(botcom.maincom) + \" usage?\")\n\n if posstarget not in bot.memory[\"botdict\"][\"users\"].keys():\n return osd(bot, botcom.channel_current, 'say', \"I don't know who \" + str(posstarget) + \" is.\")\n\n currentblocks = get_nick_value(bot, posstarget, \"long\", 'commands', \"unallowed\") or []\n if botcom.maincom not in currentblocks:\n return osd(bot, botcom.channel_current, 'say', str(posstarget) + \" is already not blocked from using \" + botcom.maincom + \".\")\n\n adjust_nick_array(bot, posstarget, \"long\", 'commands', \"unallowed\", [botcom.maincom], 'del')\n botdict_save(bot)\n\n return osd(bot, botcom.channel_current, 'say', str(posstarget) + \" has been unblocked from using \" + botcom.maincom + \".\")\n\n elif botcom.specified == 'special':\n nonstockoptions = spicemanip.main(botcom.nonstockoptions, \"andlist\")\n return osd(bot, botcom.channel_current, 'say', \"The special options for \" + str(botcom.maincom) + \" command include: \" + str(nonstockoptions) + \".\")\n\n elif botcom.specified == 'count':\n return osd(bot, botcom.channel_current, 'say', \"The \" + str(botcom.maincom) + \" \" + str(botcom.responsekey or '') + \" command has \" + str(len(botcom.dotcommand_dict[botcom.responsekey][\"responses\"])) + \" entries.\")\n\n elif botcom.specified == 'description':\n botcom.modulerun = False\n\n osd(bot, botcom.channel_current, 'say', str(botcom.specified).title() + \": \" + str(botcom.dotcommand_dict[\"description\"]))\n return\n\n elif botcom.specified == 'exampleresponse':\n botcom.modulerun = False\n\n osd(bot, botcom.channel_current, 'say', str(botcom.specified).title() + \": \" + str(botcom.dotcommand_dict[\"description\"]))\n return\n\n elif botcom.specified == 'privs':\n botcom.modulerun = False\n\n osd(bot, botcom.channel_current, 'say', str(botcom.specified).title() + \": \" + spicemanip.main(botcom.dotcommand_dict[\"privs\"], \"andlist\"))\n return\n\n elif botcom.specified in ['example', 'usage']:\n botcom.modulerun = False\n\n osd(bot, botcom.channel_current, 'say', str(botcom.specified).title() + \": \" + 
str(botcom.dotcommand_dict[\"description\"]))\n return\n\n elif botcom.specified == 'filepath':\n return osd(bot, botcom.channel_current, 'say', \"The \" + str(botcom.maincom) + \" file is located at \" + str(botcom.dotcommand_dict[\"filepath\"]))\n\n elif botcom.specified == 'filename':\n return osd(bot, botcom.channel_current, 'say', \"The \" + str(botcom.maincom) + \" file is located at \" + str(botcom.dotcommand_dict[\"filename\"]))\n\n elif botcom.specified == 'author':\n return osd(bot, botcom.channel_current, 'say', \"The author of the \" + str(botcom.maincom) + \" command is \" + botcom.dotcommand_dict[\"author\"] + \".\")\n\n elif botcom.specified in ['contrib', \"contributors\"]:\n return osd(bot, botcom.channel_current, 'say', \"The contributors of the \" + str(botcom.maincom) + \" command are \" + spicemanip.main(botcom.dotcommand_dict[\"contributors\"], \"andlist\") + \".\")\n\n elif botcom.specified == 'alias':\n return osd(bot, botcom.channel_current, 'say', \"The aliases of the \" + str(botcom.maincom) + \" command are \" + spicemanip.main(botcom.dotcommand_dict[\"validcoms\"], \"andlist\") + \".\")\n\n elif botcom.specified == 'view':\n if botcom.dotcommand_dict[botcom.responsekey][\"responses\"] == []:\n return osd(bot, botcom.channel_current, 'say', \"The \" + str(botcom.maincom) + \" \" + str(botcom.responsekey or '') + \" command appears to have no entries!\")\n else:\n osd(bot, botcom.instigator, 'notice', \"The \" + str(botcom.maincom) + \" \" + str(botcom.responsekey or '') + \" command contains:\")\n listnumb, relist = 1, []\n for item in botcom.dotcommand_dict[botcom.responsekey][\"responses\"]:\n if listnumb <= 20:\n if isinstance(item, dict):\n relist.append(str(\"[#\" + str(listnumb) + \"] COMPLEX dict Entry\"))\n elif isinstance(item, list):\n relist.append(str(\"[#\" + str(listnumb) + \"] COMPLEX list Entry\"))\n else:\n relist.append(str(\"[#\" + str(listnumb) + \"] \" + str(item)))\n listnumb += 1\n osd(bot, botcom.instigator, 'say', relist)\n if listnumb > 20:\n osd(bot, botcom.instigator, 'say', \"List cut off after the 20th entry to prevent bot lag.\")\n return\n\n elif botcom.specified == 'add':\n\n if not botcom.dotcommand_dict[botcom.responsekey][\"updates_enabled\"]:\n return osd(bot, botcom.channel_current, 'say', \"The \" + str(botcom.maincom) + \" \" + str(botcom.responsekey or '') + \" entry list cannot be updated.\")\n\n fulltext = spicemanip.main(botcom.triggerargsarray, 0)\n if not fulltext:\n return osd(bot, botcom.channel_current, 'say', \"What would you like to add to the \" + str(botcom.maincom) + \" \" + str(botcom.responsekey or '') + \" entry list?\")\n\n if fulltext in botcom.dotcommand_dict[botcom.responsekey][\"responses\"]:\n return osd(bot, botcom.channel_current, 'say', \"The following was already in the \" + str(botcom.maincom) + \" \" + str(botcom.responsekey or '') + \" entry list: '\" + str(fulltext) + \"'\")\n\n if botcom.dotcommand_dict[botcom.responsekey][\"updates_enabled\"] == \"shared\":\n adjust_nick_array(bot, str(bot.nick), 'long', 'sayings', botcom.maincom + \"_\" + str(botcom.responsekey), fulltext, botcom.specified)\n elif botcom.dotcommand_dict[botcom.responsekey][\"updates_enabled\"] == \"user\":\n adjust_nick_array(bot, str(botcom.instigator), 'long', 'sayings', botcom.maincom + \"_\" + str(botcom.responsekey), fulltext, botcom.specified)\n\n return osd(bot, botcom.channel_current, 'say', \"The following was added to the \" + str(botcom.maincom) + \" \" + str(botcom.responsekey or '') + \" entry list: '\" + 
str(fulltext) + \"'\")\n\n elif botcom.specified in ['del', 'remove']:\n\n if not botcom.dotcommand_dict[botcom.responsekey][\"updates_enabled\"]:\n return osd(bot, botcom.channel_current, 'say', \"The \" + str(botcom.maincom) + \" \" + str(botcom.responsekey or '') + \" entry list cannot be updated.\")\n\n fulltext = spicemanip.main(botcom.triggerargsarray, 0)\n if not fulltext:\n return osd(bot, botcom.channel_current, 'say', \"What would you like to remove from the \" + str(botcom.maincom) + \" \" + str(botcom.responsekey or '') + \" entry list?\")\n\n if fulltext not in botcom.dotcommand_dict[botcom.responsekey][\"responses\"]:\n return osd(bot, botcom.channel_current, 'say', \"The following was already not in the \" + str(botcom.maincom) + \" \" + str(botcom.responsekey or '') + \" entry list: '\" + str(fulltext) + \"'\")\n\n if botcom.dotcommand_dict[botcom.responsekey][\"updates_enabled\"] == \"shared\":\n adjust_nick_array(bot, str(bot.nick), 'long', 'sayings', botcom.maincom + \"_\" + str(botcom.responsekey), fulltext, botcom.specified)\n elif botcom.dotcommand_dict[botcom.responsekey][\"updates_enabled\"] == \"user\":\n adjust_nick_array(bot, str(botcom.instigator), 'long', 'sayings', botcom.maincom + \"_\" + str(botcom.responsekey), fulltext, botcom.specified)\n\n return osd(bot, botcom.channel_current, 'say', \"The following was removed from the \" + str(botcom.maincom) + \" \" + str(botcom.responsekey or '') + \" entry list: '\" + str(fulltext) + \"'\")\n\n elif botcom.specified and not botcom.dotcommand_dict[botcom.responsekey][\"selection_allowed\"]:\n return osd(bot, botcom.channel_current, 'say', \"The \" + str(botcom.maincom) + \" \" + str(botcom.responsekey or '') + \" response list cannot be specified.\")\n\n botcom.target = False\n\n currentblocks = get_nick_value(bot, botcom.instigator, \"long\", 'commands', \"unallowed\") or []\n if botcom.maincom in currentblocks:\n return osd(bot, botcom.channel_current, 'say', \"You appear to have been blocked by a bot admin from using the \" + botcom.maincom + \" command.\")\n\n if not botcom.channel_priv:\n\n if botcom.maincom in bot.memory[\"botdict\"]['servers_list'][botcom.server]['channels_list'][str(botcom.channel_current)][\"disabled_commands\"].keys():\n reason = bot.memory[\"botdict\"]['servers_list'][botcom.server]['channels_list'][str(botcom.channel_current)][\"disabled_commands\"][str(botcom.maincom)][\"reason\"]\n timestamp = bot.memory[\"botdict\"]['servers_list'][botcom.server]['channels_list'][str(botcom.channel_current)][\"disabled_commands\"][str(botcom.maincom)][\"timestamp\"]\n bywhom = bot.memory[\"botdict\"]['servers_list'][botcom.server]['channels_list'][str(botcom.channel_current)][\"disabled_commands\"][str(botcom.maincom)][\"disabledby\"]\n return osd(bot, botcom.channel_current, 'say', \"The \" + str(botcom.maincom) + \" command was disabled by \" + bywhom + \" in \" + str(botcom.channel_current) + \" at \" + str(timestamp) + \" for the following reason: \" + str(reason))\n\n # hardcoded_channel_block\n if not botcom.channel_priv:\n if str(botcom.channel_current) in botcom.dotcommand_dict[\"hardcoded_channel_block\"]:\n return osd(bot, botcom.channel_current, 'say', \"The \" + str(botcom.maincom) + \" command cannot be used in \" + str(botcom.channel_current) + \" because it is hardcoded not to.\")\n\n # hardcoded_channel_block\n if not botcom.channel_priv:\n if str(botcom.channel_current) in botcom.dotcommand_dict[botcom.responsekey][\"hardcoded_channel_block\"]:\n return osd(bot, 
botcom.channel_current, 'say', \"The \" + str(botcom.maincom) + \" \" + str(botcom.responsekey or '') + \" command cannot be used in \" + str(botcom.channel_current) + \" because it is hardcoded not to.\")\n\n botcom.success = True\n if botcom.commandtype in ['simple', 'fillintheblank', \"target\", 'targetplusreason', 'sayings', \"readfromfile\", \"readfromurl\", \"ascii_art\", \"translate\", \"responses\"]:\n return bot_dictcom_responses(bot, botcom)\n else:\n command_function_run = str('bot_dictcom_' + botcom.commandtype + '(bot, botcom)')\n eval(command_function_run)\n\n\ndef bot_dictcom_responses(bot, botcom):\n\n commandrunconsensus = []\n reaction = False\n\n # A target is required\n if botcom.dotcommand_dict[botcom.responsekey][\"target_required\"]:\n\n # try first term as a target\n posstarget = spicemanip.main(botcom.triggerargsarray, 1) or 0\n targetbypass = botcom.dotcommand_dict[botcom.responsekey][\"target_bypass\"]\n targetchecking = bot_target_check(bot, botcom, posstarget, targetbypass)\n if not targetchecking[\"targetgood\"]:\n\n if botcom.dotcommand_dict[botcom.responsekey][\"target_backup\"]:\n botcom.target = botcom.dotcommand_dict[botcom.responsekey][\"target_backup\"]\n if botcom.target == 'instigator':\n botcom.target = botcom.instigator\n elif botcom.target == 'random':\n botcom.target = bot_random_valid_target(bot, botcom, 'random')\n else:\n for reason in ['self', 'bot', 'bots', 'offline', 'unknown', 'privmsg', 'diffchannel', 'diffbot']:\n if targetchecking[\"reason\"] == reason and botcom.dotcommand_dict[botcom.responsekey][\"react_\"+reason]:\n reaction = True\n commandrunconsensus.append(botcom.dotcommand_dict[botcom.responsekey][\"react_\"+reason])\n if not reaction:\n commandrunconsensus.append([targetchecking[\"error\"]])\n else:\n botcom.target = spicemanip.main(botcom.triggerargsarray, 1)\n botcom.triggerargsarray = spicemanip.main(botcom.triggerargsarray, '2+', 'list')\n\n # $blank input\n botcom.completestring = spicemanip.main(botcom.triggerargsarray, 0) or ''\n if botcom.dotcommand_dict[botcom.responsekey][\"blank_required\"]:\n\n if botcom.completestring == '' or not botcom.completestring:\n\n if botcom.dotcommand_dict[botcom.responsekey][\"blank_backup\"]:\n botcom.completestring = botcom.dotcommand_dict[botcom.responsekey][\"blank_backup\"]\n else:\n commandrunconsensus.append(botcom.dotcommand_dict[botcom.responsekey][\"blank_fail\"])\n\n if botcom.dotcommand_dict[botcom.responsekey][\"blank_phrasehandle\"]:\n if botcom.dotcommand_dict[botcom.responsekey][\"blank_phrasehandle\"] != []:\n if spicemanip.main(botcom.completestring, 1).lower() not in botcom.dotcommand_dict[botcom.responsekey][\"blank_phrasehandle\"]:\n botcom.completestring = botcom.dotcommand_dict[botcom.responsekey][\"blank_phrasehandle\"][0] + \" \" + botcom.completestring\n elif spicemanip.main(botcom.completestring, 1).lower() in botcom.dotcommand_dict[botcom.responsekey][\"blank_phrasehandle\"]:\n if spicemanip.main(botcom.completestring, 1).lower() != botcom.dotcommand_dict[botcom.responsekey][\"blank_phrasehandle\"][0]:\n botcom.triggerargsarray = spicemanip.main(botcom.triggerargsarray, '2+', 'list')\n if botcom.triggerargsarray != []:\n botcom.completestring = botcom.dotcommand_dict[botcom.responsekey][\"blank_phrasehandle\"][0] + \" \" + spicemanip.main(botcom.triggerargsarray, 0)\n\n if commandrunconsensus != []:\n botcom.success = False\n if botcom.dotcommand_dict[botcom.responsekey][\"response_fail\"] and not reaction:\n 
botcom.dotcommand_dict[botcom.responsekey][\"responses\"] = botcom.dotcommand_dict[botcom.responsekey][\"response_fail\"]\n else:\n botcom.dotcommand_dict[botcom.responsekey][\"responses\"] = commandrunconsensus[0]\n\n bot_dictcom_reply_shared(bot, botcom)\n\n\ndef bot_dictcom_reply_shared(bot, botcom):\n\n if botcom.specified:\n if botcom.specified > len(botcom.dotcommand_dict[botcom.responsekey][\"responses\"]):\n currentspecified = len(botcom.dotcommand_dict[botcom.responsekey][\"responses\"])\n else:\n currentspecified = botcom.specified\n botcom.replies = spicemanip.main(botcom.dotcommand_dict[botcom.responsekey][\"responses\"], currentspecified, 'return')\n botcom.replynum = currentspecified\n else:\n botcom.replies = spicemanip.main(botcom.dotcommand_dict[botcom.responsekey][\"responses\"], 'random', 'return')\n try:\n botcom.replynum = botcom.dotcommand_dict[botcom.responsekey][\"responses\"].index(botcom.replies)\n except Exception as e:\n botcom.replynum = 0\n botcom.replynum += 1\n botcom.totalreplies = len(botcom.dotcommand_dict[botcom.responsekey][\"responses\"])\n\n # This handles responses in list form\n if not isinstance(botcom.replies, list):\n botcom.replies = [botcom.replies]\n\n for rply in botcom.replies:\n\n # replies that can be evaluated as code\n if rply.startswith(\"time.sleep\"):\n eval(rply)\n else:\n\n # random number\n if \"$randnum\" in rply:\n if botcom.dotcommand_dict[botcom.responsekey][\"randnum\"]:\n randno = randint(botcom.dotcommand_dict[botcom.responsekey][\"randnum\"][0], botcom.dotcommand_dict[botcom.responsekey][\"randnum\"][1])\n else:\n randno = randint(0, 50)\n rply = rply.replace(\"$randnum\", str(randno))\n\n # blank\n if \"$blank\" in rply:\n rply = rply.replace(\"$blank\", botcom.completestring or '')\n\n # the remaining input\n if \"$input\" in rply:\n rply = rply.replace(\"$input\", spicemanip.main(botcom.triggerargsarray, 0) or botcom.maincom)\n\n # translation\n if botcom.dotcommand_dict[botcom.responsekey][\"translations\"]:\n rply = bot_translate_process(bot, rply, botcom.dotcommand_dict[botcom.responsekey][\"translations\"])\n\n # text to precede the output\n if botcom.dotcommand_dict[botcom.responsekey][\"prefixtext\"] and botcom.success:\n rply = spicemanip.main(botcom.dotcommand_dict[botcom.responsekey][\"prefixtext\"], 'random') + rply\n\n # text to follow the output\n if botcom.dotcommand_dict[botcom.responsekey][\"suffixtext\"] and botcom.success:\n rply = rply + spicemanip.main(botcom.dotcommand_dict[botcom.responsekey][\"suffixtext\"], 'random')\n\n # trigger.nick\n if \"$instigator\" in rply:\n rply = rply.replace(\"$instigator\", botcom.instigator or '')\n\n # random user\n if \"$randuser\" in rply:\n if not botcom.channel_priv:\n randuser = spicemanip.main(bot.memory[\"botdict\"][\"tempvals\"]['servers_list'][botcom.server]['channels_list'][botcom.channel_current]['current_users'], 'random')\n else:\n randuser = botcom.instigator\n rply = rply.replace(\"$randuser\", randuser)\n\n # current channel\n if \"$channel\" in rply:\n rply = rply.replace(\"$channel\", botcom.channel_current or '')\n\n # current channel\n if \"$server\" in rply:\n rply = rply.replace(\"$server\", botcom.server or '')\n\n # bot.nick\n if \"$botnick\" in rply:\n rply = rply.replace(\"$botnick\", bot.nick or '')\n\n # target\n if \"$target\" in rply:\n targetnames = botcom.target or ''\n if \"$targets\" in rply:\n if targetnames.lower() == \"your\":\n targetnames = targetnames\n elif targetnames.endswith(\"s\"):\n targetnames = targetnames + \"'\"\n 
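# (editor's note) possessive form for '$targets': 'your' is kept unchanged,\n                # names already ending in 's' get a bare apostrophe, and the else\n                # branch below appends 's'.\n                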
else:\n targetnames = targetnames + \"s\"\n rply = rply.replace(\"$targets\", targetnames)\n else:\n targetnames = targetnames\n rply = rply.replace(\"$target\", targetnames)\n\n # smaller variations for the text\n if \"$replyvariation\" in rply:\n if botcom.dotcommand_dict[botcom.responsekey][\"replyvariation\"] != []:\n variation = spicemanip.main(botcom.dotcommand_dict[botcom.responsekey][\"replyvariation\"], 'random')\n rply = rply.replace(\"$replyvariation\", variation)\n else:\n rply = rply.replace(\"$replyvariation\", '')\n\n # smaller variations for the text\n if \"$responsekey\" in rply:\n rply = rply.replace(\"$responsekey\", str(botcom.responsekey))\n\n if \"$index\" in rply:\n rply = rply.replace(\"$index\", str(str(botcom.replynum) + \"/\" + str(botcom.totalreplies)))\n\n # display special options for this command\n if \"$specialoptions\" in rply:\n nonstockoptions = []\n for command in botcom.dotcommand_dict.keys():\n if command not in [\"?default\", \"validcoms\", \"contributors\", \"author\", \"type\", \"filepath\", \"filename\", \"hardcoded_channel_block\", \"description\", \"exampleresponse\", \"example\", \"usage\", \"privs\"]:\n nonstockoptions.append(command)\n nonstockoptions = spicemanip.main(nonstockoptions, \"andlist\")\n rply = rply.replace(\"$specialoptions\", nonstockoptions)\n\n # saying, or action?\n if rply.startswith(\"*a \"):\n rplytype = 'action'\n rply = rply.replace(\"*a \", \"\")\n else:\n rplytype = 'say'\n\n osd(bot, botcom.channel_current, rplytype, rply)\n\n\ndef bot_dictcom_gif(bot, botcom):\n\n if botcom.dotcommand_dict[botcom.responsekey][\"blank_required\"] and not botcom.completestring:\n botcom.dotcommand_dict[botcom.responsekey][\"responses\"] = botcom.dotcommand_dict[botcom.responsekey][\"blank_fail\"]\n return bot_dictcom_reply_shared(bot, botcom)\n elif botcom.dotcommand_dict[botcom.responsekey][\"blank_required\"] and botcom.completestring:\n queries = [botcom.completestring]\n else:\n queries = botcom.dotcommand_dict[botcom.responsekey][\"responses\"]\n\n # which api's are we using to search\n if botcom.dotcommand in bot.memory[\"botdict\"][\"tempvals\"]['valid_gif_api_dict'].keys():\n searchapis = [botcom.dotcommand]\n elif \"queryapi\" in botcom.dotcommand_dict.keys():\n searchapis = botcom.dotcommand_dict[botcom.responsekey][\"queryapi\"]\n else:\n searchapis = bot.memory[\"botdict\"][\"tempvals\"]['valid_gif_api_dict'].keys()\n\n if botcom.specified:\n if botcom.specified > len(queries):\n botcom.specified = len(queries)\n query = spicemanip.main(queries, botcom.specified, 'return')\n else:\n query = spicemanip.main(queries, 'random', 'return')\n\n searchdict = {\"query\": query, \"gifsearch\": searchapis}\n\n # nsfwenabled = get_database_value(bot, bot.nick, 'channels_nsfw') or []\n # if botcom.channel_current in nsfwenabled:\n # searchdict['nsfw'] = True\n\n gifdict = getGif(bot, searchdict)\n\n if gifdict[\"error\"]:\n botcom.success = False\n if botcom.dotcommand_dict[botcom.responsekey][\"search_fail\"]:\n gifdict[\"error\"] = botcom.dotcommand_dict[botcom.responsekey][\"search_fail\"]\n botcom.dotcommand_dict[botcom.responsekey][\"responses\"] = [gifdict[\"error\"]]\n else:\n botcom.dotcommand_dict[botcom.responsekey][\"responses\"] = [str(gifdict['gifapi'].title() + \" Result (\" + str(query) + \" #\" + str(gifdict[\"returnnum\"]) + \"): \" + str(gifdict[\"returnurl\"]))]\n\n botcom.specified = False\n bot_dictcom_reply_shared(bot, botcom)\n\n\ndef bot_dictcom_feeds(bot, botcom):\n\n if \"feeds\" not in bot.memory:\n 
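# (editor's note, hypothetical) lazy one-time setup: feed_configs() is assumed\n        # to populate bot.memory['feeds'] on first use; it is not defined in this record.\n        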
feed_configs(bot)\n\n bot_startup_requirements_set(bot, \"feeds\")\n\n feed = botcom.dotcommand_dict[botcom.responsekey][\"responses\"][0]\n if feed not in bot.memory['feeds'].keys():\n return osd(bot, botcom.channel_current, 'say', feed + \" does not appear to be a valid feed.\")\n\n dispmsg = bot_dictcom_feeds_handler(bot, feed, True)\n if dispmsg == []:\n osd(bot, botcom.channel_current, 'say', feed + \" appears to have had an unknown error.\")\n else:\n osd(bot, botcom.channel_current, 'say', dispmsg)\n\n\ndef bot_dictcom_search(bot, botcom):\n bot.say(\"testing done\")\n","sub_path":"Modules/BotCore/Commands/Dot_Dict_Commands.py","file_name":"Dot_Dict_Commands.py","file_ext":"py","file_size_in_byte":35546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"473296684","text":"# -*- coding: utf-8 -*-\nimport codecs\nimport regex\nfrom sefaria.model import *\nfrom sources import functions\nfrom parsing_utilities import util\n# from sources.Match import match_new\nfrom sources.Match.match import Match\nfrom sefaria.model.schema import AddressTalmud, SchemaNode, JaggedArrayNode\n\n\nmitzvah_number = regex.compile(u'@88\\(([\\u05d0-\\u05ea]{1,4})\\)')\n\ndef create_index():\n eben_ezra = create_schema()\n eben_ezra.validate()\n index = {\n \"title\": \"Eben Ezra on Lamentations\",\n \"categories\": [\"Commentary2\", \"Lamentations\", \"Eben Ezra\"],\n \"schema\": eben_ezra.serialize()\n }\n return index\n\n\ndef create_schema():\n eben_ezra = SchemaNode()\n eben_ezra.add_title('Eben Ezra on Lamentations', 'en', primary=True)\n eben_ezra.add_title(u'אבן עזרא על איכה', 'he', primary=True)\n eben_ezra.key = 'Eben Ezra on Lamentations'\n eben_ezra.append(create_intro_node())\n eben_ezra.append(create_commentary_node())\n return eben_ezra\n\n\ndef create_intro_node():\n intro = JaggedArrayNode()\n intro.add_title('Introduction', 'en', primary=True)\n intro.add_title(u'הקדמה', 'he', primary=True)\n intro.key = 'Introduction'\n intro.depth = 1\n intro.addressTypes = [\"Integer\"]\n intro.sectionNames = [\"Comment\"]\n return intro\n\ndef create_commentary_node():\n commentary = JaggedArrayNode()\n commentary.default = True\n commentary.key = 'default'\n commentary.depth = 3\n commentary.addressTypes = [\"Integer\", \"Integer\", \"Integer\"]\n commentary.sectionNames = [\"Perek\", \"Pasuk\", \"Comment\"]\n commentary.toc_zoom = 2\n return commentary\n\n\ndef parse():\n mishna_number = regex.compile(u'@22([\\u05d0-\\u05ea]{1,2})')\n eben_ezra, perek, pasuk = [], [], []\n intro_with_commentary = []\n first_perek = True\n new_perek = True\n last_mishna = 0\n\n with codecs.open('eben_ezra_eicha.txt', 'r', 'utf-8') as the_file:\n for each_line in the_file:\n if \"@00\" in each_line:\n continue\n\n elif \"@01\" in each_line:\n if first_perek:\n intro_with_commentary.append(pasuk)\n first_perek = False\n else:\n perek.append(pasuk)\n eben_ezra.append(perek)\n new_perek = True\n perek, pasuk = [], []\n\n elif \"@22\" in each_line:\n if not new_perek:\n perek.append(pasuk)\n pasuk = []\n else:\n new_perek = False\n\n last_mishna = fill_in_missing_sections_and_update_last(each_line, perek, mishna_number, '', last_mishna)\n\n else:\n pasuk.append(each_line)\n\n eben_ezra.append(perek)\n intro_with_commentary.append(eben_ezra)\n return intro_with_commentary\n\n\ndef fill_in_missing_sections_and_update_last(each_line, base_list, this_regex, filler, last_index):\n match_object = this_regex.search(each_line)\n current_index = 
util.getGematria(match_object.group(1))\n diff = current_index - last_index\n while diff > 1:\n base_list.append(filler)\n diff -= 1\n return current_index\n\n\ndef create_links(eee_ja):\n list_of_links = []\n for perek_index, perek in enumerate(eee_ja):\n for pasuk_index, pasuk in enumerate(perek):\n for comment_index, comment in enumerate(pasuk):\n list_of_links.append(create_link_dicttionary(perek_index+1, pasuk_index+1, comment_index+1))\n return list_of_links\n\n\ndef create_link_dicttionary(perek, pasuk, comment):\n return {\n \"refs\": [\n \"Lamentations.{}.{}\".format(perek, pasuk),\n \"Eben_Ezra_on_Lamentations.{}.{}.{}\".format(perek, pasuk, comment)\n ],\n \"type\": \"commentary\",\n }\n\n\ndef create_text(jagged_array):\n return {\n \"versionTitle\": \"Ibn Ezra on Lamentations -- Wikitext\",\n \"versionSource\": \"https://he.wikisource.org/wiki/%D7%90%D7%91%D7%9F_%D7%A2%D7%96%D7%A8%D7%90_%D7%A2%D7%9C_%D7%9E%D7%92%D7%99%D7%9C%D7%AA_%D7%90%D7%99%D7%9B%D7%94\",\n \"language\": \"he\",\n \"text\": jagged_array\n }\n","sub_path":"sources/Eben_Ezra_on_Eicha/eee_functions.py","file_name":"eee_functions.py","file_ext":"py","file_size_in_byte":4147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"138854630","text":"\nimport os\nfrom pathlib import Path\n\nfrom hellflame.services.Service import Service\n\n\nclass DoctorService(Service):\n command_name = 'doctor'\n help_text = 'see information of environment'\n\n def __init__(self,subparsers):\n super().__init__(subparsers)\n\n # subparserの登録\n def register_parser(self,parser):\n pass\n\n # エントリーポイント\n def handler_function(self,args):\n print('\\033[36m::: >>> Enter: DoctorService\\033[0m')\n envs = ['MLPROG','MLDATA','MLEXP','MLTMP']\n print('\\033[36m>>> ======================= doctor start ====================== <<<\\033[0m')\n\n # 環境変数の設定状況\n print('# setted environment paths:')\n err_log = '\\n'\n err_flag = False\n for env in envs:\n if os.environ.get(env) is not None:\n print(' o | environment variable ',env,' is ',os.environ.get(env))\n else:\n if err_flag==False:\n err_log += '--- set the environment variable ---\\n'\n err_flag = True\n print(' x | environment variable ',env,' does not exist')\n err_log += 'export %s=\"/path/to/dir\"\\n'%(env)\n if err_flag:\n print(err_log)\n\n # 設定されたパスの存在状況\n print('# paths existence:')\n err_log = '\\n'\n err_flag = False\n for env in envs:\n if os.environ.get(env) is not None:\n p = Path(os.environ.get(env)).resolve()\n if p.exists():\n print(' o | environment variable ',env,':',p,' is exist')\n else:\n if err_flag==False:\n err_log += '--- make the directory ---\\n'\n err_flag = True\n print(' x | environment variable ',env,':',p,' does not exist')\n err_log += 'mkdir -p %s\\n'%(str(p))\n if err_flag:\n print(err_log)\n\n\n\n print('\\033[36m>>> ======================= doctor end ======================== <<<\\033[0m')\n print('\\033[36m::: <<< Exit: DoctorService\\033[0m')\n return 0\n\n","sub_path":"hellflame/services/DoctorService.py","file_name":"DoctorService.py","file_ext":"py","file_size_in_byte":2168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"566935888","text":"import re\nimport xlrd\n\nimport time\nfrom selenium import webdriver\nfrom selenium.webdriver.support.ui import Select\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom 
selenium.webdriver.support.ui import WebDriverWait\n\nUSERNAME = \"13058761124\"\nPASSWORD = \"uy5rRt6ik\"\nLOGIN_URL = \"http://newcentral.7yao.top/admin/index/index.html\"\nCONTEXT = '亲爱的玩家,我是VIP客服叶子,恭喜您成为我们的高端VIP成员,麻烦您联系VIP客服QQ:2851577564(偶玩-叶子),登记完成后获得VIP认证礼包,还有社区VIP标示!祝您游戏愉快!'\nTITLE = 'vip会员邀请函'\n\n# excel文件名\nFILE = '【灵剑奇缘】邮件模板.xlsx'\n\ndriver = webdriver.Chrome()\nwait = WebDriverWait(driver, 10)\n\n\ndef login():\n driver.get(LOGIN_URL)\n\n username = wait.until(EC.presence_of_element_located((By.ID, \"username\")))\n username.send_keys(USERNAME)\n\n password = wait.until(EC.presence_of_element_located((By.ID, \"password\")))\n password.send_keys(PASSWORD)\n\n submit = wait.until(EC.presence_of_element_located((By.XPATH, '//button[@type=\"submit\"]')))\n submit.click()\n\n\ndef choose_game():\n\n driver.get('http://newcentral.jswl.7yao.top/admin/send_mail/senditem.11')\n\n try:\n platform_id, server_id = re.findall('sbToggle_[0-9]{8}', driver.page_source)\n except:\n print('访问过于频繁,查看记录发送到第几行并调整从下一行开始运行')\n time.sleep(1)\n return choose_game()\n\n # 选择平台\n platform = wait.until(EC.presence_of_element_located((By.ID, platform_id)))\n platform.click()\n platform.send_keys(Keys.ARROW_DOWN)\n platform.send_keys(Keys.ENTER)\n time.sleep(1)\n\n # 选择servers\n attempt = 0\n platform_id2, server_id2 = re.findall('sbToggle_[0-9]{8}', driver.page_source)\n while server_id2 == server_id:\n platform_id2, server_id2 = re.findall('sbToggle_[0-9]{8}', driver.page_source)\n attempt += 1\n return server_id2\n\n\ndef search_actor(server_name, actor_name, server_id2):\n\n server = wait.until(EC.presence_of_element_located((By.ID, server_id2)))\n server.click()\n\n server_option_id = 'sbOptions_' + server_id2.split('_')[1]\n server_name = re.findall('[0-9]+', server_name)[0]\n server_option = wait.until(EC.presence_of_element_located((By.ID, server_option_id)))\n time.sleep(1)\n server_option.find_element_by_xpath('//label[@rel=\"{}\"]'.format(server_name)).click()\n\n # 输入角色名\n name = wait.until(EC.presence_of_element_located((By.ID, 'getactname')))\n name.clear()\n name.send_keys(actor_name)\n time.sleep(1)\n\n # 搜索\n wait.until(EC.presence_of_element_located((By.XPATH, '//button[@type=\"submit\"]'))).click()\n\n # 检测是否搜索到角色\n try:\n if wait.until(EC.presence_of_element_located((By.XPATH, '//td[@gm-create=\"false\"]'))):\n return True\n except:\n print('无角色数据:{}'.format(actor_name))\n return False\n\n\ndef send_mail():\n # 邮件内容\n mail_context = wait.until(EC.presence_of_element_located((By.XPATH, '//textarea[@name=\"context\"]')))\n mail_context.clear()\n mail_context.send_keys(CONTEXT)\n\n # 邮件标题\n mail_title = wait.until(EC.presence_of_element_located((By.XPATH, '//textarea[@name=\"title\"]')))\n mail_title.clear()\n mail_title.send_keys(TITLE)\n time.sleep(1)\n\n # 发送\n send_mail = wait.until(EC.presence_of_element_located((By.XPATH, '//*[@id=\"senditem\"]/div/div[7]/button')))\n send_mail.click()\n\n\nif __name__ == '__main__':\n # 从第几行开始\n start = input('请输入从第几行开始(默认为2):')\n while not start.isdigit():\n start = input('请输入数字:')\n\n # 读取excel\n wb = xlrd.open_workbook(filename=FILE)\n ws = wb.sheet_by_name('Sheet1')\n data = []\n for i in range(int(start)-1, ws.nrows):\n info = ws.row_values(i)\n info.append(i)\n data.append(info)\n\n login()\n server_id2 = choose_game()\n\n for server_name, actor_name, context, title, row in data:\n if not server_name or not actor_name:\n pass\n else:\n if search_actor(server_name, actor_name, server_id2):\n send_mail()\n print('已发送完成:第{}行-{}-{}'.format(row+1, 
server_name, actor_name))\n else:\n print('无角色数据:第{}行-{}-{}'.format(row+1, server_name, actor_name))\n # driver.close()\n","sub_path":"send-mail.py","file_name":"send-mail.py","file_ext":"py","file_size_in_byte":4563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"539015051","text":"from app.util import return_resp\nfrom flask import request\nfrom app.db import coll\nfrom app.schemas import ensure_json, ensure_user_logged_in, ensure_feature_is_enabled\n\n\n@ensure_json()\n@ensure_user_logged_in()\n@ensure_feature_is_enabled(\"leave team\")\ndef leave():\n if request.method == 'POST':\n data = request.get_json(silent=True)\n email = data['user_email']\n email = email.strip().lower()\n user_in_a_team = coll(\"users\").find_one({\"_id\": email, \"hasateam\": True})\n if not user_in_a_team:\n return return_resp(400, \"User doesn't have a tram\")\n team_name = coll(\"teams\").find_one({\"members\": {\"$all\": [email]}}, {\"_id\"})['_id']\n team_size = len(coll(\"teams\").find_one({\"_id\": team_name})['members'])\n if team_size == 1:\n coll(\"teams\").delete_one({\"_id\": team_name})\n else:\n coll(\"teams\").update_one({\"_id\": team_name}, {\"$pull\": {\"members\": email}})\n coll(\"teams\").update_one({\"_id\": team_name}, {\"$set\": {\"complete\": False}})\n coll(\"users\").update_one({\"_id\": email}, {\"$set\": {\"hasateam\": False}})\n return return_resp(200, \"Success\")\n","sub_path":"app/leave_team.py","file_name":"leave_team.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"134123372","text":"f = open(\"input.txt\", \"r\")\na = f.read().split(\"\\n\")\n\nsize = len(a)\n\nm={}\nfor y in range(0, size):\n for x in range(0, size):\n m[(y,x)] = int(a[y][x])\n\ndef printmap(z):\n print(\"-----\")\n for y in range(0,size):\n line = \"\"\n for x in range(0,size):\n line += str(z[(y,x)])\n print(line)\n print(\"-----\")\n\ndef flashneighbors(amap, y, x, dq):\n if amap[(y,x)] > 9:\n d =[\n (0,-1),#left\n (1,-1),#leftdown\n (1,0),#down\n (1,1),#rightdown\n (0,1),#right\n (-1,1),#rightup\n (-1,0),#up\n (-1,-1),#leftup\n ]\n for i in d:\n dy,dx = i\n ny=dy+y\n nx=dx+x\n\n if ny>=0 and ny < size and nx>=0 and nx < size:\n amap[(ny,nx)]+=1\n\n if (ny,nx) not in dq:\n dq.append((ny,nx))\n\ndef reset(amap, flashed):\n for y,x in flashed:\n amap[(y,x)] = 0\n\n \ndef fillqueue():\n q=[]\n for y in range(0, size):\n for x in range(0, size):\n q.append((y,x))\n return q\n\ndef pt1(amap, current, end, fcount, dq):\n current+=1\n flashed = []\n if current==end:\n return fcount\n \n for y,x in dq:\n amap[(y,x)] += 1\n visited = dq.copy()\n\n while len(dq) > 0:\n # increase by 1\n y,x = dq.pop()\n\n #flash neighbors\n if amap[(y,x)] > 9 and (y,x) not in flashed:\n flashed.append((y,x))\n flashneighbors(amap, y,x, dq)\n\n #pt2\n if len(flashed) == size**2:\n print(\"so bright! 
\", current)\n return current\n \n #flash reset \n reset(amap,flashed)\n fcount += len(flashed)\n #print(\"step\", current, \"fcount\", fcount)\n #printmap(amap)\n #print(\"amap\", amap)\n\n return pt1(amap, current, end, fcount, visited)\n\nz=pt1(m, 0, 500+1, 0, fillqueue())\nprint(\"last\", z)\n","sub_path":"2021/day11.py","file_name":"day11.py","file_ext":"py","file_size_in_byte":1912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"588391926","text":"from django.conf import settings\nfrom django.conf.urls import include, url\nfrom django.contrib.sitemaps.views import sitemap\nfrom django.conf.urls.static import static\nfrom django.views.defaults import page_not_found, server_error, bad_request, permission_denied\nfrom django.contrib import admin\nfrom wordx.articles.views import HomeView\nfrom wordx.articles.sitemaps import ArticleCategorySitemap, ArticleSitemap, HomeSitemap\nfrom wordx.cards.sitemaps import LanguageSitemap\n\nadmin.site.site_header = 'Администрирование WORD-X.ru'\n\nsitemaps = {\n 'home': HomeSitemap,\n 'languages': LanguageSitemap,\n 'article_categories': ArticleCategorySitemap,\n 'articles': ArticleSitemap,\n}\n\nurlpatterns = [\n url(r'^$', HomeView.as_view(), name='main'),\n url(r'^sitemap\\.xml$', sitemap, {'sitemaps': sitemaps, }, name='django.contrib.sitemaps.views.sitemap'),\n url(r'^topics/', include('wordx.articles.urls', namespace='articles')),\n url(r'^dictionary/', include('wordx.cards.urls', namespace='dictionary')),\n url(r'^accounts/', include('wordx.accounts.urls', namespace='accounts')),\n url(r'^accounts/', include('django.contrib.auth.urls')),\n url(r'^wordxadmin/', include(admin.site.urls)),\n url(r'^ckeditor/', include('ckeditor_uploader.urls')),\n url(r'^select2/', include('django_select2.urls')),\n]\n\n\nif settings.DEBUG:\n urlpatterns += [\n url(r'^400/$', bad_request, kwargs={'exception': Exception(\"Bad Request!\")}),\n url(r'^403/$', permission_denied, kwargs={'exception': Exception(\"Permission Denied\")}),\n url(r'^404/$', page_not_found, kwargs={'exception': Exception(\"Page not Found\")}),\n url(r'^500/$', server_error, kwargs={'exception': Exception(\"Server Error\")}),\n url(r'^admin/', include(admin.site.urls)),\n ] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) \\\n + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","sub_path":"wordx/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"68084449","text":"def getMenu():\n print(\"1. Addition:\")\n print(\"2. Subtraction\")\n print(\"3. Multiplication\")\n print(\"4. Division\")\n print(\"5. 
Exit\")\ndef getChoice():\n valid = False\n while not valid:\n choice = int(input(\"Please choose an option:\"))\n if not choice < 6:\n print(\"Choice not valid\")\n else:\n valid = True\n return choice\n\ndef checkDigit(number):\n if number[0]==\"-\":\n number = int(number)*(-1)\n number= str(number)\n if not number.isdigit():\n valid = False\n else:\n valid = True\n return valid\n\ndef getInputs():\n numbers = []\n for num in range (0,2):\n valid = False\n while not valid:\n number = input((\"Input a number for the clculation: \"))\n if not checkDigit(number):\n print(\"Input not valid\")\n else:\n valid = True\n numbers.append(number)\n return numbers\n\n\ndef addition(numbers):\n print(\"Adding\", numbers[0] ,\"with\", numbers[1])\n result = int(numbers[0])+int(numbers[1])\n return result\n\ndef subtraction(numbers):\n print(\"Subtracting\", numbers[0], \"with\", numbers[1])\n result = int(numbers[0]) - int(numbers[1])\n return result\n\n\ndef multiplication(numbers):\n print(\"Multiplying\", numbers[0], \"with\", numbers[1])\n result = int(numbers[0]) * int(numbers[1])\n return result\n\ndef division(numbers):\n print(\"Dividing\", numbers[0], \"with\", numbers[1])\n result = int(numbers[0]) / int(numbers[1])\n return result\n\n\ndef decideCalculation(choice, numbers):\n if choice == 1:\n result = addition(numbers)\n elif choice == 2:\n result = subtraction(numbers)\n elif choice == 3:\n result = multiplication(numbers)\n else:\n result = division(numbers)\n return result\n\ndef main():\n getMenu()\n choice = getChoice()\n while choice != 5:\n numbers = getInputs()\n result = decideCalculation(choice, numbers)\n print (\"Answer is\", result)\n getMenu()\n choice = getChoice()\n print(\"Thank you for using this calculator\")\n\nmain()","sub_path":"Basic Calculator/BasicCalculator.py","file_name":"BasicCalculator.py","file_ext":"py","file_size_in_byte":2140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"361614224","text":"# -*- coding: utf-8 -*-\n\nfrom lxml import etree\nimport re\nfrom .utils import get_page\nfrom random import choice\nfrom pyquery import PyQuery as pq\n\nfrom .logger_proxy import mylogger\nlogger=mylogger.get_logger('crawler')\n\n\nclass proxy_mataclass(type):\n \"\"\"\n 定义元类\n \"\"\"\n def __new__(cls, name,bases,attrs):\n count=0\n attrs['__crawlfunc__']=[]\n for k,v in attrs.items():\n if 'crawl_' in k:\n attrs['__crawlfunc__'].append(k)\n count +=1\n attrs['__crawlcount__']=count\n return type.__new__(cls,name,bases,attrs)\n\nclass crawler(object,metaclass=proxy_mataclass):\n \"\"\"\n metaclas取自定的元类。\n 第一:元类的attrs参数,收录自定义类的所有属性。\n 第二:我们自定义的这个proxy mataclass,的attrs属性,额外添加了两个属性,一个是’__crawlfunc__'属性,其对应的值为列表,用来存储包含crawl_字段的所有属性名称。\n 另一个额外的属性是\"__crawlcount__\",对应的值,存储了crawlfunc属性的个数。\n \"\"\"\n def get_crawler(self,callback):\n proxies=[]\n for proxy in eval('self.{}()'.format(callback)):\n logger.info('成功获取代理')\n proxies.append(proxy)\n return proxies\n\n def crawl_daili666(self,page_count=800):\n url='http://www.66ip.cn/{}.html'\n urls=[url.format(page) for page in range(page_count)]\n for u in urls:\n logger.info('begain crawl %s'%u)\n html=get_page(u)\n if html:\n doc=etree.HTML(html)#解析网页地址\n ip=doc.xpath('//div[@align=\"center\"]//table//tr[position()>1]//td[1]/text()')\n prot=doc.xpath('//div[@align=\"center\"]//table//tr[position()>1]//td[2]/text()')\n ip_address=list(zip(ip,prot))#元组列表\n for ip,port in ip_address:\n yield ':'.join([ip,port])#join的对象只能数字符型,此外join只接受一个参数,可以是列表,元组,字典,所以此处要用【】列表\n\n\n 
def crawl_ip181(self):\n start_url = 'http://www.ip181.com/'\n html = get_page(start_url)\n ip_address = re.compile('\\s*(.*?)\\s*(.*?)')\n # \\s* 匹配空格,起到换行作用\n re_ip_address = ip_address.findall(html)\n for address,port in re_ip_address:\n result = address + ':' + port\n yield result.replace(' ', '')\n\n\n def crawl_ip3366(self):\n for page in range(1, 4):\n start_url = 'http://www.ip3366.net/free/?stype=1&page={}'.format(page)\n html = get_page(start_url)\n ip_address = re.compile('\\s*(.*?)\\s*(.*?)')\n # \\s * 匹配空格,起到换行作用\n re_ip_address = ip_address.findall(html)\n for address, port in re_ip_address:\n result = address+':'+ port\n yield result.replace(' ', '')\n\n","sub_path":"proxy_pool_new/proxy_pool_new/crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":3028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"203736872","text":"# coding:utf8\n\nfrom typing import List\nclass Solution:\n dirs = [(-1, 0), (1, 0), (0, -1), (0, 1)]\n def exist(self, board: List[List[str]], word: str) -> bool:\n return self.exist_v1(board, word)\n\n def exist_v1(self, board: List[List[str]], word: str) -> bool:\n \"\"\" bfs \"\"\"\n if not word:\n return False\n rows, cols = len(board), len(board[0])\n for i in range(rows):\n for j in range(cols):\n if self.dfs(board, word, i, j, rows, cols):\n return True\n return False\n\n def dfs(self, board, word, rowIdx, colIdx, rows, cols, visited = None):\n \n if not visited:\n visited = set()\n\n if len(word) == 0:\n return True\n\n if rowIdx < 0 or rowIdx >= rows or colIdx < 0 or colIdx >= cols:\n return False\n \n #print(rowIdx, colIdx, board[rowIdx][colIdx], word, visited)\n\n if word[0] != board[rowIdx][colIdx]:\n return False\n\n visited.add((rowIdx, colIdx))\n res = False \n\n for dx, dy in Solution.dirs:\n nrowIdx, ncolIdx = rowIdx + dx, colIdx + dy\n if (nrowIdx, ncolIdx) not in visited:\n res = res or self.dfs(board, word[1:], nrowIdx, ncolIdx, rows, cols, visited)\n\n visited.remove((rowIdx, colIdx))\n\n return res\n\n\n\n\n\nif __name__ == '__main__':\n obj = Solution()\n board = [\n ['A','B','C','E'],\n ['S','F','C','S'],\n ['A','D','E','E']]\n word = 'ABCB'\n #word = 'ABCCED'\n board = [\n ['A', 'B'], \n ['C', 'D']]\n word = 'CDBA'\n res = obj.exist(board, word)\n print(res)\n\n","sub_path":"leetcode_everyday/pastqing_79.py","file_name":"pastqing_79.py","file_ext":"py","file_size_in_byte":1692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"273516207","text":"import tweepy\nfrom tweepy import OAuthHandler\n\nconsumer_key = ''\nconsumer_secret = ''\naccess_token = ''\naccess_secret = ''\n\nauth = OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_secret)\n\napi = tweepy.API(auth)\n\n#return all tweets from a user and save it in a text file\n\ndef save_text(screen):\n #an empty list to hold a list of objects\n tweets = []\n\n #pulls the initial 200 tweets (max)\n new_tweets = api.user_timeline(screen_name = screen, count = 200)\n\n #gets the last date that was retrieved\n maxdate = new_tweets[-1].id - 1\n\n print('init')\n #extends the list adding the newtweet objects\n tweets.extend(new_tweets)\n\n while len(new_tweets) != 0:\n #pulls the next 200 tweets\n new_tweets = api.user_timeline(screen_name = screen, count = 200, max_id = maxdate)\n #extends the list again\n tweets.extend(new_tweets)\n #gets the next maxdate\n maxdate = tweets[-1].id - 1\n #lets the user know how far along we 
are\n print('loaded ' + str(len(tweets)) + ' tweets so far')\n\n print('finished loading!')\n\n #this is the actual text\n savetweets = []\n #for i in the tweets list, pull the text and save it in a new list\n for i in tweets:\n savetweets.append(i.text)\n\n print('now saving it to a text file...')\n\n #open a file\n text_file = open(str(screen) + \"_tweets.txt\", \"w\")\n\n #write the tweet on a new line\n for i in savetweets:\n text_file.write(i + \"\\n\")\n\n #close the file\n text_file.close()\n print('finished!')\n\nif __name__ == '__main__':\n save_text(\"realdonaldtrump\")","sub_path":"Desktop/learningprogramming/tweet.py","file_name":"tweet.py","file_ext":"py","file_size_in_byte":1637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"390384757","text":"#!/usr/bin/python -tt\n# -*- coding: utf-8 -*-\n'''\n Copyright 2014-2015 Teppo Perä\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n'''\n\n\ndef is_container(obj):\n \"\"\"\n Checks whether the object is container or not.\n\n Container is considered an object, which includes other objects,\n thus string is not qualified, even it implments iterator protocol.\n\n >>> is_container(\"text\")\n False\n\n >>> is_container(tuple())\n True\n \"\"\"\n if isinstance(obj, str):\n return False\n\n return hasattr(obj, '__iter__')\n\n\ndef has_dict_protocol(obj):\n \"\"\"\n Checks whether object supports dict protocol.\n \"\"\"\n return hasattr(obj, \"__getitem__\") and hasattr(obj, \"__setitem__\")\n\n\ndef flatten(items):\n \"\"\"\n Flatten the nested arrays into single one.\n\n Example about list of lists.\n >>> list(flatten([[1, 2], [3, 4]]))\n [1, 2, 3, 4]\n\n Example of deeply nested irregular list:\n >>> list(flatten([[[1, 2]], [[[3]]], 4, 5, [[6, [7, 8]]]]))\n [1, 2, 3, 4, 5, 6, 7, 8]\n\n List of strings is handled properly too\n >>> list(flatten([\"one\", \"two\", [\"three\", \"four\"]]))\n ['one', 'two', 'three', 'four']\n \"\"\"\n for subitem in items:\n if is_container(subitem):\n for item in flatten(subitem):\n yield item\n else:\n yield subitem\n\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod()\n","sub_path":"src/pytraits/core/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"159291696","text":"import json\nimport re\n\n\nclass Compiler(object):\n tf = []\n tfjs = []\n torch = []\n main_map = {'tf': {}, 'tfjs': {}, 'torch': {}}\n base_defs = set()\n\n def __init__(self):\n with open('./output/tf/2.1.json') as tf_file, \\\n open('./output/torch/1.4.0.json') as torch_file, \\\n open('./output/tfjs/1.5.1.json') as tfjs_file:\n self.tf = json.load(tf_file)\n self.tfjs = json.load(tfjs_file)\n self.torch = json.load(torch_file)\n\n def normalize_func_name(self, name):\n alpha = re.compile('[a-zA-Z]')\n return alpha.sub('', name).lower()\n\n def generate_attrs(self, code):\n split_def = re.compile(r'^([\\w\\.]+)\\((.*)\\)')\n return 
split_def.match(code)[1].split('.')\n\n def populate_command(self, lib):\n for f in getattr(self, lib):\n nfunc = self.normalize_func_name(f['function_name'])\n f['attrs'] = self.generate_attrs(f['code'])\n f['args'] = self.hydrate_args(f['args'], f['kwargs'])\n del f['kwargs']\n self.main_map[lib][nfunc] = f\n self.base_defs.add(nfunc)\n\n def load_base_defs(self):\n self.populate_command('tf')\n self.populate_command('tfjs')\n self.populate_command('torch')\n\n def hydrate_args(self, base_args, base_kwargs):\n ba = [\n {\n 'name': self.normalize_func_name(a),\n 'is_kwarg': False,\n 'optional': a.endswith('?'),\n 'index': i\n } for i, a in enumerate(base_args)\n ]\n bk = [\n {\n 'name': self.normalize_func_name(a[0]),\n 'is_kwarg': True,\n 'optional': True\n } for a in base_kwargs\n ]\n return ba + bk\n\n def match_arg_names(self, base, match, to_lang):\n for base_arg in base:\n try:\n match_arg = next(\n m for m in match if m.get('name') == base_arg.get('name')\n )\n base_arg[to_lang] = match_arg.get('name', None)\n except Exception:\n # TODO: check for alternate word level translations\n base_arg[to_lang] = None\n return base\n\n def load_translations(self, from_lang):\n langs = ['torch', 'tfjs', 'tf']\n langs.pop(langs.index(from_lang))\n\n for d in self.base_defs:\n # Check if translation exists in our from language\n if d not in self.main_map[from_lang]:\n continue\n\n base_args = self.main_map[from_lang][d]['args']\n # Check if translatable to other langs\n for to_lang in langs:\n if d not in self.main_map[to_lang]:\n for a in self.main_map[from_lang][d]['args']:\n a.update({to_lang: None})\n continue\n\n # Format & match args\n match_args = self.main_map[to_lang][d]['args']\n self.main_map[from_lang][d]['args'] = self.match_arg_names(\n base_args, match_args, to_lang\n )\n\n def output_data(self):\n with open('../../static/mapped_commands.json', 'w',\n encoding='utf8') as f:\n json.dump(self.main_map, f, indent=4, ensure_ascii=False)\n\n\ndef main():\n c = Compiler()\n c.load_base_defs()\n c.load_translations('tfjs')\n c.load_translations('torch')\n c.load_translations('tf')\n c.output_data()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"docs-crawler/docs/aggregate_crawler_output.py","file_name":"aggregate_crawler_output.py","file_ext":"py","file_size_in_byte":3558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"451842043","text":"from flask import Flask\nimport web_frontend.blueprints as blueprints \n\n\napplication = app = Flask(__name__)\napplication.register_blueprint(blueprints.root)\napplication.debug=True\napp.secret_key = b'/f\\xc7\\xf3\\x07yZ\\xd2{\\xfdi\\xcey\\xa44\\xd6\\xb3\\x1c\\xab\\x82\\xab\\x03\\xf0&(?\\x7f\\xfa\\x16>'\n\nif __name__ == \"__main__\":\n application.run(debug=True,port=1200)\n","sub_path":"web_frontend/load.py","file_name":"load.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"121714487","text":"'''\n Base for ae_multi-tri.\n Only has reconstruction loss.\n\n'''\n\nimport keras\nfrom keras.layers import Conv1D, Conv2DTranspose, Input, Flatten, Dense, Lambda, Reshape, UpSampling1D, AveragePooling1D\nfrom keras.layers import BatchNormalization\nfrom keras.models import Model\nfrom keras.datasets import mnist\nfrom keras.losses import binary_crossentropy, mean_squared_error\nfrom keras import backend as K\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom utils import *\nfrom 
config import get_config\n\nfrom tensorflow.python.framework.ops import disable_eager_execution\ndisable_eager_execution()\n\n\ndef Conv1DTranspose(input_tensor, filters, kernel_size, strides=2, padding='same', activation='relu', name='conv12d'):\n \"\"\"\n input_tensor: tensor, with the shape (batch_size, time_steps, dims)\n filters: int, output dimension, i.e. the output tensor will have the shape of (batch_size, time_steps, filters)\n kernel_size: int, size of the convolution kernel\n strides: int, convolution step size\n padding: 'same' | 'valid'\n \"\"\"\n x = Lambda(lambda x: K.expand_dims(x, axis=2))(input_tensor)\n x = Conv2DTranspose(filters=filters, kernel_size=(kernel_size, 1), strides=(strides, 1), padding=padding, activation='relu', name = name)(x)\n x = Lambda(lambda x: K.squeeze(x, axis=2))(x)\n return x\n\nconfig = get_config()\n\n(X,y) = loaddata_nosplit_scaled(config.input_size, config.feature)\nclasses = ['A', 'E', 'j', 'L', 'N', 'P', 'R', 'V']#['N','V','/','A','F','~']#,'L','R',f','j','E','a']#,'J','Q','e','S']\nXe = np.expand_dims(X, axis=2)\nfrom sklearn.model_selection import train_test_split\nXe, Xvale, y, yval = train_test_split(Xe, y, test_size=0.25, random_state=1)\n\nimport pandas as pd\ny = np.array(pd.DataFrame(y).idxmax(axis=1))\nyval = np.array(pd.DataFrame(yval).idxmax(axis=1))\n\ntarget_train = y\ntarget_test = yval \n\n# Data & model configuration\nbatch_size = 256\nno_epochs = 50\nvalidation_split = 0.2\nverbosity = 1\nlatent_dim = 2\nnum_channels = 1\n\n# Reshape data\n\ninput_train = Xe\ninput_test = Xvale\ninput_shape = (config.input_size, 1)\n\n# Parse numbers as floats\ninput_train = input_train.astype('float32')\ninput_test = input_test.astype('float32')\n\n\n# # =================\n# # Encoder\n# # =================\n\n# Definition\ni = Input(shape=input_shape, name='encoder_input')\ncx = Conv1D(filters=8, kernel_size=16, strides=2, padding='same', activation='relu')(i)\ncx = BatchNormalization()(cx)\ncx = Conv1D(filters=16, kernel_size=16, strides=2, padding='same', activation='relu')(cx)\ncx = BatchNormalization()(cx)\ncx = Conv1D(filters=16, kernel_size=16, strides=2, padding='same', activation='relu')(cx)\ncx = BatchNormalization()(cx)\ncx = Conv1D(filters=1, kernel_size=16, strides=2, padding='same', activation='relu')(cx)\neo = BatchNormalization()(cx)\n\n\nconv_shape = K.int_shape(cx)\nprint(conv_shape)\n# Define sampling with reparameterization trick\ndef sample_z(args):\n mu, sigma = args\n batch = K.shape(mu)[0]\n dim = K.int_shape(mu)[1]\n eps = K.random_normal(shape=(batch, dim))\n return mu + K.exp(sigma / 2) * eps\n\n\n# Instantiate encoder\nencoder = Model(i, eo, name='encoder')\n#encoder = Model(i, [mu, sigma, z], name='encoder')\nencoder.summary()\n\n# # =================\n# # Encoder_2\n# # =================\n\n# Definition\ni_2 = Input(shape=input_shape, name='encoder2_input')\ncx = Conv1D(filters=8, kernel_size=16, strides=2, padding='same', activation='relu')(i_2)\ncx = BatchNormalization()(cx)\ncx = Conv1D(filters=16, kernel_size=16, strides=2, padding='same', activation='relu')(cx)\ncx = BatchNormalization()(cx)\ncx = Conv1D(filters=16, kernel_size=16, strides=2, padding='same', activation='relu')(cx)\ncx = BatchNormalization()(cx)\ncx = Conv1D(filters=1, kernel_size=16, strides=2, padding='same', activation='relu')(cx)\neo_2 = BatchNormalization()(cx)\n\nconv_shape_2 = K.int_shape(cx)\n\n# Instantiate encoder\nencoder_2 = Model(i_2, eo_2, name='encoder_2')\nencoder_2.summary()\n\n# =================\n# Decoder\n# 
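(maps the concatenated latent codes from both encoders back to the input length)\n# 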
=================\n\n# Definition\nd_i = Input(shape=(conv_shape[1], conv_shape[2]*2), name='decoder_input')\ncx = UpSampling1D(size=2)(d_i)\ncx = Conv1D(filters=2, kernel_size=16, strides=2, padding='same', activation='relu')(cx)\ncx = BatchNormalization()(cx)\ncx = UpSampling1D(size=2)(cx)\ncx = Conv1D(filters=2, kernel_size=16, strides=2, padding='same', activation='relu')(cx)\ncx = BatchNormalization()(cx)\ncx = UpSampling1D(size=2)(cx)\ncx = Conv1D(filters=1, kernel_size=16, strides=2, padding='same', activation='relu', name = 'conv12d3')(cx)\ncx = BatchNormalization()(cx)\ncx = UpSampling1D(size=4)(cx)\ncx = Conv1D(filters=1, kernel_size=16, strides=2, padding='same', activation='relu', name = 'conv12d4')(cx)\ncx = BatchNormalization()(cx)\ncx = UpSampling1D(size=4)(cx)\ncx = Conv1D(filters=num_channels, kernel_size=16, activation='relu', padding='same', name='decoder_output')(cx)\no = UpSampling1D(size=2)(cx)\n# Instantiate decoder\ndecoder = Model(d_i, o, name='decoder')\ndecoder.summary()\n\n# =================\n# VAE as a whole\n# =================\nfrom tensorflow import concat\n# Instantiate VAE\nvae_outputs = decoder(concat([encoder(i), encoder_2(i)], 2))\nvae = Model(i, vae_outputs, name='multi-ae')\nvae.summary()\n\n# Define loss\ndef reconstruction_loss(true, pred):\n # Reconstruction loss\n reconstruction_loss = mean_squared_error(K.flatten(true), K.flatten(pred))# * 256\n return reconstruction_loss\n #return K.mean(reconstruction_loss + kl_loss)\n\n# Compile VAE\nvae.compile(optimizer='adam', loss=reconstruction_loss)\n\n# Train autoencoder\nvae.fit(input_train, input_train, epochs = no_epochs, batch_size = batch_size, validation_data = (input_test, input_test))\n#vae.fit(input_train, input_train, epochs = no_epochs, batch_size = batch_size, validation_split = validation_split)\n\n\n# =================\n# Results visualization\n# Credits for original visualization code: https://keras.io/examples/variational_autoencoder_deconv/\n# (François Chollet).\n# =================\ndef viz_latent_space(encoder, data):\n input_data, target_data = data\n\n print(target_data.shape)\n #print(target_data.shape[0])\n #print(target_data)\n print(encoder.predict(input_data).shape)#.reshape((32,16))\n print(encoder.predict(input_data).reshape(input_data.shape[0], 16).shape)#.reshape((32,16))\n\n from sklearn.manifold import TSNE\n X_tsne = TSNE(n_components=2, random_state=1).fit_transform(encoder.predict(input_data).reshape(input_data.shape[0], 16))\n print(X_tsne.shape)\n print(X_tsne)\n\n\n plt.figure(figsize=(8, 10))\n scatter = plt.scatter(X_tsne[:,0], X_tsne[:,1], c=target_data, label = classes)\n plt.legend(handles=scatter.legend_elements()[0], labels=classes)\n plt.title(\"tsne\")\n plt.show()\n\ndef viz_latent_space_pca(encoder, data):\n input_data, target_data = data\n\n print(target_data.shape)\n #print(target_data.shape[0])\n #print(target_data)\n print(encoder.predict(input_data).shape)#.reshape((32,16))\n print(encoder.predict(input_data).reshape(input_data.shape[0], 16).shape)#.reshape((32,16))\n\n from sklearn.decomposition import PCA\n principalComponents = PCA(n_components=2, random_state = 1).fit_transform(encoder.predict(input_data).reshape(input_data.shape[0], 16)) \n print(principalComponents.shape)\n print(principalComponents) \n\n\n plt.figure(figsize=(8, 10))\n scatter = plt.scatter(principalComponents[:,0], principalComponents[:,1], c=target_data, label=classes)\n plt.legend(handles=scatter.legend_elements()[0], labels=classes)\n plt.title(\"pca\")\n 
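# label the PCA axes\n    plt.xlabel(\"principal component 1\")\n    plt.ylabel(\"principal component 2\")\n    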
plt.show()\n\ndef plot_some_signals(vae, data):\n    x_vae_pred = vae.predict(data)\n\n    from matplotlib import pyplot as plt\n    xaxis = np.arange(0,config.input_size)\n    for count in range(5):\n        plt.plot(xaxis, x_vae_pred[count])\n    plt.title(\"ae reconstructed beats\")\n    plt.xlabel(\"beat length\")\n    plt.ylabel(\"signal\")\n    plt.show()\n\n# Plot results\ndata = (input_test, target_test)\nviz_latent_space(encoder, data)\nviz_latent_space(encoder_2, data)\nviz_latent_space_pca(encoder, data)\nviz_latent_space_pca(encoder_2, data)\n\nplot_some_signals(vae, input_test)\n#viz_decoded(encoder, decoder, data)","sub_path":"ae_multi.py","file_name":"ae_multi.py","file_ext":"py","file_size_in_byte":8206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"7701419","text":"import sys\nfrom PyQt4.QtGui import *\n\nif __name__ == \"__main__\":\n\n    app = QApplication([])\n\n    widget = QWidget()\n\n    palette = QPalette()\n    gradient = QLinearGradient(0, 0, 0, 400)\n    gradient.setColorAt(0.0, QColor(240, 240, 240))\n    gradient.setColorAt(1.0, QColor(240, 160, 160))\n    palette.setBrush(QPalette.Window, QBrush(gradient))\n    widget.setPalette(palette)\n\n    widget.show()\n    app.exec_()\n\n","sub_path":"cls1/lib/py/examples/qWinGradient.py","file_name":"qWinGradient.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"112460077","text":"from functools import partial\r\n\r\nimport numpy as np\r\nimport tensorflow as tf\r\nfrom tensorflow import keras\r\nfrom tensorflow.keras.callbacks import (EarlyStopping, ReduceLROnPlateau,\r\n                                        TensorBoard)\r\nfrom tqdm import tqdm\r\n\r\nfrom nets.efficientdet import Efficientdet\r\nfrom nets.efficientdet_training import Generator, focal, smooth_l1, LossHistory\r\nfrom utils.anchors import get_anchors\r\nfrom utils.utils import BBoxUtility, ModelCheckpoint\r\n\r\n\r\n# build the train step inside a closure to avoid tf.function retracing bugs\r\ndef get_train_step_fn():\r\n    @tf.function\r\n    def train_step(imgs, focal_loss, smooth_l1_loss, targets0, targets1, net, optimizer):\r\n        with tf.GradientTape() as tape:\r\n            # compute the losses\r\n            regression, classification = net(imgs, training=True)\r\n            reg_value = smooth_l1_loss(targets0, regression)\r\n            cls_value = focal_loss(targets1, classification)\r\n            loss_value = reg_value + cls_value\r\n\r\n        grads = tape.gradient(loss_value, net.trainable_variables)\r\n        optimizer.apply_gradients(zip(grads, net.trainable_variables))\r\n        return loss_value, reg_value, cls_value\r\n    return train_step\r\n\r\n@tf.function\r\ndef val_step(imgs, focal_loss, smooth_l1_loss, targets0, targets1, net, optimizer):\r\n    # compute the validation losses\r\n    regression, classification = net(imgs)\r\n    reg_value = smooth_l1_loss(targets0, regression)\r\n    cls_value = focal_loss(targets1, classification)\r\n    loss_value = reg_value + cls_value\r\n\r\n    return loss_value, reg_value, cls_value\r\n\r\ndef fit_one_epoch(net, focal_loss, smooth_l1_loss, optimizer, epoch, epoch_size, epoch_size_val, gen, genval, \r\n            Epoch, train_step=None):\r\n    total_r_loss = 0\r\n    total_c_loss = 0\r\n    total_loss = 0\r\n    \r\n    val_loss = 0\r\n    with tqdm(total=epoch_size,desc=f'Epoch {epoch + 1}/{Epoch}',postfix=dict,mininterval=0.3) as pbar:\r\n        for iteration, batch in enumerate(gen):\r\n            if iteration>=epoch_size:\r\n                break\r\n            images, targets0, targets1 = batch[0], batch[1], batch[2]\r\n            targets0 = tf.convert_to_tensor(targets0)\r\n            targets1 = tf.convert_to_tensor(targets1)\r\n            loss_value, reg_value, cls_value = train_step(images, focal_loss, smooth_l1_loss, targets0, 
targets1, net, optimizer)\r\n total_loss += loss_value\r\n total_c_loss += cls_value\r\n total_r_loss += reg_value\r\n\r\n pbar.set_postfix(**{'conf_loss' : float(total_c_loss) / (iteration + 1), \r\n 'regression_loss' : float(total_r_loss) / (iteration + 1), \r\n 'lr' : optimizer._decayed_lr(tf.float32).numpy()})\r\n pbar.update(1)\r\n\r\n print('Start Validation')\r\n with tqdm(total=epoch_size_val, desc=f'Epoch {epoch + 1}/{Epoch}',postfix=dict,mininterval=0.3) as pbar:\r\n for iteration, batch in enumerate(genval):\r\n if iteration>=epoch_size_val:\r\n break\r\n # 计算验证集loss\r\n images, targets0, targets1 = batch[0], batch[1], batch[2]\r\n targets0 = tf.convert_to_tensor(targets0)\r\n targets1 = tf.convert_to_tensor(targets1)\r\n\r\n loss_value, _, _ = val_step(images, focal_loss, smooth_l1_loss, targets0, targets1, net, optimizer)\r\n # 更新验证集loss\r\n val_loss = val_loss + loss_value\r\n\r\n pbar.set_postfix(**{'total_loss': float(val_loss)/ (iteration + 1)})\r\n pbar.update(1)\r\n\r\n print('Finish Validation')\r\n print('\\nEpoch:'+ str(epoch+1) + '/' + str(Epoch))\r\n print('Total Loss: %.4f || Val Loss: %.4f ' % (total_loss/(epoch_size+1),val_loss/(epoch_size_val+1)))\r\n net.save_weights('logs/Epoch%d-Total_Loss%.4f-Val_Loss%.4f.h5'%((epoch+1),total_loss/(epoch_size+1),val_loss/(epoch_size_val+1)))\r\n \r\n#---------------------------------------------------#\r\n# 获得类和先验框\r\n#---------------------------------------------------#\r\ndef get_classes(classes_path):\r\n '''loads the classes'''\r\n with open(classes_path) as f:\r\n class_names = f.readlines()\r\n class_names = [c.strip() for c in class_names]\r\n return class_names\r\n\r\ngpus = tf.config.experimental.list_physical_devices(device_type='GPU')\r\nfor gpu in gpus:\r\n tf.config.experimental.set_memory_growth(gpu, True)\r\n\r\nfreeze_layers = [226, 328, 328, 373, 463, 565, 655, 802]\r\nimage_sizes = [512, 640, 768, 896, 1024, 1280, 1408, 1536]\r\n\r\n#----------------------------------------------------#\r\n# 检测精度mAP和pr曲线计算参考视频\r\n# https://www.bilibili.com/video/BV1zE411u7Vw\r\n#----------------------------------------------------#\r\nif __name__ == \"__main__\":\r\n #----------------------------------------------------#\r\n # 是否使用eager模式训练\r\n #----------------------------------------------------#\r\n eager = False\r\n #-------------------------------------------#\r\n # 训练前,请指定好phi和model_path\r\n # 二者所使用Efficientdet版本要相同\r\n #-------------------------------------------#\r\n phi = 1\r\n #----------------------------------------------------#\r\n # 获得图片路径和标签\r\n #----------------------------------------------------#\r\n annotation_path = '2007_train.txt'\r\n #----------------------------------------------------#\r\n # classes的路径,非常重要\r\n # 训练前一定要修改classes_path,使其对应自己的数据集\r\n #----------------------------------------------------#\r\n classes_path = '/mnt/disk/zhh/efficientdet-tf2-zhh/model_data/voc_classes.txt' \r\n #------------------------------------------------------#\r\n # 一共有多少类和多少先验框\r\n #------------------------------------------------------#\r\n class_names = get_classes(classes_path)\r\n num_classes = len(class_names) \r\n\r\n #------------------------------------------------------#\r\n # 权值文件请看README,百度网盘下载\r\n # 训练自己的数据集时提示维度不匹配正常\r\n # 预测的东西都不一样了自然维度不匹配\r\n #------------------------------------------------------#\r\n model_path = \"model_data/efficientdet-d1-voc.h5\"\r\n\r\n #------------------------------------------------------#\r\n # 创建Efficientdet模型\r\n 
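# (create the EfficientDet model for the chosen phi; mismatched head weights are skipped on load)\r\n    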
#------------------------------------------------------#\r\n model = Efficientdet(phi,num_classes=num_classes)\r\n model.load_weights(model_path, by_name=True, skip_mismatch=True)\r\n\r\n #-------------------------------#\r\n # 获得先验框\r\n #-------------------------------#\r\n priors = get_anchors(image_sizes[phi])\r\n bbox_util = BBoxUtility(num_classes, priors)\r\n\r\n #----------------------------------------------------------------------#\r\n # 验证集的划分在train.py代码里面进行\r\n # 2007_test.txt和2007_val.txt里面没有内容是正常的。训练不会使用到。\r\n # 当前划分方式下,验证集和训练集的比例为1:9\r\n #----------------------------------------------------------------------#\r\n val_split = 0.1\r\n with open(annotation_path) as f:\r\n lines = f.readlines()\r\n np.random.seed(10101)\r\n np.random.shuffle(lines)\r\n np.random.seed(None)\r\n num_val = int(len(lines)*val_split)\r\n num_train = len(lines) - num_val\r\n\r\n #-------------------------------------------------------------------------------#\r\n # 训练参数的设置\r\n # logging表示tensorboard的保存地址\r\n # checkpoint用于设置权值保存的细节,period用于修改多少epoch保存一次\r\n # reduce_lr用于设置学习率下降的方式\r\n # early_stopping用于设定早停,val_loss多次不下降自动结束训练,表示模型基本收敛\r\n #-------------------------------------------------------------------------------#\r\n logging = TensorBoard(log_dir=\"logs\")\r\n reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=3, verbose=1)\r\n checkpoint = ModelCheckpoint('logs/ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',\r\n monitor='val_loss', save_weights_only=True, save_best_only=False, period=1)\r\n early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1)\r\n loss_history = LossHistory(\"logs\")\r\n\r\n #------------------------------------------------------#\r\n # 主干特征提取网络特征通用,冻结训练可以加快训练速度\r\n # 也可以在训练初期防止权值被破坏。\r\n # Init_Epoch为起始世代\r\n # Freeze_Epoch为冻结训练的世代\r\n # Epoch总训练世代\r\n #------------------------------------------------------#\r\n for i in range(freeze_layers[phi]):\r\n model.layers[i].trainable = False\r\n\r\n if True:\r\n #--------------------------------------------#\r\n # Batch_size不要太小,不然训练效果很差\r\n #--------------------------------------------#\r\n Batch_size = 8\r\n Lr = 1e-3\r\n Init_Epoch = 0\r\n Freeze_Epoch = 50\r\n\r\n epoch_size = num_train // Batch_size\r\n epoch_size_val = num_val // Batch_size\r\n\r\n if epoch_size == 0 or epoch_size_val == 0:\r\n raise ValueError(\"数据集过小,无法进行训练,请扩充数据集。\")\r\n\r\n print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, Batch_size))\r\n if eager:\r\n generator = Generator(bbox_util, Batch_size, lines[:num_train], lines[num_train:],\r\n (image_sizes[phi], image_sizes[phi]),num_classes)\r\n\r\n gen = tf.data.Dataset.from_generator(partial(generator.generate, train = True, eager = True), (tf.float32, tf.float32, tf.float32))\r\n gen_val = tf.data.Dataset.from_generator(partial(generator.generate, train = False, eager = True), (tf.float32, tf.float32, tf.float32))\r\n\r\n gen = gen.shuffle(buffer_size=Batch_size).prefetch(buffer_size=Batch_size)\r\n gen_val = gen_val.shuffle(buffer_size=Batch_size).prefetch(buffer_size=Batch_size)\r\n\r\n lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(\r\n initial_learning_rate=Lr, decay_steps=epoch_size, decay_rate=0.95, staircase=True\r\n )\r\n optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule)\r\n\r\n for epoch in range(Init_Epoch,Freeze_Epoch):\r\n fit_one_epoch(model, focal(), smooth_l1(), optimizer, epoch, epoch_size, epoch_size_val, gen, gen_val, \r\n Freeze_Epoch, 
get_train_step_fn())\r\n else:\r\n gen = Generator(bbox_util, Batch_size, lines[:num_train], lines[num_train:],\r\n (image_sizes[phi], image_sizes[phi]),num_classes)\r\n model.compile(loss={\r\n 'regression' : smooth_l1(),\r\n 'classification': focal()\r\n },optimizer=keras.optimizers.Adam(Lr)\r\n ) \r\n model.fit(\r\n gen.generate(True), \r\n steps_per_epoch=epoch_size,\r\n validation_data=gen.generate(False),\r\n validation_steps=epoch_size_val,\r\n epochs=Freeze_Epoch, \r\n verbose=1,\r\n initial_epoch=Init_Epoch ,\r\n callbacks=[logging, checkpoint, reduce_lr, early_stopping, loss_history]\r\n )\r\n\r\n for i in range(freeze_layers[phi]):\r\n model.layers[i].trainable = True\r\n\r\n if True:\r\n #--------------------------------------------#\r\n # Batch_size不要太小,不然训练效果很差\r\n #--------------------------------------------#\r\n Batch_size = 4\r\n Lr = 5e-5\r\n Freeze_Epoch = 50\r\n Epoch = 100\r\n \r\n epoch_size = num_train // Batch_size\r\n epoch_size_val = num_val // Batch_size\r\n\r\n if epoch_size == 0 or epoch_size_val == 0:\r\n raise ValueError(\"数据集过小,无法进行训练,请扩充数据集。\")\r\n\r\n print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, Batch_size))\r\n if eager:\r\n generator = Generator(bbox_util, Batch_size, lines[:num_train], lines[num_train:],\r\n (image_sizes[phi], image_sizes[phi]),num_classes)\r\n\r\n gen = tf.data.Dataset.from_generator(partial(generator.generate, train = True, eager = True), (tf.float32, tf.float32, tf.float32))\r\n gen_val = tf.data.Dataset.from_generator(partial(generator.generate, train = False, eager = True), (tf.float32, tf.float32, tf.float32))\r\n\r\n gen = gen.shuffle(buffer_size=Batch_size).prefetch(buffer_size=Batch_size)\r\n gen_val = gen_val.shuffle(buffer_size=Batch_size).prefetch(buffer_size=Batch_size)\r\n\r\n lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(\r\n initial_learning_rate=Lr, decay_steps=epoch_size, decay_rate=0.95, staircase=True\r\n )\r\n optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule)\r\n \r\n for epoch in range(Freeze_Epoch,Epoch):\r\n fit_one_epoch(model, focal(), smooth_l1(), optimizer, epoch, epoch_size, epoch_size_val, gen, gen_val, \r\n Epoch, get_train_step_fn())\r\n else:\r\n gen = Generator(bbox_util, Batch_size, lines[:num_train], lines[num_train:],\r\n (image_sizes[phi], image_sizes[phi]),num_classes)\r\n model.compile(loss={\r\n 'regression' : smooth_l1(),\r\n 'classification': focal()\r\n },optimizer=keras.optimizers.Adam(Lr)\r\n ) \r\n model.fit(\r\n gen.generate(True), \r\n steps_per_epoch=epoch_size,\r\n validation_data=gen.generate(False),\r\n validation_steps=epoch_size_val,\r\n epochs=Epoch, \r\n verbose=1,\r\n initial_epoch=Freeze_Epoch,\r\n callbacks=[logging, checkpoint, reduce_lr, early_stopping, loss_history]\r\n )\r\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":14393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"52333681","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\n\nfrom .models import Question\n\ndef index(request):\n # Get the 5 last questions\n latest_question_list = Question.objects.order_by('-pub_date')[:5]\n context = {\n 'latest_question_list' : latest_question_list\n }\n return render(request=request, template_name='polls/index.html',context=context)\n\ndef detail(request, question_id):\n try:\n question = Question.objects.get(pk=question_id)\n except 
Question.DoesNotExist:\n        from django.http import Http404  # Http404 is not imported at the top of this file\n        raise Http404(\"Question does not exist\")\n    return render(request, 'polls/detail.html', {'question': question})\n    \n\ndef results(request, question_id):\n    response = \"You are looking at the results of question {}\"\n    return HttpResponse(response.format(question_id))\n\ndef vote(request, question_id):\n    return HttpResponse(\"You are voting on question {}\".format(question_id))\n\n\n","sub_path":"django-web-server/testpage/polls/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"317900273","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\n#must set these before loading numpy:\nos.environ[\"OMP_NUM_THREADS\"] = '4' # export OMP_NUM_THREADS=4\nos.environ[\"OPENBLAS_NUM_THREADS\"] = '4' # export OPENBLAS_NUM_THREADS=4\nos.environ[\"MKL_NUM_THREADS\"] = '6' # export MKL_NUM_THREADS=6\nos.environ[\"VECLIB_MAXIMUM_THREADS\"] = '4' # export VECLIB_MAXIMUM_THREADS=4\nos.environ[\"NUMEXPR_NUM_THREADS\"] = '6' # export NUMEXPR_NUM_THREADS=6\n\nimport numpy as np\nimport copy\nimport math\n\nimport scipy.special as ss\n\ntry:\n    from CobBO.kernelspace import KernelSpace\nexcept ImportError:\n    from kernelspace import KernelSpace\n\ntry:\n    from bayesmark.abstract_optimizer import AbstractOptimizer\n    from bayesmark.experiment import experiment_main\nexcept ImportError as e:\n    AbstractOptimizer = object\n\n\nclass CobBO(AbstractOptimizer):\n    def __init__(self, api_config, n_iter=100, pbounds=None, init_points=0, batch=1, random_state=None,\\\n                 noise=False, open_slow_trust_region=True, open_fast_trust_region=True,\\\n                 consistent_query=None, restart=False, allow_partition=True, minimization=False):\n        \"\"\"Build a wrapper class to use the optimizer.\n\n        Parameters\n        ----------\n        api_config : dict-like of dict-like\n            Configuration of the optimization variables. 
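Each key is a variable name mapped to a dict giving its type, space, and range or values. 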
See API description.\n n_iter : int\n The query budget for the experiment\n init_points : int\n The number of initial points provided\n \"\"\"\n self.api_config = api_config\n self.minimization = minimization\n\n # Set up a proper number of initial points if init_points is wrongly configured\n if init_points <= 0:\n if n_iter > 300:\n init_points = min(int(n_iter * 0.08), 500)\n else:\n init_points = np.clip(int(n_iter * 0.10), 5, 30)\n\n self.init_points = init_points\n self._n_iter = n_iter\n\n assert batch == 1, 'Currently CobBO supports a batch of one only'\n self.batch = batch\n\n self.pbounds = pbounds\n self.api = api_config is not None\n if self.api:\n param_type_dict_name_range, self.round_to_values, self.equiv_point_neighbor, \\\n self.logs_params, self.logits_params, self.cats_params, self.ints_params, self.cardinality = \\\n CobBO._api_config_to_pbounds_and_rounding(api_config)\n\n self.pbounds = {}\n for k, d in param_type_dict_name_range.items():\n self.pbounds = {**self.pbounds, **d}\n\n self.space = KernelSpace(self.pbounds, n_iter, init_points, batch, random_state,\n noise, open_slow_trust_region, open_fast_trust_region,\n consistent_query, restart, allow_partition)\n\n\n @staticmethod\n def _api_config_to_pbounds_and_rounding(api_config):\n \"\"\"Convert scikit-learn like api_config to CobBO's pbounds\n Example:\n api_config={'max_depth': {'type': 'int', 'space': 'linear', 'range': (1, 15)},\n 'min_samples_split': {'type': 'real', 'space': 'logit', 'range': (0.01, 0.99)},\n 'min_samples_leaf': {'type': 'real', 'space': 'logit', 'range': (0.01, 0.49)},\n 'min_weight_fraction_leaf': {'type': 'real', 'space': 'logit', 'range': (0.01, 0.49)},\n 'max_features': {'type': 'real', 'space': 'logit', 'range': (0.01, 0.99)},\n 'min_impurity_decrease': {'type': 'real', 'space': 'linear', 'range': (0.0, 0.5)}}\n Take api_config as argument so this can be static.\n \"\"\"\n # The ordering of iteration prob makes no difference, but just to be\n # safe and consistent with space.py, I will make sorted.\n param_list = sorted(api_config.keys())\n\n param_type_dict_name_range = {'real': {}, 'int': {}, 'bool': {}, 'cat': {}, 'ordinal': {}}\n round_to_values = {}\n equiv_point_neighbor = {}\n logits = []\n logs = []\n cats = []\n ints = []\n cardinality = 1\n for param_name in param_list:\n param_config = api_config[param_name]\n\n param_type = param_config[\"type\"]\n param_space = param_config.get(\"space\", None)\n param_range = param_config.get(\"range\", None)\n param_values = param_config.get(\"values\", None)\n\n # Setup for whitelist of values if provided:\n if (param_values is not None) and (param_type not in (\"cat\", \"ordinal\")):\n assert param_range is None\n param_values = sorted(np.unique(param_values))\n param_range = (param_values[0], param_values[-1])\n\n # handle different types\n if param_type in (\"cat\", \"ordinal\"):\n assert param_range is None\n assert param_values is not None\n upper = len(param_values) - 1\n low, high = 0, upper + 0.9999\n param_type_dict_name_range[param_type][param_name] = (low, high)\n cats.append(param_name)\n cardinality *= len(np.unique(param_values))\n\n def symbol_for_cat(x, param_values=param_values):\n return param_values[int(math.floor(x))]\n\n round_to_values[param_name] = symbol_for_cat\n\n def equiv_point_neighbor_cat(x, upper=upper):\n a = math.floor(x)\n gap = x - a\n if gap <= 0.5:\n neighbor = a - 1 if a >= 1 else a + 1\n else:\n neighbor = a + 1 if a < upper else a - 1\n\n equiv = a + 0.5\n return equiv, neighbor\n\n 
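# keep the midpoint/neighbor helper for this categorical parameter\n                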
equiv_point_neighbor[param_name] = equiv_point_neighbor_cat\n\n elif param_type == \"int\":\n low, high = param_range\n param_type_dict_name_range[param_type][param_name] = (low, high + 0.9999)\n ints.append(param_name)\n cardinality *= (high - low + 1)\n round_to_values[param_name] = math.floor\n\n def equiv_point_neighbor_int(x, low=low, high=high):\n a = math.floor(x)\n gap = x - a\n if gap <= 0.5:\n neighbor = a - 1 if a > low else a + 1\n else:\n neighbor = a + 1 if a < high else a - 1\n\n equiv = a + 0.5\n return equiv, neighbor\n\n equiv_point_neighbor[param_name] = equiv_point_neighbor_int\n\n elif param_type == \"bool\":\n assert param_range is None\n assert param_values is None\n param_type_dict_name_range[param_type][param_name] = (0, 1)\n cardinality *= 2\n round_to_values[param_name] = np.around\n\n elif param_type == \"real\":\n cardinality = np.inf\n low, high = param_range\n if param_space == \"log\":\n low, high = np.log10(low), np.log10(high)\n logs.append(param_name)\n if param_space == \"logit\":\n low, high = ss.logit(low), ss.logit(high)\n logits.append(param_name)\n param_type_dict_name_range[param_type][param_name] = (low, high)\n\n else:\n assert False, \"type %s not handled in API\" % param_type\n\n return param_type_dict_name_range, round_to_values, equiv_point_neighbor, logs, logits, cats, ints, cardinality\n\n def suggest(self, n_suggestions=1):\n \"\"\"Get suggestions from the optimizer.\n\n Parameters\n ----------\n n_suggestions : int\n Desired number of parallel suggestions in the output\n Currently the algorithm is optimized for n_suggestions=1\n\n Returns\n -------\n next_guess : list of dict\n List of `n_suggestions` suggestions to evaluate the objective\n function. Each suggestion is a dictionary where each key\n corresponds to a parameter being optimized.\n \"\"\"\n X = self.suggest_as_real_values(n_suggestions)\n X = self.convert_real_to_target_type(X)\n\n return X\n\n def suggest_as_real_values(self, n_suggestions=1):\n X = self.space.impl_suggest_kernel(n_suggestions)\n X = [dict(zip(self.space.keys, x)) for x in X]\n return X\n\n def convert_real_to_target_type(self, X):\n if self.api:\n for x in X:\n for param_name in self.logits_params:\n x[param_name] = ss.expit(x[param_name])\n for param_name in self.logs_params:\n x[param_name] = 10 ** x[param_name]\n for param_name in self.cats_params:\n x[param_name] = self.round_to_values[param_name](x[param_name])\n for param_name in self.ints_params:\n x[param_name] = int(math.floor(x[param_name]))\n return X\n\n def observe(self, X, y, verbose=False):\n \"\"\"Feed an observation back.\n\n Parameters\n ----------\n X : list of dict-like\n Places where the objective function has already been evaluated.\n Each suggestion is a dictionary where each key corresponds to a\n parameter being optimized.\n y : array-like, shape (n,)\n Corresponding values where objective has been evaluated\n \"\"\"\n if self.api:\n if np.isinf(y).any():\n print(\"y contains -inf: y=\", y)\n # Convert linear to log\n for x in X:\n for param_name in self.logs_params:\n x[param_name] = np.log10(x[param_name])\n\n for param_name in self.logits_params:\n x[param_name] = ss.logit(x[param_name])\n\n # Minimization rather than CobBO's default maximization\n if self.minimization:\n y = [-yy for yy in y]\n\n try:\n _ = (_ for _ in y)\n except TypeError:\n y = [y]\n X = [X]\n\n self.space.last_eval_num = self.space.eval_num\n self.space.eval_num += len(X)\n\n # Update the model with new objective function observations\n for x_probe, target, 
is_rd_sample, k_indexes, util_id \\\n in zip(X, y, self.space.is_rd_sample_list, self.space.k_indexes_list, self.space.util_id_list):\n self.space.observe(x_probe, target, is_rd_sample, k_indexes, util_id)\n\n if verbose:\n self.space.heart_beat_print(num=25)\n\n def maximize(self, obj_func, optimizer, use_real_space=False):\n \"\"\"Maximize a given objective function\n\n Parameters\n ----------\n obj_func : method\n The objective function to be optimized\n optimizer : The CobBO optimizer object\n\n Returns\n -------\n best_point : A dictionary\n The point with the best objective value obsereved. Each key corresponds to a parameter being optimized.\n \"\"\"\n assert isinstance(optimizer, CobBO), ' A CobBO optimizer is expected'\n\n while optimizer.has_budget:\n if not use_real_space:\n x_probe_list = self.suggest(n_suggestions=self.batch)\n target_list = [obj_func(**x) for x in x_probe_list]\n self.observe(x_probe_list, target_list)\n else:\n x_probe_real_list = self.suggest_as_real_values(n_suggestions=self.batch)\n x_probe_list = self.convert_real_to_target_type(copy.deepcopy(x_probe_real_list))\n target_list = [obj_func(**x) for x in x_probe_list]\n self.observe(x_probe_real_list, target_list)\n\n return self.best_point\n\n @property\n def has_budget(self):\n return self.space.has_unused_trial_budget()\n\n @property\n def best_point(self):\n return self.space.max_param\n\n\nif __name__ == \"__main__\":\n experiment_main(CobBO)","sub_path":"CobBO/optimizer.py","file_name":"optimizer.py","file_ext":"py","file_size_in_byte":12015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"1503406","text":"#!/usr/bin/env python\n\nimport mailbox\nimport sys\nimport email\nimport os\nimport glob\nimport shutil\n\nfolders = []\n\nfile_subscription = open(\"maildir/subscriptions\",\"a+\")\n# traverse root directory, and list directories as dirs and files as files\nfor root, dirs, files in os.walk(\"maildir\"):\n if files or dirs:\n folder = root\n print(\"Processing \" + folder)\n # change mailbox permission to lookup and read only (http://wiki2.dovecot.org/ACL)\n with open(folder + \"/dovecot-acl\",\"a+\") as f:\n f.write(\"owner lr\")\n f.close()\n if files:\n folder = root\n os.makedirs(folder + \"/cur\")\n os.makedirs(folder + \"/new\")\n os.makedirs(folder + \"/tmp\")\n for file in glob.glob(folder + \"/[0-9]*.\"):\n # mark message read\n shutil.move(file, file + \":2,S\")\n shutil.move(file + \":2,S\", folder + \"/cur\")\n folder = root[8:]\n if len(folder) > 0:\n \tfile_subscription.write(folder + \"\\n\")\nfile_subscription.close()\n\n\nif os.path.exists(\"maildir/cur\"):\n os.rmdir(\"maildir/cur\")\nif os.path.exists(\"maildir/new\"):\n os.rmdir(\"maildir/new\")\nif os.path.exists(\"maildir/tmp\"):\n os.rmdir(\"maildir/tmp\")\nif os.path.exists(\"maildir/dovecot-acl\"):\n os.remove(\"maildir/dovecot-acl\")","sub_path":"format_enron.py","file_name":"format_enron.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"620028102","text":"# -*- coding: UTF-8 -*- \n#! 
/usr/bin/python3\n__author__ = 'Liu.Eric'\n \nimport sys\nimport csv\nimport os\n\n\nimport ssq\nimport dlt\nimport linear\n\n# fetch the rows, then save them into the data directory\ndef save_data(rows,file):\n    \n    #with open(file, \"w\" if not os.path.exists(data_ssq_file) else \"a\", newline='') as f:\n    \n    with open(file, \"w\", newline='') as f:\n        writer = csv.writer(f)\n        writer.writerows(rows)\n    \n    print(\"write finished\")\n\n\nif __name__ == '__main__':\n    print(\"python3 main !\")\n    # dataset file paths for ssq (double color ball) and dlt (super lotto)\n    data_ssq_file = '/Users/Liuhua/Projects/pythonprojects/caipiao/data/ssq.csv'\n    data_dlt_file = '/Users/Liuhua/Projects/pythonprojects/caipiao/data/dlt.csv'\n    ssq_rows = ssq.get_500_data()\n    save_data(ssq_rows , data_ssq_file)\n    \n    \n    #dlt_rows = dlt.get_500_data(100)\n    #save_data(dlt_rows , data_dlt_file)\n    #print(dlt_rows)\n\n    #linear.get_data(data_ssq_file)\n    \n    print(\"end of work!\")\n\n","sub_path":"caipiao/cpmain.py","file_name":"cpmain.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"460613951","text":"# DESAFIO 43\n\naltura = float(input('altura '))\npeso = float(input('peso '))\nimc = peso / pow(altura,2)\n\n\nif imc < 18.5:\n    print('ABAIXO DO PESO')\nelif imc < 25:\n    print('PESO IDEAL')\nelif imc < 30:\n    print('SOBRE PESO')\nelif imc < 40:\n    print('OBESIDADE')\nelse:\n    print('OBESIDADE MORBIDA')\n\n\n\n","sub_path":"CursoEmVideoExercicios/Desafio043.py","file_name":"Desafio043.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"182839651","text":"import argparse\n\n\n# the following functions are used to build file names for saving data and displaying results\n\n\ndef make_study_string(params):\n    return params.env_name + '_' + params.study_name + '_' + params.critic_update_method + '_' + \\\n           params.critic_estim_method\n\n\ndef make_study_params_string(params):\n    return 'trajs_' + str(params.nb_trajs) + '_update_threshold_' + str(params.update_threshold) + '_nb_updates_' + str(\n        params.nb_updates)\n\n\ndef make_learning_params_string(params):\n    return 'gamma_' + str(params.gamma) + '_tau_' + str(params.tau) + '_nstep_' + str(params.nstep) + '_lr_act_' + str(\n        params.lr_actor) + '_lr_critic_' + str(params.lr_critic) + '_init_alpha_' + str(\n        params.init_alpha) + '_lr_alpha_' + str(params.lr_alpha) + '_target_entropy_alpha_' + str(\n        params.target_entropy_alpha)\n\n\ndef make_full_string(params):\n    return make_study_string(params) + '_' + make_study_params_string(params) + '_' + make_learning_params_string(\n        params)\n\n\ndef get_args():\n    \"\"\"\n    Standard function to specify the default value of the hyper-parameters of all policy gradient algorithms\n    and experimental setups\n    :return: the complete list of arguments\n    \"\"\"\n    parser = argparse.ArgumentParser()\n    # environment setting\n    parser.add_argument('--env_name', type=str, default='Pendulum-v0', help='the environment name')\n    parser.add_argument('--env_obs_space_name', nargs='+', type=str,\n                        default=[\"pos\", \"angle\"])  # [\"pos\", \"angle\", \"vx\", \"v angle\"]\n    parser.add_argument('--render', type=bool, default=False, help='visualize the run or not')\n    # study settings\n    parser.add_argument('--study_name', type=str, default='pg', help='study name: pg, regress, nstep')\n    parser.add_argument('--critic_update_method', type=str, default=\"dataset\",\n                        help='critic update method: batch or dataset')\n    parser.add_argument('--policy_type', type=str, 
default=\"bernoulli\",\n help='policy type: bernoulli, normal, squashedGaussian, discrete')\n parser.add_argument('--team_name', type=str, default='default_team', help='team name')\n # study parameters\n parser.add_argument('--nb_repet', type=int, default=10, help='number of repetitions to get statistics')\n parser.add_argument('--nb_trajs', type=int, default=20, help='number of trajectories in a MC batch')\n parser.add_argument('--update_threshold', type=int, default=1000)\n parser.add_argument('--nb_updates', type=int, default=20, help='number of updates to the network per episode')\n parser.add_argument('--print_interval', type=int, default=20,\n help='the period in episodes to print the average reward over that period')\n # algo settings\n parser.add_argument('--gradients', type=str, nargs='+', default=['sum', 'discount', 'normalize'],\n help='other: baseline, beta')\n parser.add_argument('--critic_estim_method', type=str, default=\"td\",\n help='critic estimation method: mc, td or nstep')\n # learning parameters\n parser.add_argument('--batch_size', type=int, default=64, help='size of a minibatch')\n # Policy parameters\n parser.add_argument('--lr_actor', type=float, default=0.01, help='learning rate of the actor')\n parser.add_argument('--init_alpha', type=float, default=0.001)\n parser.add_argument('--lr_alpha', type=float, default=0.001)\n parser.add_argument('--target_entropy_alpha', type=float, default=-1.0)\n # Critic parameters\n parser.add_argument('--lr_critic', type=float, default=0.01, help='learning rate of the critic')\n parser.add_argument('--gamma', type=float, default=0.99, help='discount factor')\n parser.add_argument('--tau', type=float, default=0.01)\n\n parser.add_argument('--beta', type=float, default=0.1, help='temperature in AWR-like learning')\n parser.add_argument('--nstep', type=int, default=5, help='n in n-step return')\n parser.add_argument('--max_episode_steps', type=int, default=None, help='duration of an episode (step limit)')\n\n args = parser.parse_args()\n return args\n","sub_path":"arguments.py","file_name":"arguments.py","file_ext":"py","file_size_in_byte":4153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"166097753","text":"from structFuntions import *\nfrom lib import *\n\nclass Light(object):\n def __init__(self, color =color(255,255,255),position =V3(0,0,0), intensity = 1):\n self.color = color\n self.position = position\n self.intensity = intensity\n\n\nclass Material(object):\n def __init__(self, diffuse, albedo, spec, refractive_index=0):\n self.diffuse = diffuse\n self.albedo = albedo\n self.spec = spec\n self.refractive_index = refractive_index\n\nclass Intersect(object):\n def __init__(self, distance=0, point=None, normal= None):\n self.distance = distance\n self.point = point\n self.normal = normal\n\n\n# Sphere class\nclass Sphere(object):\n def __init__(self, center, radius, material):\n self.center = center\n self.radius = radius\n self.material = material\n\n def ray_intersect(self, orig, direction):\n L = sub(self.center, orig)\n tca = dot(L, direction)\n l = length(L)\n d2 = l ** 2 - tca ** 2\n\n if d2 > self.radius ** 2:\n return None\n\n thc = (self.radius ** 2 - d2) ** 1 / 2\n t0 = tca - thc\n t1 = tca + thc\n\n if t0 < 0:\n t0 = t1\n\n if t0 < 0:\n return None\n\n hit = sum(orig, mul(direction,t0))\n normal = norm(sub(hit, self.center))\n\n return Intersect(\n distance=t0,\n point = hit,\n normal=normal\n 
)","sub_path":"Utils.py","file_name":"Utils.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"36021735","text":"'''\nCreated on Apr 22, 2019\n\n@author: Jesse Bahr\n\n@\n'''\n \nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\n\n\nclass Square(QWidget):\n \n def __init__(self, x, y, *args, **kwargs):\n super(Square, self).__init__(*args, **kwargs)\n \n self.setFixedSize(QSize(100, 100))\n \n self.x = x\n self.y = y\n \n button = QPushButton('PyQt5 button', self)\n button.setToolTip('This is an example button')\n button.move(self.x, self.y)\n# button.clicked.connect(self.on_click)\n\n sizePolicy = QSizePolicy(QSizePolicy.Maximum, QSizePolicy.Maximum)\n sizePolicy.setHorizontalStretch(1)\n sizePolicy.setVerticalStretch(1)\n #sizePolicy.setHeightForWidth(button.sizePolicy().hasHeightForWidth())\n button.setSizePolicy(sizePolicy)\n \n def paintEvent(self, event):\n qp = QPainter()\n qp.begin(self)\n qp.setPen(QPen(Qt.black, 10, Qt.SolidLine))\n \n qp.drawLine(10, 10, 20, 20)\n qp.end()\n\n\n\n\nclass TicTacToeWindow(QMainWindow):\n \n PLAYER_WIN_SCORE = 10\n COMPUTER_WIN_SCORE = -10\n TIE_SCORE = 0\n \n def __init__(self, *args, **kwargs): \n super(TicTacToeWindow, self).__init__(*args, **kwargs)\n \n # Set the sides of the application\n self.top = 100\n self.left = 100\n self.right = 700\n self.bottom = 500 \n \n w = QWidget() \n \n hBox = QHBoxLayout()\n vBox = QVBoxLayout()\n \n vBox.addLayout(hBox)\n \n self.grid = QGridLayout()\n self.grid.setSpacing(1)\n \n vBox.addLayout(self.grid)\n w.setLayout(vBox)\n self.setCentralWidget(w)\n \n self.init_map()\n \n self.setGeometry(self.top, self.left, self.right, self.bottom)\n self.setWindowTitle(\"Tic Tac Toe\")\n\n self.show() \n \n def init_map(self):\n # Add positions to the map\n for x in range(0, 3):\n for y in range(0, 3):\n w = Square(x, y)\n self.grid.addWidget(w, y, x)\n \n def paintEvent(self, event):\n qp = QPainter()\n qp.begin(self)\n qp.setPen(QPen(Qt.black, 5, Qt.SolidLine))\n \n qp.drawLine(10, self.get_height()/3, self.get_width() - 10, self.get_height()/3)\n qp.drawLine(10, (self.get_height()*2)/3, self.get_width() - 10, (self.get_height()*2)/3)\n qp.drawLine(self.get_width()/3, 10, self.get_width()/3, self.get_height() - 10)\n qp.drawLine((self.get_width()*2)/3, 10, (self.get_width()*2)/3, self.get_height() - 10)\n qp.end()\n \n def get_height(self):\n return self.bottom - self.top\n \n def get_width(self):\n return self.right - self.left\n\n \nif __name__ == '__main__':\n \n app = QApplication([])\n window = TicTacToeWindow()\n app.exec_()","sub_path":"TicTacToePython/TicTacToe.py","file_name":"TicTacToe.py","file_ext":"py","file_size_in_byte":2944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"224401020","text":"# break will exit the loop when encountered\n# continue go to next iteration of loop ignore all the codes below it when encountered\n\n# bellow is a number guessing game you get 10 chances and \n# you have to enter a number and you will be told that you number\n# matches with the number \"gessThisNumber\"\n# or your number is bigger \n# or your number is smaller\n# along with number of attemps left\nguessThisNumber=30\ni=0\nwhile i<10:\n i=i+1;\n a=int(input(\"Enter Your Number : \"))\n if a==guessThisNumber:\n print(\"you found the number in \",i,\" attemps , it was \",guessThisNumber)\n # if this line is encountered then 
the loop will exit via the break command below\n        break\n    if a>guessThisNumber:\n        print(\"you entered a bigger number \",10-i,\" attempts left\")\n        # if this line is encountered then control jumps back to the while\n        # condition and the loop runs again, via the continue command below\n        continue\n    if a<guessThisNumber:\n        print(\"you entered a smaller number \",10-i,\" attempts left\")\n'''tree producer for the H->tau tau analysis'''\n  \n    def declareVariables(self, setup):\n\n        super(H2TauTauTreeProducerMuEle, self).declareVariables(setup)\n\n        self.bookMuon(self.tree, 'l1')\n        self.bookEle (self.tree, 'l2')\n\n        self.bookGenParticle(self.tree, 'l1_gen')\n        self.bookGenParticle(self.tree, 'l2_gen')\n        \n    def process(self, event):\n        \n        super(H2TauTauTreeProducerMuEle, self).process(event)\n\n        muon = event.diLepton.leg1() \n        ele = event.diLepton.leg2()\n\n        self.fillMuon(self.tree, 'l1', muon )\n        self.fillEle (self.tree, 'l2', ele )\n\n        if hasattr(muon, 'genp') : self.fillGenParticle(self.tree, 'l1_gen', muon.genp )\n        if hasattr(ele , 'genp') : self.fillGenParticle(self.tree, 'l2_gen', ele .genp )\n\n        self.fillTree(event)\n","sub_path":"CMGTools/H2TauTau/python/proto/analyzers/H2TauTauTreeProducerMuEle.py","file_name":"H2TauTauTreeProducerMuEle.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"627079289","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n    :mod:`populate`\n    ===========================================================================\n    :synopsis: populates database tables and adds default info\n    :author: Roberto Magán Carrión\n    :contact: roberto.magan@uca.es, rmagan@ugr.es, robertomagan@gmail.com\n    :organization: University of Cádiz, University of Granada\n    :project: I2P Crawler\n    :since: 0.0.1\n\"\"\"\n\n# Node status and type constants\n# NODE STATUS\nNS_COD_ONGOING = 'O'\nNS_COD_FINISHED = 'F'\nNS_COD_PENDING = 'P'\nNS_COD_NOTCRAWLEABLE = 'NC'\n# {type:description}\nNS_DEFAULT_INFO = {NS_COD_ONGOING:'O (Ongoing): The site is being crawled',\n                   NS_COD_FINISHED:'F (Finished): The site has been successfully crawled',\n                   NS_COD_PENDING:'P (Pending): The site is waiting to be launched again. 
Maybe there was a processing error.',\n                   NS_COD_NOTCRAWLEABLE:'NC (Not Crawleable): The site cannot be crawled'}\n\n# NODE TYPE\nNT_COD_I2P = 'I2P'\nNT_COD_TOR = 'TOR'\nNT_COD_SURFACE = 'WEB'\n#{type:description}\nNT_DEFAULT_INFO = {NT_COD_I2P:'I2P eepsite',\n                   NT_COD_TOR:'TOR onion site',\n                   NT_COD_SURFACE:'Surface web site'}","sub_path":"crawler/i2p/database/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"148767684","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''\nScript to gather historical statistics or update statistics for a given day\nwith info on whether an account became autoconfirmed in the first 30 days.\n\nCopyright (c) 2017 Morten Wang\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n'''\n\nimport re\nimport logging\nimport datetime as dt\n\nfrom collections import namedtuple\n\nimport db\n\n## Named tuple for a data point for a user registration, with a 0/1\n## flag for whether the account was autoconfirmed, and the timestamp\n## when they were autoconfirmed (either 4 days after registration or\n## the time of their 10th edit if > 4 days).\nDataPoint = namedtuple('DataPoint', ['userid', 'autoconfirmed',\n                                     'ac_timestamp'])\n\n## Batch size when gathering batches of data from the database\nbatch_size = 100\n\ndef gather_data(db_conn, user_ids):\n    '''\n    Gather data about the autoconfirmed status in the account's first 30 days\n    for the given user IDs. Returns a list of data points of users in no\n    particular order. 
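Each data point carries the user ID, a 0/1 autoconfirmed flag, and the timestamp at which the account became autoconfirmed. 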
NOTE: This method assumes that the user made at least\n ten edits in the first 30 days.\n\n :param db_conn: Database connection to use for queries\n :type db_conn: MySQLdb.Connection\n\n :param user_ids: list of user IDs to gather data for\n :type user_ids: list\n '''\n\n ## Get the user's tenth edit\n tenth_edit_query = '''SELECT user_id,\n STR_TO_DATE(user_registration,\n \"%Y%m%d%H%i%S\") AS reg_date,\n DATE_ADD(\n STR_TO_DATE(user_registration,\n \"%Y%m%d%H%i%S\"),\n INTERVAL 4 DAY) AS reg_plus_4,\n rev_id,\n STR_TO_DATE(rev_timestamp,\n \"%Y%m%d%H%i%S\") AS rev_timestamp\n FROM \n ((SELECT user_id, user_registration,\n rev_id, rev_timestamp\n FROM revision_userindex r\n JOIN user\n ON rev_user=user_id\n WHERE user_id={user_id}\n AND rev_timestamp > user_registration\n AND rev_timestamp < DATE_FORMAT(\n DATE_ADD(\n STR_TO_DATE(user_registration,\n \"%Y%m%d%H%i%S\"),\n INTERVAL 30 DAY),\n \"%Y%m%d%H%i%S\")\n )\n UNION\n (SELECT user_id, user_registration,\n ar_rev_id AS rev_id,\n ar_timestamp AS rev_timestamp\n FROM archive_userindex\n JOIN user\n ON ar_user=user_id\n WHERE user_id={user_id}\n AND ar_timestamp > user_registration\n AND ar_timestamp < DATE_FORMAT(\n DATE_ADD(\n STR_TO_DATE(user_registration,\n \"%Y%m%d%H%i%S\"),\n INTERVAL 30 DAY),\n \"%Y%m%d%H%i%S\")\n )) AS user_activity\n ORDER BY rev_timestamp ASC\n LIMIT 10,1'''\n\n ## The data points we'll return\n datapoints = []\n\n with db.cursor(db_conn, 'dict') as db_cursor:\n for user_id in user_ids:\n db_cursor.execute(tenth_edit_query.format(user_id=user_id))\n ac_timestamp = 0\n is_ac = 1 # ref assumption of 10 edits in 30 days above\n \n for row in db_cursor:\n ## Default is that they were autoconfirmed when they made\n ## their tenth edit:\n ac_timestamp = row['rev_timestamp']\n\n fourth_day_timestamp = row['reg_plus_4']\n\n ## If the tenth edit occurred less than four days after\n ## registration, we use the fourth day instead.\n if ac_timestamp < fourth_day_timestamp:\n ac_timestamp = fourth_day_timestamp\n\n datapoints.append(DataPoint(user_id, is_ac, ac_timestamp))\n\n return(datapoints)\n\ndef gather_historic(local_db, wiki_db, start_date, end_date=None, step=7):\n '''\n Gather historic data over a larger timespan covering a given number\n of days at a time. 
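Eligible user IDs are read from the local account_stats table, and their edit activity is then checked against the replicated wiki database. 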
Returns a list of DataPoint named tuples in no\n specific order.\n\n :param local_db: Database connection to the local database\n :type local_db: MySQLdb.Connection\n\n :param wiki_db: Database connection to the replicated Wikipedia database\n :type wiki_db: MySQLdb.Connection\n\n :param start_date: First date to gather data for\n :type start_date: datetime.date\n\n :param end_date: Last date to gather data for\n :type end_date: datetime.date\n\n :param step: Number of days to gather data for in each iteration\n :type step: int\n '''\n\n ## Get user IDs of users in our time span that have made at least\n ## 10 edits in the first 30 days (otherwise they cannot be autoconfirmed)\n userid_query = '''SELECT as_userid\n FROM account_stats\n WHERE as_reg_timestamp >= %(start)s\n AND as_reg_timestamp < %(end)s\n AND as_num_edits_30 >= 10'''\n \n cur_date = start_date\n delta_days = dt.timedelta(days=step)\n\n datapoints = []\n\n if not end_date:\n end_date = dt.date.today()\n \n while cur_date < end_date:\n stop_date = cur_date + delta_days\n if stop_date > end_date:\n stop_date = end_date\n\n logging.info('gathering data from {} to {}'.format(cur_date, stop_date))\n\n userids = []\n with db.cursor(local_db, 'dict') as db_cursor:\n db_cursor.execute(userid_query, {'start': cur_date,\n 'end': stop_date})\n for row in db_cursor:\n userids.append(row['as_userid'])\n\n logging.info('checking activity for {} users'.format(len(userids)))\n \n datapoints.extend(gather_data(wiki_db, userids))\n\n cur_date = stop_date\n\n return(datapoints)\n\ndef main():\n '''\n Run some tests.\n '''\n logging.basicConfig(level=logging.INFO)\n \n db_conn = db.connect('enwiki.labsdb', 'enwiki_p', '~/replica.my.cnf')\n local_db_conn = db.connect('tools.labsdb', 's53463__actrial_p',\n '~/replica.my.cnf')\n\n user_ids = [7990889,8625927,8625934,8625972,8626037,8626075,8626125,\n 8626169,8626175,8626266,8626337,8626355,8626362,8626409,\n 8626413,8626447,8626483,8626501,8626526,8626533,8626591,\n 8626592,8626605,8626648,8626711,8626713]\n \n # datapoints = gather_data(db_conn, user_ids)\n datapoints = gather_historic(local_db_conn, db_conn,\n dt.date(2009,1,1),\n dt.date(2009,2,1))\n print('got {} data points'.format(len(datapoints)))\n print(datapoints[:5])\n print(datapoints[-5:])\n \n return()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"python/autoconfirmed.py","file_name":"autoconfirmed.py","file_ext":"py","file_size_in_byte":8643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"256722645","text":"\n\n\n\n\n\n\n\n# n! = n*(n-1)*(n-2)*...\n# 5! = 5*4*3*2*1\n# 10! 
= 10*9*8...\n# def factorial(n):\n# output = 1\n# while n > 0:\n# output *= n\n# n -= 1\n# return output\n\n\n# def factorial(n, depth):\n# print('\\t'*depth, 'factorial(',n,')')\n# if n == 1:\n# print('\\t'*depth, 'returning 1')\n# return 1\n# result = n*factorial(n-1, depth+1)\n# print('\\t'*depth, 'returning', result)\n# return result\n\n# print(factorial(5, 1))\n\n# def say_hello():\n# print('hello')\n# main()\n\n# def main():\n# x = 5\n# say_hello()\n\n# main()\n\n\n\n\n# def c():\n# message = 'c'\n# print(message)\n# print('returning from c')\n\n# def b():\n# message = 'b'\n# print(message)\n# c()\n# print('returning from b')\n\n# def a():\n# message = 'a'\n# print(message)\n# b()\n# print('returning from a')\n\n# a()\n# print('done')\n\n\n\n# def peaks(llama):\n# data = 'hello'\n# print(data)\n\n# data = [1, 2, 3]\n# print(data)\n# peaks(data)\n\n\n\ndef get_dimensions():\n return 5, 8\n\n# d = get_dimensions()\n# print(d)\n# width = d[0]\n# height = d[1]\n\nwidth, height = get_dimensions()\nprint(width)\nprint(height)\n\n\nfruits = ['apples', 'bananas', 'pears']\n# swap the elements at index 0 and 1\ntemp = fruits[0]\nfruits[0] = fruits[1]\nfruits[1] = temp\n\n\nfruits[0], fruits[1] = fruits[1], fruits[0]\n\nprint(fruits)\n\n\n\n\n\n\n\n","sub_path":"1 Python/solutions/demo_functions.py","file_name":"demo_functions.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"344220957","text":"from aiohttp import web\nimport aiohttp_jinja2\nroutes = web.RouteTableDef()\nfrom .utils import Etich, Code\n\n\n@routes.get('/')\n@aiohttp_jinja2.template('index.html')\nasync def index(request):\n return {}\n\n@routes.post('/generate_code')\nasync def gen_code(request):\n num = await request.json()\n Code(num['number']).save_code()\n return web.Response(text=f\"{num['number']}\")\n\n@routes.post('/generate_label')\nasync def gen_etich(request):\n data = await request.json()\n Etich(**data).save_etich()\n return web.Response(text=f\"{data['number']}\")\n","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"232374643","text":"###################################################################### \r\n# Edit the following function definition, replacing the words\r\n# 'name' with your name and 'hawkid' with your hawkid.\r\n# \r\n# Note: Your hawkid is the login name you use to access ICON, and not\r\n# your firstname-lastname@uiowa.edu email address.\r\n# \r\n# def hawkid():\r\n# return([\"Caglar Koylu\", \"ckoylu\"])\r\n###################################################################### \r\ndef hawkid():\r\n return([\"Eric Mykleby\", \"emykleby\"])\r\n\r\nimport arcpy\r\nimport sys, os\r\n\r\n###################################################################### \r\n# Problem 1 (20 Points)\r\n#\r\n# This function reads all the feature classes in a workspace, and\r\n# prints the number of feature classes by each shape type. 
For example,\r\n# polygon: 3, polyline: 2, point: 4\r\n\r\n###################################################################### \r\ndef printNumberOfFeatureClassesByShapeType(workspace):\r\n if arcpy.Exists(workspace): # checks if this is a valid workspace\r\n arcpy.env.workspace = workspace # sets workspace\r\n fclist = arcpy.ListFeatureClasses() # gets list of all FCs in the workspace\r\n shapeList = [] # creates an empty list to fill during for loop\r\n for fc in fclist: # iterates through each fc\r\n desc = arcpy.Describe(fc).shapeType # creates a description variable of the fc\r\n shapeList.append(desc) # adds shape types to list\r\n for shape in set(shapeList): # iterates through each shape type\r\n shapeCount = shapeList.count(shape) # counts each shape by type\r\n print(\"{}: {}\".format(shape,shapeCount)) # prints the shape type and amount\r\n else:\r\n print(\"Invalid workspace\")\r\n\r\n###################################################################### \r\n# Problem 2 (20 Points)\r\n#\r\n# This function reads all the feature classes in a workspace, and\r\n# prints the coordinate systems for each file\r\n\r\n###################################################################### \r\ndef printCoordinateSystems(workspace):\r\n if arcpy.Exists(workspace): # check for valid workspace\r\n arcpy.env.workspace = workspace # sets workspace\r\n fclist = arcpy.ListFeatureClasses() # gets list of all FCs in the workspace\r\n for fc in fclist: # iterates through each fc\r\n spatial_ref = arcpy.Describe(fc).spatialReference # creates a description variable for each fc along with the spatial reference\r\n if spatial_ref.name == \"Unknown\": # loops through each fc and prints feature class and spatial reference\r\n print(\"{} has an unknown spatial reference\".format(fc))\r\n else:\r\n print(\"The coordinate system of {} is {}\".format(fc, spatial_ref.name))\r\n\r\n###################################################################### \r\n# Problem 3 (60 Points)\r\n#\r\n# Given two feature classes in a workspace:\r\n# check whether their coordinate systems are\r\n# the same, and if not convert the projection of one of them to the other.\r\n# If one of them has a geographic coordinate system (GCS) and the other has\r\n# a projected coordinate system (PCS), then convert the GCS to PCS. 
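# A small illustration of the check Problem 3 hinges on, using the same
# arcpy calls as the solutions in this file (the feature class names and
# the target spatial reference below are hypothetical):
#
#   sr = arcpy.Describe("roads.shp").spatialReference
#   sr.type   # 'Geographic' for a GCS, 'Projected' for a PCS
#   sr.name   # comparing names detects identical coordinate systems
#   arcpy.Project_management("roads.shp", "roads_projected", target_sr)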
\r\n\r\n###################################################################### \r\ndef autoConvertProjections(fc1, fc2, workspace):\r\n if arcpy.Exists(workspace): # check for valid workspace\r\n arcpy.env.workspace = workspace # sets workspace\r\n spatial_ref1 = arcpy.Describe(fc1).spatialReference # creates a description variable for each fc along with the spatial reference\r\n spatial_ref2 = arcpy.Describe(fc2).spatialReference\r\n if spatial_ref1.name == spatial_ref2.name: # if statement to determine if spatial reference names are the same\r\n print(\"Coordinate systems are the same\")\r\n elif (spatial_ref1.type == \"Geographic\") and (spatial_ref2.type == \"Geographic\"): # statements to determine if the spatial reference of each fc are the same type\r\n print(\"Coordinate systems are the same type\")\r\n elif (spatial_ref1.type == \"Projected\") and (spatial_ref2.type == \"Projected\"): \r\n print(\"Coordinate systems are the same type\")\r\n elif (spatial_ref1.type == \"Projected\") and (spatial_ref2.type == \"Geographic\"):\r\n arcpy.Project_management(fc2, \"fc2_proj\", spatial_ref1) # fc2 has the GCS, so project it into fc1's PCS\r\n print(\"New coordinate type for fc2 is Projected\")\r\n elif (spatial_ref1.type == \"Geographic\") and (spatial_ref2.type == \"Projected\"): # if they are not the same type, then whichever fc is not projected is changed to projected\r\n arcpy.Project_management(fc1, \"fc1_proj\", spatial_ref2) # fc1 has the GCS, so project it into fc2's PCS\r\n print(\"New coordinate type for fc1 is Projected\")\r\n \r\n######################################################################\r\n# MAKE NO CHANGES BEYOND THIS POINT.\r\n######################################################################\r\nif __name__ == '__main__' and hawkid()[1] == \"hawkid\":\r\n print('### Error: YOU MUST provide your hawkid in the hawkid() function.')\r\n","sub_path":"Assignments/emykleby_assignment_3makeup_corr.py","file_name":"emykleby_assignment_3makeup_corr.py","file_ext":"py","file_size_in_byte":5047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"607868000","text":"\n\nimport requests\n\n\n# res = requests.get(\n# url=\"http://w.coral.qq.com/article/comment/up/to/6318381954794950326?targetid=2142180354\"\n# )\n#\n# print(res.text)\n\n\na = \"/1/courseware\"\nif \"\\\\\" in a:\n split_str = \"\\\\\"\nelse:\n split_str = \"/\"\nres = a.split(split_str)\nprint(res)\nprint(a)","sub_path":"20170929/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"18128051","text":"# -*- coding: cp936 -*-\r\n\"\"\"\r\nCreated on Thu Jul 1 09:17:42 2021\r\n\r\n@author: siddh\r\n\"\"\"\r\nimport dash\r\nimport numpy as np\r\nimport dash_core_components as dcc\r\nimport dash_html_components as html\r\nimport pandas as pd\r\nimport plotly.graph_objs as go\r\nfrom dash.dependencies import Input, Output\r\nfrom sklearn.preprocessing import LabelEncoder\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.ensemble import RandomForestRegressor\r\nfrom sklearn.metrics import r2_score\r\nfrom statsmodels.tsa.arima.model import ARIMA\r\nfrom sklearn.metrics import mean_squared_error\r\nimport warnings\r\nexternal_scripts = ['/assets/style.css']\r\nwarnings.filterwarnings('ignore', 'statsmodels.tsa.arima_model.ARMA',\r\n FutureWarning)\r\nwarnings.filterwarnings('ignore', 'statsmodels.tsa.arima_model.ARIMA',\r\n FutureWarning)\r\n\r\n\r\n\r\napp = 
dash.Dash(__name__,external_scripts=external_scripts)\r\nserver = app.server\r\n\r\ndf = pd.read_csv(\"stock_data.csv\")\r\ndf.columns\r\ndf['Stock'].unique()\r\napp.layout = html.Div([\r\n # Setting the main title of the Dashboard\r\n html.H1(\"NSE Data Analysis\", style={\"textAlign\": \"center\"}),\r\n # Dividing the dashboard in tabs\r\n dcc.Tabs(id=\"tabs\", children=[\r\n # Defining the layout of the first Tab\r\n dcc.Tab(label='Stock Prices', children=[\r\n html.Div([\r\n html.H1(\"Stocks High vs Lows\", \r\n style={'textAlign': 'center'}),\r\n # Adding the first dropdown menu and the subsequent time-series graph\r\n dcc.Dropdown(id='my-dropdown',\r\n options=[{'label': 'ICICIBANK', 'value': 'ICICI'},\r\n {'label': 'SBIBANK','value': 'SBIN'}, \r\n {'label': 'HDFCBANK', 'value': 'HDFC'}, \r\n {'label': 'AXISBANK','value': 'AXIS'}], \r\n multi=True,value=['HDFC'],\r\n style={\"display\": \"block\", \"margin-left\": \"auto\", \r\n \"margin-right\": \"auto\", \"width\": \"60%\"}),\r\n dcc.Graph(id='highlow'),\r\n html.H1(\"Market Volume Compare\", style={'textAlign': 'center'}),\r\n # Adding the second dropdown menu and the subsequent time-series graph\r\n dcc.Dropdown(id='my-dropdown2',\r\n options=[{'label': 'ICICIBANK', 'value': 'ICICI'},\r\n {'label': 'SBIBANK','value': 'SBIN'}, \r\n {'label': 'HDFCBANK', 'value': 'HDFC'},\r\n {'label': 'AXISBANK','value': 'AXIS'}], \r\n multi=True,value=['HDFC'],\r\n style={\"display\": \"block\", \"margin-left\": \"auto\", \r\n \"margin-right\": \"auto\", \"width\": \"60%\"}),\r\n dcc.Graph(id='volume')\r\n ], className=\"container\"),\r\n ]),\r\n dcc.Tab(label='Machine Learning', children=[\r\n html.Div([html.H1(\"Machine Learning\", style={\"textAlign\": \"center\"}), html.H2(\"ARIMA Time Series Prediction\", style={\"textAlign\": \"left\"}),\r\n dcc.Dropdown(id='my-dropdowntest',options=[{'label': 'ICICIBANK', 'value': 'ICICI'},\r\n {'label': 'SBIBANK','value': 'SBIN'}, \r\n {'label': 'HDFCBANK', 'value': 'HDFC'},\r\n {'label': 'AXISBANK','value': 'AXIS'}],\r\n style={\"display\": \"block\", \"margin-left\": \"auto\", \"margin-right\": \"auto\", \"width\": \"50%\"}),\r\n dcc.RadioItems(id=\"radiopred\", value=\"High\", labelStyle={'display': 'inline-block', 'padding': 10},\r\n options=[{'label': \"High\", 'value': \"High\"}, {'label': \"Low\", 'value': \"Low\"},\r\n {'label': \"Volume\", 'value': \"Volume\"}], style={'textAlign': \"center\", }),\r\n dcc.Graph(id='traintest'), dcc.Graph(id='preds'),\r\n ], className=\"container\"),\r\n ]),\r\n]),\r\n])\r\n\r\n\r\n\r\n@app.callback(Output('highlow', 'figure'),\r\n [Input('my-dropdown', 'value')])\r\ndef update_graph(selected_dropdown):\r\n dropdown = {\"ICICI\": \"ICICIBANK\",\"SBIN\": \"SBIBANK\",\"HDFC\": \"HDFCBANK\",\"AXIS\": \"AXISBANK\",}\r\n trace1 = []\r\n trace2 = []\r\n for stock in selected_dropdown:\r\n trace1.append(\r\n go.Scatter(x=df[df[\"Stock\"] == stock][\"Date\"],\r\n y=df[df[\"Stock\"] == stock][\"High\"],\r\n mode='lines', opacity=0.7, \r\n name=f'High {dropdown[stock]}',textposition='bottom center'))\r\n trace2.append(\r\n go.Scatter(x=df[df[\"Stock\"] == stock][\"Date\"],\r\n y=df[df[\"Stock\"] == stock][\"Low\"],\r\n mode='lines', opacity=0.6,\r\n name=f'Low {dropdown[stock]}',textposition='bottom center'))\r\n traces = [trace1, trace2]\r\n data = [val for sublist in traces for val in sublist]\r\n figure = {'data': data,\r\n 'layout': go.Layout(colorway=[\"#5E0DAC\", '#FF4F00', '#375CB1', \r\n '#FF7400', '#FFF400', '#FF0056'],\r\n height=600,\r\n title=f\"High and Low 
Prices for {', '.join(str(dropdown[i]) for i in selected_dropdown)} Over Time\",\r\n xaxis={\"title\":\"Date\",\r\n 'rangeselector': {'buttons': list([{'count': 1, 'label': '1M', \r\n 'step': 'month', \r\n 'stepmode': 'backward'},\r\n {'count': 6, 'label': '6M', \r\n 'step': 'month', \r\n 'stepmode': 'backward'},\r\n {'step': 'all'}])},\r\n 'rangeslider': {'visible': True}, 'type': 'date'},\r\n yaxis={\"title\":\"Price (INR)\"})}\r\n return figure\r\n\r\n\r\n@app.callback(Output('volume', 'figure'),\r\n [Input('my-dropdown2', 'value')])\r\ndef update_graph(selected_dropdown_value):\r\n dropdown = {\"ICICI\": \"ICICIBANK\",\"SBIN\": \"SBIBANK\",\"HDFC\": \"HDFCBANK\",\"AXIS\": \"AXISBANK\",}\r\n trace1 = []\r\n for stock in selected_dropdown_value:\r\n trace1.append(\r\n go.Scatter(x=df[df[\"Stock\"] == stock][\"Date\"],\r\n y=df[df[\"Stock\"] == stock][\"Volume\"],\r\n mode='lines', opacity=0.7,\r\n name=f'Volume {dropdown[stock]}', textposition='bottom center'))\r\n traces = [trace1]\r\n data = [val for sublist in traces for val in sublist]\r\n figure = {'data': data, \r\n 'layout': go.Layout(colorway=[\"#5E0DAC\", '#FF4F00', '#375CB1', \r\n '#FF7400', '#FFF400', '#FF0056'],\r\n height=600,\r\n title=f\"Market Volume for {', '.join(str(dropdown[i]) for i in selected_dropdown_value)} Over Time\",\r\n xaxis={\"title\":\"Date\",\r\n 'rangeselector': {'buttons': list([{'count': 1, 'label': '1M', \r\n 'step': 'month', \r\n 'stepmode': 'backward'},\r\n {'count': 6, 'label': '6M',\r\n 'step': 'month', \r\n 'stepmode': 'backward'},\r\n {'step': 'all'}])},\r\n 'rangeslider': {'visible': True}, 'type': 'date'},\r\n yaxis={\"title\":\"Transactions Volume\"})}\r\n return figure\r\n\r\n@app.callback(Output('traintest', 'figure'),\r\n [Input('my-dropdowntest', 'value'), Input(\"radiopred\", \"value\"),])\r\ndef update_graph(stock , radioval):\r\n dropdown = {\"ICICI\": \"ICICIBANK\",\"SBIN\": \"SBIBANK\",\"HDFC\": \"HDFCBANK\",\"AXIS\": \"AXISBANK\",}\r\n radio = {\"High\": \"High Prices\", \"Low\": \"Low Prices\", \"Volume\": \"Market Volume\", }\r\n trace1 = []\r\n trace2 = []\r\n train_data = df[df['Stock'] == stock][-100:][0:int(100 * 0.8)]\r\n test_data = df[df['Stock'] == stock][-100:][int(100 * 0.8):]\r\n if (stock == None):\r\n trace1.append(\r\n go.Scatter(x= [0], y= [0],\r\n mode='markers', opacity=0.7, textposition='bottom center'))\r\n traces = [trace1]\r\n data = [val for sublist in traces for val in sublist]\r\n figure = {'data': data,\r\n 'layout': go.Layout(colorway=['#FF7400', '#FFF400', '#FF0056'],\r\n height=600, title=f\"{radio[radioval]}\",\r\n paper_bgcolor='rgba(0,0,0,0)',\r\n plot_bgcolor='rgba(0,0,0,0)')}\r\n else:\r\n trace1.append(go.Scatter(x=train_data['Date'],y=train_data[radioval], mode='lines',\r\n opacity=0.7,name=f'Training Set',textposition='bottom center'))\r\n trace2.append(go.Scatter(x=test_data['Date'],y=test_data[radioval],mode='lines',\r\n opacity=0.6,name=f'Test Set',textposition='bottom center'))\r\n traces = [trace1, trace2]\r\n data = [val for sublist in traces for val in sublist]\r\n figure = {'data': data,\r\n 'layout': go.Layout(colorway=[\"#5E0DAC\", '#FF4F00', '#375CB1', '#FF7400', '#FFF400', '#FF0056'],\r\n height=600,title=f\"{radio[radioval]} Train-Test Sets for {dropdown[stock]}\",\r\n xaxis={\"title\":\"Date\",\r\n 'rangeselector': {'buttons': list([{'count': 1, 'label': '1M', 'step': 'month', 'stepmode': 'backward'},\r\n {'count': 6, 'label': '6M', 'step': 'month', 'stepmode': 'backward'},\r\n {'step': 'all'}])},\r\n 'rangeslider': {'visible': 
True}, 'type': 'date'},yaxis={\"title\":\"Price (USD)\"}, paper_bgcolor='rgba(0,0,0,0)',\r\n plot_bgcolor='rgba(0,0,0,0)')}\r\n return figure\r\n\r\n\r\n@app.callback(Output('preds', 'figure'),\r\n [Input('my-dropdowntest', 'value'), Input(\"radiopred\", \"value\"),])\r\ndef update_graph(stock, radioval):\r\n dropdown = {\"ICICI\": \"ICICIBANK\",\"SBIN\": \"SBIBANK\",\"HDFC\": \"HDFCBANK\",\"AXIS\": \"AXISBANK\",}\r\n radio = {\"High\": \"High Prices\", \"Low\": \"Low Prices\", \"Volume\": \"Market Volume\", }\r\n dropdown = {\"ICICI\": \"ICICIBANK\",\"SBIN\": \"SBIBANK\",\"HDFC\": \"HDFCBANK\",\"AXIS\": \"AXISBANK\",}\r\n trace1 = []\r\n trace2 = []\r\n if (stock == None):\r\n trace1.append(\r\n go.Scatter(x= [0], y= [0],\r\n mode='markers', opacity=0.7, textposition='bottom center'))\r\n traces = [trace1]\r\n data = [val for sublist in traces for val in sublist]\r\n figure = {'data': data,\r\n 'layout': go.Layout(colorway=['#FF7400', '#FFF400', '#FF0056'],\r\n height=600, title=f\"{radio[radioval]}\",\r\n paper_bgcolor='rgba(0,0,0,0)',\r\n plot_bgcolor='rgba(0,0,0,0)')}\r\n else:\r\n test_data = df[df['Stock'] == stock][-100:][int(100 * 0.8):]\r\n train_data = df[df['Stock'] == stock][-100:][0:int(100 * 0.8)]\r\n train_ar = train_data[radioval].values\r\n test_ar = test_data[radioval].values\r\n history = [x for x in train_ar]\r\n predictions = list()\r\n for t in range(len(test_ar)):\r\n model = ARIMA(history, order=(3, 1, 0))\r\n model_fit = model.fit()\r\n output = model_fit.forecast()\r\n yhat = output[0]\r\n predictions.append(yhat)\r\n obs = test_ar[t]\r\n history.append(obs)\r\n error = mean_squared_error(test_ar, predictions)\r\n trace1.append(go.Scatter(x=test_data['Date'],y=test_data['High'],mode='lines',\r\n opacity=0.6,name=f'Actual Series',textposition='bottom center'))\r\n trace2.append(go.Scatter(x=test_data['Date'],y= predictions, mode='lines',\r\n opacity=0.7,name=f'Predicted Series (MSE: {error})',textposition='bottom center'))\r\n traces = [trace1, trace2]\r\n data = [val for sublist in traces for val in sublist]\r\n figure = {'data': data,\r\n 'layout': go.Layout(colorway=[\"#5E0DAC\", '#FF4F00', '#375CB1', '#FF7400', '#FFF400', '#FF0056'],\r\n height=600,title=f\"{radio[radioval]} ARIMA Predictions vs Actual for {dropdown[stock]}\",\r\n xaxis={\"title\":\"Date\",\r\n 'rangeselector': {'buttons': list([{'count': 1, 'label': '1M', 'step': 'month', 'stepmode': 'backward'},\r\n {'count': 6, 'label': '6M', 'step': 'month', 'stepmode': 'backward'},\r\n {'step': 'all'}])},\r\n 'rangeslider': {'visible': True}, 'type': 'date'},yaxis={\"title\":\"Price (INR)\"}, paper_bgcolor='rgba(0,0,0,0)',\r\n plot_bgcolor='rgba(0,0,0,0)')}\r\n return figure\r\n\r\nif __name__ == '__main__':\r\n app.run_server(debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":13377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"519151658","text":"import sys\n\n\n## Simple script that will run through the ML data files and classify data\n## into trianary sets with a gap such that\n## -1 :: rating < -gap/2\n## 1 :: rating > gap/2\n## 0 :: rating > -gap/2 && rating < gap/2\n\n\n## Assuming the data has already been normalized\n##USAGE ::\n## $ python3 trinary_classify.py filename gap_value\n##\n## this will not change original file \n\n\nfilename = sys.argv[1]\ngap = float(sys.argv[2])\n\nf = open(filename, 'r')\n\nf_data_tri = open(filename+\".trianary\",'w')\n\nfirstline = f.readline()\nfor line in f:\n 
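# A standalone sketch of the walk-forward pattern used in the `preds`
# callback above: refit ARIMA(3,1,0) on an expanding history and forecast
# one step ahead each time. The toy series below is made up.
from statsmodels.tsa.arima.model import ARIMA
from sklearn.metrics import mean_squared_error

series = [112.0, 118.0, 132.0, 129.0, 121.0, 135.0, 148.0, 148.0, 136.0,
          119.0, 104.0, 118.0, 115.0, 126.0]
train, test = series[:10], series[10:]
history = list(train)
predictions = []
for actual in test:
    fit = ARIMA(history, order=(3, 1, 0)).fit()
    predictions.append(fit.forecast()[0])
    history.append(actual)  # expand the history with the observed value
print(mean_squared_error(test, predictions))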
(user,movieid,rating,ts)=line.split(',')\n\n if float(rating)>gap/2:\n tri_rating = \"1.0\"\n elif float(rating)<-gap/2:\n tri_rating = \"-1.0\"\n else:\n tri_rating = \"0.0\"\n \n \n print(user + \",\" + movieid + \",\" + tri_rating +\n \",\" + ts, file=f_data_tri, end='')\n\n\nf.close()\nf_data_tri.close()\n","sub_path":"trinary_classify.py","file_name":"trinary_classify.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"229332159","text":"import calSimRank\r\nimport GenPosNegPairsbyRandom\r\nimport networkx as nx\r\nimport numpy as np\r\n\r\nfh = open(\"/home/ypei1/Embedding/NewData/CA-GrQc.edgelist\", 'rb')\r\nG = nx.read_edgelist(fh, nodetype=int)\r\nfh.close()\r\n\r\nnumOfNodes = len(G.nodes())\r\n\r\nfout = open('/home/ypei1/Embedding/NewData/CA-GrQc.simrank.sim', 'w')\r\n\r\nsimDict = calSimRank.simrank(G)\r\nsimilarity = [[0.0 for i in range(numOfNodes)] for j in range(numOfNodes)]\r\nfor key, val in simDict.items():\r\n\tfor k, v in val.items():\r\n\t\tsimilarity[key][k] = v\r\n\t\tfout.write(str(v)+' ')\r\n\tfout.write('\\n')\r\n\r\nfout.close()\r\n\r\n#training_data = np.asarray(GenPosNegPairsbyRandom.genPosNegPairs(similarity, 5, 1), dtype=np.uint32)\r\n#print training_data\r\n\r\n#print training_data[:,4]","sub_path":"struc2gauss/SimRank/ham.py","file_name":"ham.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"543367316","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright (c) 2020 PyLadies, Lorena Mesa\n\nfrom fabric import Connection, task\nimport patchwork.transfers\n\ndef setup_context(ctx):\n ctx.host = '81.28.232.189'\n ctx.user = 'u52703'\n ctx.connect_kwargs = {'key_filename': ['~/.ssh/pyladies.com']}\n return ctx\n\n@task\ndef deploy(ctx):\n ctx = setup_context(ctx)\n with Connection(host=ctx.host, user=ctx.user, connect_kwargs=ctx.connect_kwargs) as connection:\n # Rsync WWW\n patchwork.transfers.rsync(connection, 'www/_site/', 'www/', exclude='.git', rsync_opts='-ua')\n","sub_path":"fabfile.py","file_name":"fabfile.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"258321574","text":"# -*- coding: utf-8 -*-\n\nimport mimetypes\nfrom flask import Response\nimport pandas as pd\nfrom werkzeug.datastructures import Headers\n\n\ndef column_labels_dict(model):\n d = {}\n for c in model.__table__.columns:\n if c.info.get('T'):\n d[c.name] = c.info.get('T')\n else:\n d[c.name] = c.name\n return d\n\n\ndef orm_query_to_df(db, query_results, exclude_id=False,\n table_name_in_cols=True, columns=None):\n '''Transforms sqlalchemy query results into pandas DataFrame.\n params:\n query_results: SQLAlchemy query\n exclude_id: if True - excludes from dataframe all columns\n with \"id\" in name, default = False\n table_name_in_cols: if true prepends table name\n columns: if true selects only columns from the list\n '''\n first_row = query_results[0]\n # Checks if there is more than one table in query_results\n if not isinstance(first_row, tuple):\n table_name = first_row.__table__.name\n if not columns:\n table_columns = [c.name for c in first_row.__table__.columns]\n else:\n table_columns = columns\n df = pd.DataFrame([row.__dict__ for row in query_results],\n columns=table_columns)\n if table_name_in_cols:\n df.columns = ['{}_{}'.format(table_name, c)\n for c in 
df.columns]\n if exclude_id:\n cols = df.columns[df.columns.map(lambda x: 'id' not in x)]\n return df[cols]\n return df\n else:\n query_results = zip(*query_results)\n data = [orm_query_to_df(db, q, exclude_id, table_name_in_cols)\n # checks whether the record is a sqlalchemy object\n if isinstance(q[0], db.Model)\n # if the record is a plain value instead\n else pd.DataFrame(list(q))\n for q in query_results]\n return pd.concat(data, axis=1)\n\n\ndef df_to_html_table(df, index=False):\n df = df.to_html(\n classes=[\"table table-striped table-hover\", \"text-left\"],\n float_format=lambda x: '{:,.2f}'.format(x), justify='right',\n index=index)\n return df\n\n\ndef create_select_field_choices(list):\n \"\"\"Creates choices for WTF SelectField from list.\n \"\"\"\n return [(item, item) for item in list]\n\n\ndef set_form_fields_list(model, exclude_fields=['id']):\n '''\n Creates list of fields to be used in forms. By default 'id' is\n excluded.\n params:\n model: db.Model\n exclude_fields: list of fields from model to be excluded from form.\n '''\n\n fields = [c.name for c in model.__table__.columns]\n form_fields_list = set(fields) - set(exclude_fields)\n return list(form_fields_list)\n\n\ndef create_file_response(filename, data):\n response = Response()\n response.status_code = 200\n response.data = data\n mimetype_tuple = mimetypes.guess_type(filename)\n\n #HTTP headers for forcing file download\n response_headers = Headers({\n 'Pragma': \"public\", # required,\n 'Expires': '0',\n 'Cache-Control': 'must-revalidate, post-check=0, pre-check=0, private', # 'private' is required for certain browsers,\n 'Content-Type': mimetype_tuple[0],\n 'Content-Disposition': 'attachment; filename=\\\"%s\\\";' % filename,\n 'Content-Transfer-Encoding': 'binary',\n 'Content-Length': len(response.data)\n })\n\n if mimetype_tuple[1] is not None:\n response_headers['Content-Encoding'] = mimetype_tuple[1]\n\n response.headers = response_headers\n\n #as per jquery.fileDownload.js requirements\n response.set_cookie('fileDownload', 'true', path='/')\n\n ################################\n # Return the response\n #################################\n return response\n\n","sub_path":"fibweb/helpers/dbhelpers.py","file_name":"dbhelpers.py","file_ext":"py","file_size_in_byte":3860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"652193957","text":"from utils import get_root_path\nfrom distance import hungarian_ged\nimport pickle\nimport networkx as nx\nfrom random import randint\nimport numpy as np\nfrom time import time\nfrom glob import glob\n\n\nclass Data(object):\n def __init__(self, train):\n name = self.__class__.__name__ + '_'\n self.train = True if train else False\n if train:\n name += 'train'\n else:\n name += 'test'\n name += self.name_suffix()\n self.name = name\n sfn = self.save_filename()\n try:\n self.load()\n print('%s loaded from %s' % (name, sfn))\n except Exception as e:\n self.init()\n self.num_graphs = len(self.graphs)\n self.save()\n print('%s saved to %s' % (name, sfn))\n\n def name_suffix(self):\n return ''\n\n def save(self):\n file = open(self.save_filename(), 'wb')\n file.write(pickle.dumps(self.__dict__))\n file.close()\n\n def load(self):\n file = open(self.save_filename(), 'rb')\n dp = file.read()\n file.close()\n self.__dict__ = pickle.loads(dp)\n\n def save_filename(self):\n return '{}/save/{}.pkl'.format(get_root_path(), self.name)\n\n def get_dist_mat(self, graphs1, graphs2):\n dist_mat = np.zeros((len(graphs1), len(graphs2)))\n 
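# A minimal sketch of the caching round trip that Data.save()/Data.load()
# above implement (the path shown is hypothetical):
#     pickle.dumps(self.__dict__)        -> written to '<root>/save/<name>.pkl'
#     self.__dict__ = pickle.loads(...)  -> restores every attribute
# so constructing the same Data object a second time skips init() and the
# expensive pairwise distance computation entirely.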
print('Generating distance matrix of {}'.format(dist_mat.shape))\n print('i,j,#node_i,#node_j,dist,time')\n for i in range(len(graphs1)):\n for j in range(len(graphs2)):\n t = time()\n gi = graphs1[i]\n gj = graphs2[j]\n ged = hungarian_ged(gi, gj)\n dist_mat[i][j] = ged\n print('{},{},{},{},{},{:.5f}'.format( \\\n i, j, len(gi), len(gj), ged, time() - t))\n return dist_mat\n\n\nclass SynData(Data):\n def __init__(self, train):\n if train:\n self.num_graphs = SynData.train_num_graphs\n else:\n self.num_graphs = SynData.test_num_graphs\n super().__init__(train)\n\n def init(self):\n self.graphs = []\n for i in range(self.num_graphs):\n n = randint(5, 20)\n m = randint(n - 1, n * (n - 1) / 2)\n self.graphs.append(nx.gnm_random_graph(n, m))\n print('Randomly generated %s graphs' % self.num_graphs)\n if self.train:\n self.train_train_dist = self.get_dist_mat(self.graphs, self.graphs)\n\n def name_suffix(self):\n return '_{}_{}'.format(SynData.train_num_graphs,\n SynData.test_num_graphs)\n\n\nclass AIDS10kData(Data):\n def __init__(self, train):\n super().__init__(train)\n\n def init(self):\n self.graphs = []\n datadir = get_root_path() + '/data/AIDS10k/' + ('train' if self.train \\\n else 'test')\n for file in glob(datadir + '/*.gexf'):\n gid = int(file.split('/')[-1].split('.')[0])\n g = nx.read_gexf(file)\n g.graph['gid'] = gid\n self.graphs.append(g)\n if not nx.is_connected(g):\n raise RuntimeError('{} not connected'.format(gid))\n print('Loaded {} graphs from {}'.format(len(self.graphs), datadir))\n\n","sub_path":"src/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":3230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"332023246","text":"from core.models import Boec, Brigade, Season, SeasonReport\nfrom core.serializers import DynamicFieldsModelSerializer\nfrom django.utils.translation import gettext_lazy as _\nfrom rest_framework import serializers\nfrom so.serializers import BoecInfoSerializer, BrigadeShortSerializer\n\n\nclass SeasonReportListSerializer(serializers.ModelSerializer):\n \"\"\"serializer for the SeasonReport object\"\"\"\n\n class Meta:\n model = SeasonReport\n fields = (\"id\", \"year\", \"boec_count\", \"state\", \"employer\")\n read_only_fields = (\"id\", \"boec_count\")\n\n\nclass SeasonReportSerializer(DynamicFieldsModelSerializer):\n \"\"\"serializer for the SeasonReport object\"\"\"\n\n can_edit = serializers.BooleanField(read_only=True)\n brigade = BrigadeShortSerializer(fields=(\"id\", \"full_title\"), read_only=True)\n brigade_id = serializers.PrimaryKeyRelatedField(\n queryset=Brigade.objects.all(), source=\"brigade\", required=False\n )\n\n class Meta:\n model = SeasonReport\n fields = (\n \"id\",\n \"year\",\n \"boec_count\",\n \"state\",\n \"employer\",\n \"can_edit\",\n \"brigade\",\n \"brigade_id\",\n \"is_summer\",\n )\n read_only_fields = (\"id\", \"boec_count\", \"can_edit\", \"brigade\")\n\n\nclass SeasonSerializer(DynamicFieldsModelSerializer):\n \"\"\"serializer for season objects\"\"\"\n\n boec = BoecInfoSerializer(read_only=True)\n boec_id = serializers.PrimaryKeyRelatedField(\n queryset=Boec.objects.all(), source=\"boec\"\n )\n reports = SeasonReportSerializer(\n fields=(\"brigade\", \"id\", \"employer\", \"year\"),\n source=\"season_reports\",\n many=True,\n read_only=True,\n )\n\n class Meta:\n model = Season\n fields = (\"id\", \"boec\", \"boec_id\", \"state\", \"reports\")\n read_only_fields = (\"id\", \"boec\", \"reports\")\n\n\nclass 
BoecSeasonsSerializer(DynamicFieldsModelSerializer):\n \"\"\"serializer for season objects\"\"\"\n\n reports = SeasonReportSerializer(\n fields=(\"brigade\", \"id\", \"employer\", \"year\"),\n source=\"season_reports\",\n many=True,\n read_only=True,\n )\n boec_id = serializers.PrimaryKeyRelatedField(\n queryset=Boec.objects.all(), source=\"boec\", required=False\n )\n\n class Meta:\n model = Season\n fields = (\"id\", \"state\", \"reports\", \"boec_id\")\n read_only_fields = (\"id\", \"reports\")\n\n\nclass SeasonRequestSerializer(serializers.ModelSerializer):\n \"\"\"serializer for the SeasonReport object\"\"\"\n\n seasons = SeasonSerializer(many=True)\n\n class Meta:\n model = SeasonReport\n fields = (\"id\", \"year\", \"seasons\", \"employer\")\n read_only_fields = (\"id\",)\n","sub_path":"app/report/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":2701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"630452109","text":"# Traverse with two nested loops, then close in from both ends with two pointers\n# (note: even so, this brute-force approach may exceed the time limit)\ndef fourSum(nums, target):\n list.sort(nums)\n rlt = []\n for i in range(len(nums)-3):\n for j in range(i+1, len(nums)-2):\n start = j + 1\n end = len(nums) - 1\n while start < end:\n four_sum = nums[i] + nums[j] + nums[start] + nums[end]\n if four_sum == target:\n rlt.append([nums[i], nums[j], nums[start], nums[end]])\n start += 1\n end -= 1\n elif four_sum > target:\n end -= 1\n else:\n start += 1\n return set(tuple(row) for row in rlt)\n\n\nprint(fourSum([-2, -1, 0, 0, 1, 2], 0))\n","sub_path":"Fei_LeetCode/Array/0920/18-fourSum.py","file_name":"18-fourSum.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"262276986","text":"# 20056 Wizard Shark and Fireball (BOJ)\nfrom math import floor\n\nN, M, K = map(int, input().split())\n\ndr = (-1, -1, 0, 1, 1, 1, 0, -1)\ndc = (0, 1, 1, 1, 0, -1, -1, -1)\n\nmap_dict = {}\nfor _ in range(M):\n r, c, m, s, d = map(int, input().split())\n fire_ball = (m, s, d)\n map_dict[(r, c)] = map_dict.get((r, c), list()) + [fire_ball]\n\ndef dprint(dictionary):\n for key in dictionary:\n print(key, dictionary[key])\n\ndef move(new_map_dict):\n for key, value in map_dict.items():\n r, c, = key\n fireball_list = value\n for fireball in fireball_list:\n m, s, d = fireball\n rd = dr[d] * s\n rc = dc[d] * s\n newr = r + rd\n newc = c + rc\n if newr > N:\n newr %= N\n if newr == 0:\n newr = N\n if newc > N:\n newc %= N\n if newc == 0:\n newc = N\n if newr < 0:\n newr = (newr - 1) % N\n newr += 1\n # if newr == 0:\n # newr = 1\n if newc < 0:\n newc = (newc - 1) % N\n newc += 1\n # if newc == 0:\n # newc = 1\n if newr == 0:\n newr = N\n if newc == 0:\n newc = N\n\n\n new_map_dict[(newr, newc)] = new_map_dict.get((newr, newc), list()) + [(m, s, d)]\n \ndef merge(new_map_dict):\n for key, value in new_map_dict.items():\n r, c = key\n fireball_list = value\n length = len(fireball_list)\n if len(fireball_list) >= 2:\n m_sum = 0\n s_sum = 0\n fireball_cnt = 0\n odd_direction_cnt = 0\n even_direction_cnt = 0\n for fireball in fireball_list:\n m, s, d = fireball\n m_sum += m\n s_sum += s\n fireball_cnt += 1\n if d % 2:\n odd_direction_cnt += 1\n else:\n even_direction_cnt += 1\n \n new_m = floor(m_sum // 5)\n new_s = floor(s_sum // fireball_cnt)\n\n if new_m > 0:\n if odd_direction_cnt == length or even_direction_cnt == length:\n direction_list = (0, 2, 4, 6)\n else:\n direction_list = (1, 3, 5, 7)\n\n new_map_dict[key] = [ (new_m, new_s, direction) for direction in direction_list ]\r\n 
else:\n new_map_dict[key] = []\n\ndef get_mass_sum(map_dict):\n mass_sum = 0\n for fireball_list in map_dict.values():\n for fireball in fireball_list:\n m, s, d = fireball\n mass_sum += m\n return mass_sum\n\nfor time in range(K):\n new_map_dict = {} \n\n move(new_map_dict)\n merge(new_map_dict)\n # dprint(new_map_dict)\n\n map_dict = new_map_dict\n\nprint(get_mass_sum(map_dict))","sub_path":"Algorithm/BOJ/20056.py","file_name":"20056.py","file_ext":"py","file_size_in_byte":2958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"92695930","text":"\"\"\" Defines the backend connection class for PostgreSQL databases. \"\"\"\n\nimport datetime\nimport logging\nimport os\nimport orb\nimport re\nimport traceback\n\nfrom projex.text import nativestring as nstr\n\nfrom ..sqlconnection import SQLConnection\nfrom ..sqlstatement import SQLStatement\n\nlog = logging.getLogger(__name__)\n\ntry:\n import psycopg2 as pg\n import psycopg2.extensions as pg_ext\n\n from psycopg2.extras import DictCursor, register_hstore, register_json\n\nexcept ImportError:\n log.debug('For PostgreSQL backend, download the psycopg2 module')\n\n DictCursor = None\n register_hstore = None\n register_json = None\n pg = None\n pg_ext = None\n\nelse:\n # ensure that psycopg2 uses unicode for all the database strings\n pg_ext.register_type(pg_ext.UNICODE)\n pg_ext.register_type(pg_ext.UNICODEARRAY)\n\n\n# ----------------------------------------------------------------------\n\nclass PSQLStatement(SQLStatement):\n pass\n\n\n# noinspection PyAbstractClass\nclass PSQLConnection(SQLConnection):\n \"\"\" \n Creates a PostgreSQL backend connection type for handling database\n connections to PostgreSQL databases.\n \"\"\"\n\n # ----------------------------------------------------------------------\n # PROTECTED METHODS\n # ----------------------------------------------------------------------\n def _execute(self,\n native,\n command,\n data=None,\n returning=True,\n mapper=dict):\n \"\"\"\n Executes the inputted command into the current \\\n connection cursor.\n \n :param command | \n data | || None\n autoCommit | | commit database changes immediately\n autoClose | | closes connections immediately\n \n :return [{ key: , ..}, ..], count\n \"\"\"\n if data is None:\n data = {}\n\n cursor = native.cursor(cursor_factory=DictCursor)\n\n # register the hstore option\n try:\n register_hstore(cursor, unicode=True)\n except pg.ProgrammingError:\n log.warning('HSTORE is not supported in this version of Postgres!')\n\n # register the json option\n try:\n register_json(cursor)\n except pg.ProgrammingError:\n log.warning('JSON is not supported in this version of Postgres!')\n\n start = datetime.datetime.now()\n\n log.debug('***********************')\n log.debug(command % data)\n log.debug('***********************')\n\n try:\n cursor.execute(command, data)\n rowcount = cursor.rowcount\n\n # look for a cancelled query\n except pg_ext.QueryCanceledError as cancelled:\n try:\n native.rollback()\n except StandardError as err:\n log.error('Rollback error: {0}'.format(err))\n log.critical(command)\n if data:\n log.critical(str(data))\n\n # raise more useful errors\n if 'statement timeout' in str(cancelled):\n raise orb.errors.QueryTimeout(command, (datetime.datetime.now() - start).total_seconds())\n else:\n raise orb.errors.Interruption()\n\n # look for a disconnection error\n except pg.InterfaceError:\n raise orb.errors.ConnectionLost()\n\n # look for integrity errors\n except (pg.IntegrityError, 
pg.OperationalError) as err:\n try:\n native.rollback()\n except StandardError:\n pass\n\n # look for a duplicate error\n duplicate_error = re.search('Key (.*) already exists.', nstr(err))\n if duplicate_error:\n key = duplicate_error.group(1)\n result = re.match('^\\(lower\\((?P[^\\)]+)::text\\)\\)=\\((?P[^\\)]+)\\)$', key)\n if not result:\n result = re.match('^(?P\\w+)=(?P\\w+)', key)\n\n if result:\n msg = '{value} is already being used.'.format(**result.groupdict())\n raise orb.errors.DuplicateEntryFound(msg)\n else:\n raise orb.errors.DuplicateEntryFound(duplicate_error.group())\n\n # look for a reference error\n reference_error = re.search('Key .* is still referenced from table \".*\"', nstr(err))\n if reference_error:\n msg = 'Cannot remove this record, it is still being referenced.'\n raise orb.errors.CannotDelete(msg)\n\n # unknown error\n log.debug(traceback.print_exc())\n raise orb.errors.QueryFailed(command, data, nstr(err))\n\n # connection has closed underneath the hood\n except (pg.Error, pg.ProgrammingError) as err:\n try:\n native.rollback()\n except StandardError:\n pass\n\n log.error(traceback.print_exc())\n raise orb.errors.QueryFailed(command, data, nstr(err))\n\n try:\n results = [mapper(record) for record in cursor.fetchall()]\n except pg.ProgrammingError:\n results = []\n\n return results, rowcount\n\n def _open(self, db, writeAccess=False):\n \"\"\"\n Handles simple, SQL specific connection creation. This will not\n have to manage thread information as it is already managed within\n the main open method for the SQLBase class.\n \n :param db | \n \n :return | backend specific database connection\n \"\"\"\n if not pg:\n raise orb.errors.BackendNotFound('psycopg2 is not installed.')\n\n if db.timeout():\n os.environ['PGOPTIONS'] = '-c statement_timeout={0}'.format(db.timeout())\n\n # create the python connection\n try:\n return pg.connect(database=db.name(),\n user=db.username(),\n password=db.password(),\n host=db.writeHost() if writeAccess else db.host(),\n port=db.port(),\n connect_timeout=3)\n except pg.OperationalError as err:\n log.exception('Failed to connect to postgres')\n raise orb.errors.ConnectionFailed()\n\n def _interrupt(self, threadId, connection):\n \"\"\"\n Interrupts the given native connection from a separate thread.\n \n :param threadId | \n connection | | backend specific database.\n \"\"\"\n try:\n connection.cancel()\n except pg.Error:\n pass\n\n # ----------------------------------------------------------------------\n\n @classmethod\n def statement(cls, code=''):\n \"\"\"\n Returns the statement interface for this connection.\n \n :param code | \n \n :return subclass of \n \"\"\"\n return PSQLStatement.byName(code) if code else PSQLStatement\n\n\n# register the postgres backend\nif pg:\n orb.Connection.registerAddon('Postgres', PSQLConnection)\n\n","sub_path":"orb/core/connection_types/sql/postgres/psqlconnection.py","file_name":"psqlconnection.py","file_ext":"py","file_size_in_byte":7273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"650042048","text":"import re\nwith open (r\"C:\\Distrib\\Python\\dataset_3363_2.txt\", \"r\") as file_in:\n f1 = file_in.readline().strip()\n f1 = re.split(\"(\\d*)\", f1)[:-1]\nout = open (r\"C:\\\\Distrib\\\\Python\\\\dataset_3363_3.txt\", \"w\")\nfor i in range(len(f1)):\n if f1[i].isdigit() == 0:\n output = f1[i]*int(f1[i+1])\n print(output)\n out.write(output)\n else: \n continue\nout.close()\n\n\n'''\n open 
(r\"C:\\\\Distrib\\\\Python\\\\dataset_3363_3.txt\", \"w\") as f_out:\n f_out.write(f1[i]*int(f1[i]))\n\ns=input()\nj=len(s)\nc=1\nfor i in range(j):\n if i==(j-1):\n print(s[i]+str(c),end='')\n else:\n if s[i]==s[i+1]:\n c+=1\n else:\n print(s[i]+str(c),end='')\n c=1\n \n ''' \n \n\n","sub_path":"files001.py","file_name":"files001.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"221624426","text":"\"\"\" module imports \"\"\"\nfrom django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom .models import List\nfrom .forms import ListForm\n\n\ndef home(request):\n \"\"\" renders 'Home' page and tasks list \"\"\"\n if request.method == 'POST':\n form = ListForm(request.POST or None)\n if form.is_valid():\n form.save()\n all_items = List.objects.all\n messages.success(request, ('Item successfully added to the list!'))\n return render(request, 'home.html', {'items': all_items})\n messages.error(request, ('Could not add item to the list.'))\n all_items = List.objects.all\n return render(request, 'home.html', {'items': all_items})\n\n\ndef about(request):\n \"\"\" renders 'About' page \"\"\"\n return render(request, 'about.html', {})\n\n\ndef delete(request, item_id):\n \"\"\" deletes item from the list of tasks \"\"\"\n item = List.objects.get(pk=item_id)\n item.delete()\n messages.success(request, ('Item has been deleted!'))\n return redirect('home')\n\n\ndef toggle_check_done(request, item_id):\n \"\"\" toggles item status to completed \"\"\"\n item = List.objects.get(pk=item_id)\n item.completed = True\n item.save()\n return redirect('home')\n\n\ndef toggle_check_undone(request, item_id):\n \"\"\" toggles item status to undone \"\"\"\n item = List.objects.get(pk=item_id)\n item.completed = False\n item.save()\n return redirect('home')\n\n\ndef edit(request, item_id):\n \"\"\" edits item information \"\"\"\n if request.method == 'POST':\n item = List.objects.get(pk=item_id)\n form = ListForm(request.POST or None, instance=item)\n if form.is_valid():\n form.save()\n messages.success(request, ('Item successfully edited!'))\n return redirect('home')\n messages.error(request, ('Could not save edited item.'))\n item = List.objects.get(pk=item_id)\n return render(request, 'edit.html', {'item': item})\n","sub_path":"todo_list/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"217056791","text":"import argparse\n\n# Req. 
2-1\tConfig.py 파일 생성\n\n# 캡션 데이터가 있는 파일 경로 (예시)\ndef config():\n parser = argparse.ArgumentParser()\n parser.add_argument('--caption_file_path', type=str, default='.\\\\datasets\\\\captions.csv')\n parser.add_argument('--image_file_path', type=str, default='.\\\\datasets\\\\images\\\\')\n\n return parser.parse_args()\n","sub_path":"2020 Education/Project3 수어 또 하나의 언어/Code/AI/backend/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"114626772","text":"import uno\nfrom unohelper import systemPathToFileUrl\nfrom com.sun.star.beans import PropertyValue\nfrom com.sun.star.style.ParagraphAdjust import CENTER, LEFT, RIGHT, BLOCK, STRETCH\nfrom com.sun.star.text.ControlCharacter import PARAGRAPH_BREAK, APPEND_PARAGRAPH, LINE_BREAK\nfrom com.sun.star.text.TextContentAnchorType import AS_CHARACTER\nfrom com.sun.star.table import BorderLine2\nfrom com.sun.star.awt import Size\nfrom com.sun.star.text import TableColumnSeparator\ncursor = None\n\ndef saveDocument(doc, patient):\n url = systemPathToFileUrl('/Users/justusvoigt/Documents/' + str(patient['name']) + '.odt')\n args = (PropertyValue('FilterName',0, 'writer8', 0),)\n doc.storeToURL(url, args)\n doc.dispose()\n\ndef configureBorders(doc, text, items):\n no_line = BorderLine2()\n no_line.Color = 0\n no_line.InnerLineWidth = 0\n no_line.LineDistance = 0\n no_line.LineStyle = 0\n no_line.LineWidth = 0\n no_line.OuterLineWidth = 0\n text_tables = doc.getTextTables()\n get_top_table = text_tables.getByIndex(0)\n table_top_border = get_top_table.TableBorder\n table_top_border.LeftLine = no_line\n table_top_border.RightLine = no_line\n table_top_border.TopLine = no_line\n table_top_border.BottomLine = no_line\n table_top_border.HorizontalLine = no_line\n table_top_border.VerticalLine = no_line\n get_top_table.TableBorder = table_top_border\n get_middle_table = text_tables.getByIndex(1)\n table_middle_border = get_middle_table.TableBorder\n table_middle_border.LeftLine = no_line\n table_middle_border.RightLine = no_line\n table_middle_border.TopLine = no_line\n table_middle_border.BottomLine = no_line\n table_middle_border.HorizontalLine = no_line\n table_middle_border.VerticalLine = no_line\n get_middle_table.TableBorder = table_middle_border\n get_main_table = text_tables.getByIndex(2)\n count = 0\n col = ['A', 'B', 'C', 'D']\n for i in items:\n count = count + 1\n\n for i in col:\n table_main_cell = get_main_table.getCellByName(i + str(count+2))\n left_border_a_cell = table_main_cell.LeftBorder\n left_border_a_cell.OuterLineWidth = 0 \n left_border_a_cell.LineWidth = 0 \n table_main_cell.LeftBorder = left_border_a_cell\n\n cRange = get_main_table.getCellRangeByName(\"A\" + str(count +2) + \":D\" + str(count + 2))\n cRange.setPropertyValue( \"CharFontName\", \"Liberation Serif\" )\n cRange.setPropertyValue( \"CharHeight\", 10.0 )\n table_main_cell = get_main_table.getCellByName(\"D\" + str(count+2))\n right_border_a_cell = table_main_cell.RightBorder\n right_border_a_cell.OuterLineWidth = 0 \n right_border_a_cell.LineWidth = 0 \n table_main_cell.RightBorder = right_border_a_cell\n table_main_border = get_main_table.TableBorder\n table_main_border.BottomLine = no_line\n get_main_table.TableBorder = table_main_border\n get_bottom_table = text_tables.getByIndex(3)\n table_bottom_border = get_bottom_table.TableBorder \n table_bottom_border.Distance = 1\n table_bottom_border.HorizontalLine = no_line\n 
table_bottom_border.VerticalLine = no_line\n get_bottom_table.TableBorder = table_bottom_border\n return doc, text\n\ndef populateBottomTable(doc, text):\n bottom_table = doc.createInstance( \"com.sun.star.text.TextTable\" )\n bottom_table.initialize(5,3)\n bottom_table.setName('bottom_table')\n styles = doc.StyleFamilies\n page_styles = styles.getByName(\"PageStyles\")\n oDefaultStyle = page_styles.getByName(\"Standard\")\n oDefaultStyle.FooterIsOn = True\n footer_text = oDefaultStyle.getPropertyValue(\"FooterText\")\n footer_cursor = footer_text.createTextCursor()\n footer_text = footer_text.insertTextContent(footer_cursor, bottom_table, 1)\n first_bottom_table_text = bottom_table.getCellByName(\"A1\")\n first_bottom_table_text.setString(\" Accounts Details:\" )\n first_bottom_table_text = bottom_table.getCellByName(\"A2\")\n first_bottom_table_text.setString(\" Account Holder:\" )\n first_bottom_table_text = bottom_table.getCellByName(\"A3\")\n first_bottom_table_text.setString(\" Bank:\" )\n first_bottom_table_text = bottom_table.getCellByName(\"A4\")\n first_bottom_table_text.setString(\" Account Number:\" )\n first_bottom_table_text = bottom_table.getCellByName(\"A5\")\n first_bottom_table_text.setString(\" Branch Code:\" )\n first_bottom_table_text = bottom_table.getCellByName(\"B2\")\n first_bottom_table_text.setString(\"A.Pickel-Voigt\" )\n first_bottom_table_text = bottom_table.getCellByName(\"B3\")\n first_bottom_table_text.setString(\"Standard Bank\" )\n first_bottom_table_text = bottom_table.getCellByName(\"B4\")\n first_bottom_table_text.setString(\"241710812\" )\n first_bottom_table_text = bottom_table.getCellByName(\"B5\")\n first_bottom_table_text.setString(\"084873 (Oshakati Branch)\" )\n first_bottom_table_text = bottom_table.getCellByName(\"C1\")\n first_bottom_table_text.setString(\"Postal:\" )\n first_bottom_table_text = bottom_table.getCellByName(\"C2\")\n first_bottom_table_text.setString(\"A. 
Pickel-Voigt\" )\n first_bottom_table_text = bottom_table.getCellByName(\"C3\")\n first_bottom_table_text.setString(\"PO Box 37\" )\n first_bottom_table_text = bottom_table.getCellByName(\"C4\")\n first_bottom_table_text.setString(\"Oshakati\" )\n first_bottom_table_text = bottom_table.getCellByName(\"C5\")\n first_bottom_table_text.setString(\"Namibia\" )\n range = bottom_table.getCellRangeByName(\"A1:C5\")\n range.setPropertyValue( \"CharFontName\", \"Liberation Serif\" )\n range.setPropertyValue( \"CharHeight\", 8.5 )\n range.setPropertyValue(\"ParaAdjust\", LEFT)\n otabseps = bottom_table.TableColumnSeparators\n relativeTableWidth = bottom_table.getPropertyValue( \"TableColumnRelativeSum\" )\n otabseps[0].Position = relativeTableWidth * 0.2\n otabseps[1].Position = relativeTableWidth * 0.8\n # otabseps[2].Position = relativeTableWidth * 0.90\n bottom_table.TableColumnSeparators = otabseps \n bottom_table.setPropertyValue(\"TableColumnSeparators\", otabseps)\n return doc, text\n\ndef createTable(doc, text, unitCount):\n global cursor\n table = doc.createInstance( \"com.sun.star.text.TextTable\" )\n table.initialize(unitCount + 2, 4)\n text.insertTextContent( cursor, table, 1 )\n otabseps = table.TableColumnSeparators\n relativeTableWidth = table.getPropertyValue( \"TableColumnRelativeSum\" )\n otabseps[0].Position = relativeTableWidth * 0.12\n otabseps[1].Position = relativeTableWidth * 0.21\n otabseps[2].Position = relativeTableWidth * 0.90\n table.TableColumnSeparators = otabseps \n table.setPropertyValue(\"TableColumnSeparators\", otabseps)\n cRange = table.getCellRangeByName(\"A1:D1\")\n cRange.setPropertyValue( \"ParaAdjust\", LEFT )\n cRange.setPropertyValue( \"CharFontName\", \"Liberation Serif\" )\n cRange.setPropertyValue( \"CharHeight\", 10.0 )\n insertTextIntoCell( table, \"A1\", \"Date of Service\" )\n insertTextIntoCell( table, \"B1\", \"Namaf Code\" )\n insertTextIntoCell( table, \"C1\", \"Description\")\n insertTextIntoCell( table, \"D1\", \"Amount\")\n insertTextIntoCell( table, \"C\" + str(2 + unitCount), \"Total N$: \")\n # insertTextIntoCell( table, \"A\" + str(2 + unitCount), \"Total\")\n cursor_right = table.createCursorByCellName(\"C\" + str(2 + unitCount))\n cursor_right.setPropertyValue( \"ParaAdjust\", RIGHT )\n text.insertControlCharacter( cursor, PARAGRAPH_BREAK, False )\n return table, unitCount\n\ndef insertTextIntoCell( table, cellName, text, color = None ):\n tableText = table.getCellByName( cellName )\n cursor = tableText.createTextCursor()\n cursor.setPropertyValue( \"CharFontName\", \"Liberation Serif\" )\n cursor.setPropertyValue( \"CharHeight\", 9.0 )\n if color != None: \n cursor.setPropertyValue( \"CharColor\", color )\n tableText.setString( text )\n\ndef populateTable(doc, text, items, treatments, price, dates, modifier):\n table, unitCount = createTable(doc, text, len(treatments))\n print(price)\n print(treatments)\n for a, b, c, d, e in zip(enumerate(treatments), dates, items, price, modifier):\n if e == '14':\n insertTextIntoCell(table, \"B\" + str(a[0] + 2), str(c + \" (0\" + e + \")\"))\n else:\n insertTextIntoCell(table, \"B\" + str(a[0] + 2), c)\n insertTextIntoCell(table, \"A\" + str(a[0] + 2), b)\n insertTextIntoCell(table, \"C\" + str(a[0] + 2), a[1]['description'])\n insertTextIntoCell(table, \"D\" + str(a[0] + 2), str(d))\n cell_sum = table.getCellByName(\"D\" + str(2 + unitCount))\n cell_sum.setFormula(\"=sum \")\n # NumForms = doc.getNumberFormats()\n # dateFormatString = \"YYYY/MM/DD\\\\ HH:MM:SS\"\n # DateKey = 
NumForms.queryKey(dateFormatString, sLocale, True)\n # cell_sum.NumberFormat = DateKey\n return doc, text\n\ndef populateMiddleTable(doc, text, patient):\n global cursor\n middle_table = doc.createInstance( \"com.sun.star.text.TextTable\" )\n if (patient['medical'] == 'mva'):\n middle_table.initialize(2,3)\n middle_table.setName('middle_table')\n text.insertTextContent( cursor, middle_table, 1 )\n first_middle_table_text = middle_table.getCellByName(\"A1\")\n first_middle_table_text.setString(\"Invoice No: MVA/2020/H\" )\n first_middle_table_text.setPropertyValue( \"ParaAdjust\", LEFT )\n first_middle_table_text.setPropertyValue( \"CharHeight\", 10.0 )\n second_middle_table_text = middle_table.getCellByName(\"A2\")\n second_middle_table_text.setString(\"Patient Name: \" + str(patient['name']) + \"\\nCase Number: \" + str(patient['case']) + \"\\nPO: \" + str(patient['po']))\n second_middle_table_text.setPropertyValue( \"CharFontName\", \"Liberation Serif\" )\n second_middle_table_text.setPropertyValue( \"CharHeight\", 10.0 )\n third_middle_table_text = middle_table.getCellByName(\"B2\")\n # third_middle_table_text.setString(\"Case Number: \" + str(patient['case']))\n third_middle_table_text.setPropertyValue( \"CharFontName\", \"Liberation Serif\" )\n third_middle_table_text.setPropertyValue( \"CharHeight\", 10.0 )\n # third_middle_table_text.setPropertyValue(\"ParaAdjust\", CENTER)\n cursor_middle_right = middle_table.createCursorByCellName(\"C1\")\n cursor_middle_right.setPropertyValue( \"ParaAdjust\", RIGHT )\n cursor_middle_table_c1 = middle_table.getCellByName(\"C1\")\n cursor_middle_table_c1.setString(\"Date: \" + str(patient['date']))\n cursor_middle_table_c1.setPropertyValue( \"CharFontName\", \"Liberation Serif\" )\n cursor_middle_table_c1.setPropertyValue( \"CharHeight\", 10.0 )\n fourth_middle_table_text = middle_table.getCellByName(\"C2\")\n # fourth_middle_table_text.setString(\"PO: \" + str(patient['po']))\n fourth_middle_table_text.setPropertyValue( \"CharFontName\", \"Liberation Serif\" )\n fourth_middle_table_text.setPropertyValue( \"CharHeight\", 10.0 )\n # fourth_middle_table_text.setPropertyValue( \"ParaAdjust\", RIGHT )\n \n seventh_middle_table_text = middle_table.getCellByName(\"B1\")\n seventh_middle_table_text.setString(\"Invoice\")\n seventh_middle_table_text.setPropertyValue( \"CharFontName\", \"Liberation Serif\" )\n seventh_middle_table_text.setPropertyValue( \"CharHeight\", 11.0 )\n\n\n\n text.insertControlCharacter( cursor, PARAGRAPH_BREAK, False )\n # text.insertControlCharacter( cursor, PARAGRAPH_BREAK, False )\n\n else:\n middle_table.initialize(3,3)\n middle_table.setName('middle_table')\n text.insertTextContent( cursor, middle_table, 1 )\n first_middle_table_text = middle_table.getCellByName(\"A1\")\n first_middle_table_text.setString(\"Invoice No: PSEMAS/2020/H\" )\n first_middle_table_text.setPropertyValue( \"ParaAdjust\", LEFT )\n first_middle_table_text.setPropertyValue( \"CharHeight\", 10.0 )\n second_middle_table_text = middle_table.getCellByName(\"A2\")\n second_middle_table_text.setString(\"Main Member: \" + str(patient['main']))\n second_middle_table_text.setPropertyValue( \"CharFontName\", \"Liberation Serif\" )\n second_middle_table_text.setPropertyValue( \"CharHeight\", 10.0 )\n third_middle_table_text = middle_table.getCellByName(\"B2\")\n third_middle_table_text.setString(\"Medical Aid No: \" + str(patient['number']))\n third_middle_table_text.setPropertyValue( \"CharFontName\", \"Liberation Serif\" )\n 
        middle_table.initialize(3,3)\n        middle_table.setName('middle_table')\n        text.insertTextContent( cursor, middle_table, 1 )\n        first_middle_table_text = middle_table.getCellByName(\"A1\")\n        first_middle_table_text.setString(\"Invoice No: PSEMAS/2020/H\" )\n        first_middle_table_text.setPropertyValue( \"ParaAdjust\", LEFT )\n        first_middle_table_text.setPropertyValue( \"CharHeight\", 10.0 )\n
        second_middle_table_text = middle_table.getCellByName(\"A2\")\n        second_middle_table_text.setString(\"Main Member: \" + str(patient['main']))\n        second_middle_table_text.setPropertyValue( \"CharFontName\", \"Liberation Serif\" )\n        second_middle_table_text.setPropertyValue( \"CharHeight\", 10.0 )\n
        third_middle_table_text = middle_table.getCellByName(\"B2\")\n        third_middle_table_text.setString(\"Medical Aid No: \" + str(patient['number']))\n        third_middle_table_text.setPropertyValue( \"CharFontName\", \"Liberation Serif\" )\n        third_middle_table_text.setPropertyValue( \"CharHeight\", 10.0 )\n        # third_middle_table_text.setPropertyValue(\"ParaAdjust\", CENTER)\n
        cursor_middle_right = middle_table.createCursorByCellName(\"C1\")\n        cursor_middle_right.setPropertyValue( \"ParaAdjust\", RIGHT )\n        cursor_middle_table_c1 = middle_table.getCellByName(\"C1\")\n        cursor_middle_table_c1.setString(\"Date: \" + str(patient['date']))\n        cursor_middle_table_c1.setPropertyValue( \"CharFontName\", \"Liberation Serif\" )\n        cursor_middle_table_c1.setPropertyValue( \"CharHeight\", 10.0 )\n
        fourth_middle_table_text = middle_table.getCellByName(\"C2\")\n        fourth_middle_table_text.setString(\"Insurance: \" + str(patient['medical']))\n        fourth_middle_table_text.setPropertyValue( \"CharFontName\", \"Liberation Serif\" )\n        fourth_middle_table_text.setPropertyValue( \"CharHeight\", 10.0 )\n        #fourth_middle_table_text.setPropertyValue( \"ParaAdjust\", RIGHT )\n
        fifth_middle_table_text = middle_table.getCellByName(\"A3\")\n        fifth_middle_table_text.setString(\"Patient Name: \" + str(patient['name']))\n        fifth_middle_table_text.setPropertyValue( \"CharFontName\", \"Liberation Serif\" )\n        fifth_middle_table_text.setPropertyValue( \"CharHeight\", 10.0 )\n        # fifth_middle_table_text.setPropertyValue(\"ParaAdjust\", CENTER)\n
        sixth_middle_table_text = middle_table.getCellByName(\"B3\")\n        sixth_middle_table_text.setString(\"Patient DoB: \" + str(patient['dob']))\n        sixth_middle_table_text.setPropertyValue( \"CharFontName\", \"Liberation Serif\" )\n        sixth_middle_table_text.setPropertyValue( \"CharHeight\", 10.0 )\n\n
        seventh_middle_table_text = middle_table.getCellByName(\"B1\")\n        seventh_middle_table_text.setString(\"Invoice\")\n        seventh_middle_table_text.setPropertyValue( \"CharFontName\", \"Liberation Serif\" )\n        seventh_middle_table_text.setPropertyValue( \"CharHeight\", 11.0 )\n\n        #sixth_middle_table_text.setPropertyValue(\"ParaAdjust\", CENTER)\n        text.insertControlCharacter( cursor, PARAGRAPH_BREAK, False )\n        # text.insertControlCharacter( cursor, PARAGRAPH_BREAK, False )\n\n    return doc, text\n\n
def populateTopTable(doc, text, patient):\n    global cursor\n    cursor.setPropertyValue( \"CharHeight\", 10.0 )\n    cursor.setPropertyValue( \"CharFontName\", \"Liberation Serif\" )\n    top_table = doc.createInstance( \"com.sun.star.text.TextTable\" )\n    top_table.initialize(1,2)\n    top_table.setName('top_Table')\n    text.insertTextContent( cursor, top_table, 1 )\n    first_top_table_text = top_table.getCellByName(\"A1\")\n    first_top_table_text.setString(\"Practice No: 072 0000 637653 \\nHPCNA No: PHY 00194\" )\n    first_top_table_text.setPropertyValue( \"CharHeight\", 10.0 )\n
    cursor_top_right = top_table.createCursorByCellName(\"B1\")\n    cursor_top_right.setPropertyValue( \"ParaAdjust\", RIGHT )\n    second_top_table_text = top_table.getCellByName(\"B1\")\n    second_top_table_text.setPropertyValue( \"CharHeight\", 10.0 )\n    second_top_table_text.setString(\"anpickel@gmail.com\\nCell: 081 648 11 82\")\n    eText = top_table.getCellByName(\"B1\").getText()\n    eCursor = eText.createTextCursor()\n    eText.insertString(eCursor, \"\", False)\n    eCursor.goRight(len(\"anpickel@gmail.com\"), True)\n    eCursor.HyperLinkURL = \"mailto:anpickel@gmail.com\"\n    text.insertControlCharacter( cursor, PARAGRAPH_BREAK, False )\n    return doc, text\n\n
def populateTopText(doc, text):\n    global cursor\n# styles = doc.StyleFamilies\n# page_styles = styles.getByName(\"PageStyles\")\n# oDefaultStyle = page_styles.getByName(\"Standard\")\n# oDefaultStyle.HeaderIsOn = True\n
# header_text = oDefaultStyle.getPropertyValue(\"HeaderText\")\n# header_cursor = header_text.createTextCursor()\n# header_text.insertString(header_cursor, \"hello\",0)\n\n
    cursor = text.createTextCursor()\n    cursor.setPropertyValue( \"CharFontName\", \"Liberation Serif\" )\n    cursor.setPropertyValue( \"CharHeight\", 10.0 )\n    cursor.setPropertyValue( \"ParaAdjust\", CENTER )\n    text.insertString( cursor, \"A. Pickel-Voigt\", 0 )\n    text.insertControlCharacter( cursor, PARAGRAPH_BREAK, False )\n    text.insertString( cursor, \"MSc Physiotherapy (UWC)\" , 0 )\n    text.insertControlCharacter( cursor, PARAGRAPH_BREAK, False )\n    text.insertControlCharacter( cursor, PARAGRAPH_BREAK, False )\n    text.insertControlCharacter( cursor, PARAGRAPH_BREAK, False )\n    return doc, text\n\n
def setupConnection():\n    localContext = uno.getComponentContext()\n    resolver = localContext.ServiceManager.createInstanceWithContext(\"com.sun.star.bridge.UnoUrlResolver\", localContext )\n    smgr = resolver.resolve( \"uno:socket,host=localhost,port=2002;urp;StarOffice.ServiceManager\" )\n    remoteContext = smgr.getPropertyValue( \"DefaultContext\" )\n    desktop = smgr.createInstanceWithContext( \"com.sun.star.frame.Desktop\",remoteContext)\n    doc = desktop.loadComponentFromURL( \"private:factory/swriter\",\"_blank\", 0, () )\n    text = doc.Text\n    return doc, text\n\n
def createTextInvoice(items, treatments, price, dates, patient, modifier):\n    doc, text = setupConnection()\n    doc, text = populateTopText(doc, text)\n    doc, text = populateTopTable(doc, text, patient)\n    doc, text = populateMiddleTable(doc, text, patient)\n    doc, text = populateTable(doc, text, items, treatments, price, dates, modifier)\n    doc, text = populateBottomTable(doc, text)\n    doc, text = configureBorders(doc, text, items)\n    saveDocument(doc, patient)\n\n\n
def testing():\n    patient = {'case': 'asdfasdfa', 'csrf_token':'ImU5NjFiYWEwN2Y1MGUyMmFiZDBkY2ZiYTQ5NDgxYzdiN2NlODQ2MDQi.XpVy6A.zOXe-xkr0gUZJroWUQHqVEoGxu0','date': '2020-04-14', 'medical': 'mva', 'name': 'asdfasdf', 'po': '423423423'}\n    dates = ['01-04-2020', '04-04-2020', '10-04-2020', '15-04-2020']\n    treatments = [{'description': 'Infra-red, Radiant heat, Wax therapy Hot packs', 'units': 10, 'value': 98}, {'description': 'Infra-red, Radiant heat, Wax therapy Hot packs', 'units': 10, 'value': 98}, {'description': 'Infra-red, Radiant heat, Wax therapy Hot packs', 'units': 10, 'value': 98}, {'description': 'Infra-red, Radiant heat, Wax therapy Hot packs', 'units': 10, 'value': 98}]\n    items = ['001', '001', '001', '001']\n    price = ['300','435', '196', '444']\n    modifier = ['0','0','0','14']\n    createTextInvoice(items, treatments, price, dates, patient, modifier)\n\ntesting()\n","sub_path":"swriter.py","file_name":"swriter.py","file_ext":"py","file_size_in_byte":18704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"11704188","text":"# -*- coding: cp936 -*-\nfrom math import log\nimport operator\nimport os\n\nimport re\nfrom numpy import inf\nimport copy\n\n\n
# Compute the Shannon entropy of a data set\ndef calcShannonEnt(dataSet, labelIndex):\n    # type: (list, int) -> float\n    numEntries = 0  # weighted sample count\n    labelCounts = {}\n    for featVec in dataSet:  # iterate over the samples\n        if featVec[labelIndex] != 'N':\n            weight = float(featVec[-2])\n            numEntries += weight\n            currentLabel = featVec[-1]  # class label of the current sample\n            if currentLabel not in labelCounts.keys():  # build the class-count dict\n                labelCounts[currentLabel] = 0\n            labelCounts[currentLabel] += weight  # the second-to-last field of a sample stores its weight\n    shannonEnt = 0.0\n    for key in labelCounts:  # accumulate the entropy\n        prob = float(labelCounts[key]) / numEntries\n
        shannonEnt = shannonEnt - prob * log(prob, 2)\n    return shannonEnt\n\n\n
def splitDataSet(dataSet, axis, value, LorR='N'):\n    \"\"\"\n    type: (list, int, string or float, string) -> list\n    Split the data set on one feature.\n    axis: index of the feature to split on\n    value: value of the feature to split by\n    LorR: 'N' discrete feature; 'L' keep samples with value < the split point; 'R' keep samples with value > the split point\n    \"\"\"\n    retDataSet = []\n    featVec = []\n    if LorR == 'N':  # discrete feature\n        for featVec in dataSet:\n            if featVec[axis] == value:\n                reducedFeatVec = featVec[:axis]\n                reducedFeatVec.extend(featVec[axis + 1:])\n                retDataSet.append(reducedFeatVec)\n    elif LorR == 'L':\n        for featVec in dataSet:\n            if featVec[axis] != 'N':\n                if float(featVec[axis]) < value:\n                    retDataSet.append(featVec)\n    elif LorR == 'R':\n        for featVec in dataSet:\n            if featVec[axis] != 'N':\n                if float(featVec[axis]) > value:\n                    retDataSet.append(featVec)\n    return retDataSet\n\n\n
def splitDataSetWithNull(dataSet, axis, value, LorR='N'):\n    \"\"\"\n    type: (list, int, string or float, string) -> list\n    Split the data set on one feature, distributing samples with a missing value ('N') into the branch in proportion to its weight.\n    axis: index of the feature to split on\n    value: value of the feature to split by\n    LorR: 'N' discrete feature; 'L' keep samples with value < the split point; 'R' keep samples with value > the split point\n    \"\"\"\n    retDataSet = []\n    nullDataSet = []\n    featVec = []\n    totalWeightV = calcTotalWeight(dataSet, axis, False)  # total weight of the non-missing samples\n    totalWeightSub = 0.0\n    if LorR == 'N':  # discrete feature\n        for featVec in dataSet:\n            if featVec[axis] == value:\n                reducedFeatVec = featVec[:axis]\n                reducedFeatVec.extend(featVec[axis + 1:])\n                retDataSet.append(reducedFeatVec)\n            elif featVec[axis] == 'N':\n                reducedNullVec = featVec[:axis]\n                reducedNullVec.extend(featVec[axis + 1:])\n                nullDataSet.append(reducedNullVec)\n    elif LorR == 'L':\n        for featVec in dataSet:\n            if featVec[axis] != 'N':\n                if float(featVec[axis]) < value:\n                    retDataSet.append(featVec)\n            elif featVec[axis] == 'N':\n                nullDataSet.append(featVec)\n    elif LorR == 'R':\n        for featVec in dataSet:\n            if featVec[axis] != 'N':\n                if float(featVec[axis]) > value:\n                    retDataSet.append(featVec)\n            elif featVec[axis] == 'N':\n                nullDataSet.append(featVec)\n\n
    totalWeightSub = calcTotalWeight(retDataSet, -1, True)  # total weight of the non-missing samples in this branch\n    for nullVec in nullDataSet:  # push the missing-value samples into the branch, scaling their weight by the branch's share\n        nullVec[-2] = float(nullVec[-2]) * totalWeightSub / totalWeightV\n        retDataSet.append(nullVec)\n\n    return retDataSet\n\n\n
def calcTotalWeight(dataSet, labelIndex, isContainNull):\n    \"\"\"\n    type: (list, int, bool) -> float\n    Compute the total (weighted) sample count of the data set for one feature.\n    :param dataSet: the data set\n    :param labelIndex: index of the feature\n    :param isContainNull: whether samples with a missing value are included\n    :return: the total weight of the data set\n    \"\"\"\n    totalWeight = 0.0\n    for featVec in dataSet:  # iterate over the samples\n        weight = float(featVec[-2])\n        if isContainNull is False and featVec[labelIndex] != 'N':\n            totalWeight += weight  # weighted count of the non-missing samples\n        if isContainNull is True:\n            totalWeight += weight  # weighted count of all samples\n    return totalWeight\n\n\n
def calcGain(dataSet, labelIndex, labelPropertyi):\n    \"\"\"\n    type: (list, int, int) -> float\n    Compute and return the information gain of a feature.\n    dataSet: the data set\n    labelIndex: index of the feature\n    labelPropertyi: feature type, 0 discrete, 1 continuous\n    \"\"\"\n    baseEntropy = calcShannonEnt(dataSet, labelIndex)  # entropy of the parent node\n    featList = [example[labelIndex] for example in dataSet]  # list of feature values\n    uniqueVals = set(featList)  # all distinct values of the feature\n    newEntropy = 0.0\n    totalWeight = 0.0\n    totalWeightV = 0.0\n    totalWeight = calcTotalWeight(dataSet, labelIndex, True)  # total sample weight\n    totalWeightV = calcTotalWeight(dataSet, labelIndex, False)  # weight of the non-missing samples\n    if labelPropertyi == 0:  # discrete feature\n        for value in uniqueVals:  # split on each value and accumulate the weighted entropy of the subsets\n            if value != 'N':\n                subDataSet = splitDataSet(dataSet, labelIndex, value)\n                totalWeightSub = 0.0\n                totalWeightSub = calcTotalWeight(subDataSet, labelIndex, True)\n                prob = totalWeightSub / totalWeightV\n
                newEntropy += prob * calcShannonEnt(subDataSet, labelIndex)\n    else:  # continuous feature\n        uniqueValsList = list(uniqueVals)\n        if 'N' in uniqueValsList:\n            uniqueValsList.remove('N')\n        sortedUniqueVals = sorted(uniqueValsList)  # sort the feature values\n        listPartition = []\n        minEntropy = inf\n        if len(sortedUniqueVals) == 1:  # with a single value there is only a left subset and no right subset\n            totalWeightLeft = calcTotalWeight(dataSet, labelIndex, True)\n            probLeft = totalWeightLeft / totalWeightV\n            minEntropy = probLeft * calcShannonEnt(dataSet, labelIndex)\n        else:\n            for j in range(len(sortedUniqueVals) - 1):  # candidate split points\n                partValue = (float(sortedUniqueVals[j]) + float(\n                    sortedUniqueVals[j + 1])) / 2\n                # entropy of the two subsets for this split point\n                dataSetLeft = splitDataSet(dataSet, labelIndex, partValue, 'L')\n                dataSetRight = splitDataSet(dataSet, labelIndex, partValue, 'R')\n                totalWeightLeft = 0.0\n                totalWeightLeft = calcTotalWeight(dataSetLeft, labelIndex, True)\n                totalWeightRight = 0.0\n                totalWeightRight = calcTotalWeight(dataSetRight, labelIndex, True)\n                probLeft = totalWeightLeft / totalWeightV\n                probRight = totalWeightRight / totalWeightV\n                Entropy = probLeft * calcShannonEnt(dataSetLeft, labelIndex) + \\\n                          probRight * calcShannonEnt(dataSetRight, labelIndex)\n                if Entropy < minEntropy:  # keep the smallest entropy\n                    minEntropy = Entropy\n        newEntropy = minEntropy\n    gain = totalWeightV / totalWeight * (baseEntropy - newEntropy)\n    return gain\n\n\n
def calcGainRatio(dataSet, labelIndex, labelPropertyi):\n    \"\"\"\n    type: (list, int, int) -> float, float\n    Compute the gain ratio of a feature; returns the gain ratio and, for a continuous feature, the best split value.\n    dataSet: the data set\n    labelIndex: index of the feature\n    labelPropertyi: feature type, 0 discrete, 1 continuous\n    \"\"\"\n    baseEntropy = calcShannonEnt(dataSet, labelIndex)  # entropy of the parent node\n    featList = [example[labelIndex] for example in dataSet]  # list of feature values\n    uniqueVals = set(featList)  # all distinct values of the feature\n    newEntropy = 0.0\n    bestPartValuei = None\n    IV = 0.0\n    totalWeight = 0.0\n    totalWeightV = 0.0\n    totalWeight = calcTotalWeight(dataSet, labelIndex, True)  # total sample weight\n    totalWeightV = calcTotalWeight(dataSet, labelIndex, False)  # weight of the non-missing samples\n
    if labelPropertyi == 0:  # discrete feature\n        for value in uniqueVals:  # split on each value and accumulate the weighted entropy of the subsets\n            subDataSet = splitDataSet(dataSet, labelIndex, value)\n            totalWeightSub = 0.0\n            totalWeightSub = calcTotalWeight(subDataSet, labelIndex, True)\n            if value != 'N':\n                prob = totalWeightSub / totalWeightV\n                newEntropy += prob * calcShannonEnt(subDataSet, labelIndex)\n            prob1 = totalWeightSub / totalWeight\n            IV -= prob1 * log(prob1, 2)\n    else:  # continuous feature\n        uniqueValsList = list(uniqueVals)\n        if 'N' in uniqueValsList:\n            uniqueValsList.remove('N')\n        # total weight of the missing-value samples, which enters IV\n        totalWeightN = 0.0\n        dataSetNull = splitDataSet(dataSet, labelIndex, 'N')\n        totalWeightN = calcTotalWeight(dataSetNull, labelIndex, True)\n        probNull = totalWeightN / totalWeight\n        if probNull > 0.0:\n            IV += -1 * probNull * log(probNull, 2)\n\n
        sortedUniqueVals = sorted(uniqueValsList)  # sort the feature values\n        listPartition = []\n        minEntropy = inf\n\n        if len(sortedUniqueVals) == 1:  # with a single value there is only a left subset and no right subset\n            totalWeightLeft = calcTotalWeight(dataSet, labelIndex, True)\n            probLeft = totalWeightLeft / totalWeightV\n            minEntropy = probLeft * calcShannonEnt(dataSet, labelIndex)\n            IV = -1 * probLeft * log(probLeft, 2)\n        else:\n            for j in range(len(sortedUniqueVals) - 1):  # candidate split points\n                partValue = (float(sortedUniqueVals[j]) + float(\n                    sortedUniqueVals[j + 1])) / 2\n                # entropy of the two subsets for this split point\n                dataSetLeft = splitDataSet(dataSet, labelIndex, partValue, 'L')\n                dataSetRight = splitDataSet(dataSet, labelIndex, partValue, 'R')\n                totalWeightLeft = 0.0\n                totalWeightLeft = calcTotalWeight(dataSetLeft, labelIndex, True)\n                totalWeightRight = 0.0\n
                totalWeightRight = calcTotalWeight(dataSetRight, labelIndex, True)\n                probLeft = totalWeightLeft / totalWeightV\n                probRight = totalWeightRight / totalWeightV\n                Entropy = probLeft * calcShannonEnt(\n                    dataSetLeft, labelIndex) + probRight * calcShannonEnt(dataSetRight, labelIndex)\n                if Entropy < minEntropy:  # keep the smallest entropy\n                    minEntropy = Entropy\n                    bestPartValuei = partValue\n                probLeft1 = totalWeightLeft / totalWeight\n                probRight1 = totalWeightRight / totalWeight\n                IV += -1 * (probLeft1 * log(probLeft1, 2) + probRight1 * log(probRight1, 2))\n\n        newEntropy = minEntropy\n    gain = totalWeightV / totalWeight * (baseEntropy - newEntropy)\n    if IV == 0.0:  # if the feature takes a single value IV is 0; use a tiny value to avoid dividing by zero\n        IV = 0.0000000001\n    gainRatio = gain / IV\n    return gainRatio, bestPartValuei\n\n\n
# Choose the best feature to split the data set on\ndef chooseBestFeatureToSplit(dataSet, labelProperty):\n    \"\"\"\n    type: (list, list) -> int, float\n    :param dataSet: the data set\n    :param labelProperty: feature types, 1 continuous, 0 discrete\n    :return: index of the best feature and, for a continuous feature, its split value\n    \"\"\"\n    numFeatures = len(labelProperty)  # number of features\n    bestInfoGainRatio = 0.0\n    bestFeature = -1\n    bestPartValue = None  # best split value for a continuous feature\n    gainSum = 0.0\n    gainAvg = 0.0\n    for i in range(numFeatures):  # loop over the features\n        infoGain = calcGain(dataSet, i, labelProperty[i])\n        gainSum += infoGain\n    gainAvg = gainSum / numFeatures\n    for i in range(numFeatures):  # loop over the features\n        infoGainRatio, bestPartValuei = calcGainRatio(dataSet, i, labelProperty[i])\n        infoGain = calcGain(dataSet, i, labelProperty[i])\n        if infoGainRatio > bestInfoGainRatio and infoGain > gainAvg:  # C4.5 heuristic: highest gain ratio among features whose gain is above average\n            bestInfoGainRatio = infoGainRatio\n            bestFeature = i\n            bestPartValue = bestPartValuei\n    return bestFeature, bestPartValue\n\n\n
# Return the class with the largest total weight (plus the top two weights)\ndef majorityCnt(classList, weightList):\n    classCount = {}\n    for i in range(len(classList)):\n        if classList[i] not in classCount.keys():\n            classCount[classList[i]] = 0.0\n        classCount[classList[i]] += round(float(weightList[i]),1)\n\n    # python 2.7\n    # sortedClassCount = sorted(classCount.iteritems(),\n    #                           key=operator.itemgetter(1), reverse=True)\n    sortedClassCount = sorted(classCount.items(),\n                              key=operator.itemgetter(1), reverse=True)\n    if len(sortedClassCount) == 1:\n        return (sortedClassCount[0][0],sortedClassCount[0][1],0.0)\n    return (sortedClassCount[0][0], sortedClassCount[0][1], sortedClassCount[1][1])\n\n\n
# Build the tree: data set, feature labels, feature types (0 discrete, 1 continuous)\ndef createTree(dataSet, labels, labelProperty):\n    classList = [example[-1] for example in dataSet]  # class vector\n    weightList = [example[-2] for example in dataSet]  # weight vector\n    if classList.count(classList[0]) == len(classList):  # only one class left: return a leaf\n        totalWeight = calcTotalWeight(dataSet,0,True)\n        return (classList[0], round(totalWeight,1),0.0)\n    #totalWeight = calcTotalWeight(dataSet, 0, True)\n    if len(dataSet[0]) == 1:  # all features used up: return the majority class\n        return majorityCnt(classList, weightList)\n    bestFeat, bestPartValue = chooseBestFeatureToSplit(dataSet,\n                                                       labelProperty)  # index of the best feature\n    if bestFeat == -1:  # no feature qualifies: return the majority class\n        return majorityCnt(classList, weightList)\n
    if labelProperty[bestFeat] == 0:  # discrete feature\n        bestFeatLabel = labels[bestFeat]\n        myTree = {bestFeatLabel: {}}\n        labelsNew = copy.copy(labels)\n        labelPropertyNew = copy.copy(labelProperty)\n        del (labelsNew[bestFeat])  # a chosen discrete feature is not reused below this node\n        del (labelPropertyNew[bestFeat])\n        featValues = [example[bestFeat] for example in dataSet]\n        uniqueValue = set(featValues)  # all values of the feature\n        uniqueValue.discard('N')\n        for value in uniqueValue:  # recurse into each branch\n            subLabels = labelsNew[:]\n            subLabelProperty = labelPropertyNew[:]\n            myTree[bestFeatLabel][value] = createTree(\n
                splitDataSetWithNull(dataSet, bestFeat, value), subLabels,\n                subLabelProperty)\n    else:  # continuous feature: keep it and build a left and a right subtree\n        bestFeatLabel = labels[bestFeat] + '<' + str(bestPartValue)\n        myTree = {bestFeatLabel: {}}\n        subLabels = labels[:]\n        subLabelProperty = labelProperty[:]\n        # build the left subtree\n        valueLeft = 'Y'\n        myTree[bestFeatLabel][valueLeft] = createTree(\n            splitDataSetWithNull(dataSet, bestFeat, bestPartValue, 'L'), subLabels,\n            subLabelProperty)\n        # build the right subtree\n        valueRight = 'N'\n        myTree[bestFeatLabel][valueRight] = createTree(\n            splitDataSetWithNull(dataSet, bestFeat, bestPartValue, 'R'), subLabels,\n            subLabelProperty)\n    return myTree\n\n\n
# Classify a test vector; returns a dict of per-class weights\ndef classify(inputTree, classList, featLabels, featLabelProperties, testVec):\n    firstStr = list(inputTree.keys())[0]  # root node\n    firstLabel = firstStr\n    lessIndex = str(firstStr).find('<')\n    if lessIndex > -1:  # continuous feature\n        firstLabel = str(firstStr)[:lessIndex]\n    secondDict = inputTree[firstStr]\n    featIndex = featLabels.index(firstLabel)  # feature at the root node\n    classLabel = {}\n    for classI in classList:\n        classLabel[classI] = 0.0\n
    for key in secondDict.keys():  # loop over the branches\n        if featLabelProperties[featIndex] == 0:  # discrete feature\n            if testVec[featIndex] == key:  # the test sample follows this branch\n                if type(secondDict[key]).__name__ == 'dict':  # not a leaf: recurse\n                    classLabelSub = classify(secondDict[key], classList, featLabels,\n                                             featLabelProperties, testVec)\n                    for classKey in classLabel.keys():\n                        classLabel[classKey] += classLabelSub[classKey]\n                else:  # a leaf: accumulate its class weights\n                    for classKey in classLabel.keys():\n                        if classKey == secondDict[key][0]:\n                            classLabel[classKey] += secondDict[key][1]\n                        else:\n                            classLabel[classKey] += secondDict[key][2]\n            elif testVec[featIndex] == 'N':  # the test sample's value is missing: follow every branch\n                if type(secondDict[key]).__name__ == 'dict':  # not a leaf: recurse\n                    classLabelSub = classify(secondDict[key], classList, featLabels,\n                                             featLabelProperties, testVec)\n                    for classKey in classLabel.keys():\n                        classLabel[classKey] += classLabelSub[classKey]\n                else:  # a leaf: accumulate its class weights\n                    for classKey in classLabel.keys():\n                        if classKey == secondDict[key][0]:\n                            classLabel[classKey] += secondDict[key][1]\n                        else:\n                            classLabel[classKey] += secondDict[key][2]\n
        else:\n            partValue = float(str(firstStr)[lessIndex + 1:])\n            if testVec[featIndex] == 'N':  # the test sample's value is missing: sum the results over both branches\n                # follow this branch\n                if type(secondDict[key]).__name__ == 'dict':  # not a leaf: recurse\n                    classLabelSub = classify(secondDict[key], classList, featLabels,\n                                             featLabelProperties, testVec)\n                    for classKey in classLabel.keys():\n                        classLabel[classKey] += classLabelSub[classKey]\n                else:  # a leaf: accumulate its class weights\n                    for classKey in classLabel.keys():\n                        if classKey == secondDict[key][0]:\n                            classLabel[classKey] += secondDict[key][1]\n                        else:\n                            classLabel[classKey] += secondDict[key][2]\n
            elif float(testVec[featIndex]) <= partValue and key == 'Y':  # go down the left subtree\n                if type(secondDict['Y']).__name__ == 'dict':  # not a leaf: recurse\n                    classLabelSub = classify(secondDict['Y'], classList, featLabels,\n                                             featLabelProperties, testVec)\n                    for classKey in classLabel.keys():\n                        classLabel[classKey] += classLabelSub[classKey]\n                else:  # a leaf: accumulate its class weights\n                    for classKey in classLabel.keys():\n                        if classKey == secondDict[key][0]:\n                            classLabel[classKey] += secondDict['Y'][1]\n                        else:\n                            classLabel[classKey] += secondDict['Y'][2]\n
            elif float(testVec[featIndex]) > partValue and key == 'N':\n                if type(secondDict['N']).__name__ == 'dict':  # not a leaf: recurse\n                    classLabelSub = classify(secondDict['N'], classList, featLabels,\n                                             featLabelProperties, testVec)\n                    for classKey in classLabel.keys():\n                        classLabel[classKey] += classLabelSub[classKey]\n                else:  # a leaf: accumulate its class weights\n
                    for classKey in classLabel.keys():\n                        if classKey == secondDict[key][0]:\n                            classLabel[classKey] += secondDict['N'][1]\n                        else:\n                            classLabel[classKey] += secondDict['N'][2]\n\n    return classLabel\n\n\n
# Persist a decision tree to disk\ndef storeTree(inputTree, filename):\n    import pickle\n    fw = open(filename, 'wb')  # pickle needs a binary-mode file under Python 3\n    pickle.dump(inputTree, fw)\n    fw.close()\n\n\n
# Load a decision tree; returns None if the file does not exist\ndef grabTree(filename):\n    import pickle\n    if os.path.isfile(filename):\n        fr = open(filename, 'rb')\n        return pickle.load(fr)\n    else:\n        return None\n\n\n
# Count the classification errors of the tree on a test set\ndef testing(myTree, classList, data_test, labels, labelProperties):\n    error = 0.0\n    for i in range(len(data_test)):\n        classLabelSet = classify(myTree, classList, labels, labelProperties, data_test[i])\n        maxWeight = 0.0\n        classLabel = ''\n        for item in classLabelSet.items():\n            if item[1] > maxWeight:\n                classLabel = item[0]\n                maxWeight = item[1]  # track the running maximum so the heaviest class wins\n        if classLabel != data_test[i][-1]:\n            error += 1\n    return float(error)\n\n\n
# Count the errors of a majority-vote leaf on a test set\ndef testingMajor(major, data_test):\n    error = 0.0\n    for i in range(len(data_test)):\n        if major[0] != data_test[i][-1]:\n            error += 1\n    # print 'major %d' %error\n    return float(error)\n\n\n
# Post-pruning\ndef postPruningTree(inputTree, classSet, dataSet, data_test, labels, labelProperties):\n    firstStr = list(inputTree.keys())[0]\n    secondDict = inputTree[firstStr]\n    classList = [example[-1] for example in dataSet]\n    weightList = [example[-2] for example in dataSet]\n    featkey = copy.deepcopy(firstStr)\n    if '<' in firstStr:  # continuous feature: recover the label and split value with a regex\n        featkey = re.compile(\"(.+<)\").search(firstStr).group()[:-1]\n        featvalue = float(re.compile(\"(<.+)\").search(firstStr).group()[1:])\n    labelIndex = labels.index(featkey)\n    temp_labels = copy.deepcopy(labels)\n    temp_labelProperties = copy.deepcopy(labelProperties)\n    if labelProperties[labelIndex] == 0:  # discrete feature\n        del (labels[labelIndex])\n        del (labelProperties[labelIndex])\n
    for key in secondDict.keys():  # for each branch\n        if type(secondDict[key]).__name__ == 'dict':  # not a leaf\n            if temp_labelProperties[labelIndex] == 0:  # discrete\n                subDataSet = splitDataSet(dataSet, labelIndex, key)\n                subDataTest = splitDataSet(data_test, labelIndex, key)\n            else:\n                if key == 'Y':\n                    subDataSet = splitDataSet(dataSet, labelIndex, featvalue,\n                                              'L')\n                    subDataTest = splitDataSet(data_test, labelIndex,\n                                               featvalue, 'L')\n                else:\n                    subDataSet = splitDataSet(dataSet, labelIndex, featvalue,\n                                              'R')\n                    subDataTest = splitDataSet(data_test, labelIndex,\n                                               featvalue, 'R')\n            if len(subDataTest) > 0:\n                inputTree[firstStr][key] = postPruningTree(secondDict[key], classSet,\n                                                           subDataSet, subDataTest,\n                                                           copy.deepcopy(labels),\n                                                           copy.deepcopy(\n                                                               labelProperties))\n
    if testing(inputTree, classSet, data_test, temp_labels,\n               temp_labelProperties) <= testingMajor(majorityCnt(classList, weightList),\n                                                     data_test):\n        return inputTree\n    return majorityCnt(classList,weightList)\n","sub_path":"decision_tree/C4.5/C45.py","file_name":"C45.py","file_ext":"py","file_size_in_byte":24325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"422127153","text":"import requests,zipfile,io,datetime\nimport pandas as pd\ndef split(date):\n    return date.split('/')\ndef mapped(lis):\n    return list(map(int,lis))\ndef urlmodify(year,month,day):\n    base_url='https://www1.nseindia.com/content/historical/EQUITIES/'\n    base_suffix='bhav.csv.zip'\n    month_dict={1:'JAN',2:'FEB',3:'MAR',4:'APR',5:'MAY',6:'JUN',7:'JUL',8:'AUG',9:'SEP',10:'OCT',11:'NOV',12:'DEC'}\n    base_month=month_dict[month]\n    day = str(day).zfill(2)\n
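    # builds e.g. https://www1.nseindia.com/content/historical/EQUITIES/2020/JAN/cm02JAN2020bhav.csv.zip\n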
    final_url=base_url+str(year)+'/'+base_month+'/'+\"cm\"+day+base_month+str(year)+base_suffix\n    print(final_url)\n    return final_url\ndef dlfile(file):\n    r=requests.get(file)\n    if r.status_code!=200:\n        return 0\n    zipf=zipfile.ZipFile(io.BytesIO(r.content))\n    zipf.extractall()\n    file=''.join(zipf.namelist())\n    print(file)\n    return file\n
def merge(curr,old):\n    df1=pd.read_csv(curr)\n    df2=pd.read_csv(old)\n    join = pd.merge(df1, df2, on='ISIN')\n    df_final = join.loc[:, ['SYMBOL_x', 'ISIN', 'CLOSE_x', 'CLOSE_y']]\n    formula = ((df_final['CLOSE_x'] - df_final['CLOSE_y']) / df_final['CLOSE_y']) * 100\n    df_final = df_final.assign(changed=formula.values)\n    df_final = df_final.round(2)\n    df_final.sort_values(by='changed', axis=0, ascending=False, inplace=True)\n    # cd_day etc. are module-level globals assigned below, before merge() is called\n    file_name=\"report\"+str(cd_day)+str(cd_month)+\"_\"+str(od_day)+str(od_month)+\".csv\"\n    df_final.to_csv(file_name)\n    return 0\n
current_date=input(\"Enter Current Date YYYY/M/DD\\t\")\nold_date=input(\"Enter old date in YYYY/M/DD\\t\")\ncurrent_date=split(current_date)\ncurrent_date=mapped(current_date)\ncd_year,cd_month,cd_day=current_date\nold_date=split(old_date)\nold_date=mapped(old_date)\nod_year,od_month,od_day=old_date\ncdo=datetime.datetime(cd_year,cd_month,cd_day)\nodo=datetime.datetime(od_year,od_month,od_day)\nif cdo.isoweekday() not in {6,7} and odo.isoweekday() not in {6,7}:\n    cu_url=urlmodify(cd_year,cd_month,cd_day)\n    old_url = urlmodify(od_year,od_month,od_day)\n    cu_file=dlfile(cu_url)\n    od_file=dlfile(old_url)\n    if cu_file and od_file:\n        merge(cu_file,od_file)\n    else:\n        print(\"File not found. Retry\")\nelse:\n    if cdo.isoweekday() in {6,7}:\n        print(\"Current date should be a weekday or trading day. Please don't try again\")\n    else:\n        print(\"Old date should be a weekday or trading day\")","sub_path":"final.py","file_name":"final.py","file_ext":"py","file_size_in_byte":2351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"602192262","text":"from django import forms\nfrom .models import Unit, PreventiveMaintenance, AREA\n\nclass UnitForm(forms.ModelForm):\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        for field in self.fields:\n            self.fields[field].widget.attrs['class'] = 'form-control form-control-sm'\n\n    area = forms.ChoiceField(choices=AREA)\n\n    class Meta:\n        model = Unit\n        exclude = ('active','created_at', 'updated_at', 'created_by', 'updated_by')\n\n\nclass PreventiveMaintenanceForm(forms.ModelForm):\n\n    date_format = 'mm/dd/yyyy'\n    time_format = 'hh:mm'\n\n    def __init__(self, *args, **kwargs): \n        super(PreventiveMaintenanceForm, self).__init__(*args, **kwargs)\n        self.fields['service_report_number'].label = 'SR #'\n        self.fields['target_date'].widget.attrs['placeholder'] = self.date_format\n        self.fields['target_time'].widget.attrs['placeholder'] = self.time_format\n        self.fields['actual_date'].widget.attrs['placeholder'] = self.date_format\n\n    class Meta:\n        model = PreventiveMaintenance\n        #fields = ('target_date', 'target_time', 'actual_date', 'pm_date_done')\n        exclude = ('pm_done', 'pm_date_done', 'status','active','created_at', 'updated_at', 'created_by', 'updated_by')","sub_path":"inventory_management/inventory/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"636727059","text":"#!/usr/bin/env python\n# -*-coding:utf-8-*-\nimport os\nfrom apps.app import csrf\n
from apps.core.blueprint import theme_view\nfrom flask import render_template, request, send_file, g\nfrom werkzeug.exceptions import abort\nfrom apps.core.flask.permission import page_permission_required\nfrom apps.core.utils.get_config import get_config\nfrom apps.modules.global_data.process.global_data import get_global_site_data\n\n__author__ = \"Allen Woo\"\n\n
# robots.txt\n@csrf.exempt\n@theme_view.route('/robots.txt', methods=['GET'])\ndef robots():\n    \"\"\"\n    robots.txt\n    :return:\n    \"\"\"\n    absolute_path = \"{}/{}/pages/robots.txt\".format(\n        theme_view.template_folder, get_config(\n            \"theme\", \"CURRENT_THEME_NAME\"))\n    return send_file(absolute_path)\n\n\n
@csrf.exempt\n@theme_view.route('/', methods=['GET', 'POST'])\n@page_permission_required()\ndef index():\n    return get_render_template(\"index\")\n\n\n
@csrf.exempt\n@theme_view.route('/<path:path>', methods=['GET'])\n@page_permission_required()\ndef pages(path):\n    \"\"\"\n    GET:\n    Generic view function; the public pages are all served through here.\n    :param path:\n    :return:\n    \"\"\"\n    return get_render_template(path.rstrip(\"/\"))\n\n\n
def get_render_template(path):\n    \"\"\"\n    Return a render_template for the given route path.\n    :param path:\n    :return:\n    \"\"\"\n    # prepend the current theme directory\n    path = \"{}/pages/{}\".format(get_config(\"theme\",\n                                           \"CURRENT_THEME_NAME\"), path)\n    absolute_path = os.path.abspath(\n        \"{}/{}.html\".format(theme_view.template_folder, path))\n    if not os.path.isfile(absolute_path):\n        path = \"{}/index\".format(path)\n        absolute_path = os.path.abspath(\n            \"{}/{}.html\".format(theme_view.template_folder, path))\n        if not os.path.isfile(absolute_path):\n            abort(404)\n\n    data = dict(request.args.items())\n    g.site_global = dict(g.site_global,\n                         **get_global_site_data(req_type=\"view\"))\n    return render_template('{}.html'.format(path), data=data)\n","sub_path":"apps/routing/theme_views.py","file_name":"theme_views.py","file_ext":"py","file_size_in_byte":2030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"315309868","text":"import re\nimport time\nfrom unittest import skip\n\nfrom selenium.webdriver.common.keys import Keys\n\nfrom functional_tests.base import FunctionalTest\n\n\n
class NewVisitorTest(FunctionalTest):\n    \"\"\"Functional test for a new visitor\"\"\"\n\n    def test_can_create_new_bell(self):\n        \"\"\"test: a new bell can be created\"\"\"\n\n        # Ivan opens the site\n        self.browser.get(self.live_server_url)\n\n        # He sees the site title \"Мой колокольчик\" (\"My bell\"); the UI strings below stay in Russian because the assertions compare against the rendered page\n        self.assertEqual(self.browser.title, \"Мой колокольчик\")\n        header_text = self.browser.find_element_by_tag_name('h1').text\n        self.assertIn('Создай свой колокольчик!', header_text)\n\n        # He sees the input field for a new bell's name\n        self.add_new_bell(\"Важные письма\")\n\n        # and lands on the new bell's page\n        self.assertEqual(self.browser.title, \"Колокольчик: Важные письма\")\n        header_text = self.browser.find_element_by_css_selector('.bell-title').text\n        self.assertEqual('Важные письма', header_text)\n\n        # He sees a message saying the bell is waiting for events\n        self.wait_for_stop_ring()\n\n
    def test_ring_bell_after_post_request(self):\n        \"\"\"test: the bell rings after a POST request\"\"\"\n\n        # Ivan creates a new bell\n        self.add_new_bell(\"Важные письма\")\n\n        # now he sees the address and instructions for sending notifications to this bell via POST requests\n        address_for_post = self.browser.find_element_by_css_selector('.bell-info').text\n        url_search = re.search(r'http://.+/bells/.+/events/add', address_for_post)\n        if not url_search:\n            self.fail('POST-request address not found.')\n        url = url_search.group(0)\n        self.assertIn(self.live_server_url, url)\n\n        # no events yet, so the bell is silent\n        self.wait_for_stop_ring()\n\n        # he sends a POST request from another application to this address\n        self.send_event_to_bell(url, 'Новое письмо от Владимира!')\n\n        # the bell starts ringing\n        self.wait_for_ring()\n\n        # and the message appears on the screen\n        message = self.browser.find_element_by_css_selector('.bell-status__text').text\n        self.assertEqual(message, 'Новое письмо от Владимира!')\n\n        # Ivan presses the \"Выключить\" (turn off) button\n        stop_ring = self.browser.find_element_by_css_selector(\".bell-status__read\")\n        stop_ring.click()\n\n        # the ringing stops\n        self.wait_for_stop_ring()\n\n
    def test_bell_page_displays_the_latest_events(self):\n        \"\"\"test: the bell page displays the latest events\"\"\"\n\n        # Ivan creates a new bell\n        self.add_new_bell(\"Важные письма\")\n\n        # no notifications have reached this bell yet, so the list is empty\n        events = self.browser.find_elements_by_css_selector('.events-table tbody tr')\n        self.assertEqual(events, [])\n\n        # Ivan sends a notification\n        url = self.browser.find_element_by_css_selector('.bell-info__add-event-url')\n        self.send_event_to_bell(url.text, 'Новое письмо от Ивана!')\n\n        # the notification now shows up in the event list\n        event = self.wait_for(lambda: self.browser.find_element_by_css_selector('.events-table tbody tr'))\n        self.assertIn('Новое письмо от Ивана!', event.text)\n","sub_path":"functional_tests/test_simple_creation_bell.py","file_name":"test_simple_creation_bell.py","file_ext":"py","file_size_in_byte":4310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"471371919","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Feb 15 22:10:41 2020\n\nComputes and plots Contact force info (Contact and Normal), average overlap,\naverage height, average coordination number (CN) and the radial distribution function (RDF),\nfor tests of different parameter values, when given the variable name as used\nin LIGGGHTS simulations.\n\nScript sections are:\n1: Define inputs, extract data and perform necessary calculations (slow)\n2: Remove results associated with useless variable values (too high/small/etc) (fast)\n3: Plot data stored in local variables (fast)\n4: Save figures using variable name and range of variable values\n\n@author: Noah\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.stats as scp\nimport matplotlib.ticker\nfrom matplotlib.ticker import AutoMinorLocator\nfrom matplotlib.offsetbox import AnchoredText\nfrom matplotlib.ticker import FormatStrFormatter\n\n\n\n
def contact_forces(fname):\n    # Input file stem up to and including /variable_0i/\n    # Returns contact_info = (c_force, n_force, t_force, delta)\n    allcforces = np.loadtxt(fname + \"post/contact_force.txt\", skiprows=3)#, max_rows = 1000)\n    allnforces = np.loadtxt(fname + \"post/force_normal.txt\", skiprows=3)\n    alltforces = np.loadtxt(fname + \"post/force_tangential.txt\", skiprows = 3)\n    alldelta = np.loadtxt(fname + \"post/delta.txt\", skiprows = 3)\n    indices = np.nonzero(allcforces)\n\n    indices_no_dup = []\n    for i in indices[0]: # remove duplicate indices to select only one row\n        if i not in indices_no_dup:\n            indices_no_dup.append(i)\n\n    # Select only the non-zero force elements\n    cforces = allcforces[indices_no_dup,:]\n    nforces = allnforces[indices_no_dup,:]\n    tforces = alltforces[indices_no_dup,:]\n    delta = alldelta[indices_no_dup]\n\n    # Sum force components to get overall force\n    c_force = 
np.zeros((len(cforces),1))\n n_force = np.zeros((len(nforces),1))\n t_force = np.zeros((len(tforces),1))\n\n for i in range(0,len(cforces)): # combine x, y, & z force components\n c_force[i] = (10**6)*np.sqrt(cforces[i,0]**2 + cforces[i,1]**2 + cforces[i,2]**2)\n n_force[i] = (10**6)*np.sqrt(nforces[i,0]**2 + nforces[i,1]**2 + nforces[i,2]**2)\n t_force[i] = np.sqrt(tforces[i,0]**2 + tforces[i,1]**2 + tforces[i,2]**2)\n\n contact_info = (c_force, n_force, t_force, delta)\n return contact_info\n\ndef average_z(fname):\n # Calculate the average height using this file stem up to and including /post/\n z_data = np.loadtxt(fname + \"post/height.txt\", skiprows = 3)\n av = np.average(z_data)\n std = np.std(z_data)\n b = (av, std)\n return b\n\n\"\"\"\n# # # # # # # # # # # # # # # # # # # # \n\"\"\"\n# Inputs\nvariable = \"scalefactor\"\nvariablelabel = \"Scale Factor\"\nunit = \"\"\nerrors = 1 # Error bars: on = 1, off = 0 \nplot_X_Fu = 0 # X. Fu data: on = 1, off = 0\n\n# Make a list of file locations\nfilelist = []\nrdffilelist = []\ncoordfilelist = []\n\n# Initialise lists to be plot later\nc_force = []\nn_force = []\nt_force = []\naverage_cforce = []\naverage_nforce = []\nav_delta = []\ndelta_err = []\nz_av = []\nz_std = []\ncoordination = []\ncoordination_stdev = []\nRDFs = []\n\np_size = 200e-6 # Particle diameter for delta and z\nradius = p_size/2\n\n#filestem = str(\"/Users/Noah/remote/LIGGGHTS/Run/ContactModel/\" + \n # \"packing/thornton_ning_test/small_particles/\" + variable + \"/\")\nfilestem = str(\"/Users/Noah/remote/LIGGGHTS/Run/ContactModel/\" + \n \"packing/forced_packing/wiggle/scaling_tests/full_scale/no_tapping/\" + variable + \"/\")\n\n# Get parameter values\nvalues = np.loadtxt(filestem + \"variables.txt\", skiprows=1)\nN = int(values[0]) # Number of variable cases to load\nvalues = np.delete(values,[0])\n\nprint(\"\\n\" + \"Loading \" + str(N) + \" values of \" + variable + \" : \" + str(values) + \"\\n\")\n\n# Make lists of files to load\nfor i in range(1,N+1):\n trial_no = str(i).zfill(2)\n filelist.append(filestem + variable + \"_\" + trial_no + \"/\")\n rdffilelist.append(filestem + variable + \"_\" + trial_no + \"/rdfoutput.txt\")\n coordfilelist.append(filestem + variable + \"_\" + trial_no + \"/post/coordination.txt\")\nprint(\"\\n\" + \"Wait for data extraction ...\" + \"\\n\")\n\n## Contact force, Normal Force, Tangential Force, Delta\nfor i in range(len(filelist)): # Get data from files, remove 0 values\n X = contact_forces(filelist[i])\n c_force.append(X[0])\n n_force.append(X[1])\n t_force.append(X[2])\n delta = X[3]\n \n av_delta.append(np.average(delta)*100/(p_size*values[i]**0.4)) # delta overlap in %\n delta_err.append((np.std(delta)*100/(p_size*values[i]**0.4))/np.sqrt(len(delta)))\n average_cforce.append(np.average(c_force[i]))\n average_nforce.append(np.average(n_force[i]))\n\n## Average Height, into z_av and z_std\nfor i in range(len(filelist)):\n z = average_z(filelist[i])\n z_av.append(z[0]/(p_size*values[i]**0.4))\n z_std.append(z[1]/(p_size*values[i]**0.4))\n\n## Coordination Number, into list 'coordination' and 'coordination_stdev'\nfor fname in coordfilelist:\n with open(fname) as myfile:\n head = [next(myfile) for x in range(3)]\n x = head[1].split(\"\\t\")\n n_particle = int(str(x[1]).rstrip(\"\\n\"))\n \n # Now import coordination number data from row 3 down to n_particle\n coordata = np.loadtxt(fname, skiprows=3, max_rows=n_particle)\n\n # Calculate average coordination number and standard deviation\n av = np.average(coordata)\n std = 
np.std(coordata)\n    # Print and save to file\n    a = coordfilelist.index(fname)\n    coordination.append(av)\n    coordination_stdev.append(std)\n    #print(variable + \" = \" + str(values[a]))\n    #print(\"\\t\"+\"Average coordination number = \" + str(av)[:5] + \"\\n\\t\" + \"StDev = \" + str(std)[:5])\n\n
## RDF, into list 'RDFs'\nfor fname in rdffilelist:\n    rdfdata = np.loadtxt(fname, skiprows=5)\n    a = rdffilelist.index(fname)\n    norm_radius = rdfdata[:,1]/(2*radius*values[a]**0.4) # normalise radial distance using radius\n    rdf = rdfdata[:,2]\n    rdf_2 = rdf/rdf[-1] # set RDF to be 1 at 6 x diameter\n    #a = rdffilelist.index(fname)\n    RDFs.append(rdf_2)\n\nrun_no = 0\nprint(\"... data extraction complete.\")\n
#%% Data Selection / Manipulation\n# Change if you don't want to plot all parameter runs\n\nremove_end = 1 # remove from high end\nremove_beginning = 0 # remove from low end\n\nif run_no < 1: # Ensure nothing removed on first run:\n    remove_end = 0\n    remove_beginning = 0\n\n
if remove_end > 0:\n    for i in range (remove_end):\n        #for item in lists:\n        #    item = np.delete(item, [len(item)])\n        final_index = len(values) - 1\n        values = np.delete(values,[final_index])\n        av_delta = np.delete(av_delta,[final_index])\n        c_force = np.delete(c_force,[final_index])\n        n_force = np.delete(n_force,[final_index])\n        t_force = np.delete(t_force,[final_index])\n        coordination = np.delete(coordination,[final_index])\n        coordination_stdev = np.delete(coordination_stdev,[final_index])\n        average_cforce = np.delete(average_cforce,[final_index])\n        average_nforce = np.delete(average_nforce,[final_index])\n        z_av = np.delete(z_av,[final_index])\n        z_std = np.delete(z_std,[final_index])\n        RDFs = np.delete(RDFs, [final_index], 0)\n        print(\"Index number \" + str(final_index) + \" removed\")\n\n
if remove_beginning > 0:\n    for i in range (remove_beginning):\n        #for item in lists:\n        #    item = np.delete(item, [len(item)])\n        final_index = 0  # the remaining elements shift down after each delete, so always drop the current first one\n        values = np.delete(values,[final_index])\n        av_delta = np.delete(av_delta,[final_index])\n        c_force = np.delete(c_force,[final_index])\n        n_force = np.delete(n_force,[final_index])\n        t_force = np.delete(t_force,[final_index])\n        coordination = np.delete(coordination,[final_index])\n        coordination_stdev = np.delete(coordination_stdev,[final_index])\n        average_cforce = np.delete(average_cforce,[final_index])\n        average_nforce = np.delete(average_nforce,[final_index])\n        z_av = np.delete(z_av,[final_index])\n        z_std = np.delete(z_std,[final_index])\n        RDFs = np.delete(RDFs, [final_index], 0)  # keep axis 0 so RDF rows stay intact, as in the remove_end branch\n        print(\"Index number \" + str(i) + \" removed\")\n\n
N = len(values) # to correct for labelling\nrun_no = run_no + 1\nprint(\"Data selection complete, remaining values are: \\n\" + str(values))\n#%%\n## Plotting ##\nif errors == 1:\n    errortext = \"err\"\nelse:\n    errortext = \"\"\n\nprint(\"\\n\" + \"Plotting...\")\nbin_no = 100\nfig1 = plt.figure(1) # Contact Force PDF\nplt.ylabel(\"Probability Density\")\nplt.xlabel(\"Force Magnitude (x10^6 N)\")\nplt.title(\"Contact Force PDF\")\n\nfig2 = plt.figure(2) # Normal Force PDF\nplt.ylabel(\"Probability Density\")\nplt.xlabel(\"Force Magnitude (x10^6 N)\")\nplt.title(\"Normal Force PDF\")\n\n
for i in range(len(values)): # Plot the PDFs - semilogx\n    plt.figure(1) # Contact Force\n    x_cf = np.linspace(0, max(c_force[i]), bin_no) # NB should plot wrt c_force[i]\n    hist_cf = np.histogram(c_force[i], bins=bin_no)\n    c_force_dist = scp.rv_histogram(hist_cf)\n    plt.semilogx(x_cf, c_force_dist.pdf(x_cf), label = variablelabel + \" = \" + str(values[i]))\n\n    plt.figure(2) # Normal Force\n    x_nf = np.linspace(0, max(n_force[i]), bin_no) # NB should plot wrt c_force[i]\n    hist_nf = np.histogram(n_force[i], bins=bin_no)\n    n_force_dist = scp.rv_histogram(hist_nf)\n    plt.semilogx(x_nf, n_force_dist.pdf(x_nf), label = variablelabel + \" = \" + str(values[i]))\n\nplt.figure(1)\nplt.legend()\nplt.figure(2)\nplt.legend()\n\n
fig4 = plt.figure(4) # Contact and Normal Force Magnitudes\nplt.ylabel(\"Average Force (x10^6 N)\")\nplt.xlabel(variablelabel + unit)\nplt.title(\"Total Contact Force and Normal Force\")\nplt.plot(values, average_cforce, 'x', label = \"Contact force\")\nplt.plot(values, average_nforce, '.', label = \"Normal force\")\nplt.legend()\nplt.show()\n\n
fig3 = plt.figure(3) # Overlap\nplt.ylabel(\"Overlap (% average particle diameter)\")\nplt.title(\"Average Particle Overlap\")\nplt.xlabel(variablelabel + unit)\nif errors == 1:\n    plt.errorbar(values, av_delta, fmt = 'x', yerr = delta_err, elinewidth = 1, capsize = 1, label = \"Average Overlap\")\nelse:\n    plt.plot(values, av_delta, 'x', label = \"Average Overlap\")\nplt.ticklabel_format(axis = 'y', style = 'sci', scilimits = (0,0))\nplt.show()\n\n
fig5 = plt.figure(5) # Particle Height\nplt.ylabel(\"Height (particle diameters)\")\nplt.xlabel(variablelabel + unit)\nplt.title(\"Average Particle Height\")\nz_err = z_std/np.sqrt(n_particle)\n\nplt.ylim(11, 12)\n\nif errors == 1:\n    plt.errorbar(values, z_av, fmt = 'x', yerr = z_err, elinewidth = 1, capsize = 1)\nelse:\n    plt.plot(values, z_av, 'x')\nplt.show()\n\n
fig6 = plt.figure(6) #RDF\nplt.xlabel(\"r/d\")\nplt.ylabel(\"g(r)\")\nplt.title(\"RDF\")\n\nif plot_X_Fu == 1: # Read in and plot X. Fu et al data (normalised to finish at 1)\n    plt.figure(6)\n    x, y = np.loadtxt('/Users/Noah/python/RDF/data/X_Fu_monodisperse_normalised.csv',\n                      delimiter=',', unpack=True)\n    plt.plot(x,y, label = \"X. Fu et al.\")\n    Fu = \"_Fu\"\nelse:\n    print(\"Not plotting data from X_Fu...\")\n    Fu = \"\"\nfor i in range(len(values)):\n    plt.figure(6)\n    plt.plot(norm_radius, RDFs[i], label = variablelabel + \" = \" + str(values[i]))\n    plt.legend()\n\n
fig7 = plt.figure(7) # Coordination number\nplt.ylabel(\"Coordination Number\")\nplt.xlabel(variablelabel + unit)\nplt.title(\"Coordination Number\")\n\nax = plt.gca()\nax.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))\n\nplt.ylim(6, 6.2)\ncoordination_err = coordination_stdev/np.sqrt(n_particle)\nif errors == 1:\n    plt.errorbar(values[:len(coordination)],coordination, fmt = 'x', yerr = coordination_err, elinewidth = 1, capsize = 1)\nelse:\n    plt.plot(values[:len(coordination)],coordination,'x')\n\n\nplt.show()\n\n\n
#%% ## Save the Figures\nprint(\"\\n\" + \"Plotting complete, saving figures...\")\n\nfig1.savefig('/Users/Noah/python/tapping_packing/' + 'cF_' + variable + str(values[0]) +\n             \"_to_\" + str(values[-1]) + \"_\" + str(N) + Fu + '.png', dpi=400)\n\nfig2.savefig('/Users/Noah/python/tapping_packing/' + 'nF_' + variable + str(values[0]) +\n             \"_to_\" + str(values[-1]) + \"_\" + str(N) + Fu + '.png', dpi=400)\n\nfig3.savefig('/Users/Noah/python/tapping_packing/' + 'delta_' + variable + str(values[0]) +\n             \"_to_\" + str(values[-1]) + \"_\" + str(N) + Fu + errortext + '.png', dpi=400)\n\nfig4.savefig('/Users/Noah/python/tapping_packing/' + 'cF_nF_' + variable + str(values[0]) +\n             \"_to_\" + str(values[-1]) + \"_\" + str(N) + Fu + '.png', dpi=400)\n\nfig5.savefig('/Users/Noah/python/tapping_packing/' + 'z_' + variable + str(values[0]) +\n             \"_to_\" + str(values[-1]) + \"_\" + str(N) + Fu + errortext + '.png', dpi=400)\n\nfig6.savefig('/Users/Noah/python/tapping_packing/' + 'RDF_' + variable + str(values[0])+\n             \"_to_\" + str(values[-1]) + \"_\" + str(N) + Fu + '.png', dpi=400)\n\nfig7.savefig('/Users/Noah/python/tapping_packing/' + 'CN_' + variable + str(values[0])+\n             \"_to_\" + str(values[-1]) + \"_\" + str(N) + Fu + errortext + '.png', dpi=400)\n\nprint(\"\\nSaved to /Users/Noah/python/tapping_packing/\")","sub_path":"python-analysis/packing/scalefactor_F_RDF_CN.py","file_name":"scalefactor_F_RDF_CN.py","file_ext":"py","file_size_in_byte":13110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"549233057","text":"\"\"\"\ntest_variant_db.py\n\nCopyright 2012 Andres Riancho\n\nThis file is part of w3af, http://w3af.org/ .\n\nw3af is free software; you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation version 2 of the License.\n\nw3af is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with w3af; if not, write to the Free Software\nFoundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA\n\n\"\"\"\nimport unittest\n\nfrom w3af.core.data.parsers.url import URL\nfrom w3af.core.data.db.variant_db import VariantDB, DEFAULT_MAX_VARIANTS\nfrom w3af.core.controllers.misc.temp_dir import create_temp_dir\nfrom w3af.core.data.request.fuzzable_request import FuzzableRequest\nfrom w3af.core.data.parsers.utils.form_params import FormParameters\nfrom w3af.core.data.dc.headers import Headers\nfrom w3af.core.data.dc.factory import dc_from_form_params\nfrom w3af.core.data.dc.generic.kv_container import KeyValueContainer\n\n\nclass TestVariantDB(unittest.TestCase):\n\n def setUp(self):\n create_temp_dir()\n self.vdb = VariantDB()\n\n def test_db_int(self):\n url_fmt = 'http://w3af.org/foo.htm?id=%s'\n\n for i in xrange(DEFAULT_MAX_VARIANTS):\n url = URL(url_fmt % i)\n self.assertTrue(self.vdb.need_more_variants(url))\n self.vdb.append(url)\n\n extra_url = URL(url_fmt % (DEFAULT_MAX_VARIANTS + 1,))\n self.assertFalse(self.vdb.need_more_variants(extra_url))\n\n def test_db_int_int(self):\n url_fmt = 'http://w3af.org/foo.htm?id=%s&bar=1'\n\n for i in xrange(DEFAULT_MAX_VARIANTS):\n url = URL(url_fmt % i)\n self.assertTrue(self.vdb.need_more_variants(url))\n self.vdb.append(url)\n\n self.assertFalse(\n self.vdb.need_more_variants(URL(url_fmt % (DEFAULT_MAX_VARIANTS + 1,))))\n\n def test_db_int_int_var(self):\n url_fmt = 'http://w3af.org/foo.htm?id=%s&bar=%s'\n\n for i in xrange(DEFAULT_MAX_VARIANTS):\n url = URL(url_fmt % (i, i))\n self.assertTrue(self.vdb.need_more_variants(url))\n self.vdb.append(url)\n\n self.assertFalse(\n self.vdb.need_more_variants(URL(url_fmt % (DEFAULT_MAX_VARIANTS + 1, DEFAULT_MAX_VARIANTS + 1))))\n\n def test_db_int_str(self):\n url_fmt = 'http://w3af.org/foo.htm?id=%s&bar=%s'\n\n for i in xrange(DEFAULT_MAX_VARIANTS):\n url = URL(url_fmt % (i, 'abc' * i))\n self.assertTrue(self.vdb.need_more_variants(url))\n self.vdb.append(url)\n\n self.assertFalse(self.vdb.need_more_variants(\n URL(url_fmt % (DEFAULT_MAX_VARIANTS + 1, 'abc' * (DEFAULT_MAX_VARIANTS + 1)))))\n\n def test_db_int_str_then_int_int(self):\n url_fmt = 'http://w3af.org/foo.htm?id=%s&bar=%s'\n\n # Add (int, str)\n for i in xrange(DEFAULT_MAX_VARIANTS):\n url = URL(url_fmt % (i, 'abc' * i))\n self.assertTrue(self.vdb.need_more_variants(url))\n self.vdb.append(url)\n\n # Please note that in this case I'm asking for (int, int) and I added\n # (int, str) before\n self.assertTrue(\n self.vdb.need_more_variants(URL(url_fmt % (DEFAULT_MAX_VARIANTS + 1, DEFAULT_MAX_VARIANTS + 1))))\n\n # Add (int, int)\n for i in xrange(DEFAULT_MAX_VARIANTS):\n url = URL(url_fmt % (i, i))\n self.assertTrue(self.vdb.need_more_variants(url))\n self.vdb.append(url)\n\n self.assertFalse(\n self.vdb.need_more_variants(URL(url_fmt % (DEFAULT_MAX_VARIANTS + 1, DEFAULT_MAX_VARIANTS + 1))))\n\n def test_clean_reference_simple(self):\n self.assertEqual(self.vdb._clean_reference(URL('http://w3af.org/')),\n u'(GET)-http://w3af.org/')\n\n def test_clean_reference_file(self):\n self.assertEqual(\n self.vdb._clean_reference(URL('http://w3af.org/index.php')),\n u'(GET)-http://w3af.org/index.php')\n\n def test_clean_reference_directory_file(self):\n self.assertEqual(\n self.vdb._clean_reference(URL('http://w3af.org/foo/index.php')),\n u'(GET)-http://w3af.org/foo/index.php')\n\n def 
test_clean_reference_directory_file_int(self):\n        self.assertEqual(\n            self.vdb._clean_reference(URL('http://w3af.org/foo/index.php?id=2')),\n            u'(GET)-http://w3af.org/foo/index.php?id=number')\n\n
    def test_clean_reference_int(self):\n        self.assertEqual(\n            self.vdb._clean_reference(URL('http://w3af.org/index.php?id=2')),\n            u'(GET)-http://w3af.org/index.php?id=number')\n\n
    def test_clean_reference_int_str(self):\n        self.assertEqual(\n            self.vdb._clean_reference(\n                URL('http://w3af.org/index.php?id=2&foo=bar')),\n            u'(GET)-http://w3af.org/index.php?id=number&foo=string')\n\n
    def test_clean_reference_int_str_empty(self):\n        self.assertEqual(\n            self.vdb._clean_reference(\n                URL('http://w3af.org/index.php?id=2&foo=bar&spam=')),\n            u'(GET)-http://w3af.org/index.php?id=number&foo=string&spam=string')\n\n
    def test_clean_form_fuzzable_request(self):\n        fr = FuzzableRequest(URL(\"http://www.w3af.com/\"),\n                             headers=Headers([('Host', 'www.w3af.com')]),\n                             method='POST',\n                             post_data=KeyValueContainer(init_val=[('data', ['23'])]))\n\n        expected = u'(POST)-http://www.w3af.com/!data=number'\n        self.assertEqual(self.vdb._clean_fuzzable_request(fr), expected)\n\n
    def test_clean_form_fuzzable_request_form(self):\n        form_params = FormParameters()\n        form_params.add_input([(\"name\", \"username\"), (\"value\", \"abc\")])\n        form_params.add_input([(\"name\", \"address\"), (\"value\", \"\")])\n        form_params.set_action(URL('http://example.com/?id=1'))\n        form_params.set_method('post')\n\n        form = dc_from_form_params(form_params)\n\n        fr = FuzzableRequest.from_form(form)\n\n        expected = u'(POST)-http://example.com/?id=number!username=string&address=string'\n        self.assertEqual(self.vdb._clean_fuzzable_request(fr), expected)\n","sub_path":"w3af/core/data/db/tests/test_variant_db.py","file_name":"test_variant_db.py","file_ext":"py","file_size_in_byte":6451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"4464522","text":"import string\n\nfrom a.stack import Stack\n\n\n
def infix_to_suffix(expr):\n    \"\"\" Convert an infix expression to postfix (suffix) notation \"\"\"\n    stack, suffixes, operators = Stack(), [], {\n        '*': 3,\n        '/': 3,\n        '+': 2,\n        '-': 2,\n        '(': 1,\n        ')': 1,\n    }\n\n    for i in list(expr.replace(' ', '')):\n        if i.upper() in string.ascii_uppercase:\n            suffixes.append(i)\n        elif i == '(':\n            stack.push(i)\n        elif i == ')':\n            top = stack.pop()\n            while top != '(':\n                suffixes.append(top)\n                top = stack.pop()\n        else:\n            while not stack.isEmpty() and operators[stack.peek()] >= operators[i]:\n                suffixes.append(stack.pop())\n            stack.push(i)\n\n    while not stack.isEmpty():\n        suffixes.append(stack.pop())\n\n    return ''.join(suffixes)\n\n\n
def suffix_expr_value(expr):\n    \"\"\" Evaluate a postfix (suffix) expression \"\"\"\n    stack = Stack()\n\n    for i in list(expr.replace(' ', '')):\n        if i in '0123456789':\n            stack.push(int(i))\n        else:\n            b, a = stack.pop(), stack.pop()  # the first pop is the right-hand operand, so it must become b\n            stack.push(calculation(i, a, b))\n    return stack.pop()\n\n\n
def calculation(op, a, b):\n    \"\"\" Apply the operator to the two given operands \"\"\"\n    if op == '*':\n        return a * b\n    elif op == '/':\n        return a / b\n    elif op == '+':\n        return a + b\n    elif op == '-':\n        return a - b\n    else:\n        raise ValueError('Operator parameter error')\n","sub_path":"python/stack/g/infix_to_suffix.py","file_name":"infix_to_suffix.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"427009359","text":"from urlparse import urlparse, parse_qs\n\nfrom linkedin import linkedin\n\n\ndef extract_view_id(url):\n    try:\n        return parse_qs(\n            urlparse(url).query\n        )['id'][0]\n    except (IndexError, KeyError):\n        return None\n\n\ndef get_linkedin_application(user):\n    return linkedin.LinkedInApplication(\n        token=user.social_auth.filter(provider='linkedin-oauth2').get().extra_data['access_token']\n    )\n\n\ndef construct_date(linkedin_data, day=1):\n    if not linkedin_data:\n        return linkedin_data\n\n    month, year = linkedin_data.get('month'), linkedin_data.get('year')\n    date_string = ''\n    if month:\n        date_string += str(month) + '/'\n    if year:\n        date_string += str(year)\n    return date_string\n","sub_path":"linkedin_app/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"6065926","text":"from route4me import Route4Me\nfrom route4me.constants import (\n    DEVICE_TYPE\n)\n\nKEY = \"11111111111111111111111111111111\"\n\n\ndef main():\n    route4me = Route4Me(KEY)\n    route = route4me.route\n    response = route.get_routes(limit=10, Offset=5)\n    if hasattr(response, 'errors'):\n        print('. '.join(response.errors))\n    else:\n        response = route.get_route(route_id=response[0].route_id)\n        if hasattr(response, 'errors'):\n            print('. 
'.join(response.errors))\n        else:\n            route_id = response.route_id\n            route_destination_id = response.addresses[0].route_destination_id\n            lat = response.addresses[0].lat\n            lng = response.addresses[0].lng\n            note = 'Test Note Contents'\n            response = route4me.address.add_address_notes(\n                note,\n                route_id=route_id,\n                device_type=DEVICE_TYPE.WEB,\n                activity_type='wrongdelivery',\n                dev_lat=lat,\n                dev_lng=lng,\n                address_id=route_destination_id,\n            )\n            if hasattr(response, 'errors'):\n                print('. '.join(response.errors))\n            else:\n                print('Note ID: {}'.format(response.note_id))\n                print('Note contents: {}'.format(response.note.contents))\n                print('Route ID: {}'.format(response.note.route_id))\n                print('Route Destination ID: {}'.format(\n                    response.note.route_destination_id\n                ))\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"examples/notes/add_address_notes.py","file_name":"add_address_notes.py","file_ext":"py","file_size_in_byte":1563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"427460894","text":"from typing import List\n\n\"\"\"\nCollect the values into a set while scanning the array,\nthen check each value 1..n and append any number that never appears to the result list.\n\"\"\"\n\n\n
class Solution:\n    def findDisappearedNumbers(self, nums: List[int]) -> List[int]:\n        res = []\n        n = len(nums)\n        nums_set = set()\n        for num in nums:\n            nums_set.add(num)\n\n        for i in range(1, n + 1):\n            if i not in nums_set:\n                res.append(i)\n\n        return res\n\n\nif __name__ == '__main__':\n    nums = [4, 3, 2, 7, 8, 2, 3, 1]\n    result = Solution().findDisappearedNumbers(nums)\n    print(result)\n","sub_path":"leetcode/448.找到所有数组中消失的数字/448.找到所有数组中消失的数字.py","file_name":"448.找到所有数组中消失的数字.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"208113713","text":"# -*- coding: utf-8 -*-\n# @Time : 2021/8/21 5:01 PM\n# @Author : Kai Zheng\n# @FileName: tianyiyun2022.py\n# @Software: PyCharm\n# @Email: 156252108@qq.com\n'''\nl= [(1,3),(5,7),(2,6)]\nl.sort()\n# print(l)\nN=len(l)\n# print(l[1][0])\n# res = 0\n# for i in range(N-1):\n#     if l[i+1][0]>=l[i][1]:\n#         res += l[i][1] -l[i][0]\n#     else:\n#         res += l[i+1][0] - l[i][0]\n# print(res)\n# store holds every unit-length interval; count those that occur exactly once.\nstore= []\nfor i in l:\n    for j in range(i[0],i[1]):\n        store.append((j,j+1))\nstore.sort()\ncount = 0\nfor i in store:\n    if store.count(i) == 1:\n        count += 1\nprint(count)\n\n'''\n
def findMax(numOfItems,total_money,items,values):  # i indexes the items (rows), j the remaining capacity (columns)\n    dp = [[0]*(total_money+1) for i in range(numOfItems+1)]\n    #print(dp)\n    for i in range(1,numOfItems + 1):\n        for j in range(1,total_money + 1):\n            if j < items[i-1]:  # item i does not fit, so carry the value over without it\n                dp[i][j] = dp[i - 1][j]\n            else:  # item i fits: take the better of skipping it or taking it\n                dp[i][j] = max(dp[i - 1][j],dp[i - 1][j - items[i-1]] + values[i-1])\n    return dp[i][j]\ntotal_money = 20 # knapsack capacity\nhotspot_val = [4,8,6,22,2]#[6, 10,3, 4, 5, 8]\nunit_price = [4,7,5,10,1]#[200,600,100,180,300,450]\nres=findMax(len(unit_price),total_money,unit_price,hotspot_val)\nprint(res)\n","sub_path":"autumnrecruit/tianyiyun2022.py","file_name":"tianyiyun2022.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"217163563","text":"import synapsebridgehelpers\nimport synapseclient\nimport pandas as pd\n\ndef summaryTable(syn, projectId, columns = []):\n    \"\"\"Outputs a concatenated table containing the given\n    list of columns from the given projectId. 
If no columns \n are given, then all columns are considered\n \n NOTE: When giving a column, note that it needs to be \n present in all the tables of the given Project, otherwise\n the function will throw an error\n \n Arguments:\n - syn: a Synapse client object\n - projectID: synapse ID of the project we want to summarize\n - columns: list of columns we want in the summary table\"\"\"\n \n all_tables = synapsebridgehelpers.get_tables(syn, projectId)\n df_main = pd.DataFrame()\n columns_str = ''\n for col in columns:\n columns_str = columns_str+col+','\n columns_str = columns_str[:-1] # removing the last ','\n columns_str = '*' if columns_str == '' else columns_str # If empty then we need to choose all columns\n for table_id in all_tables['table.id']:\n df = syn.tableQuery('select ' +columns_str+' from '+ table_id)\n df = df.asDataFrame()\n schema = syn.get(table_id)\n df['originalTableName'] = [schema.name for count in range(0,df.shape[0])]\n df['originalTableId'] = [schema.id for count in range(0,df.shape[0])]\n df_main = pd.concat([df_main,df])\n return df_main\n","sub_path":"synapsebridgehelpers/summaryTable.py","file_name":"summaryTable.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"108918959","text":"\"\"\"\nService Oriented Computing Lab Week12 in Python.\nService loader\n\nUsage:\npython service_loader.py\n\nLoad the service in data/service/\n\n@author: Weiyi Wang\n@contact: weiyi.wang@sv.cmu.edu\n@date: 11/23/2015\n\"\"\"\nimport os\nfrom os import path\nimport collections\nfrom service import Service\n\n\nclass ServiceLoader:\n def __init__(self, file_path='data/service'):\n self.files = [path.join(file_path, f) for f in os.listdir(file_path) if path.isfile(path.join(file_path, f))]\n self.services = collections.OrderedDict()\n for file in self.files:\n self.services[path.basename(file)] = []\n with open(file, 'r') as stream:\n for line in stream.read().splitlines():\n [name, cost, reliability, time, availability] = line.split('\\t')\n svc = Service(name, float(cost), float(reliability), float(time), float(availability))\n self.services[path.basename(file)].append(svc)\n","sub_path":"lab12/service_loader.py","file_name":"service_loader.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"138789298","text":"import logging\nfrom typing import List, Optional\n\ntry:\n from mmcv.parallel import scatter\nexcept ImportError:\n mmcv = None\ntry:\n from mmocr.utils.model import revert_sync_batchnorm\nexcept ImportError:\n mmocr = None\nfrom torch import nn\n\nfrom ..constants import (\n AUTOMM,\n COLUMN,\n COLUMN_FEATURES,\n FEATURES,\n IMAGE,\n IMAGE_VALID_NUM,\n LABEL,\n LOGITS,\n MASKS,\n SCORE,\n TEXT,\n)\nfrom .utils import assign_layer_ids, get_column_features, get_mmocr_config_and_model, get_model_head\n\nlogger = logging.getLogger(__name__)\n\n\nclass MMOCRAutoModelForTextRecognition(nn.Module):\n \"\"\"\n Support MMOCR text recognition models.\n Refer to https://github.com/open-mmlab/mmocr\n \"\"\"\n\n def __init__(\n self,\n prefix: str,\n checkpoint_name: str,\n num_classes: Optional[int] = None,\n pretrained: Optional[bool] = True,\n ):\n \"\"\"\n Load a pretrained ocr text recognition detector from MMOCR.\n\n Parameters\n ----------\n prefix\n The prefix of the MMdetAutoModelForTextRecognition model.\n checkpoint_name\n Name of the mmdet checkpoint.\n num_classes\n The number of 
classes.\n pretrained\n Whether using the pretrained mmocr models. If pretrained=True, download the pretrained model.\n \"\"\"\n super().__init__()\n logger.debug(f\"initializing {checkpoint_name}\")\n self.checkpoint_name = checkpoint_name\n self.pretrained = pretrained\n\n self.config, self.model = get_mmocr_config_and_model(checkpoint_name)\n self.model = revert_sync_batchnorm(self.model)\n self.model.cfg = self.config\n self.prefix = prefix\n\n @property\n def image_key(self):\n return f\"{self.prefix}_{IMAGE}\"\n\n @property\n def image_valid_num_key(self):\n return f\"{self.prefix}_{IMAGE_VALID_NUM}\"\n\n @property\n def label_key(self):\n return f\"{self.prefix}_{LABEL}\"\n\n @property\n def image_column_prefix(self):\n return f\"{self.image_key}_{COLUMN}\"\n\n @property\n def image_feature_dim(self):\n return self.model.num_features\n\n def forward(\n self,\n batch: dict,\n ):\n \"\"\"\n Parameters\n ----------\n batch\n A dictionary containing the input mini-batch data.\n We need to use the keys with the model prefix to index required data.\n\n Returns\n -------\n A dictionary with bounding boxes.\n \"\"\"\n\n data = batch[self.image_key]\n # single image\n if isinstance(data[\"img_metas\"], List):\n data[\"img_metas\"] = [img_metas.data[0] for img_metas in data[\"img_metas\"]]\n else:\n data[\"img_metas\"] = data[\"img_metas\"].data\n\n if isinstance(data[\"img\"], List):\n data[\"img\"] = [img.data[0] for img in data[\"img\"]]\n else:\n data[\"img\"] = data[\"img\"].data\n\n device = next(self.model.parameters()).device # model device\n if next(self.model.parameters()).is_cuda:\n # scatter to specified GPU\n data = scatter(data, [device])[0]\n\n results = self.model(return_loss=False, rescale=True, **data)\n\n ret = {TEXT: results[0][\"text\"], SCORE: results[0][\"score\"]}\n return {self.prefix: ret}\n\n def get_layer_ids(\n self,\n ):\n \"\"\"\n Assign an id to each layer. Layer ids will be used in layer-wise lr decay.\n Basically, id gradually increases when going from the output end to\n the input end. The layers defined in this class, e.g., head, have id 0.\n\n Setting all layers as the same id 0 for now.\n TODO: Need to investigate mmocr's model definitions\n\n Returns\n -------\n A dictionary mapping the layer names (keys) to their ids (values).\n \"\"\"\n name_to_id = {}\n for n, _ in self.named_parameters():\n name_to_id[n] = 0\n return name_to_id\n","sub_path":"multimodal/src/autogluon/multimodal/models/mmocr_text_recognition.py","file_name":"mmocr_text_recognition.py","file_ext":"py","file_size_in_byte":3994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"259231976","text":"from . import halconfig_types as types\nfrom . import halconfig_dependency as dep\n\nname = \"ANTDIV\"\ndisplayname = \"Antenna Diversity\"\ndescription = \"Antenna Diversity\"\ncompatibility = dep.Dependency(mcu_type=dep.McuType.RADIO) # = all\ncategory = \" Radio\"\nstudio_module = {\n \"basename\" : \"SDK.HAL.ANTDIV\",\n \"modules\" : [types.StudioFrameworkModule(\"BASE\", [types.Framework.ZNET, types.Framework.THREAD, types.Framework.CONNECT])],\n }\nenable = {\n \"define\": \"HAL_ANTDIV_ENABLE\",\n \"description\": \"Enable antenna diversity. 
SEL signal must be defined, NSEL is optional.\",\n}\noptions = {\n    \"BSP_ANTDIV_SEL\": {\n        \"type\": types.Pin(),\n        \"description\": \"Antenna select\",\n        \"longdescription\": \"Pin used to control external antenna switch\",\n    },\n    \"BSP_ANTDIV_NSEL\": {\n        \"type\": types.Pin(),\n        \"description\": \"Complementary antenna select\",\n        \"longdescription\": \"Pin for inverted external antenna signal\",\n    },\n}\n","sub_path":"growhouse/end-devices/gecko_sdk_suite/v2.3/platform/hwconf_data/efr32mg12p/modules/ANTDIV/ANTDIV_model.py","file_name":"ANTDIV_model.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"415329300","text":"#-------------------------------------------------------------------------------\n# Name: Matrix\n# Purpose:\n#\n# Author: Thibault GAUTIER\n#\n# Created: 14/10/2014\n# Copyright: (c) admin 2014\n# Licence: \n#-------------------------------------------------------------------------------\n#!/usr/bin/env python\n\nfrom random import *\n\ndef matrice_aleat(n):  # Initialize an n-by-n matrix\n    m = []\n    for i in range(0, n):\n        m.append([])\n        for j in range(0, n):\n            m[i].append(randrange(0,20))  # Append a random value to the matrix\n\n    return m\n\ndef main():\n    n = int(input(\"Enter the size of the matrix:\"))\n\n    print(matrice_aleat(n))\n\nmain()\n","sub_path":"Thibault GAUTIER/Exercices/Tableau/matrix.py","file_name":"matrix.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"64609420","text":"from TaskList import TaskList\nfrom Task import Task\nimport re\n\n\nclass InvalidPriorityError(Exception):\n    pass\n\n\nclass MissingTaskIDError(Exception):\n    pass\n\n\nTASK_FILE = 'tasks.txt'\nPRIORITY = '!'\nPROJECT = '#'\n\n\ndef get_task_list():\n    task_list = TaskList()\n    with open(TASK_FILE, 'r') as task_list_file:\n        for task in task_list_file:\n            task_id, task_desc = parse_task_id(task)\n            desc, priority, project, complete = parse_task(task_desc.strip())\n            task = Task(task_id, desc, complete, priority, project)\n            task_list[task_id] = task\n    return task_list\n\n\ndef save_task_list(task_list):\n    with open('tasks.txt', 'w+') as task_list_file:\n        task_list_file.write(repr(task_list))\n\n\ndef parse_task_id(unparsed_task):\n    task_id = get_task_id(unparsed_task)\n    desc = unparsed_task[len(str(task_id)):]\n    return task_id, desc\n\n\ndef get_task_id(unparsed_task):\n    try:\n        return int(re.findall('^[0-9]+', unparsed_task.strip())[0])\n    except IndexError:\n        raise MissingTaskIDError(\"This functionality requires a task id; task id must be a number.\")\n\n\ndef parse_task(unparsed_task):\n    try:\n        project_start = unparsed_task.index(PROJECT)\n        project = unparsed_task[project_start + 1:]\n    except ValueError:\n        project = None\n        project_start = len(unparsed_task)\n\n    try:\n        priority_start = unparsed_task.index(PRIORITY)\n        priority = unparsed_task[priority_start + 1:project_start].strip()\n    except ValueError:\n        priority = None\n    else:\n        try:\n            priority = int(priority)\n        except ValueError:\n            raise InvalidPriorityError(f'Priority must be a number value.')\n\n    try:\n        desc = re.findall('^.+?(?=[#!~])', unparsed_task)[0].strip()\n    except IndexError:\n        desc = unparsed_task.strip()\n\n    try:\n        complete = re.findall('~(False|True)', unparsed_task)[0].strip() == \"True\"\n    except IndexError:\n        complete = None\n    except ValueError:\n        complete = None\n\n    return desc, priority, project, 
complete\n\n\n","sub_path":"TaskParser.py","file_name":"TaskParser.py","file_ext":"py","file_size_in_byte":2081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"144122008","text":"import os\nimport qrcode\nfrom telegram.ext import Updater, CommandHandler, ConversationHandler, CallbackQueryHandler, MessageHandler, Filters\nfrom telegram import ChatAction, InlineKeyboardMarkup, InlineKeyboardButton\n\n\n\nINPUT_TEXT = 0\n\n\ndef start(update, context):\n\n    update.message.reply_text(\n        text= 'Hi, what would you like to do?',\n        reply_markup=InlineKeyboardMarkup([\n            [InlineKeyboardButton(text='Generate QR', callback_data='qr')],\n            [InlineKeyboardButton(text='About the author', url='www.facebook.com')],\n        ])\n    )\n\n\ndef qr_command_handler(update, context):\n    update.message.reply_text('Send me the text to generate a QR code.')\n\n    return INPUT_TEXT\n\n\ndef qr_callback_handler(update, context):\n\n    query = update.callback_query\n    query.answer()\n\n    query.edit_message_text(\n        text='Send me the text to generate the QR code'\n    )\n\n    return INPUT_TEXT\n\n\ndef generate_qr(text):\n\n    filename = text + \".jpg\"\n\n    img = qrcode.make(text)\n    img.save(filename)\n\n    return filename\n\ndef send_qr(filename, chat):\n\n    chat.send_action(\n        action=ChatAction.UPLOAD_PHOTO,\n        timeout=None\n    )\n\n    chat.send_photo(\n        photo=open(filename, \"rb\")\n    )\n\n    os.unlink(filename)\n\n\ndef input_text(update, context):\n\n    text = update.message.text\n\n    filename = generate_qr(text)\n\n    chat = update.message.chat\n\n    send_qr(filename, chat)\n\n    return ConversationHandler.END\n\n\nif __name__ == '__main__':\n    updater = Updater(token='your token', use_context=True)\n\n    dp = updater.dispatcher\n\n    dp.add_handler(CommandHandler('start', start))\n\n    dp.add_handler(ConversationHandler(\n        entry_points=[\n            CommandHandler('qr', qr_command_handler),\n            CallbackQueryHandler(pattern='qr', callback=qr_callback_handler)\n        ],\n\n        states={\n            INPUT_TEXT: [MessageHandler(Filters.text, input_text)]\n        },\n\n        fallbacks=[],\n\n    ))\n\n    updater.start_polling()\n    updater.idle()\n","sub_path":"Bot2.py","file_name":"Bot2.py","file_ext":"py","file_size_in_byte":2015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"432262883","text":"from __future__ import print_function, absolute_import, division\n\nimport copy\nimport unittest2 as unittest\nfrom mock import patch\n\nfrom cfn_sphere_python.stack_config_helper import StackConfigHelper\n\nSIMPLE_SMALL_CONFIG = {\n    'region': 'eu-west-4',\n    'tags': {'one': 'two'},\n    'stacks': {\n        'foo': {\n            'parameters': {\n                'vpc': '123456'\n            }\n        },\n        'bar': {}\n    }\n}\n# two stacks in one config, with one cross reference\nFIRST_TEST_CONFIG = {\n    'region': 'eu-west-1',\n    'tags': {\n        'one': 'two',\n        'three': 'four'\n    },\n    'stacks': {\n        'foo': {},\n        'foo-one': {\n            'parameters': {\n                'param': 'value',\n                'num': 1234,\n                'ref': '|ref|foo.output'\n            }\n        }\n    }\n}\n# This contains a cross reference to one of the FIRST_TEST_CONFIG stacks\nSECOND_TEST_CONFIG = {\n    'region': 'eu-west-1',\n    'stacks': {\n        'lorum': {\n            'parameters': {\n                'ref': '|ref|foo.output'\n            }\n        }\n    }\n}\n\n\nclass StackConfigHelperTest(unittest.TestCase):\n\n    @classmethod\n    def setUpClass(cls):\n        pass\n\n    @classmethod\n    def tearDownClass(cls):\n        pass\n\n    @staticmethod\n    def get_stack_config_helper(test_config):\n        obj = None\n        with patch('cfn_sphere_python.stack_config_helper.StackConfigHelper._load_config') as load_config_mock:\n            load_config_mock.return_value 
= copy.deepcopy(test_config)\n obj = StackConfigHelper(config_file='foo')\n return obj\n\n @patch('cfn_sphere_python.stack_config_helper.StackConfigHelper._rename_stacks')\n def test_init__with_minimal_parameters(self, rename_stacks):\n stacks_config = self.get_stack_config_helper(SIMPLE_SMALL_CONFIG)\n rename_stacks.assert_called_once()\n\n self.assertEqual(SIMPLE_SMALL_CONFIG, stacks_config.config)\n self.assertEqual('eu-west-4', stacks_config.config['region'])\n self.assertEqual({'one': 'two'}, stacks_config.config['tags'])\n\n def test_init__default_suffix_and_set_mappings(self):\n stacks_config = self.get_stack_config_helper(SIMPLE_SMALL_CONFIG)\n self.assertEqual({'foo': 'foo-it', 'bar': 'bar-it'},\n stacks_config.stack_name_mappings)\n\n @patch('cfn_sphere_python.stack_config_helper.StackConfigHelper._rename_stacks')\n def test_init__with_all_parameters_set_which_overrides_config(self, rename_stacks):\n with patch('cfn_sphere_python.stack_config_helper.StackConfigHelper._load_config') as load_config_mock:\n load_config_mock.return_value = copy.deepcopy(FIRST_TEST_CONFIG)\n stacks_config = StackConfigHelper(\n config_file='foo',\n suffix='bar',\n region='eu-central-2',\n tags={'foo': 'bar'})\n\n rename_stacks.assert_called_once()\n # suffix, region and tags will be overridden by given values\n self.assertEqual('bar', stacks_config.suffix)\n self.assertEqual({'foo': 'bar'}, stacks_config.config['tags'])\n self.assertEqual('eu-central-2', stacks_config.config['region'])\n\n def test_update_parameters_adding_to_empty(self):\n stacks_config = self.get_stack_config_helper(FIRST_TEST_CONFIG)\n self.assertIsNone(self.get_stack_parameters(stacks_config, 'foo'))\n stacks_config.update_parameters('foo', {'bar': 'tender'})\n self.assertEqual({'bar': 'tender'},\n self.get_stack_parameters(stacks_config, 'foo'))\n\n def test_update_parameters_replace_and_add(self):\n stacks_config = self.get_stack_config_helper(SIMPLE_SMALL_CONFIG)\n self.assertEqual({\n 'vpc': '123456'\n }, self.get_stack_parameters(stacks_config, 'foo'))\n stacks_config.update_parameters('foo', {'bar': 'tender', 'vpc': '654321'})\n self.assertEqual({'vpc': '654321', 'bar': 'tender'},\n self.get_stack_parameters(stacks_config, 'foo'))\n # the other is untouched\n self.assertIsNone(self.get_stack_parameters(stacks_config, 'bar'))\n\n @staticmethod\n def get_stack_parameters(stacks_config, stack_basename):\n config = stacks_config.config['stacks'][stacks_config._new_stackname(stack_basename)]\n return config.get('parameters', None)\n\n def test_update_references(self):\n stacks_config = self.get_stack_config_helper(SECOND_TEST_CONFIG)\n stacks_config.update_references({'foo': 'new_foo'})\n self.assertEqual({'ref': '|ref|new_foo.output'},\n self.get_stack_parameters(stacks_config, 'lorum'))\n\n def test_update_references_w_empty_mapping(self):\n stacks_config = self.get_stack_config_helper(SECOND_TEST_CONFIG)\n stacks_config.update_references({})\n self.assertEqual({'ref': '|ref|foo.output'},\n self.get_stack_parameters(stacks_config, 'lorum'))\n\n @patch('cfn_sphere_python.stack_config_helper.StackConfigHelper._rename_stack_references')\n def test__rename_stacks(self, rename_refs_mock):\n stacks_config = self.get_stack_config_helper(FIRST_TEST_CONFIG)\n self.assertEqual(rename_refs_mock.call_count, 2)\n # after init the stacknames are already renamed\n new_stack_names = [k for k, v in stacks_config.config['stacks'].items()]\n self.assertEqual(['foo-it', 'foo-one-it'], new_stack_names)\n self.assertEqual({'foo': 'foo-it', 
'foo-one': 'foo-one-it'},\n stacks_config.stack_name_mappings)\n\n def test__rename_stack_references(self):\n stacks_config = self.get_stack_config_helper(FIRST_TEST_CONFIG)\n # current stacknames with new ones\n mapping = {'foo-it': 'foo-new', 'foo-one-it': 'foo-one-new'}\n result = stacks_config._rename_stack_references(\n stacks_config.config['stacks']['foo-one-it'], mapping)\n self.assertEqual({'parameters': {\n 'param': 'value',\n 'num': 1234,\n 'ref': '|ref|foo-new.output'\n }}, result)\n\n def test_get_stack_output(self):\n # TODO\n pass\n\n def test__new_stackname(self):\n stacks_config = self.get_stack_config_helper(SIMPLE_SMALL_CONFIG)\n self.assertEqual('foo-it', stacks_config._new_stackname('foo'))\n\n def test__new_stackname_no_suffix(self):\n stacks_config = self.get_stack_config_helper(SIMPLE_SMALL_CONFIG)\n stacks_config.suffix = None\n self.assertEqual('foo', stacks_config._new_stackname('foo'))\n\n def test_get_shortest_stack_basename(self):\n stacks_config = self.get_stack_config_helper(FIRST_TEST_CONFIG)\n self.assertEqual('foo', stacks_config.get_shortest_stack_basename())\n stacks_config = self.get_stack_config_helper(SECOND_TEST_CONFIG)\n self.assertEqual('lorum', stacks_config.get_shortest_stack_basename())\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"src/unittest/python/stack_config_helper_tests.py","file_name":"stack_config_helper_tests.py","file_ext":"py","file_size_in_byte":6969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"245421773","text":"import os\nimport pytest\nimport streamcorpus\nfrom streamcorpus_pipeline.stages import _init_stage\nfrom tests.streamcorpus_pipeline._test_data import _TEST_DATA_ROOT\n\ndef test_protection():\n with pytest.raises(streamcorpus.VersionMismatchError): # pylint: disable=E1101\n for si in streamcorpus.Chunk(\n os.path.join(\n os.path.dirname(__file__),\n _TEST_DATA_ROOT,\n 'test/MAINSTREAM_NEWS-15-9d6218f0aa7c9585cda12a10d642a8b3-41600ffca7703f7914102da5256233ce.sc.xz'),\n message=streamcorpus.StreamItem\n ):\n pass\n\ndef test_upgrade_streamcorpus_v0_3_0():\n\n up = _init_stage('upgrade_streamcorpus_v0_3_0', {})\n\n count = 0\n for si in streamcorpus.Chunk(\n os.path.join(\n os.path.dirname(__file__),\n _TEST_DATA_ROOT,\n 'test/WEBLOG-100-fd5f05c8a680faa2bf8c55413e949bbf.sc'),\n message=streamcorpus.StreamItem_v0_2_0\n ):\n \n count += 1\n\n si3 = up(si)\n\n assert si3.version == streamcorpus.Versions._NAMES_TO_VALUES['v0_3_0']\n\n if count > 10:\n break\n\n\ndef test_upgrade_streamcorpus_v0_3_0_check_mention_ids():\n\n up = _init_stage('upgrade_streamcorpus_v0_3_0', {})\n\n all_mention_ids = set()\n\n for si in streamcorpus.Chunk(\n os.path.join(\n os.path.dirname(__file__),\n _TEST_DATA_ROOT,\n 'test/MAINSTREAM_NEWS-15-9d6218f0aa7c9585cda12a10d642a8b3-41600ffca7703f7914102da5256233ce.sc.xz'),\n message=streamcorpus.StreamItem_v0_2_0\n ):\n \n si3 = up(si)\n\n assert si3.version == streamcorpus.Versions._NAMES_TO_VALUES['v0_3_0']\n\n mention_ids = set()\n for sentence in si3.body.sentences['lingpipe']:\n sentence_mention_ids = set()\n for token in sentence.tokens:\n if token.mention_id not in [None, -1]:\n sentence_mention_ids.add(token.mention_id)\n\n assert mention_ids.intersection(sentence_mention_ids) == set()\n\n mention_ids.update( sentence_mention_ids )\n\n all_mention_ids.update( sentence_mention_ids )\n\n assert len(all_mention_ids) > 
0\n\n","sub_path":"src/tests/streamcorpus_pipeline/test_upgrade_streamcorpus_v0_3_0.py","file_name":"test_upgrade_streamcorpus_v0_3_0.py","file_ext":"py","file_size_in_byte":2206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"643868912","text":"import os.path\nimport tensorflow as tf\nfrom tensorflow.contrib.tensorboard.plugins import projector\nimport numpy as np\nimport os\n\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\n# reference:\n# https://www.tensorflow.org/programmers_guide/graph_viz\n\n# define some functions\ndef weight(shape,name):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial,name=name)\n\ndef bias(shape,name):\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial,name=name)\n\ndef conv2d(x, W):\n return tf.nn.conv2d(x, W, strides = [1,1,1,1], padding = 'SAME')\n\ndef max_pool_2x2(x):\n return tf.nn.max_pool(x, ksize = [1,2,2,1], strides = [1,2,2,1], padding = 'SAME')\n\n# load the dataset\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"MNIST_data\", one_hot = True)\n\n# build CNN model\n# initialize weights with small random values for symmetry breaking\n# initialize weights with positive values for preventing \"dead ReLU neurons\"\nW1 = 5; H1 = 5; C1 = 32\nW2 = 5; H2 = 5; C2 = 64\nC3 = 1024\nwith tf.name_scope('input_data'):\n x = tf.placeholder(tf.float32, [None, 784])\n y_ = tf.placeholder(tf.float32, [None, 10])\n x_image = tf.reshape(x, [-1,28,28,1])\n tf.summary.image('input', x_image, max_outputs=10)\n# 1st conv layer\nwith tf.name_scope('hidden_conv_1'):\n W_conv1 = weight([W1,H1,1,C1],name='weights')\n b_conv1 = bias([C1],name='bias')\n h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)\n h_pool1 = max_pool_2x2(h_conv1)\n # summaries\n tf.summary.histogram('histogram of W1', W_conv1)\n tf.summary.histogram('histogram of b1', b_conv1)\n# 2nd conv layer\nwith tf.name_scope('hidden_conv_2'):\n W_conv2 = weight([W2,H2,C1,C2],name='weights')\n b_conv2 = bias([C2],name='bias')\n h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)\n h_pool2 = max_pool_2x2(h_conv2) # now that the image size has been reduced to 7x7\n # summaries\n tf.summary.histogram('histogram of W2', W_conv2)\n tf.summary.histogram('histogram of b2', b_conv2)\n# densely connected layer with 1024 neurons\nwith tf.name_scope('hidden_fully_connected'):\n W_fc = weight([7*7*C2, C3],name='weights')\n b_fc = bias([C3],name='bias')\n h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*C2])\n h_fc = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc) + b_fc)\n # summaries\n tf.summary.histogram('histogram of W_fc', W_fc)\n tf.summary.histogram('histogram of b_fc', b_fc)\n# dropout\nwith tf.name_scope('dropout'):\n p_keep = tf.placeholder(tf.float32)\n h_fc_drop = tf.nn.dropout(h_fc, p_keep)\n# output layer\nwith tf.name_scope('output_layer'):\n W_output = weight([C3, 10],name='weights')\n b_output = bias([10],name='bias')\n y = tf.matmul(h_fc_drop, W_output) + b_output\n # summaries\n tf.summary.histogram('histogram of W_out', W_output)\n tf.summary.histogram('histogram of b_out', b_output)\n tf.summary.histogram('histogram of prob_dist_out', y_)\n\n# cost\nwith tf.name_scope('cost'):\n cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels = y_, logits = y))\n # summary\n tf.summary.scalar('loss', cross_entropy)\n\n# training op\nwith tf.name_scope('train'):\n optimizer = 
tf.train.AdamOptimizer(1e-4)\n train_step = optimizer.minimize(cross_entropy)\n\n# accuracy testing\nwith tf.name_scope('accuracy'):\n correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n # summary\n tf.summary.scalar('accuracy', accuracy)\n\n# training\nmax_iter = 20000\nbatch_size = 64\nkeep_prob = 0.5\nif not os.path.exists('MNIST_logs/train'):\n os.mkdir('MNIST_logs')\n os.mkdir('MNIST_logs/train')\nif not os.path.exists('MNIST_logs/test'):\n os.mkdir('MNIST_logs/test')\nwith tf.Session() as sess:\n ######################################################################################\n # embeddings\n test_images = tf.Variable(mnist.test.images, name='test_images')\n test_labels = tf.argmax(mnist.test.labels,1).eval(session=sess)\n with open('MNIST_logs/test/metadata.tsv', 'w') as metadata_file:\n for label in test_labels:\n metadata_file.write('%d\\n' % label)\n # associate metadata with the embedding.\n config = projector.ProjectorConfig()\n embedding = config.embeddings.add()\n embedding.tensor_name = test_images.name\n embedding.metadata_path = 'metadata.tsv' # Link this tensor to its metadata file\n # writers\n summary = tf.summary.merge_all()\n train_writer = tf.summary.FileWriter('MNIST_logs/train', sess.graph)\n test_writer = tf.summary.FileWriter('MNIST_logs/test' , sess.graph)\n saver = tf.train.Saver() # Create a saver for writing training checkpoints.\n projector.visualize_embeddings(test_writer, config)\n ######################################################################################\n # training\n sess.run(tf.global_variables_initializer())\n for iter in range(max_iter):\n batch_x, batch_y = mnist.train.next_batch(batch_size)\n # each 100 steps: report accuracy & write summary file\n if iter%100 == 0:\n # report accuracy\n train_accuracy = accuracy.eval({x:batch_x, y_:batch_y, p_keep: 1.0}) # turn off dropout during testing\n test_accuracy = 0.0\n for i in range(100):\n test_x, test_y = mnist.test.next_batch(100)\n test_acc = accuracy.eval({x:test_x, y_:test_y, p_keep:1.0})\n test_accuracy = test_accuracy + test_acc\n test_accuracy = test_accuracy/100.0\n print(\"iter step %d batch accuracy %f test accuracy %f\"%(iter, train_accuracy, test_accuracy))\n # train summary\n summary_train = sess.run(summary, {x:batch_x, y_:batch_y, p_keep: 1.0})\n train_writer.add_summary(summary_train, iter)\n train_writer.flush()\n # test summary\n summary_test = sess.run(summary, {x:test_x, y_:test_y, p_keep: 1.0})\n test_writer.add_summary(summary_test, iter)\n test_writer.flush()\n if iter%1000 == 0:\n # save checkpoint file\n checkpoint_file = os.path.join('MNIST_logs', 'model.ckpt')\n saver.save(sess, checkpoint_file, global_step=iter)\n ######################################################################################\n # each 1000 steps: save running status\n if iter%1000 == 0:\n print('\\nsaving running status information ...\\n')\n run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)\n run_metadata = tf.RunMetadata()\n sess.run(train_step, {x:batch_x, y_:batch_y, p_keep: keep_prob},\n options=run_options, run_metadata=run_metadata)\n train_writer.add_run_metadata(run_metadata, 'step%03d' % iter)\n else:\n sess.run(train_step, {x:batch_x, y_:batch_y, p_keep: keep_prob})\n ######################################################################################\n train_writer.close()\n test_writer.close()\n 
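# both summary writers are closed; checkpoints and the projector config live under MNIST_logs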
\n","sub_path":"exp09_Graph_Visualization.py","file_name":"exp09_Graph_Visualization.py","file_ext":"py","file_size_in_byte":7168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"38339163","text":"h, m = map(int,input().split())\ncooking = int(input())\n\nh += cooking//60\nm += cooking%60\n\nif m >= 60:\n    h += 1\n    m -=60\nif h >= 24:\n    h -= 24\nprint(h,m)\n","sub_path":"boj(baekjoon)/boj_2525.py","file_name":"boj_2525.py","file_ext":"py","file_size_in_byte":160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"634188021","text":"#!/usr/bin/env python3.7\n\nimport os\nimport sys\nfrom subprocess import CalledProcessError, check_call\nimport github3 as gh3\n\nSCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))\nWORK = os.path.join(SCRIPT_DIR, \"work\")\nCARGO_HOME = os.path.join(SCRIPT_DIR, \".cargo\")\nRUSTUP_HOME = os.path.join(SCRIPT_DIR, \".rustup\")\nCARGO = os.path.join(CARGO_HOME, \"bin\", \"cargo\")\nSOFTDEV_GH = \"https://github.com/softdevteam\"\nRUSTUP_URL = \"https://sh.rustup.rs/\"\nGH_API_HOST = \"api.github.com\"\nGH_API_REPOS = \"/users/softdevteam/repos\"\n\n# If you want to skip any soft-dev repos, you can add them here.\nSD_SKIP_REPOS = []\n\n\ndef get_sd_rust_repos(token_file):\n    \"\"\"Get a list of unarchived soft-dev repos written in Rust\"\"\"\n\n    with open(token_file) as f:\n        token = f.read().strip()\n\n    gh = gh3.login(token=token)\n    return [r for r in gh.repositories() if\n            r.owner.login == \"softdevteam\" and\n            \"Rust\" in map(lambda tup: tup[0], r.languages()) and\n            not r.archived and\n            r.name not in SD_SKIP_REPOS]\n\n\ndef install_cargo_audit():\n    os.environ[\"RUSTUP_HOME\"] = RUSTUP_HOME\n    os.environ[\"CARGO_HOME\"] = CARGO_HOME\n\n    check_call([\"curl\", \"--proto\", \"=https\", \"--tlsv1.2\", \"-sSf\",\n                \"https://sh.rustup.rs\", \"-o\", \"rustup.sh\"])\n    check_call([\"sh\", \"rustup.sh\", \"--no-modify-path\", \"-y\"])\n    check_call([CARGO, \"install\", \"cargo-audit\"])\n\n\ndef audit(name, repo):\n    direc = os.path.join(WORK, name)\n\n    # Either clone or update the source from git.\n    src_exists = os.path.exists(direc)\n    if not src_exists:\n        os.chdir(WORK)\n        git_cmd = [\"git\", \"clone\", repo, name]\n    else:\n        os.chdir(direc)\n        git_cmd = [\"git\", \"pull\"]\n\n    try:\n        check_call(git_cmd)\n    except CalledProcessError:\n        return False\n\n    os.chdir(direc)\n\n    # If there's no Cargo.toml, we can't audit it.\n    if not os.path.exists(\"Cargo.toml\"):\n        print(\"No Cargo.toml\")\n        return True\n\n    # Repos which use sub-modules (like Rust forks) need the submodules sources\n    # available too.\n    try:\n        check_call([\"git\", \"submodule\", \"update\"])\n    except CalledProcessError:\n        return False\n\n    # Actually do the audit.\n    try:\n        check_call([CARGO, \"audit\", \"-D\"])\n    except CalledProcessError:\n        return False\n\n    return True\n\n\nif __name__ == \"__main__\":\n    try:\n        token_file = sys.argv[1]\n    except IndexError:\n        print(\"usage: audit.py <token-file>\")\n        sys.exit(1)\n\n    if not os.path.exists(\".cargo\"):\n        install_cargo_audit()\n\n    if not os.path.exists(WORK):\n        os.mkdir(WORK)\n\n    repos = get_sd_rust_repos(token_file)\n\n    problematic = []\n    for r in repos:\n        print(f\"\\n\\nChecking {r.clone_url}...\")\n        res = audit(r.name, r.clone_url)\n        if not res:\n            problematic.append(r.name)\n\n    if problematic:\n        print(\"\\n\\nThe following repos have problems:\")\n        for p in problematic:\n            print(f\" {p}\")\n        
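# exit with a non-zero status so the failed audit is visible to the caller\n        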
sys.exit(1)\n","sub_path":"audit.py","file_name":"audit.py","file_ext":"py","file_size_in_byte":2999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"210608516","text":"\"\"\"def countingSort(arr, exp1):\n \n n = len(arr)\n \n # The output array elements that will have sorted arr\n output = [0] * n\n \n # initialize count array as 0\n count = [0] * 10\n \n # Store count of occurrences in count[]\n for i in range(0, n):\n index = arr[i]//exp1\n count[index % 10] += 1\n \n # Change count[i] so that count[i] now contains actual\n # position of this digit in output array\n for i in range(1,10):\n count[i] += count[i-1]\n \n # Build the output array\n i = n-1\n while i >= 0:\n index = (arr[i] // exp1)\n output[ count[index % 10] - 1] = arr[i]\n count[index % 10] -= 1\n i -= 1\n \n # Copying the output array to arr[],\n # so that arr now contains sorted numbers\n for i in range(0, len(arr)):\n arr[i] = output[i]\n\n\n# Method to do Radix Sort\ndef radixSort(arr):\n \n # Find the maximum number to know number of digits\n max1 = max(arr)\n \n # Do counting sort for every digit. Note that instead\n # of passing digit number, exp is passed. exp is 10^i\n # where i is current digit number\n exp = 1\n while max1/exp > 0:\n countingSort(arr,exp)\n exp *= 10\n\"\"\"\n\n\ndef countingSort(input_data, expo1):\n count_arr = [0] * 10\n for i in input_data:\n index = i // expo1\n count_arr[index % 10] += 1\n for i in range(1, 10):\n count_arr[i] += count_arr[i-1]\n res = [0] * len(input_data)\n for i in range(len(input_data)-1, -1, -1):\n index = input_data[i] // expo1\n res[count_arr[index % 10] - 1] = input_data[i]\n count_arr[index % 10] -= 1\n return res\n\n\ndef radixSort(input_data):\n max_data = max(input_data)\n expo = 1\n while max_data // expo > 0:\n input_data = countingSort(input_data, expo)\n expo *= 10\n return input_data\n\n\n# Driver code to test above\narr = [170, 45, 75, 90, 802, 24, 2, 66]\nres=radixSort(arr)\n \nprint(res)\n","sub_path":"RadixSort_Implement.py","file_name":"RadixSort_Implement.py","file_ext":"py","file_size_in_byte":1931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"318136797","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Apr 15 14:38:51 2018\n\nload open-ephys .Spikes file,\nreturn waveforms, timestamps, and sampling rate (fs)\n\n@author: Patrick\n\"\"\"\n\nimport numpy as np\n\ndef load_spikefile(filename):\n ''' loads an openephys .spikes file '''\n\n #read the header\n fs,numChannels = load_header(filename)\n # fs,numChannels = 30000.,4\n numSamples = 40 # **NOT CURRENTLY WRITTEN TO HEADER**\n \n #define the data types for reading the file\n spike_dtype = np.dtype([('eventType', np.dtype(' 800)] = 0\n spikes[i] = np.clip(spikes[i],a_min = -1000,a_max=1000)\n \n waveforms = spikes[:,:32,:]\n timestamps = data['timestamps'].astype(np.float) * 1000000. 
/ np.float(fs)\n gain = str(data['gain'][0][0]) + ' ' + str(data['gain'][0][1]) + ' ' + str(data['gain'][0][2]) + ' ' + str(data['gain'][0][3])\n\n #return our data\n return -waveforms, timestamps, fs, gain\n\ndef load_header(filename):\n \n counter = 0\n with open(filename,'rb') as f:\n for line in f:\n counter += 1\n if counter > 10:\n break\n try:\n line = line.decode(\"utf-8\")\n if line.startswith(\"header.sampleRate =\"):\n fs = line[len(\"header.sampleRate =\"):len(line)-2]\n fs = float(fs.strip())\n elif line.startswith(\"header.num_channels =\"):\n numChannels = line[len(\"header.num_channels =\"):len(line)-2]\n numChannels = int(numChannels.strip())\n except:\n break\n f.close()\n \n return fs, numChannels\n","sub_path":"kilosort_control/load_oe.py","file_name":"load_oe.py","file_ext":"py","file_size_in_byte":2901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"83735698","text":"import sys\r\nimport copy\r\nimport time\r\n\r\n#The input and the goal states are passed through an input file .\r\nwith open('input.txt', 'r') as input_file:\r\n data_item = [[int(num) for num in line.split()] for line in input_file if line.strip() != \"\"]\r\n\r\n\r\n#Extracting the Input and Goal States from the input file.\r\nstart_state = data_item[0:3].copy()\r\ngoal_state = data_item[3:6].copy()\r\n\r\ntermination_time = 3600 # Termination time in seconds of program if the solution is not obtained within the timit limit\r\n\r\n\r\n#Defining the structure of the node.\r\nclass node:\r\n \r\n def __init__(self,starts = None,d = None,path = None,move = None,h = None):\r\n self.state = starts\r\n self.depth = d\r\n self.hvalue = h\r\n self.curPath = path\r\n self.operation = move\r\n \r\n def display(self,tlist): \r\n for i in range(0,3):\r\n print(tlist[i])\r\n \r\n def generate_sub_space(self,parent,visited,h = None,total_nodes = None):\r\n children = []\r\n x = None\r\n y = None\r\n \r\n for i in range(0,3):\r\n for j in range(0,3):\r\n if parent.state[i][j] == 0 :\r\n x=i\r\n y=j\r\n break\r\n \r\n if x is not None:\r\n break\r\n \r\n #Defining actions on all possible moves of the Blank space. 
\r\n        if x != 0:\r\n            tpath = copy.deepcopy(parent.curPath)\r\n            tpath.append(\"Up\")\r\n            child = node(copy.deepcopy(parent.state),parent.depth + 1,tpath,\"Up\",h)\r\n            child.state[x - 1][y],child.state[x][y] = child.state[x][y],child.state[x - 1][y]\r\n            if self.to_String(child.state) not in visited:\r\n                children.append(child)\r\n                total_nodes = total_nodes + 1\r\n\r\n        if x != 2:\r\n            tpath = copy.deepcopy(parent.curPath)\r\n            tpath.append(\"Down\")\r\n            child = node(copy.deepcopy(parent.state),parent.depth + 1,tpath,\"Down\",h)\r\n            child.state[x + 1][y],child.state[x][y] = child.state[x][y],child.state[x + 1][y]\r\n            if self.to_String(child.state) not in visited:\r\n                children.append(child)\r\n                total_nodes = total_nodes + 1\r\n\r\n        if y != 0:\r\n            tpath = copy.deepcopy(parent.curPath)\r\n            tpath.append(\"Left\")\r\n            child = node(copy.deepcopy(parent.state),parent.depth + 1,tpath,\"Left\",h)\r\n            child.state[x][y - 1],child.state[x][y] = child.state[x][y],child.state[x][y - 1]\r\n            if self.to_String(child.state) not in visited:\r\n                children.append(child)\r\n                total_nodes = total_nodes + 1\r\n\r\n        if y != 2:\r\n            tpath = copy.deepcopy(parent.curPath)\r\n            tpath.append(\"Right\")\r\n            child = node(copy.deepcopy(parent.state),parent.depth + 1,tpath,\"Right\",h)\r\n            child.state[x][y + 1],child.state[x][y] = child.state[x][y],child.state[x][y + 1]\r\n            if self.to_String(child.state) not in visited:\r\n                children.append(child)\r\n                total_nodes = total_nodes + 1\r\n\r\n        return children,total_nodes  # Return all possible children of the current node\r\n\r\n    def to_String(self,temp_state):\r\n        s=''\r\n        for i in temp_state:\r\n            for j in i:\r\n                s = s + str(j)\r\n        return s\r\n\r\n    # Calculate the Manhattan heuristic value\r\n    def heuristic_manhattan(self,state):\r\n        score = 0\r\n        goalx = [2, 0, 0, 0, 1, 1, 1, 2, 2 ]\r\n        goaly = [2, 0, 1, 2, 0, 1, 2, 0, 1 ]\r\n        for i in range(0, 3):\r\n            for j in range(0,3):\r\n                num=state[i][j]\r\n                if(num!=0):\r\n                    score += abs(i-goalx[num])+abs(j-goaly[num])\r\n        return score\r\n\r\n    # A* search using the Manhattan heuristic\r\n    def A_Star_Search_With_Manhattan_Distance_as_Heuristic(self):\r\n        max_list_size=-sys.maxsize-1\r\n        total_nodes=0\r\n        time_flag=0\r\n        start_time = time.time()\r\n        queue = []\r\n        flag = 0\r\n        visited = set()\r\n        start_node = node(start_state, 1, [], '', 1+self.heuristic_manhattan(start_state))\r\n        queue.append(start_node)\r\n        while (queue):\r\n            if len(queue)>max_list_size:\r\n                max_list_size=len(queue)\r\n            temp_time = time.time()\r\n            if (temp_time - start_time >= termination_time):\r\n                time_flag = 1\r\n                break\r\n            queue.sort(key=lambda x: (x.hvalue))\r\n            current_node = queue.pop(0)\r\n            state_string = self.to_String(current_node.state)\r\n            visited.add(state_string)\r\n            if (current_node.state == goal_state):\r\n                print(\"\\n Success: the program has reached an optimal solution\")\r\n                print(\"Moves=\"+str(len(current_node.curPath)))\r\n                print(str(current_node.curPath))\r\n                flag = 1\r\n                print('')\r\n                
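# report the search statistics for this run\r\n                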
print(\"Total Nodes Visited=\"+str(total_nodes))\r\n                print(\"A* with Manhattan Heuristic Time \"+ str(time.time()-start_time))\r\n                print(\"Maximum List Size=\"+str(max_list_size))\r\n\r\n            if flag == 1:\r\n                break\r\n\r\n            tchilds,total_nodes=self.generate_sub_space(current_node, visited, current_node.depth + self.heuristic_manhattan(current_node.state),total_nodes)\r\n            queue.extend(tchilds)  # Add the expanded children to the list\r\n\r\n        if time_flag == 1:\r\n            print(\"Failure: the program could not find a solution within the time limit\")\r\n            print(\"Total Nodes Visited=\" + str(total_nodes))\r\n            print(\"A* with Manhattan Heuristic terminated due to time out in \"+str(int(time.time()-start_time)/60)+\" minutes\")\r\n\r\nif __name__ == \"__main__\":\r\n    obj1=node()\r\n    print(\"\\n Start State is : \")\r\n    obj1.display(start_state)\r\n\r\n    print(\"\\n Goal State is : \")\r\n    obj1.display(goal_state)\r\n    obj1.A_Star_Search_With_Manhattan_Distance_as_Heuristic()\r\n","sub_path":"A_Star_Search_With_Manhattan_Distance_as_Heuristic.py","file_name":"A_Star_Search_With_Manhattan_Distance_as_Heuristic.py","file_ext":"py","file_size_in_byte":6154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"492685707","text":"# -*- coding: utf-8 -*-\n\n\nfrom deval.component.std.input import InputComponent\nfrom deval.component.ios.utils.iosfuncs import IOSProxy, check_platform_ios, retry_session\nfrom deval.utils.parse import parse_uri\n\n\nclass IOSInputComponent(InputComponent):\n\n    def __init__(self, name, dev, uri):\n        self._name = name\n        self.device = dev\n        try:\n            self.proxy = self.device.iosproxy\n        except AttributeError:\n            self.device.iosproxy = IOSProxy(**check_platform_ios(uri))\n            self.proxy = self.device.iosproxy\n\n    @retry_session\n    def click(self, pos, duration=0.05, button='left'):\n        # translate the click position for the current orientation\n        pos = self.proxy._touch_point_by_orientation(pos)\n\n        # scale the touch position\n        x, y = pos[0] * self.proxy._touch_factor, pos[1] * \\\n            self.proxy._touch_factor\n        if duration >= 0.5:\n            self.proxy.session.tap_hold(x, y, duration)\n        else:\n            self.proxy.session.tap(x, y)\n\n    def swipe(self, p1, p2, duration=0.5, steps=5, fingers=1, button='left'):\n        fx, fy = self.proxy._touch_point_by_orientation(p1)\n        tx, ty = self.proxy._touch_point_by_orientation(p2)\n        self.proxy.session.swipe(fx * self.proxy._touch_factor, fy * self.proxy._touch_factor,\n                                 tx * self.proxy._touch_factor, ty * self.proxy._touch_factor, duration)\n\n    def double_tap(self, pos, button='left'):\n        # translate the tap position for the current orientation\n        pos = self.proxy._touch_point_by_orientation(pos)\n        x, y = pos[0] * self.proxy._touch_factor, pos[1] * \\\n            self.proxy._touch_factor\n        self.proxy.session.double_tap(x, y)\n\n    @property\n    def name(self):\n        return self._name\n\n    @name.setter\n    def name(self, value):\n        self._name = value\n","sub_path":"deval/component/ios/input.py","file_name":"input.py","file_ext":"py","file_size_in_byte":1801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"446883124","text":"# -*- coding: utf-8 -*-\nclass Solution(object):\n    def readBinaryWatch(self, num):\n        \"\"\"\n        :type num: int\n        :rtype: List[str]\n        \"\"\"\n        hours = {}\n        minutes = {}\n        times = []\n\n        for i in range(60):\n            count = bin(i).count('1')\n            if i <= 11:\n                if count not in hours:\n                    hours[count] = []\n                hours[count].append(i)\n            if i <= 59:\n                if count not in minutes:\n                    minutes[count] = []\n                minutes[count].append(i)\n\n        for i in range(0, num + 1):\n            j = num - i\n            for h in hours.get(i, []):\n                for m in minutes.get(j, []):\n                    times.append('%d:%02d' % (h, m))\n\n        return times\n","sub_path":"401-Binary-Watch.py","file_name":"401-Binary-Watch.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"55430830","text":"#gcd\n\ndef gcd(x,y):\n    if x>y:\n        smaller = y\n    else:\n        smaller = x\n\n    for i in range(1,smaller+1):\n        if(( x%i==0) and (y%i)==0):\n            gcd = i\n    return gcd\n\nt = int(input())\nwhile t:\n    a = input()\n    arr = [int(s) 
for s in a.split() if s.isdigit()]\n A = arr[0]\n B = arr[1]\n res = gcd(A,B)\n print(res)\n t = t-1","sub_path":"Mathematical&AlgorithmPuzzle/gcd.py","file_name":"gcd.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"408872610","text":"# Owner(s): [\"module: primTorch\"]\n\nfrom functools import partial\nfrom itertools import product\nimport warnings\nfrom warnings import catch_warnings\nimport unittest\n\nimport torch\nfrom torch.testing import make_tensor\nfrom torch.testing._internal.common_utils import (parametrize, run_tests, TestCase, TEST_SCIPY,\n set_default_dtype, skipCUDAMemoryLeakCheckIf)\nfrom torch.testing._internal.common_device_type import (\n instantiate_device_type_tests,\n onlyCUDA,\n dtypes,\n OpDTypes,\n)\nfrom torch.testing._internal.common_methods_invocations import (\n op_db,\n)\nfrom torch.testing._internal.common_device_type import (\n ops,\n)\n\nfrom torch.testing._internal.logging_tensor import LoggingTensor, capture_logs, log_input\nimport torch._prims as prims\nfrom torch._prims_common import CUDARngStateHelper\nfrom torch._prims.executor import make_traced\nimport torch._refs as refs\nfrom torch.fx.experimental.proxy_tensor import make_fx\n\n\nif TEST_SCIPY:\n import scipy.special\n\nNVPRIM_ATEN_FALLBACK_WARNING = \"fallback to aten executor\"\nGET_ISOLATED_GRAPHMODULE_ERROR = \"get_isolated_graphmodule failed on decomposition\"\n\nclass TestPrims(TestCase):\n @onlyCUDA\n @dtypes(torch.float32)\n def test_broadcast_in_dim(self, device, dtype):\n def _wrapper(a, b, broadcast_dimensions):\n return prims.broadcast_in_dim(a, b.shape, broadcast_dimensions)\n\n traced = make_traced(_wrapper)\n make_arg = partial(make_tensor, device=device, dtype=dtype)\n\n for executor in ('aten', 'strictly_nvfuser'):\n fn = partial(traced, executor=executor)\n # Same shape\n shape = (5, 5)\n a = make_arg(shape)\n b = make_arg(shape, low=0.0, high=0.0)\n result = fn(a, b, (0, 1))\n\n self.assertEqual(result.shape, a.shape)\n self.assertTrue(result.is_contiguous)\n self.assertEqual(a, result)\n\n # Error input: reordering dims\n with self.assertRaises(Exception):\n result = fn(a, b, (1, 0))\n\n # Adding outermost dimensions\n a = make_arg((5, 5))\n b = make_arg((3, 3, 5, 5), low=0.0, high=0.0)\n result = fn(a, b, (2, 3))\n\n self.assertEqual(result.shape, b.shape)\n self.assertEqual(a.broadcast_to(b.shape), result)\n\n # Expands\n a = make_arg((1, 5, 1))\n b = make_arg((3, 5, 7), low=0.0, high=0.0)\n result = fn(a, b, (0, 1, 2))\n\n self.assertEqual(result.shape, b.shape)\n self.assertEqual(a.expand_as(result), result)\n\n # Unsqueezes\n a = make_arg((1, 2, 3))\n b = make_arg((1, 2, 1, 3), low=0.0, high=0.0)\n result = fn(a, b, (0, 1, 3))\n\n self.assertEqual(result.shape, b.shape)\n self.assertEqual(a.unsqueeze(2), result)\n\n # FIXME: This test exposes an issue in nvfuser\n # Adds outermost, expands, and unsqueezes\n \"\"\"\n a = make_arg((1, 2, 3))\n b = make_arg((4, 1, 7, 2, 3, 3), low=0.0, high=0.0)\n result = fn(a, b, (1, 3, 4))\n\n self.assertEqual(result.shape, b.shape)\n a.unsqueeze_(3)\n a.unsqueeze_(1)\n a.unsqueeze_(0)\n self.assertEqual(a.expand_as(result), result)\n \"\"\"\n\n @onlyCUDA\n @dtypes(torch.float32)\n def test_broadcast_in_dim_sum(self, device, dtype):\n def _wrapper(a):\n a_sum = prims.sum(a, [0, 1])\n a_bc = prims.broadcast_in_dim(a_sum, [], [])\n return a_bc\n\n traced = make_traced(_wrapper)\n make_arg = partial(make_tensor, device=device, dtype=dtype)\n\n 
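# exercise the traced wrapper under both the eager (aten) and nvFuser executors\n        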
for executor in ('aten', 'strictly_nvfuser'):\n fn = partial(traced, executor=executor)\n shape = (5, 5)\n a = make_arg(shape)\n result = fn(a)\n\n self.assertEqual(result.shape, ())\n self.assertTrue(result.is_contiguous)\n self.assertEqual(_wrapper(a), result)\n\n @unittest.skipIf(not TEST_SCIPY, \"SciPy not found\")\n @dtypes(torch.float64, torch.long)\n def test_cbrt_prim(self, device, dtype):\n make_arg = partial(make_tensor, device=device, dtype=dtype)\n batches = [(), (1,), (2,), (0, 1), (1, 1), (2, 2)]\n shapes = [(), (0,), (1,), (5,)]\n\n # Sets the default dtype to NumPy's default dtype of double\n with set_default_dtype(torch.double):\n # Tested here, as this OP is not currently exposed or tested in ATen\n for b, s in product(batches, shapes):\n x = make_arg(b + s)\n y = prims.cbrt(x)\n\n x_np = x.cpu().numpy()\n y_np = scipy.special.cbrt(x_np)\n\n self.assertEqual(y, y_np, exact_device=False)\n\n @dtypes(torch.float32)\n def test_collapse(self, device, dtype):\n t = torch.rand(2, 2, 2)\n dim_ranges = [(0, 0), (0, 1), (1, 2), (0, 2)]\n expected_shapes = [(2, 2, 2), (4, 2), (2, 4), (8,)]\n\n for (start, end), shape in zip(dim_ranges, expected_shapes):\n expect = t.reshape(shape)\n\n copy = prims.collapse(t, start, end)\n self.assertEqual(copy, expect)\n self.assertFalse(copy._is_view())\n\n view = prims.collapse_view(t, start, end)\n self.assertEqual(view, expect)\n self.assertTrue(view._is_view())\n\n t_discontig = t.transpose(0, 1)\n with self.assertRaises(ValueError, msg=\"no such view exists\"):\n view = prims.collapse_view(t_discontig, 0, 2)\n\n copy = prims.collapse(t_discontig, 0, 1)\n self.assertEqual(copy, t_discontig.reshape(4, 2))\n\n error_dims = [(-1, 1), (0, 3), (1, -1)]\n for start, end in error_dims:\n for fn in [prims.collapse, prims.collapse_view]:\n with self.assertRaises(AssertionError):\n fn(t, start, end)\n\n @onlyCUDA\n def test_nvfuser_impl_is_used(self, device):\n # This test is to ensure that when the nvfuser implementation exists it is used\n # Assuming one-to-one mapping between prims and nvfuser implementations\n # This test is not intended to test the correctness of the nvfuser implementation\n try:\n from nvfuser import FusionDefinition as fd\n except ImportError:\n from nvfuser._C import FusionDefinition as fd\n\n\n prim_nvfuser_ops = set(torch._prims.__all__).intersection(dir(fd.ops))\n ops_without_nvfuser_impl = {\n name\n for name in prim_nvfuser_ops\n if getattr(torch.ops.nvprims, name, None) is None\n }\n assert (\n len(ops_without_nvfuser_impl) == 0\n ), (f\"The following prims do not have 'impl_nvfuser' defined: {ops_without_nvfuser_impl} \",\n \"while there exists nvfuser implementations for them.\")\n\n def test_skip_ops_nvfuser_prims_mode(self, device):\n # This test verifies that the NvfuserPrimsMode skips the specified\n # functions. 
Skipping a function means that it's not converted into\n # nvprims counterparts.\n from torch._prims.context import NvfuserPrimsMode\n\n a = make_tensor(5, 5, device=device, dtype=torch.float32)\n\n def func(a):\n return torch.ops.prims.sin.default(a)\n\n skip_ops = {\"prims.sin.default\", }\n with NvfuserPrimsMode(skip_ops=skip_ops):\n gm = make_fx(func)(a)\n\n includes_any_prims_sin = any(\n node.target == torch.ops.prims.sin.default for node in gm.graph.nodes\n )\n self.assertTrue(includes_any_prims_sin)\n include_any_nvprims_sin = any(\n node.target == torch.ops.nvprims.sin.default for node in gm.graph.nodes\n )\n self.assertFalse(include_any_nvprims_sin)\n\n def test_skip_ops_nvfuser_capability_mode(self, device):\n # This test verifies that the NvfuserCapabilityMode skips the specified\n # functions. Skipping a function means that specific\n # reference/decomposition is not traced and there's no attempt to lower\n # it to nvprims.\n from torch._prims.context import TorchRefsNvfuserCapabilityMode\n\n a = make_tensor(5, 5, device=device, dtype=torch.float32)\n\n def func(a):\n return torch.sin(a)\n\n skip_ops = {\"torch.sin\", }\n with TorchRefsNvfuserCapabilityMode(skip_ops=skip_ops):\n gm = make_fx(func)(a)\n\n includes_any_aten_sin = any(\n node.target == torch.ops.aten.sin.default for node in gm.graph.nodes\n )\n self.assertTrue(includes_any_aten_sin)\n include_any_nvprims_sin = any(\n node.target == torch.ops.nvprims.sin.default for node in gm.graph.nodes\n )\n self.assertFalse(include_any_nvprims_sin)\n\n def test_partitioner_tuple_output(self, device):\n # This test verifies that the partitioner doesn't segment on nodes with\n # tuple outputs.\n from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner\n from torch._prims.nvfuser_executor import NvfuserPrimOperatorSupport\n\n a = make_tensor(5, 3, 3, device=device, dtype=torch.float32)\n\n def func(x):\n xx = torch.ops.nvprims.add(x, 1)\n var, mean = torch.ops.nvprims.var_mean(x, correction=0)\n var_cos = torch.ops.nvprims.cos(var)\n mean_sin = torch.ops.nvprims.sin(mean)\n return torch.ops.nvprims.add(var_cos, mean_sin)\n\n gm = make_fx(func)(a)\n supported_ops = NvfuserPrimOperatorSupport()\n partitioner = CapabilityBasedPartitioner(\n gm, supported_ops, allows_single_node_partition=False\n )\n partitions = partitioner.propose_partitions()\n self.assertEqual(len(partitions), 1)\n\n @onlyCUDA\n @dtypes(torch.float32)\n def test_full(self, device, dtype):\n from torch.fx.experimental.proxy_tensor import make_fx\n from torch._prims.context import TorchRefsNvfuserCapabilityMode\n from torch._prims.executor import execute\n\n def func1(size, value, b):\n return (torch.full(size, value, dtype=dtype, device=device),)\n\n def func2(size, value, b):\n a = torch.full(size, value, dtype=dtype, device=device)\n b_sin = b.sin()\n return (torch.add(a, b_sin),)\n\n def func3(size, value, b):\n return (torch.full(size, value, dtype=dtype, device=device), b)\n\n def func4(size, value, b):\n b_sin = b.sin()\n return (torch.full(size, value, dtype=dtype, device=device), b_sin)\n\n def func5(size, value, b):\n b_sin = b.sin()\n a = torch.full(size, value, dtype=dtype, device=device)\n a_sin = a.sin()\n return (a, b_sin, a_sin)\n\n for func in (func1, func3, func2, func3, func4, func5):\n size = (3, 3)\n value = 10\n b = torch.randn(*size, dtype=dtype, device=device)\n\n with TorchRefsNvfuserCapabilityMode():\n gm = make_fx(func)(size, value, b)\n\n out = execute(gm, size, value, b, executor=\"strictly_nvfuser\")\n 
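# the nvFuser-executed graph must match eager execution of the same function\n            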
self.assertEqual(out, func(size, value, b))\n\n @onlyCUDA\n def test_nvfuser_empty_fusion(self, device):\n from torch.fx.experimental.proxy_tensor import make_fx\n from torch._prims.executor import execute\n\n a = torch.randn(3, 3, device=device)\n\n def func(a, b, c):\n return (a, b, c)\n\n gm = make_fx(func)(a, a, a)\n\n with self.assertRaisesRegex(AssertionError, \"Graph must contain at least one call_function node\"):\n execute(gm, a, a, a, executor=\"strictly_nvfuser\")\n\n # Should pass with partitioned executor\n out = execute(gm, a, a, a, executor=\"nvfuser\")\n self.assertEqual(out, (a, a, a))\n\n @onlyCUDA\n @dtypes(torch.float16, torch.uint8)\n def test_nvprim_convert_element_type(self, device, dtype):\n from torch.fx.experimental.proxy_tensor import make_fx\n from torch._prims.executor import execute\n from torch._prims.context import TorchRefsNvfuserCapabilityMode\n from torch._prims_common import _torch_dtype_to_nvfuser_dtype_map\n\n # initialize input as float32, which is different from `dtype` in the argument.\n # this ensures that tracing will have a _to_copy node.\n a = torch.randn(3, 3, device=device, dtype=torch.float32)\n\n def func(x, dtype):\n return x.to(dtype).to(x.dtype)\n\n with TorchRefsNvfuserCapabilityMode():\n gm = make_fx(func)(a, dtype)\n execute(gm, a, dtype, executor=\"nvfuser\")\n\n call_function_nodes = list(filter(lambda n: n.op == \"call_function\", gm.graph.nodes))\n includes_aten_to_copy = any(\n torch.ops.aten._to_copy.default == node.target\n for node in call_function_nodes\n )\n includes_nvprim_convert_element_type = any(\n torch.ops.nvprims.convert_element_type.default == node.target\n for node in call_function_nodes\n )\n nvprim_support_flag = _torch_dtype_to_nvfuser_dtype_map.get(dtype) is not None\n self.assertEqual(includes_aten_to_copy, not nvprim_support_flag)\n self.assertEqual(includes_nvprim_convert_element_type, nvprim_support_flag)\n\n @onlyCUDA\n def test_nvfuser_rand_like_fusion(self, device):\n from torch._prims.context import TorchRefsNvfuserCapabilityMode\n from torch.fx.experimental.proxy_tensor import make_fx\n from torch._prims.executor import execute\n\n a = torch.randn(3, 3, device=device)\n\n def func(a):\n return torch.rand_like(a)\n\n with TorchRefsNvfuserCapabilityMode():\n gm = make_fx(func)(a)\n\n out = execute(gm, a, executor=\"strictly_nvfuser\")\n self.assertEqual(out.size(), a.size())\n\n @skipCUDAMemoryLeakCheckIf(True) # https://github.com/pytorch/pytorch/issues/84529\n @onlyCUDA\n def test_nvfuser_no_args(self, device):\n from torch._prims.context import TorchRefsNvfuserCapabilityMode\n from torch.fx.experimental.proxy_tensor import make_fx\n from torch._prims.executor import execute\n from torch._prims.nvfuser_executor import make_nvfuser_fusion\n\n a = torch.randn(3, 3, device=device)\n\n def func():\n return torch.sigmoid(a)\n\n with TorchRefsNvfuserCapabilityMode():\n gm = make_fx(func)()\n\n with warnings.catch_warnings(record=True) as caught:\n execute(gm, executor=\"strictly_nvfuser\")\n # fusion execute with no cuda input is handled by nvprim aten fallback\n self.assertTrue(any(NVPRIM_ATEN_FALLBACK_WARNING in str(w.message) for w in caught))\n\n with self.assertRaisesRegex(AssertionError, \"There must be at least one argument\"):\n make_nvfuser_fusion(gm)\n\n with self.assertRaisesRegex(AssertionError, \"Number of placeholder nodes in the graph must match\"):\n execute(gm, a, executor=\"strictly_nvfuser\")\n\n # Should pass with partitioned executor\n out = execute(gm, executor=\"nvfuser\")\n 
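# the partitioned executor should agree with calling func eagerly\n        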
self.assertEqual(out, func())\n\n @onlyCUDA\n def test_nvfuser_constant_tensors(self, device):\n from torch._prims.context import TorchRefsNvfuserCapabilityMode\n from torch.fx.experimental.proxy_tensor import make_fx\n from torch._prims.executor import execute\n\n a = torch.randn(3, 3, device=device)\n b = torch.randn(3, 3, device=device)\n\n def func(b):\n return a + b\n\n with TorchRefsNvfuserCapabilityMode():\n gm = make_fx(func)(b)\n\n with self.assertRaisesRegex(AssertionError, \"not supported yet\"):\n execute(gm, b, executor=\"strictly_nvfuser\")\n\n # Should pass with partitioned executor\n out = execute(gm, b, executor=\"nvfuser\")\n self.assertEqual(out, gm(b))\n\n @onlyCUDA\n def test_nvfuser_executor_cached_noncontiguous(self, device):\n # This test is to ensure that nvfuser computes correct results for noncontiguous tensors\n from torch.fx.experimental.proxy_tensor import make_fx\n from torch._prims.context import TorchRefsNvfuserCapabilityMode\n from torch._prims.executor import execute\n\n a = torch.randn(3, 3, device=device)\n\n def func(a):\n return torch.sigmoid(a)\n\n with TorchRefsNvfuserCapabilityMode():\n gm = make_fx(func)(a)\n\n # First run to create the cache\n execute(gm, a, executor=\"strictly_nvfuser\")\n\n # a.mT is noncontiguous, but it shouldn't affect correctness\n expected = execute(gm, a.mT, executor=\"aten\")\n for use_python_cache in [True, False]:\n params = {\"use_python_fusion_cache\": use_python_cache}\n actual = execute(gm, a.mT, executor=\"strictly_nvfuser\", executor_parameters=params)\n self.assertEqual(expected, actual)\n\n def test_nvfuser_capability_context(self, device):\n # This test is to ensure that the torch calls are replaced with refs\n # based on the nvfuser+prims capability\n from torch.fx.experimental.proxy_tensor import make_fx\n from torch._prims.context import TorchRefsNvfuserCapabilityMode\n\n # It's assumed that digamma is not supported by nvfuser\n # If it's ever supported, this test will need to be updated\n self.assertTrue(getattr(torch.ops.nvprims, \"digamma\", None) is None)\n\n a = torch.randn(3, 3, device=device)\n\n def func(a):\n return torch.digamma(a)\n\n with TorchRefsNvfuserCapabilityMode():\n gm = make_fx(func)(a)\n\n # Check that the torch.digamma is not replaced with torch.ops.prims.digamma\n call_function_nodes = list(filter(lambda n: n.op == \"call_function\", gm.graph.nodes))\n includes_aten_digamma = any(\n torch.ops.aten.digamma.default == node.target\n for node in call_function_nodes\n )\n includes_prims_digamma = any(\n torch.ops.prims.digamma.default == node.target\n for node in call_function_nodes\n )\n self.assertTrue(includes_aten_digamma)\n self.assertFalse(includes_prims_digamma)\n\n # Check mixed case, sigmoid is replaced with refs, but digamma is not\n def func(a):\n return torch.sigmoid(torch.digamma(a))\n\n with TorchRefsNvfuserCapabilityMode():\n gm = make_fx(func)(a)\n\n call_function_nodes = list(filter(lambda n: n.op == \"call_function\", gm.graph.nodes))\n includes_aten_sigmoid = any(\n torch.ops.aten.sigmoid.default == node.target\n for node in call_function_nodes\n )\n includes_prims_digamma = any(\n torch.ops.prims.digamma.default == node.target\n for node in call_function_nodes\n )\n includes_nvprims_exp = any(\n torch.ops.nvprims.exp.default == node.target\n for node in call_function_nodes\n )\n self.assertFalse(includes_aten_sigmoid)\n self.assertFalse(includes_prims_digamma)\n self.assertTrue(includes_nvprims_exp)\n\n\n def test_aten_overload_to_prims(self, device):\n # This test is 
to ensure that the torch.ops.aten calls are replaced with refs\n from torch.fx.experimental.proxy_tensor import make_fx\n from torch._prims.context import TorchRefsMode\n\n a = torch.randn(3, 3, device=device)\n\n def func(a):\n return torch.ops.aten.sigmoid.default(torch.ops.aten.digamma.default(a))\n\n with TorchRefsMode():\n gm = make_fx(func)(a)\n\n # Check that all call_function nodes are prims\n call_function_nodes = list(filter(lambda n: n.op == \"call_function\", gm.graph.nodes))\n all_prims_namespace = all(\n node.target.name().startswith(\"prims\") for node in call_function_nodes\n )\n self.assertTrue(all_prims_namespace)\n\n\n @onlyCUDA\n def test_nvfuser_executor_parameters(self, device):\n from torch.fx.experimental.proxy_tensor import make_fx\n from torch._prims.executor import execute\n\n a = torch.randn(3, 4, device=device)\n\n def func(a):\n return torch.ops.nvprims.add(a, a)\n\n gm = make_fx(func)(a)\n\n expected = execute(gm, a, executor=\"aten\")\n # Shouldn't raise an error because unuseful parameters are ignored\n params_dicts = [None, {}, {\"none\": None}]\n for params in params_dicts:\n actual = execute(gm, a, executor=\"nvfuser\", executor_parameters=params)\n self.assertEqual(expected, actual)\n\n # Check caching parameter\n for use_cache in [True, False]:\n params = {\"use_python_fusion_cache\": use_cache}\n actual = execute(gm, a, executor=\"nvfuser\", executor_parameters=params)\n self.assertEqual(expected, actual)\n\n # Check allow_single_op_fusion parameter\n for allow_single_op_fusion in [True, False]:\n params = {\"allow_single_op_fusion\": allow_single_op_fusion}\n actual = execute(gm, a, executor=\"nvfuser\", executor_parameters=params)\n self.assertEqual(expected, actual)\n\n\n @onlyCUDA\n def test_nvfuser_executor_partitioned(self, device):\n # This test is to ensure that nvfuser partitioned executor works correctly\n # It's assumed that digamma is not supported by nvfuser\n # If it's ever supported, this test will need to be updated\n self.assertTrue(getattr(torch.ops.nvprims, \"digamma\", None) is None)\n\n from torch.fx.experimental.proxy_tensor import make_fx\n from torch._prims.context import TorchRefsNvfuserCapabilityMode\n from torch._prims.executor import execute\n\n a = torch.randn(3, 4, device=device)\n b = torch.rand(3, 1, device=device)\n c = torch.rand(3, 4, device=device)\n\n def func(a, b, c):\n aa = torch.digamma(a) # not supported by nvfuser\n d = torch.add(b, c)\n dd = torch.sqrt(d)\n return torch.mul(aa, dd.digamma())\n\n with TorchRefsNvfuserCapabilityMode():\n gm = make_fx(func)(a, b, c)\n\n expected = execute(gm, a, b, c, executor=\"aten\")\n actual = execute(gm, a, b, c, executor=\"nvfuser\")\n self.assertEqual(expected, actual)\n\n @onlyCUDA\n def test_nvfuser_executor_partitioned_no_partitions_error(self, device):\n # This test is to ensure that nvfuser partitioned executor works correctly\n # It's assumed that digamma is not supported by nvfuser\n # If it's ever supported, this test will need to be updated\n self.assertTrue(getattr(torch.ops.nvprims, \"digamma\", None) is None)\n\n from torch.fx.experimental.proxy_tensor import make_fx\n from torch._prims.context import TorchRefsNvfuserCapabilityMode\n from torch._prims.executor import execute\n\n a = torch.randn(3, 4, device=device)\n\n def func(a):\n return torch.digamma(a) # not supported by nvfuser\n\n with TorchRefsNvfuserCapabilityMode():\n gm = make_fx(func)(a)\n\n with catch_warnings(record=True) as w:\n # Trigger warning\n execute(gm, a, executor=\"nvfuser\")\n # Check 
warning occurs\n self.assertEqual(len(w), 1)\n self.assertTrue(\"is not supported by nvFuser\" in str(w[-1].message))\n\n def test_nvprims(self, device):\n # This test is to ensure that nvfuser specific prims are exposed\n # and can be traced with make_fx\n from torch.fx.experimental.proxy_tensor import make_fx\n\n def func(a):\n return torch.ops.nvprims.add(a, a)\n\n a = torch.randn(3, 4, device=device)\n gm = make_fx(func)(a)\n\n for node in gm.graph.nodes:\n if node.op == \"call_function\":\n self.assertTrue(node.name == \"add\")\n self.assertTrue(node.target == torch.ops.nvprims.add.default)\n self.assertFalse(node.target == torch.ops.prims.add.default)\n self.assertFalse(node.target == torch.ops.aten.add.default)\n\n @onlyCUDA\n @dtypes(torch.float32, torch.float64)\n def test_native_batch_norm_nvprims(self, device, dtype):\n from torch._prims.context import TorchRefsNvfuserCapabilityMode\n from torch._prims.executor import execute\n\n # This test verifies that native_batch_norm is translated into nvprims\n # and can be executed with nvFuser\n from torch.fx.experimental.proxy_tensor import make_fx\n from torch.testing._internal.common_methods_invocations import (\n sample_inputs_native_batch_norm,\n )\n\n samples = sample_inputs_native_batch_norm(\n None, device, dtype, requires_grad=False\n )\n batch_norms = [\n torch.native_batch_norm,\n torch.ops.aten.native_batch_norm,\n torch.ops.aten.native_batch_norm.default,\n torch.ops.nvprims.native_batch_norm.default,\n ]\n for sample, batch_norm in product(samples, batch_norms):\n if sample.input.numel() == 0:\n continue\n\n def func(\n input, weight, bias, running_mean, running_var, training, momentum, eps\n ):\n return batch_norm(\n input,\n weight,\n bias,\n running_mean,\n running_var,\n training,\n momentum,\n eps,\n )\n\n with TorchRefsNvfuserCapabilityMode():\n gm = make_fx(func)(sample.input, *sample.args)\n\n call_function_nodes = list(\n filter(lambda n: n.op == \"call_function\", gm.graph.nodes)\n )\n includes_aten_batch_norm = any(\n torch.ops.aten.native_batch_norm.default == node.target\n for node in call_function_nodes\n )\n self.assertFalse(includes_aten_batch_norm)\n\n includes_nvprims_batch_norm = any(\n torch.ops.nvprims.native_batch_norm.default == node.target\n for node in call_function_nodes\n )\n self.assertTrue(includes_nvprims_batch_norm)\n\n # Check that the graph can be executed with nvFuser\n out = execute(gm, sample.input, *sample.args, executor=\"strictly_nvfuser\")\n self.assertEqual(out, gm(sample.input, *sample.args))\n\n @onlyCUDA\n @dtypes(torch.float32, torch.float64)\n def test_cudnn_batch_norm_nvprims(self, device, dtype):\n from torch._prims.context import TorchRefsNvfuserCapabilityMode\n from torch._prims.executor import execute\n\n # This test verifies that cudnn_batch_norm is translated into nvprims\n # and can be executed with nvFuser\n from torch.fx.experimental.proxy_tensor import make_fx\n from torch.testing._internal.common_methods_invocations import (\n sample_inputs_native_batch_norm,\n )\n\n samples = sample_inputs_native_batch_norm(\n None, device, dtype, requires_grad=False\n )\n for sample in samples:\n if sample.input.numel() == 0:\n continue\n\n def func(\n input, weight, bias, running_mean, running_var, training, momentum, eps\n ):\n return torch.ops.aten.cudnn_batch_norm.default(\n input,\n weight,\n bias,\n running_mean,\n running_var,\n training,\n momentum,\n eps,\n )\n\n with TorchRefsNvfuserCapabilityMode():\n gm = make_fx(func)(sample.input, *sample.args)\n\n 
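# For reference, a minimal eager-mode sanity check (plain PyTorch assumed, no
# nvFuser) of what inference-mode batch norm computes, and therefore what any
# nvprims translation has to reproduce: normalize by the running statistics,
# then apply the per-channel affine scale and shift.
import torch

x = torch.randn(2, 3, 4)
weight, bias = torch.randn(3), torch.randn(3)
running_mean, running_var = torch.zeros(3), torch.ones(3)
eps = 1e-5
expected = torch.nn.functional.batch_norm(
    x, running_mean, running_var, weight, bias, training=False, eps=eps
)
manual = (x - running_mean[:, None]) / torch.sqrt(running_var[:, None] + eps)
manual = manual * weight[:, None] + bias[:, None]
assert torch.allclose(expected, manual, atol=1e-6)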
call_function_nodes = list(\n filter(lambda n: n.op == \"call_function\", gm.graph.nodes)\n )\n includes_aten_batch_norm = any(\n torch.ops.aten.cudnn_batch_norm.default == node.target\n for node in call_function_nodes\n )\n self.assertFalse(includes_aten_batch_norm)\n\n includes_nvprims_batch_norm = any(\n torch.ops.nvprims.native_batch_norm.default == node.target\n for node in call_function_nodes\n )\n self.assertTrue(includes_nvprims_batch_norm)\n\n # Check that the graph can be executed with nvFuser\n out = execute(gm, sample.input, *sample.args, executor=\"nvfuser\")\n ref_out = gm(sample.input, *sample.args)\n for idx, (left, right) in enumerate(zip(out, ref_out)):\n # Nvfuser does not support torch.uint8 dtype so check reserve output against 0 scalar\n if idx == 3:\n self.assertTrue(torch.all(torch.eq(left, 0)))\n else:\n self.assertEqual(left, right)\n\n # decomposition of native_batch_norm_backward uses a casting, which prevents nvprim lowering on CPU build\n @onlyCUDA\n @dtypes(torch.float32, torch.float16)\n def test_batch_norm_backward_nvprims(self, device, dtype):\n # This test verifies that the backward pass of batch norm is correctly decomposed into nvprims\n from torch.fx.experimental.proxy_tensor import make_fx\n from torch._prims.context import TorchRefsNvfuserCapabilityMode\n from torch.testing._internal.common_methods_invocations import sample_inputs_batch_norm\n\n samples_iter = sample_inputs_batch_norm(None, device, dtype, requires_grad=True)\n sample = next(samples_iter)\n grad = torch.randn_like(sample.input)\n\n def func1(grad, input, weight, rm, rv, eps, train):\n return torch.ops.aten.native_batch_norm_backward.default(\n grad, input, weight, rm, rv, rm, rv, train, eps, [True, True, True]\n )\n\n def func2(grad, input, weight, rm, rv, eps, train):\n return torch.ops.aten.cudnn_batch_norm_backward.default(\n input, grad, weight, rm, rv, rm, rv, eps, grad\n )\n\n args = sample.args\n kwargs = sample.kwargs\n all_args = [grad, sample.input, args[2], args[0], args[1], kwargs['eps'], kwargs['training']]\n\n for func in (func1, func2):\n with TorchRefsNvfuserCapabilityMode():\n gm = make_fx(func)(*all_args)\n\n call_function_nodes = list(filter(lambda n: n.op == \"call_function\", gm.graph.nodes))\n includes_batch_norm_backward = any(\n torch.ops.aten.native_batch_norm_backward.default == node.target\n for node in call_function_nodes\n )\n self.assertFalse(includes_batch_norm_backward)\n all_nvprims = all(\n str(node.target).startswith(\"nvprims\") for node in call_function_nodes\n )\n self.assertTrue(all_nvprims)\n\n @onlyCUDA\n @dtypes(torch.float32)\n def test_silu_backward_no_filled_tensor(self, device, dtype):\n # This test verifies a workaround for\n # https://github.com/pytorch/pytorch/issues/86612\n from torch.fx.experimental.proxy_tensor import make_fx\n from functorch import functionalize\n from torch._prims.nvfuser_executor import _remove_empty_like_fill\n from torch._prims.context import TorchRefsNvfuserCapabilityMode\n\n def func(a):\n out = torch.nn.functional.silu(a)\n grad = torch.ones_like(out)\n return torch.autograd.grad([out], [a], [grad])\n\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=True)\n a = make_arg((3, 4))\n gm = make_fx(func)(a)\n # functionalize(gm) doesn't work with non-detached inputs\n gm = make_fx(functionalize(gm))(a.detach())\n\n # replace aten.sub with nvprims.sub\n with TorchRefsNvfuserCapabilityMode():\n gm = make_fx(gm)(a)\n\n # Check that the graph contains empty_like\n any_aten_empty_like = any(\n 
node.target == torch.ops.aten.empty_like.default for node in gm.graph.nodes\n )\n self.assertTrue(any_aten_empty_like)\n any_aten_fill = any(\n node.target == torch.ops.aten.fill.Scalar for node in gm.graph.nodes\n )\n self.assertTrue(any_aten_fill)\n\n # Now remove the empty_like and fill\n gm = _remove_empty_like_fill(gm)\n any_aten_empty_like = any(\n node.target == torch.ops.aten.empty_like.default for node in gm.graph.nodes\n )\n self.assertFalse(any_aten_empty_like)\n any_aten_fill = any(\n node.target == torch.ops.aten.fill.Scalar for node in gm.graph.nodes\n )\n self.assertFalse(any_aten_fill)\n self.assertEqual(gm(a), func(a))\n\n\n @onlyCUDA\n @dtypes(torch.float32)\n @parametrize(\"correction\", [0, 1])\n def test_var(self, device, dtype, correction):\n def _wrapper(a):\n return prims.var(a, [0, 1], correction=correction)\n\n traced = make_traced(_wrapper)\n make_arg = partial(make_tensor, device=device, dtype=dtype)\n\n for executor in ('aten', 'strictly_nvfuser'):\n fn = partial(traced, executor=executor)\n shape = (5, 5)\n a = make_arg(shape)\n result = fn(a)\n\n self.assertEqual(result.shape, ())\n self.assertTrue(result.is_contiguous)\n self.assertEqual(_wrapper(a), result)\n\n @onlyCUDA\n @dtypes(torch.float16, torch.float32)\n @parametrize(\"correction\", [0, 1])\n @parametrize(\"keepdim\", [True, False])\n def test_var_mean(self, device, dtype, correction, keepdim):\n from torch.fx.experimental.proxy_tensor import make_fx\n from torch._prims.context import TorchRefsNvfuserCapabilityMode\n\n\n def _wrapper(a):\n return torch.var_mean(a, [0, 1], correction=correction, keepdim=keepdim)\n\n make_arg = partial(make_tensor, device=device, dtype=dtype)\n\n with TorchRefsNvfuserCapabilityMode():\n gm = make_fx(_wrapper)(make_arg((5, 5)))\n\n call_function_nodes = list(filter(lambda n: n.op == \"call_function\", gm.graph.nodes))\n includes_nvprims_var_mean = any(\n torch.ops.nvprims.var_mean.main == node.target\n for node in call_function_nodes\n )\n self.assertTrue(includes_nvprims_var_mean)\n\n @onlyCUDA\n @dtypes(torch.float16, torch.float32)\n def test_nvprims_view(self, device, dtype):\n from torch.fx.experimental.proxy_tensor import make_fx\n from torch._prims.context import TorchRefsNvfuserCapabilityMode\n from torch._prims.executor import execute\n\n make_arg = partial(make_tensor, device=device, dtype=dtype)\n a = make_arg((3, 4, 5))\n\n def func1(a):\n return a.view(tuple(reversed(a.shape)))\n\n def func2(a):\n return a.reshape(tuple(reversed(a.shape)))\n\n def func3(a):\n return torch.view_copy(a, tuple(reversed(a.shape)))\n\n def func4(a):\n return torch.reshape(a, tuple(reversed(a.shape)))\n\n def func5(a):\n return torch.ops.aten.view.default(a, tuple(reversed(a.shape)))\n\n def func6(a):\n return torch.ops.aten._unsafe_view.default(a, tuple(reversed(a.shape)))\n\n def func7(a):\n return torch.ops.aten.view_copy.default(a, tuple(reversed(a.shape)))\n\n for func in (func1, func2, func3, func4, func5, func6, func7):\n with TorchRefsNvfuserCapabilityMode():\n gm = make_fx(func)(a)\n\n call_function_nodes = list(filter(lambda n: n.op == \"call_function\", gm.graph.nodes))\n includes_nvprims_view = any(\n torch.ops.nvprims.view.default == node.target\n for node in call_function_nodes\n )\n self.assertTrue(includes_nvprims_view)\n\n # Try executing the graph\n out = execute(gm, a, executor=\"strictly_nvfuser\")\n self.assertEqual(out, func(a))\n\n @onlyCUDA\n @dtypes(torch.float16, torch.float32)\n def test_nvprims_view_partitioner(self, device, dtype):\n # This test 
verifies that views that are not fused with other ops are\n        # correctly overridden to call the aten implementation.\n        from torch.fx.experimental.proxy_tensor import make_fx\n        from torch._prims.context import TorchRefsNvfuserCapabilityMode\n        from torch._prims.nvfuser_executor import maybe_partition_graph\n\n        make_arg = partial(make_tensor, device=device, dtype=dtype)\n        a = make_arg((4, 5))\n        b = make_arg((5, 4))\n\n        def func(a, b):\n            aa = a.view(b.shape)\n            aa = aa.view(a.shape)\n            return aa.digamma()\n\n        with TorchRefsNvfuserCapabilityMode():\n            gm = make_fx(func)(a, b)\n        gm, _ = maybe_partition_graph(gm, False, False)\n\n        out = gm(a, b)\n        self.assertEqual(out, func(a, b))\n\n    @onlyCUDA\n    @dtypes(torch.float32, torch.float16)\n    def test_cpu_tensor(self, device, dtype):\n        from torch.fx.experimental.proxy_tensor import make_fx\n        from torch._prims.context import TorchRefsNvfuserCapabilityMode\n        from torch._prims.executor import execute\n\n        def _wrapper(t0, t1, cpu_scalar):\n            return t0 + t1 + cpu_scalar\n\n        make_arg = partial(make_tensor, device=device, dtype=dtype)\n        a = make_arg((12, 1))\n        b = make_arg((12, 12))\n        c = torch.tensor(0.5)\n\n        with TorchRefsNvfuserCapabilityMode():\n            gm = make_fx(_wrapper)(a, b, c)\n\n        with warnings.catch_warnings(record=True) as caught:\n            actual = execute(gm, a, b, c, executor=\"nvfuser\")\n        # cpu scalar tensor is handled by nvfuser codegen, so it shouldn't fall back\n        self.assertFalse(any(NVPRIM_ATEN_FALLBACK_WARNING in str(w.message) for w in caught))\n\n        expected = execute(gm, a, b, c, executor=\"aten\")\n        self.assertEqual(expected, actual)\n\n        call_function_nodes = list(filter(lambda n: n.op == \"call_function\", gm.graph.nodes))\n        includes_aten_add = any(\n            torch.ops.aten.add.default == node.target\n            for node in call_function_nodes\n        )\n        self.assertFalse(includes_aten_add)\n\n        with warnings.catch_warnings(record=True) as caught:\n            nvprim_aten_fallback = execute(gm, a.cpu(), b.cpu(), c, executor=\"nvfuser\")\n        # cpu tensor is handled by the nvprim aten fallback, assert that it's indeed in the warning\n        self.assertTrue(any(NVPRIM_ATEN_FALLBACK_WARNING in str(w.message) for w in caught))\n\n        self.assertEqual(expected, nvprim_aten_fallback)\n\n    @onlyCUDA\n    @dtypes(torch.float32)\n    def test_pytree_input_output(self, device, dtype):\n        @make_traced\n        def fn(a, b_dict):\n            b = b_dict[\"b\"]\n            d = {}\n            d[\"c\"] = torch.add(a, b)\n            return (d, torch.add(a, d[\"c\"]))\n\n        make_arg = partial(make_tensor, device=device, dtype=dtype)\n        a = make_arg((5, 5))\n        b = make_arg((1, 5))\n        b_dict = {\"b\": b}\n\n        result_aten = fn(a, b_dict, executor=\"aten\")\n        result_nvfuser = fn(a, b_dict, executor=\"strictly_nvfuser\")\n        self.assertEqual(result_aten, result_nvfuser)\n\n    @dtypes(torch.float32)\n    def test_memory_format_strides(self, device, dtype):\n        shapes = (\n            (),\n            (0,),\n            (1,),\n            (5,),\n            (1, 0),\n            (1, 1),\n            (3, 7),\n            (3, 0, 2),\n            (1, 1, 2),\n            (4, 1, 1),\n            (7, 8, 9),\n        )\n\n        channels_last_shapes = (\n            (0, 0, 0, 0),\n            (1, 0, 3, 0),\n            (0, 2, 3, 5),\n            (2, 2, 2, 0),\n            (5, 4, 3, 2),\n            (8, 8, 7, 2),\n            (9, 1, 3, 1),\n            (4, 5, 8, 7)\n        )\n\n        channels_last_3d_shapes = (\n            (0, 8, 7, 9, 2),\n            (5, 0, 7, 9, 2),\n            (5, 0, 7, 9, 0),\n            (5, 8, 7, 9, 2),\n            (5, 1, 7, 9, 2),\n            (5, 1, 7, 9, 1),\n        )\n\n        pairs = (\n            (shapes, torch.contiguous_format),\n            (channels_last_shapes, torch.contiguous_format),\n            (channels_last_3d_shapes, torch.contiguous_format),\n            (channels_last_shapes, torch.channels_last),\n            (channels_last_3d_shapes, torch.channels_last_3d),\n        )\n\n        for shapes, memory_format in pairs:\n            for shape in shapes:\n                
# tests empty\n                expected = torch.empty(shape, device=device, dtype=dtype, memory_format=memory_format)\n                actual = refs.empty(shape, device=device, dtype=dtype, memory_format=memory_format)\n                self.assertEqual(expected.stride(), actual.stride())\n\n                # tests clone\n                a = torch.testing.make_tensor(shape, device=device, dtype=dtype)\n                expected = torch.clone(a, memory_format=memory_format)\n                actual = refs.clone(a, memory_format=memory_format)\n                self.assertEqual(expected.stride(), actual.stride())\n\n                # tests contiguous\n                a = torch.testing.make_tensor(shape, device=device, dtype=dtype, noncontiguous=True)\n                expected = a.contiguous(memory_format=memory_format)\n                actual = refs.contiguous(a, memory_format=memory_format)\n                self.assertEqual(expected.stride(), actual.stride())\n\n    @dtypes(torch.float32)\n    def test_reshape_view_method(self, device, dtype):\n        make_arg = partial(make_tensor, device=device, dtype=dtype)\n        a = make_arg((5, 5))\n        new_shape = 1, 5, 1, 5\n        result_eager = a.reshape(*new_shape)\n        result_refs = refs.reshape(a, *new_shape)\n        self.assertEqual(result_eager, result_refs)\n\n        result_eager = a.view(*new_shape)\n        result_refs = refs.view(a, *new_shape)\n        self.assertEqual(result_eager, result_refs)\n\n\n    @onlyCUDA\n    @dtypes(torch.float32)\n    def test_philox_rand(self, device, dtype):\n        sizes = (1000, 1000000)  # offsets of 4 and 8\n        repeats = 2  # Checks results of multiple rand calls against multiple philox_rand calls\n        for size in sizes:\n            torch.cuda.manual_seed(123)\n            references = []\n            results = []\n            rng_states = []\n            for _ in range(repeats):\n                rng_states.append(CUDARngStateHelper.get_torch_state_as_tuple())\n                references.append(torch.rand(size, device=device, dtype=dtype))\n\n            torch.cuda.manual_seed(123)\n            for idx in range(repeats):\n                seed, offset = rng_states[idx]\n                result, _ = torch.ops.rngprims.philox_rand((size,),\n                                                           seed=seed,\n                                                           offset=offset,\n                                                           stride=None,\n                                                           device=device,\n                                                           dtype=dtype)\n                results.append(result)\n\n            for a, b in zip(references, results):\n                self.assertEqual(a, b)\n\n\n    @dtypes(torch.float32)\n    def test_functional_rng_wrappers(self, device, dtype):\n\n        torch.manual_seed(123)\n        ref1 = torch.rand(10, device=device, dtype=dtype)\n        ref2 = torch.rand(10, device=device, dtype=dtype)\n\n\n        torch.manual_seed(123)\n        rng_state1, res1 = torch._prims.rng_prims.run_and_save_rng_state(torch.rand, 10, device=device, dtype=dtype)\n        rng_state2, res2 = torch._prims.rng_prims.run_and_save_rng_state(torch.rand, 10, device=device, dtype=dtype)\n\n        res3 = torch._prims.rng_prims.run_with_rng_state(rng_state1, torch.rand, 10, device=device, dtype=dtype)\n        res4 = torch._prims.rng_prims.run_with_rng_state(rng_state2, torch.rand, 10, device=device, dtype=dtype)\n\n        self.assertEqual(ref1, res1)\n        self.assertEqual(ref2, res2)\n        self.assertEqual(ref1, res3)\n        self.assertEqual(ref2, res4)\n\nclass TestPrimsBasic(TestCase):\n    def test_torch_ops(self):\n        r = make_tensor((2,), device='cpu', dtype=torch.float)\n        self.assertEqual(torch.ops.prims.sin(r), torch.sin(r))\n\n        r = LoggingTensor(r)\n        with capture_logs() as logs:\n            log_input(\"input\", r)\n            prims.sin(r)\n        self.assertExpectedInline('\\n'.join(logs), \"\"\"\\\n$0: f32[2] = input('input')\n$1: f32[2] = torch._ops.prims.sin.default($0)\"\"\")\n\n    def test_mul_complex(self):\n        prims.mul(torch.randn(2), 1 + 1j)\n\n    def test_check_deprecation_warning(self):\n        with self.assertWarnsRegex(DeprecationWarning, 'will be removed in the future'):\n            torch._prims_common.check(True, lambda: 'message')\n\n\ninstantiate_device_type_tests(TestPrims, globals())\n\n\nclass 
TestRefs(TestCase):\n @dtypes(torch.float32)\n def test_constant_pad_nd_memory_format(self, device, dtype):\n # Test memory format is preserved in unambiguous cases\n for mf, ndim in (\n (torch.channels_last, 4),\n (torch.contiguous_format, 4),\n (torch.channels_last_3d, 5),\n (torch.contiguous_format, 5),\n ):\n a = torch.zeros([2] * ndim).to(memory_format=mf)\n res = refs.constant_pad_nd(a, pad=[1] * (2 * ndim))\n self.assertTrue(res.is_contiguous(memory_format=mf))\n\n # Ambiguous cases\n\n # is_channels_last_ and is_contiguous_, results in channels_last output\n a = torch.empty_strided((2, 1, 2, 2), stride=(4, 1, 2, 1))\n self.assertTrue(a.is_contiguous(memory_format=torch.channels_last))\n self.assertTrue(a.is_contiguous())\n actual = refs.constant_pad_nd(a, pad=[1] * 8)\n expect = torch.constant_pad_nd(a, pad=[1] * 8)\n self.assertEqual(actual.stride(), expect.stride())\n self.assertTrue(actual.is_contiguous(memory_format=torch.channels_last))\n\n # is_channels_last_contiguous_ but not is_channels_last_, results in\n # contiguous output\n a = torch.empty_strided((2, 1, 2, 2), stride=(4, 4, 2, 1))\n self.assertTrue(a.is_contiguous(memory_format=torch.channels_last))\n self.assertTrue(a.is_contiguous())\n actual = refs.constant_pad_nd(a, pad=[1] * 8)\n expect = torch.constant_pad_nd(a, pad=[1] * 8)\n self.assertEqual(actual.stride(), expect.stride())\n self.assertTrue(actual.is_contiguous())\n\n def test_unbind(self):\n # If unbind returns empty tuple, it breaks some assumptions in some backward tests in test_ops.py.\n # So can't put this test into common_methods_invocations.py.\n a = torch.rand([3, 0, 4])\n actual = refs.unbind(a, 1)\n expect = torch.unbind(a, 1)\n self.assertEqual(actual, expect)\n\n def test_logspace_with_complex_input(self):\n actual = refs.logspace(2, 10 + 5j, steps=5)\n expect = torch.logspace(2, 10 + 5j, steps=5)\n self.assertEqual(actual, expect)\n\n def test_linspace_with_complex_input(self):\n actual = refs.linspace(2, 10 + 5j, steps=5)\n expect = torch.linspace(2, 10 + 5j, steps=5)\n self.assertEqual(actual, expect)\n\n\ninstantiate_device_type_tests(TestRefs, globals())\n\n\nclass TestDecomp(TestCase):\n @onlyCUDA\n @dtypes(torch.float16, torch.float32)\n def test_decomposition_type_promotion_nvprim_amp(self, device, dtype):\n x = torch.rand(5, device=device).to(dtype)\n y = torch.rand(5, device=device).to(dtype)\n\n from torch._prims.context import TorchRefsNvfuserCapabilityMode, _is_func_unsupported_nvfuser\n from torch.fx.experimental.proxy_tensor import make_fx\n op = torch.ops.aten.leaky_relu_backward.default\n op_decomp = torch._decomp.decomposition_table.get(op)\n\n def fn0(*arg):\n return _is_func_unsupported_nvfuser(TorchRefsNvfuserCapabilityMode(), op, op_decomp, arg, {})\n\n def fn1(x):\n x = x * 2\n x = x @ x\n x = x * 2\n return x\n\n self.assertFalse(fn0(x, y, 0.3, False))\n with TorchRefsNvfuserCapabilityMode():\n\n # Autocast context has C++ level ATen calls that are hidden from\n # TorchRefsNvfuserCapabilityMode that works only on Python level.\n # The first call to make_fx records autocast C++ calls directly and\n # doesn't have the chance to translate to nvprims. 
After the first\n # call, \"gm\" contains explicit calls to torch.ops.aten and nothing\n # is hidden, so the second call to make_fx actually translates\n # recorded autocast dtype conversions to nvprims.\n with torch.autocast(\"cuda\"):\n gm = make_fx(fn1)(x)\n gm = make_fx(gm)(x)\n call_function_nodes = list(filter(lambda n: n.op == \"call_function\", gm.graph.nodes))\n includes_aten_to_copy = any(\n torch.ops.aten._to_copy.default == node.target\n for node in call_function_nodes\n )\n self.assertFalse(includes_aten_to_copy)\n\n @onlyCUDA\n @dtypes(torch.float16, torch.float32)\n def test_masked_fill_decomposition_under_nvprim_context(self, device, dtype):\n # Test masked_fill decomposition doesn't trigger data-dependent control flow\n # on TorchRefsNvfuser speculative lowering.\n from torch.fx.experimental.proxy_tensor import make_fx\n from torch._prims.context import TorchRefsNvfuserCapabilityMode\n\n x = torch.empty(2, 3, device=device).to(dtype=dtype)\n mask = torch.ones_like(x).bool()\n y = torch.tensor(0.3) # cpu scalar tensor\n\n def func(x, mask, y):\n return torch.masked_fill(x, mask, y)\n\n # mimics real use-case for TorchRefsNvfuserCapabilityMode context\n gm = make_fx(func, decomposition_table={})(x, mask, y)\n\n with warnings.catch_warnings(record=True) as caught:\n with TorchRefsNvfuserCapabilityMode():\n gm = make_fx(gm)(x, mask, y)\n # masked_fill decomposition fails inside `get_isolated_graphmodule`\n self.assertFalse(any(GET_ISOLATED_GRAPHMODULE_ERROR in str(w.message) for w in caught))\n\n @ops([op for op in op_db if op.supports_varargs], dtypes=OpDTypes.any_one)\n def test_decomposition_method_vararg(self, device, dtype, op):\n # some ops have vararg variants for the methods. this tests it.\n # we don't have tests for varargs in OpInfo, so we need to\n # improvise this a bit.\n # The rule for general functions (the special cases being e.g. tensor\n # creation functions taking shapes) is that things can be vararg\n # if the method has only one argument of sequence type.\n # e.g. permute can be called on a 3d tensor t as t.permute(0, 2, 1)\n # as well as t.permute([0, 2, 1])\n # when the signature in native_functions.yaml\n # shows arguments Tensor self, IntList dims\n # we might need to adjust things for the factory functions or\n # have them do their own test\n from torch.fx.experimental.proxy_tensor import make_fx\n from torch._prims.context import TorchRefsMode\n\n # filter out empty tuple as that cannot be the varargs\n sample_inputs = (si for si in op.sample_inputs(device, dtype, requires_grad=False)\n if (si.args[-1] if si.args else si.input))\n\n # just run one test, we assume there is a suitable one in the tests\n sample_input = next(sample_inputs)\n all_args = (sample_input.input,) + sample_input.args\n\n # in general, the methods take varargs and not (always?) 
the function\n # variants, the exception to this rule are the factory functions\n if op.is_factory_function:\n fn = op.op\n else:\n fn = op.method_variant\n with TorchRefsMode():\n gm = make_fx(fn)(*all_args[:-1], *all_args[-1])\n\n # in case we add random factory functions\n torch.manual_seed(1)\n res = gm(*all_args[:-1], *all_args[-1])\n torch.manual_seed(1)\n expected = fn(*all_args[:-1], *all_args[-1])\n self.assertEqual(res, expected)\n\n\ninstantiate_device_type_tests(TestDecomp, globals())\n\n\nif __name__ == \"__main__\":\n run_tests()\n","sub_path":"test/test_prims.py","file_name":"test_prims.py","file_ext":"py","file_size_in_byte":52399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"208172990","text":"# coding: utf-8\ntry:\n import sys\n reload(sys)\n sys.setdefaultencoding('utf-8')\nexcept NameError:\n pass\nclass Log:\n @staticmethod\n def common_log(info, coding=\"utf-8\"):\n if type(info) is not str:\n info = str(info)\n print(\"--**--\"+str(info.encode(coding))+\"--**--\")\n\n @staticmethod\n def mark_log(info, coding=\"utf-8\"):\n if type(info) is not str:\n info = str(info)\n print(\"\\n\"+\"-----\"+str(info.encode(coding))+\"-----\")\n\n @staticmethod\n def error_log(info, coding=\"utf-8\"):\n if type(info) is not str:\n info = str(info)\n print(\"--!error!--\" + str(info.encode(coding)) + \"--!error!--\")\n\n @staticmethod\n def success_log(info, coding=\"utf-8\"):\n if type(info) is not str:\n info = str(info)\n print(\"--#^V^#--\" + str(info.encode(coding)) + \"--#^V^#--\")","sub_path":"utils/common/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"545022096","text":"#!/usr/bin/env python3\nimport calendar\nimport csv\nimport ctypes\nimport itertools\nimport math\nimport operator\nimport os\nimport re\n\nfrom collections import namedtuple\nfrom datetime import datetime, timedelta, tzinfo\nfrom pprint import pprint\nfrom timeit import default_timer\nfrom functools import reduce, partial\n\ndef call_without_ctrl_c_handler_replacement(func): # On Python 2.7 on Windows, NumPy messes up Ctrl+C handling. This function fixes it. 
It won't change anything on other systems.\n\ttry:\n\t\tSetConsoleCtrlHandler_body_new = b'\\xC2\\x08\\x00' if ctypes.sizeof(ctypes.c_void_p) == 4 else b'\\xC3'\n\t\ttry: SetConsoleCtrlHandler_body = (lambda kernel32: (lambda pSetConsoleCtrlHandler:\n\t\t\tkernel32.VirtualProtect(pSetConsoleCtrlHandler, ctypes.c_size_t(1), 0x40, ctypes.byref(ctypes.c_uint32(0)))\n\t\t\tand (ctypes.c_char * 3).from_address(pSetConsoleCtrlHandler.value)\n\t\t)(ctypes.cast(kernel32.SetConsoleCtrlHandler, ctypes.c_void_p)))(ctypes.windll.kernel32)\n\t\texcept: SetConsoleCtrlHandler_body = None\n\t\tif SetConsoleCtrlHandler_body:\n\t\t\tSetConsoleCtrlHandler_body_old = SetConsoleCtrlHandler_body[0:len(SetConsoleCtrlHandler_body_new)]\n\t\t\tSetConsoleCtrlHandler_body[0:len(SetConsoleCtrlHandler_body_new)] = SetConsoleCtrlHandler_body_new\n\t\ttry:\n\t\t\treturn func()\n\t\tfinally:\n\t\t\tif SetConsoleCtrlHandler_body:\n\t\t\t\tSetConsoleCtrlHandler_body[0:len(SetConsoleCtrlHandler_body_new)] = SetConsoleCtrlHandler_body_old\n\texcept ImportError as e:\n\t\tpass\n\ndef import_numpy(): import numpy, numpy.fft; return numpy\nnumpy = call_without_ctrl_c_handler_replacement(import_numpy)\nnumpy_fftpack_lite = getattr(numpy.fft, 'fftpack_lite', None)\n\ndef fftpack_lite_rfftb(buf, s, scratch=None):\n\tn = len(buf)\n\tm = (n - 1) * 2\n\ttemp = numpy.empty(m, buf.dtype) if scratch is None else scratch if scratch.dtype == buf.dtype else scratch.view(buf.dtype)\n\tnumpy.divide(buf, m, temp[:n])\n\ttemp[n:m] = 0\n\tresult = (numpy_fftpack_lite.rfftb if numpy_fftpack_lite is not None else numpy.fft.irfft)(temp[0:m], s)\n\tif numpy_fftpack_lite is None:\n\t\tresult *= s\n\treturn result\n\ndef array_lexicographical_compare(a, b, less=numpy.less, scratch=None):\n\t# 1D arrays only!\n\t# 'less' parameter is optional\n\t# Tests:\n\t# assert array_lexicographical_compare([0, 0], [1]) < 0\n\t# assert array_lexicographical_compare([0], [0, 0]) < 0\n\t# assert array_lexicographical_compare([1], [0]) > 0\n\t# assert array_lexicographical_compare([0], [0]) == 0\n\t# assert array_lexicographical_compare([0], [1]) < 0\n\t# assert array_lexicographical_compare([0, 0], [0]) > 0\n\t# assert array_lexicographical_compare([1], [0, 0]) > 0\n\tan = len(a)\n\tbn = len(b)\n\tn = bn if bn < an else an\n\tx = a[:n] if an > n else a\n\ty = b[:n] if bn > n else b\n\tif scratch is not None: scratch = scratch[:n]\n\tscratch = less(x, y, scratch) if less is not None else x < y\n\ti = scratch.argmax() if n > 0 else 0\n\tif 0 == i < n and not (a[i] < b[i]): i = n\n\tscratch = less(y, x, scratch) if less is not None else y < x\n\tj = scratch.argmax() if n > 0 else 0\n\tif 0 == j < n and not (b[j] < a[j]): j = n\n\tif i < j: c = -1\n\telif i > j: c = +1\n\telif an < bn: c = -1\n\telif an > bn: c = +1\n\telse: c = 0\n\treturn c\n\ndef fftpad(v, m, padded):\n\tvn = len(v)\n\tif vn < m or padded.dtype != v.dtype:\n\t\tpadded[vn:m] = 0\n\t\tpadded[0:vn] = v\n\t\tv = padded[:m]\n\treturn v\n\ndef fftconvolve(x, y, x_y_transform_cache=None, pad=fftpad, initialize=numpy_fftpack_lite.rffti if numpy_fftpack_lite is not None else {}.get(None), forward=numpy_fftpack_lite.rfftf if numpy_fftpack_lite is not None else numpy.fft.rfft, multiply=numpy.multiply, backward=fftpack_lite_rfftb, cache=[]):\n\tcn = max(len(x) + len(y) - 1, 0)\n\tmlog2 = cn.bit_length()\n\tm = 1 << mlog2\n\twhile len(cache) <= mlog2:\n\t\tcache.append(None)\n\tentry = cache[mlog2]\n\tif entry is None:\n\t\tcache[mlog2] = (s, padded) = (initialize(m) if initialize is not None else m, 
numpy.empty(m * 2, float))\n\telse:\n\t\t(s, padded) = cache[mlog2]\n\tdo_pad = partial(pad, m=m, padded=padded)\n\ta = forward(do_pad(x), s) if x_y_transform_cache is None or len(x_y_transform_cache) <= 0 or x_y_transform_cache[0] is None else x_y_transform_cache[0]\n\tb = forward(do_pad(y), s) if x_y_transform_cache is None or len(x_y_transform_cache) <= 1 or x_y_transform_cache[1] is None else x_y_transform_cache[1]\n\tif x_y_transform_cache is not None:\n\t\tif len(x_y_transform_cache) > 0: x_y_transform_cache[0] = a\n\t\tif len(x_y_transform_cache) > 1: x_y_transform_cache[1] = b\n\tc = backward(multiply(a, b, padded[:m + 2].view(complex)), s, scratch=padded)\n\treturn c[:cn]\n\ndef overlap_add_convolve(x, y, convolve, out=None, direct_convolve=numpy.convolve):\n\txn = len(x)\n\tyn = len(y)\n\tif yn < xn: (x, xn, y, yn) = (y, yn, x, xn)\n\tuse_direct = direct_convolve is not None and xn < 0x20\n\tif use_direct or 2 * xn >= yn:\n\t\tresult = (direct_convolve if use_direct else convolve)(x, y)\n\t\tif out is not None:\n\t\t\tout[:len(result)] = result\n\t\telse:\n\t\t\tout = result\n\t\treturn out\n\tzn = xn + yn - 1 if xn > 0 and yn > 0 else 0\n\tif out is None:\n\t\tout = numpy.empty(zn, float)\n\t\tassert out.__setitem__(slice(None), numpy.nan) is None or True # in debug mode, fill with NaNs to find any bugs\n\telif len(out) > zn:\n\t\tout = out[:zn]\n\tif xn > 0 and yn > 0:\n\t\tsaved_transforms = [None]\n\t\tblocksize = 1 << min((xn - 1).bit_length(), (yn - 1).bit_length())\n\t\tsaved = None\n\t\ti = 0\n\t\twhile i < yn:\n\t\t\tj = i + blocksize\n\t\t\tif j > yn:\n\t\t\t\tj = yn\n\t\t\t\tsaved_transforms[0] = None\n\t\t\tz = convolve(x[0:xn], y[i:j], saved_transforms)\n\t\t\tout[i : j + xn - 1] = z\n\t\t\tif saved is not None:\n\t\t\t\tout[i : i + len(saved)] += saved\n\t\t\tsaved = z[blocksize : blocksize * 2 -1]\n\t\t\ti = j\n\treturn out\n\ndef overlap_add_test(ntests):\n\timport numpy.random\n\tfor _ in range(ntests):\n\t\ta = numpy.asarray(numpy.random.randint(0, 8, numpy.random.randint(1, 16)), float)\n\t\tb = numpy.asarray(numpy.random.randint(0, 8, numpy.random.randint(1, 16)), float)\n\t\tassert numpy.allclose(overlap_add_convolve(a, b, fftconvolve), numpy.convolve(a, b))\n\ndef cumsum_via_reverse_sum(arr): # Better cumsum for right-skewed distributions\n\tresult = numpy.cumsum(arr[::-1])[::-1]\n\ts = result[0]\n\tresult = s - result\n\tresult = numpy.roll(result, -1)\n\tresult[-1] = s\n\treturn result\n\n# inverse of cumsum()\ndef diff(arr, has_prepend='prepend' in (lambda code: code.co_varnames[:code.co_argcount])(numpy.diff.__code__)):\n\tif has_prepend: # Only available in newer versions of NumPy\n\t\tresult = numpy.diff(arr, prepend=0)\n\telse:\n\t\tresult = numpy.diff(numpy.pad(arr, [(1, 0)], 'constant', constant_values=0))\n\treturn result\n\nclass FixedTimeZone(tzinfo):\n\tdef __init__(self, *args, **kwargs):\n\t\tself._utcoffset = kwargs.pop('utcoffset', None)\n\t\tself._dst = kwargs.pop('dst', None)\n\t\tself._name = kwargs.pop('name', None)\n\t\tsuper(FixedTimeZone, self).__init__(*args, **kwargs)\n\tdef utcoffset(self, dt): return self._utcoffset\n\tdef dst(self, dt): return self._dst\n\tdef tzname(self, dt): return self._name\n\n\ndef datetime_parse_iso8601(s, keep_as_tuple=False, timezone_cache=None, pattern_match=re.compile(\"^(\\\\d\\\\d\\\\d\\\\d)-(\\\\d\\\\d?)-(\\\\d\\\\d?)T(\\\\d\\\\d?):(\\\\d\\\\d?):(\\\\d\\\\d?)(?:\\\\.(\\\\d*))?(?:Z?|([+\\\\-]?\\\\d\\\\d?)(?::?(\\\\d\\\\d))?)$\").match):\n\tint_ = int\n\tm = pattern_match(s)\n\tif m is None: raise 
ValueError(\"Could not parse date/time: \" + repr(s))\n\tg = m.groups()\n\tif g[7] is None:\n\t\ttimezone = None\n\telse:\n\t\ttz0 = g[7]\n\t\ttz1 = g[8]\n\t\ttz_min = int(tz1)\n\t\tis_utc = False\n\t\ttz_key = (int(tz0), tz_min) if tz_min else tz0\n\t\ttimezone = timezone_cache.get(tz_key) if timezone_cache is not None else None\n\t\tif timezone is None:\n\t\t\ttimezone = FixedTimeZone(utcoffset=timedelta(hours=int_(tz0), minutes=0 if is_utc else tz_min))\n\t\t\tif timezone_cache is not None:\n\t\t\t\ttimezone_cache[tz_key] = timezone\n\targs = (int_(g[0]), int_(g[1]), int_(g[2]), int_(g[3]), int_(g[4]), int_(g[5]), int_(g[6].ljust(6, \"0\")[:6]) if g[6] is not None else 0, timezone)\n\treturn datetime(*args) if not keep_as_tuple else args\n\ndef epochtime(tuple_with_timezone):\n\tresult = calendar.timegm(tuple_with_timezone)\n\tif len(tuple_with_timezone) > 7 and tuple_with_timezone[7] is not None:\n\t\tresult += tuple_with_timezone[7].utcoffset(tuple_with_timezone)\n\treturn result\n\ndef read_header_line(iterator):\n\tquoted = False\n\tremaining = None\n\tso_far = []\n\tstop = False\n\twhile not stop:\n\t\ttry: line = iterator.readline()\n\t\texcept StopIteration: break\n\t\tto_append = line\n\t\tif remaining is None: remaining = line[:0]\n\t\tfor i, c in enumerate(line):\n\t\t\tif c == '\"': quoted = not quoted\n\t\t\telif c in '\\r\\n':\n\t\t\t\tif not quoted:\n\t\t\t\t\tif line[i:i+1] == '\\r': i += 1\n\t\t\t\t\tif line[i:i+1] == '\\n': i += 1\n\t\t\t\t\tto_append = line[:i]\n\t\t\t\t\tremaining = line[i:]\n\t\t\t\t\tstop = True\n\t\t\t\t\tbreak\n\t\tso_far.append(to_append)\n\treturn (so_far[0][:0].join(so_far) if len(so_far) > 0 else '', remaining)\n\ndef make_csv_reader(infile):\n\t(first_line, after_first_line) = read_header_line(infile)\n\tprefix = first_line + after_first_line + infile.read(1 << 12)\n\tdialect = csv.Sniffer().sniff(prefix)\n\tif dialect.escapechar is None and not dialect.doublequote: dialect.doublequote = True\n\tcontent = prefix + infile.read()\n\tresult = None\n\tif \"\\\"\" not in content:\n\t\tlines = content.splitlines()\n\t\tresult = map(lambda line: line.split(\",\"), lines)\n\telse:\n\t\tresult = csv.reader(content.splitlines(True), dialect)\n\treturn result\n\nTicket_dtype = [('user_id', ' 0\n\t\t\tnonzeros = numpy.append(nonzeros, 0)\n\t\tcumulative_complement = numpy.cumsum(distribution)\n\t\tcumulative_complement = numpy.subtract(1, cumulative_complement, cumulative_complement)\n\t\tjnonzero = nonzeros[-1].tolist() + 1 if len(nonzeros) > 0 else 0\n\t\tinonzero = nonzeros[0].tolist() if len(nonzeros) > 0 else jnonzero\n\t\tdistribution.flags.writeable = False\n\t\tcumulative_complement.flags.writeable = False\n\t\tself.distribution = distribution[inonzero:jnonzero]\n\t\tself.cumulative_complement = cumulative_complement[inonzero:jnonzero]\n\t\tself.begin = begin + inonzero\n\t\tself.end = self.begin + (jnonzero - inonzero)\n\t@staticmethod\n\tdef pad_shifted_array(arr, arr_begin, i, j, lval=0, rval=0, out=None):\n\t\tarr_end = arr_begin + len(arr)\n\t\tif i is None: i = arr_begin\n\t\tif j is None: j = arr_end\n\t\tv = arr[max(i - arr_begin, 0) : max(j - arr_begin, 0)]\n\t\tn = len(v)\n\t\tlpad = max(arr_begin - i, 0)\n\t\trpad = max(j - arr_end, 0)\n\t\tif out is None:\n\t\t\tout = numpy.pad(v, [(lpad, rpad)], 'constant', constant_values=[(lval, rval)])\n\t\telse:\n\t\t\tbegin = lpad + n\n\t\t\tout[0 : lpad] = lval\n\t\t\tout[lpad : begin] = v\n\t\t\tout[begin : begin + rpad] = rval\n\t\treturn out\n\tdef pdf_range(self, i=None, j=None, 
out=None):\n\t\tarr = self.distribution\n\t\treturn Distribution.pad_shifted_array(arr, self.begin, i, j, 0, 0, out)\n\tdef cdf_complement_range(self, i=None, j=None, out=None):\n\t\tarr = self.cumulative_complement\n\t\treturn Distribution.pad_shifted_array(arr, self.begin, i, j, 1, arr[-1], out)\n\t@staticmethod\n\tdef chop(arr, cutoff):\n\t\tto_chop = arr <= cutoff\n\t\tdenom = 1 - arr[to_chop].sum()\n\t\tarr[to_chop] = 0\n\t\tarr /= denom\n\tdef plus(self, other, cutoff=None):\n\t\tassert other is not self\n\t\tdist = overlap_add_convolve(self.distribution, other.distribution, fftconvolve)\n\t\tif cutoff is not None: self.chop(dist, cutoff)\n\t\treturn self.__class__(dist, self.begin + other.begin)\n\tdef __repr__(self):\n\t\tif self.begin >= 0 and (len(self.distribution) < 0x10 or len(numpy.flatnonzero(self.distribution)) >= len(self.distribution) // 2):\n\t\t\treturn repr([0.0] * self.begin + numpy.asarray(self.distribution).tolist())\n\t\treturn repr(dict(filter(lambda pair: pair[1] != 0, map(lambda k, v: (k, v), range(self.begin, self.end), numpy.asarray(self.distribution).tolist())))) if True else \"{begin: %s, distribution: %s}\" % (repr(self.begin), repr(self.distribution))\n\tdef __hash__(self):\n\t\tif self._hash is None:\n\t\t\tself._hash = hash((self.begin, self.distribution.tobytes()))\n\t\treturn self._hash\n\tdef __eq__(self, other): return self.begin == other.begin and numpy.array_equal(self.distribution, other.distribution)\n\tdef __lt__(self, other):\n\t\treturn self.begin < other.begin or not (other.begin < self.begin) and array_lexicographical_compare(self.distribution, other.distribution) < 0\n\nclass WaitTimePredictor(object):\n\tPROBABILITY_MAGNITUDE_CUTOFF = 1E-8\n\tDURATION_PERCENTILE_CUTOFF = 100 * (1 - 1.0 / 2000) # Cut off data above this percentile\n\tMAX_SPLIT_DAY_DURATION_SECONDS = 0 # Max duration for tickets spanning different days (optional)\n\tmin_duration_seconds = 35 # Min duration for any ticket\n\tmax_duration_seconds = 16 * 60 * 60 # Max duration for any ticket\n\tbin_size = 1 # number of seconds to bin together\n\tbin_smoothing = (lambda a: numpy.asarray(a, float) / sum(a))([1]) # smoothing convolution filter (normalized)\n\ttickets = None\n\tscratch = None\n\tdef __init__(self, **kwargs):\n\t\tfor key, value in kwargs.items():\n\t\t\tif not hasattr(WaitTimePredictor, key):\n\t\t\t\traise TypeError(\"invalid keyword argument: \" + str(key))\n\t\t\tsetattr(self, key, value)\n\tdef load(self, csv_reader):\n\t\tif self.tickets is None:\n\t\t\tself.tickets = []\n\t\theaders = None\n\t\trows = None\n\t\tfor row in csv_reader:\n\t\t\tif headers is None:\n\t\t\t\tif 'event_type' not in row: break\n\t\t\t\theaders = dict(map(lambda pair: (pair[1], pair[0]), enumerate(row)))\n\t\t\t\trows = []\n\t\t\telse:\n\t\t\t\trows.append(row)\n\t\tif rows is not None:\n\t\t\tid_header = headers['id']\n\t\t\tuser_id_header = headers['user_id']\n\t\t\tticket_id_header = headers['ticket_id']\n\t\t\tevent_type_header = headers['event_type']\n\t\t\ttime_header = headers['time']\n\t\t\tif False: rows.sort(key=lambda row: (row[time_header], row[id_header]))\n\t\t\ttickets_assigned = {}\n\t\t\ttimezone_cache = {}\n\t\t\tfor row in rows:\n\t\t\t\tevent_type = row[event_type_header]\n\t\t\t\tif event_type == 'assign':\n\t\t\t\t\ttickets_assigned[row[ticket_id_header]] = row\n\t\t\t\telif event_type == 'unassign':\n\t\t\t\t\ttickets_assigned.pop(row[ticket_id_header], None)\n\t\t\t\telif event_type == 'resolve':\n\t\t\t\t\tprev_row = 
tickets_assigned.pop(row[ticket_id_header], None)\n\t\t\t\t\tif prev_row is not None:\n\t\t\t\t\t\ttassign = datetime_parse_iso8601(prev_row[time_header], True, timezone_cache)\n\t\t\t\t\t\ttresolve = datetime_parse_iso8601(row[time_header], True, timezone_cache)\n\t\t\t\t\t\tsame_hour = tresolve[0] == tassign[0] and tresolve[1] == tassign[1] and tresolve[2] == tassign[2] and tresolve[3] == tassign[3]\n\t\t\t\t\t\tif self.MAX_SPLIT_DAY_DURATION_SECONDS is None or same_hour:\n\t\t\t\t\t\t\tif same_hour:\n\t\t\t\t\t\t\t\tduration = int(tresolve[4] - tassign[4]) * 60 + (tresolve[5] - tassign[5])\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tduration = int((datetime(*tassign) - datetime(*tresolve)).total_seconds())\n\t\t\t\t\t\t\tsplit_days = 0\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\ttassign_epoch = epochtime(tassign)\n\t\t\t\t\t\t\ttresolve_epoch = epochtime(tresolve)\n\t\t\t\t\t\t\tduration = int(tresolve_epoch - tassign_epoch)\n\t\t\t\t\t\t\tsplit_days = (datetime.fromtimestamp(tresolve_epoch).date() - datetime.fromtimestamp(tassign_epoch).date()).days\n\t\t\t\t\t\tif duration <= 3600 and duration > 5:\n\t\t\t\t\t\t\tself.tickets.append((row[user_id_header], duration, split_days))\n\t\t\t\telif event_type == 'create' or event_type == 'delete' or event_type == 'describe' or event_type == 'update_location':\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\traise ValueError(\"Unknown event_type: \" + event_type)\n\tdef compute_wait_times(self):\n\t\tif self.tickets is None:\n\t\t\traise ValueError(\"no ticket information loaded; can't compute distributions\")\n\t\tfiltered = numpy.asarray(self.tickets, dtype=Ticket_dtype)\n\t\tfiltered = filtered[~(filtered['duration'] < self.min_duration_seconds)]\n\t\tfiltered = filtered[~(filtered['duration'] > self.max_duration_seconds)]\n\t\tif self.MAX_SPLIT_DAY_DURATION_SECONDS is not None: filtered = filtered[~((filtered['split_days'] > 0) & (filtered['duration'] > self.MAX_SPLIT_DAY_DURATION_SECONDS))]\n\t\tfiltered = filtered[~(filtered['duration'] > numpy.percentile(filtered['duration'], self.DURATION_PERCENTILE_CUTOFF))]\n\t\treturn filtered['duration']\n\tdef compute_wait_time_distribution(self, durations):\n\t\t# sys.stdout.write(\"Median: %.0f minutes\\n\" % (numpy.median(durations).tolist() / 60,))\n\t\t# sys.stdout.write(\"Mean: %.0f minutes\\n\" % (numpy.mean(durations).tolist() / 60,))\n\t\t# sys.stdout.write(\"Std dev: %.0f minutes\\n\" % (numpy.std(durations).tolist() / 60,))\n\t\t(dist, bins) = numpy.histogram(durations, numpy.arange(0, durations.max() + self.bin_size, self.bin_size), density=True)\n\t\tdist *= self.bin_size\n\t\tdist = numpy.convolve(dist, self.bin_smoothing, 'same')\n\t\tto_chop = dist <= self.PROBABILITY_MAGNITUDE_CUTOFF\n\t\tdenom = 1 - dist[to_chop].sum()\n\t\tdist[to_chop] = 0\n\t\tdist[:] /= denom\n\t\tnonzeros = dist.nonzero()[0]\n\t\treturn dist[:nonzeros.max() + 1 if len(nonzeros) > 0 else 0]\n\tdef _calculate_help_time_probabilities(self, availabilities):\n\t\t# We want the probability that the i'th TA will help at time ti.\n\t\t# Probability of being helped by i'th TA at time ti is equal to\n\t\t# the probability of NOT being helped before time ti\n\t\t# times the probability that the i'th TA becomes ready at time ti\n\t\t# divided by the total probability of ANY TAs becoming ready at time ti\n\t\t# Also note: CDF(min_k(x[k])) = 1 - prod_k(1 - CDF(x[k]))\n\t\t# Sample table to work through:\n\t\t# | 0 | 1 | 2\n\t\t# ---+----+-----+----\n\t\t# A | 0 | 1 | 0\n\t\t# B | 0 | 1/3 | 2/3\n\t\t# C | 0 | 1/4 | 3/4\n\t\t# Another 
example:\n\t\t# A: [0, 3/4, 1/4]\n\t\t# B: [0, 2/3, 1/3]\n\t\ttminbegin = min(map(lambda v: v.begin, availabilities))\n\t\ttmaxend = max(map(lambda v: v.end, availabilities))\n\t\tscratch_shape = (2, len(availabilities), tmaxend - tminbegin)\n\t\tif self.scratch is not None and all(scratch_shape >= self.scratch):\n\t\t\tscratch = self.scratch\n\t\t\tself.scratch = None\n\t\telse:\n\t\t\tscratch = numpy.empty(scratch_shape, float)\n\t\t\tassert scratch.__setitem__(slice(None), numpy.nan) is None or True # in debug mode, fill with NaNs to find any bugs\n\t\t(pdfs, cdf_complements) = (a, b) = scratch[tuple(map(slice, scratch_shape))]\n\t\t# WARNING: The code below is tricky! The buffers are re-used! Be VERY careful not to trash buffers that are used afterward!\n\t\tfor i in range(len(pdfs)):\n\t\t\tavailabilities[i].pdf_range(tminbegin, tmaxend, pdfs[i])\n\t\tfor i in range(len(cdf_complements)):\n\t\t\tavailabilities[i].cdf_complement_range(tminbegin, tmaxend, cdf_complements[i])\n\t\twith numpy.errstate(divide='ignore', invalid='ignore'):\n\t\t\tmin_cdf = numpy.subtract(1, numpy.prod(cdf_complements, 0, out=b[0, :]), out=b[0, :])\n\t\t\tmin_pdf = diff(min_cdf)\n\t\t\tprob_help = numpy.nan_to_num(numpy.multiply(min_pdf[numpy.newaxis, :],\n\t\t\t\tnumpy.divide(pdfs, pdfs.sum(axis=0, out=None, keepdims=True), out=a),\n\t\t\t\tout=a), False)\n\t\treturn (tminbegin, prob_help.sum(0), prob_help.sum(1), tmaxend)\n\t# TODO: Verify by simulation\n\tdef get_wait_itimes(self, dist, instructor_start_times, queue_depth):\n\t\tABSTOL_DECIMAL_PLACES = 12\n\t\thelp_time = Distribution(dist, 0)\n\t\tstart_time_discretized_distributions = list(map(lambda tistart: Distribution(numpy.asarray([1], float), tistart), numpy.divide(instructor_start_times, self.bin_size).astype(int).tolist()))\n\t\tmemo = {} # set to None to disable memoization (for correctness verification)\n\t\tdef helper(*args):\n\t\t\t(nq, weight, ta_start_times) = args\n\t\t\tresult = memo.get(args) if memo is not None else None\n\t\t\t#print(result)\n\t\t\tif result is None:\n\t\t\t\t# Compute the wait time for the current state\n\n\t\t\t\t(tminbegin, time_help_probs, instructor_help_probs, tmaxend) = self._calculate_help_time_probabilities(ta_start_times)\n\t\t\t\tassert numpy.allclose(time_help_probs.sum(), 1, atol=1E-4), \"probabilities don't add up to 1; they add up to \" + repr(time_help_probs.sum().tolist())\n\t\t\t\twait_itimes = Distribution(time_help_probs, tminbegin)\n\t\t\t\tresult = []\n\t\t\t\tif nq < queue_depth:\n\t\t\t\t\tresult.append((nq, 1.0, wait_itimes, list(ta_start_times)))\n\t\t\t\t# Compute the next possible queue state\n\t\t\t\tif nq < queue_depth - 1:\n\t\t\t\t\tprev_subargs = None; prev_subresults = None; prev_wi = None\n\t\t\t\t\tfor i in range(len(instructor_start_times)):\n\t\t\t\t\t\twi = instructor_help_probs[i].tolist()\n\t\t\t\t\t\tta_end_times = ta_start_times[:i] + (ta_start_times[i].plus(help_time, self.PROBABILITY_MAGNITUDE_CUTOFF),) + ta_start_times[i + 1:]\n\t\t\t\t\t\tta_end_times = sorted(ta_end_times) # Because TAs are indistinguishable (order doesn't matter) -- this lets us memoize (and maybe prune to approximate later)\n\t\t\t\t\t\tsubargs = (nq + 1, 1.0, tuple(ta_end_times))\n\t\t\t\t\t\tif memo is not None and prev_subargs == subargs:\n\t\t\t\t\t\t\tcached = memo[subargs]\n\t\t\t\t\t\t\tresult[len(result) - len(prev_subresults):] = map(lambda item, prev: item[:1] + (prev[1] + wi * item[1],) + item[1+1:], prev_subresults, result[len(result) - len(prev_subresults):])\n\t\t\t\t\t\t\tprev_subresults 
= prev_subresults # To denote that it hasn't changed\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tprev_subresults = list(helper(*subargs))\n\t\t\t\t\t\t\tresult.extend(map(lambda item: item[:1] + (wi * item[1],) + item[1+1:], prev_subresults))\n\t\t\t\t\t\tprev_subargs = subargs; prev_wi = wi\n\t\t\t\tresult = list(result)\n\t\t\t\tresult[:] = map(lambda item: item[:1] + (item[1] * weight,) + item[1+1:], result)\n\t\t\t\tresult[:] = filter(lambda item: item[1] >= self.PROBABILITY_MAGNITUDE_CUTOFF, result)\n\t\t\t\tif memo is not None:\n\t\t\t\t\tmemo[args] = tuple(result)\n\t\t\treturn result\n\n\t\treturn helper(0, 1.0, tuple(start_time_discretized_distributions))\n\ndef sparse_to_dense_pmf(weights, normalize=False):\n\ti = min(min(weights.keys()), 0)\n\tj = max(weights.keys()) + 1\n\tif i < 0: raise ValueError(\"start index cannot be below zero\")\n\tw = numpy.zeros(j - i, float)\n\tfor k, v in weights.items():\n\t\tw[k] = v\n\tif normalize:\n\t\tw /= numpy.sum(w)\n\treturn w\n\ndef avgWaitTimeList(*args):\n\tif (args[2] == 0) : return [],[]\n\tinstructor_start_times = [0] * (int(args[2]))\n\tqueue_depth = len(instructor_start_times) + int(args[1])\n\tpercentile = 0.50\n\tbin_size = 1\n\tDEBUG = False\n\tlogging = DEBUG\n\tpredictor = WaitTimePredictor(bin_size=bin_size)\n\tif DEBUG:\n\t\tsamples_to_generate = 10000\n\t\twait_time_dist = numpy.asarray(sparse_to_dense_pmf({1 * 60: 0.5, 3 * 60: 0.5}, True), float)\n\t\twaiting_times = numpy.random.choice(len(wait_time_dist), samples_to_generate, True, y / numpy.sum(wait_time_dist))\n\telse:\n\t\tnloaded = 0\n\t\ttstart = default_timer()\n\t\tfor filename in args[0:1]:\n\t\t\tif os.path.splitext(filename)[1].lower() == \".csv\":\n\t\t\t\twith open(filename, \"r\") as infile:\n\t\t\t\t\tpredictor.load(make_csv_reader(infile))\n\t\t\t\t\tnloaded += 1\n\t\ttend = default_timer()\n\t\t# if nloaded > 0:\n\t\t# \t# sys.stderr.write(\"Loading data took %.2f seconds\\n\" % (tend - tstart,))\n\t\twaiting_times = predictor.compute_wait_times()\n\t\twait_time_dist = predictor.compute_wait_time_distribution(waiting_times)\n\t# sys.stdout.write(\"Resolution (bin size): %s second(s)\\n\" % (bin_size,))\n\t# if queue_depth <= len(instructor_start_times): sys.stderr.write(\"NOTE: Fewer people on queue than available instructors; everyone will be serviced immediately.\\n\")\n\ttstart = default_timer()\n\tqueue_wait_itimes = []\n\tfor (depth, weight, wait_itimes, ta_start_itimes) in predictor.get_wait_itimes(wait_time_dist, instructor_start_times, queue_depth):\n\t\twhile depth >= len(queue_wait_itimes): queue_wait_itimes.append([])\n\t\tqueue_wait_itimes[depth].append((weight, wait_itimes))\n\t\t# if logging: sys.stdout.write(\"%s\\n\" % (\"\\t\".join(map(str, [weight, wait_itimes, ta_start_itimes])),))\n\ttend = default_timer()\n\t# if tend - tstart >= 0.05: sys.stderr.write(\"Calculating queue wait times took %.2f seconds\\n\" % (tend - tstart,))\n\t# sys.stdout.write(\"Wait times <= [%s] min(s) with %.0f%% probability for %d instructors starting at T = %s min(s)\\n\" % (\n\t# \t\", \".join(map(\n\t# \t\tlambda t: \"%.2g\" % (t / 60.0,),\n\t# \t\tmap(\n\t# \t\t\tlambda queue_k_wait_times: sum(map(\n\t# \t\t\t\tlambda info: info[0] * bin_size * numpy.searchsorted(numpy.subtract(1, info[1].cdf_complement_range(0)), percentile, 'left'),\n\t# \t\t\t\tqueue_k_wait_times)),\n\t# \t\t\tqueue_wait_itimes))),\n\t# \tpercentile * 100,\n\t# \tlen(instructor_start_times),\n\t# \tlist(map(lambda t: t / 60.0, instructor_start_times))\n\t# ))\n\tsmoothing_window = 
numpy.median(waiting_times) / 5\n\t# sys.stdout.write(\"Plotting with a smoothing window of %s; please wait as smoothing may be slow...\\n\" % (smoothing_window,))\n\tlw = 96.0 / 120\n\tusetex = False\n\t(rows, cols) = (2, 1)\n\t# import matplotlib, matplotlib.pyplot\n\t# matplotlib.rc('text', usetex=usetex)\n\t# matplotlib.rc('font', family='serif', serif=['cmr10' if usetex else 'Latin Modern Roman'], size=11)\n\t# pyplot = matplotlib.pyplot\n\t# from matplotlib import pyplot\n\t# fig = pyplot.figure(2, (9.0 * cols, 3.5 * rows), 120, None, None, False) or matplotlib.figure.Figure()\n\txmax = None\n\t# if True:\n\t# \txcutoff = 60 * 60\n\t# \timport scipy.stats\n\t# \tdist = scipy.stats.gengamma\n\t# \tx = numpy.arange(len(wait_time_dist))[:xcutoff]\n\t# \tx_displayed = x / 60.0\n\t# \txmin = 0.0\n\t# \ty0 = dist.pdf(x, *dist.fit(waiting_times[waiting_times <= xcutoff], floc=xmin))\n\t# \ty1 = scipy.stats.gaussian_kde(waiting_times, smoothing_window / float(len(x)))(x); y1 /= numpy.sum(y1)\n\t# \ty2 = wait_time_dist[:xcutoff]; y2 = y2 / numpy.sum(y2)\n\t\t# axes = fig.add_subplot(rows, cols, 1) or matplotlib.axes.Axes()\n\t\t# artists_and_legends = []\n\t\t# axes.set_title(\"Waiting time of a single person (minutes)\")\n\t\t# # axes.grid(True, which='major')\n\t\t# artists_and_legends.append((axes.plot(x_displayed, y0, lw=lw, color='black')[0], \"Estimated %s distribution (ideal)\" % (dist.name,)))\n\t\t# artists_and_legends.append((axes.fill_between(x_displayed, y1, lw=lw), \"Smoothed empirical distribution\"))\n\t\t# artists_and_legends.append((axes.fill_between(x_displayed, y2, lw=lw), \"Empirical distribution\"))\n\t\t# axes.legend(*zip(*artists_and_legends[::-1]))\n\t\t# axes.set_xlim(xmin, x_displayed.max())\n\t\t# axes.set_ylim(0, numpy.max((y0.max(), y1.max(), y2.max())))\n\t\t# xmax = axes.get_xlim()[1]\n\t\t# axes.get_xaxis().set_major_locator(matplotlib.ticker.MultipleLocator(5))\n\t\t# axes.get_xaxis().set_minor_locator(matplotlib.ticker.MultipleLocator(1))\n\tif True:\n\t\t# axes = fig.add_subplot(rows, cols, 2) or matplotlib.axes.Axes()\n\t\t# axes.set_title(\"Waiting times of the people waiting on the queue (minutes)\")\n\t\tmaxend = max(map(lambda queue_k_wait_itimes: max(map(lambda info: info[1].end, queue_k_wait_itimes)), queue_wait_itimes))\n\t\tartists_and_legends = []\n\t\tavgTimeList, stdDevList = [], []\n\t\tfor k, queue_k_wait_itimes in list(enumerate(queue_wait_itimes[len(instructor_start_times):]))[::-1]:\n\t\t\tdist = numpy.sum(list(map(lambda info: info[0] * info[1].pdf_range(0, maxend), queue_k_wait_itimes)), 0)\n\t\t\tcumdist = numpy.cumsum(dist)\n\t\t\tibegin = 0\n\t\t\tplot_cumulative = False\n\t\t\tiend = len(cumdist) - (0 if plot_cumulative else (cumdist[::-1] < (1 - 1.0 / (1 << 8))).argmax())\n\t\t\tdist_to_plot = cumdist if plot_cumulative else dist\n\t\t\tdist_to_plot = dist_to_plot[ibegin:iend]\n\t\t\tx = numpy.linspace(ibegin * bin_size, iend * bin_size, len(dist_to_plot), False)\n\t\t\tx_displayed = x / 60\n\t\t\t# artists_and_legends.append((axes.fill_between(x_displayed, dist_to_plot, lw=lw), \"Waiting time of person #%s on queue\" % (k + 1,)))\n\t\t\t# axes.axvline(x=numpy.average(x_displayed, None, dist_to_plot / dist_to_plot.sum()), lw=lw, color='black')\n\t\t\tavgTimeList.append(numpy.average(x_displayed, None, dist_to_plot / dist_to_plot.sum()))\n\t\t\tstdDevList.append(numpy.std(x_displayed, None))\n\n\t\t# axes.legend(*zip(*artists_and_legends[::-1]))\n\t\t# axes.set_xlim(0, max(xmax or 0, axes.get_xlim()[1]))\n\t\t# axes.set_ylim(0, 
axes.get_ylim()[1])\n\t\t# axes.get_xaxis().set_major_locator(matplotlib.ticker.MultipleLocator(5))\n\t\t# axes.get_xaxis().set_minor_locator(matplotlib.ticker.MultipleLocator(1))\n\t#fig.tight_layout()\n\t#if False: fig.savefig('waiting-time.png', transparent=True)\n\t#pyplot.show(fig)\n\tavgTimeList.reverse()\n\tstdDevList.reverse()\n\treturn avgTimeList, stdDevList\n","sub_path":"oh_queue/waitingtime.py","file_name":"waitingtime.py","file_ext":"py","file_size_in_byte":27769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"301481544","text":"import mysql.connector\nimport json\nimport datetime\nimport numpy\nimport tensorflow\nfrom tensorflow.keras.preprocessing import image\nfrom tensorflow.keras.applications.vgg16 import preprocess_input\nfrom PIL import Image\nimport os\n\nfrom models.Activity import Activity\n\ndatabase = mysql.connector.connect(\n host=\"mysql\",\n user=\"root\",\n password=\"root\",\n database=\"camaro\",\n port=\"3306\"\n)\n\ncursor = database.cursor()\n\nfrom flask import Flask\nfrom flask_cors import CORS\nfrom flask import request\n\napp = Flask(__name__)\nCORS(app)\n\n\n@app.route(\"/activities/recent\")\ndef helloWorld():\n userid = request.args.get('userid')\n cursor.execute(\"SELECT * FROM `activities` WHERE valid = 1 AND userid=\" + userid + \" ORDER BY id DESC LIMIT 5\")\n result = cursor.fetchall()\n\n results = []\n\n for x in result:\n results.append(x)\n\n return json.dumps(results)\n\n\n@app.route(\"/activities/all\")\ndef get_all_activities():\n userid = request.args.get('userid')\n cursor.execute(\"SELECT * FROM `activities` WHERE valid = 1 AND userid=\" + userid + \" ORDER BY timestamp DESC\")\n result = cursor.fetchall()\n\n return json.dumps(result)\n\n\n\n@app.route(\"/activities/valid/all\")\ndef get_all_valid_activities():\n cursor.execute(\"SELECT COUNT(activities.valid) FROM activities WHERE activities.valid = 1\")\n result = cursor.fetchall()\n\n return json.dumps(result)\n\n\n@app.route(\"/activities/invalid/all\")\ndef get_all_invalid_activities():\n cursor.execute(\"SELECT COUNT(activities.valid) FROM activities WHERE activities.valid = 0\")\n result = cursor.fetchall()\n\n return json.dumps(result)\n\n\n@app.route(\"/activities/monthly\")\ndef get_activities_monthly():\n userid = request.args.get('userid')\n cursor.execute(\"SELECT DATE(activities.timestamp), COUNT(activities.timestamp) AS TOT FROM activities WHERE userid=\" + userid + \" GROUP BY DATE(activities.timestamp) ORDER BY DATE(activities.timestamp) ASC\")\n result = cursor.fetchall()\n\n results = []\n\n for x in result:\n activity = Activity(1)\n activity.set_day(int(str(x[0])[8:10]))\n\n subarray = []\n subarray.append(x[1])\n subarray.append(activity.get_day())\n\n results.append(subarray)\n\n return json.dumps(results)\n\n\n@app.route(\"/activities/delete/all\")\ndef delete_all_activities():\n cursor.execute(\"DELETE FROM activities\")\n database.commit()\n\n return str(200)\n\n\n@app.route(\"/activities/percentage\")\ndef get_activities_percentage():\n userid = request.args.get('userid')\n cursor.execute(\"SELECT COUNT(activities.valid) FROM activities WHERE userid = \" + userid + \" AND valid = 1\")\n valid = cursor.fetchall()\n\n cursor.execute(\"SELECT COUNT(activities.valid) FROM activities WHERE userid = \" + userid + \" AND valid = 0\")\n invalid = cursor.fetchall()\n \n if (str(valid[0])[1] == '0' and str(invalid[0])[1] == '0'):\n data = []\n data.append(0)\n data.append(0)\n return json.dumps(data)\n\n if 
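# Aside: a minimal, self-contained sketch (hypothetical names, standalone of the
# record above) of the identity _calculate_help_time_probabilities relies on:
# for independent waiting times X_i, P(min_i X_i <= t) = 1 - prod_i P(X_i > t),
# so the CDF of the minimum is built from the per-instructor CDF complements.
import numpy as np

def min_cdf_from_complements(cdf_complements):
    # cdf_complements[i, t] = P(X_i > t); rows are variables, columns are times
    return 1.0 - np.prod(cdf_complements, axis=0)

# Two independent times that are 0 or 1 with equal probability: P(X > t) for t = 0, 1
cc = np.array([[0.5, 0.0],
               [0.5, 0.0]])
print(min_cdf_from_complements(cc))  # [0.75 1.  ] -- the minimum is 0 with probability 3/4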
str(invalid[0])[1] == '0':\n        data = []\n        data.append(100)\n        data.append(0)\n        return json.dumps(data)\n\n    if str(valid[0])[1] == '0':\n        data = []\n        data.append(0)\n        data.append(100)\n        return json.dumps(data)\n\n    valid = valid[0][0]\n    invalid = invalid[0][0]\n\n    total = valid + invalid\n\n    data = []\n    data.append(int((valid / total) * 100))\n    data.append(int((invalid / total) * 100))\n\n    return (json.dumps(data))\n\n@app.route(\"/sensor/value/recent\")\ndef get_values():\n    userid = request.args.get('userid')\n    cursor.execute(\"SELECT sensor_values.distance, sensor_values.ldr FROM `sensor_values` WHERE userid = \" + userid + \" ORDER BY sensor_values.ID DESC LIMIT 1\")\n    value = cursor.fetchall()\n\n    return json.dumps(value) \n\n\n@app.route(\"/sensor/delete/all\")\ndef delete_all_sensor():\n    cursor.execute(\"DELETE FROM sensor_values\")\n    database.commit()\n\n    return str(200)\n\n@app.route(\"/image/validate\")\ndef validate_image():\n    model = tensorflow.keras.models.load_model(\"humansnfaces\")\n\n    img = image.load_img('images/' + str(request.args.get('image')), target_size=(128, 128))\n\n    x = image.img_to_array(img)\n    x = numpy.expand_dims(x, axis=0)\n    x = preprocess_input(x)\n\n    predict = model.predict(x)\n\n    return str(round(predict[0][0], 2))\n\n","sub_path":"api/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"171678444","text":"from mlxtend.data import loadlocal_mnist\nimport os\nimport numpy as np\nimport pickle\nimport math \n\nnp.set_printoptions(edgeitems = 8)\n\ndef sigmoid(x):\n    return 1 / (1 + np.exp(-x))\n\ndef sigmoid_prime(x):\n    return (1 - x) * x \n\nclass SimplyNet:\n    def loadWeightAndBias(self, path):\n        def loadData(path):\n            input = open(path, 'rb')\n            obj = pickle.load(input)\n            input.close()\n            return obj\n\n        self.b1 = loadData(path + \"b1.pickle\")\n        self.b2 = loadData(path + \"b2.pickle\")\n        self.w1 = loadData(path + \"W1.pickle\")\n        self.w2 = loadData(path + \"W2.pickle\")\n\n        self.b1 = np.array(self.b1)\n        self.b2 = np.array(self.b2)\n        self.w1 = np.array(self.w1)\n        self.w2 = np.array(self.w2)\n\n        self.w1 = self.w1.reshape(784, 16)\n        self.w2 = self.w2.reshape(16, 10)\n\n    def forward(self, images):\n        # layer 1\n        self.h1 = np.dot(images, self.w1) #(imageCnt, 16)\n        self.h1 += self.b1\n        # layer 2\n        self.h2 = sigmoid(self.h1) \n        # layer 3\n        self.h3 = np.dot(self.h2, self.w2) #(imageCnt, 10)\n        self.h3 += self.b2\n        # layer 4\n        out = sigmoid(self.h3)\n        return out\n\n    def accuracy(self, images, labels):\n        out = self.forward(images)\n        count = 0\n        for i, it in enumerate(out):\n            if it.argmax() == labels[i]:\n                count += 1\n\n        accuracy = count / images.shape[0] * 100\n        return accuracy\n\n    def learning(self, images, labels, batchSize, epochCnt, lr, imgTest, lblTest):\n        if batchSize > images.shape[0]:\n            raise ValueError(\"batchSize > images size\")\n\n        if labels.shape[0] != images.shape[0]:\n            raise ValueError(\"labels.len() != images.len()\")\n\n        lblArr = np.zeros((labels.shape[0], 10))\n        gen = [i for i in range(labels.size)]\n        lblArr[gen, labels] = 1\n        #print(lblArr)\n\n        self.w1 = np.random.uniform(-0.05, 0.05, (784, 16))\n        self.w2 = np.random.uniform(-0.05, 0.05, (16, 10))\n        self.b1 = np.zeros((16))\n        self.b2 = np.zeros((10))\n\n        for epoch in range(epochCnt):\n            for batchBegin in range(0, images.shape[0], batchSize):\n                batchEnd = min(batchBegin + batchSize, images.shape[0])\n                #print(\"range\", batchBegin, batchEnd)\n                batchImg = images[batchBegin : batchEnd]\n                batchLbls = 
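# Aside: the Flask endpoints above splice userid straight into SQL strings
# ("... AND userid=" + userid), which allows SQL injection. A hedged sketch of
# the parameterized form mysql.connector supports (function name hypothetical;
# table and columns taken from the queries above):
def recent_activities(cursor, userid):
    cursor.execute(
        "SELECT * FROM activities WHERE valid = 1 AND userid = %s "
        "ORDER BY id DESC LIMIT 5",
        (userid,),  # the driver escapes the value; no string concatenation
    )
    return cursor.fetchall()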
lblArr[batchBegin : batchEnd]\n\n                o = self.forward(batchImg)\n\n                e = 2 * (o - batchLbls)\n                z3 = e * sigmoid_prime(o)\n                z2 = np.dot(z3, self.w2.T)\n                z1 = z2 * sigmoid_prime(self.h2)\n\n                dW1 = np.dot(batchImg.T, z1)\n                dW2 = np.dot(self.h2.T, z3)\n\n                k = 1 / batchLbls.shape[0]\n\n                self.w1 -= dW1 * k * lr\n                self.w2 -= dW2 * k * lr\n\n            accuracy = self.accuracy(imgTest, lblTest)\n            print(\"epoch : \", epoch, \"accuracy = \", accuracy)\n\n\n\n    \n\n\npath = os.path.dirname(os.path.abspath(__file__))\nprint(\"Current directory: \", path)\n\ntestImg, testLbl = loadlocal_mnist(\n    images_path = path + \"/t10k-images-idx3-ubyte\", \n    labels_path = path + \"/t10k-labels-idx1-ubyte\")\n\n\ntrainImg, trainLbl = loadlocal_mnist(\n    images_path = path + \"/train-images-idx3-ubyte\", \n    labels_path = path + \"/train-labels-idx1-ubyte\")\n\n# Normalization\nKnorm = 1 / 255\ntestImg = testImg * Knorm\ntrainImg = trainImg * Knorm\n\npath += \"/params/\"\n\nnet = SimplyNet()\n#net.loadWeightAndBias(path)\n#accuracy = net.accuracy(trainImg, trainLbl)\n#print(\"Accuracy\", accuracy) \n\nnet.learning(trainImg, trainLbl, 60, 10, 1, testImg, testLbl)\n\n\n","sub_path":"lab2.py","file_name":"lab2.py","file_ext":"py","file_size_in_byte":3752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"607049442","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: C:\\Users\\sboon\\AppData\\Local\\Temp\\pip-install-ptdbtr91\\quarchpy\\quarchpy\\device\\device.py\n# Compiled at: 2020-03-25 05:10:07\n# Size of source mod 2**32: 16958 bytes\nimport time, sys, os, logging\nfrom quarchpy.connection import QISConnection, PYConnection, QPSConnection\nfrom quarchpy import user_interface\n\nclass quarchDevice:\n    __doc__ = '\\n    Allows control over a Quarch device, over a wide range of underlying connection methods. This is the core class\\n    used for control of all Quarch products.\\n    \\n    '\n\n    def __init__(self, ConString, ConType='PY', timeout='5', forceFind=0):\n        \"\"\"\n        Constructor for quarchDevice, allowing the connection method of the device to be specified.\n        \n        Parameters\n        ----------\n        ConString : str\n        \n            Connection string, specifying the underlying connection type and module identifier. The underlying\n            connection must be supported both by the connection type and the target module.\n        \n            Example:\n            USB:QTL1743 - USB connection with given part number\n            USB:QTL1743-03-015 - USB connection with fully qualified serial number\n            SERIAL:COM4 - Serial connection with COM port (ttyS0 for linux)\n            TCP:192.168.1.55 - LAN(TCP) connection to IP address\n            TELNET:QTL1079-03-004 - LAN(TELNET) connection to netBIOS name (must resolve to IP address)\n            REST:192.168.1.60 - LAN(REST) connection to IP address\n        \n        ConType : {'PY', 'QIS', 'QPS'}\n        \n            Specifies the software type which runs the connection:\n            PY - (Default) Connection is run via pure Python code\n            \n            QIS - Power modules only, connection run via QIS (Quarch Instrument Server) for easy power capture in raw formats. \n            Serial is not supported. IP and port can be specified to connect to a QIS instance running at another location \"QIS:192.168.1.100:9722\"\n            \n            QPS - Power modules only, connection run via QPS (Quarch Power Studio) for automated power capture and analysis within the QPS graphical environment. \n            Serial is not supported. 
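# Aside: in SimplyNet above, sigmoid_prime receives the *activation* a = sigmoid(z)
# rather than the pre-activation z, because d(sigmoid)/dz = a * (1 - a). A small
# standalone finite-difference check of that convention (helper names hypothetical):
import numpy as np

def _sigmoid(x):
    return 1 / (1 + np.exp(-x))

def _sigmoid_prime(a):  # expects the activation, matching the backward pass above
    return (1 - a) * a

z, eps = 0.3, 1e-6
numeric = (_sigmoid(z + eps) - _sigmoid(z - eps)) / (2 * eps)
assert np.isclose(_sigmoid_prime(_sigmoid(z)), numeric)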
IP and port can be specified to connect to a QPS instance running at another location \"QPS:192.168.1.100:9822\"\n        \n        timeout : str, optional\n        \n            Timeout in seconds for the device to respond. \n        \n        forceFind : str, optional\n        \n            When using QIS, if a LAN device you wish to connect to is outside the local subnet, enter its IP address here\n            to force a scan and enumeration, otherwise the broadcast scan will probably not find it \n        \n        \"\"\"\n        self.ConString = ConString\n        if 'serial' not in ConString.lower():\n            self.ConString = ConString.lower()\n        else:\n            self.ConType = ConType\n        try:\n            self.timeout = int(timeout)\n        except:\n            raise Exception('Invalid value for timeout, must be a numeric value')\n\n        self.forceFind = forceFind\n        if checkModuleFormat(self.ConString) == False:\n            raise Exception('Module format is invalid!')\n        elif self.ConType.upper() == 'PY':\n            numb_colons = self.ConString.count(':')\n            if numb_colons == 2:\n                self.ConString = self.ConString.replace('::', ':')\n            self.connectionObj = PYConnection(self.ConString)\n            self.ConCommsType = self.connectionObj.ConnTypeStr\n            self.connectionName = self.connectionObj.ConnTarget\n            self.connectionTypeName = self.connectionObj.ConnTypeStr\n            time.sleep(0.1)\n            item = None\n            item = self.connectionObj.connection.sendCommand('*tst?')\n            if 'OK' in item:\n                pass\n            elif 'FAIL' in item:\n                pass\n            else:\n                if item is not None:\n                    pass\n                else:\n                    raise Exception('No module responded to *tst? command!')\n                time.sleep(0.1)\n        else:\n            if self.ConType[:3].upper() == 'QIS':\n                try:\n                    QIS, host, port = self.ConType.split(':')\n                    port = int(port)\n                except:\n                    host = '127.0.0.1'\n                    port = 9722\n\n                numb_colons = self.ConString.count(':')\n                if numb_colons == 1:\n                    self.ConString = self.ConString.replace(':', '::')\n                self.connectionObj = QISConnection(self.ConString, host, port)\n                if self.forceFind != 0:\n                    self.connectionObj.qis.sendAndReceiveCmd(cmd=('$scan ' + self.forceFind))\n                    time.sleep(0.1)\n                list = self.connectionObj.qis.getDeviceList()\n                list_str = ''.join(list).lower()\n                while True:\n                    if self.timeout == 0:\n                        raise ValueError('Search timeout - no Quarch module found.')\n                    elif self.ConString in list_str:\n                        break\n                    else:\n                        time.sleep(1)\n                        self.timeout -= 1\n                        list = self.connectionObj.qis.getDeviceList()\n                        list_str = ''.join(list).lower()\n\n                self.connectionObj.qis.sendAndReceiveCmd(cmd=('$default ' + self.ConString))\n            else:\n                if self.ConType[:3].upper() == 'QPS':\n                    try:\n                        QIS, host, port = self.ConType.split(':')\n                        port = int(port)\n                    except:\n                        host = '127.0.0.1'\n                        port = 9822\n\n                    numb_colons = self.ConString.count(':')\n                    if numb_colons == 1:\n                        self.ConString = self.ConString.replace(':', '::')\n                    self.connectionObj = QPSConnection(host, port)\n                else:\n                    raise ValueError('Invalid connection type. Acceptable values [PY,QIS,QPS]')\n        logging.debug(os.path.basename(__file__) + ' ConString : ' + str(self.ConString) + ' ConType : ' + str(self.ConType))\n\n    def sendCommand(self, CommandString, expectedResponse=True):\n        \"\"\"\n        Executes a text based command on the device. This is the primary way of controlling a device. The full command set available to use\n        is found in the technical manual for the device.\n        \n        Parameters\n        ----------\n        CommandString : str\n\n            The text based command string to send to the device\n        \n        Returns\n        -------\n        command_response : str or None\n\n            The response text from the module. Multiline responses will be separated with LF. 
Some commands\n        do not return a response and None will be returned\n\n        \"\"\"\n        logging.debug(os.path.basename(__file__) + ': ' + self.ConType[:3] + ' sending command: ' + CommandString)\n        if self.ConType[:3] == 'QIS':\n            numb_colons = self.ConString.count(':')\n            if numb_colons == 1:\n                self.ConString = self.ConString.replace(':', '::')\n            response = self.connectionObj.qis.sendCmd(self.ConString, CommandString)\n            logging.debug(os.path.basename(__file__) + ': ' + self.ConType[:3] + ' received: ' + response)\n            return response\n        if self.ConType == 'PY':\n            response = self.connectionObj.connection.sendCommand(CommandString)\n            logging.debug(os.path.basename(__file__) + ': ' + self.ConType[:3] + ' received: ' + response)\n            return response\n        if self.ConType[:3] == 'QPS':\n            if CommandString[0] != '$':\n                CommandString = self.ConString + ' ' + CommandString\n            response = self.connectionObj.qps.sendCmdVerbose(CommandString)\n            logging.debug(os.path.basename(__file__) + ': ' + self.ConType[:3] + ' received: ' + response)\n            return response\n\n    def sendBinaryCommand(self, cmd):\n        self.connectionObj.connection.Connection.SendCommand(cmd)\n        return self.connectionObj.connection.Connection.BulkRead()\n\n    def openConnection(self):\n        \"\"\"\n        Opens the connection to the module. This will be open by default on creation of quarchDevice but this\n        allows re-opening if required.\n        \n        \"\"\"\n        if self.ConType[:3] == 'QIS':\n            self.connectionObj.qis.connect()\n        else:\n            if self.ConType == 'PY':\n                del self.connectionObj\n                self.connectionObj = PYConnection(self.ConString)\n                return self.connectionObj\n            if self.ConType[:3] == 'QPS':\n                self.connectionObj.qps.connect(self.ConString)\n\n    def closeConnection(self):\n        \"\"\"\n        Closes the connection to the module, freeing the module for other uses. This should always be called when you are finished with a device.\n        \n        \"\"\"\n        if self.ConType[:3] == 'QIS':\n            self.connectionObj.qis.disconnect()\n        else:\n            if self.ConType == 'PY':\n                self.connectionObj.connection.close()\n            else:\n                if self.ConType[:3] == 'QPS':\n                    self.connectionObj.qps.disconnect(self.ConString)\n\n    def resetDevice(self, timeout=10):\n        \"\"\"\n        Issues a power-on-reset command to the device. 
Attempts to recover the connection to the module after reset.\n        Function halts until the timeout is complete or the module is found\n        \n        Parameters\n        ----------\n        timeout : int\n        \n            Number of seconds to wait for the module to re-enumerate and become available\n        \n        Returns\n        -------\n        result : bool\n        \n            True if the device was found and reconnected, false if it was not and we timed out\n        \n        \"\"\"\n        logging.debug(os.path.basename(__file__) + ': sending command: *rst')\n        if self.ConType[:3] == 'QIS':\n            numb_colons = self.ConString.count(':')\n            if numb_colons == 1:\n                self.ConString = self.ConString.replace(':', '::')\n            retval = self.ConString\n            self.connectionObj.qis.sendCmd((self.ConString), '*rst', expectedResponse=False)\n            logging.debug(os.path.basename(__file__) + ': connecting back to ' + retval)\n        else:\n            if self.ConType == 'PY':\n                retval = self.ConString\n                self.connectionObj.connection.sendCommand('*rst', expectedResponse=False)\n                self.connectionObj.connection.close()\n                logging.debug(os.path.basename(__file__) + ': connecting back to ' + retval)\n            else:\n                if self.ConType[:3] == 'QPS':\n                    retval = self.ConString\n                    CommandString = self.ConString + ' ' + '*rst'\n                    self.connectionObj.qps.sendCmdVerbose(CommandString, expectedResponse=False)\n                    logging.debug(os.path.basename(__file__) + ': connecting back to ' + retval)\n        temp = None\n        startTime = time.time()\n        time.sleep(0.6)\n        while temp == None:\n            try:\n                temp = quarchDevice(retval)\n            except:\n                time.sleep(0.2)\n                if time.time() - startTime > timeout:\n                    logging.critical(os.path.basename(__file__) + ': connection failed to ' + retval)\n                    return False\n\n        self.connectionObj = temp.connectionObj\n        time.sleep(1)\n        return True\n\n    def sendAndVerifyCommand(self, commandString, responseExpected='OK', exception=True):\n        \"\"\"\n        Sends a command to the device and verifies the response is as expected. This is a simple\n        wrapper of sendCommand and helps write cleaner code in test scripts.\n        \n        Parameters\n        ----------\n        commandString : str\n        \n            The text command to send to the device\n        \n        responseExpected : str, optional\n        \n            The expected text response from the module.\n        \n        exception : bool, optional\n        \n            If True, an exception is raised on mismatch. 
If False, we just return False\n \n Returns\n -------\n result : bool\n \n True if we matched the response, False if we did not\n \n Raises\n ------\n ValueError\n If the response does not match AND the exception parameter is set to True\n \n \"\"\"\n responseStr = self.sendCommand(commandString)\n if responseStr != responseExpected:\n if exception:\n raise ValueError('Command response error: ' + responseStr)\n else:\n return False\n else:\n return True\n\n\ndef checkModuleFormat(ConString):\n ConnectionTypes = [\n 'USB', 'SERIAL', 'TELNET', 'REST', 'TCP']\n conTypeSpecified = ConString[:ConString.find(':')]\n correctConType = False\n for value in ConnectionTypes:\n if value.lower() == conTypeSpecified.lower():\n correctConType = True\n\n if not correctConType:\n raise Exception('Invalid connection type specified in Module string, use one of [USB|SERIAL|TELNET|REST|TCP]')\n return False\n numb_colons = ConString.count(':')\n if numb_colons > 2 or numb_colons <= 0:\n raise Exception('Invalid number of colons in module string')\n return False\n return True","sub_path":"pycfiles/quarchpy-2.0.14-py2.py3-none-any/device.cpython-37.py","file_name":"device.cpython-37.py","file_ext":"py","file_size_in_byte":14252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"313228682","text":"import re\nfrom typing import Iterable, List, Tuple\n\nre_word = re.compile(r\"\\s*\\S+\\s*\")\n\n\ndef words(text: str) -> Iterable[Tuple[int, int, str]]:\n position = 0\n word_match = re_word.match(text, position)\n while word_match is not None:\n start, end = word_match.span()\n word = word_match.group(0)\n yield start, end, word\n word_match = re_word.match(text, end)\n\n\ndef divide_line(text: str, width: int) -> List[int]:\n divides: List[int] = []\n append = divides.append\n line_position = 0\n for start, end, word in words(text):\n if line_position + len(word.rstrip()) > width:\n if line_position and start:\n append(start)\n line_position = len(word)\n else:\n divides.extend(range(start or width, end + 1, width))\n line_position = len(word) % width\n else:\n line_position += len(word)\n return divides\n","sub_path":"rich/_wrap.py","file_name":"_wrap.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"146457299","text":"from __future__ import absolute_import\nfrom __future__ import unicode_literals\nfrom datetime import datetime\nfrom django.db import connections\nfrom django.core.management.base import BaseCommand\nfrom corehq.sql_db.util import get_db_aliases_for_partitioned_query\nfrom six.moves import range\n\n\ndef foreign_key_exists(db_alias, table_name, foreign_column_key_name):\n cursor = connections[db_alias].cursor()\n cursor.execute(\n \"SELECT 1 \"\n \"FROM information_schema.table_constraints A \"\n \"JOIN information_schema.key_column_usage B \"\n \"ON A.constraint_name = B.constraint_name \"\n \"AND A.table_name = B.table_name \"\n \"WHERE A.table_name = %s \"\n \"AND B.column_name = %s\"\n \"AND A.constraint_type = 'FOREIGN KEY' \",\n [table_name, foreign_column_key_name]\n )\n return cursor.fetchone() is not None\n\n\ndef add_locations_sqllocation_parent_fk(db_alias):\n cursor = connections[db_alias].cursor()\n cursor.execute(\n \"ALTER TABLE locations_sqllocation \"\n \"ADD CONSTRAINT locations_sqlloc_parent_id_2ffc03fb_fk_locations_sqllocation_id \"\n \"FOREIGN KEY (parent_id) REFERENCES locations_sqllocation(id) DEFERRABLE INITIALLY DEFERRED\"\n 
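# Aside: a hypothetical usage sketch pieced together from the quarchDevice
# docstrings above; the connection string is invented, the import path is assumed
# from the embedded file location, and running this needs real Quarch hardware.
def _example_quarch_session():
    from quarchpy.device import quarchDevice  # assumed import path
    device = quarchDevice("USB:QTL1743", ConType="PY", timeout="5")
    try:
        print(device.sendCommand("*tst?"))  # the self-test command __init__ issues above
    finally:
        device.closeConnection()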
)\n\n\ndef add_form_processor_xformattachmentsql_form_id_fk(db_alias):\n cursor = connections[db_alias].cursor()\n cursor.execute(\n \"ALTER TABLE form_processor_xformattachmentsql \"\n \"ADD CONSTRAINT for_form_id_d184240c_fk_form_processor_xforminstancesql_form_id \"\n \"FOREIGN KEY (form_id) REFERENCES form_processor_xforminstancesql(form_id) DEFERRABLE INITIALLY DEFERRED\"\n )\n\n\ndef add_form_processor_commcarecaseindexsql_case_id_fk(db_alias):\n cursor = connections[db_alias].cursor()\n cursor.execute(\n \"ALTER TABLE form_processor_commcarecaseindexsql \"\n \"ADD CONSTRAINT form_case_id_be4cb9e1_fk_form_processor_commcarecasesql_case_id \"\n \"FOREIGN KEY (case_id) REFERENCES form_processor_commcarecasesql(case_id) DEFERRABLE INITIALLY DEFERRED\"\n )\n\n\ndef add_form_processor_casetransaction_case_id_fk(db_alias):\n cursor = connections[db_alias].cursor()\n cursor.execute(\n \"ALTER TABLE form_processor_casetransaction \"\n \"ADD CONSTRAINT form_case_id_0328b100_fk_form_processor_commcarecasesql_case_id \"\n \"FOREIGN KEY (case_id) REFERENCES form_processor_commcarecasesql(case_id) DEFERRABLE INITIALLY DEFERRED\"\n )\n\n\nclass Command(BaseCommand):\n help = \"\"\n\n log_file = None\n\n def log(self, text, indent=0):\n self.log_file.write(datetime.utcnow().strftime(\"%Y-%m-%d %H:%M:%S: \"))\n for i in range(indent):\n self.log_file.write(' ')\n self.log_file.write(text)\n self.log_file.write('\\n')\n\n def add_arguments(self, parser):\n parser.add_argument(\n '--check-only',\n action='store_true',\n dest='check_only',\n default=False,\n help=\"Only check if the foreign keys exist but don't add anything\",\n )\n\n def try_to_add_fk(self, function, db_alias):\n try:\n function(db_alias)\n self.log(\"foreign key added\", 2)\n except Exception as e:\n self.log(\"error adding foreign key: %s\" % e, 2)\n\n def handle_locations_sqllocation(self):\n self.log(\"handling locations_sqllocation\", 2)\n if foreign_key_exists('default', 'locations_sqllocation', 'parent_id'):\n self.log(\"foreign key exists\", 2)\n else:\n self.log(\"foreign key DOES NOT exist\", 2)\n if not self.check_only:\n self.try_to_add_fk(add_locations_sqllocation_parent_fk, 'default')\n\n def handle_form_processor_xformattachmentsql(self, db_alias):\n self.log(\"handling form_processor_xformattachmentsql\", 2)\n if foreign_key_exists(db_alias, 'form_processor_xformattachmentsql', 'form_id'):\n self.log(\"foreign key exists\", 2)\n else:\n self.log(\"foreign key DOES NOT exist\", 2)\n if not self.check_only:\n self.try_to_add_fk(add_form_processor_xformattachmentsql_form_id_fk, db_alias)\n\n def handle_form_processor_commcarecaseindexsql(self, db_alias):\n self.log(\"handling form_processor_commcarecaseindexsql\", 2)\n if foreign_key_exists(db_alias, 'form_processor_commcarecaseindexsql', 'case_id'):\n self.log(\"foreign key exists\", 2)\n else:\n self.log(\"foreign key DOES NOT exist\", 2)\n if not self.check_only:\n self.try_to_add_fk(add_form_processor_commcarecaseindexsql_case_id_fk, db_alias)\n\n def handle_form_processor_casetransaction(self, db_alias):\n self.log(\"handling form_processor_casetransaction\", 2)\n if foreign_key_exists(db_alias, 'form_processor_casetransaction', 'case_id'):\n self.log(\"foreign key exists\", 2)\n else:\n self.log(\"foreign key DOES NOT exist\", 2)\n if not self.check_only:\n self.try_to_add_fk(add_form_processor_casetransaction_case_id_fk, db_alias)\n\n def handle(self, check_only, **options):\n self.check_only = check_only\n with open('add_back_enikshay_foreign_keys.log', 'a') as 
f:\n self.log_file = f\n self.log(\"\")\n self.log(\"running script to add back missing foreign keys\")\n self.log(\"check_only is: %s\" % check_only)\n\n self.log(\"handling db: default\", 1)\n self.handle_locations_sqllocation()\n\n for db_alias in get_db_aliases_for_partitioned_query():\n self.log(\"handling db: %s\" % db_alias, 1)\n self.handle_form_processor_xformattachmentsql(db_alias)\n self.handle_form_processor_commcarecaseindexsql(db_alias)\n self.handle_form_processor_casetransaction(db_alias)\n","sub_path":"custom/enikshay/management/commands/add_back_enikshay_foreign_keys.py","file_name":"add_back_enikshay_foreign_keys.py","file_ext":"py","file_size_in_byte":5815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"91676584","text":"import os\nimport re\nimport shutil\nfrom datetime import datetime, timedelta\nfrom typing import Dict, List, Optional, Set, Tuple\n\nimport dateutil.parser.isoparser\nimport discord\nfrom bs4 import BeautifulSoup\nfrom canvasapi.canvas import Canvas\nfrom canvasapi.course import Course\nfrom canvasapi.paginated_list import PaginatedList\n\nfrom util import create_file\nfrom util.canvas_api_extension import get_course_stream, get_course_url\n\n# Stores course modules and channels that are live tracking courses\n# Do *not* put a slash at the end of this path\nCOURSES_DIRECTORY = \"./data/courses\"\n\n\nclass CanvasHandler(Canvas):\n \"\"\"\n Represents a handler for Canvas information for a guild\n\n Attributes\n ----------\n courses : `List[canvasapi.Course]`\n Courses tracked in guild mode.\n\n guild : `discord.Guild`\n Guild assigned to this handler.\n\n timings : `Dict[str, str]`\n Contains course and its last announcement date and time.\n\n due_week : `Dict[str, List[str]]`\n Contains course and assignment ids due in less than a week.\n\n due_day : `Dict[str, List[str]]`\n Contains course and assignment ids due in less than a day.\n \"\"\"\n\n def __init__(self, API_URL, API_KEY, guild: discord.Guild):\n \"\"\"\n Parameters\n ----------\n API_URL : `str`\n Base URL of the Canvas instance's API\n\n API_KEY : `str`\n API key to authenticate requests with\n\n guild : `discord.Guild`\n Guild to assign to this handler\n \"\"\"\n\n super().__init__(API_URL, API_KEY)\n self._courses: List[Course] = []\n self._guild = guild\n self._live_channels: List[discord.TextChannel] = []\n self._timings: Dict[str, str] = {}\n self._due_week: Dict[str, List[str]] = {}\n self._due_day: Dict[str, List[str]] = {}\n\n @property\n def courses(self) -> List[Course]:\n return self._courses\n\n @courses.setter\n def courses(self, courses: List[Course]):\n self._courses = courses\n\n @property\n def guild(self) -> discord.Guild:\n return self._guild\n\n @property\n def live_channels(self):\n return self._live_channels\n\n @live_channels.setter\n def live_channels(self, live_channels):\n self._live_channels = live_channels\n\n @property\n def timings(self):\n return self._timings\n\n @timings.setter\n def timings(self, timings):\n self._timings = timings\n\n @property\n def due_week(self):\n return self._due_week\n\n @due_week.setter\n def due_week(self, due_week):\n self._due_week = due_week\n\n @property\n def due_day(self):\n return self._due_day\n\n @due_day.setter\n def due_day(self, due_day):\n self._due_day = due_day\n\n @staticmethod\n def _ids_converter(ids: Tuple[str, ...]) -> Set[int]:\n \"\"\"\n Converts list of string to list of int, removes duplicates\n\n Parameters\n ----------\n ids : `Tuple[str, ...]`\n 
Tuple of string ids\n\n Returns\n -------\n `List[int]`\n List of int ids\n \"\"\"\n\n return set(int(i) for i in ids)\n\n def track_course(self, course_ids_str: Tuple[str, ...]):\n \"\"\"\n Adds course(s) to track\n\n Parameters\n ----------\n course_ids_str : `Tuple[str, ...]`\n Tuple of course ids\n \"\"\"\n\n course_ids = self._ids_converter(course_ids_str)\n c_ids = {c.id for c in self.courses}\n\n new_courses = tuple(self.get_course(i) for i in course_ids if i not in c_ids)\n self.courses.extend(new_courses)\n\n for c in course_ids_str:\n if c not in self.timings:\n self.timings[c] = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n if c not in self.due_week:\n self.due_week[c] = []\n\n if c not in self.due_day:\n self.due_day[c] = []\n\n for c in new_courses:\n modules_file = f\"{COURSES_DIRECTORY}/{c.id}/modules.txt\"\n watchers_file = f\"{COURSES_DIRECTORY}/{c.id}/watchers.txt\"\n self.store_channels_in_file(tuple(self._live_channels), watchers_file)\n\n if self._live_channels:\n create_file.create_file_if_not_exists(modules_file)\n\n # Here, we will only download modules if modules_file is empty.\n if os.stat(modules_file).st_size == 0:\n self.download_modules(c)\n\n @staticmethod\n def download_modules(course: Course):\n \"\"\"\n Download all modules for a Canvas course, storing each module's URL (or name/title\n if the url does not exist) in {COURSES_DIRECTORY}/{course.id}/modules.txt.\n\n Assumption: {COURSES_DIRECTORY}/{course.id}/modules.txt exists.\n \"\"\"\n\n modules_file = f\"{COURSES_DIRECTORY}/{course.id}/modules.txt\"\n\n with open(modules_file, \"w\") as f:\n for module in course.get_modules():\n if hasattr(module, \"name\"):\n f.write(module.name + \"\\n\")\n\n for item in module.get_module_items():\n if hasattr(item, \"title\"):\n if hasattr(item, \"html_url\"):\n f.write(item.html_url + \"\\n\")\n else:\n f.write(item.title + \"\\n\")\n\n @staticmethod\n def store_channels_in_file(text_channels: Tuple[discord.TextChannel], file_path: str):\n \"\"\"\n For each text channel provided, we add its id to the file with given path if the file does\n not already contain the id.\n \"\"\"\n\n if text_channels:\n create_file.create_file_if_not_exists(file_path)\n\n with open(file_path, \"r\") as f:\n existing_ids = f.readlines()\n\n ids_to_add = set(map(lambda channel: str(channel.id) + \"\\n\", text_channels))\n\n with open(file_path, \"w\") as f:\n for channel_id in existing_ids:\n if channel_id in ids_to_add:\n ids_to_add.remove(channel_id)\n\n f.write(channel_id)\n\n for channel_id in ids_to_add:\n f.write(channel_id)\n\n def untrack_course(self, course_ids_str: Tuple[str, ...]):\n \"\"\"\n Untracks course(s)\n\n Parameters\n ----------\n course_ids_str : `Tuple[str, ...]`\n Tuple of course ids\n \"\"\"\n\n course_ids = self._ids_converter(course_ids_str)\n c_ids = {c.id: c for c in self.courses}\n\n ids_of_removed_courses = []\n\n for i in filter(c_ids.__contains__, course_ids):\n self.courses.remove(c_ids[i])\n ids_of_removed_courses.append(i)\n\n for c in course_ids_str:\n if c in self.timings:\n del self.timings[c]\n\n if c in self.due_week:\n del self.due_week[c]\n\n if c in self.due_day:\n del self.due_day[c]\n\n for i in ids_of_removed_courses:\n watchers_file = f\"{COURSES_DIRECTORY}/{i}/watchers.txt\"\n self.delete_channels_from_file(self.live_channels, watchers_file)\n\n # If there are no more channels watching the course, we should delete that course's directory.\n if os.stat(watchers_file).st_size == 0:\n shutil.rmtree(f\"{COURSES_DIRECTORY}/{i}\")\n\n 
@staticmethod\n def delete_channels_from_file(text_channels: List[discord.TextChannel], file_path: str):\n \"\"\"\n For each text channel provided, we remove its id from the file with given path\n if the id is contained in the file.\n \"\"\"\n\n create_file.create_file_if_not_exists(file_path)\n\n with open(file_path, \"r\") as f:\n channel_ids = f.readlines()\n\n ids_to_remove = set(map(lambda channel: str(channel.id) + \"\\n\", text_channels))\n\n with open(file_path, \"w\") as f:\n for channel_id in channel_ids:\n if channel_id not in ids_to_remove:\n f.write(channel_id)\n\n def get_course_stream_ch(self, since: Optional[str], course_ids_str: Tuple[str, ...], base_url: str, access_token: str) -> List[List[str]]:\n \"\"\"\n Gets announcements for course(s)\n\n Parameters\n ----------\n since : `None or str`\n Date/Time from announcement creation to now\n\n course_ids_str : `Tuple[str, ...]`\n Tuple of course ids\n\n base_url : `str`\n Base URL of the Canvas instance's API\n\n access_token : `str`\n API key to authenticate requests with\n\n Returns\n -------\n `List[List[str]]`\n List of announcement data to be formatted and sent as embeds\n \"\"\"\n\n course_ids = self._ids_converter(course_ids_str)\n course_stream_list = tuple(get_course_stream(c.id, base_url, access_token) for c in self.courses if (not course_ids) or c.id in course_ids)\n data_list = []\n\n for stream_iter in map(iter, course_stream_list):\n for item in filter(lambda i: i[\"type\"] == \"Conversation\", stream_iter):\n course = self.get_course(item[\"course_id\"])\n course_url = get_course_url(course.id, base_url)\n title = \"Announcement: \" + item[\"title\"]\n short_desc = \"\\n\".join(item[\"latest_messages\"][0][\"message\"].split(\"\\n\")[:4])\n ctime_iso = item[\"created_at\"]\n\n if ctime_iso is None:\n ctime_text = \"No info\"\n else:\n time_shift = datetime.now() - datetime.utcnow()\n ctime_iso_parsed = (dateutil.parser.isoparse(ctime_iso) + time_shift).replace(tzinfo=None)\n ctime_timedelta = ctime_iso_parsed - datetime.now()\n\n if since and ctime_timedelta <= -self._make_timedelta(since):\n break\n\n ctime_text = ctime_iso_parsed.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n data_list.append([course.name, course_url, title, item[\"html_url\"], short_desc, ctime_text, course.id])\n\n return data_list\n\n def get_assignments(self, due: Optional[str], course_ids_str: Tuple[str, ...], base_url) -> List[List[str]]:\n \"\"\"\n Gets assignments for course(s)\n\n Parameters\n ----------\n due : `None or str`\n Date/Time from due date of assignments\n\n course_ids_str : `Tuple[str, ...]`\n Tuple of course ids\n\n base_url : `str`\n Base URL of the Canvas instance's API\n\n Returns\n -------\n `List[List[str]]`\n List of assignment data to be formatted and sent as embeds\n \"\"\"\n\n course_ids = self._ids_converter(course_ids_str)\n courses_assignments = {c: c.get_assignments() for c in self.courses if not course_ids or c.id in course_ids}\n\n return self._get_assignment_data(due, courses_assignments, base_url)\n\n def _get_assignment_data(self, due: Optional[str], courses_assignments: Dict[Course, PaginatedList], base_url: str) -> List[List[str]]:\n \"\"\"\n Formats all courses assignments as separate assignments\n\n Parameters\n ----------\n due : `None or str`\n Date/Time from due date of assignments\n\n courses_assignments : `Dict[Course, PaginatedList of Assignments]`\n List of courses and their assignments\n\n base_url : `str`\n Base URL of the Canvas instance's API\n\n Returns\n -------\n `List[List[str]]`\n List of 
assignment data to be formatted and sent as embeds\n        \"\"\"\n\n        data_list = []\n\n        for course, assignments in courses_assignments.items():\n            course_name = course.name\n            course_url = get_course_url(course.id, base_url)\n\n            for assignment in filter(lambda asgn: asgn.published, assignments):\n                ass_id = assignment.__getattribute__(\"id\")\n                title = \"Assignment: \" + assignment.__getattribute__(\"name\")\n                url = assignment.__getattribute__(\"html_url\")\n                desc_html = assignment.__getattribute__(\"description\") or \"No description\"\n\n                short_desc = \"\\n\".join(BeautifulSoup(desc_html, \"html.parser\").get_text().split(\"\\n\")[:4])\n\n                ctime_iso = assignment.__getattribute__(\"created_at\")\n                dtime_iso = assignment.__getattribute__(\"due_at\")\n\n                time_shift = datetime.now() - datetime.utcnow()\n\n                if ctime_iso is None:\n                    ctime_text = \"No info\"\n                else:\n                    ctime_text = (dateutil.parser.isoparse(ctime_iso) + time_shift).strftime(\"%Y-%m-%d %H:%M:%S\")\n\n                if dtime_iso is None:\n                    dtime_text = \"No info\"\n                else:\n                    dtime_iso_parsed = (dateutil.parser.isoparse(dtime_iso) + time_shift).replace(tzinfo=None)\n                    dtime_timedelta = dtime_iso_parsed - datetime.now()\n\n                    if dtime_timedelta < timedelta(0) or (due and dtime_timedelta > self._make_timedelta(due)):\n                        continue\n\n                    dtime_text = dtime_iso_parsed.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n                data_list.append([course_name, course_url, title, url, short_desc, ctime_text, dtime_text, course.id, ass_id])\n\n        return data_list\n\n    @staticmethod\n    def _make_timedelta(till_str: str) -> timedelta:\n        \"\"\"\n        Makes a datetime.timedelta\n\n        Parameters\n        ----------\n        till_str : `str`\n            Date/Time from due date of assignments\n\n        Returns\n        -------\n        `datetime.timedelta`\n            Time delta between till and now\n        \"\"\"\n\n        till = re.split(r\"[-:]\", till_str)\n\n        if till[1] in [\"hour\", \"day\", \"week\"]:\n            return abs(timedelta(**{till[1] + \"s\": float(till[0])}))\n        elif till[1] in [\"month\", \"year\"]:\n            return abs(timedelta(days=(30 if till[1] == \"month\" else 365) * float(till[0])))\n\n        year, month, day = int(till[0]), int(till[1]), int(till[2])\n\n        if len(till) == 3:\n            return abs(datetime(year, month, day) - datetime.now())\n\n        hour, minute, second = int(till[3]), int(till[4]), int(till[5])\n        return abs(datetime(year, month, day, hour, minute, second) - datetime.now())\n\n    def get_course_names(self, url) -> List[List[str]]:\n        \"\"\"\n        Gives a list of tracked courses and their urls\n\n        Parameters\n        ----------\n        url : `str`\n            Base URL of the Canvas instance's API\n\n        Returns\n        -------\n        `List[List[str]]`\n            List of course names and their page urls\n        \"\"\"\n\n        return [[c.name, get_course_url(c.id, url)] for c in self.courses]\n","sub_path":"util/canvas_handler.py","file_name":"canvas_handler.py","file_ext":"py","file_size_in_byte":14815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"399054648","text":"money = 10000\nown = []\n\nsampleStock = {\n    'name':'shiwon',\n    'price':900,\n    'history':[0,0,0],\n}\n\ninfo_market = \"\"\"\n[[MARKET]]\n1. Buy\n2. Make New\n3. Delete\n4. Go to My Stocks\n\"\"\"\n\ninfo_mystocks = \"\"\"\n[[MYSTOCKS]]\n1. Buy\n2. Sell\n3. Back to Main Page\n4. 
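# Aside: a simplified standalone sketch of the two input shapes
# CanvasHandler._make_timedelta above accepts -- a "<count>-<unit>" offset or an
# absolute "Y-M-D" date measured against now (the month/year and h:m:s branches
# of the original are omitted here; function name hypothetical):
from datetime import datetime, timedelta
import re

def make_timedelta_sketch(till_str):
    till = re.split(r"[-:]", till_str)
    if till[1] in ("hour", "day", "week"):
        return abs(timedelta(**{till[1] + "s": float(till[0])}))
    year, month, day = int(till[0]), int(till[1]), int(till[2])
    return abs(datetime(year, month, day) - datetime.now())

print(make_timedelta_sketch("2-hour"))    # 2:00:00
print(make_timedelta_sketch("2021-5-1"))  # distance from now to 2021-05-01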
Go to Market\n\"\"\"\n\ndef market():\n print(f'Money: {money}')\n print(info_market)","sub_path":"lab9_2018199061/stock.py","file_name":"stock.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"25797094","text":"# Inspecting the vectors\n# To get a better idea of how the vectors work, you'll investigate them by converting them into pandas DataFrames.\n#\n# Here, you'll use the same data structures you created in the previous two exercises (count_train, count_vectorizer,\n# tfidf_train, tfidf_vectorizer) as well as pandas, which is imported as pd.\n\nimport pandas as pd\n\nfrom practice1 import count_train, count_vectorizer\nfrom practice2 import tfidf_train, tfidf_vectorizer\n\n# Create the CountVectorizer DataFrame: count_df\ncount_df = pd.DataFrame(count_train.A, columns=count_vectorizer.get_feature_names())\n\n# Create the TfidfVectorizer DataFrame: tfidf_df\ntfidf_df = pd.DataFrame(tfidf_train.A, columns=tfidf_vectorizer.get_feature_names())\n\n# Print the head of count_df\nprint(count_df.head())\n\n# Print the head of tfidf_df\nprint(tfidf_df.head())\n\n# Calculate the difference in columns: difference\ndifference = set(count_df.columns) - set(tfidf_df.columns)\nprint(difference)\n\n# Check whether the DataFrames are equal\nprint(count_df.equals(tfidf_df))\n","sub_path":"Introduction_to_Natural_Language_Processing_in_Python/04_Building_a_fake_news_classifier/practice3.py","file_name":"practice3.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"610928436","text":"#!/usr/bin/env python\n#\n# All rights reserved. This program and the accompanying materials\n# are made available under the terms of the Apache License, Version 2.0\n# which accompanies this distribution, and is available at\n# http://www.apache.org/licenses/LICENSE-2.0\n#\nimport json\nimport re\nimport urllib2\n\nimport functest.utils.functest_logger as ft_logger\nimport functest.utils.functest_utils as ft_utils\nfrom functest.utils.constants import CONST\n\nCOL_1_LEN = 25\nCOL_2_LEN = 15\nCOL_3_LEN = 12\nCOL_4_LEN = 15\nCOL_5_LEN = 75\n\n# If we run from CI (Jenkins) we will push the results to the DB\n# and then we can print the url to the specific test result\n\n\nlogger = ft_logger.Logger(\"generate_report\").getLogger()\n\n\ndef init(tiers_to_run):\n test_cases_arr = []\n for tier in tiers_to_run:\n for test in tier.get_tests():\n test_cases_arr.append({'test_name': test.get_name(),\n 'tier_name': tier.get_name(),\n 'result': 'Not executed',\n 'duration': '0',\n 'url': ''})\n return test_cases_arr\n\n\ndef get_results_from_db():\n url = \"%s/results?build_tag=%s\" % (ft_utils.get_db_url(),\n CONST.BUILD_TAG)\n logger.debug(\"Query to rest api: %s\" % url)\n try:\n data = json.load(urllib2.urlopen(url))\n return data['results']\n except:\n logger.error(\"Cannot read content from the url: %s\" % url)\n return None\n\n\ndef get_data(test, results):\n test_result = test['result']\n url = ''\n for test_db in results:\n if test['test_name'] in test_db['case_name']:\n id = test_db['_id']\n url = ft_utils.get_db_url() + '/results/' + id\n test_result = test_db['criteria']\n\n return {\"url\": url, \"result\": test_result}\n\n\ndef print_line(w1, w2='', w3='', w4='', w5=''):\n str = ('| ' + w1.ljust(COL_1_LEN - 1) +\n '| ' + w2.ljust(COL_2_LEN - 1) +\n '| ' + w3.ljust(COL_3_LEN - 1) +\n '| ' + w4.ljust(COL_4_LEN - 1))\n if 
CONST.IS_CI_RUN:\n str += ('| ' + w5.ljust(COL_5_LEN - 1))\n str += '|\\n'\n return str\n\n\ndef print_line_no_columns(str):\n TOTAL_LEN = COL_1_LEN + COL_2_LEN + COL_3_LEN + COL_4_LEN + 2\n if CONST.IS_CI_RUN:\n TOTAL_LEN += COL_5_LEN + 1\n return ('| ' + str.ljust(TOTAL_LEN) + \"|\\n\")\n\n\ndef print_separator(char=\"=\", delimiter=\"+\"):\n str = (\"+\" + char * COL_1_LEN +\n delimiter + char * COL_2_LEN +\n delimiter + char * COL_3_LEN +\n delimiter + char * COL_4_LEN)\n if CONST.IS_CI_RUN:\n str += (delimiter + char * COL_5_LEN)\n str += '+\\n'\n return str\n\n\ndef main(args):\n executed_test_cases = args\n\n if CONST.IS_CI_RUN:\n results = get_results_from_db()\n if results is not None:\n for test in executed_test_cases:\n data = get_data(test, results)\n test.update({\"url\": data['url'],\n \"result\": data['result']})\n\n TOTAL_LEN = COL_1_LEN + COL_2_LEN + COL_3_LEN + COL_4_LEN\n if CONST.IS_CI_RUN:\n TOTAL_LEN += COL_5_LEN\n MID = TOTAL_LEN / 2\n\n if CONST.BUILD_TAG is not None:\n if re.search(\"daily\", CONST.BUILD_TAG) is not None:\n CONST.CI_LOOP = \"daily\"\n else:\n CONST.CI_LOOP = \"weekly\"\n\n str = ''\n str += print_separator('=', delimiter=\"=\")\n str += print_line_no_columns(' ' * (MID - 8) + 'FUNCTEST REPORT')\n str += print_separator('=', delimiter=\"=\")\n str += print_line_no_columns(' ')\n str += print_line_no_columns(\" Deployment description:\")\n str += print_line_no_columns(\" INSTALLER: %s\"\n % CONST.INSTALLER_TYPE)\n if CONST.DEPLOY_SCENARIO is not None:\n str += print_line_no_columns(\" SCENARIO: %s\"\n % CONST.DEPLOY_SCENARIO)\n if CONST.BUILD_TAG is not None:\n str += print_line_no_columns(\" BUILD TAG: %s\"\n % CONST.BUILD_TAG)\n if CONST.CI_LOOP is not None:\n str += print_line_no_columns(\" CI LOOP: %s\"\n % CONST.CI_LOOP)\n str += print_line_no_columns(' ')\n str += print_separator('=')\n if CONST.IS_CI_RUN:\n str += print_line('TEST CASE', 'TIER', 'DURATION', 'RESULT', 'URL')\n else:\n str += print_line('TEST CASE', 'TIER', 'DURATION', 'RESULT')\n str += print_separator('=')\n for test in executed_test_cases:\n str += print_line(test['test_name'],\n test['tier_name'],\n test['duration'],\n test['result'],\n test['url'])\n str += print_separator('-')\n\n logger.info(\"\\n\\n\\n%s\" % str)\n\n\nif __name__ == '__main__':\n import sys\n main(sys.argv[1:])\n","sub_path":"functest/ci/generate_report.py","file_name":"generate_report.py","file_ext":"py","file_size_in_byte":4896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"482495857","text":"import gym\nimport numpy as np\n\nenv = gym.make('CartPole-v1')\n\n\ndef play(env, policy):\n observation = env.reset()\n\n done = False\n score = 0\n observations = []\n\n for _ in range(5000):\n observations += [observation.tolist()] # Record the observations for normalization and replay\n\n if done: # If the simulation was over last iteration, exit loop\n break\n\n # Pick an action according to the policy matrix\n outcome = np.dot(policy, observation)\n action = 1 if outcome > 0 else 0\n\n # Make the action, record reward\n observation, reward, done, info = env.step(action)\n score += reward\n\n return score, observations\n\n\nmax = (0, [], [])\n\n# We changed the next two lines!\nfor _ in range(100):\n policy = np.random.rand(1, 4) - 0.5\n score, observations = play(env, policy)\n\n if score > max[0]:\n max = (score, observations, policy)\n\nprint('Max Score', max[0])\n\nscores = []\nfor _ in range(100):\n score, _ = play(env, max[2])\n scores += 
[score]\n\nprint('Average Score (100 trials)', np.mean(scores))\n\n\n\nfrom flask import Flask\nimport json\n\napp = Flask(__name__, static_folder='.')\n\n\n@app.route(\"/data\")\ndef data():\n return json.dumps(max[1])\n\n\n@app.route('/')\ndef root():\n return app.send_static_file('./index.html')\n\n\napp.run(host='localhost', port='3000')\n","sub_path":"cartpole.py","file_name":"cartpole.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"17818677","text":"s = 'hello'\n##for char in s:\n## print(char)\n\ni = 0\n\nwhile not (s[i] in 'aeiouAEIOU'):\n print(s[i])\n i = i + 1\n\ns = 'there'\ni = 0\nwhile not (s[i] in 'aeiouAEIOU'):\n print(s[i])\n i = i + 1\n\ns = 'xyz'\ni = 0\nwhile i < len(s) and not (s[i] in 'aeiouAEIOU'):\n print(s[i])\n i = i + 1\n\ndef up_to_vowel(s):\n ''' (str) -> str\n\n Return a substring of s from index 0 up to but\n not including the first vowel in s.\n\n >>> up_to_vowel('hello')\n 'h'\n >>> up_to_vowel('there')\n 'th'\n >>> up_to_vowel('cs')\n 'cs'\n '''\n\n before_vowel = ''\n i = 0\n\n while i < len(s) and not (s[i] in 'aeiouAEIOU'):\n before_vowel = before_vowel + s[i]\n i = i + 1\n\n return before_vowel\n\ndef get_answer(prompt):\n ''' (str) -> str\n\n Use prompt to ask the user for a \"yes\" or \"no\"\n answer and continue asking until the user gives\n a valid response. Return the answer.\n '''\n\n answer = input(prompt)\n\n while not (answer == 'yes' or answer == 'no'):\n answer = input(prompt)\n\n return answer\n\nsum = 0\ni = 1523\nwhile i <= 10503:\n\tif (i % 3 == 0):\n\t\tsum = sum + i\t\t\n\ti = i + 1\n\nprint(sum)\n\n\n\n","sub_path":"Python Basics/while_loops.py","file_name":"while_loops.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"630849801","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\n\ndriver=webdriver.Firefox()\ndriver.get(\"http://selenium-python.readthedocs.io\")\nassert \"Python\" in driver.title\nelement=driver.find_element_by_name(\"q\")\nelement.send_keys(\"pycon\")\nelement.send_keys(Keys.RETURN)\nassert \"No result found\" in driver.page_source\ndriver.close()\n","sub_path":"python/selenium_python/first_driver_simple_example.py","file_name":"first_driver_simple_example.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"358463141","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 3 21:15:17 2016\n\n@author: seis\n\"\"\"\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom function import Lorenz96, RungeKutta4, KFforB, calc3DVAR\nimport sys\n\n#各定数の定義を行う\nT = 0\ndt = 0.05\nF = 8.\nJ = 40\n\nR = np.eye(J)\nP_a = R\nH = np.eye(J)\n\n#データの読み込みを行っている。data[i]がiステップ目の40個のベクトルデータになっている。data1が真値data2が観測値\ndata1 = np.loadtxt(\"data01.txt\", delimiter=\", \")\ndata2 = np.loadtxt(\"data02.txt\", delimiter=\", \")\n\nB = np.loadtxt(\"B_best.txt\", delimiter=\", \")\n\n#方法1で定めたBを用いて3次元変分法を適用した場合\nFig1 = []\nFig2 = []\nx_a = data2[0]\nfor i in range(1, 1460):\n x_t = data1[i]\n x_f = RungeKutta4(Lorenz96, x_a, F, dt)\n y = data2[i]\n x_a = calc3DVAR(x_f, y, H, B)\n Fig1.append(np.linalg.norm(x_t- x_a)/ np.sqrt(J))\n Fig2.append(x_t)\n\nplt.title(\"3DVAR: B:method1\")\nplt.xlabel(\"Time Steps\")\nplt.ylabel(\"Root Mean Square Error\")\nplt.xlim(0, 1460)\nplt.ylim(0, 
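# Aside: the 'xyz' loop in while_loops.py above orders its condition as
# `i < len(s) and not (...)` because `and` short-circuits: once i == len(s),
# s[i] is never evaluated, so the loop ends cleanly instead of raising IndexError.
s_demo = 'xyz'
i_demo = 0
while i_demo < len(s_demo) and s_demo[i_demo] not in 'aeiouAEIOU':
    i_demo = i_demo + 1
assert i_demo == 3  # walked off the end safely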
3)\nplt.plot(Fig1)\nplt.savefig(\"Fig6_B_3.png\",format = 'png', dpi=300)\nplt.show()\nmean = np.array(Fig1).mean()\nprint(mean)","sub_path":"aineko27_nakamura_20160523/problem6_B_3.py","file_name":"problem6_B_3.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"83330807","text":"#!/usr/bin/python3\n__author__ = 'ivan.shynkarenka'\n\n\nimport argparse\nfrom TTWebClient.TickTraderWebClient import TickTraderWebClient\n\n\ndef main():\n parser = argparse.ArgumentParser(description='TickTrader Web API sample')\n parser.add_argument('web_api_address', help='TickTrader Web API address')\n parser.add_argument('web_api_id', default=None, help='TickTrader Web API Id')\n parser.add_argument('web_api_key', default=None, help='TickTrader Web API Key')\n parser.add_argument('web_api_secret', default=None, help='TickTrader Web API Secret')\n args = parser.parse_args()\n\n # Create instance of the TickTrader Web API client\n client = TickTraderWebClient(args.web_api_address, args.web_api_id, args.web_api_key, args.web_api_secret)\n\n # Create, modify and cancel limit order\n account = client.get_account()\n if (account['AccountingType'] == 'Gross') or (account['AccountingType'] == 'Net'):\n\n # Create limit order\n limit = client.create_trade(\n {\n 'Type': 'Limit',\n 'Side': 'Buy',\n 'Symbol': 'EURUSD' if (account['AccountingType'] == 'Gross') else 'EUR/USD',\n 'Amount': 10000,\n 'Price': 1.0,\n 'Comment': 'Buy limit from Web API sample'\n })\n\n # Modify limit order\n limit = client.modify_trade(\n {\n 'Id': limit['Id'],\n 'Comment': 'Modified limit from Web API sample'\n })\n\n # Cancel limit order\n client.cancel_trade(limit['Id'])\n\nif __name__ == '__main__':\n main()","sub_path":"TTWebClientSample/trade_with_limit_forex.py","file_name":"trade_with_limit_forex.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"645298703","text":"\nimport sys, os\n\ndef splitFile(file):\n date = ''\n count = 0\n outputname = 'output/out' + str(count) + '.txt'\n #outfile = open(outputname, 'w')\n #outfile.write('Remember to clear out all files in this directory!')\n for line in open(file, 'r').readlines():\n l = line.strip().split('\\t')\n if len(l) == 0:\n continue\n l = l[2].strip().split()\n if len(l) == 0:\n continue\n\n if date != l[0]:\n date = str(l[0])\n count = count + 1\n outputname = 'output/out' + date + '.txt'\n outfile = open(outputname, 'w')\n\n outfile.write(line)\n\n\ndef run():\n filename = sys.argv[1]\n splitFile(filename)\n\n\nif __name__ == '__main__':\n run()\n","sub_path":"project-meddle/meddle_analysis/origin_scripts/split.py","file_name":"split.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"80270876","text":"# -*- coding: utf-8 -*-\n\nimport math\nimport gym\nfrom gym import spaces\nfrom gym.utils import seeding\nimport numpy as np\nfrom time import sleep\n\nimport sys\nimport rospy\n\nfrom trollius import From\nimport pygazebo\nimport pygazebo.msg.world_control_pb2\n\n#Import the necessary ROS messages in order to gather observation, fitness, and send actions\n\nfrom std_srvs.srv import Empty\nfrom geometry_msgs.msg import Twist\nfrom sensor_msgs.msg import LaserScan\nfrom std_msgs.msg import Float32\n\nclass OmniROSEnv(gym.Env):\n metadata = {\n 'render.modes': ['human', 'rgb_array'],\n 
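# Aside: split.py above partitions records into one file per date by watching for
# date changes; itertools.groupby expresses the same idea when the input is already
# grouped by date. A hedged sketch (function names hypothetical; assumes split.py's
# layout -- the date is the first token of the third tab-separated field -- and
# that the output/ directory exists):
import itertools

def date_of(line):
    return line.strip().split("\t")[2].split()[0]

def split_by_date(lines):
    for date, group in itertools.groupby(lines, key=date_of):
        with open("output/out" + date + ".txt", "w") as outfile:
            outfile.writelines(group)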
'video.frames_per_second': 30\n    }\n\n    def __init__(self):\n\n        rospy.init_node('ddpg_node', anonymous=False)\n        self.subscription_fitness = rospy.Subscriber(\"/omniROS/fitness\", Float32, self.callbackFitness)\n        self.subscription_lidar = rospy.Subscriber(\"/omniROS/lidarScan\", LaserScan, self.callbackLaserScan)\n        self.publisher_action = rospy.Publisher(\"/omniROS/cmd_vel\", Twist, queue_size=1)\n        self.pause_service = rospy.ServiceProxy('/gazebo/pause_physics', Empty)\n        self.unpause_service = rospy.ServiceProxy('/gazebo/unpause_physics', Empty)\n        self.reset_service = rospy.ServiceProxy('/omniROS/resetSimulation', Empty)\n\n        self.fitness = np.float64(0.0)\n        self.lidar = np.full((360, 1), 0.0)#np.ndarray(shape=(360,0), dtype=float)\n\n        self.Vx_max_action = 1.0\n        self.Vy_max_action = 1.0\n        self.Vz_max_action = 6.0 #In radians/sec\n\n        self.lidar_distance_min = 0.0\n        self.lidar_distance_max = 10.0\n\n        self.goal_position = 0.05 # was 0.5 in gym, 0.45 in Arnaud de Broissia's version\n        self.power = 0.0015\n\n        self.viewer = None\n        self.action_space = spaces.Box( np.array([-self.Vx_max_action,-self.Vy_max_action,-self.Vz_max_action]), np.array([self.Vx_max_action,self.Vy_max_action,self.Vz_max_action]))\n        self.observation_space = spaces.Box(self.lidar_distance_min, self.lidar_distance_max, shape=(360,), dtype=np.float32)\n\n        self.i = 0\n\n        self.seed()\n        self.reset()\n\n\n    def seed(self, seed=None):\n        self.np_random, seed = seeding.np_random(seed)\n        return [seed]\n\n    def callbackFitness(self,msgFitness):\n        self.fitness = np.float64(msgFitness.data)\n\n    def callbackLaserScan(self,msgScan):\n        self.lidar = np.float64(msgScan.ranges)\n        self.lidar[np.isinf(self.lidar)] = 0 #Replace all np.inf caused by the lidar not seeing anything by zero\n\n    def pauseSim(self):\n        rospy.wait_for_service('/gazebo/pause_physics')\n        self.pause_service()\n\n    def unpauseSim(self):\n        rospy.wait_for_service('/gazebo/unpause_physics')\n        self.unpause_service()\n\n    def step(self, action):\n        self.i = self.i + 1\n        if rospy.is_shutdown():\n            sys.exit()\n        print(\"\\nStep n°%d\\n\"%(self.i))\n        print(\"Observation : \")\n        print(self.lidar)\n        print(\"\\nAction : \")\n        print(action)\n\n        msg = Twist()\n        if(not np.isnan(action[0][0])):\n            msg.linear.x = action[0][0]\n        if(not np.isnan(action[0][1])):\n            msg.linear.y = action[0][1]\n        if(not np.isnan(action[0][2])):\n            msg.angular.z = action[0][2]\n\n        self.publisher_action.publish(msg)\n\n        message = pygazebo.msg.world_control_pb2.WorldControl()\n        message.pause = True\n        message.multi_step = 100\n        #yield From(publisherStep.publish(message))\n\n        done = bool(self.fitness >= 4.0)\n        if done:\n            self.fitness += 10.0\n\n        return self.lidar, self.fitness, done, {}\n\n    def reset(self):\n        rospy.wait_for_service('/omniROS/resetSimulation')\n        self.reset_service()\n        self.state = self.lidar\n        self.i = 0\n        self.unpauseSim()\n        return np.array(self.state)\n\n    def close(self):\n        if self.viewer:\n            self.viewer.close()\n            self.viewer = None\n","sub_path":"envs/omniROS/omniROS_env.py","file_name":"omniROS_env.py","file_ext":"py","file_size_in_byte":3960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}{"seq_id":"370178466","text":"\"\"\"This very similar problem came up recently during a technical\ninterview with a social media company that I'll refer to as\n'FrontOfHeadTome'.\n\nGiven a 4-sided maze, determine if it is solvable.\n\nmaze = [[0,0,0,0,0],\n        [0,0,1,0,1],\n        [0,1,0,0,0],\n        [0,0,1,0,1],\n        [0,1,2,0,1]]\n\nYou can travel on 0's, but not on 1's.\nAlways start in 
upper-leftmost cell.\nstart = maze[0][0]\nend = 3\n\nThe implementations below were adapted from:\nhttps://www.laurentluce.com/posts/solving-mazes-using-python-simple-recursivity-and-a-search/\n\"\"\"\n\n\ndef slow_search(x, y, grid):\n    \"\"\"Depth-first search for the goal cell (value 2).\n\n    :param x: row index of the cell to explore\n    :param y: column index of the cell to explore\n    :param grid: 2D list of ints; 0 = open, 1 = wall, 2 = goal, 3 = visited\n    :return: True if the goal is reachable from (x, y), else False\n    \"\"\"\n\n    if grid[x][y] == 2:\n        print(f'**found at {x},{y}**')\n        return True\n\n    if grid[x][y] == 1:\n        print(f'wall at {x},{y}')\n        return False\n\n    if grid[x][y] == 3:\n        print(f'previously visited cell at {x},{y}')\n        return False\n\n    print(f'visiting {x},{y}')\n\n    # mark as visited\n    grid[x][y] = 3\n\n    # explore neighbors clockwise starting with the cell below.\n    # if within dimensions...\n    # ...try cell below\n    if ((x < len(grid) - 1 and slow_search(x + 1, y, grid))\n            # ...try cell to the left\n            or (y > 0 and slow_search(x, y - 1, grid))\n            # ...try cell above\n            or (x > 0 and slow_search(x - 1, y, grid))\n            # ...try cell to the right (bound by the row length, so non-square mazes work)\n            or (y < len(grid[x]) - 1 and slow_search(x, y + 1, grid))):\n        return True\n    return False\n\n\nif __name__ == \"__main__\":\n\n    maze1 = [[0, 0, 0, 0, 0],\n             [0, 0, 1, 0, 1],\n             [0, 1, 0, 0, 0],\n             [0, 0, 1, 0, 1],\n             [0, 1, 2, 0, 1]]\n\n    maze2 = [[0, 0, 0, 0, 1],\n             [0, 1, 1, 0, 0],\n             [0, 0, 1, 0, 1],\n             [0, 1, 2, 0, 1]]\n\n    slow_search(0, 0, maze2)\n","sub_path":"src/maze.py","file_name":"maze.py","file_ext":"py","file_size_in_byte":1828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}{"seq_id":"144661177","text":"from paintstorch.network import Generator, Illustration2Vec\n\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass PaintsTorch2(nn.Module):\n    def __init__(self, features: int, g: str, f1: str, bn: bool) -> None:\n        super(PaintsTorch2, self).__init__()\n\n        ckpt = torch.load(g)\n        G = nn.DataParallel(Generator(features, bn=bn))\n        G.load_state_dict(ckpt[\"G\"] if \"G\" in ckpt.keys() else ckpt)\n        \n        self.G = G.module.eval().cpu()\n        self.F1 = Illustration2Vec(f1).eval().cpu()\n\n        for param in self.G.parameters():\n            param.requires_grad = False\n        for param in self.F1.parameters():\n            param.requires_grad = False\n\n    @torch.no_grad()\n    def forward(self, x: torch.Tensor, h: torch.Tensor) -> torch.Tensor:\n        mask = x[:, -1].unsqueeze(1)\n        y, *_ = self.G(x, h, self.F1(x))\n        y = x[:, :3] * (1 - mask) + y * mask\n        return y\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--features\", type=int, default=32)\nparser.add_argument(\"--model\", type=str, required=True)\nparser.add_argument(\"--save\", type=str, required=True)\nparser.add_argument(\"--bn\", action=\"store_true\")\nargs = parser.parse_args()\n\n\nmodel = PaintsTorch2(args.features, args.model, \"./models/i2v.pth\", args.bn)\nmodel = model.eval()\n\nfake = torch.zeros((1, 4, 512, 512)), torch.zeros((1, 4, 128, 128))\n\ntorch.onnx.export(\n    model,\n    fake,\n    args.save,\n    verbose=True,\n    input_names=[\"input\", \"hints\"],\n    output_names=[\"illustration\"],\n    opset_version=9,\n    do_constant_folding=True,\n)","sub_path":"evaluation/onnx.py","file_name":"onnx.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
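The export script above only writes the ONNX graph; a minimal sketch of sanity-checking the result with onnxruntime. The file name "paintstorch2.onnx" is an illustrative stand-in for whatever --save was set to, and the zero inputs mirror the dummy tensors used during export; none of this is part of the original script:

import numpy as np
import onnxruntime as ort

# Shapes mirror the dummy tensors fed to torch.onnx.export above.
x = np.zeros((1, 4, 512, 512), dtype=np.float32)
h = np.zeros((1, 4, 128, 128), dtype=np.float32)

session = ort.InferenceSession("paintstorch2.onnx")  # hypothetical --save path
(illustration,) = session.run(["illustration"], {"input": x, "hints": h})
print(illustration.shape)  # the generator blends back onto the first three input channels

Running the exported graph once like this catches shape or opset mismatches immediately, which is cheaper than discovering them downstream.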
{"seq_id":"273959215","text":"__all__ = ['imread']\n\nfrom skimage.util.dtype import convert\nimport imread as _imread\n\ndef imread(record, dtype=None):\n    \"\"\"Load an image from a WARC record.\n\n    Parameters\n    ----------\n    record : WARC Record\n\n    \"\"\"\n    im = _imread.imread_from_blob(record.payload.read())\n    if dtype is not None:\n        im = convert(im, dtype)\n    return im\n","sub_path":"skimage/io/_plugins/warc.py","file_name":"warc.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}{"seq_id":"330855475","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nfrom rest_framework import routers\nfrom api.views import PrinterLog, Printers, Dashboard, Connection, Log\n\nrouter = routers.DefaultRouter()\nrouter.register('printers', Printers)\nrouter.register('logs', PrinterLog)\nurlpatterns = patterns(\n    '',\n    # Examples:\n    # url(r'^$', 'ricoh.views.home', name='home'),\n    # url(r'^blog/', include('blog.urls')),\n    url(r'^admin/', include(admin.site.urls)),\n    url(r'^dashboard/', Dashboard.as_view(), name='dashboard'),\n    url(r'^log/', Log.as_view(), name='log'),\n    url(r'^dashboard/connection', Connection.as_view(), name='connection'),\n    url(r'^api/', include(router.urls))\n)\n","sub_path":"ricoh/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
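The DefaultRouter above derives list and detail routes for each registered viewset; a quick way to see exactly what it generated, using DRF's documented router.urls property (the imports assume the same api.views module as the file above):

from rest_framework import routers
from api.views import Printers, PrinterLog

router = routers.DefaultRouter()
router.register('printers', Printers)
router.register('logs', PrinterLog)

# Each entry is a regular Django URL pattern, e.g. printers/ (list) and
# printers/<pk>/ (detail), plus the API root view DefaultRouter adds.
for pattern in router.urls:
    print(pattern)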
{"seq_id":"101584140","text":"### SOURCES:\n### https://docs.python.org/3/library/shutil.html#module-shutil\n### http://boto.cloudhackers.com/en/latest/#\n### https://python-forum.io/Thread-Menu-selection-using-function\n\nimport os\nimport shutil\nimport boto\nimport datetime\nimport string\nimport tarfile\nfrom boto.s3.connection import S3Connection\nfrom boto.s3.key import Key\nfrom datetime import timedelta\nfrom cryptography.fernet import Fernet\n\n### variables:\n\n### date/time variables\ntoday = str(datetime.date.today())\nnow = datetime.datetime.now()\n\n### connection info:\naws_secret_access_key = 'access_key'\naws_access_key = 'access_key'\naws_bucket_name = 'bucket name'\naws_folder_name = '/tmp/'\n\n### backup state (set by startBackup):\nbackup_name = ''\nbackup_dir = ''\n\n### MENU\ndef showmenu():\n    print(\"\\nBackup menu\")\n    print(\"---------------\")\n    print(\"A - Start backup\")\n    print(\"B - Remove backup\")\n    print(\"C - Status backup\")\n    print(\"Q - Exit\\n\")\n\ndef menu():\n    while True:\n        showmenu()\n        choice = input(\"Enter your choice:\")\n        if choice == 'A':\n            startBackup()\n        elif choice == 'B':\n            removeBackup()\n        elif choice == 'C':\n            statusBackup()\n        elif choice == 'Q':\n            return\n        else:\n            print(\"Try again\")\n\ndef startBackup():\n    global backup_name, backup_dir\n    print(str(now) + ' - Backup is starting!')\n### insert backup info:\n    insert_backup_name = input('Insert database name where you want to backup: ')\n    backup_name = insert_backup_name + '.sql'\n    insert_backup_dir = input('Insert path to directory where you want to save backup: /tmp/...')\n    backup_dir = '/tmp/' + insert_backup_dir + '/'\n\n## backup info:\n    backup_create_dir = 'mkdir -p ' + backup_dir\n    backup_mysqldump = \"mysqldump '\" + insert_backup_name + \"' > '\" + backup_dir + backup_name + \"'\"\n    os.system(backup_create_dir)\n    os.system(backup_mysqldump)\n\n### backup/archive variables:\n    archieve_name = backup_name\n    backup_path = backup_dir + backup_name\n    archieve_path = backup_dir + archieve_name\n\n### backuping/archiving:\n    print(str(now) + ' - Creating archive for ' + backup_name)\n## shutil copying and removal functions\n    shutil.make_archive(archieve_path, 'gztar', backup_dir)\n    print(str(now) + ' - Completed archiving database')\n    full_archive_file_path = archieve_path + \".tar.gz\"\n    full_archive_name = archieve_name + \".tar.gz\"\n\n### Connect to S3\n    s3 = S3Connection(aws_access_key, aws_secret_access_key)\n    bucket_name = s3.get_bucket(aws_bucket_name)\n\n### Upload backup to S3\n    print(str(now) + ' - Uploading file archive ' + full_archive_name + '...')\n    s3_bucket = Key(bucket_name)\n    s3_bucket.key = aws_folder_name + full_archive_name\n    print(s3_bucket.key)\n    s3_bucket.set_contents_from_filename(full_archive_file_path)\n    s3_bucket.set_acl(\"public-read\")\n\n    print(str(now) + ' - Clearing previous archives ' + full_archive_name + '...')\n    shutil.rmtree(backup_dir)\n    print(str(now) + ' - Removed backup of local database')\n    print(str(now) + ' - Backup job is done')\n\ndef removeBackup():\n    if os.path.exists(backup_dir + backup_name):\n### removing backup file\n        print(\"Removing backup file \" + backup_dir + backup_name)\n        os.remove(backup_dir + backup_name)\n        print(\"Removed\")\n    else:\n        print(\"The file does not exist!\")\n\ndef statusBackup():\n    if os.path.isfile(backup_dir + backup_name):\n        print(\"Backup file \" + backup_dir + backup_name + \" exists\")\n    else:\n        print(\"File not exist\")\n\nif __name__ == '__main__':\n    menu()\n","sub_path":"backup_sql_upload_s3_updated.py","file_name":"backup_sql_upload_s3_updated.py","file_ext":"py","file_size_in_byte":3754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}{"seq_id":"442408646","text":"import datetime\nimport socket\n\n__author__ = 'Robin Elvin'\n\n\n\"\"\"\n    Class to handle Heatmiser WiFi thermostats that use the\n    modified V3 protocol\n\"\"\"\nclass HeatmiserWiFi(object):\n    DEFAULT_OPTIONS = {\n        'host': 'heatmiser',\n        'port': 8068,\n        'pin': 0000\n    }\n\n    def __init__(self, host=DEFAULT_OPTIONS['host'], pin=DEFAULT_OPTIONS['pin']):\n        self.host = host\n        self.pin = pin\n        self.socket = None\n        self.port = HeatmiserWiFi.DEFAULT_OPTIONS['port']\n\n    @staticmethod\n    def w2b(word):\n        return [word & 0xff, word >> 8]\n\n    @staticmethod\n    def b2w(lsb, msb):\n        return lsb + (msb << 8)\n\n    @staticmethod\n    def crc16(octets):\n        def crc16_4bits(crc, nibble):\n            lookup = (0x0000, 0x1021, 0x2042, 0x3063,\n                      0x4084, 0x50A5, 0x60C6, 0x70E7,\n                      0x8108, 0x9129, 0xA14A, 0xB16B,\n                      0xC18C, 0xD1AD, 0xE1CE, 0xF1EF)\n            return ((crc << 4) & 0xffff) ^ lookup[(crc >> 12) ^ nibble]\n\n        crc = 0xffff\n        for octet in octets:\n            crc = crc16_4bits(crc, octet >> 4)\n            crc = crc16_4bits(crc, octet & 0x0f)\n\n        return crc\n\n    def open(self):\n        if self.socket is not None:\n            return\n        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        s.connect((self.host, self.port))\n        self.socket = s\n\n    def close(self):\n        self.socket.close()\n        self.socket = None\n\n    \"\"\" Construct an arbitrary thermostat command \"\"\"\n    def command(self, op, data):\n        # Ensure socket is open\n        try:\n            self.open()\n        except Exception as ex:\n            raise HeatmiserException(str(ex))\n\n        # Construct the command\n        length = 7 + len(data)\n        cmd = [op, ]\n        cmd.extend(self.w2b(length))\n        cmd.extend(self.w2b(self.pin))\n        cmd.extend(data)\n        cmd.extend(self.w2b(self.crc16(cmd)))\n\n        # Convert the command to binary\n        cmd = bytes(cmd)\n\n        # Send the command to the thermostat\n        try:\n            self.socket.send(cmd)\n        except Exception as ex:\n            self.close()\n            raise HeatmiserException('Failed to send command to thermostat: %s' % str(ex))\n\n    \"\"\" Deconstruct an arbitrary thermostat response \"\"\"\n    def response(self):\n        # Receive a response from the thermostat\n        try:\n            rsp = self.socket.recv(0x10000)\n        except Exception as ex:\n            self.close()\n            raise HeatmiserException('No response from thermostat: %s' % str(ex))\n\n        # Split the response into octets\n        rsp = list(rsp)\n\n        # Extract interesting fields\n        op = rsp[0]\n        length = 
self.b2w(rsp[1], rsp[2])\n data = rsp[3:-2]\n crc = self.b2w(rsp[-2], rsp[-1])\n\n # Error checking\n if length != len(rsp):\n raise HeatmiserException(\"Length field mismatch in thermostat response\")\n crc_actual = self.crc16(rsp[:-2])\n if crc != crc_actual:\n raise HeatmiserException(\"CRC incorrect in thermostat response\")\n\n return op, data\n\n def read_dcb(self, start=0x0000, octets=0xffff):\n # Construct and issue the inquiry command\n data = HeatmiserWiFi.w2b(start)\n data.extend(HeatmiserWiFi.w2b(octets))\n self.command(0x93, data)\n\n # Read the response\n op, data = self.response()\n\n # Perform some basic sanity checks on the response\n if op != 0x94:\n raise HeatmiserException(\"Unexpected opcode in thermostat response\")\n if self.b2w(data[0], data[1]) != start:\n raise HeatmiserException(\"Start address mismatch in thermostat response\")\n length = self.b2w(data[2], data[3])\n if length == 0x0000:\n raise HeatmiserException(\"Incorrect PIN used\")\n if len(data) != length + 4:\n raise HeatmiserException(\"Incorrect length of thermostat response\")\n\n # Return the DCB portion of the response\n return data[4:]\n\n def write_dcb(self, items):\n itemdata = [len(items) & 0xff] # Total number of writing items\n for item in items:\n itemdata.extend(HeatmiserWiFi.w2b(item[0])) # Item address\n itemdata.extend(bytes([len(item[1])])) # Number of bytes to be written\n itemdata.extend(item[1]) # Content bytes\n\n try:\n self.command(0xa3, bytes(itemdata))\n except:\n raise\n\n op, data = self.response()\n\n # Perform some basic sanity checks on the response\n if op != 0x94:\n raise HeatmiserException(\"Unexpected opcode in thermostat response\")\n if self.b2w(data[0], data[1]) != 0:\n raise HeatmiserException(\"Start address not zero in thermostat response\")\n length = self.b2w(data[2], data[3])\n if length == 0x0000:\n raise HeatmiserException(\"Incorrect PIN used\")\n if len(data) != length + 4:\n raise HeatmiserException(\"Incorrect length of thermostat response\")\n\n # Return the DCB portion of the response\n return data[4:]\n\n def dcb_to_status(self, dcb):\n # Sanity check the DCB length field\n status = {}\n status['dcblength'] = self.b2w(dcb[0], dcb[1])\n if len(dcb) != status['dcblength']:\n raise HeatmiserException(\"DCB length mismatch\")\n\n # Device type and version\n lookup = lambda value, names: names.get(value, value)\n status['product'] = {\n 'vendor': lookup(dcb[2], {0: 'Heatmiser', 1: 'OEM'}),\n 'version': float(dcb[3] & 0x7f) / 10,\n 'model': lookup(dcb[4], {0: 'DT', 1: 'DT-E', 2: 'PRT', 3: 'PRT-E', 4: 'PRTHW', 5: 'TM1'})\n }\n\n # Current date and time\n timebase = 41\n if status['product']['model'] in ('PRTHW', 'TM1'):\n timebase = 44\n\n status['time'] = datetime.datetime(2000 + dcb[timebase], dcb[timebase+1], dcb[timebase+2], dcb[timebase+4], dcb[timebase+5], dcb[timebase+6])\n\n # General operating status\n status['enabled'] = dcb[21]\n status['keylock'] = dcb[22]\n\n # Holiday mode\n holiday = dcb[25:30+1]\n status['holiday'] = {\n 'time': datetime.datetime(2000 + holiday[0], holiday[1], holiday[2], holiday[3], holiday[4]),\n 'enabled': holiday[5]\n }\n\n # Fields that only apply to models with thermometers\n if status['product']['model'] != 'TM1':\n # Temperature configuration\n status['config'] = {\n 'units': lookup(dcb[5], {0: 'C', 1: 'F'}),\n 'switchdiff': dcb[6] / 2,\n 'caloffset': self.b2w(dcb[8], dcb[9]),\n 'outputdelay': dcb[10],\n 'locklimit': dcb[12],\n 'sensor': lookup(dcb[13], {0: 'internal', 1: 'remote', 2: 'floor', 3: 'internal + floor', 4: 
'remote + floor'}),\n 'optimumstart': dcb[14]\n }\n\n # Run mode\n status['runmode'] = lookup(dcb[23], {0: 'heating', 1: 'frost'})\n\n # Frost protection\n status['frostprotect'] = {\n 'enabled': dcb[7],\n 'target': dcb[17]\n }\n\n # Floor limit\n if status['product']['model'].endswith('-E'):\n status['floorlimit'] = {\n 'limiting': dcb[3] >> 7,\n 'floormax': dcb[20]\n }\n\n # Current temperature(s)\n temps = dcb[33:38+1]\n temperature = lambda ts: None if self.b2w(ts[0], ts[1]) == 0xffff else float(self.b2w(ts[0], ts[1])) / 10\n status['temperature'] = {\n 'remote': temperature(temps[0:1+1]),\n 'floor': temperature(temps[2:3+1]),\n 'internal': temperature(temps[4:5+1])\n }\n\n # Status of heating\n status['heating'] = {\n 'on': dcb[40],\n 'target': dcb[18],\n 'hold': self.b2w(dcb[31], dcb[32])\n }\n\n # Learnt rate of temperature rise\n status['rateofchange'] = dcb[15]\n\n # Error code\n status['errorcode'] = lookup(dcb[39], {0: None, 0xe0: 'internal', 0xe1: 'floor', 0xe2: 'remote'})\n\n # Fields that only apply to models with hot water control\n if status['product']['model'] in ('PRTHW', 'TM1'):\n # Status of hot water\n status['hotwater'] = {\n 'on': dcb[43],\n 'boost': self.b2w(dcb[31], dcb[32])\n }\n # Away mode\n status['awaymode'] = lookup(dcb[16], {0: 'home', 1: 'away'})\n\n # Program mode\n status['config']['progmode'] = lookup(dcb[16], {0: '5/2', 1: '7'})\n\n # Program entries - does not apply to non-programmable thermostats\n if not 'DT' in status['product']['model']:\n # Find the start of the program data\n # Weekday/Weekend or Mon/Tue/Wed/Thu/Fri/Sat/Sun\n days = 2 if status['config']['progmode'] == '5/2' else 7\n progbase = 51 if status['product']['model'] in ('PRTHW', 'TM1') else 48\n if days == 7:\n if 'PRT' in status['product']['model']:\n progbase += 24\n if status['product']['model'] in ('PRTHW', 'TM1'):\n progbase += 32\n\n # Heating comfort levels program\n prog = dcb[progbase:]\n if status['product']['model'].startswith('PRT'):\n status['comfort'] = []\n for day in range(0, days):\n daydata = []\n for entry in range(0, 4):\n if prog[0] < 24:\n daydata.append({'time': datetime.time(prog[0], prog[1]), 'target': prog[2]})\n prog = prog[3:]\n status['comfort'].append(daydata)\n\n # Hot water control program\n if status['product']['model'].startswith('TM1') or status['product']['model'].startswith('PRTHW'):\n status['timer'] = []\n for day in range(0, days):\n daydata = []\n for entry in range(0, 4):\n if prog[0] < 24:\n daydata.append({'on': datetime.time(prog[0], prog[1]), 'off': datetime.time(prog[2], prog[3])})\n prog = prog[4:]\n status['timer'].append(daydata)\n\n # TODO rest of dcb\n\n # Return the decoded status\n return status\n\n def status_to_text(self, status):\n # Device type and version\n text = []\n text.append(\"{vendor} {model} version {version}\".format(**status['product']))\n\n # General operating status\n text.append(\"Thermostat is {state}\".format(state='ON' if status['enabled'] else 'OFF'))\n if status['keylock']:\n text.append(\"Keylock active\")\n text.append(\"Time: {time}\".format(time=status['time']))\n\n # Holiday mode\n if status['holiday']['enabled']:\n text.append(\"Holiday until {time}\".format(time=status['holiday']['time']))\n\n # Current temperature(s)\n units = \"deg {units}\".format(units=status['config']['units'])\n temperatures = [\"{temp} {units} ({sensor})\".format(temp=status['temperature'][_sensor], units=units, sensor=_sensor) for _sensor in ('internal', 'floor', 'remote') if status['temperature'][_sensor] is not None]\n if 
len(temperatures) > 0:\n            text.append(\"Temperature \" + ', '.join(temperatures))\n        if status.get('floorlimit', {}).get('limiting', False):\n            text.append(\"(floor limit active)\")\n        if status['config']['caloffset']:\n            text.append(\"Calibration offset {caloffset}\".format(caloffset=status['config']['caloffset']))\n        if status['errorcode']:\n            text.append(\"Error with {errorcode} sensor\".format(errorcode=status['errorcode']))\n\n        # Status of heating\n        line = ''\n        if status['heating']['target']:\n            line = \"Target {target} {units}\".format(target=status['heating']['target'], units=units)\n            if status['heating']['hold']:\n                line += \" hold for {minutes} minutes\".format(minutes=status['heating']['hold'])\n            text.append(line)\n        line = ''\n        if status['heating']['on'] is not None:\n            line = \"Heating is {heating}\".format(heating='ON' if status['heating']['on'] else 'OFF')\n            if status['runmode']:\n                line += \" ({runmode} mode)\".format(runmode=status['runmode'])\n            text.append(line)\n\n        # Status of hot water\n        line = ''\n        if status.get('hotwater', {}).get('on', None) is not None:\n            line = \"Hot water is {water}\".format(water='ON' if status['hotwater']['on'] else 'OFF')\n            if status.get('hotwater', {}).get('boost', False):\n                line += \" boost for {boost} minutes\".format(boost=status['hotwater']['boost'])\n            if status.get('enabled', False) and status.get('awaymode') == 'away':\n                line += \" ({awaymode} mode)\".format(awaymode=status['awaymode'])\n            text.append(line)\n\n        # Feature table\n        features = [\n            # Features 01 to 05 apply to all models\n            ['Temperature format', status['config']['units']],\n            ['Switching differential', status['config']['switchdiff'], units],\n            ['Frost protect', status['frostprotect']['enabled']],\n            ['Frost temperature', status['frostprotect']['target'], units],\n            ['Output delay', status['config']['outputdelay'], 'minutes'],\n            # Feature 06 on non-RF models or 06 to 10 on RF models\n            ['Comms #', 'n/a'],\n            # Feature 07 on non-RF models or 11 on RF models\n            ['Temperature limit', status['config']['locklimit'], units]\n        ]\n        if status.get('comfort', False) or status.get('timer', False):\n            features.extend(\n                [\n                    # Features 08 to 12 on non RF or 12 to 16 on RF models, excludes DT-TS\n                    ['Sensor selection', status['config']['sensor']],\n                    ['Floor limit', status.get('floorlimit', {'floormax': None})['floormax'], units],\n                    ['Optimum start', status['config'].get('optimumstart', 'disabled'), 'hours'],\n                    ['Rate of change', status['rateofchange'], 'minutes / deg C'],\n                    ['Program mode', status['config']['progmode'], 'day'],\n                ]\n            )\n\n        index = 1\n        index2 = 1\n        for feature in features:\n            if index == 6:\n                index2 += 4\n            desc = feature[0]\n            value = feature[1]\n            if len(feature) == 2:\n                _units = ''\n            elif len(feature) == 3:\n                _units = feature[2]\n            if value is None:\n                value = \"n/a\"\n                _units = ''\n            text.append(\"Feature {:02d} ({:02d}): {:<23} {:>3} {}\".format(index, index2, desc, value, _units))\n            index += 1\n            index2 += 2\n\n        # Program entries\n        days = ('Weekday', 'Weekend') if status['config']['progmode'] == '5/2' else ('Monday', 'Tuesday', 'Wednesday',\n                                                                                     'Thursday', 'Friday', 'Saturday',\n                                                                                     'Sunday')\n        hhmm = lambda tm: \"{:%H:%M}\".format(tm)\n        for index in range(len(days)):\n            comfort = list(map(lambda c: \"{} {} {}\".format(hhmm(c['time']), c['target'], units), status['comfort'][index]))\n            timer = list(map(lambda c: \"{}-{}\".format(hhmm(c['on']), hhmm(c['off'])), status['timer'][index] if status.get('timer', False) else []))\n\n            entry = 1\n            for c in comfort:\n                _timer = timer[entry-1] if len(timer) >= entry else ''\n                
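# note: 'comfort' and 'timer' are wrapped in list() above because Python 3\n                # map objects are single-use iterators with no len() or indexing support\n                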
text.append(\"{:<9} {}: {:<14} {}\".format('' if entry > 1 else days[index], entry, c, _timer))\n entry += 1\n\n return '\\n'.join(text)\n","sub_path":"heatmiserV3/wifi.py","file_name":"wifi.py","file_ext":"py","file_size_in_byte":15965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"64944296","text":"from csv import reader, DictReader\nfrom epics import PV\n\nimport sys\nsys.path.insert(0, '..')\nfrom scLinac import LINACS, Cavity\n\nclass PvInvalid(Exception):\n def __init__(self, message):\n super(PvInvalid, self).__init__(message)\n\nclass Fault:\n def __init__(self, tlc, severity, rank, level, suffix, okValue, faultValue):\n self.tlc = tlc\n self.severity = severity\n self.rank = rank\n self.level = level\n self.suffix = suffix\n self.okValue = okValue\n self.faultValue = faultValue\n\n def __gt__(self, other):\n return self.rank > other.rank\n \n def isConnected(self, cavity):\n pass\n\n def isFaulted(self, faultPV):\n\n if faultPV.status == None:\n raise PvInvalid(faultPV)\n \n if self.okValue: \n return (faultPV.value != self.okValue)\n \n elif self.faultValue:\n return (faultPV.value == self.faultValue)\n \n else:\n raise(\"Weird state, oh no\")\n\n def writeToPVs(self):\n # if faulted, write tlc to CUDSTATUS pv and severity to CUDSEVR pv\n pass\n\nfaults = []\ncsvFile = DictReader(open(\"faults.csv\"))\ncsvFile.next()\nfor row in csvFile:\n if row[\"PV Suffix\"] and row[\"Level\"]==\"CAV\":\n faults.append(Fault(row[\"Three Letter Code\"], row[\"Severity\"], \n csvFile.line_num, row[\"Level\"], row[\"PV Suffix\"],\n row[\"OK If Equal To\"], row[\"Faulted If Equal To\"]))\n\n\n\n\n","sub_path":"BackEnd/fault.py","file_name":"fault.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"624675723","text":"from bs4 import BeautifulSoup\nimport urllib.request\nimport os\n\n\ndef search_spider():\n url = \"https://en.wikipedia.org/wiki/Deep_learning\"\n source_code = urllib.request.urlopen(url)\n plain_text = source_code\n soup = BeautifulSoup(plain_text, \"html.parser\")\n\n result_title = soup.title\n print(\"The title is\" , result_title)\n\n result_a = soup.find_all('a')\n print(\"The a tags are\", result_a)\n\n for link in result_a:\n result_href = link.get('href')\n print(result_href)\n\n\n\nsearch_spider()","sub_path":"ICP 03/ICP03/Source/WebScraping.py","file_name":"WebScraping.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"487571354","text":"from baseparser import BaseParser\nfrom BeautifulSoup import BeautifulSoup, Tag\n\nimport datetime\n\nclass SolHaberParser(BaseParser):\n SUFFIX = ''\n domains = ['haber.sol.org.tr']\n\n end_date = datetime.date.today().isoformat()\n start_date = (datetime.date.today()-datetime.timedelta(days=1)).isoformat()\n\n feeder_pat = '^http://haber.sol.org.tr/'\n feeder_pages = ['http://haber.sol.org.tr/arsiv?icerik=All&tarih%%5Bmin%%5D%%5Bdate%%5D=%s&tarih%%5Bmax%%5D%%5Bdate%%5D=%s' % (start_date, end_date)]\n\n def _parse(self, html):\n soup = BeautifulSoup(html, convertEntities=BeautifulSoup.HTML_ENTITIES,\n fromEncoding='utf-8')\n\n self.meta = soup.findAll('meta')\n elt = soup.find(\"h2\", attrs={\"class\": \"title node-title\"})\n if elt is None:\n self.real_article = False\n return\n self.title = elt.getText()\n self.byline = ''\n self.date = soup.find('div', attrs={'class': 
{"seq_id":"487571354","text":"from baseparser import BaseParser\nfrom BeautifulSoup import BeautifulSoup, Tag\n\nimport datetime\n\nclass SolHaberParser(BaseParser):\n    SUFFIX = ''\n    domains = ['haber.sol.org.tr']\n\n    end_date = datetime.date.today().isoformat()\n    start_date = (datetime.date.today()-datetime.timedelta(days=1)).isoformat()\n\n    feeder_pat = '^http://haber.sol.org.tr/'\n    feeder_pages = ['http://haber.sol.org.tr/arsiv?icerik=All&tarih%%5Bmin%%5D%%5Bdate%%5D=%s&tarih%%5Bmax%%5D%%5Bdate%%5D=%s' % (start_date, end_date)]\n\n    def _parse(self, html):\n        soup = BeautifulSoup(html, convertEntities=BeautifulSoup.HTML_ENTITIES,\n                             fromEncoding='utf-8')\n\n        self.meta = soup.findAll('meta')\n        elt = soup.find(\"h2\", attrs={\"class\": \"title node-title\"})\n        if elt is None:\n            self.real_article = False\n            return\n        self.title = elt.getText()\n        self.byline = ''\n        self.date = soup.find('div', attrs={'class': 'node-date'}).getText()\n\n        div = soup.find('div', attrs={'class': 'makale-govde'})\n        if div is None:\n            # Hack for video articles\n            div = soup.find('div', 'emp-decription')\n            if div is None:\n                self.real_article = False\n                return\n        self.body = '\\n'+'\\n\\n'.join([x.getText() for x in div.childGenerator()\n                                       if isinstance(x, Tag) and x.name == 'p'])\n","sub_path":"parsers/solhaber.py","file_name":"solhaber.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}{"seq_id":"616264434","text":"# https://en.wikipedia.org/wiki/Naive_Bayes_classifier\n# a basic naive bayes predictor\n\n# input values are the starting row of the test set to use,\n# ending row of the test set to use, and an identifier.\n# I used letters a-d as the identifiers and ran\n# four instances of this script at the same time.\n\n# Based on submission 3, but without the day variable.\n\nimport MySQLdb\nimport pandas as pd \nimport sys\n\ndb = MySQLdb.connect(\"localhost\", \"python\", \"password\", \"talkingdata\")\n\ndef run_query(query):\n    \"\"\"runs a mysql query and returns a dict\"\"\"\n    db.query(query)\n    dbResult = db.store_result()\n    dbFetched = dbResult.fetch_row(maxrows = 0, how = 2)\n    return dbFetched\n\n#reading tables\n\ndef get_dict(tableName, var, p, folder):\n    df = pd.read_csv('processing/submission' + str(folder) + \n        '/' + tableName + '.csv')\n    return dict(zip(df[var], df[p]))\n    \npAppAt = get_dict('AppAttributed', 'app', 'pApp', 1)\npApp = get_dict('pApp', 'app', 'pApp', 1)\npOSat = get_dict('OSattributed', 'os', 'pOS', 1)\npOS = get_dict('pOS', 'os', 'pOS', 1)\npChannelAt = get_dict('ChannelAttributed', 'channel', 'pChannel', 1)\npChannel = get_dict('pChannel', 'channel', 'pChannel', 1)\npHourAt = get_dict('HourAttributed', 'hour', 'pHour', 1)\npHour = get_dict('pHour', 'hour', 'pHour', 1)\npDeviceAt = get_dict('DeviceAttributed', 'device', 'pDevice', 1)\npDevice = get_dict('pDevice', 'device', 'pDevice', 1)\npIPAt = get_dict('IPAttributed', 'ip', 'pIP', 3)\npIP = get_dict('pIP', 'ip', 'pIP', 3)\n\n#Running test data\n\ndef get_ip(ipNum):\n    \"\"\"returns probabilities for the ip\"\"\"\n    ipValue = 1\n    if ipNum in pIPAt:\n        ipValue = pIPAt[ipNum]\n    ipValue = ipValue / pIP[ipNum]\n    return ipValue\n\ndef get_app(appNum):\n    \"\"\"returns probabilities for the app\"\"\"\n    appValue = 1\n    if appNum in pAppAt:\n        appValue = pAppAt[appNum]\n    appValue = appValue / pApp[appNum]\n    return appValue\n\ndef get_os(osNum):\n    \"\"\"returns probabilities for the os\"\"\"\n    osValue = 1\n    if osNum in pOSat:\n        osValue = pOSat[osNum]\n    osValue = osValue / pOS[osNum]\n    #print('osValue ' + str(osValue))\n    return osValue\n\ndef get_channel(channelNum):\n    \"\"\"returns probabilities for the channel\"\"\"\n    channelValue = 1\n    if channelNum in pChannelAt:\n        channelValue = pChannelAt[channelNum]\n    channelValue = channelValue / pChannel[channelNum]\n    #print('channelValue ' + str(channelValue))\n    return channelValue\n\ndef get_hour(hourNum):\n    \"\"\"returns probabilities for the hour\"\"\"\n    hourValue = 1\n    if hourNum in pHourAt:\n        hourValue = pHourAt[hourNum]\n    hourValue = hourValue / pHour[hourNum]\n    return hourValue\n\ndef get_device(deviceNum):\n    \"\"\"returns probabilities for the device\"\"\"\n    deviceValue = 1\n    if deviceNum in pDeviceAt:\n        deviceValue = pDeviceAt[deviceNum]\n    deviceValue = deviceValue / pDevice[deviceNum]\n    return deviceValue\n\n#startRow = int(sys.argv[1])\n#testRows = int(sys.argv[2])\n#letter = sys.argv[3]\ntestRows = 18790469\nstartRow 
= 0\nis_attributed = []\nclick_id = []\nsubset = 500000\npAttributed = 456846/184903890\n\n# predicting probabilities\nwhile (startRow < testRows):\n    print(testRows - startRow)\n    query = \"\"\"SELECT click_id, ip, app, device, os, channel,\\\n    HOUR(click_time) AS hour \\\n    FROM test LIMIT \"\"\" + str(subset) + \"\"\" OFFSET \"\"\" + \\\n    str(startRow) + \"\"\";\"\"\"\n    test = pd.DataFrame.from_records(run_query(query))\n    loop = subset\n    if subset > (testRows - startRow):\n        loop = testRows - startRow\n    for i in range(loop):\n        prob = pAttributed * get_app(test['test.app'][i]) * \\\n            get_os(test['test.os'][i]) * \\\n            get_channel(test['test.channel'][i]) * \\\n            get_hour(test['hour'][i]) * \\\n            get_device(test['test.device'][i]) * get_ip(test['test.ip'][i])\n        if prob > 1:\n            prob = 1\n        elif prob < 0.0001:\n            prob = 0\n        prob = round(prob, 5)\n        click_id.append(test['test.click_id'][i])\n        is_attributed.append(prob)\n    startRow = startRow + subset\n\n\nclick_id = pd.Series(click_id)\nis_attributed = pd.Series(is_attributed)\nprobDF = pd.concat((click_id, is_attributed), axis = 1)\nprobDF.columns = ['click_id', 'is_attributed']\nprobDF.to_csv('submissions/submission4.csv', index = False)\n","sub_path":"submission4.py","file_name":"submission4.py","file_ext":"py","file_size_in_byte":4344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}{"seq_id":"57207394","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport inspect\nimport load\nimport os\nimport sys\nimport django\nif not os.path.exists(\"manage.py\"):\n    os.environ['DJANGO_SETTINGS_MODULE'] = 'django_settings.me'\n    django.setup()\n\nNAME = \"my_workout\"\n\nclasses = []\npath = os.path.join(os.path.dirname(__file__), \"classes\")\nmodules = load.sources(NAME, path)\nfor module in modules:\n    for k, v in module.__dict__.items():\n        if inspect.isclass(v) and hasattr(v, \"title\"):\n            setattr(sys.modules[__name__], v.__name__, v)\n            classes.append(v)\n","sub_path":"python/site-packages/my_exercises/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}{"seq_id":"395419840","text":"import torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nimport torch.nn.init as init \nfrom nn_layer_simu import EmbeddingLayer, Encoder\n \nclass Environment(nn.Module):\n    def __init__(self, bsize, embed_dim, encod_dim, numlabel, feature_vec=None, init=False, model='LSTM'):\n        super(Environment, self).__init__()\n        '''\n        # Use drop out to get a simpler model\n        self.dropout = 0.5\n        self.drop = nn.Dropout(self.dropout)\n        '''\n        # classifier\n        self.batch_size = bsize\n        self.nonlinear_fc = False\n        self.n_classes = numlabel\n        self.enc_lstm_dim = encod_dim\n        self.encoder_type = 'Encoder'\n        self.model = model\n        self.embedding = EmbeddingLayer(numlabel, embed_dim)\n        self.gamma = 0.9\n        '''\n        if init:\n            self.embedding.init_embedding_weights(feature_vec, embed_dim)\n        '''\n        self.encoder = eval(self.encoder_type)(self.batch_size, embed_dim, self.enc_lstm_dim, self.model)\n        self.enc2out = nn.Linear(self.enc_lstm_dim, embed_dim)\n        self.enc2rewd = nn.Linear(self.enc_lstm_dim, embed_dim)\n        self.end = numlabel-1\n    \n    # initialise oracle network with N(0,1)\n    # otherwise variance of initialisation is very small => high NLL for data sampled from the same model\n    def init_params(self):\n        for param in self.parameters():\n            
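# draw each parameter in place from N(0, 1); nn.init.normal_ mutates the tensor directly\n            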
init.normal_(param, 0, 1)\n \n def forward(self, seq):\n # seq : (seq, seq_len)\n seq_em, seq_len= seq\n seq_em = self.embedding(seq_em)\n if self.model == 'LSTM':\n enc_out, (h, c) = self.encoder((seq_em, seq_len))\n return enc_out, (h, c)\n else:\n enc_out, h = self.encoder((seq_em, seq_len))\n return enc_out, h\n \n def step(self, click, hidden):\n seq_em = self.embedding(click)\n if self.model == 'LSTM':\n enc_out, (h, c) = self.encoder.step_cell(seq_em, hidden)\n return enc_out, (h, c) \n else:\n enc_out, h = self.encoder.step_cell(seq_em, hidden)\n return enc_out, h\n \n def next_click(self, enc_out, rec_list, real_batch): \n embed_weight = self.embedding.embedding.weight.clone().permute(1,0)\n vec = self.enc2out(enc_out)\n vec = torch.matmul(vec, embed_weight)\n mask = torch.zeros(real_batch, self.n_classes).cuda()\n mask.scatter_(1, rec_list, 1.) \n output = F.softmax(vec * mask, dim=1)\n outputk = output * mask\n #Normalization\n outputk = F.normalize(outputk, p=1)\n return outputk \n \n def reward(self, seq, enc_out):\n seq_em = self.embedding(seq)\n vec = self.enc2rewd(enc_out)\n reward_logit = torch.sum(seq_em.permute(1,0,2) * vec, dim = 2).squeeze()\n reward = torch.sigmoid(reward_logit) \n return reward, reward_logit\n \n def value(self, reward_batch):\n with torch.no_grad():\n value_batch = reward_batch.clone()\n for i in range(1, reward_batch.size(1)):\n value_batch[:, -(i+1)] = self.gamma * value_batch[:, -i] + value_batch[:, -(i+1)]\n return value_batch\n","sub_path":"rl_rec_simulation_2/src/data/MDP.py","file_name":"MDP.py","file_ext":"py","file_size_in_byte":3268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"120947933","text":"\"\"\"\nCreated on Tue Apr 20 2021 01:31\n\n@author: Sanjeev\n\"\"\"\n\n### Custom definitions and classes if any ###\nimport pandas as pd\nimport numpy as np\nimport joblib\n\ndef predictRuns(testInput):\n \n with open('regressionModel.joblib', 'rb') as f:\n regressor = joblib.load(f)\n with open('venueEncoder.joblib', 'rb') as f:\n venueEncoder = joblib.load(f)\n with open('teamEncoder.joblib', 'rb') as f:\n teamEncoder = joblib.load(f)\n\n # rest test data\n testCase = pd.read_csv(testInput)\n\n # rename team\n testCase['batting_team'] = testCase['batting_team'].replace('Punjab Kings', 'Kings XI Punjab')\n testCase['bowling_team'] = testCase['bowling_team'].replace('Punjab Kings', 'Kings XI Punjab')\n \n # # Delhi Daredevils\n # testCase['batting_team'] = testCase['batting_team'].replace('Delhi Capitals', 'Delhi Daredevils')\n # testCase['bowling_team'] = testCase['bowling_team'].replace('Delhi Capitals', 'Delhi Daredevils')\n\n # encode venue and batting & bowling teams\n testCase['venue'] = venueEncoder.transform(testCase['venue'])\n testCase['batting_team']= teamEncoder.transform(testCase['batting_team'])\n testCase['bowling_team']= teamEncoder.transform(testCase['bowling_team'])\n\n # make sure that the order of columns is same as that fed to model\n testCase = testCase[['venue', 'innings', 'batting_team', 'bowling_team']]\n\n # convert input test case into numpy array\n testArray = testCase.to_numpy()\n\n # one hot encode venue, batting and bowling teams\n testCase = np.concatenate((np.eye(42)[testArray[:,0]],\n np.eye(2)[testArray[:,1] -1 ],\n np.eye(15)[testArray[:,2]],\n np.eye(15)[testArray[:,3]],\n ),\n axis = 1)\n\n prediction = regressor.predict(testCase)\n ### Your Code Here ###\n prediction = 7*int(prediction[0])\n prediction = prediction//2 if prediction>70 else 
prediction*2 if prediction<20 else prediction\n    return prediction\n","sub_path":"Submission/predictor.py","file_name":"predictor.py","file_ext":"py","file_size_in_byte":2083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}{"seq_id":"169582777","text":"import numpy as np\nimport pandas as pd\nimport os\nfrom os import walk\n\nclass wikihow:\n\tdef __init__(self):\n\t\tself.df = pd.read_csv('final.csv')\n\t\t\n\tdef print_doc(self,n=0):\n\t\tprint(self.df['Summary'][n])\n\t\tprint(\"\\n======================================\\n\")\n\t\tprint(self.df['Text'][n])\n\t\t\n\tdef preprocess_summary(self,summ):\n\t\tsumm_splits = summ.split(\"\\r\\n\")\n\t\tnew_summ = []\n\t\tfor l in summ_splits:\n\t\t\tif l != '' and not l.isspace():\n\t\t\t\tnew_summ.append(l.strip(', '))\n\t\treturn new_summ\n\t\t\n\tdef prepare_dataset(self):\n\t\tex = 0\n\t\tfor index, row in self.df.iterrows():\n\t\t\tif index < 0:\n\t\t\t\tcontinue\n\t\t\tfn = \"dataset/d_\"+str(index)+\".story\"\n\t\t\ttry:\n\t\t\t\tf = open(fn, \"w\", encoding='utf8')\n\t\t\t\tif type(row[\"Summary\"]) != str or type(row[\"Text\"]) != str:\n\t\t\t\t\tcontinue\n\t\t\t\tsumm = self.preprocess_summary(row['Summary'])\n\t\t\t\ttext = self.preprocess_summary(row['Text'])\n\t\t\t\tfor line in text:\n\t\t\t\t\tf.write(line+\"\\n\\n\")\n\t\t\t\tfor line in summ:\n\t\t\t\t\tf.write(\"@highlight\\n\\n\")\n\t\t\t\t\tf.write(line+\"\\n\\n\")\n\t\t\t\tf.close()\n\t\t\texcept Exception:\n\t\t\t\tif os.path.exists(fn):\n\t\t\t\t\tos.remove(fn)\n\t\t\t\tex += 1\n\t\t\t\tprint(\"Exception occurred! - \",ex)\n\t\tself.split_train_val_test()\n\t\t\n\tdef split_train_val_test(self,url=\"dataset\"):\n\t\tf = []\n\t\tfor (dirpath, dirnames, filenames) in walk(url):\n\t\t\tf.extend(filenames)\n\t\t\tbreak\n\t\t\t\n\t\turllists = open(\"url_lists/all_train.txt\", \"w\")\n\t\tfor file in f[0:200000]:\n\t\t\turllists.write(file+\"\\n\")\n\t\turllists.close()\n\t\turllists = open(\"url_lists/all_val.txt\", \"w\")\n\t\tfor file in f[200000:250000]:\n\t\t\turllists.write(file+\"\\n\")\n\t\turllists.close()\n\t\turllists = open(\"url_lists/all_test.txt\", \"w\")\n\t\tfor file in f[250000:len(f)]:\n\t\t\turllists.write(file+\"\\n\")\n\t\turllists.close()","sub_path":"dataset/wikihow/wikihow.py","file_name":"wikihow.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}{"seq_id":"232276382","text":"\"\"\"\nCreated on Sat Mar. 
09 15:09:17 2019\n\n@author: ygkim\n\nmain for mnist\n\n\"\"\"\n\nfrom __future__ import absolute_import \nfrom __future__ import division \nfrom __future__ import print_function \n\n\nimport argparse\nimport sys\n\nimport numpy as np\nimport tensorflow as tf\n\nimport utils\nimport network\n\nFLAGS = None\n\ndef arg_process():\n \n parser = argparse.ArgumentParser('Implementation for MNIST handwritten digits 2019')\n \n parser.add_argument('--data_path', \n type=str, \n #default='/home/ygkim/mnist/data',\n default='C:\\\\users\\\\Angelo\\\\mnist\\\\data',\n help='The directory where the MNIST images were located', \n required = False)\n \n parser.add_argument('--log_dir', \n type=str, \n #default='/home/ygkim/mnist/data',\n default='C:\\\\users\\\\Angelo\\\\mnist\\\\log',\n help='The directory where the MNIST logs are located', \n required = False)\n \n parser.add_argument('--save_dir', \n type=str, \n #default='/home/ygkim/mnist/data',\n default='C:\\\\users\\\\Angelo\\\\mnist\\\\ckpt',\n help='The directory where the MNIST checkpoint files are located', \n required = False)\n \n parser.add_argument('--img_name',\n type=str, \n default='train-images.idx3-ubyte',\n help='The file name for MNIST image', \n required = False)\n \n parser.add_argument('--label_name',\n type=str, \n default='train-labels.idx1-ubyte',\n help='The file name for MNIST labels', \n required = False)\n \n parser.add_argument('--epoch_size', \n type=int, \n default=100,\n help='Parameter for learning rate', \n required = False)\n \n parser.add_argument('--batch_size', \n type=int, \n default=64,\n help='parameter for batch size', \n required = False)\n \n parser.add_argument('--learning_rate', \n type=float, \n default=0.001,\n help='Parameter for learning rate', \n required = False)\n \n parser.add_argument('--num_layers', \n type=int, \n #default=4,\n default=3,\n help='Parameter for learning rate', \n required = False)\n \n parser.add_argument('--num_nodes',\n nargs='+',\n type=int, \n default=[64,32,32],\n help='Number of Nodes for each Dense Layer', \n required = False)\n \n parser.add_argument('--num_depths',\n nargs='+',\n type=int, \n #default=[16,32,64,128],\n default=[16,32,64],\n help='Number of Output Channels for each Conv Layer', \n required = False)\n \n parser.add_argument('--kernel_size',\n nargs='+',\n type=int, \n #default=[(5, 5),(5, 5),(5, 5),(3, 3)],\n default=[(5, 5),(5, 5),(5, 5)],\n help='Number of Output Channels for each Conv Layer', \n required = False)\n \n parser.add_argument('--nw_type',\n type=str, \n default='conv',\n #default='dense',\n help='The Type of Network dense or conv ?', \n required = False)\n \n parser.add_argument('--activation',\n type=str, \n default='leaky_relu',\n help='The Type of activation ?', \n required = False)\n \n parser.add_argument('--initializers',\n type=str, \n default='he_uniform',\n help='The Type of initializers ?', \n required = False)\n\n args, unkowns = parser.parse_known_args()\n \n return args, unkowns\n\n\nclass net_model:\n\n def __init__(self, sess, name):\n self.sess = sess\n self.name = name\n self.build_net()\n \n def build_net(self):\n with tf.variable_scope(self.name):\n self.optimizer = network.mnist_optimizer(FLAGS, self.sess)\n self.cost, self.opt_op = self.optimizer.optimize()\n self.sum_train = self.optimizer.summarize()\n self.accuracy, self.sum_accuracy, self.logit = self.optimizer.evaluate()\n \n self.optimizer.saver.build()\n \n def train(self, batch_x, batch_y):\n feed_dict={self.optimizer.digit:batch_x, 
self.optimizer.label:batch_y}\n        \n        _, loss = self.optimizer.sess.run([self.opt_op, self.cost], feed_dict=feed_dict)\n        return loss\n    \n    def Save(self, index, training_error, batch_x, batch_y):\n        sum_ptr = self.optimizer.sess.run(self.sum_train, feed_dict={self.optimizer.epoch_cost:training_error})\n        self.optimizer.writer.add_summary(sum_ptr, index)\n        \n        feed_dict={self.optimizer.digit:batch_x, self.optimizer.label:batch_y}\n        \n        acc, sum_ptr, logit = self.optimizer.sess.run([self.accuracy, self.sum_accuracy, self.logit], feed_dict=feed_dict)\n        self.optimizer.writer.add_summary(sum_ptr, index)\n        \n        self.optimizer.save(index)\n        return acc, logit\n    \ndef main(_):\n    \n    print(\"---------------------------------------------------------\")\n    print(\"     Starting MNIST Digit Recognition Learning\")\n    print(\"---------------------------------------------------------\")\n    \n    # initialize\n    mnist_data = utils.mnist_data(FLAGS)\n    \n    sess = tf.Session()\n\n    models = []\n    num_models = 2\n    for m in range(num_models):\n        models.append(net_model(sess, \"net_model\" + str(m)))\n\n    sess.run(tf.global_variables_initializer())\n    \n    per_epoch = int(mnist_data.image_size/FLAGS.batch_size)\n\n    for index in range(FLAGS.epoch_size): \n        mnist_data.shuffle()\n        training_error = 0.\n        for step in range(per_epoch):\n            batch_x, batch_y = mnist_data.next_batch()\n            for m_idx, m in enumerate(models):\n                loss = m.train(batch_x, batch_y)\n                training_error += loss\n                if step%10 == 0:\n                    print(m.name + \" Learning at %d epoch with %d step\" %(index, step), \"==== Cost of %.4f\" %loss)\n        \n        batch_x, batch_y = mnist_data.eval_data()\n        predictions = np.zeros([len(batch_y), 10])\n        for m_idx, m in enumerate(models):\n            acc, logit = m.Save(index, training_error, batch_x, batch_y)\n            \n            predictions += logit\n            print(\" \")\n            print(m.name + \" Evaluation at %d epoch \" %index, \"==== Accuracy of %1.4f\" %acc)\n            print(\" \")\n        \n        ensemble_prediction = tf.equal(\n            tf.argmax(predictions, 1), tf.argmax(batch_y, 1))\n        ensemble_accuracy = tf.reduce_mean(\n            tf.cast(ensemble_prediction, tf.float32))\n        print(\" \")\n        print('Total accuracy:', sess.run(ensemble_accuracy))\n        print(\" \")\n    \nif __name__ == '__main__':\n    \n    FLAGS, unparsed = arg_process()\n    \n    tf.app.run() \n","sub_path":"mnist/mnist_ensemble.py","file_name":"mnist_ensemble.py","file_ext":"py","file_size_in_byte":7714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}{"seq_id":"198084342","text":"import argparse\nimport os\n\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_hub as hub\n\nfrom os import path\nfrom tensorflow import keras\nfrom sklearn.utils import shuffle\n\nmodel_dir = \"model\"\nmodel_file = path.join(model_dir, \"entropy.default.h5\")\n# model_file = path.join(model_dir, \"specificity.default.h5\")\n\ndef calculate_normalized_click_entropy(entropy):\n    m = -1.16439377\n    b = 2.870847\n    d = -0.05665\n    max_entropy = 10.0\n    entropy = np.minimum(entropy, max_entropy)\n    val = (999.0 / (1 + d + np.exp(-1 * (m * entropy + b)))) + 1\n    return val\n    # return np.round(val)  # alternative: round to whole values\n\ndef read_not_df(tsv_file):\n    all_keywords = []\n    all_entropy_clicks = []\n    with open(tsv_file) as fin:\n        line = fin.readline()  # Skip header\n        while True:\n            line = fin.readline()\n            if not line:\n                break\n            split = line.split(\"\\t\")\n            all_keywords.append(split[0])\n            all_entropy_clicks.append(float(split[1]))\n    return all_keywords, all_entropy_clicks\n\ndef embed_keywords(args, all_keywords):\n    embed = hub.load(args.module_url)\n\n    x_embed = 
embed(all_keywords).numpy().astype(np.float32)\n    print(\"x_embed\", x_embed.shape, x_embed.dtype)\n\n    x_length = np.vectorize(len)(all_keywords).reshape((-1, 1))\n    print(\"x_length\", x_length.shape, x_length.dtype)\n\n    x = np.concatenate([x_embed, x_length], axis=-1).astype(np.float32)\n    print(\"x\", x.shape, x.dtype)\n    return x\n\ndef train(args):\n    all_keywords, all_entropy_clicks = read_not_df(args.tsv_file)\n    print(\"Find %d keywords\" % len(all_keywords))\n\n    all_keywords, all_entropy_clicks = shuffle(all_keywords, all_entropy_clicks, random_state=19931201)\n    print(\"Find %d keywords\" % len(all_keywords))\n\n    n_keywords = len(all_keywords)\n    n_train = int(0.8 * n_keywords) + 1\n\n    train_keywords = all_keywords[:n_train]\n    train_entropy_clicks = all_entropy_clicks[:n_train]\n\n    dev_keywords = all_keywords[n_train:]\n    dev_entropy_clicks = all_entropy_clicks[n_train:]\n\n    x_train = embed_keywords(args, train_keywords)\n    x_dev = embed_keywords(args, dev_keywords)\n\n    y_train = calculate_normalized_click_entropy(train_entropy_clicks)\n    print(\"Train %.2f %.2f\" % (min(y_train), max(y_train)))\n    y_dev = calculate_normalized_click_entropy(dev_entropy_clicks)\n\n    print(train_keywords[666], y_train[666])\n    print(dev_keywords[888], y_dev[888])\n\n    model = tf.keras.models.Sequential()\n    model.add(tf.keras.layers.Dense(128, activation='relu'))\n    model.add(tf.keras.layers.Dropout(.5))\n    model.add(tf.keras.layers.Dense(128, activation='relu'))\n    model.add(tf.keras.layers.Dropout(.5))\n    model.add(tf.keras.layers.Dense(128, activation='sigmoid'))\n    model.add(tf.keras.layers.Dense(1))\n    model.compile(loss='mean_squared_error', optimizer='adam')\n    # model.compile(loss='mean_absolute_error', optimizer='adam')\n    model.fit(x_train, y_train,\n              epochs=16,\n              batch_size=256,\n              validation_data=(x_dev, y_dev))\n\n    if not path.exists(model_dir):\n        os.makedirs(model_dir)\n    model.save(model_file)\n\ndef eval(args):\n    model = keras.models.load_model(model_file)\n\n    all_keywords = []\n    y_true = []\n    with open(\"data/uk.specificity.original.tsv\") as fin:\n        while True:\n            line = fin.readline()\n            if not line:\n                break\n            split = line.split(\"\\t\")\n            all_keywords.append(split[0])\n            y_true.append(int(split[1]))\n\n    y_true = np.asarray(y_true)\n    print(\"hard-uk\", y_true.shape)\n    y_true = y_true[y_true > 0]\n    print(\"non-zero\", y_true.shape)\n    y_count = dict()\n    for quer_specificity in y_true:\n        y_count[quer_specificity] = y_count.get(quer_specificity, 0) + 1\n    cum = [(0, 0.0)]\n    for quer_specificity in sorted(y_count.keys()):\n        cum.append((quer_specificity, y_count[quer_specificity] / len(y_true) + cum[-1][-1]))\n    print(cum[-1][-1])\n\n    x = embed_keywords(args, all_keywords)\n    y = model.predict(x, batch_size=256, verbose=0)\n    y = calculate_normalized_click_entropy(y)\n\n    y = np.maximum(np.minimum(y, 1000), 1)\n\n    indexes = np.argsort(np.squeeze(y))\n    for i in range(len(cum) - 1):\n        start = int(cum[i][-1] * len(y))\n        stop = int(cum[i + 1][-1] * len(y))\n        y[indexes[start:stop]] = cum[i + 1][0]\n\n    print(\"Eval %.2f %.2f\" % (min(y), max(y)))\n\n    assert len(all_keywords) == len(y)\n    with open(\"data/uk.specificity.default.tsv\", \"w\") as fout:\n        fout.write(\"keywords\\tquer_specificity\\n\")\n        for keywords, quer_specificity in zip(all_keywords, y):\n            fout.write(\"%s\\t%d\\n\" % (keywords, quer_specificity))\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--do_train\", 
action=\"store_true\")\nparser.add_argument(\"--do_eval\", action=\"store_true\")\nparser.add_argument(\"--tsv_file\", type=str)\nargs = parser.parse_args()\nargs.module_url = \"https://tfhub.dev/google/tf2-preview/nnlm-en-dim128/1\"\n\nif args.do_train:\n train(args)\n\nif args.do_eval:\n eval(args)\n\n\n\n\n","sub_path":"hard_uk/train_default.py","file_name":"train_default.py","file_ext":"py","file_size_in_byte":4893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"164233853","text":"\"\"\"\ncolor bag class\n\"\"\"\n\nclass ColorBag():\n \"\"\"\n Input Handler class\n \"\"\"\n\n def __init__(self):\n\n # background\n self.background = (255, 255, 255)\n\n # ordinary walls\n self.wall = (10, 200, 200)\n\n # moving walls\n self.active_move_wall = (200, 100, 100)\n self.default_move_wall = (10, 100, 100)\n\n # text color\n self.win = (50, 100, 100)\n\n # mic bar\n self.mic_bar_meter = (210, 100, 20)\n\n\n","sub_path":"game/color_bag.py","file_name":"color_bag.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"643057503","text":"#In this project we want to do image processing\nfrom PIL import Image,ImageFilter\nimg = Image.open('./Flowers/flower2.jpg')\nprint(img)\t#image object\nprint(img.format)\t#image format\nprint(img.size)\t#image size\nprint(img.mode)\t#image mode\nprint(dir(img))\t#image dir\n\n#Adding \"BLUR\" Filter to Image\nfiltered_img = img.filter(ImageFilter.BLUR)\nfiltered_img.save(\"blur.png\",'png')\n\n#Adding \"SMOOTH\" Filter to Image: WE NEED PNG FORMAT\n#Adding \"SHARPEN\" Filter to Image: WE NEED PNG FORMAT\n\n#Convert jpeg format to png format\nfiltered2_img = img.convert('L')\nfiltered2_img.save(\"grey.png\", 'png')\nfiltered2_img.show()\t#show image in a seprate page\n\n#Rotate image\nrotate_img = filtered2_img.rotate(90)\nrotate_img.save(\"rotate.png\", 'png')\n\n#Resize image\nresize = filtered2_img.resize((300,300))\nresize.save(\"resize.png\", 'png')\n\n#Crop image\nbox = (100, 100, 1800, 1800)\nregion = filtered2_img.crop(box)\nregion.save(\"crop.png\", 'png')\n\n#Working on astro.jpg for correct resizing (using thumbnail) and write in thumbnail.jpg\nim = Image.open('./Flowers/astro.jpg')\nprint(im.size)\nim.thumbnail((400,200))\nim.save('thumbnail.jpg')\nprint(im.size)\n\n# Grab images from a same specific folder, work on all images (change,resize,convert...) then add the new images in a new folder that we want to create(new) if it is not already created.\n\n#step1:grab first and second arguments (flowers,new)\nimport sys\nimport os \n\nfrom PIL import Image\nimage_folder = sys.argv[1]\noutput_folder = sys.argv[2]\nprint(image_folder, output_folder)\n\n#step2:check if new folder exists, if not create\nif not os.path.exists(output_folder):\n\tos.makedirs(output_folder)\n\n#step3:loop through flowers. 
convert all images to PNG and save to new folder.\nfor items in os.listdir(image_folder):\t#get all files in the same directory\n\timg = Image.open(os.path.join(image_folder, items))\n\tfinal_images = os.path.splitext(items)[0]\t#separate the filename and type and choose the filename\n\timg.save(f'{output_folder}/{final_images}.png',\"png\")\nprint('all done!')","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}{"seq_id":"261022550","text":"from django.urls import path\n\nfrom app.views.submits import upload\nfrom app.views.general import *\nfrom app.utils.dbapi import *\n\nurlpatterns = [\n    # general\n    path('', view_root, name='root'),\n    path('signin/', view_signin, name='signin'),\n    path('signup/', view_signup, name='signup'),\n    path('signout/', view_signout, name='signout'),\n    path('myaccount/', view_myaccount, name='myaccount'),\n    path('myaccount/delete_account_confirmed', view_delete_account_confirmed, name='delmyaccount'),\n\n    # submitter\n    path('upload/', upload, name='submit'),\n    path('tsklist/', view_tsklist_submitter, name='tsklist'),\n    path('tsklist/<int:task_id>', view_tskdetail_submitter, name='tskdetail'),\n    # evaluator\n\n    # admin\n    path('tskstat/', view_tskstat, name='tskstat'),\n    path('tskstat/<int:task_id>/', view_tskstat_per_task, name='tskstat_per_task'),\n    path('tskcreate/', view_tskcreate, name='tskcreate'),\n    path('tskmgmt/', view_tskmgmt, name='tskmgmt'),\n    path('tskmgmt/<int:task_id>/', view_tskmgmt_per_task, name='tskmgmt_per_task'),\n    #debug only\n    path('tskmgmt/<int:task_id>/delete', view_tskmgmt_per_task_del, name='tskmgmt_per_task_del'),\n    path('usrmgmt/', view_usrmgmt, name='usrmgmt'),\n    path('usrmgmt/<int:user_id>/', view_usrmgmt_per_user, name='usrmgmt_per_user'),\n\n    # api\n    path('api/db/update_account', api_update_account),\n    path('api/db/create_account', api_create_account),\n    path('api/db/signin', api_signin),\n    path('api/db/users/', api_get_users, name='api_db_users'),\n    path('api/db/user/<int:user_id>', api_get_user_detail),\n    path('api/db/tasks/', api_get_tasks, name='api_db_tasks'),\n    path('api/db/task/create', api_create_task, name='api_db_create_task'),\n    path('api/db/task/update', api_update_task, name='api_db_update_task'),\n    path('api/db/task/raw_create', api_create_raw_type, name='api_db_create_raw'),\n    path('api/db/task/<int:task_id>/raws', api_get_raw_types, name='api_db_get_raws'),\n    path('api/db/task/<int:task_id>/pending', api_get_pending_submitters, name='api_db_get_pendings'),\n    path('api/db/task/<int:task_id>/approved', api_get_approved_submitters, name='api_db_get_approved'),\n    path('api/db/task/<int:task_id>/raw/<int:raw_id>', api_get_raw_data),\n    path('api/db/task/<int:task_id>/update_pending', api_update_pending_submitters, name='api_db_update_pendings'),\n    path('api/db/task/<int:task_id>/task_accepted', api_get_accepted_data),\n    path('api/db/task/<int:task_id>/task_pending', api_get_pending_data),\n\n    path('api/db/task/<int:task_id>/status/<int:user_id>', api_submitter_pending_status, name='api_db_submitter_status'),\n    path('api/db/task/<int:task_id>/request/<int:user_id>', api_submitter_request_permission, name='api_db_submitter_request'),\n]\n\napp_name = 'app'","sub_path":"app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}{"seq_id":"648120927","text":"# import requests\n# from bs4 import BeautifulSoup\n# import time\n# import pandas as pd\n# import matplotlib\n# from datetime import datetime\n# from datetime import timedelta\n# import bs4 as bs\n# import data_stocks\n# import tkinter\n# matplotlib.use(\"TkAgg\")\n# from 
matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk\n# from matplotlib.figure import Figure\n# from tkinter import *\n# import tkinter.ttk as tkk\n# import numpy as np\n# import matplotlib.pyplot as plt\n\n# # Initialisation liste pandas\n# df = pd.DataFrame(columns=['Close', 'e9', 'MACD', 'e26', 'lastClose', 'e12', 'Position'])\n\n# # Creer premiere fenetre\n# window = Tk()\n\n# # Personnalisation\n# window.title(\"Trading Bot\")\n# window.geometry(\"1920x1080\")\n# window.minsize(1920, 1080)\n# window.maxsize(1920, 1080)\n# window.config(background='#3CF1E9')\n\n# # Ajouter texte\n# label_title = Label(window, text=\"Trading Algorithmique\", font=(\"Courrier\", 20), bg = '#3CF1E9')\n# label_title.pack(pady=5)\n\n# # # Canvas for graph\n# # canvasGraph = Canvas(window, height = 500, width = 1500)\n# # canvasGraph.pack(pady=30)\n\n# # Figure for stock graph\n# fig = Figure(figsize=(22,5), dpi=80, facecolor='white')\n# axes = fig.add_subplot(111)\n# axes.plot(0)\n\n# # Insert the matplotlib graph into the canvasGraph\n# canvas = FigureCanvasTkAgg(fig, master=window)\n# canvas.draw()\n# canvas.get_tk_widget().pack(pady=20)\n\n# # Figure for stock graph\n# figMACD = Figure(figsize=(22,2), dpi=80, facecolor='white')\n# axes = fig.add_subplot(111)\n# axes.plot(0)\n\n# canvasMACD = FigureCanvasTkAgg(figMACD, master=window)\n# canvasMACD.draw()\n# canvasMACD.get_tk_widget().pack(pady=5)\n\n# # # Toolbar og the graph\n# # toolbar = NavigationToolbar2Tk(canvas, window)\n# # toolbar.update()\n# # canvas.get_tk_widget().pack()\n\n# # Table of buy and sell\n# table = LabelFrame(window, text=\"Buy and sell\", height = 200, width = 1400)\n# table.pack(pady = 20)\n\n# colonne = tkk.Treeview(table, columns=(1,2,3,4), show=\"headings\", height=\"6\")\n# colonne.pack()\n\n# colonne.heading(1, text=\"Date\")\n# colonne.heading(2, text=\"Close\")\n# colonne.heading(3, text=\"MACD\")\n# colonne.heading(4, text='e9')\n\n# # Put the value og the stock in the table\n# def PutValueOnTable(stk):\n# df = data_stocks.GetPastData(stk)\n# for index, row in df.iterrows():\n# colonne.insert('', 'end',value=(\"Date\", row['Close'], row['MACD'],row['e9']))\n\n# # Affichage graph\n# def graph():\n# # Clear the table\n# ClearTable()\n# # Clear the graph\n# fig.clf()\n# # Get the date of the stock\n# df = data_stocks.GetPastData(entree.get())\n# fig.add_subplot(111).plot(df['Close'])\n# # Zoom on the stock\n# axes.set_ylim(df['Close'].min(), df['Close'].max())\n# canvas.draw()\n# # Put all the value on the table\n# PutValueOnTable(entree.get())\n\n# # Clear the table\n# def ClearTable():\n# for i in colonne.get_children():\n# colonne.delete(i)\n\n# # Button pour afficher le graph\n# my_button = Button(window, text=\"Graph\", command=graph)\n# my_button.pack()\n\n# # Saisir le stock\n# entree = Entry(window)\n# entree.pack()\n\n# # Afficher\n# window.mainloop()\n\n# # df = pd.DataFrame(columns=['Close', 'e9', 'MACD', 'e26', 'lastClose', 'e12', 'Position'])\n# # url_TEL = 'https://fr.finance.yahoo.com/quote/TEL?p=TEL'\n\n# # df = data_stocks.GetPastData(\"AAPL\")\n\n# # for row in df['Close']:\n# # print(str(row))\n\nfrom fastapi import FastAPI, Request, Form\nimport sqlite3, config\nfrom fastapi.templating import Jinja2Templates\nfrom datetime import date\nfrom fastapi.responses import RedirectResponse\nimport pandas as pd\nfrom pychartjs import BaseChart, ChartType, Color \nfrom flask import Flask, render_template, jsonify\nimport json, random\nfrom django.shortcuts import render\nfrom random 
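# --- hedged aside: the commented-out GUI above embeds Matplotlib in Tkinter via
# --- FigureCanvasTkAgg; a minimal self-contained version of that pattern:
import tkinter as tk
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg

root = tk.Tk()
fig = Figure(figsize=(4, 3), dpi=80)
fig.add_subplot(111).plot([0, 1, 2], [0, 1, 4])
canvas = FigureCanvasTkAgg(fig, master=root)   # bridges the Figure into Tk
canvas.draw()
canvas.get_tk_widget().pack()
root.mainloop()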
import sample\nimport backtesting_macd\nfrom fastapi.staticfiles import StaticFiles\nfrom pathlib import Path\nfrom aiofiles.os import stat as aio_stat\n\napp = FastAPI()\ntemplates = Jinja2Templates(directory=\"templates\")\n\napp.mount(\n \"/static\",\n StaticFiles(directory=Path(__file__).parent.parent.absolute() / \"static\"),\n name=\"static\",\n)\n\n@app.get(\"/home\")\ndef home(request: Request):\n\n return templates.TemplateResponse(\"home.html\", {\"request\": request})\n\n@app.get(\"/stocks\")\ndef index(request: Request):\n stock_filter = request.query_params.get('filter', False)\n\n # Get the app data already created\n connection = sqlite3.connect(config.DATA_BASE)\n connection.row_factory = sqlite3.Row\n\n # Create connection\n cursor = connection.cursor()\n\n if stock_filter == 'new_closing_highs':\n cursor.execute(\"\"\"\n select * from (\n select symbol, name, stock_id, max(close), date\n from stock_price join stock on stock.id = stock_price.stock_id\n group by stock_id\n order by symbol\n ) where date = ?\n \"\"\", (date.today().isoformat(),))\n else:\n # Get symbol and company from the database\n cursor.execute(\"\"\"SELECT symbol, name FROM stock ORDER by symbol\"\"\")\n\n rows = cursor.fetchall()\n\n return templates.TemplateResponse(\"index.html\", {\"request\": request, \"stocks\": rows})\n\n\n@app.get(\"/stock/{symbol}\")\ndef stock_detail(request: Request, symbol):\n # Get the app data already created\n connection = sqlite3.connect(config.DATA_BASE)\n connection.row_factory = sqlite3.Row\n\n # Create connection\n cursor = connection.cursor()\n\n cursor.execute(\"\"\"\n SELECT * FROM strategy\n \"\"\")\n\n strategies = cursor.fetchall()\n\n # Get symbol and company from the database\n cursor.execute(\"\"\"SELECT id, symbol, name FROM stock WHERE symbol = ?\"\"\", (symbol,))\n row = cursor.fetchone()\n\n cursor.execute(\"\"\"\n SELECT * FROM stock_price WHERE stock_id = ? 
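# --- hedged aside: the endpoints above lean on two sqlite3 features, the Row
# --- factory (column access by name) and `?` placeholders; a standalone check:
import sqlite3

conn = sqlite3.connect(':memory:')
conn.row_factory = sqlite3.Row
conn.execute('CREATE TABLE stock (symbol TEXT, name TEXT)')
conn.execute('INSERT INTO stock VALUES (?, ?)', ('AAPL', 'Apple'))
row = conn.execute('SELECT * FROM stock WHERE symbol = ?', ('AAPL',)).fetchone()
print(row['name'])   # -> Apple; parameters are bound, never string-formatted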
ORDER BY date DESC\n \"\"\", (row['id'],))\n prices = cursor.fetchall()\n\n\n # Get symbol and company from the database\n cursor.execute(\"\"\"SELECT * FROM twitter_analysis where stock_id = (?)\"\"\", (row['id'],))\n\n #df = pd.read_sql_query(f\"\"\"SELECT close FROM stock_price_minutes where stock_id = 9074\"\"\" ,connection)\n\n sentiments = cursor.fetchall()\n polaritys = []\n dates = []\n volumes = []\n\n for sentiment in sentiments:\n polarity = sentiment['polarity']\n polaritys.append(polarity)\n date = sentiment['date']\n dates.append(date)\n volume = sentiment['volume']\n volumes.append(volume)\n\n polaritys = [float(i) for i in polaritys]\n volumes= [float(i) for i in volumes]\n\n return templates.TemplateResponse(\"stock_detail.html\", {\"request\": request, \"stock\": row, \"bars\": prices, \"strategies\": strategies,\"polaritys\": polaritys, \"dates\": dates, \"volumes\": volumes})\n\n\n@app.post(\"/apply_strategy\")\ndef apply_strategy(strategy_id: int = Form(...), stock_id: int = Form(...)):\n connection = sqlite3.connect(config.DATA_BASE)\n cursor = connection.cursor()\n \n cursor.execute(\"\"\"\n INSERT INTO stock_strategy (stock_id, strategy_id) VALUES (?, ?)\n \"\"\", (stock_id, strategy_id))\n\n connection.commit()\n\n return RedirectResponse(url=f\"/strategy/{strategy_id}\", status_code=303)\n\n\n@app.get(\"/strategy/{strategy_id}\")\ndef strategy(request: Request, strategy_id):\n # Get the app data already created\n connection = sqlite3.connect(config.DATA_BASE)\n connection.row_factory = sqlite3.Row\n\n # Create connection\n cursor = connection.cursor()\n\n cursor.execute(\"\"\"\n SELECT id, name\n FROM strategy\n WHERE id = ?\n \"\"\", (strategy_id,))\n\n strategy = cursor.fetchone()\n\n cursor.execute(\"\"\"\n SELECT symbol, name\n FROM stock JOIN stock_strategy on stock_strategy.stock_id = stock.id\n WHERE strategy_id = ?\n \"\"\", (strategy_id,))\n \n stocks = cursor.fetchall()\n\n return templates.TemplateResponse(\"strategy.html\", {\"request\": request, \"stocks\": stocks, \"strategy\": strategy})\n\n@app.post(\"/backtesting\")\ndef backtesting(strategy_id: int = Form(...), stock_id: int = Form(...)):\n\n return RedirectResponse(url=f\"/backtesting/{strategy_id}/{stock_id}\", status_code=303)\n\n@app.get(\"/backtesting/{startegy_id}/{stock_id}\")\ndef apply_backtesting(request: Request, stock_id):\n # Get the app data already created\n connection = sqlite3.connect(config.DATA_BASE)\n connection.row_factory = sqlite3.Row\n\n # Create connection\n cursor = connection.cursor()\n\n # Get symbol and company from the database\n cursor.execute(\"\"\"SELECT id, symbol, name FROM stock WHERE id = ?\"\"\", (stock_id,))\n stock = cursor.fetchone()\n \n df = backtesting_macd.BackTestingMACD(backtesting_macd.GetPastData(stock['symbol']))\n\n MACD = [float(i) for i in df['MACD']]\n e9 = [float(i) for i in df['e9']]\n closes = [float(i) for i in df['Close']]\n dates = [str(i) for i in df['Date']]\n\n df_filtred = df[(df.Order == 'buy') | (df.Order == 'sell')]\n closes_order = [float(i) for i in df_filtred['Close']]\n \n benefice = backtesting_macd.CalculateBenef(df)\n print(benefice)\n \n return templates.TemplateResponse(\"backtesting_macd.html\", {\"request\": request, \"stock\": stock, \"MACD\": MACD, 'e9': e9, \"closes\": closes, \"dates\": dates,\"closes_order\": closes_order , \"df_order\": df_filtred.to_dict(orient='records'), \"benefice\": benefice})\n\n\n@app.get(\"/strategies\")\ndef stratefgy(request: Request, strategy_id):\n # Get the app data already created\n 
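# --- hedged aside: the backtesting route above declares {startegy_id} (sic) but
# --- apply_backtesting never accepts it, so the value is silently dropped;
# --- declaring and typing both path parameters lets FastAPI validate them:
from fastapi import FastAPI

app_sketch = FastAPI()

@app_sketch.get('/backtesting/{strategy_id}/{stock_id}')
def apply_backtesting_fixed(strategy_id: int, stock_id: int):
    return {'strategy_id': strategy_id, 'stock_id': stock_id}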
connection = sqlite3.connect(config.DATA_BASE)\n connection.row_factory = sqlite3.Row\n\n # Create connection\n cursor = connection.cursor()\n\n cursor.execute(\"\"\"\n SELECT id, name\n FROM strategy\n WHERE id = ?\n \"\"\", (strategy_id,))\n\n strategy = cursor.fetchone()\n\n cursor.execute(\"\"\"\n SELECT stock_id, date, benefice, volume_order\n FROM backtesting_macd JOIN stock_strategy on stock_strategy.stock_id = stock.id\n WHERE strategy_id = ?\n \"\"\", (strategy_id,))\n \n stocks = cursor.fetchall()\n\n return templates.TemplateResponse(\"strategy.html\", {\"request\": request, \"stocks\": stocks, \"strategy\": strategy})\n\n@app.get(\"/test\")\ndef test(request: Request):\n\n # Get the app data already created\n connection = sqlite3.connect(config.DATA_BASE)\n connection.row_factory = sqlite3.Row\n\n # Create connection\n cursor = connection.cursor()\n\n # Get symbol and company from the database\n cursor.execute(\"\"\"SELECT * FROM stock_price_minutes where stock_id = 9074\"\"\")\n\n #df = pd.read_sql_query(f\"\"\"SELECT close FROM stock_price_minutes where stock_id = 9074\"\"\" ,connection)\n\n rows = cursor.fetchall()\n closes = []\n dates = []\n\n for row in rows:\n close = row['close']\n closes.append(close)\n date = row['date']\n dates.append(date)\n\n closes = [float(i) for i in closes]\n data = json.dumps(closes)\n labels = json.dumps(dates)\n \n return templates.TemplateResponse(\"test.html\", {\"request\": request, \"stocks\": closes, \"dates\": dates})\n \n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"392025307","text":"\"\"\"Ian Sahlberg\nPython210\n2/13/2019\nUnit Testing\"\"\"\n\n\nimport unittest as u\nimport Mailroom3 as m\nfrom Mailroom3 import amount_append\nfrom Mailroom3 import donate\nfrom Mailroom3 import print_report\nfrom Mailroom3 import quit\n\n\nclass tests(u.TestCase):\n\n \"\"\"\n def test_donate(self): #Requires test input = amount below (100)\n test_dict = {'bugs mouse': [32, 5], 'mickey bunny': {678}}\n amount_donated = 100\n expected = 100\n actual = donate()\n self.assertEqual(expected,actual)\n \"\"\"\n\n def test_amount_append(self):\n dict = {'Roberto' : []}\n name = 'Roberto'\n amount = 23.00\n expected = {name : [amount]}\n m.amount_append(name, dict, amount)\n self.assertDictEqual(expected, dict)\n\n\n def test_print_list(self):\n dict = {'Bob': 1, 'Mary': 2}\n expected = ['Bob', 'Mary']\n actual = m.print_list(dict)\n self.assertEqual(expected, actual)\n\n def test_email(self):\n name = 'Bibs'\n amount = 666.00\n expected = 'Thank you {} for the generous donation of $ {:.2f}. 
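# --- hedged aside: the /strategies query above joins stock_strategy to a
# --- `stock` table that never appears in its FROM clause, so sqlite will raise
# --- "no such column: stock.id"; a corrected join, assuming backtesting_macd
# --- carries the stock_id column its SELECT list implies:
SQL_STRATEGY_RESULTS = """
    SELECT backtesting_macd.stock_id, backtesting_macd.date,
           backtesting_macd.benefice, backtesting_macd.volume_order
    FROM backtesting_macd
    JOIN stock_strategy ON stock_strategy.stock_id = backtesting_macd.stock_id
    WHERE stock_strategy.strategy_id = ?
"""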
We appreciate your generosity.\\n\\nSincerely, \\n\\nThe Helping R Us Team\\n\\n'.format(name.title(), amount)\n actual = m.email(name, amount)\n self.assertEqual(expected, actual)\n\n def test_quit(self):\n actual = quit(self)\n expected = 'Until next time!'\n self.assertEqual(expected, actual)\n\n\n def test_sort_stats(self):\n dict = {'bob johnson': [150.00],\n 'susan skoosan': [2000.00, 550.00]}\n expected = [('susan skoosan', [2550.00, 2, 1275.00]), ('bob johnson', [150.00, 1, 150.00])]\n actual = m.sort_and_stats_dict(dict)\n self.assertListEqual(expected, actual)\n\n def test_print_report(self):\n dict = {'bob johnson': [150,1,150],\n 'susan skoosan': [2550,2,1275],\n 'tim tam':[10.50,1,10],\n 'roxanne raffle':[211,3,70],\n 'jon jacob':[5005,2,2502]}\n dict_2 = sorted(list(dict.items()), key=lambda x: x[1], reverse=True)\n actual = print_report(dict_2)\n expected = \"\"\"Donor Name | Donor Total | Count of Donations | Average of Donations\n,----------------------------------------------------------------------------------------\njon jacob $ 5005.00 2 $ 2502\nsusan skoosan $ 2550.00 2 $ 1275\nroxanne raffle $ 211.00 3 $ 70\nbob johnson $ 150.00 1 $ 150\ntim tam $ 10.50 1 $ 10\"\"\"\n\n self.assertEqual(expected,str(actual).rstrip())\n\n\n def test_email_file(self):\n import datetime as dt\n dict = ({'Haggus': [4.00]})\n expected = 'Thank you Haggus for the generous donation of $ 4.00. We appreciate your generosity.Sincerely, The Helping R Us Team'\n a = m.email_file(dict)\n with open ('Haggus_{}.txt'.format(dt.date.today()), 'r') as file:\n x = file.read()\n actual = \"\".join(x.splitlines())\n self.assertEqual(expected, actual)\n\n\nif __name__ == '__main__':\n u.main()\n","sub_path":"students/Sahlberg/Lesson6/UnitTest.py","file_name":"UnitTest.py","file_ext":"py","file_size_in_byte":3250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"488782013","text":"import feedparser\nfrom bs4 import *\n\n\n#accepts an entree from one of the websites\n#returns a list of entries (5 for now. 
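# --- hedged aside: the commented-out test_donate above blocks on input(); with
# --- unittest.mock the prompts can be canned. The answers below are
# --- hypothetical and must match whatever Mailroom3.donate actually asks for:
from unittest import mock

class DonateInputTest(u.TestCase):
    @mock.patch('builtins.input', side_effect=['bugs mouse', '100'])
    def test_donate_mocked(self, _fake_input):
        # each input() call inside donate() consumes the next canned answer
        self.assertEqual(m.donate(), 100)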
Should make user adjustable)\ndef first_five(feed, lst):\n \n if len(lst) >= 5:\n return(lst)\n \n else:\n lst.append(feed[0])\n lst = first_five(feed[1:], lst)\n return(lst)\n \n\n#prnt_title takes a list of lists of entries\n#prints out all entries from each website list\n#returns nothing\ndef prnt_title(sources):\n lst = sources\n \n for i in range(len(sources)):\n for j in range(len(sources[i])):\n\n print(sources[i][j].title)\n \n\n\ndef main():\n bbc = feedparser.parse(\"http://feeds.bbci.co.uk/news/technology/rss.xml\")\n cnn = feedparser.parse(\"http://rss.cnn.com/rss/edition.rss\")\n wash = feedparser.parse(\"http://www.wsj.com/xml/rss/3_7085.xml\")\n\n lst = []\n lst2 = []\n lst3 = []\n #bbc.entries[0].link HOW TO ACCESS LINKS\n bbc5 = first_five(bbc.entries, lst)\n cnn5 = first_five(cnn.entries, lst2)\n wash5 = first_five(wash.entries, lst3)\n\n sources = [bbc5, cnn5, wash5]\n\n \nmain()\n \n","sub_path":"news_aggregator_main.py","file_name":"news_aggregator_main.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"7620040","text":"#coding=utf-8\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\npath=\"D:\\\\fallingspace\\\\perceptron\\\\data\\\\ex4Data\\\\\"\ndata= np.loadtxt(path+\"ex4x.dat\")\nvalue= np.loadtxt(path+\"ex4y.dat\")\n\n\n# 计算正确率(未使用)\ndef checkTrueRate(testMatData, testLabelData, W):\n accuracyCount = 0\n for index,item in enumerate(testMatData):\n for indexw in range(0,3):\n result=np.dot(item,W[indexw])\n if(result<0 and indexw!=testLabelData[index]):accuracyCount+=1\n if(result>0 and indexw==testLabelData[index]):accuracyCount+=1\n return accuracyCount\n\n# 计算损失\ndef chechLoss(testMatData,testLabelData,W):\n loss = 0\n for index,item in enumerate(testMatData):\n for indexw in range(0,3):\n result=np.dot(item,W[indexw])\n if(result<0 and indexw==testLabelData[index]):loss+=result\n if(result>0 and indexw!=testLabelData[index]):loss+=result\n return loss\n\n# 计算单个超平面正确率\ndef checkSingleTrueRate(testMatData, testLabelData, w,indexw):\n accuracyCount = 0\n for index,item in enumerate(testMatData):\n result=np.dot(item,w)\n if(result<0 and indexw!=testLabelData[index]):accuracyCount+=1\n if(result>0 and indexw==testLabelData[index]):accuracyCount+=1\n return accuracyCount\n\n# 归一化数据\ndef normalizefeature(data):\n x_norm = data\n meam = np.mean(data, axis=0)\n sigma = np.std(data, axis=0)\n x_norm = (data - meam) / sigma\n return x_norm\n\n# 随机化数据\ndef shuffleData(data,value):\n tempData=[]\n for index,item in enumerate(data):\n tempData.append([item[0],item[1],value[index]])\n np.random.shuffle(tempData)\n tempData=np.array(tempData)\n return tempData[...,:2],tempData[...,2]\n\n# 绘制图标\ndef mscatter(x,y,ax=None, m=None, **kw):\n import matplotlib.markers as mmarkers\n if not ax: ax=plt.gca()\n sc = ax.scatter(x,y,**kw)\n if (m is not None) and (len(m)==len(x)):\n paths = []\n for marker in m:\n if isinstance(marker, mmarkers.MarkerStyle):\n marker_obj = marker\n else:\n marker_obj = mmarkers.MarkerStyle(marker)\n path = marker_obj.get_path().transformed(\n marker_obj.get_transform())\n paths.append(path)\n sc.set_paths(paths)\n return sc\n\n# 查找对于单个data的最大距离超平面\ndef maxJBywx(W,data):\n maxIndex=0\n maxValue=np.dot(W[0],data)\n for ind,item in enumerate(W):\n if maxValue < np.dot(item,data):\n maxIndex=ind\n maxValue=np.dot(item,data)\n return maxIndex\n\n# 初始化权值数组\nW=[(0,0,0),(0,0,0)]\n# 最优权值组\nbestW=[(0,0,0),(0,0,0)]\n# 
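# --- hedged aside: first_five above recurses once per entry and mutates the
# --- lst argument it was given; a slice gives the same result in one step and
# --- is safe when the feed has fewer than n entries:
def first_n(feed, n=5):
    return list(feed[:n])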
最优正确分类数\nbestClassifyCount=[0,0]\n# 数据预处理\ndata,value=shuffleData(data,value)\ndata=normalizefeature(data)\n\n# 添加偏置参数\ndata=np.insert(data,2,values=1,axis=1)\n\n\n# 作图相关\n# 给标签映射图形\n# 圆形 星星 正三角\nmarker={0:'o',1:'*'}\ncmarker=list(map(lambda x:marker[x],value))\n# 生成子图\n# fig, ax = plt.subplots()\n# 创建图像布局对象fig\nfig = plt.figure(figsize = (12, 6))\n# ax1=fig.add_subplot(122)\n # 初始线的数据\nx = np.linspace(-5,5)\n# ax1.plot(x, x)\ny0 = (-bestW[0][0] * x- bestW[0][2])/(bestW[0][1])\ny1 = (-bestW[1][0] * x- bestW[1][2])/(bestW[1][1])\nax=fig.add_subplot(111)\n# 圆\nc=ax.plot(x ,y0,color='red',label='0 circle')\n# 星\ns=ax.plot(x ,y1,color='blue',label='1 star')\n# 图例标注\nplt.legend([c,s],['0 circle','1 star'])\n# 坐标轴范围\nplt.xlim((-5,5))\nplt.ylim((-2,2))\n# 最终设置\nscatter = mscatter(data[:,0], data[:,1],c='', m=cmarker, ax=ax,cmap=plt.cm.RdYlBu,edgecolors='k')\n# 关闭阻塞模式,打开交互模式\nplt.ion() \n\n# 学习参数设置 \n# 学习率\na=0.1\n# 学习轮数\nepoch=20\n\nwhile(epoch>0):\n print(\"epoch:%2d\"%epoch)\n epoch-=1\n for k,input_vector in enumerate(data):\n ck=maxJBywx(W,input_vector)\n # 对每个权重向量更新\n for j in range(0,2):\n if(j==ck and j!=value[k]):\n W[j]=W[j]-a*input_vector\n \n elif(j==value[k] and j!=ck):\n W[j]=W[j]+a*input_vector\n # 获取当前效果(正确分类个数)\n currentRate=checkSingleTrueRate(data,value,W[j],j)\n # 如果更好就更换最优权值\n if(currentRate>bestClassifyCount[j]):\n bestClassifyCount[j]=np.copy(currentRate)\n bestW[j]=np.copy(W[j])\n\n\n # 更新超平面\n try:\n for i in range(2):\n ax.lines.remove(ax.lines[i])\n except Exception:\n pass\n y0 = (-bestW[0][0] * x- bestW[0][2])/(bestW[0][1])\n y1 = (-bestW[1][0] * x- bestW[1][2])/(bestW[1][1])\n # 圆\n ax.plot(x ,y0,color='red',label='0 circle')\n # 星\n ax.plot(x ,y1,color='blue',label='1 star')\n plt.pause(0.1)\n\n# 显示前关掉交互模式\nplt.ioff()\nplt.show()\n\n\"\"\"\n'-'\t实线样式\n'--'\t短横线样式\n'-.'\t点划线样式\n':'\t虚线样式\n'.'\t点标记\n','\t像素标记\n'o'\t圆标记\n'v'\t倒三角标记\n'^'\t正三角标记\n'<'\t左三角标记\n'>'\t右三角标记\n'1'\t下箭头标记\n'2'\t上箭头标记\n'3'\t左箭头标记\n'4'\t右箭头标记\n's'\t正方形标记\n'p'\t五边形标记\n'*'\t星形标记\n'h'\t六边形标记 1\n'H'\t六边形标记 2\n'+'\t加号标记\n'x'\tX 标记\n'D'\t菱形标记\n'd'\t窄菱形标记\n'|'\t竖直线标记\n'_'\t水平线标记\n\"\"\"","sub_path":"perception/useless/2DClassByMultiClassPLA.py","file_name":"2DClassByMultiClassPLA.py","file_ext":"py","file_size_in_byte":5461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"174169740","text":"#import the webdriver from selenium package\nfrom selenium import webdriver\n#import the keys class from selenium package\nfrom selenium.webdriver.common.keys import Keys\nimport time\nimport mvp\n\ncontact = 'Halo Mondays'\n#initialize the web driver variable\ndriver = webdriver.Chrome(executable_path = r\"C:/Users/Dave/Webdrivers/chromedriver.exe\") #give the path of the web driver in the string\ntime.sleep(2)\n#browse to a whatsapp web\ndriver.get(r\"https://web.whatsapp.com\") \ntime.sleep(2)\n#control the contact search textbox\nuser = driver.find_element_by_xpath('//span[@title = \"{}\"]'.format(contact))\nuser.click()\n\n#Now the contact opens and a new textbox is created, so we need to execute this line once again to read the new textbox\nsearch=driver.find_elements_by_class_name(r\"_2Evw0\")\ntime.sleep(2)\n\nattach = driver.find_element_by_xpath(r'//*[@title = \"Attach\"]')\nattach.click()\n\ndialog=driver.find_element_by_tag_name(r\"input\")\n\n#send a file to open dialog\ndialog.send_keys(r\"C:/Users/Dave/Python/Python Scripts/Halo-Git/MVP.xlsx\") #this string contains the path of the file to be uploaded\n#NOTE: if the file is large, then upload time 
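# --- hedged aside: the training loop above (maxJBywx plus the paired W[j]
# --- updates) is the classic multi-class perceptron step: score every class,
# --- demote the arg-max offender, promote the true class. Compactly in numpy:
import numpy as np

def multiclass_perceptron_step(W, x, y_true, lr=0.1):
    # W: (n_classes, n_features), x: (n_features,), y_true: integer label
    y_hat = int(np.argmax(W @ x))
    if y_hat != y_true:
        W[y_hat] -= lr * x    # demote the wrongly winning class
        W[y_true] += lr * x   # promote the correct class
    return W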
will be high. If it takes some time to upload, then the preview window also takes some time to load\n#so, its better to ask the program to wait\n#this is done by the following code\n\ntime.sleep(2) #wait for 2 secs\n#this step is not required when you execute the code step by step\n\n#after the upload is completed, a preview window is opened\n#in the preview window, there is a send button\n#the below code controls the send button\nbut=driver.find_element_by_class_name(r\"_3Git-\")\n\n#click the send button\nbut.click()\n ","sub_path":"Send_mvp.py","file_name":"Send_mvp.py","file_ext":"py","file_size_in_byte":1649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"88736040","text":"\"\"\"==========================================\n; Title: Tests for JS Parser\n; Author: Nick Leslie\n; Date: 22/08/2020\n=============================================\n\"\"\"\n\nfrom unittest import TestCase\nfrom src.converter.js_parser import JSParser\n\n\nclass TestJSParser(TestCase):\n\n def test_parse_expected_input(self):\n # arrange\n js_parser = JSParser()\n js_string = 'class Patient {constructor(issue) {this.issue = '\\\n 'new Object();} }'\n expected_result = [{'class_name': 'Patient', 'attributes': [\n 'issue'], 'methods': ['constructor'], 'edges': {'Object'}}]\n # act\n result = js_parser.parse(js_string)\n # assert\n self.assertEqual(result, expected_result)\n\n def test_parse_unexpected_input(self):\n # arrange\n js_parser = JSParser()\n js_string = ''\n expected_result = []\n # act\n result = js_parser.parse(js_string)\n # assert\n self.assertNotEqual(result, expected_result)\n","sub_path":"tests/test_js_parser.py","file_name":"test_js_parser.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"224837895","text":"from datetime import datetime\r\nfrom pytse.pytse import PyTse,SymbolData\r\nimport pickle\r\nfrom tse_constants import FILE_PATH\r\nimport xlsxwriter\r\n\r\n\r\ndef get_buy_minus_sell(symbols):\r\n num = 0\r\n sum = 0\r\n for x in symbols:\r\n symbol = symbols[x]\r\n sym_name = str(symbol.l18)\r\n # print(sym_name,symbol)\r\n if hasattr(symbol,'ct'):\r\n num = num + 1\r\n buy_i_vol = symbol.ct.Buy_I_Volume\r\n sell_i_vol = symbol.ct.Sell_I_Volume\r\n price_commited = symbol.pc\r\n temp_sum=(buy_i_vol - sell_i_vol) * price_commited\r\n sum = sum + temp_sum\r\n print(sym_name,temp_sum)\r\n print(f\"num={num} , sum={sum}\")\r\n\r\ndef get_sum_val(symbols,date):\r\n num = 0\r\n sum = 0\r\n for x in symbols:\r\n symbol = symbols[x]\r\n sym_name = str(symbol.l18)\r\n num = num + 1\r\n sum = sum + int(symbol.tval)\r\n print(sym_name,symbol.tval)\r\n print(f\"num={num} , sum={sum}\")\r\n\r\n workbook = xlsxwriter.Workbook('tse_1.xlsx') \r\n worksheet = workbook.add_worksheet(\"tval %\") \r\n col=0\r\n row=0\r\n #col 0 -> name / col 1 -> tval\r\n worksheet.write(row,col,\"name\")\r\n worksheet.write(row,col+1,date)\r\n row = row + 1\r\n for x in symbols:\r\n symbol = symbols[x]\r\n if int(symbol.tval) != 0:\r\n sym_name = str(symbol.l18)\r\n worksheet.write(row,col,sym_name)\r\n worksheet.write(row,col+1,int(symbol.tval)*100/sum)\r\n row = row + 1\r\n workbook.close()\r\n\r\n# %Y%m%d like 20201119\r\n# date = \"20201119\"\r\ndate = \"20201122\"\r\nfile_name = f\"tse_{date}.pkl\"\r\n# file_name = f\"tse.pkl\"\r\nwith open(f\"{FILE_PATH}\\\\{file_name}\", 'rb') as input:\r\n symbols = pickle.load(input)\r\n # get_buy_minus_sell(symbols)\r\n 
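# --- hedged aside: the WhatsApp script above mixes fixed time.sleep() calls
# --- with obfuscated class names like _2Evw0, both of which break easily;
# --- Selenium's explicit waits block only as long as needed (XPath supplied by
# --- the caller):
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

def wait_clickable(driver, xpath, timeout=30):
    return WebDriverWait(driver, timeout).until(
        EC.element_to_be_clickable((By.XPATH, xpath)))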
get_sum_val(symbols,date)\r\n","sub_path":"retrive_xlsx.py","file_name":"retrive_xlsx.py","file_ext":"py","file_size_in_byte":1807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"300201436","text":"import RPi.GPIO as GPIO\nimport time\n\nGPIO.setmode(GPIO.BCM)\n\nPIN_TRIGGER =19\nPIN_ECHO =26\n\nGPIO.setup(PIN_TRIGGER, GPIO.OUT)\nGPIO.setup(PIN_ECHO, GPIO.IN)\n\nGPIO.output(PIN_TRIGGER, GPIO.HIGH)\ntime.sleep(0.00001)\nGPIO.output(PIN_TRIGGER, GPIO.LOW)\nwhile GPIO.input(PIN_ECHO) == 0:\n pulse_start_time = time.time()\nwhile GPIO.input(PIN_ECHO) == 1:\n pulse_end_time = time.time()\n\npulse_duration = pulse_end_time - pulse_start_time\n\ndistance = round(pulse_duration * 17150, 2)\n\nprint(\"Distance:\",distance,\"cm\")","sub_path":"testfile.py","file_name":"testfile.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"465253765","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('threefeatures', '0019_auto_20150926_0100'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Abonelik',\n fields=[\n ('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),\n ('eposta', models.EmailField(verbose_name='Abone E-Posta Adresi', max_length=254)),\n ],\n options={\n 'verbose_name': 'E-Posta Adresi',\n 'verbose_name_plural': 'E-Posta Adresleri',\n },\n ),\n ]\n","sub_path":"threefeatures/migrations/0020_abonelik.py","file_name":"0020_abonelik.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"87065564","text":"import numpy as np\n\n\n\ndef nicer_scale_prefix(scale, milli_cutoff=0.1):\n \"\"\"\n Returns a nice factor and a SI prefix string \n \n Example:\n scale = 2e-10\n \n f, u = nicer_scale_prefix(scale)\n \n \n \"\"\"\n \n max_val = np.max(np.abs(scale))\n \n if max_val < 1e-28:\n return 1, ''\n \n fudge_factor=10**(-3.0/2.0)/milli_cutoff\n \n max_power=3*np.sign(np.log10(max_val))*round(abs(np.log10(max_val*fudge_factor)/3))\n f = 10**max_power\n\n return f, SHORT_PREFIX[f]\n\n\n\n\ndef nicer_array(a, milli_cutoff=0.1):\n \"\"\"\n Returns a scaled array, the scaling, and a unit prefix\n \n Example: \n nicer_array( np.array([2e-10, 3e-10]) )\n Returns:\n (array([200., 300.]), 1e-12, 'p')\n \n \"\"\"\n \n if np.isscalar(a):\n x = a\n elif len(a) == 1:\n x = a[0]\n else:\n a = np.array(a)\n x = a.ptp()\n \n fac, prefix = nicer_scale_prefix( x, milli_cutoff=milli_cutoff )\n \n return a/fac, fac, prefix\n\n\n\n\n# Dicts for prefixes\nPREFIX_FACTOR = {\n 'yocto-' :1e-24,\n 'zepto-' :1e-21,\n 'atto-' :1e-18,\n 'femto-' :1e-15,\n 'pico-' :1e-12,\n 'nano-' :1e-9 ,\n 'micro-' :1e-6,\n 'milli-' :1e-3 ,\n 'centi-' :1e-2 ,\n 'deci-' :1e-1,\n 'deca-' :1e+1,\n 'hecto-' :1e2 ,\n 'kilo-' :1e3 ,\n 'mega-' :1e6 ,\n 'giga-' :1e9 ,\n 'tera-' :1e12 ,\n 'peta-' :1e15 ,\n 'exa-' :1e18 ,\n 'zetta-' :1e21 ,\n 'yotta-' :1e24\n}\n# Inverse\nPREFIX = dict( (v,k) for k,v in PREFIX_FACTOR.items())\n\nSHORT_PREFIX_FACTOR = {\n 'y' :1e-24,\n 'z' :1e-21,\n 'a' :1e-18,\n 'f' :1e-15,\n 'p' :1e-12,\n 'n' :1e-9 ,\n 'u' :1e-6,\n 'm' :1e-3 ,\n 'c' :1e-2 ,\n 'd' :1e-1,\n '' : 1,\n 'da' :1e+1,\n 'h' :1e2 ,\n 'k' :1e3 ,\n 'M' :1e6 ,\n 'G' :1e9 ,\n 'T' :1e12 ,\n 'P' :1e15 ,\n 'E' :1e18 ,\n 'Z' :1e21 ,\n 'Y' 
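# --- hedged aside: the 17150 in the HC-SR04 snippet above is the speed of
# --- sound in cm/s (~34300 at 20 degrees C) halved, because the echo covers
# --- the distance out and back:
SPEED_OF_SOUND_CM_S = 34300

def echo_to_cm(pulse_seconds):
    return pulse_seconds * SPEED_OF_SOUND_CM_S / 2   # == pulse_seconds * 17150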
:1e24\n}\n# Inverse\nSHORT_PREFIX = dict( (v,k) for k,v in SHORT_PREFIX_FACTOR.items())\n\n\n","sub_path":"GPT_bokeh_plot/nicer_units.py","file_name":"nicer_units.py","file_ext":"py","file_size_in_byte":2010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"554995624","text":"from gameloop import *\r\nfrom pygame import *\r\nimport pygame, sys, time\r\n\r\nif __name__ == '__main__':\r\n player = game() # 声明一个类对象\r\n player.game_start('Tank-War') # 调用开始函数\r\n while player.playing: # 进入游戏运行\r\n player.new() # 开始游戏\r\n player.screen.fill(black)\r\n player.game_start('GAME-OVER') # 游戏结束\r\n time.sleep(1.5) # 可以不要\r\n\r\n\r\n#主循环\r\n\r\nfrom setting import *\r\nfrom pygame import *\r\nfrom Sprite import *\r\nimport pygame, sys\r\n\r\nvec = pygame.math.Vector2\r\n\r\nclass game: # 游戏类 包含循环等\r\n def __init__(self): # 初始化\r\n pygame.init() # pygame 初始化\r\n pygame.display.set_caption(\"Keep-Going\") # 游戏窗口 左上角名称\r\n self.screen = pygame.display.set_mode((width, height)) # 游戏窗口的大小\r\n self.FpsClock = pygame.time.Clock() # 设置游戏的刷新率\r\n self.playing = True # 进入游戏的状态\r\n self.running = True # 游戏运行的状态\r\n self.Waiting = True # 游戏等待的状态\r\n self.Pblood = 100 # 玩家血量\r\n self.Eblood = 100 # 敌人血量\r\n self.player = Player() # 声明一个游戏玩家对象\r\n self.enemy = Enemy() # 声明一个敌人对象\r\n self.all_groups = pygame.sprite.Group() # 通过pygame自带的 group 来判断碰撞检测\r\n self.player_groups = pygame.sprite.Group()\r\n self.Map_groups = pygame.sprite.Group()\r\n self.Enemy_groups = pygame.sprite.Group()\r\n\r\n def new(self): # 开始一个游戏\r\n self.player_groups.add(self.player) # 将玩家添加到玩家组\r\n self.all_groups.add(self.player) # 将玩家添加到 所有组\r\n\r\n self.Enemy_groups.add(self.enemy)\r\n self.all_groups.add(self.enemy)\r\n\r\n for platfroms in Map1: # 地图\r\n p = Platform(*platfroms) # 取出所有值\r\n self.Map_groups.add(p)\r\n self.all_groups.add(p)\r\n\r\n self.run() # 调用函数运行游戏\r\n\r\n def game_start(self, text): # 游戏的开始界面\r\n self.text_draw(width / 2, height / 4, 64, text) # 文本\r\n self.text_draw(width / 2, height * 3 / 4, 25, 'Press any key to continue', ) # 文本\r\n pygame.display.update() # 更行展示\r\n while self.Waiting: # 实现 按键等待开始效果\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n if event.type == pygame.KEYDOWN:\r\n self.Waiting = False\r\n\r\n def update(self): # 画面更新\r\n self.Map_groups.update()\r\n self.player_groups.update()\r\n self.enemy.Bullet_groups.update(self.enemy.flag) # 通过按键判断子弹方向\r\n self.player.Bullet_groups.update(self.player.flag)\r\n self.Enemy_groups.update()\r\n\r\n hit = pygame.sprite.groupcollide(self.player.Bullet_groups, self.Map_groups, True, False) # 子弹碰墙消失\r\n hit = pygame.sprite.groupcollide(self.enemy.Bullet_groups, self.Map_groups, True, False)\r\n\r\n PMC = pygame.sprite.spritecollide(self.player, self.Map_groups, False, False) # 撞墙\r\n if PMC:\r\n key_pressed = pygame.key.get_pressed()\r\n if key_pressed[pygame.K_a]:\r\n self.player.pos.x = self.player.pos.x + gap\r\n if key_pressed[pygame.K_d]:\r\n self.player.pos.x = self.player.pos.x - gap\r\n if key_pressed[pygame.K_w]:\r\n self.player.pos.y = self.player.pos.y + gap\r\n if key_pressed[pygame.K_s]:\r\n self.player.pos.y = self.player.pos.y - gap\r\n\r\n EMC = pygame.sprite.spritecollide(self.enemy, self.Map_groups, False, False) # 撞墙\r\n if EMC:\r\n key_pressed = pygame.key.get_pressed()\r\n if key_pressed[pygame.K_LEFT]:\r\n self.enemy.pos.x = self.enemy.pos.x + gap\r\n if key_pressed[pygame.K_RIGHT]:\r\n self.enemy.pos.x = self.enemy.pos.x - 
gap\r\n if key_pressed[pygame.K_UP]:\r\n self.enemy.pos.y = self.enemy.pos.y + gap\r\n if key_pressed[pygame.K_DOWN]:\r\n self.enemy.pos.y = self.enemy.pos.y - gap\r\n\r\n def run(self):\r\n while self.running:\r\n self.FpsClock.tick(Fps) # 设置帧率\r\n self.events() # 获取事件\r\n self.draw_pic() # 画出图片\r\n self.update()\r\n\r\n\r\n if self.Eblood <= 0: # enemy\r\n self.screen.fill(black)\r\n self.game_start('P1 WIN!')\r\n time.sleep(1.5)\r\n self.running = False\r\n self.playing = False\r\n\r\n if self.Pblood <= 0: # Player\r\n self.screen.fill(black)\r\n self.game_start('P2 WIN!')\r\n time.sleep(1.5)\r\n self.running = False\r\n self.playing = False\r\n\r\n\r\ndef text_draw(self, x, y, size, text): # 文本展示函数\r\n self.font = pygame.font.Font('HYChaoJiZhanJiaW-2', size)\r\n# self.font = pygame.font.Font('freesansbold.ttf', size) # 字体,大小\r\n self.text_surf = self.font.render(text, True, red) # 颜色\r\n self.text_rect = self.text_surf.get_rect() # 矩形\r\n self.text_rect.center = (x, y) # 位置\r\n self.screen.blit(self.text_surf, self.text_rect) # 覆盖展示\r\n\r\n\r\ndef draw_pic(self):\r\n self.screen.fill(white) # 背景\r\n self.text_draw(900, 50, 30, \"KEEP\") # 文本\r\n self.text_draw(900, 100, 30, \"GOING\")\r\n\r\n self.text_draw(820, 150, 20, \"P1:\")\r\n self.text_draw(820, 200, 20, \"P2:\")\r\n\r\n self.text_draw(900, 250, 20, \"Attention!\")\r\n self.text_draw(900, 300, 20, \"The Bullet Can\")\r\n self.text_draw(900, 350, 20, \"Be Control!\")\r\n self.bar_draw(850, 145, self.Pblood) # 血条\r\n hit = pygame.sprite.groupcollide(self.enemy.Bullet_groups, self.player_groups, True, False) # 血条减少\r\n if hit:\r\n self.Pblood = self.Pblood - randint(10, 15)\r\n self.bar_draw(850, 145, self.Pblood)\r\n\r\n self.bar_draw(850, 195, self.Eblood)\r\n hit = pygame.sprite.groupcollide(self.player.Bullet_groups, self.Enemy_groups, True, False)\r\n if hit:\r\n self.Eblood = self.Eblood - randint(10, 15)\r\n self.bar_draw(850, 195, self.Eblood)\r\n\r\n self.Map_groups.draw(self.screen) # 画出图片\r\n self.player_groups.draw(self.screen)\r\n self.Enemy_groups.draw(self.screen)\r\n self.player.Bullet_groups.draw(self.screen)\r\n self.enemy.Bullet_groups.draw(self.screen)\r\n\r\n pygame.display.update()\r\n\r\n\r\ndef bar_draw(self, x, y, pct): # 血条函数\r\n # draw a bar\r\n if pct <= 0:\r\n pct = 0\r\n Bar_Lenth = 100\r\n Bar_Height = 10\r\n Fill_Lenth = (pct / 100) * Bar_Lenth\r\n Out_rect = pygame.Rect(x, y, Bar_Lenth, Bar_Height)\r\n Fill_rect = pygame.Rect(x, y, Fill_Lenth, Bar_Height)\r\n pygame.draw.rect(self.screen, green, Fill_rect)\r\n pygame.draw.rect(self.screen, red, Out_rect, 2)\r\n\r\n\r\ndef events(self): # 事件\r\n for events in pygame.event.get():\r\n if events.type == pygame.QUIT:\r\n self.running = False\r\n self.playing = False\r\n\r\n\r\n\r\n\r\n\r\n#定义类\r\n\r\nfrom setting import *\r\nfrom pygame import *\r\nimport pygame, sys, time\r\nfrom random import *\r\nfrom math import *\r\n\r\nvec = pygame.math.Vector2 # 运用向量\r\n\r\n\r\nclass Player(pygame.sprite.Sprite): # 玩家类\r\n Bullet_groups = pygame.sprite.Group()\r\n flag = 1 # 判断方向的flag\r\n\r\n def __init__(self):\r\n pygame.sprite.Sprite.__init__(self)\r\n self.image = pygame.image.load(r'tank.png').convert() # 图片的加载\r\n self.image.set_colorkey(white) # 设置忽略白色\r\n self.rect = self.image.get_rect()\r\n self.rect.midbottom = (115, 130)\r\n\r\n self.pos = vec(115, 130)\r\n\r\n self.last_time = time.time() # 记录上一次时间 用来设置子弹频率等\r\n\r\n def update(self):\r\n\r\n if key_pressed[pygame.K_SPACE]:\r\n self.shoot()\r\n self.rect.midbottom = self.pos\r\n\r\n def shoot(self): # 
开火\r\n self.now = time.time() # 获取现在时间\r\n if self.now - self.last_time > 0.8: # 子弹时间间隔\r\n bullet = Bullet(self.pos.x, self.pos.y)\r\n self.Bullet_groups.add(bullet)\r\n self.last_time = self.now\r\n\r\n\r\nclass Platform(pygame.sprite.Sprite): # 地图创建\r\n def __init__(self, x, y, w, h): # x,y,宽,高\r\n pygame.sprite.Sprite.__init__(self)\r\n self.image = pygame.Surface((w, h)) # 砖块大小\r\n self.image.fill(yellow) # 砖颜色\r\n self.rect = self.image.get_rect()\r\n self.rect.x = x\r\n self.rect.y = y\r\n\r\n\r\nclass Enemy(pygame.sprite.Sprite): # 与player 相同\r\n Bullet_groups = pygame.sprite.Group()\r\n flag = 1\r\n\r\n def __init__(self):\r\n pygame.sprite.Sprite.__init__(self)\r\n self.image = pygame.image.load(r'tank.png').convert()\r\n self.image.set_colorkey(white)\r\n self.rect = self.image.get_rect()\r\n self.rect.midbottom = (315, 130)\r\n self.pos = vec(315, 130)\r\n self.bar = 100\r\n self.last_time = time.time()\r\n self.flag = 1\r\n\r\n def update(self):\r\n\r\n if key_pressed[pygame.K_p]:\r\n self.shoot()\r\n\r\n self.rect.midbottom = self.pos\r\n\r\n def shoot(self):\r\n self.now = time.time()\r\n if self.now - self.last_time > 0.8:\r\n\r\n bullet = Bullet(self.pos.x, self.pos.y)\r\n self.Bullet_groups.add(bullet)\r\n self.Bullet_groups.update(self.flag)\r\n self.last_time = self.now\r\n\r\n\r\nclass Bullet(pygame.sprite.Sprite): # 炮弹组\r\n def __init__(self, x, y): # 炮弹该有的位置 玩家周围\r\n pygame.sprite.Sprite.__init__(self)\r\n self.image = pygame.image.load(r'炮弹.png ').convert()\r\n self.image.set_colorkey(white)\r\n self.rect = self.image.get_rect()\r\n self.rect.centerx = x + 10 # 这里是准确的位置,未进行准确更改\r\n self.rect.bottom = y - 12\r\n self.speed = 5\r\n\r\n def update(self, flag):\r\n if flag == 1: # right\r\n self.rect.x += self.speed\r\n if flag == 2: # left\r\n self.rect.x -= self.speed\r\n if flag == 3: # up\r\n self.rect.y -= self.speed\r\n if flag == 4: # down\r\n self.rect.y += self.speed\r\n\r\n\r\n\r\n#设置相关文件\r\nwidth = 1000\r\nheight = 600\r\nFps = 60\r\nfood = 20\r\ngap = 3\r\nmove_space = 1.5\r\nback_space = 5\r\nMap1 = [(0, 0, width * 2, 10), (0, 10, 10, height * 2),\r\n (0, height - 10, width * 2, 10), (width - 210, 0, 10, height * 2),\r\n (50, 50, 100, 20), (250, 50, 100, 20), (150, 230, 100, 20), (100, 340, 200, 20),\r\n (50, 70, 20, 90), (130, 70, 20, 90), (250, 70, 20, 90), (330, 70, 20, 90),\r\n (130, 280, 20, 70), (250, 300, 20, 50),\r\n (80, 320, 20, 20), (300, 320, 20, 20), (185, 200, 30, 30), (185, 250, 30, 30),\r\n (60, 300, 20, 20), (320, 300, 20, 20),\r\n (40, 280, 20, 20), (340, 280, 20, 20),\r\n (490, 100, 160, 40), (650, 100, 40, 200), (425, 250, 150, 40), (425, 290, 40, 80),\r\n (510, 365, 160, 40), (695, 460, 95, 40), (595, 454, 40, 100), (190, 460, 30, 30),\r\n (300, 450, 200, 40), (100, 425, 30, 130), (200, 520, 230, 25), (725, 70, 30, 30),\r\n (725, 140, 30, 30), (725, 210, 30, 30), (725, 280, 30, 30), (725, 365, 30, 30)\r\n ] # map\r\n# color\r\n\r\nblack = (0, 0, 0)\r\nwhite = (255, 255, 255)\r\nred = (255, 0, 0)\r\ngreen = (0, 255, 0)\r\nblue = (0, 0, 255)\r\nyellow = (255, 200, 0)\r\npurple = (128, 138, 135)\r\n","sub_path":"TankWar/主函数.py","file_name":"主函数.py","file_ext":"py","file_size_in_byte":11618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"190565556","text":"import json\n\nfrom flask import jsonify, request, Blueprint, abort\n\nfrom queries.controller import find_all_ids, read\n\nqueries_api = Blueprint('queries_api', __name__)\n\n\n@queries_api.route(\"/queries\", methods=['GET'])\ndef 
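# --- hedged aside: the Blueprint above only serves requests once it is
# --- registered on an application; a minimal host app (factory name is
# --- hypothetical):
from flask import Flask

def create_app():
    app = Flask(__name__)
    app.register_blueprint(queries_api)   # mounts /queries and /query/<id>
    return app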
get_all_queries():\n    \"\"\"Returns all query ids stored in MongoDB\"\"\"\n    return jsonify(find_all_ids())\n\n\n@queries_api.route(\"/query/<id>\", methods=['POST'])\ndef get_query(id):\n    \"\"\"Returns the query for the given id\"\"\"\n    query = read(id)\n    if not query:\n        abort(404)\n    query = query.value.replace(\"'\", '\"')\n    if request.data:\n        filters = json.loads(str(request.data, encoding='utf-8'))\n        for v in filters.items(): # Loop through every filter and substitute the requested value\n            query = query.replace('{{ ' + v[0] + ' }}', v[1])\n    return query\n","sub_path":"queries/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"297294313","text":"from dataclasses import dataclass\nfrom typing import Optional, Dict, List\n\nfrom models import FilteredModel\nfrom models.affiliation import search_matching_affiliation, Affiliation\nfrom utils import search_matching_orcid, to_bool, to_str\n\n\n@dataclass\nclass Person(FilteredModel):\n    address: str\n    affiliation_id: Optional[int]\n    author_type: str\n    first_name: str\n    last_name: str\n    is_speaker: bool\n    title: str\n    orcid: Optional[str]\n    email: str\n\n    @classmethod\n    def from_json(cls, allow_list, json_content, orcids: Dict[str, Dict],\n                  affiliations: List[Affiliation], email_agreement=False, contact_email=None):\n        first_name = json_content[\"first_name\"]\n        last_name = json_content[\"last_name\"]\n        orcid_id = search_matching_orcid(first_name, last_name, orcids)\n        affiliation_id = search_matching_affiliation(to_str(json_content[\"affiliation\"]), affiliations)\n        email = None\n        if email_agreement:\n            parsed_email = to_str(json_content[\"email\"])\n            if parsed_email is not None and parsed_email.lower() == contact_email.lower():\n                email = contact_email\n        return Person(\n            allow_list=allow_list,\n            first_name=to_str(json_content[\"first_name\"]),\n            last_name=to_str(json_content[\"last_name\"]),\n            affiliation_id=affiliation_id,\n            address=to_str(json_content[\"address\"]),\n            author_type=json_content[\"author_type\"],\n            is_speaker=to_bool(json_content[\"is_speaker\"]),\n            title=to_str(json_content[\"title\"]),\n            orcid=orcid_id,\n            email=email,\n        )\n\n    def __repr__(self):\n        return f\"{self.__class__.__name__}(address='{self.address}', \" \\\n               f\"affiliation_id='{self.affiliation_id}', author_type='{self.author_type}', \" \\\n               f\"first_name='{self.first_name}', last_name='{self.last_name}', \" \\\n               f\"is_speaker='{self.is_speaker}', title='{self.title}', \" \\\n               f\"orcid='{self.orcid}')\"\n","sub_path":"models/person.py","file_name":"person.py","file_ext":"py","file_size_in_byte":2045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"45108871","text":"from superwires import games, color\r\nimport math\r\n\r\ngames.init(screen_width=800, screen_height=600,fps=60)\r\n\r\n\r\nclass Blue(games.Sprite):\r\n    image = games.load_image(\"blue_tank.png\")\r\n    NEXT_SHOT = 50\r\n\r\n    def __init__(self):\r\n        super(Blue,self).__init__(image = Blue.image,\r\n                                  x = 40,\r\n                                  y = games.screen.height/2)\r\n\r\n        self.missile_wait = 0\r\n\r\n        self.score = games.Text(value=5,\r\n                                size = 50,\r\n                                color = color.blue,\r\n                                x = 40,\r\n                                y = 40,\r\n                                is_collideable = False)\r\n\r\n        games.screen.add(self.score)\r\n\r\n    def update(self):\r\n        if games.keyboard.is_pressed(games.K_d):\r\n            self.angle += 1\r\n        if games.keyboard.is_pressed(games.K_a):\r\n            self.angle -= 1\r\n        if games.keyboard.is_pressed(games.K_w):\r\n            angle = 
self.angle * math.pi / 180\r\n self.x += 1 * math.cos(angle)\r\n self.y -= 1 * -math.sin(angle)\r\n\r\n\r\n if self.missile_wait > 0:\r\n self.missile_wait -= 1\r\n if games.keyboard.is_pressed(games.K_SPACE) and self.missile_wait == 0:\r\n shoot = Missile(self.x, self.y, self.angle)\r\n games.screen.add(shoot)\r\n self.missile_wait = Blue.NEXT_SHOT\r\n\r\n if self.left < 0:\r\n self.left = 0\r\n if self.right > games.screen.width:\r\n self.right = games.screen.width\r\n if self.top < 0:\r\n self.top = 0\r\n if self.bottom > games.screen.height:\r\n self.bottom = games.screen.height\r\n\r\n for sprites in self.overlapping_sprites:\r\n if self.left > sprites.x:\r\n self.left = self.x - 27\r\n if self.right < sprites.x:\r\n self.right = self.x + 27\r\n if self.top > sprites.y:\r\n self.top = self.y - 27\r\n if self.bottom < sprites.y:\r\n self.bottom = self.y + 27\r\n\r\n\r\n def die(self):\r\n self.destroy()\r\n self.score.value -= 1\r\n explode = Explosion(x = self.x, y = self.y)\r\n games.screen.add(explode)\r\n if self.score.value > 0:\r\n games.screen.add(self)\r\n self.x = 40\r\n self.y = games.screen.height/2\r\n\r\n if self.score.value == 0:\r\n game_over = games.Message(value=\"Red Wins!\",\r\n size = 90,\r\n color = color.black,\r\n x = games.screen.width/2,\r\n y = games.screen.height/2,\r\n lifetime = 5 * games.screen.fps,\r\n after_death = games.screen.quit,\r\n is_collideable = False)\r\n\r\n games.screen.add(game_over)\r\n\r\n\r\nclass Red(games.Sprite):\r\n image = games.load_image(\"red_tank.png\")\r\n NEXT_SHOT = 50\r\n\r\n def __init__(self):\r\n super(Red, self).__init__(image=Red.image,\r\n x = games.screen.width - 40,\r\n y = games.screen.height/2,\r\n angle = 180)\r\n self.missile_wait = 0\r\n\r\n self.score = games.Text(value = 5,\r\n size = 50,\r\n color = color.red,\r\n x = games.screen.width - 40,\r\n y = 40,\r\n is_collideable = False)\r\n\r\n games.screen.add(self.score)\r\n\r\n def update(self):\r\n if games.keyboard.is_pressed(games.K_LEFT):\r\n self.angle -= 1\r\n if games.keyboard.is_pressed(games.K_RIGHT):\r\n self.angle += 1\r\n if games.keyboard.is_pressed(games.K_UP):\r\n angle = self.angle * math.pi / 180\r\n self.x += 1 * math.cos(angle)\r\n self.y -= 1 * -math.sin(angle)\r\n if self.missile_wait > 0:\r\n self.missile_wait -= 1\r\n if games.keyboard.is_pressed(games.K_RALT) and self.missile_wait == 0:\r\n shoot = Missile(self.x, self.y, self.angle)\r\n games.screen.add(shoot)\r\n self.missile_wait = Red.NEXT_SHOT\r\n\r\n if self.left < 0:\r\n self.left = 0\r\n if self.right > games.screen.width:\r\n self.right = games.screen.width\r\n if self.top < 0:\r\n self.top = 0\r\n if self.bottom > games.screen.height:\r\n self.bottom = games.screen.height\r\n\r\n for sprites in self.overlapping_sprites:\r\n if self.left > sprites.x:\r\n self.left = self.x - 27\r\n if self.right < sprites.x:\r\n self.right = self.x + 27\r\n if self.top > sprites.y:\r\n self.top = self.y - 27\r\n if self.bottom < sprites.y:\r\n self.bottom = self.y + 27\r\n\r\n def die(self):\r\n self.destroy()\r\n self.score.value -= 1\r\n explode = Explosion(x = self.x, y = self.y)\r\n games.screen.add(explode)\r\n if self.score.value > 0:\r\n games.screen.add(self)\r\n self.x = games.screen.width - 40\r\n self.y = games.screen.height/2\r\n\r\n if self.score.value == 0:\r\n game_over = games.Message(value = \"Blue Wins!\",\r\n size = 90,\r\n color = color.black,\r\n x = games.screen.width/2,\r\n y = games.screen.height/2,\r\n lifetime = 5 * games.screen.fps,\r\n after_death = 
games.screen.quit,\r\n is_collideable = False)\r\n\r\n games.screen.add(game_over)\r\n\r\n\r\n\r\n\r\nclass Explosion(games.Animation):\r\n images = [\"explosion1.bmp\",\r\n \"explosion2.bmp\",\r\n \"explosion3.bmp\",\r\n \"explosion4.bmp\",\r\n \"explosion5.bmp\",\r\n \"explosion6.bmp\",\r\n \"explosion7.bmp\",\r\n \"explosion8.bmp\",\r\n \"explosion9.bmp\"]\r\n\r\n def __init__(self,x,y):\r\n super(Explosion, self).__init__(images = Explosion.images,\r\n x = x,\r\n y = y,\r\n n_repeats = 1,\r\n repeat_interval = 5,\r\n is_collideable = False)\r\n\r\n\r\nclass Missile(games.Sprite):\r\n image = games.load_image(\"missile.png\")\r\n sound = games.load_sound(\"explosion.wav\")\r\n BUFFER = 60\r\n LIFE = 40\r\n VELOCITY = 10\r\n\r\n def __init__(self,tank_x,tank_y,tank_angle):\r\n\r\n missile_angle = tank_angle * math.pi / 180\r\n\r\n buffer_x = Missile.BUFFER * math.cos(missile_angle)\r\n buffer_y = Missile.BUFFER * math.sin(missile_angle)\r\n x = tank_x + buffer_x\r\n y = tank_y + buffer_y\r\n\r\n dx = Missile.VELOCITY * math.cos(missile_angle)\r\n dy = Missile.VELOCITY * math.sin(missile_angle)\r\n\r\n super(Missile, self).__init__(image = Missile.image,\r\n x = x, y = y,\r\n dx = dx, dy = dy)\r\n\r\n def update(self):\r\n for sprite in self.overlapping_sprites:\r\n sprite.die()\r\n self.destroy()\r\n explosion = Explosion(self.x, self.y)\r\n games.screen.add(explosion)\r\n Missile.sound.play()\r\n\r\n def die(self):\r\n self.destroy()\r\n\r\n\r\nclass Walls(games.Sprite):\r\n def _init__(self, image, x, y):\r\n super(Walls, self).__init__(x = x,\r\n y = y)\r\n\r\n def die(self):\r\n pass\r\n\r\n\r\n\r\n\r\ndef main():\r\n bg = games.load_image(\"dirt.jpg\")\r\n games.screen.background = bg\r\n\r\n games.music.load(\"main.mp3\")\r\n games.music.play()\r\n\r\n blue_tank = Blue()\r\n games.screen.add(blue_tank)\r\n\r\n red_tank = Red()\r\n games.screen.add(red_tank)\r\n\r\n wall_one = games.load_image(\"wall_center.jpg\")\r\n w_one = Walls(image = wall_one, x = games.screen.width/2, y = games.screen.height/2)\r\n games.screen.add(w_one)\r\n\r\n wall_two = games.load_image(\"wall_h.jpg\")\r\n w_two = Walls(image = wall_two, x = games.screen.width/2, y = 100)\r\n games.screen.add(w_two)\r\n\r\n wall_three = games.load_image(\"wall_h.jpg\")\r\n w_three = Walls(image = wall_three, x = games.screen.width/2, y = games.screen.height - 100)\r\n games.screen.add(w_three)\r\n\r\n wall_four = games.load_image(\"wall_v.jpg\")\r\n w_four = Walls(image = wall_four, x = games.screen.width - 200, y = games.screen.height/2, angle = 90)\r\n games.screen.add(w_four)\r\n\r\n wall_five = games.load_image(\"wall_v.jpg\")\r\n w_five = Walls(image = wall_five, x = 200, y = games.screen.height/2, angle = 90)\r\n games.screen.add(w_five)\r\n\r\n games.screen.mainloop()\r\n\r\n\r\nmain()\r\n","sub_path":"PY3E Challenges/Challenges Chapter 12/Challenge 1/Challenge 3/challenge_3.py","file_name":"challenge_3.py","file_ext":"py","file_size_in_byte":9070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"640471474","text":"from . 
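# --- hedged aside: Walls above defines `_init__` (one leading underscore), so
# --- it is never called as a constructor; the sprites only work because the
# --- inherited games.Sprite constructor happens to accept the same keywords.
# --- The intended definition, assuming the superwires import at the top of
# --- that record:
class WallsFixed(games.Sprite):
    def __init__(self, image, x, y):
        super(WallsFixed, self).__init__(image=image, x=x, y=y)

    def die(self):
        pass   # walls absorb missile hits instead of being destroyed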
import views\n\nfrom django.conf.urls import url\n\n\napp_name = 'blog'\nurlpatterns = [\n url(r'^$', views.BlogIndexView.as_view(), name='index'),\n url(r'^posts/(?P[-\\w\\d]+)/$', views.BlogDetailView.as_view(), name='detail'),\n url(r'^popular/$', views.PopularPostsView.as_view(), name='popular_posts'),\n url(r'^tags/$', views.TagIndexView.as_view(), name='tags'),\n url(r'^tags/(?P[-_\\sa-zA-Z0-9]+)/$', views.TagDetailView.as_view(), name='tag_detail')\n]\n","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"316667562","text":"from mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef plot_score(z_func, title, z_axis, z_title, filename):\n n_points = 50\n\n spec = np.linspace(0., 1.0, n_points)\n sens = np.linspace(0., 1.0, n_points)\n\n x = np.zeros(len(spec) * len(sens))\n y = np.zeros(len(spec) * len(sens))\n z = np.zeros(len(spec) * len(sens))\n\n x, y = np.meshgrid(spec, sens)\n z = z_func(x, y)\n\n fig = plt.figure()\n fig.suptitle(title)\n plt.locator_params(nbins=10)\n ax = fig.gca(projection='3d')\n ax.plot_surface(x, y, z, cmap=cm.viridis, alpha=1)\n ax.view_init(elev=20., azim=-135.5)\n\n ax.set_xlabel('specificity')\n ax.set_ylabel('sensitivity')\n ax.set_zlabel(z_title)\n\n ax.w_xaxis.gridlines.set_lw(1.0)\n ax.w_yaxis.gridlines.set_lw(1.0)\n ax.w_zaxis.gridlines.set_lw(1.0)\n\n ax.w_xaxis._axinfo.update({'grid': {'color': (0, 0, 0, 0.4)}})\n ax.w_yaxis._axinfo.update({'grid': {'color': (0, 0, 0, 0.4)}})\n ax.w_zaxis._axinfo.update({'grid': {'color': (0, 0, 0, 0.4)}})\n\n fig.set_size_inches(25, 16)\n\n ax.set_yticks(np.arange(0, 1.2, 0.2))\n ax.set_xticks(np.arange(0, 1.2, 0.2))\n ax.set_zticks(z_axis)\n\n ax.xaxis.labelpad = 60\n ax.yaxis.labelpad = 60\n ax.zaxis.labelpad = 60\n\n ax.tick_params(axis='z', which='major', pad=23)\n ax.tick_params(axis='x', which='major', pad=15)\n ax.tick_params(axis='y', which='major', pad=15)\n\n mpl.rcParams.update({'font.size': 36})\n\n fig.subplots_adjust(top=1)\n\n plt.savefig(filename, dpi=72, bbox_inches='tight')\n\n\n# plot_score(lambda spec, sens: (spec * 3 + sens) / 4, 'Weighted mean of specifity (w = 3) and sensitivity (w = 1)', np.arange(0, 1.2, 0.2), 'weighted mean', \"weighted_normal_mean.png\")\nplot_score(lambda spec, sens: spec**1.75 * sens**0.75, 'Product using exponents ($\\mathregular{spec^{1.75} * sens^{0.75}}$)', np.arange(0, 1.1, 0.1), 'exponent weighted', \"exp_weighted_mean.png\")\n# plot_score(lambda spec, sens: spec + sens - 1, 'Youden\\'s J statistic', np.arange(-1.0, 1.2, 0.2), 'Youden\\'s J statistic', \"youden.png\")\n# plot_score(lambda spec, sens: (3 + 1) * (spec * sens) / (spec + 3 * sens), 'Weighted harmonic mean of specifity (w = 3) and sensitivity (w = 1)', np.arange(0, 1.1, 0.1), 'weighted mean', \"weighted_mean.png\")\n# plot_score(lambda spec, sens: sens / (sens + (1 - spec)), 'Positive Predictive Value', np.arange(0, 1.1, 0.1), 'Positive Predictive Value', \"ppv.png\")\n# plot_score(lambda spec, sens: spec / (spec + (1 - sens)), 'Negative Predictive Value', np.arange(0, 1.1, 0.1), 'Negative Predictive Value', \"npv.png\")\n# \n# \n# def cohen(sens, spec):\n# a = sens\n# b = 1 - spec\n# d = spec\n# c = 1 - sens\n# \n# total = (a + b + c + d)\n# \n# po = (a + d) / total\n# ma = ((a + b) * (a + c)) / (a + b + c + d)\n# mb = ((c + d) * (b + d)) / (a + b + c + d)\n# pe = 
(ma + mb) / total\n# cohen = (po - pe) / (1 - pe)\n# return cohen\n# \n# plot_score(cohen, 'Cohen\\'s kappa', np.arange(-1.0, 1.2, 0.2), 'Cohen\\'s kappa', \"cohen.png\")\n# plot_score(lambda spec, sens: (sens + spec) / (sens + spec + (1 - sens) + (1 - spec)), 'Accuracy', np.arange(0, 1.1, 0.1), 'accuracy', \"accuracy.png\")\n# \n# \n# def f1(sens, spec):\n# a = sens\n# b = 1 - spec\n# c = 1 - sens\n# \n# f1 = (2) * a / ((2 * a) + (1 * c) + b)\n# return f1\n# \n# plot_score(f1, '$\\mathregular{F_1}$ score (harmonic mean of recall and precision)', np.arange(0.0, 1.1, 0.1), '$\\mathregular{F_1}$ score', \"f1_score.png\")\n# \n# \n# def f05(sens, spec):\n# a = sens\n# b = 1 - spec\n# c = 1 - sens\n# \n# f05 = (1 + 0.5**2) * a / ((1 + 0.5**2 * a) + (0.5**2 * c) + b)\n# return f05\n# \n# plot_score(f05, '$\\mathregular{F_{0.5}}$ score (weighs recall lower than precision)', np.arange(0.0, 1.1, 0.1), '$\\mathregular{F_{0.5}}$ score', \"f05_score.png\")\n# \n# \n# def f2(sens, spec):\n# a = sens\n# b = 1 - spec\n# c = 1 - sens\n# \n# f05 = (1 + 2**2) * a / ((1 + 2**2 * a) + (2**2 * c) + b)\n# return f05\n# \n# plot_score(f2, '$\\mathregular{F_2}$ score (weighs recall higher than precision)', np.arange(0.0, 1.1, 0.1), '$\\mathregular{F_2}$ score', \"f2_score.png\")\n","sub_path":"results/measures/scores.py","file_name":"scores.py","file_ext":"py","file_size_in_byte":4216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"633570279","text":"from __future__ import division, print_function\n\nimport time\nimport math\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.autograd import Variable\nfrom scipy.stats import multivariate_normal\nfrom rampwf.workflows.image_classifier import get_nb_minibatches\n\nis_cuda = torch.cuda.is_available()\nfactor = 100000\n\n\nclass ObjectDetector(object):\n def __init__(self):\n self.net = Net()\n\n def fit(self, X, y):\n # torch.load('model.pt')\n shape = X.shape[1:]\n print('Computing target map...')\n y = np.array([gaussian_detection_map(yi, shape) for yi in y])\n batch_size = 16\n nb_epochs = 30\n lr = 1e-3\n valid_ratio = 0.05\n\n if is_cuda:\n self.net = self.net.cuda()\n net = self.net\n optimizer = optim.Adam(net.parameters(), lr=lr)\n nb_valid = int(valid_ratio * len(X))\n nb_train = len(X) - nb_valid\n nb_train_minibatches = get_nb_minibatches(nb_train, batch_size)\n criterion = nn.MSELoss()\n if is_cuda:\n criterion = criterion.cuda()\n\n for epoch in range(nb_epochs):\n if epoch % 10 == 0:\n lr /= 10\n print('learning rate =', lr)\n t0 = time.time()\n net.train() # train mode\n nb_trained = 0\n nb_updates = 0\n train_loss = []\n train_mae = []\n train_rmse_n = []\n train_err_n = []\n X_train = X[:nb_train]\n X_valid = X[nb_train:]\n y_train = y[:nb_train]\n y_valid = y[nb_train:]\n for i in range(0, len(X_train), batch_size):\n net.train() # train mode\n idxs = slice(i, i + batch_size)\n X_minibatch = X_train[idxs]\n X_minibatch = self._make_X_minibatch(X_minibatch)\n y_minibatch = y_train[idxs]\n y_minibatch = _make_variable(y_minibatch)\n # zero-out the gradients because they accumulate by default\n optimizer.zero_grad()\n y_minibatch_pred = self._predict_map_torch(X_minibatch)\n loss = criterion(y_minibatch_pred, y_minibatch)\n loss.backward() # compute gradients\n optimizer.step() # update params\n\n # Loss and accuracy\n train_mae.append(\n self._get_mae_torch(y_minibatch_pred, y_minibatch))\n train_rmse_n.append(\n 
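# --- hedged aside: in the commented-out f05/f2 helpers above the denominator
# --- reads (1 + beta**2 * a) where the F-beta formula wants (1 + beta**2) * a;
# --- the general form, with tp = sens, fp = 1 - spec, fn = 1 - sens as in f1:
def f_beta(sens, spec, beta=1.0):
    tp, fp, fn = sens, 1.0 - spec, 1.0 - sens
    b2 = beta * beta
    return (1.0 + b2) * tp / ((1.0 + b2) * tp + b2 * fn + fp)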
self._get_rmse_n_torch(y_minibatch_pred, y_minibatch))\n train_err_n.append(\n self._get_err_n_torch(y_minibatch_pred, y_minibatch))\n train_loss.append(loss.data[0])\n nb_trained += X_minibatch.size(0)\n nb_updates += 1\n if nb_updates % 100 == 0 or nb_updates == nb_train_minibatches:\n print(\n 'Epoch [{}/{}], [trained {}/{}]'\n ', avg_loss: {:.8f}'\n ', avg_train_mae: {:.4f}'\n ', avg_train_err_n: {:.4f}'\n ', avg_train_rmse_n: {:.4f}'.format(\n epoch + 1, nb_epochs, nb_trained, nb_train,\n np.mean(train_loss), np.mean(train_mae),\n np.mean(train_err_n), np.mean(train_rmse_n)))\n\n torch.save(self.net.state_dict(), 'model.pt')\n net.eval() # eval mode\n y_valid_pred = self._predict_map(X_valid)\n valid_mae = self._get_mae(y_valid_pred, y_valid)\n valid_err_n = self._get_err_n(y_valid_pred, y_valid)\n valid_rmse_n = self._get_rmse_n(y_valid_pred, y_valid)\n\n np.save('x.npy', X_valid)\n np.save('y.npy', y_valid)\n np.save('y_pred.npy', y_valid_pred)\n\n delta_t = time.time() - t0\n print('Finished epoch {}'.format(epoch + 1))\n print('Time spent : {:.4f}'.format(delta_t))\n print('Train mae : {:.4f}'.format(np.mean(train_mae)))\n print('Train err_n : {:.4f}'.format(np.mean(train_err_n)))\n print('Train rmse_n : {:.4f}'.format(np.mean(train_rmse_n)))\n print('Valid mae : {:.4f}'.format(np.mean(valid_mae)))\n print('Valid err_n : {:.4f}'.format(np.mean(valid_err_n)))\n print('Valid rmse_n : {:.4f}'.format(np.mean(valid_rmse_n)))\n\n def _make_X_minibatch(self, X_minibatch):\n X_minibatch = np.expand_dims(X_minibatch, axis=1)\n X_minibatch = _make_variable(X_minibatch.astype(np.float32))\n return X_minibatch\n\n def _get_mae_torch(self, y_pred, y_true):\n y_pred = y_pred.cpu().data.numpy()\n y_true = y_true.cpu().data.numpy()\n return self._get_mae(y_pred, y_true)\n\n def _get_mae(self, y_pred, y_true):\n return np.sum(np.abs(y_pred - y_true), axis=0) / len(y_pred)\n\n def _get_rmse_n_torch(self, y_pred, y_true):\n y_pred = y_pred.cpu().data.numpy()\n y_true = y_true.cpu().data.numpy()\n return self._get_rmse_n(y_pred, y_true)\n\n def _get_rmse_n(self, y_pred, y_true):\n n_pred = np.sum(y_pred, axis=(1, 2))\n n_true = np.sum(y_true, axis=(1, 2))\n return np.sqrt(np.sum((n_true - n_pred) ** 2) / len(n_true)) / factor\n\n def _get_err_n(self, y_pred, y_true):\n n_pred = np.round(np.sum(y_pred, axis=(1, 2)) / factor + 1.0)\n n_true = np.round(np.sum(y_true, axis=(1, 2)) / factor)\n return np.mean(n_pred < n_true)\n\n def _get_err_n_torch(self, y_pred, y_true):\n y_pred = y_pred.cpu().data.numpy()\n y_true = y_true.cpu().data.numpy()\n return self._get_err_n(y_pred, y_true)\n\n def _predict_map_torch(self, X_minibatch):\n y_map_pred = self.net(X_minibatch)\n s = y_map_pred.size\n y_map_pred = y_map_pred.view(s(0), s(2), s(3))\n return y_map_pred\n\n def _predict_map(self, X):\n y_map_pred = np.empty(X.shape, dtype=np.float32)\n batch_size = 16\n for i in range(0, len(X), batch_size):\n idxs = slice(i, i + batch_size)\n X_minibatch = X[idxs]\n X_minibatch = self._make_X_minibatch(X_minibatch)\n y_map_pred[idxs] = self._predict_map_torch(\n X_minibatch).cpu().data.numpy()\n return y_map_pred\n\n def predict(self, X):\n y_map_pred = self._predict_map(X)\n y_pred_array = np.empty(len(y_map_pred), dtype=object)\n y_pred_array[:] = y_map_pred\n return y_pred_array\n\n\ndef _make_variable(X):\n variable = Variable(torch.from_numpy(X))\n if is_cuda:\n variable = variable.cuda()\n return variable\n\n\nclass Net(nn.Module):\n def __init__(self, w=224, h=224):\n super(Net, self).__init__()\n self.block1 = 
nn.Sequential(\n nn.Conv2d(1, 32, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 32, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=2, stride=2)\n )\n self.block2 = nn.Sequential(\n nn.Conv2d(32, 64, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(64, 64, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=2, stride=2)\n )\n self.block3 = nn.Sequential(\n nn.Conv2d(64, 128, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 128, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 128, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=2, stride=2)\n )\n self.block4 = nn.Sequential(\n nn.Conv2d(128, 256, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(256, 256, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(256, 256, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=2, stride=2)\n )\n# self.block5 = nn.Sequential(\n# nn.Conv2d(256, 512, kernel_size=3, padding=1),\n# nn.ReLU(inplace=True),\n# nn.Conv2d(512, 512, kernel_size=3, padding=1),\n# nn.ReLU(inplace=True),\n# nn.Conv2d(512, 512, kernel_size=3, padding=1),\n# nn.ReLU(inplace=True),\n# # nn.MaxPool2d(kernel_size=2, stride=2)\n # )\n self.block6 = nn.Sequential(\n # nn.ConvTranspose2d(512, 256, 3, 1, bias=False),\n nn.ReLU(True),\n nn.ConvTranspose2d(\n in_channels=256, out_channels=128,\n kernel_size=3, stride=2, padding=1, output_padding=1),\n nn.ReLU(True),\n nn.ConvTranspose2d(\n in_channels=128, out_channels=64,\n kernel_size=3, stride=2, padding=1, output_padding=1),\n nn.ReLU(True),\n nn.ConvTranspose2d(\n in_channels=64, out_channels=32,\n kernel_size=3, stride=2, padding=1, output_padding=1),\n nn.ReLU(True),\n nn.ConvTranspose2d(\n in_channels=32, out_channels=1,\n kernel_size=3, stride=2, padding=1, output_padding=1)\n )\n self._initialize_weights()\n\n def forward(self, x):\n x = self.block1(x)\n x = self.block2(x)\n x = self.block3(x)\n x = self.block4(x)\n x = self.block6(x)\n return x\n\n def _initialize_weights(self):\n # Source: https://github.com/pytorch/vision/blob/master/torchvision/\n # models/vgg.py\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n n = m.weight.size(1)\n m.weight.data.normal_(0, 0.01)\n m.bias.data.zero_()\n elif isinstance(m, nn.ConvTranspose2d):\n n = m.weight.size(1)\n m.weight.data.normal_(0, 0.01)\n m.bias.data.zero_()\n\n\ndef _flatten(x):\n return x.view(x.size(0), -1)\n\n\ndef single_gaussian_detection_map(gdm, pixels, cx, cy, r):\n next_gdm = multivariate_normal.pdf(\n pixels, mean=[cx, cy], cov=[[r, 0], [0, r]]).reshape(gdm.shape)\n next_gdm /= next_gdm.sum()\n next_gdm *= factor\n return gdm + next_gdm\n\n\ndef gaussian_detection_map(list_of_circles, shape):\n pixels = np.array(\n np.meshgrid(range(shape[0]), range(shape[1]))).T.reshape(-1, 2)\n gdm = np.zeros(shape, dtype=np.float32)\n gdm += np.sum([single_gaussian_detection_map(gdm, pixels, *y)\n for y in list_of_circles], axis=0)\n return gdm\n","sub_path":"submissions/gdm/object_detector.py","file_name":"object_detector.py","file_ext":"py","file_size_in_byte":11081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"43766924","text":"from __future__ import print_function\nimport matplotlib.pyplot as pl\nimport numpy as np\nimport sys\nimport tables\nfrom numba import njit\nfrom scipy.ndimage.filters import gaussian_filter1d\n\nfrom astropy.io import fits\nfrom numba_models import *\n\ndef mixture_glm(\n alpha=0.1,\n xco_min=0.0, xco_max=10.,\n Y_b_mean=0.0, Y_b_stddev=30.0,\n offset_mean=0.0, offset_stddev=3.0,\n stddev_min=1e-5, stddev_max=None,\n d0=10./60., d_scale=1.):\n\n\n @njit\n def lnprior(p):\n # offset, x_co, stddev, log_Pb, Y_b, V_b = p[:6]\n offset, x_co, stddev= p[:3]\n betas = p[3:]\n prior = 0.\n\n # uniform prior for log(P_b)\n # if (log_Pb < -3.) or (log_Pb > 0.):\n # return -1.e12\n\n # normal for the offset\n prior += ln_normal(offset, offset_mean, offset_stddev)\n\n # scale-invariant for the scatter\n prior += ln_loguniform(stddev)\n \n # wide Gaussian for Yb\n # prior += ln_normal(Y_b, Y_b_mean, Y_b_stddev)\n\n # Vb\n \n\n # laplace prior for beta\n l1_penalty = 0.\n\n for i in range(len(betas)):\n l1_penalty += ln_laplace_unscaled(betas[i])\n l1_penalty *= alpha\n prior += l1_penalty\n\n return prior\n\n\n @njit\n def lnlike(p, hi, w_co, tau, distances):\n # offset, x_co, stddev, log_Pb, Y_b, V_b = p[:6]\n offset, x_co, stddev = p[:3]\n betas = p[3:]\n\n like = 0.\n n_points, n_channels = hi.shape\n\n for i in range(n_points):\n nhi_eff = 0.\n for j in range(n_channels):\n nhi_eff += hi[i,j] * betas[j]\n\n # first term of the mixture model (for well-behaved data)\n diff = tau[i] - nhi_eff - w_co[i] * x_co - offset\n diff_sq = diff * diff\n scatter = stddev * (1. + d_scale * (distances[i]/d0)**2)\n scatter_sq = scatter * scatter\n\n good = (-diff_sq/scatter_sq/2.) - np.log(scatter)\n \n # second term of the mixture model (for outliers)\n # diff_outliers = diff - Y_b\n # diff_outliers_sq = diff_outliers * diff_outliers\n # scatter_outliers_sq = scatter_sq + V_b * V_b\n \n # bad = np.exp(-diff_outliers_sq/scatter_outliers_sq/2.) 
* 10**log_Pb / np.sqrt(scatter_outliers_sq)\n\n # sum them up\n # logsum = np.log(good)\n # if np.isfinite(logsum):\n like += good\n # else:\n # return -1.e12\n\n return like\n\n\n @njit\n def lnwrap(p, hi, w_co, tau, distances):\n \"\"\"\n Sum of log-prior and log-likelihood\n \"\"\"\n return -lnprior(p) - lnlike(p, hi, w_co, tau, distances)\n # l = lnprior(p)\n # if np.isfinite(l):\n # return -1. * (l + lnlike(p, hi, w_co, tau, distances))\n # else:\n # return np.inf\n\n return lnwrap\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":2924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"212354881","text":"import pandas as pd\n\npath_target_data = '/Users/beartwo/Documents/数据处理/data_autotest/test_data.xlsx'\n\n\nclass Data_excel():\n\n def get_excel_event(self):\n df_event_name = pd.read_excel(path_target_data, usecols=[2], names=None)\n df_event_name_li = df_event_name.values.tolist()\n df_event_name_result = []\n for s_li in df_event_name_li:\n df_event_name_result.append(s_li[0])\n #print(df_event_name_result)\n return df_event_name_result\n\n def get_excel_parmas(self):\n df_event_params = pd.read_excel(path_target_data, usecols=[3], names=None)\n df_event_params_li = df_event_params.values.tolist()\n df_event_params_result = []\n for s_li in df_event_params_li:\n df_event_params_result.append(s_li[0])\n #print(df_event_params_result)\n return df_event_params_result\n '''将Excel中的事件和属性组合成字典'''\n def get_true_data(self):\n df_event_name_result = self.get_excel_event()\n df_event_params_result = self.get_excel_parmas()\n df_event_name1 = set(df_event_name_result)\n true_data = {}\n for word in df_event_name1:\n index_list = [item[0] for item in enumerate(df_event_name_result) if item[1] == word]\n for index in index_list:\n true_data.setdefault(word, []).append(df_event_params_result[index])\n print(true_data)\n return true_data\n\n","sub_path":"mta/excel_data.py","file_name":"excel_data.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"280294535","text":"import plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\nfrom plotly.colors import hex_to_rgb\nimport pandas as pd\n\n\n\ndef mix_color(weights, as_tuple=False, colors=['#1b9e77', '#d95f02', '#7570b3']):\n \"\"\"mix the colors given the weights\"\"\"\n colors = [hex_to_rgb(c) for c in colors]\n r = sum([(w / 100) * c[0] for w, c in zip(weights, colors)])\n g = sum([(w / 100) * c[1] for w, c in zip(weights, colors)])\n b = sum([(w / 100) * c[2] for w, c in zip(weights, colors)])\n \n if as_tuple:\n return (r, g, b)\n else:\n return f\"rgb ({r}, {g}, {b})\"\n\n\ndef build_ce_ternary(data: pd.DataFrame, labels=dict(a='A', b='B', c='C'), mix_colors=['#1b9e77', '#d95f02', '#7570b3']) -> go.Figure:\n \"\"\"\n Build the ternary plot for the given dataframe.\n \"\"\"\n fig = make_subplots(rows=2, cols=2, specs=[[{'rowspan': 2}, {}], [None, {'type': 'ternary'}]])\n colors = [mix_color(row.iloc[:3].values, colors=mix_colors) for _, row in data.iterrows()]\n \n fig.add_trace(go.Scatterternary(\n mode='markers',\n a=[row.iloc[0] for _, row in data.iterrows()],\n b=[row.iloc[1] for _, row in data.iterrows()],\n c=[row.iloc[2] for _, row in data.iterrows()],\n marker=dict(\n size=14,\n color=colors\n ),\n hovertemplate=\"
<br>\" + labels['a'] + \": %{a}%<br>
\" + labels['b'] + \": %{b}%<br>
\" + labels['c'] + \": %{c}%\",\n showlegend=False\n ), row=2, col=2)\n\n fig.add_trace(go.Scatter(\n x=data.meanCE,\n y=data.varCE,\n mode='markers',\n showlegend=False,\n marker=dict(\n size=14,\n color=colors\n ),\n text=[f\"{labels['a']}: {row.iloc[0]}%
<br>{labels['b']}: {row.iloc[1]}%<br>
{labels['c']}: {row.iloc[2]}%\" for _, row in data.\n iterrows()],\n hovertemplate=\"%{text}\"\n ))\n\n fig.update_layout(\n template='none',\n ternary=dict(\n sum=100,\n aaxis=dict(title=labels['a']), \n baxis=dict(title=labels['b']), \n caxis=dict(title=labels['c'])\n ),\n xaxis=dict(title=\"Variance\"),\n yaxis=dict(title=\"Mean\")\n )\n\n fig.add_trace(go.Bar(\n y=[0.1 for _ in range(len(colors))],\n x=[_ for _ in range(len(colors))],\n marker_color=colors,\n text=[f\"{labels['a']}: {row.iloc[0]}%
<br>{labels['b']}: {row.iloc[1]}%<br>
{labels['c']}: {row.iloc[2]}%\" for _, row in data.iterrows()],\n hovertemplate=\"%{text}\",\n showlegend=False\n ), row=1, col=2)\n\n fig.update_layout(\n yaxis2=dict(domain=[0.65,0.70], zeroline=False, visible=False),\n xaxis2=dict(domain=[0.65, 0.9], zeroline=False, visible=False),\n margin=dict(t=4, r=10),\n\n )\n \n return fig\n","sub_path":"ruins/plotting/ternary.py","file_name":"ternary.py","file_ext":"py","file_size_in_byte":2814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"71312223","text":"from django.shortcuts import render\r\nfrom . import forms\r\n# Create your views here.\r\n\r\ndef index(request):\r\n return render(request,'basicapp/index.html')\r\n\r\ndef form_name_view(request):\r\n form = forms.FormName()\r\n\r\n if request.method == 'POST':\r\n form = forms.FormName(request.POST)\r\n\r\n if form.is_valid():\r\n # DO SOMETHING CODE\r\n print(\"VALIDATION SUCCESS!\")\r\n print(\"RACid: \"+form.cleaned_data['RACid'])\r\n print(\"Number of Requests: \"+str(form.cleaned_data['no_of_req']))\r\n\r\n return render(request,'basicapp/form_page.html',{'form':form})\r\n","sub_path":"basicforms/basicapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"399036178","text":"# -*- coding:utf-8 -*-\r\n# Author: washing\r\n# DateTime: 2022/8/20 09:49\r\n# File: 0654.py\r\n# Desc: \r\n\r\n\r\n# Definition for a binary tree node.\r\n# class TreeNode:\r\n# def __init__(self, val=0, left=None, right=None):\r\n# self.val = val\r\n# self.left = left\r\n# self.right = right\r\nclass Solution:\r\n def constructMaximumBinaryTree(self, nums: List[int]) -> Optional[TreeNode]:\r\n def parse(l):\r\n if len(l) == 0:\r\n return None\r\n elif len(l) == 1:\r\n return TreeNode(l[0])\r\n else:\r\n max_num = max(l)\r\n idx = l.index(max_num)\r\n tn = TreeNode(max_num)\r\n tn.left = parse(l[:idx])\r\n tn.right = parse(l[idx+1:])\r\n return tn\r\n\r\n t = parse(nums)\r\n return t\r\n","sub_path":"Solutions/0654/0654.py","file_name":"0654.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"13723418","text":"import os\nimport collections\nimport itertools\n\nfrom . 
import re_patterns\n\n\ndef parse_artifacts(lines, base_path):\n \"\"\"Return an iterator which returns artifacts found in the lines given.\"\"\"\n for l in lines:\n match = re_patterns.STORED_ARTIFACT_RE.search(l.msg)\n if match:\n art_path = match.group(1).strip()\n base_name = os.path.basename(base_path)\n art_rel = art_path.split(base_name+'/')[1]\n new_art = os.path.join(base_path, art_rel)\n yield new_art\n\n\ndef parse_steps(lines):\n for i, l in enumerate(lines):\n match = re_patterns.RE_LOG_STEP.search(l.msg)\n if match:\n yield StepLog(i, **match.groupdict())\n\n\ndef parse_errors(lines):\n \"\"\"parses the logs for errors, returning\n\n (failure_index, error_index)\n \"\"\"\n failure_index = None\n error_index = None\n for i, l in enumerate(lines):\n match = re_patterns.RE_LOG_CASE_ERROR.search(l.msg)\n if match:\n ty = match.group(1)\n if ty == 'Failure':\n failure_index = i\n else:\n error_index = i\n return failure_index, error_index\n\n\nclass StepLog(object):\n \"\"\"An object representing a step log.\n\n :parm int test_log_index: the index in case.test_logs where this\n step can be found.\n :parm int step_index: the step that this is.\n :parm str action: the action this step took.\n \"\"\"\n\n def __init__(self, test_log_index, step_index, action):\n self.test_log_index = test_log_index\n self.step_index = int(step_index)\n self.action = action\n\n def serialize(self):\n return dict(self.__dict__)\n\n\nclass SuiteLog(object):\n \"\"\"Represents the logs of a single Suite run (but not the test cases).\n\n :param str name: the name of this suite\n :param list[log.LogItem] setup_logs: the suite setup log (split from full log)\n :param list[log.LogItem] teardown_logs: the suite teardown log (split from full log)\n :param list[str] artifact_paths: paths to artifacts collected in suite setup/teardown\n \"\"\"\n\n def __init__(self, name, timestamp, setup_logs, teardown_logs, artifact_paths, base_path):\n self.name = name\n self.timestamp = timestamp\n self.setup_logs = setup_logs\n self.teardown_logs = teardown_logs\n self.artifact_paths = artifact_paths\n self.base_path = base_path\n\n def serialize(self):\n \"\"\"FOR HACKATHON!!!\"\"\"\n out = dict(self.__dict__)\n out['setup_logs'] = [l.serialize() for l in self.setup_logs]\n out['teardown_logs'] = [l.serialize() for l in self.teardown_logs]\n out = collections.OrderedDict(\n (k, out[k]) for k in (\n 'name', 'timestamp',\n 'setup_logs', 'teardown_logs'\n )\n )\n return out\n\n @classmethod\n def from_lines(cls, setup_lines, teardown_lines, base_path):\n print(\"new suite\")\n setup_dict = next(\n n.groupdict() for n in (re_patterns.RE_SUITE_SETUP.search(l.msg) for l in setup_lines) if\n n)\n name = setup_dict['name']\n timestamp = setup_lines[0].timestamp\n artifact_paths = list(parse_artifacts(setup_lines, base_path))\n artifact_paths.extend(parse_artifacts(teardown_lines, base_path))\n return cls(name, timestamp, setup_lines, teardown_lines, artifact_paths, base_path)\n\n def __repr__(self):\n return \"TestSuite<{name}>\".format(name=self.name)\n\n\nclass CaseLog(object):\n \"\"\"Represents the logs of a single Test Case run.\n\n :param int index: the index of the test case\n :param str name: the name of this test case\n :param list[log.LogItem] status: the status of this test case (passed, failed, skipped)\n :param int setup_index: the index where the setup starts.\n :param int test_index: the index where the test starts (or None).\n :param int teardown_index: the index where the teardown starts (or None).\n :param int 
failure_index: the index where the test failed (or None).\n    :param int error_index: the index where the test had a critical error (or None).\n    :param list[StepLog] steps: the steps that were run during the test.\n    :param list[log.LogItem] logs: the test case setup log\n    :param list[str] artifact_paths: paths to artifacts collected in test case setup/teardown\n    \"\"\"\n\n    def __init__(self, index, name, status,\n                 setup_index, test_index, teardown_index,\n                 failure_index, error_index,\n                 steps, logs,\n                 artifact_paths, suite_path):\n        self.index = int(index)\n        self.name = name\n        self.status = status\n        self.setup_index = setup_index\n        self.test_index = test_index\n        self.teardown_index = teardown_index\n        self.failure_index = failure_index\n        self.error_index = error_index\n        self.steps = steps\n        self.logs = logs\n        self.artifact_paths = artifact_paths\n        self.suite_path = suite_path\n\n    def __repr__(self):\n        return \"TestCase<{index}, {name}, {status}>\".format(**self.__dict__)\n\n    def serialize(self):\n        \"\"\"FOR HACKATHON!!!\"\"\"\n        out = dict(self.__dict__)\n        out['logs'] = [l.serialize() for l in self.logs]\n        out['steps'] = [l.serialize() for l in self.steps]\n        out = collections.OrderedDict(\n            (k, out[k]) for k in (\n                'index', 'name', 'status',\n                'setup_index', 'test_index', 'teardown_index',\n                'failure_index', 'error_index',\n                'steps', 'logs'\n            )\n        )\n        return out\n\n    def serialize_tiny(self):\n        \"\"\"FOR HACKATHON!!!\"\"\"\n        return collections.OrderedDict([\n            ('index', self.index),\n            ('name', self.name),\n            ('status', self.status),\n        ])\n\n    @classmethod\n    def from_lines(cls, lines, base_path):\n        skip_index, skip_dict = next(\n            ((i, n.groupdict()) for (i, n) in enumerate((re_patterns.RE_TEST_CASE_SKIP.search(l.msg)\n                                                         for l in lines)) if n),\n            (None, None))\n        if skip_index is not None:\n            # the test case was skipped; there are no setup/test/teardown\n            # sections, errors, steps or artifacts to record\n            index, name = skip_dict['index'], skip_dict['name']\n            return cls(\n                index, name, 'skipped',\n                skip_index, None, None,\n                None, None,\n                [], lines, [], base_path)\n\n        # get the indexes and dictionaries of the setup/test/teardown sections\n        setup_index, setup_dict = next((i, n.groupdict()) for (i, n) in enumerate(\n            re_patterns.RE_TEST_CASE_SETUP.search(l.msg) for l in lines) if n)\n        if setup_index < len(lines) - 1:\n            test_index, _ = next((\n                (i, n.groupdict()) for i, n in\n                enumerate(re_patterns.RE_TEST_CASE_TEST.search(l.msg) for l in\n                          itertools.islice(lines, setup_index + 1, len(lines)))\n                if n\n            ), (None, None))\n            # next() falls back to (None, None) when no test section was found\n            if test_index is not None:\n                test_index += setup_index + 1\n        else:\n            test_index = None\n\n        if test_index is not None and test_index < len(lines) - 1:\n            tear_index, _ = next((\n                (i, n.groupdict()) for i, n in\n                enumerate(re_patterns.RE_TEST_CASE_TEARDOWN.search(l.msg) for l in\n                          itertools.islice(lines, test_index + 1, len(lines)))\n                if n\n            ), (None, None))\n            if tear_index is not None:\n                tear_index += test_index + 1\n        else:\n            tear_index = None\n\n        # get exception indices if they happened\n        failure_index, error_index = parse_errors(lines)\n\n        # now break everything down\n        index = setup_dict['index']\n        name = setup_dict['name']\n        status = next(\n            n.groupdict() for n in\n            (re_patterns.RE_TEST_CASE_STATUS.search(l.msg) for l in reversed(lines))\n            if n\n        )['status']\n        steps = list(parse_steps(lines))\n        artifact_paths = list(parse_artifacts(lines, base_path))\n        return cls(\n            index, name, status,\n            setup_index, test_index, tear_index,\n            failure_index, error_index,\n            steps, lines,\n            artifact_paths,\n            base_path\n        )\n\n\ndef split_log(log_items, base_path=None):\n    \"\"\"Analyze a file, splitting it up into a SuiteLog and a list of CaseLog objects.\n\n    :param 
list[log.LogItem] log_items: the parsed log items.\n :rtype (SuiteLog, list[CaseLog]):\n \"\"\"\n\n # parse the log_items and find the indexes where important things happen\n # there are actually only two important indexes:\n # - where a test case setup starts\n # - where the suite teardown starts\n case_indexes = []\n teardown_index = None\n for i, item in enumerate(log_items):\n if re_patterns.RE_TEST_CASE_SETUP.search(item.msg):\n assert teardown_index is None\n case_indexes.append(i)\n elif re_patterns.RE_TEST_CASE_SKIP.search(item.msg):\n assert teardown_index is None\n case_indexes.append(i)\n elif re_patterns.RE_SUITE_TEARDOWN.search(item.msg):\n teardown_index = i\n\n if not case_indexes and teardown_index is None:\n # basically nothing was run...\n setup_lines = log_items\n teardown_lines = []\n cases_lines = []\n elif not case_indexes:\n # no cases were run, but the suite setup and teardown were run\n setup_lines = log_items[:teardown_index]\n teardown_lines = log_items[teardown_index:]\n cases_lines = []\n else:\n # at least one case was run (but teardown may not have)\n setup_lines = log_items[:case_indexes[0]]\n if teardown_index:\n teardown_lines = log_items[teardown_index:]\n else:\n teardown_lines = []\n\n cases_lines = []\n for i, index in enumerate(case_indexes):\n if i == len(case_indexes) - 1:\n to = teardown_index\n else:\n to = case_indexes[i + 1]\n cases_lines.append(log_items[index:to])\n\n suite = SuiteLog.from_lines(setup_lines, teardown_lines, base_path=base_path)\n cases = [CaseLog.from_lines(l, base_path) for l in cases_lines]\n return suite, cases\n","sub_path":"FlaskApp_LogTesting-master/analyze/split.py","file_name":"split.py","file_ext":"py","file_size_in_byte":10368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"314643282","text":"'''\nword(k) k는 남아있는 문자. 후보 문자열로 k를 만들 수 있으면 1 리턴 없으면 0리턴\n'''\n\nS = input()\nN = int(input())\nA = []\n\nfor _ in range(N):\n A.append(input())\n\nch = False\n\ndp=[0]*(len(S)+1)\n\ndef word(k):\n global ch\n if len(k) == 0:\n ch = True\n return ch\n if dp[len(S)-len(k)] == 1:\n return \n for a in A:\n a_l = len(a)\n if len(k) < a_l:\n continue\n if k[0:a_l] == a:\n dp[len(S)-len(k)] = 1\n word(k[a_l:])\n return ch\n\nword(S)\n\nif ch:\n print(1)\nelse:\n print(0)\n","sub_path":"백준/Python/카테고리/다이나믹 프로그래밍/16500(문자열 판별).py","file_name":"16500(문자열 판별).py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"113649628","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nYour mapper function should print out 10 lines containing longest posts, sorted in\nascending order from shortest to longest.\nPlease do not use global variables and do not change the \"main\" function.\n\"\"\"\nimport sys\nimport csv\nimport Queue as Q\nimport heapq\n\n\n# Queue.PriorityQueue本质上是对heapq的再封装\n# 同样的用list来存储heap,同样的是minHeap\n# 当然,你也可以直接用heapq\n\n# 如果直接用heapq, 常用的methods有下面5个\n# 其实看heapq源码看他们的methods是如何实现的是一个很好的学习机会\n# 1. heapify(iterable) :- This function is used to convert the iterable into a heap data structure. i.e. in heap order.\n# 2. heappush(heap, item) :- This function is used to insert the element mentioned in its arguments into heap. The order is adjusted, so as heap structure is maintained.\n# 3. heappop(heap) :- This function is used to remove and return the smallest element from heap. The order is adjusted, so as heap structure is maintained.\n# 4. 
heapreplace(heap, item) : -(pop and push) Pop and return the current smallest value, and add the new item\n# 5. heappushpop(heap, item) : -(push and pop) Fast version of a heappush followed by a heappop.\n\ndef mapper():\n reader = csv.reader(sys.stdin, delimiter='\\t')\n writer = csv.writer(sys.stdout, delimiter='\\t', quotechar='\"', quoting=csv.QUOTE_ALL)\n\n # 这个PQ的Entries are typically tuples of the form: (priority number, data).\n # And priority_number should be int\n # Get the first 10 lines\n top_ten = [reader.next() for _ in range(10)]\n # reshape it as tuple (len(line[4]), line)\n top_ten = [(len(line[4]), line) for line in top_ten]\n # init minHeap\n heapq.heapify(top_ten)\n # process the rest\n for line in reader:\n print(len(line[4]), line)\n if (len(line[4]), line) > top_ten[0]:\n # push and pop\n heapq.heappushpop(top_ten, (len(line[4]), line))\n while top_ten:\n writer.writerow(heapq.heappop(top_ten)[1])\n\ntest_text = \"\"\"\\\"\\\"\\t\\\"\\\"\\t\\\"\\\"\\t\\\"\\\"\\t\\\"333\\\"\\t\\\"\\\"\n\\\"\\\"\\t\\\"\\\"\\t\\\"\\\"\\t\\\"\\\"\\t\\\"88888888\\\"\\t\\\"\\\"\n\\\"\\\"\\t\\\"\\\"\\t\\\"\\\"\\t\\\"\\\"\\t\\\"1\\\"\\t\\\"\\\"\n\\\"\\\"\\t\\\"\\\"\\t\\\"\\\"\\t\\\"\\\"\\t\\\"11111111111\\\"\\t\\\"\\\"\n\\\"\\\"\\t\\\"\\\"\\t\\\"\\\"\\t\\\"\\\"\\t\\\"1000000000\\\"\\t\\\"\\\"\n\\\"\\\"\\t\\\"\\\"\\t\\\"\\\"\\t\\\"\\\"\\t\\\"22\\\"\\t\\\"\\\"\n\\\"\\\"\\t\\\"\\\"\\t\\\"\\\"\\t\\\"\\\"\\t\\\"4444\\\"\\t\\\"\\\"\n\\\"\\\"\\t\\\"\\\"\\t\\\"\\\"\\t\\\"\\\"\\t\\\"666666\\\"\\t\\\"\\\"\n\\\"\\\"\\t\\\"\\\"\\t\\\"\\\"\\t\\\"\\\"\\t\\\"55555\\\"\\t\\\"\\\"\n\\\"\\\"\\t\\\"\\\"\\t\\\"\\\"\\t\\\"\\\"\\t\\\"999999999\\\"\\t\\\"\\\"\n\\\"\\\"\\t\\\"\\\"\\t\\\"\\\"\\t\\\"\\\"\\t\\\"7777777\\\"\\t\\\"\\\"\n\"\"\"\n\n\n# This function allows you to test the mapper with the provided test string\n# But you should comment out if you use another dataset as input\n# def main():\n# import StringIO\n# sys.stdin = StringIO.StringIO(test_text)\n# mapper()\n# sys.stdin = sys.__stdin__\n#\n#\n# if __name__ == '__main__':\n# main()\n","sub_path":"mapreduce/ud617/mapper_pattern_q2_top10.py","file_name":"mapper_pattern_q2_top10.py","file_ext":"py","file_size_in_byte":2835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"476211984","text":"import requests\nimport traceback\nimport re\n\nfrom lxml import etree\n\n\nclass weibo:\n\n def __init__(self, user_id, filter=0):\n self.user_id = user_id\n self.filter = filter\n self.username = ''\n self.certification = ''\n self.sex = ''\n self.where = ''\n self.birth = ''\n self.introduction = ''\n self.study = ''\n self.work = ''\n self.totalPage = 0\n self.weibo_num = 0\n self.weibo_num2 = 0\n self.following = 0\n self.followers = 0\n self.weibo_content = []\n self.publish_time = []\n self.up_num = []\n self.retweet_num = []\n self.comment_num = []\n self.user_info = {}\n self.headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) '\n 'AppleWebKit/605.1.15 (KHTML, like Gecko) Version/11.1 Safari/605.1.15'}\n self.cookie = {'Cookie': '_T_WM=d05aeae54952b041f96eb019880c06b8;'\n ' SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WFK58OAMrlOrDqJQgTgDVcn5JpX5K-hUgL'\n '.Foe71Ke4e0eESh-2dJLoI7L2dNHeT-L1x.Yt;'\n ' H5_INDEX_TITLE=DouL-INFO; H5_INDEX=3; '\n 'SCF=AkITaniycUV3hUqo-Ny0hStzzHhmOyTamUxebQF2iiw6xyUIc-Pqyoq-1ItkE-nb-fQbkfdD-s50wgXOFkqLoFk.;'\n ' MLOGIN=0;'\n ' SUB=_2A25367aYDeRhGeVO4lEY8y3OzzmIHXVVF9rQrDV6PUJbkdANLWX8kW1NTXzT6APTOAIJJPgBNohRCLiU12PSsLQP;'\n ' SUHB=0D-FwyEreCGyRy; 
SSOLoginState=1525663432'}\n\n def get_info(self):\n try:\n url = 'https://weibo.cn/%d/info#top' % (self.user_id,)\n response = requests.get(url, headers=self.headers, cookies=self.cookie)\n selector = etree.HTML(response.content)\n info1 = selector.xpath('/html/body/div[7]/text()')\n del info1[5]\n self.username = info1[0].split(':')[1]\n self.certification = info1[1].split(':')[1]\n self.sex = info1[2].split(':')[1]\n self.where = info1[3].split(':')[1]\n self.birth = info1[4].split(':')[1]\n self.introduction = info1[5].split(':')[1]\n self.study = selector.xpath('/html/body/div[9]/text()')[0]\n self.study = self.study[1:]\n self.work = selector.xpath('/html/body/div[11]/text()')[0]\n self.work = self.work[1:]\n\n url2 = 'https://weibo.cn/u/%d?filter=%d' % (self.user_id, self.filter)\n response2 = requests.get(url2, headers=self.headers, cookies=self.cookie)\n selector2 = etree.HTML(response2.content)\n self.weibo_num = selector2.xpath('/html/body/div[4]/div/span/text()')[0]\n self.weibo_num = re.sub('[^\\d+]', '', self.weibo_num)\n self.following = selector2.xpath('/html/body/div[4]/div/a[1]/text()')[0]\n self.following = re.sub('[^\\d+]', '', self.following)\n self.followers = selector2.xpath('/html/body/div[4]/div/a[2]/text()')[0]\n self.followers = re.sub('[^\\d+]', '', self.followers)\n self.user_info['username'] = self.username\n self.user_info['userid'] = self.user_id\n self.user_info['sex'] = self.sex\n self.user_info['where'] = self.where\n self.user_info['birth'] = self.birth\n self.user_info['certification'] = self.certification\n self.user_info['introduction'] = self.introduction\n self.user_info['study'] = self.study\n self.user_info['work'] = self.work\n self.user_info['weibo_num'] = self.weibo_num\n self.user_info['following'] = self.following\n self.user_info['followers'] = self.followers\n f = open('./user_info.txt', 'a+')\n f.write(str(self.user_info))\n except Exception as e:\n print('Error:', e)\n traceback.print_exc()\n\n def get_content(self, page):\n\n def not_own_content(html):\n pass\n\n def own_content(html):\n # 这一部分是获取微博内容 和 内容链接\n all = {}\n words = html.xpath('div[1]/span[@class = \"ctt\"]/text()')[0]\n list_a = []\n a = html.xpath('div[1]/span[@class = \"ctt\"]/a')\n for i in range(len(a)):\n text_a = html.xpath('div[1]/span[@class = \"ctt\"]/a[' + str(i+1) + ']/text()')[0]\n href_a = html.xpath('div[1]/span[@class = \"ctt\"]/a[' + str(i+1) + ']/@href')[0]\n c2a = {text_a: href_a}\n list_a.append(c2a)\n all['content'] = words\n all['links'] = list_a\n # 这部分是过去图片链接\n images = html.xpath('div[1]/a/@href')\n if images:\n tmp_images = images[0]\n all['images'] = tmp_images\n # 这部分是获取点赞、转发、评论\n if len(html.xpath('div')) == 1:\n all['ups'] = re.sub('[^\\d+]', '', html.xpath('div[1]/a[1]/text()')[0])\n all['res'] = re.sub('[^\\d+]', '', html.xpath('div[1]/a[2]/text()')[0])\n all['coms'] =re.sub('[^\\d+]', '', html.xpath('div[1]/a[3]/text()')[0])\n else:\n all['ups'] = re.sub('[^\\d+]', '', html.xpath('div[2]/a[3]/text()')[0])\n all['res'] = re.sub('[^\\d+]', '', html.xpath('div[2]/a[4]/text()')[0])\n all['coms'] = re.sub('[^\\d+]', '', html.xpath('div[2]/a[5]/text()')[0])\n print(all)\n f = open('./content.txt', 'a+')\n f.write(str(all))\n f.write('\\n')\n f.close()\n\n try:\n url = 'https://weibo.cn/u/%d?filter=%d&page=%d' % (self.user_id, self.filter, page)\n print('现在开始爬第-------%d-------页.......' 
% page)\n response = requests.get(url, headers=self.headers, cookies=self.cookie)\n selector = etree.HTML(response.content)\n self.totalPage = selector.xpath('//div[@id=\"pagelist\"]/form/div/input[1]/@value')[0]\n contents = selector.xpath('//div[@class = \"c\"]')\n del contents[-2:]\n print('该页有%d条微博' % len(contents))\n for content in contents:\n not_own = content.xpath('div/span[@class = \"cmt\"]')\n if not_own:\n print('本条微博是转发的,调用转发微博内容爬取方法....')\n not_own_content(content)\n else:\n print('本条微博是原创的,调用原创微博内容爬取方法....')\n own_content(content)\n if page < int(self.totalPage):\n page += 1\n self.get_content(page)\n except Exception as e:\n print('Error', e)\n traceback.print_exc()\n\n def start(self):\n try:\n self.get_info()\n self.get_content(1)\n except Exception as e:\n print('Error:', e)\n\n\ndef main():\n try:\n user_id = 1669879400\n filter = 0\n wb = weibo(user_id, filter)\n wb.start()\n except Exception as e:\n traceback.print_exc()\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"src/weibo.py","file_name":"weibo.py","file_ext":"py","file_size_in_byte":7245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"574914787","text":"\"\"\"\nRead file into texts and calls.\nIt's ok if you don't understand how to read files\n\"\"\"\nimport csv\nimport math\n\nwith open('texts.csv', 'r') as f:\n reader = csv.reader(f)\n texts = list(reader)\n\nwith open('calls.csv', 'r') as f:\n reader = csv.reader(f)\n calls = list(reader)\n\n\"\"\"\nTASK 2: Which telephone number spent the longest time on the phone\nduring the period? Don't forget that time spent answering a call is\nalso time spent on the phone.\nPrint a message:\n\" spent the longest time, seconds, on the phone during \nSeptember 2016.\".\n\"\"\"\n\ndef maxSpentTimeOnPhone(records):\n longestCalls = {}\n\n for rec in records:\n # calling telephone\n if rec[0] in longestCalls:\n longestCalls[rec[0]] += int(rec[3])\n else:\n longestCalls[rec[0]] = int(rec[3])\n \n # receiving call\n if rec[1] in longestCalls:\n longestCalls[rec[1]] += int(rec[3])\n else:\n longestCalls[rec[1]] = int(rec[3])\n\n val = list(longestCalls.values())\n keys = list(longestCalls.keys())\n\n return keys[val.index(max(val))], max(val)\n\nprint(\"{} spent the longest time, {} seconds, on the phone during September 2016.\".format(maxSpentTimeOnPhone(calls)[0], maxSpentTimeOnPhone(calls)[1]))\n\n","sub_path":"BasicProblems/Task2.py","file_name":"Task2.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"645672212","text":"import os\n\nimport numpy as np\nfrom pandas.io.parsers import read_csv\nfrom sklearn.utils import shuffle\nfrom matplotlib import pyplot\nFTRAIN = '/home/soren/Desktop/kaggle/facialkeypoints/training.csv'\nFTEST = '/home/soren/Desktop/kaggle/facialkeypoints/test.csv'\nFLOOKUP = '/home/soren/Desktop/kaggle/facialkeypoints/IdLookupTable.csv'\n\n\n\ndef load(test=False, cols=None):\n\n \"\"\"Loads data from FTEST if *test* is True, otherwise from FTRAIN.\n Pass a list of *cols* if you're only interested in a subset of the\n target columns.\n \"\"\"\n fname = FTEST if test else FTRAIN\n df = read_csv(os.path.expanduser(fname)) # load pandas dataframe\n\n # The Image column has pixel values separated by space; convert\n # the values to numpy arrays:\n df['Image'] = df['Image'].apply(lambda im: np.fromstring(im, sep=' '))\n\n if cols: # get a subset of columns\n df = df[list(cols) + ['Image']]\n\n 
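# note: df.dropna() below removes every row with any missing target value,\n    # so selecting a subset of columns via `cols` first keeps more usable rows\n    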
print(df.count()) # prints the number of values for each column\n df = df.dropna() # drop all rows that have missing values in them\n\n X = np.vstack(df['Image'].values) / 255. # scale pixel values to [0, 1]\n X = X.astype(np.float32)\n\n if not test: # only FTRAIN has any target columns\n y = df[df.columns[:-1]].values\n y = (y - 48) / 48 # scale target coordinates to [-1, 1]\n X, y = shuffle(X, y, random_state=42) # shuffle train data\n y = y.astype(np.float32)\n else:\n y = None\n\n return X, y\n\n\ndef plot_sample(x, axis):\n img = x.reshape(96, 96)\n axis.imshow(img, cmap='gray')\n #axis.scatter(y[0::2] * 48 + 48, y[1::2] * 48 + 48, marker='x', s=10)\n\nX, _ = load(test=False)\n\nfig = pyplot.figure(figsize=(6, 6))\nfig.subplots_adjust(\n left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)\n\n\n\ndef random_grid(X,n):\n for i in range(n**2):\n ax = fig.add_subplot(n, n, i + 1, xticks=[], yticks=[])\n a=np.random.randint(low=0,high=2000)\n plot_sample(X[a], ax)\n pyplot.show()\nrandom_grid(X,2)","sub_path":"exploreFaceData.py","file_name":"exploreFaceData.py","file_ext":"py","file_size_in_byte":1999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"634654189","text":"from django.shortcuts import render\r\nfrom django.http import HttpResponseRedirect, JsonResponse\r\nfrom django.urls import reverse\r\nfrom django.forms import modelformset_factory\r\n\r\nfrom .models import BakedGood, BakedGoodForm\r\n\r\ndef index(request):\r\n baked_goods = BakedGood.objects.all() \r\n context = {'baked_goods': baked_goods} \r\n return render(request, 'example/index.html', context)\r\n \r\ndef menu(request):\r\n baked_goods = BakedGood.objects.all()\r\n context = {'baked_goods': baked_goods}\r\n return render(request, 'example/for.html', context)\r\n \r\ndef bake(request):\r\n if request.method == 'POST':\r\n form = BakedGoodForm(request.POST)\r\n if form.is_valid():\r\n form.save()\r\n return HttpResponseRedirect(reverse('index'))\r\n else:\r\n form = BakedGoodForm()\r\n\r\n return render(request, 'example/bake.html', {'form': form})\r\n\r\ndef ajax(request):\r\n return render(request, 'example/ajax.html')\r\n\r\ndef ajax_demo(request):\r\n data = []\r\n for baked_good in BakedGood.objects.all():\r\n data.append({\"name\": baked_good.name, \"desc\": baked_good.desc})\r\n\r\n return JsonResponse({\"data\": data})\r\n \r\ndef bake_formset(request):\r\n BakedGoodFormSet = modelformset_factory(BakedGood, extra=5,\r\n fields=['name', 'desc', 'good_type', 'price', 'recipe'])\r\n if request.method == 'POST':\r\n formset = BakedGoodFormSet(request.POST)\r\n if formset.is_valid():\r\n formset.save()\r\n return HttpResponseRedirect(reverse('index'))\r\n else:\r\n formset = BakedGoodFormSet(queryset=BakedGood.objects.none())\r\n\r\n return render(request, 'example/bake_formset.html', {'formset': formset})","sub_path":"django-base/example/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"274865281","text":"import turtle\n\n\nt = turtle.Turtle()\nt.hideturtle()\n# 1\n# 实现函数, 用于画一个边长 100 的正方形\n# 参数 x, y 是正方形左上角坐标\n# 函数声明如下\n# def square(x, y)\n\n\ndef square(x, y):\n t.up()\n t.goto(x, y)\n t.setheading(0)\n t.down()\n for i in range(4):\n t.forward(100)\n t.right(90)\n\n# square(100, 100)\n\n# 2\n# 实现函数, 用于画一个正方形, 边长由参数提供\n# 参数 x, y 是正方形左上角坐标\n# 参数 l 是正方行边长\n# 函数声明如下\n# def square(x, y, l)\n\n\ndef square(x, y, l):\n t.up()\n 
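# pen up while repositioning, so moving to the top-left corner draws nothing\n    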
t.goto(x, y)\n t.setheading(0)\n t.down()\n for i in range(4):\n t.forward(l)\n t.right(90)\n\n# square(30, 20, 60)\n\n#\n# 3\n# 画一排正方形, 一共 5 个\n# 从 0 0 点开始, 边长为 30, 每2个正方形之间间隔10像素\n#\n\n\n# for i in range(5):\n# square(i * 40, 0, 30)\n\n# 4\n# 实现函数, 画一排正方形, 有如下参数\n# x, y 是第一个正方形左上角坐标\n# n 是正方形的个数\n# space 是两个正方形之间的间距\n# l 是正方形的边长\n# def square_line(x, y, space, l, n)\n\n\ndef square_line(x, y, space, l, n):\n for i in range(n):\n square(i * (l + space) + x, 0 + y, l)\n\n# square_line(-300, -300, 30, 40, 10)\n\n# 5\n# 实现函数, 用上题的函数来画一个正方形方阵, 参数如下\n# x, y 是第一个正方形左上角坐标\n# space 是两个正方形之间的间距\n# l 是正方形的边长\n# n 是横向正方形的个数\n# m 是纵向正方形的个数\n# def square_square(x, y, space, l, n, m)\n#\n\n\ndef square_square(x, y, space, l, n, m):\n for i in range(m):\n square_line(x, y - i * (l + space), space, l, n)\n\n# square_square(0, 0, 10, 30, 4, 3)\n\n# 6\n# 实现函数, 参数如下\n# list 是一个只包含数字的列表\n# 返回 list 中所有数字的和\n# def sum(list)\n\n\ndef sum(list):\n _sum = 0\n for i in list:\n _sum += i\n return _sum\n\nl1 = [2, 4, 65, 12, 67, 88]\n# print(sum(l1))\n\n# 7\n# 实现函数, 参数如下\n# list 是一个只包含数字的列表\n# 返回 list 中所有数字的平均值\n# def pingjun(list)\n#\n# len(list) 可以用来求 list 的长度\n\n\ndef pingjun(list):\n avr = sum(list) / len(list)\n return avr\n\n# print(pingjun(l1))\n\n\n# 8\n# 实现函数 如下\n# list 参数为一个只包含数字的列表\n# 返回 list 中 最大的数字\n# def max(list)\n#\n# 例如:\n# list = [2,4,1,5,3,9];\n# 则 max(list) 返回 9\n#\n\n\ndef max(list):\n _max = 0\n for i in list:\n if _max < i:\n _max = i\n return _max\n\nprint(max(l1))\n\n\nturtle.done()\n","sub_path":"base/class03/homework.py","file_name":"homework.py","file_ext":"py","file_size_in_byte":2578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"3682349","text":"import time\nimport tensorflow as tf\n\n\nclass Barrier(object):\n def __init__(self, worker_num, barrier_num, sleep_time_ms=10):\n self._worker_num = worker_num\n self._barrier_num = barrier_num\n self._sleep_time_ms = sleep_time_ms\n self._counter_vars = []\n self._counter_add_ops = []\n self._counter_reset_ops = []\n ps_device = '/job:ps/task:0/cpu:0'\n with tf.device(ps_device):\n for i in range(self._barrier_num):\n for j in range(self._worker_num):\n counter_var = tf.get_variable(\n 'counter-{}_{}'.format(i, j),\n (),\n tf.int32,\n initializer=tf.zeros_initializer\n )\n self._counter_vars.append(counter_var)\n self._counter_add_ops.append(counter_var.assign_add(1, use_locking=True))\n self._counter_reset_ops.append(counter_var.assign(0, use_locking=True))\n\n def barrier_reset(self, session, worker_index, barrier_index):\n index = barrier_index * self._worker_num + worker_index\n session.run(self._counter_reset_ops[index])\n\n def barrier(self, session, worker_index, barrier_index, epoch):\n for task_index in range(self._worker_num):\n if task_index == worker_index:\n session.run(self._counter_add_ops[barrier_index * self._worker_num + worker_index])\n index = barrier_index * self._worker_num + task_index\n count = session.run(self._counter_vars[index])\n retry_num = 0\n while count < epoch:\n time.sleep(self._sleep_time_ms)\n retry_num += 1\n count = session.run(self._counter_vars[index])\n if retry_num == 1:\n tf.logging.info(\"{} wait for {} to be completed\".format(time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()), task_index))","sub_path":"tf_euler/python/utils/barrier.py","file_name":"barrier.py","file_ext":"py","file_size_in_byte":1728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"7893807","text":"\"\"\"\nOne 
Away: There are three types of edits that can be performed on strings: insert a character,\nremove a character, or replace a character. Given two strings, write a function to check if they are\none edit (or zero edits) away.\nEXAMPLE\npale, ple -> true\npales, pale -> true\npale, bale -> true\npale, bake -> false\n\"\"\"\n\ndef one_edit_away(string1, string2):\n if len(string1) >= len(string2):\n str1 = string1\n str2 = string2\n else:\n str1 = string2\n str2 = string1\n \n index = 0\n number_of_changes = 0\n\n for str1_character in str1:\n try:\n str2_character = str2[index]\n except IndexError:\n number_of_changes += 1\n if number_of_changes > 1:\n return False\n continue\n\n print(str1_character + \" vs \" + str2_character)\n if str1_character != str2_character:\n number_of_changes += 1\n if number_of_changes > 1:\n return False\n try:\n # When we encounter different character we need to check\n # the next one if it's the same in both\n # We possibly deal with the replace edit\n if str1[index+1] == str2[index+1]:\n print(\"Replace character happened?\")\n index += 1\n continue\n except IndexError:\n pass\n # If it's different it's a remove event\n # on the shorter string (we always assume remove, not add)\n # Try to skip one index\n # without incrementing index of the shorter string\n print(\"Insert character happened?\")\n continue\n index += 1\n\n return True\n \n\ntest_cases = [\n ('pale', 'ple', True),\n ('pales', 'pale', True),\n ('pale', 'bale', True),\n ('pale', 'bake', False),\n ('', 'bake', False),\n ('eloeleoleo', 'bake', False),\n ('123', 'bake', False)\n]\n\nfor test_case in test_cases:\n print(\"*** TESTING ***\")\n print(test_case[0] + \" AND \" + test_case[1])\n assert one_edit_away(test_case[0], test_case[1]) == test_case[2]\n print(test_case[1] + \" AND \" + test_case[0])\n assert one_edit_away(test_case[1], test_case[0]) == test_case[2]","sub_path":"1_5_cracking_coding.py","file_name":"1_5_cracking_coding.py","file_ext":"py","file_size_in_byte":2254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"379938336","text":"# %%\n#######################################\ndef test_dirpath(thepath: str):\n import pathlib\n\n path_obj = pathlib.Path(thepath).resolve()\n path_exists = path_obj.exists()\n path_isdir = path_obj.is_dir()\n if path_exists and path_isdir:\n return True\n else:\n print(f\"The path is a directory: {path_isdir}\")\n print(f\"The path exists: {path_exists}\")\n\n","sub_path":"file_folder_funcs/test_dirpath.py","file_name":"test_dirpath.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"29420692","text":"# %load q04_ridge/build.py\n# Default imports\nfrom sklearn.linear_model import Ridge\nimport pandas as pd\nimport numpy as np\nfrom sklearn.metrics import mean_squared_error\nfrom greyatomlib.advanced_linear_regression.q01_load_data.build import load_data\n\n# We have already loaded the data for you\ndata_set, X_train, X_test, y_train, y_test = load_data('data/house_prices_multivariate.csv')\n\nnp.random.seed(9)\n\n\n# Write your solution here\ndef ridge(alpha=0.01):\n clf=Ridge(alpha=alpha, normalize=True)\n clf.fit(X_train,y_train)\n y_pred=clf.predict(X_test)\n return mean_squared_error(clf.predict(X_train),y_train)**0.5,mean_squared_error(y_pred,y_test)**0.5,clf\n \n \n\nridge()\nclf=Ridge(alpha=100, 
normalize=False,random_state=9)\nclf.fit(X_train,y_train)\ny_pred=clf.predict(X_train)\n\nmean_squared_error(y_pred,y_train) **0.5\nfrom sklearn.linear_model import LinearRegression\n\nclf=LinearRegression()\nclf.fit(X_train,y_train)\ny_pred=clf.predict(X_train)\n\nmean_squared_error(y_pred,y_train) \nprint(y_test.shape,y_pred.shape)\ntype(y_pred)\ny_pred[10]\ny_train\n\n\n","sub_path":"q04_ridge/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"225241253","text":"import os\nimport sys\nimport json\nimport spotipy\nimport time\nimport spotipy.util as util\nfrom json.decoder import JSONDecodeError\nfrom shufflers import *\n\n\ndef get_pl_track_items(pl_id):\n\toffset = 0\n\n\tdone = False\n\n\tresult = []\n\n\twhile(not done):\n\t\t# Get playlist tracks (format specified above)\n\t\tpl_track_items = spotify.playlist_tracks(playlist_id=pl_id,\n\t\t\t\t\t\t\tfields='items(track(name,id,artists.name,artists.id,album.name,album.id))', offset=offset)['items']\n\n\t\tresult.extend(pl_track_items)\n\n\t\tif(len(pl_track_items) < 100):\n\t\t\tdone = True\n\n\t\toffset = offset + 100\n\n\treturn result\n\n\ndef replace_tracks(username, pl_id, tracklist):\n\tlength = len(tracklist)\n\tbatches = []\n\toffset = 0\n\n\t# Append batches of 100\n\tfor i in range(length//100):\n\t\tbatches.append(tracklist[offset:offset+100])\n\t\toffset = offset + 100\n\n\t# Append remainder if needed\n\tremainder = tracklist[offset:]\n\tif(remainder):\n\t\tbatches.append(remainder)\n\n\t# Do 'replace' on first 100 (or less)\n\tspotify.user_playlist_replace_tracks(username, pl_id, batches[0])\n\n\t# Do 'add' on the remaining batches\n\tfor tracks in batches[1:]:\n\t\tspotify.user_playlist_add_tracks(username, pl_id, tracks)\n\n\tprint('success')\n\n\n\nstartTime = time.time()\n\n# Set scope\nscope = 'playlist-modify-public'\n\n# Get username\nusername = sys.argv[1]\n\n# My Username ID: 12930687\n\n# Prompt user permission\ntry:\n\ttoken = util.prompt_for_user_token(username, scope)\nexcept:\n\tos.remove(f\".cache-{username}\")\n\ttoken = util.prompt_for_user_token(username, scope)\n\n# Spotify object\nspotify = spotipy.Spotify(auth=token)\n\n# Printing readable JSON\n# print(json.dumps(VAR, sort_keys=True, indent=4))\n\n# Get playlist tracks with id and name of track, album, artist(s)\n''' Format:\npl_track_items[\n\t{\n\ttrack{\n\t\tid,\n\t\tname,\n\t\talbum{\n\t\t\tid\n\t\t\tname\n\t\t},\n\t\tartists[\n\t\t\t{\n\t\t\tid\n\t\t\tname\n\t\t\t},...\n\t\t]\n\t},... 
\n]\n\n'''\n# ID of playlist to shuffle\n# spotify:playlist:0qfJQD123jdsapEus1MjJM\n\npl_id = '0qfJQD123jdsapEus1MjJM'\n\npl_track_items = get_pl_track_items(pl_id)\n\nprint(len(pl_track_items))\n\n# print(json.dumps(pl_track_items, sort_keys=True, indent=4))\n\n# Get the new song order - returns list of track ID's\nnew_order = Custom_w(pl_track_items, 10)\n\n# print(new_order)\n\nreplace_tracks(username, pl_id, new_order)\n\nendTime = time.time()\n\nprint('Execution time:', endTime-startTime)","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"25671254","text":"prizes = {\n (2, True): 3,\n (3, False): 5,\n (3, True): 8,\n (4, False): 22,\n (4, True): 218,\n (5, False): 1201,\n (5, True): 35722,\n (6, False): 1_000_000,\n\n}\nk = int(input())\nfor i in range(k):\n correct_numbers = input().split(\" \")\n correct_numbers = [int(i) for i in correct_numbers]\n numbers = correct_numbers[:-1]\n extra_number = correct_numbers[-1]\n attemps = int(input())\n profit = 0\n for x in range(attemps):\n attempt = input().split(\" \")\n attempt = [int(i) for i in attempt]\n correct = 0\n extra = False\n for a in attempt:\n if a in numbers:\n correct += 1\n elif a == extra_number:\n extra = True\n result = (correct, extra)\n if result in prizes:\n profit += prizes[result]\n print(\"{} {}\".format(i + 1, profit))\n","sub_path":"2017/5 - Lotto.py","file_name":"5 - Lotto.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"211099689","text":"# coding=utf-8\n\nfrom __future__ import unicode_literals\n\nimport bleach\nfrom uuid import uuid4\n\nfrom django.core.urlresolvers import reverse\nfrom django.db import models\nfrom django.db.models.signals import pre_save\nfrom django.dispatch.dispatcher import receiver\nfrom django.utils import timezone\nfrom django.utils.encoding import python_2_unicode_compatible\nfrom django.utils.translation import ugettext_lazy as _\nfrom image_cropping.fields import ImageRatioField\n\nfrom imagekit.models.fields import ProcessedImageField, ImageSpecField\nfrom pilkit.processors.resize import ResizeToFit, SmartResize\nfrom account.models import User, GROUP_NAME_MODERATORS\nfrom actions.models import ActionGeneratingModelMixin\nfrom cropping.fields import ProcessedImageFieldWithCropping\nfrom cropping.models import CroppingModelMixin\n\nfrom libs.moderation.models import MODERATION_STATUS_APPROVED\nfrom libs.moderation.signals import post_moderation\n\nfrom kuaapi.models import ParticipatingMunicipality\n\nfrom nuka.models import MultilingualRedactorField, MultilingualTextField\nfrom nuka.utils import strip_tags\nfrom slug.models import SlugifiedModel\n\n\ndef _organization_pic_path(obj, name):\n return 'organization/%d/pictures/%s.jpg' % (obj.pk, uuid4().hex)\n\n\nclass OrganizationQuerySet(models.QuerySet):\n def real(self):\n return self.filter(type__gt=max(self.model.MAGIC_TYPES))\n\n def active(self):\n return self.filter(is_active=True)\n\n def normal(self):\n return self.filter(type__gt=self.model.TYPE_UNKNOWN).active()\n\n def normal_and_inactive(self):\n return self.filter(type__gt=self.model.TYPE_UNKNOWN)\n\n\n@python_2_unicode_compatible\nclass Organization(ActionGeneratingModelMixin, SlugifiedModel, models.Model,\n CroppingModelMixin):\n TYPE_UNKNOWN = 0\n TYPE_NATION = 1\n TYPE_ORGANIZATION = 3\n TYPE_MUNICIPALITY = 4\n TYPE_SCHOOL = 5\n 
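# values 6-9 are unused here, presumably left free for future organization types\n    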
TYPE_OTHER = 10\n TYPE_CHOICES = (\n (TYPE_UNKNOWN, _(\"Tuntematon\")),\n (TYPE_NATION, _(\"Koko Suomi\")),\n (TYPE_ORGANIZATION, _(\"Järjestö\")),\n (TYPE_MUNICIPALITY, _(\"Kunta\")),\n (TYPE_SCHOOL, _(\"Koulu tai muu oppilaitos\")),\n (TYPE_OTHER, _(\"Muu organisaatio\")),\n )\n # these can't be selected for new organizations:\n MAGIC_TYPES = (TYPE_UNKNOWN, TYPE_NATION)\n\n type = models.SmallIntegerField(_(\"tyyppi\"), choices=TYPE_CHOICES)\n name = MultilingualTextField(_(\"nimi\"), max_length=255, simultaneous_edit=True)\n description = MultilingualRedactorField(_(\"kuvaus\"), blank=True)\n municipalities = models.ManyToManyField(\n 'fimunicipality.Municipality',\n related_name=_(\"Kunnat\"),\n verbose_name=_(\"Valitse kunnat, joiden alueella organisaatio toimii.\")\n )\n\n # cropping\n original_picture = ProcessedImageFieldWithCropping(\n upload_to=_organization_pic_path,\n processors=[ResizeToFit(width=1280, height=1280, upscale=False)],\n format='JPEG', options={'quality': 90}, default=\"\"\n )\n\n picture = ProcessedImageField(\n upload_to=_organization_pic_path, max_length=120,\n processors=[ResizeToFit(width=1280, height=1280, upscale=False)],\n format='JPEG', options={'quality': 90},\n null=True, default=None, blank=True\n )\n picture_medium = ImageSpecField(source='picture',\n processors=[SmartResize(width=220, height=220)],\n format='JPEG', options={'quality': 70})\n cropping = ImageRatioField('original_picture', '220x220', size_warning=True,\n verbose_name=_(\"Profiilikuvan rajaus\"))\n is_active = models.BooleanField(_(\"aktiivinen\"), default=False)\n created = models.DateTimeField(_(\"luotu\"), default=timezone.now)\n\n # TODO: validation: municipality must be unique if type == TYPE_MUNICIPALITY\n\n search_text = models.TextField(null=True, default=None)\n\n objects = OrganizationQuerySet.as_manager()\n\n def __str__(self):\n return '%s' % self.name\n\n def get_cropping_cancel_url(self):\n return reverse('organization:picture', kwargs={'pk': self.pk})\n\n def absolute_url_viewname(self):\n return 'organization:detail'\n\n def is_real_organization(self):\n return self.type not in self.MAGIC_TYPES\n\n def participates_in_kua(self):\n if self.type != self.TYPE_MUNICIPALITY:\n return False\n try:\n return bool(self.municipalities.first().kua_participation.pk)\n except ParticipatingMunicipality.DoesNotExist:\n return False\n\n def description_plaintext(self):\n desc = '%s' % self.description\n return bleach.clean(desc.replace('>', '> '),\n tags=[], strip=True, strip_comments=True).strip()\n\n def admins_str(self):\n admin_list = [a.get_full_name() for a in self.admins.all()]\n return \", \".join(admin_list)\n\n def slugifiable_text(self):\n return self.name\n\n # action processing\n def action_kwargs_on_create(self):\n\n return {'actor': None}\n\n def fill_notification_recipients(self, action):\n for u in User.objects.filter(groups__name=GROUP_NAME_MODERATORS):\n action.add_notification_recipients(action.ROLE_MODERATOR, u)\n\n class Meta:\n verbose_name = _(\"organisaatio\")\n verbose_name_plural = _(\"organisaatiot\")\n\n\n@receiver(signal=post_moderation, sender=Organization)\ndef activate_approved_organization(instance=None, status=None, **kwargs):\n if status == MODERATION_STATUS_APPROVED and not instance.is_active:\n instance.is_active = True\n instance.save()\n\n\n@receiver(pre_save, sender=Organization)\ndef update_search_text(instance=None, **kwargs):\n instance.search_text = ' '.join(map(strip_tags, instance.name.values() +\n 
instance.description.values()))\n","sub_path":"organization/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"536413357","text":"# Imports\nimport numpy as np\nimport os\nimport argparse\nimport tensorflow as tf\nfrom cnn_models import CNN_Model\ncnn_model = CNN_Model #which model to use\nimport cv2\nfrom functions import receiverNetwork, parse_predictions, get_labels, visualize, write_file, prune_boxes, crop_and_warp\nfrom COCO.COCOlite import dataset\nimport time\n\n#DEBUG, INFO, WARN, ERROR, or FATAL\ntf.logging.set_verbosity(tf.logging.WARN)\n\n#Argument parsing\nparser = argparse.ArgumentParser()\nparser.add_argument(\"model_name\", help=\"Relative path to model\")\nparser.add_argument(\"vis\", help=\"Visualizations? y/n\")\nargs = parser.parse_args()\nmodel_path = args.model_name\n\n\n#EDGEBOXES setup\nmodelfile = \"./model.yml.gz\"\nprint(\"Loading model...\")\nedgeGenerator = cv2.ximgproc.createStructuredEdgeDetection(model = modelfile)\nboxGenerator = cv2.ximgproc.createEdgeBoxes(maxBoxes = 1000,\n alpha = 0.65,\n beta = 0.75,\n minScore = 0.03)\n\nCOCO = dataset() #Retrieve images from COCO\n\ndef main(unused_argv):\n\n # Create the Estimator\n classifier = tf.estimator.Estimator(\n model_fn=cnn_model,\n model_dir=model_path)\n \n labels = get_labels() #maps id ints to name\n\n #Variables to track performance\n total_time = 0\n total_execs = 0\n \n try:\n #GroundTruth\n image , gt_classes, gt_boxes, filename = COCO.nextImage()\n while image is not None:\n #EdgeBoxes, generate bounding boxes\n edgearray = edgeGenerator.detectEdges(image)\n orientationarray = edgeGenerator.computeOrientation(edgearray)\n suppressed_edgearray = edgeGenerator.edgesNms(edgearray, orientationarray)\n boxes = boxGenerator.getBoundingBoxes(suppressed_edgearray, orientationarray)\n\n b_time = time.time() #beginning time\n \n #Create list of all objects, cropped and warped\n objects = list()\n for box in boxes:\n object = crop_and_warp(image, box)\n objects.append(object)\n samples = np.array(objects, dtype=np.float32)\n \n if len(boxes) > 0: #skip images with no boxes\n #Input function with all objects in image\n pred_input_fn = tf.estimator.inputs.numpy_input_fn(\n x=samples,\n num_epochs=1,\n shuffle=False)\n\n #Perform prediction\n predictions = classifier.predict(\n input_fn=pred_input_fn,\n yield_single_examples=False)\n\n #predictions is a weird object\n classes, scores = parse_predictions(predictions)\n \n #Get rid of overlapping boxes with iou threshold\n iou_threshold = 0.5\n boxes, classes, scores = prune_boxes(boxes, iou_threshold, classes, scores)\n \n #Performance metrics\n exec_time = time.time()-b_time\n print(\"Executed in:\", exec_time) #execution time\n total_time = total_time + exec_time\n total_execs = total_execs + 1\n \n #Write files for mAP calculations\n mAP_paths = [\"./mAP/\", filename] #Path to mAP https://github.com/Cartucho/mAP\n write_file(gt_classes, gt_boxes, mAP_paths, None, labels) #write gt files\n write_file(classes, boxes, mAP_paths, scores, labels) #write predicted files\n \n #Visualizations\n if args.vis == 'y':\n image = image*255 #Convert to value in [0,255] for vis\n image = image.astype(np.uint8)\n image_COCO = visualize(gt_boxes, image, None, gt_classes, labels)\n image = visualize(boxes, image, scores, classes, labels)\n\n cv2.imshow(\"COCO\", image_COCO)\n cv2.imshow(\"image\", image)\n cv2.waitKey(1000)\n \n #Retrieve next set of 
images and annotations\n image , gt_classes, gt_boxes, filename = COCO.nextImage()\n \n except KeyboardInterrupt:\n exit(total_time/total_execs)\n \nif __name__ == \"__main__\":\n tf.app.run()\n","sub_path":"accuracy_eval.py","file_name":"accuracy_eval.py","file_ext":"py","file_size_in_byte":3993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"205897","text":"import pandas as pd\nfrom krk_meetings.config import FloydDataPaths, MAX_WALKING_DISTANCE\nfrom krk_meetings.data_provider.data_provider_utils import get_walking_distance\nimport openrouteservice\nfrom openrouteservice.directions import directions\nfrom krk_meetings.utils import save_pickle, load_pickle\nimport time\n\nopenrouteservice_api_key = '' # Specify your personal API key\n\n\ndef reparse_walking_distances():\n client = openrouteservice.Client(key=openrouteservice_api_key)\n stops_df: pd.DataFrame = pd.read_pickle(FloydDataPaths.stops_df.value)\n stops_df = stops_df[['stop_name', 'stop_lon', 'stop_lat']]\n walking_distances_pickle = load_pickle(FloydDataPaths.api_walking_distances.value)\n api_walking_distances = walking_distances_pickle['distances']\n api_stop_list = stops_df['stop_name'].to_list()\n print(api_stop_list)\n\n def adjacent_stops_generator():\n for id_1, name_1, lon_1, lat_1 in stops_df.itertuples():\n for id_2, name_2, lon_2, lat_2 in stops_df.itertuples():\n if id_1 < id_2 and get_walking_distance(lon_1, lat_1, lon_2, lat_2) < MAX_WALKING_DISTANCE:\n yield name_1, lon_1, lat_1, name_2, lon_2, lat_2\n\n adjacent_stops = list(adjacent_stops_generator())\n all_stops = len(adjacent_stops)\n current_stop_percent = 0\n loop_counter = 0\n for name_1, lon_1, lat_1, name_2, lon_2, lat_2 in adjacent_stops:\n loop_counter += 1\n if 100 * loop_counter / all_stops >= current_stop_percent:\n print(f\"{current_stop_percent}% done \")\n current_stop_percent += 1\n if (name_1, name_2) in api_walking_distances.keys():\n continue\n time.sleep(2)\n coords = ((lon_1, lat_1), (lon_2, lat_2))\n if (lon_1, lat_1) == (lon_2, lat_2):\n api_walking_distances[(name_1, name_2)] = 0\n api_walking_distances[(name_2, name_1)] = 0\n else:\n try:\n routes = directions(client, coords, profile='foot-walking')\n api_walking_distances[(name_1, name_2)] = routes['routes'][0]['summary']['duration']\n api_walking_distances[(name_2, name_1)] = routes['routes'][0]['summary']['duration']\n\n except openrouteservice.exceptions.ApiError:\n break\n save_pickle({'distances': api_walking_distances, 'stop_list': list(api_stop_list)}, FloydDataPaths.api_walking_distances.value)\n\n\nif __name__ == \"__main__\":\n # save_pickle({'distances': {}, 'stop_list': []}, FloydDataPaths.api_walking_distances.value)\n reparse_walking_distances()\n\n","sub_path":"backend/krk_meetings/scripts/reparse_walking_distances.py","file_name":"reparse_walking_distances.py","file_ext":"py","file_size_in_byte":2580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"504043458","text":"import os\nimport sys\nfrom PyQt5.QtWidgets import (\n QApplication, QListWidget, QWidget, QVBoxLayout\n)\nfrom PyQt5.QtGui import QFont\nfrom collections import deque\n\nclass Main(QWidget):\n def __init__(self):\n super(Main, self).__init__()\n\n self.setAcceptDrops(True)\n self.__que = deque()\n self.__FileList = QListWidget()\n self.__initUI()\n \n def __initUI(self):\n layout = QVBoxLayout()\n layout.addWidget(self.__FileList)\n\n self.setLayout(layout)\n\n self.setGeometry(1000, 500, 800, 
500)\n\n    def dragEnterEvent(self, event):\n        if event.mimeData().hasUrls():\n            event.accept()\n        else:\n            event.ignore()\n\n    def dropEvent(self, event):\n        urls = event.mimeData().urls()\n        for url in urls:\n            path = url.toLocalFile()\n            tmp = path.split('.')\n            if len(tmp) != 1:\n                self.__FileList.addItem(os.path.basename(path))\n            else:\n                self.__addDir(tmp[0])\n    \n    def __addDir(self, item):\n        for roots, dirs, files in os.walk(item):\n            for f in files:\n                self.__FileList.addItem(os.path.basename(f))\n\n            if len(dirs) != 0:\n                for d in dirs:\n                    self.__que.append(f\"{item}/{d}\")\n                return self.__addDir(self.__que.popleft())\n\n        try:\n            if len(self.__que) != 0:\n                return self.__addDir(self.__que.popleft())\n        except:\n            return\n    \ndef main():\n    app = QApplication(sys.argv)\n    font = QFont(\"Meiryo\")\n    app.setFont(font)\n    w = Main()\n    w.setWindowTitle(\"title\")\n    w.show()\n    w.raise_()\n    app.exec_()\n\nif __name__ == '__main__':\n    main()\n\n# use pathlib\n\"\"\"\nfrom pathlib import Path\n\ndef dragEnterEvent(self, event) -> None:\n    if event.mimeData().hasUrls():\n        event.accept()\n    else:\n        event.ignore()\n\ndef dropEvent(self, event) -> None:\n    urls = event.mimeData().urls()\n    for url in urls:\n        path = url.toLocalFile()\n        x = Path(path)\n        tmp = path.split('.')\n        if x in self.__xmlPathList:\n            QMessageBox.information(self, 'Warning', 'This file already in.', QMessageBox.Ok)\n            continue\n        if len(tmp) != 1:\n            if inExtension(x, \"db\"):\n                self.xmlList.addItem(x.name)\n                self.__xmlPathList.append(x)\n        else:\n            print(tmp[0])\n            self.__addDir(Path(tmp[0]))\n\ndef __addDir(self, item: str) -> None:\n    for f in list(item.glob(\"**/*.db\")):\n        self.xmlList.addItem(f.name)\n        self.__xmlPathList.append(f)\n\"\"\"","sub_path":"Portfolio/PyQt5/Template/DnD/DnD.py","file_name":"DnD.py","file_ext":"py","file_size_in_byte":2330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"3076608","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pyfits\nimport os\ndata = pyfits.getdata('spectrum_9721.fits')\nprint(data)\ndata.shape\ndata = data[0,0]\nplt.plot(data)\nplt.xlabel(\"VLSR\")\nplt.ylabel(\"Antenna Temperature(K)\")\nplt.title(\"Solar Drift Scan Plot\")\nplt.grid(True)\nplt.savefig('Solar_drift.png')\nplt.show()\nprint(data)\ndata = data[5:]\nx = max(data)\ny = [0,27]\ny.append(x)  # list.append mutates y in place and returns None\nprint(y)\n\n\n\n\n\n","sub_path":"Python/Salsa_Solar_Drift.py","file_name":"Salsa_Solar_Drift.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"445069037","text":"##!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom utils import deallocate_vm, RG_TEMPLATE\n\nSTUDENT_NAME = \"student1\"\nRG_NAME = RG_TEMPLATE.format(STUDENT_NAME)\n\nfor idx in [1, 2, 3]:\n    VM_NAME = \"cluster{0}\".format(idx)\n    deallocate_vm(VM_NAME, RG_NAME)\n","sub_path":"azure/deallocate_vm.py","file_name":"deallocate_vm.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"207126623","text":"import cv2\nfrom bebop import Bebop\nimport pygame\nimport time\nfrom commands import *\nimport numpy\nimport logging\n\ndef mergeSort(lst):\n    if len(lst) <= 1:\n        return lst\n    mid = len(lst) // 2\n    left = mergeSort(lst[:mid])\n    right = mergeSort(lst[mid:])\n    return merge(left, right)\n\ndef merge(left, right):\n    if not left:\n        return right\n    if not right:\n        return left\n    if left[0] < right[0]:\n        return [left[0]] + merge(left[1:], 
right)\n return [right[0]] + merge(left, right[1:])\n\ncnt = 0\nf = open( \"./images/video.h264\", \"wb\" )\n\nlogging.basicConfig(level=logging.DEBUG)\n\nwnd = None\ndef video_frame(frame):\n # Initialize the frame size for drone adjustment\n if drone.frameWidth == 0:\n drone.frameWidth = numpy.size(frame, 1)\n if drone.frameHeight == 0:\n drone.frameHeight = numpy.size(frame, 0)\n\n # Initialize variables to compare the current frame to\n if drone.thisFrame is None:\n drone.lastFrame = frame\n else:\n drone.lastFrame = drone.thisFrame\n drone.thisFrame = frame\n\n # # Convert frames to grayscale and blur them\n # gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n # gray = cv2.GaussianBlur(gray, (21, 21), 0)\n #\n # grayLastFrame = cv2.cvtColor(drone.lastFrame, cv2.COLOR_BGR2GRAY)\n # grayLastFrame = cv2.GaussianBlur(grayLastFrame, (21, 21), 0)\n #\n # # compute the absolute difference between the current frame and the last frame\n # frameDelta = cv2.absdiff(grayLastFrame, gray)\n\n ret, thresh = cv2.threshold(frame, 127, 255, cv2.THRESH_BINARY)\n # edges = cv2.cvtColor(edges, cv2.COLOR_BGR2GRAY)\n\n # Find edges after motion detection\n edges = cv2.Canny(thresh, drone.minEdgeVal, drone.maxEdgeVal)\n\n if drone.pictureBoolean:\n drone.pictureBoolean = False\n cv2.imwrite(\"saved_image.jpg\", edges)\n\n # Find sphero using circles\n if drone.findSphero:\n # Find circles after detecting edges\n circles = cv2.HoughCircles(edges, cv2.HOUGH_GRADIENT, 1.2, 5,\n param1=50, param2=30, minRadius=drone.minCircleRadius, maxRadius=drone.maxCircleRadius)\n # circles = cv2.HoughCircles(edges, cv2.HOUGH_GRADIENT, 1.2, 10, minRadius=drone.minCircleRadius, maxRadius=drone.maxCircleRadius)\n\n if circles is not None:\n circles = numpy.uint16(numpy.around(circles))\n listX = []\n listY = []\n listR = []\n\n for i in circles[0, :]:\n # # draw the outer circle\n # cv2.circle(edges, (i[0], i[1]), i[2], (255, 255, 255), 2)\n # # draw the center of the circle\n # cv2.circle(edges, (i[0], i[1]), 2, (255, 255, 255), 3)\n\n # Save the centers and radii\n listX.append(i[0])\n listY.append(i[1])\n listR.append(i[2])\n # print(\"Edges circle center at: \" + str(i[0]) + \", \" + str(i[1]))\n\n # Sort the centers and radii and print/draw the median\n sortedX = mergeSort(listX)\n sortedY = mergeSort(listY)\n sortedR = mergeSort(listR)\n\n medianX = sortedX[len(sortedX) // 2]\n medianY = sortedY[len(sortedY) // 2]\n medianR = sortedR[len(sortedR) // 2]\n\n drone.objectCenterX = medianX\n drone.objectCenterY = medianY\n\n cv2.circle(edges, (medianX, medianY), medianR, (255,255,255), 2)\n cv2.circle(edges, (medianX, medianY), 2, (255,255,255), 2)\n # print(\"Median edges circle center: \" + str(medianX) + \", \" + str(medianY) + \" with radius \" + str(medianR))\n\n drone.sinceLastSphero = 0\n drone.foundCircle = True\n else:\n # Fake a circle in the center if none found\n drone.objectCenterX = drone.frameWidth >> 1\n drone.objectCenterY = drone.frameHeight >> 1\n\n drone.sinceLastSphero += 1\n drone.foundCircle = False\n else:\n # Fake a circle in the center if none found\n drone.objectCenterX = drone.frameWidth >> 1\n drone.objectCenterY = drone.frameHeight >> 1\n drone.foundCircle = False\n\n # Find sphero using blobs if no circles found\n if drone.findSphero and not drone.foundCircle:\n kernel = numpy.ones((5, 5), numpy.uint8)\n edges = cv2.dilate(edges, kernel, iterations=1)\n edges = cv2.erode(edges, kernel, iterations=1)\n\n params = cv2.SimpleBlobDetector_Params()\n\n # Filter by Circularity\n # 
params.filterByCircularity = True\n # params.minCircularity = 0.6\n\n # Filter by Area.\n # params.filterByArea = True\n # params.minArea = 16\n\n detector = cv2.SimpleBlobDetector_create(params)\n keypoints = detector.detect(edges)\n\n if keypoints is not None:\n listX = []\n listY = []\n listR = []\n\n for keypoint in keypoints:\n # # draw the outer circle\n # cv2.circle(edges, (i[0], i[1]), i[2], (255, 255, 255), 2)\n # # draw the center of the circle\n # cv2.circle(edges, (i[0], i[1]), 2, (255, 255, 255), 3)\n\n # Save the centers and radii\n # print point.pt[0]\n listX.append(int(keypoint.pt[0]))\n listY.append(int(keypoint.pt[1]))\n listR.append(int(keypoint.size / 2))\n # print(\"Edges circle center at: \" + str(i[0]) + \", \" + str(i[1]))\n # print keypoint.pt\n\n if len(listX) > 0 and len(listY) > 0 and len(listR) > 0:\n # Sort the centers and radii and print/draw the median\n sortedX = mergeSort(listX)\n sortedY = mergeSort(listY)\n sortedR = mergeSort(listR)\n\n # print sortedX\n medianX = sortedX[len(sortedX) // 2]\n medianY = sortedY[len(sortedY) // 2]\n medianR = sortedR[len(sortedR) // 2]\n\n drone.objectCenterX = medianX\n drone.objectCenterY = medianY\n\n cv2.circle(edges, (medianX, medianY), medianR, (255,255,255), 2)\n cv2.circle(edges, (medianX, medianY), 2, (255,255,255), 2)\n # print(\"Median edges circle center: \" + str(medianX) + \", \" + str(medianY) + \" with radius \" + str(medianR))\n\n drone.sinceLastSphero = 0\n else:\n # Fake a circle in the center if none found\n drone.objectCenterX = drone.frameWidth >> 1\n drone.objectCenterY = drone.frameHeight >> 1\n\n drone.sinceLastSphero += 1\n else:\n # Fake a circle in the center if none found\n drone.objectCenterX = drone.frameWidth >> 1\n drone.objectCenterY = drone.frameHeight >> 1\n\n drone.sinceLastSphero += 1\n elif not drone.foundCircle:\n # Fake a circle in the center if none found\n drone.objectCenterX = drone.frameWidth >> 1\n drone.objectCenterY = drone.frameHeight >> 1\n drone.sinceLastSphero = 0\n\n # cnt += 1\n cv2.imshow(\"Drone\", frame)\n # cv2.imshow(\"Motion Detection\", frameDelta)\n cv2.imshow(\"Threshold Edges\", edges)\n cv2.imshow(\"Threshold\", thresh)\n cv2.waitKey(10)\n\ndef video_start():\n print(\"Starting video...\")\n cv2.namedWindow(\"Drone\")\n cv2.namedWindow(\"Threshold Edges\")\n cv2.namedWindow(\"Threshold\")\n\ndef video_end():\n print(\"Ending video...\")\n cv2.destroyWindow(\"Drone\")\n cv2.destroyWindow(\"Threshold Edges\")\n cv2.destroyWindow(\"Threshold\")\n # Have to send waitKey several times on Unix to make window disappear\n for i in range(1, 5):\n cv2.waitKey(1)\n\ndef scale(value, scaler):\n if abs(value) < 0.03:\n return 0\n return value * scaler\n\ndef clip(value, low, high):\n if value < low:\n return low\n if value > high:\n return high\n return value\n\n\n# Video variables\nf = open( \"./images/video.h264\", \"wb\" )\ncnt = 0\nframes = 0\nlastFrames = 0\n\nprint(\"Connecting to drone...\")\ndrone = Bebop()\ndrone.trim()\ndrone.video_callbacks(video_start, video_end, video_frame)\ndrone.videoEnable()\ndrone.minEdgeVal = 30\ndrone.maxEdgeVal = 50\nprint(\"Connected.\")\n\npygame.init()\nsize = [100, 100]\nscreen = pygame.display.set_mode(size)\npygame.display.set_caption(\"Drone Teleop\")\n\n# Loop until the user clicks the close button.\ndone = False\n\n# Used to manage how fast the screen updates\nclock = pygame.time.Clock()\n\n# Initializes joystick\nif pygame.joystick.get_count() == 0:\n print(\"No joysticks found\")\n done = True\nelse:\n joystick = 
pygame.joystick.Joystick(0)\n joystick.init()\n print(\"Initialized %s\" % (joystick.get_name()))\n print(\"Number of buttons %d. Number of axis %d, Number of hats %d\" %\n (joystick.get_numbuttons(), joystick.get_numaxes(),\n joystick.get_numhats()))\n\nMAX_SPEED = 40\n\ntilt = 0\ntiltMin = -70\ntiltMax = 40\n\npan = 0\npanMin = -40\npanMax = 40\n\nsecondsCounter = 0\n\nprintCounter = 0\nspheroMoveCounter = 0\n\nlastTime = time.time()\n\n# -------- Main Program Loop -----------\nwhile not done:\n try:\n userMovement = False\n\n # EVENT PROCESSING STEP\n for event in pygame.event.get(): # User did something\n if event.type == pygame.QUIT: # If user clicked close\n done = True # Flag that we are done so we exit this loop\n\n # Displays battery every 5 seconds\n nowTime = time.time()\n if (nowTime - lastTime) > 1:\n secondsCounter += 1\n lastTime = nowTime\n\n if secondsCounter % 5 == 0:\n print(\"Battery: \" + str(drone.battery))\n\n # Deltas for controlling the camera\n tiltDelta = 0\n panDelta = 0\n\n if joystick.get_button(1):\n drone.pictureBoolean = True\n\n # A and Back to emergency land\n if joystick.get_button(0) == 1 and joystick.get_button(6) == 1:\n drone.emergency()\n\n # Back to land\n if joystick.get_button(6) == 1:\n print(\"Landing...\")\n if drone.flyingState is None or drone.flyingState == 1: # if taking off then do emergency landing\n drone.emergency()\n drone.land()\n\n # Start to takeoff\n if joystick.get_button(7) == 1:\n if drone.flyingState is 0:\n drone.takeoff()\n # drone.takeoff()\n\n # --- Flying ---\n # Power values\n roll = scale(joystick.get_axis(0), MAX_SPEED)\n pitch = -scale(joystick.get_axis(1), MAX_SPEED)\n yaw = scale(joystick.get_axis(3), MAX_SPEED)\n gaz = -scale(joystick.get_axis(4), MAX_SPEED)\n\n if roll != 0:\n userMovement = True\n\n if pitch != 0:\n userMovement = True\n\n if yaw != 0:\n userMovement = True\n\n if gaz != 0:\n userMovement = True\n\n if joystick.get_button(3) == 1 and not drone.findSphero:\n print(\"Start finding sphero\")\n drone.findSphero = True\n drone.moveScaler = .25\n\n # Upper and lower bounds for circle pixel radius\n drone.minCircleRadius = 5\n drone.maxCircleRadius = 20\n\n if joystick.get_button(2) and drone.findSphero:\n print(\"Stop finding sphero\")\n drone.findSphero = False\n\n # --- Move camera ---\n\n # Triggers to tilt\n if not(joystick.get_button(0) == 1) and joystick.get_axis(2) > 0.05:\n tiltDelta = scale(joystick.get_axis(2), -10)\n\n if not(joystick.get_button(0) == 1) and joystick.get_axis(5) > 0.05:\n tiltDelta = scale(joystick.get_axis(5), 10)\n\n tilt = clip(tilt + tiltDelta, tiltMin, tiltMax)\n pan = clip(pan + panDelta, panMin, panMax)\n\n # Reset camera on B\n if joystick.get_button(1) == 1:\n tilt = 0\n pan = 0\n\n if joystick.get_button(5) == 1:\n print(\"Flying to altitude: 1.5\")\n drone.flyToAltitude(1.25)\n\n drone.moveCamera(tilt, pan)\n\n # All movement updated here\n if userMovement:\n drone.update(cmd=movePCMDCmd(True, roll, pitch, yaw, gaz))\n elif drone.findSphero:\n roll = (drone.objectCenterX - (drone.frameWidth >> 1)) * drone.moveScaler\n pitch = ((drone.frameHeight >> 1) - drone.objectCenterY) * drone.moveScaler\n\n roll = clip(roll, -50, 50)\n pitch = clip(pitch, -50, 50)\n\n # print(\"Finding Sphero\")\n # print(roll)\n # print(pitch)\n spheroMoveCounter += 1\n\n # if (drone.sinceLastSphero % 3) == 0 and drone.altitude >= 1:\n # drone.update(cmd=movePCMDCmd(True, 0, 0, 0, -10))\n #\n # if drone.altitude < 1:\n # drone.flyToAltitude(1.5)\n\n # Adjustment test\n if (spheroMoveCounter 
% 3) == 0:\n roll *= .4\n pitch *= .4\n\n drone.update(cmd=movePCMDCmd(True, roll, pitch, 0, 0))\n else:\n drone.hover()\n\n # Limit to 20 frames per second\n clock.tick(20)\n except:\n print(\"Error\")\n if drone.flyingState is None or drone.flyingState == 1:\n # if taking off then do emergency landing\n drone.emergency()\n drone.land()\n done = True\n\n# Close the window and quit.\npygame.quit()","sub_path":"drone/droneSpheroTracking.py","file_name":"droneSpheroTracking.py","file_ext":"py","file_size_in_byte":13416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"444248201","text":"# --------------------------------------------------------------------------- #\n# hyper.py #\n# #\n# Copyright © 2015-2020, Rajiv Bakulesh Shah, original author. #\n# All rights reserved. #\n# --------------------------------------------------------------------------- #\n\n\nfrom typing import Iterable\nfrom typing import List\nfrom typing import Optional\nfrom typing import Union\nfrom typing import cast\n\nfrom redis import Redis\n\nfrom .base import Base\nfrom .base import JSONTypes\nfrom .base import RedisValues\n\n\nclass HyperLogLog(Base):\n '''Redis-backed HyperLogLog with a Pythonic API.\n\n Wikipedia article:\n https://en.wikipedia.org/wiki/HyperLogLog\n\n antirez's blog post:\n http://antirez.com/news/75\n\n Riak blog post:\n https://riak.com/posts/technical/what-in-the-hell-is-hyperloglog/index.html?p=13169.html\n '''\n\n def __init__(self,\n iterable: Iterable[RedisValues] = frozenset(),\n *,\n redis: Optional[Redis] = None,\n key: Optional[str] = None,\n ) -> None:\n '''Initialize a HyperLogLog. O(n)\n\n Here, n is the number of elements in iterable that you want to insert\n into this HyperLogLog.\n '''\n super().__init__(redis=redis, key=key)\n self.update(iterable)\n\n def add(self, value: RedisValues) -> None:\n 'Add an element to a HyperLogLog. O(1)'\n self.update({value})\n\n def update(self,\n *objs: Union['HyperLogLog', Iterable[RedisValues]],\n ) -> None:\n # We have to iterate over objs multiple times, so cast it to a tuple.\n # This allows the caller to pass in a generator for objs, and we can\n # still iterate over it multiple times.\n objs = tuple(objs)\n other_hll_keys: List[str] = []\n encoded_values: List[str] = []\n with self._watch(objs) as pipeline:\n for obj in objs:\n if isinstance(obj, self.__class__):\n if self.redis.connection_pool == obj.redis.connection_pool:\n other_hll_keys.append(obj.key)\n else: # pragma: no cover\n raise RuntimeError(\n f\"can't update {self} with {obj} as they live on \"\n \"different Redis instances/databases\"\n )\n else:\n for value in cast(Iterable[JSONTypes], obj):\n encoded_values.append(self._encode(value))\n pipeline.multi()\n pipeline.pfmerge(self.key, *other_hll_keys)\n pipeline.pfadd(self.key, *encoded_values)\n\n def union(self,\n *objs: Iterable[RedisValues],\n redis: Optional[Redis] = None,\n key: Optional[str] = None,\n ) -> 'HyperLogLog':\n new_hll = self.__class__(redis=redis, key=key)\n new_hll.update(self, *objs)\n return new_hll\n\n def __len__(self) -> int:\n '''Return the approximate number of elements in a HyperLogLog. O(1)\n\n Please note that this method returns an approximation, not an exact\n value. So please don't rely on it for anything important like\n financial systems or cat gif websites.\n '''\n return self.redis.pfcount(self.key)\n\n def __repr__(self) -> str:\n 'Return the string representation of a HyperLogLog. 
O(1)'\n return f'<{self.__class__.__name__} key={self.key} len={len(self)}>'\n","sub_path":"pottery/hyper.py","file_name":"hyper.py","file_ext":"py","file_size_in_byte":3727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"652374384","text":"import os\nimport shutil\nimport json\n\nfrom In.field import Field, FieldFielder\nfrom In.field import FieldEntityReference, FieldEntityReferenceFielder\nfrom In.field import FieldFormatter, FieldFormatterConfigForm\n\nimport magic\n\nclass FieldFile(FieldEntityReference):\n\t'''EntityReference field'''\n\t__input_field_type__ = 'FileUpload'\n\t\n@IN.register('FieldFile', type = 'Fielder')\nclass FieldFileFielder(FieldEntityReferenceFielder):\n\t'''Base Field Fielder'''\n\n\tdefault_file_bundle = 'file'\n\t\n\t\n\tdef __file_create_from_post__(self, field_value):\n\t\tif type(field_value) is not dict:\n\t\t\treturn int(field_value)\n\t\t\n\t\tif '__upload__' in field_value and field_value['__upload__'] and field_value['path']:\n\t\t\t\n\t\t\tpath = field_value['path']\n\t\t\t\n\t\t\treturn IN.filer.create_file_entity(path, self.default_file_bundle)\n\t\t\n\tdef __field_prepare_insert_update__(self, field):\n\t\t'''prepare the field submit values to db insert/update'''\n\t\n\t\tvalue = field.value\n\t\tentity = field.entity\n\t\tentitier = IN.entitier\n\t\t\n\t\tif value:\n\t\t\tfor lang, lang_items in value.items():\n\t\t\t\tnew_lang_items = {}\n\t\t\t\tnew_idx = 0\n\t\t\t\tfor idx in sorted(lang_items.keys(), key = lambda o:o):\n\t\t\t\t\tidx_items = lang_items[idx]\n\t\t\t\t\t\n\t\t\t\t\tfield_value = idx_items['value']\n\t\t\t\t\t# new file uploaded\n\t\t\t\t\t\n\t\t\t\t\tif type(field_value) is dict and '__upload__' in field_value and field_value['__upload__'] and field_value['path']:\n\t\t\t\t\t\t\n\t\t\t\t\t\tfile_id = self.__file_create_from_post__(field_value)\n\t\t\t\t\t\t\n\t\t\t\t\t\tif file_id:\n\t\t\t\t\t\t\tidx_items['value'] = file_id\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\traise Exception('Unable to save the file ' + field_value['path'])\n\t\t\t\t\t\t\tIN.logger.debug('Unable to save the file ' + field_value['path'])\n\t\t\t\t\t\t\t#del field.value[lang][idx]\n\t\t\t\t\t\t\tidx_items['value'] = 0\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\n\t\t\t\t\tif idx_items['value']:\n\t\t\t\t\t\tnew_lang_items[new_idx] = idx_items\n\t\t\t\t\t\tnew_idx += 1\n\t\t\t\t\n\t\t\t\tvalue[lang] = new_lang_items\n\t\n\tdef prepare_insert(self, field):\n\t\t'''prepare the field submit values to db insert'''\n\t\tself.__field_prepare_insert_update__(field)\n\t\t\n\tdef prepare_update(self, field):\n\t\t'''prepare the field submit values to db update'''\n\t\tself.__field_prepare_insert_update__(field)\n\t\t\n\n@IN.hook\ndef field_model():\n\t# default model\n\treturn {\n\t\t'FieldFile' : {\t\t\t\t\t# field type\n\t\t\t'columns' : {\t\t\t\t\t\t\t# table columns\n\t\t\t\t'id' : {'type' : 'bigserial'},\n\t\t\t\t'entity_type' : {'type' : 'varchar', 'length' : 64},\n\t\t\t\t'entity_id' : {'type' : 'bigint'},\n\t\t\t\t'language' : {'type' : 'varchar', 'length' : 4, 'default' : 'lang'},\n\t\t\t\t'weight' : {'type' : 'smallint'},\n\t\t\t\t'value' : {'type' : 'bigint'}, \t\t# big int\n\t\t\t\t'created' : {},\n\t\t\t},\n\t\t\t'keys' : {\n\t\t\t\t'primary' : 'id',\n\t\t\t},\n\t\t},\n\t}\n\n#class (Field):\n\t#__input_field_type__ = 'TextBox'\n\n#@IN.register('FieldFile', type = 'Fielder')\n#class (FieldFielder):\n\t#'''Base Field Fielder'''\n\n\n\t#def form_field(self, field_config, field_value = None, language 
= ''):\n\t\t#'''returns form field based on field type, data, language'''\n\n\t\t#print('FFFFFFFFFFFFFFFF', field_value)\n\t\t#field_name = field_config['field_name']\n\t\t#field_data = field_config['data']\n\t\t#if field_data is None:\n\t\t\t#field_data = {}\n\t\t#if field_value is None:\n\t\t\t#field_value = {}\n\t\t#print('f1f1f1f1', field_value)\n\t\t#title = field_data.get('title', field_name)\n\t\t#max_allowed = int(field_data.get('max_allowed', 1)) # 0, unlimited\n\t\t#new_empty_fields = int(field_data.get('new_empty_fields', 1))\n\t\t#print(max_allowed, new_empty_fields)\n\t\t## '': field is available to all language\n\t\t#field_languages = field_data.get('languages', [''])\n\t\t#if field_languages is None:\n\t\t\t#field_languages = [''] # all language\n\n\t\t## return if field is not for this language\n\t\t#if language not in field_languages:\n\t\t\t#print('LLLLLLLLLL this field is not available in language', field_name, language, field_languages)\n\t\t\t#return\n\t\t\n\t\t## wrapper\n\t\t#obj = Object.new('HTMLField', {\n\t\t\t#'id' : field_name,\n\t\t\t#'title' : title,\n\t\t\t#'weight': field_config['weight'],\n\t\t\t#'css' : ['field form-field']\n\t\t#})\n\t\t#print('field name obj', obj.id)\n\t\t#for lang, idx_val in field_value.items():\n\t\t\t#if lang not in field_languages:\n\t\t\t\t#print('lang not avai', lang, idx_val)\n\t\t\t\t#continue\n\t\t\t\t\n\t\t\t#for idx, value in idx_val.items():\n\n\t\t\t\t#name = ''.join((field_name, '[', lang, '][', str(idx), '][value]'))\n\t\t\t\t#id = '_'.join((field_name, lang, str(idx), 'value'))\n\t\t\t\t#obj.add(type = self.field_class.__input_field_type__, data = {\n\t\t\t\t\t#'id' : id,\n\t\t\t\t\t#'name' : name,\n\t\t\t\t\t#'value' : value['value'],\n\t\t\t\t\t#'placeholder' : title,\n\t\t\t\t\t##'validation_rule' : ['Length', 6, '>', 0, 'The loginname length should be greater than 6.'],\n\t\t\t\t\t#'css' : ['i-width-1-1 i-form-large'],\n\t\t\t\t\t#'weight' : int(idx),\n\t\t\t\t#})\n\n\t\t#added = len(obj)\n\t\t## add remaining new/empty fields\n\t\t#if max_allowed != 0:\n\t\t\t#new_empty_fields = max_allowed - added\n\t\t#print('NEW empty fields', new_empty_fields, ' max ', max_allowed)\n\t\t#if new_empty_fields > 0:\n\t\t\t## add new empty\n\t\t\t#for added_idx in range(added, new_empty_fields + added):\n\t\t\t\t#name = ''.join((field_name, '[', language, '][', str(added_idx), '][value]'))\n\t\t\t\t#id = '_'.join((field_name, language, str(added_idx), 'value'))\n\t\t\t\t#obj.add(type = self.field_class.__input_field_type__, data = {\n\t\t\t\t\t#'id' : id,\n\t\t\t\t\t#'name' : name,\n\t\t\t\t\t#'value' : '',\n\t\t\t\t\t#'placeholder' : title,\n\t\t\t\t\t##'validation_rule' : ['Length', 6, '>', 0, 'The loginname length should be greater than 6.'],\n\t\t\t\t\t#'css' : ['i-width-1-1 i-form-large'],\n\t\t\t\t\t#'weight' : added_idx,\n\t\t\t\t#})\n\t\t\n\t\t#return obj\n\t\t\n\n\n@IN.register('FieldFile', type = 'FieldFormatter')\nclass FieldFileFieldFormatter(FieldFormatter):\n\t'''Base class of all IN FieldFormatterBase.\n\n\t'''\n\t\n\t__info__ = s('file')\n\t\n\tdef format_value(self, field, format, view_mode, args, config):\n\t\toutput_value = ''\n\t\t\n\t\t\n\t\tfield_values = field.value\n\t\tif field_values is not None:\n\t\t\tvalues = []\n\t\t\tfor lang, lang_value in field_values.items():\n\t\t\t\t# sort by weight\n\t\t\t\tsi = sorted(lang_value.items(), key = lambda o: int(o[0]))\n\t\t\t\tfor idx_value in si:\n\t\t\t\t\tvalues.append(str(idx_value[1]['value']))\n\t\t\t\t\t\n\t\t\toutput_value = ', 
'.join(values)\n\t\t\n\t\treturn output_value\n\n\n","sub_path":"In/filer/field_file.py","file_name":"field_file.py","file_ext":"py","file_size_in_byte":6079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"466871556","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('behavior', '0002_auto_20150608_1519'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='comment',\n name='url',\n ),\n migrations.AddField(\n model_name='post',\n name='post_id',\n field=models.CharField(default=0, max_length=12),\n ),\n ]\n","sub_path":"behavior/migrations/0003_auto_20150608_2016.py","file_name":"0003_auto_20150608_2016.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"562444365","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Jul 7 15:20:49 2018\r\n\r\n@author: Simon Nguyen\r\n\r\nThis code reload a stored model and predict a new data\r\n\"\"\"\r\n\r\nimport numpy as np\r\nnp.random.seed(123) # for reproducibility\r\n \r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense, Dropout, Activation, Flatten\r\nfrom keras.layers import Convolution2D, MaxPooling2D\r\nfrom keras.utils import np_utils\r\nfrom keras.datasets import mnist\r\nfrom keras.models import model_from_json\r\nfrom matplotlib import pyplot as plt\r\nfrom keras import backend as K\r\nK.set_image_dim_ordering('th')\r\n \r\n# 4. Load pre-shuffled MNIST data into train and test sets\r\n(X_train, y_train), (X_test, y_test) = mnist.load_data()\r\nprint ('Shape of X_train: ', X_train.shape)\r\nprint ('Shape of X_test: ', X_test.shape)\r\nplt.imshow(X_train[10])\r\n\r\n# 5. Preprocess input data\r\nX_train = X_train.reshape(X_train.shape[0], 1, 28, 28)\r\nX_test = X_test.reshape(X_test.shape[0], 1, 28, 28)\r\nX_train = X_train.astype('float32')\r\nX_test = X_test.astype('float32')\r\nX_train /= 255\r\nX_test /= 255\r\n \r\n# 6. Preprocess class labels\r\nY_train = np_utils.to_categorical(y_train, 10)\r\nY_test = np_utils.to_categorical(y_test, 10)\r\n\r\nprint ('Shape of X_train: ', X_train.shape)\r\nprint ('Shape of X_test: ', X_test.shape)\r\n \r\n## 7. Define model architecture\r\n#model = Sequential()\r\n#print('Start to create model')\r\n#model.add(Convolution2D(32, 3, 3, activation='relu', input_shape=(1,28,28)))\r\n#model.add(Convolution2D(32, 3, 3, activation='relu'))\r\n#model.add(MaxPooling2D(pool_size=(2,2)))\r\n#model.add(Dropout(0.25))\r\n# \r\n#model.add(Flatten())\r\n#model.add(Dense(128, activation='relu'))\r\n#model.add(Dropout(0.5))\r\n#model.add(Dense(10, activation='softmax'))\r\n# \r\n## 8. Compile model\r\n#print('Compile model')\r\n#model.compile(loss='categorical_crossentropy',\r\n# optimizer='adam',\r\n# metrics=['accuracy'])\r\n# \r\n## 9. Fit model on training data\r\n#print('Fit model')\r\n#model.fit(X_train, Y_train, batch_size=32, epochs=1, verbose=1)\r\n# \r\n## 10. 
Evaluate model on test data\r\n#print('Evaluate model')\r\n#score = model.evaluate(X_test, Y_test, verbose=0)\r\n#print(\"%s: %.2f%%\" % (model.metrics_names[1], score[1]*100))\r\n#\r\n## serialize model to JSON\r\n#model_json = model.to_json()\r\n#with open(\"testKeras.json\", \"w\") as json_file:\r\n#    json_file.write(model_json)\r\n## serialize weights to HDF5\r\n#model.save_weights(\"testKeras.h5\")\r\n#print(\"Saved model to disk\")\r\n\r\n# load json and create model\r\njson_file = open('testKeras.json', 'r')\r\nloaded_model_json = json_file.read()\r\njson_file.close()\r\nloaded_model = model_from_json(loaded_model_json)\r\n# load weights into new model\r\nloaded_model.load_weights(\"testKeras.h5\")\r\nprint(\"Loaded model from disk\")\r\n \r\n# evaluate loaded model on test data (use the same categorical loss the 10-class softmax model was trained with)\r\nloaded_model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])\r\nscore = loaded_model.evaluate(X_test, Y_test, verbose=0)\r\nprint(\"%s: %.2f%%\" % (loaded_model.metrics_names[1], score[1]*100))\r\n\r\n# Test new data\r\ny_new = loaded_model.predict_classes(X_train[0:1,:,:,:])\r\nprint('result: ',y_new)\r\nprint('Actual result: ', Y_train[0:1,:])","sub_path":"TF_Journey/MNIST/testyourown/runKeras.py","file_name":"runKeras.py","file_ext":"py","file_size_in_byte":3131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"443904969","text":"import urllib.request\nfrom bs4 import BeautifulSoup\n\nclass DataScrapper:\n\n    def get_results(self,url):\n        user_agent = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7'\n        headers = {'User-Agent':user_agent,}\n        request = urllib.request.Request(url,None,headers)\n        response = urllib.request.urlopen(request)\n        return response\n\n    def get_twitter_results(self):\n        url = \"https://twitter.com/search?q=reactjs\"\n        response = self.get_results(url)\n        data = BeautifulSoup(response.read(),'lxml')\n        all_tweets = data.findAll(\"div\", { \"class\" : \"dir-ltr\" })[0]\n        return all_tweets\n\n    def get_google_results(self):\n        url = \"https://www.google.co.in/search?q=reactjs\"\n        response = self.get_results(url)\n        data = BeautifulSoup(response.read(),'lxml')\n        all_results = data.findAll(\"cite\")\n        return all_results\n    ","sub_path":"st/data_scrapper.py","file_name":"data_scrapper.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"70941950","text":"import sys\nsys.path.insert(0, r'..\\..\\src')\n\nimport numpy as np\nimport unittest\n\nfrom element import Node\nfrom element.D1 import Rod\nfrom mesh import Truss\n\n\nclass MyTestCase(unittest.TestCase):\n\n    def setup_truss1(self):\n        nodeA = Node(1, [0., 0.], ndof=2)\n        nodeB = Node(2, [1., 0.], ndof=2)\n        nodeC = Node(3, [1., 1.], ndof=2)\n        nodeD = Node(4, [0., 1.], ndof=2)\n\n        nodes = [nodeA, nodeB, nodeC, nodeD]\n\n        E = 1.e11\n        area = .02\n\n        element1 = Rod(1, [nodeA, nodeD], E=E, area=area)\n        element2 = Rod(2, [nodeD, nodeC], E=E, area=area)\n        element3 = Rod(3, [nodeC, nodeB], E=E, area=area)\n        element4 = Rod(4, [nodeD, nodeB], E=E, area=area)\n\n        elements = [element1, element2, element3, element4]\n        displacements = {1: [0., 0.],\n                         2: [0., 0.],\n                         3: [None, None],\n                         4: [None, None]}\n\n        loads = {1: [None, None],\n                 2: [None, None],\n                 3: [10., 0.],\n                 4: [ 0., 0.]}\n\n        return nodes, elements, displacements, loads\n\n    def test_truss1_displacement_solution(self):\n        nodes, elements, displacements, loads = self.setup_truss1()\n        
truss = Truss(nodes, elements, displacements, loads)\n\n expected = {1: 1.e-7 * np.array([0., 0.]),\n 2: 1.e-7 * np.array([0., 0.]),\n 3: 1.e-7 * np.array([0.2414213562373097, 0.]),\n 4: 1.e-7 * np.array([.19142135623730963, 0.05])}\n\n for key in expected:\n self.assertTrue(np.allclose(expected[key], truss.displacements[key]))\n\n def test_truss1_L_matrices(self):\n nodes, elements, displacements, loads = self.setup_truss1()\n truss = Truss(nodes, elements, displacements, loads, auto=False)\n\n L1_expected = np.zeros((4, 8))\n L1_expected[0, 0] = 1.\n L1_expected[1, 1] = 1.\n L1_expected[2, 6] = 1.\n L1_expected[3, 7] = 1.\n\n L2_expected = np.zeros((4, 8))\n L2_expected[0, 6] = 1.\n L2_expected[1, 7] = 1.\n L2_expected[2, 4] = 1.\n L2_expected[3, 5] = 1.\n\n L3_expected = np.zeros((4, 8))\n L3_expected[0, 4] = 1.\n L3_expected[1, 5] = 1.\n L3_expected[2, 2] = 1.\n L3_expected[3, 3] = 1.\n\n L4_expected = np.zeros((4, 8))\n L4_expected[0, 6] = 1.\n L4_expected[1, 7] = 1.\n L4_expected[2, 2] = 1.\n L4_expected[3, 3] = 1.\n\n self.assertTrue(np.allclose(truss.L[1], L1_expected))\n self.assertTrue(np.allclose(truss.L[2], L2_expected))\n self.assertTrue(np.allclose(truss.L[3], L3_expected))\n self.assertTrue(np.allclose(truss.L[4], L4_expected))\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test/mesh/test_truss.py","file_name":"test_truss.py","file_ext":"py","file_size_in_byte":2779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"421934217","text":"import PhysicsTools.HeppyCore.framework.config as cfg\nimport os\nimport glob\n\n#####COMPONENT CREATOR\n\nfrom CMGTools.RootTools.samples.ComponentCreator import ComponentCreator\nkreator = ComponentCreator()\n\n## Pythia MC\n#files_2018_400k = glob.glob('/gwteras/cms/store/user/lguzzi/WToTauNu_Tau3Mu_Pythia_RunIIFall18/crab_miniaod/200501_160511/0000/wtotaunu_tau3mu_phytia_RunIIAutumn18MiniAOD_*.root')\n# first private production (BR bug)\n#files_2018_400k = glob.glob('/gwteras/cms/store/user/lguzzi/WToTauNu_Tau3Mu_Pythia/CRAB3_MC_generation_400k_miniaod_smallfiles/190321_150932/*/*.root')\n\n#WToTauTo3Mu_Pythia = cfg.MCComponent(\n# dataset = 'WToTauTo3Mu',\n# name = 'WToTauTo3Mu',\n# files = files_2018_400k ,\n# xSection = 21490.9, # this uses the correct tau BR from the PDG # 20508.9 * 1.e-7, # W to lep nu / 3.[pb] x BR\n# nGenEvents = 381627,\n# effCorrFactor = 1,\n#)\n\n\n## use this one, larger stats!\n#WToTauTo3Mu_MadGraph = kreator.makeMCComponent(\n# name = 'WToTauTo3Mu' ,\n# dataset = '/W_ToTau_ToMuMuMu_TuneCP5_13TeV-pythia8-madgraph/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15-v1/MINIAODSIM',\n# user = 'CMS' , \n# pattern = '.*root' ,\n# useAAA = True ,\n#)\n\n#WToTauNu_Tau3Mu_Pythia_UL = cfg.MCComponent(\n# dataset = 'WToTauTo3Mu',\n# name = 'WToTauTo3Mu',\n# files = '/gwpool/users/lguzzi/Tau3Mu/2017_2018/MC_prod/ultralegacy/MINIAOD/WTau3Mu_BPH-RunIISummer19UL18MiniAOD-00008_v2.root',\n# xSection = 21490.9, # this uses the correct tau BR from the PDG # 20508.9 * 1.e-7, # W to lep nu / 3.[pb] x BR\n# nGenEvents = 50000,\n# effCorrFactor = 1,\n#)\n\n#WToTauNu_Tau3Mu_Pythia_ULcentral = kreator.makeMCComponent(\n# name = 'WToTauTo3Mu' ,\n# dataset = '/W_ToTau_ToMuMuMu_TuneCP5_13TeV-pythia8/RunIISummer19UL18MiniAOD-106X_upgrade2018_realistic_v11_L1v1-v2/MINIAODSIM',\n# user = 'CMS' ,\n# pattern = '.*root' ,\n# useAAA = True ,\n#)\n\nWToTauTo3Mu_MadGraph = kreator.makeMCComponent(\n name = 'WToTauTo3Mu' ,\n dataset = 
'/W_ToTau_ToMuMuMu_2HDM_TuneCP5_13TeV-pythia8-madgraph/RunIISummer19UL18MiniAOD-106X_upgrade2018_realistic_v11_L1v1-v2/MINIAODSIM',\n    user    = 'CMS'    ,\n    pattern = '.*root' ,\n    useAAA  = True     ,\n)\n\nWToTauNu_Tau3Mu_Pythia_ULcentral = cfg.MCComponent(\n    dataset  = 'WToTauTo3Mu',\n    name     = 'WToTauTo3Mu',\n    files    = glob.glob('/gwteray/users/lguzzi/W_ToTau_ToMuMuMu_TuneCP5_13TeV-pythia8/RunIISummer19UL18MiniAOD-106X_upgrade2018_realistic_v11_L1v1-v2/deepMET/MINIAOD/*.root'),\n    xSection = 21490.9, # this uses the correct tau BR from the PDG # 20508.9 * 1.e-7, # W to lep nu / 3.[pb] x BR\n    nGenEvents = 492000,\n    effCorrFactor = 1,\n)","sub_path":"python/samples/mc_2018.py","file_name":"mc_2018.py","file_ext":"py","file_size_in_byte":2870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"287059158","text":"\"\"\"\nBase classes\n\"\"\"\nimport time\n\ntry:\n    from yaml import CLoader as YamlLoader, CDumper as YamlDumper\nexcept ImportError:\n    # PyYAML's pure-Python fallbacks are named Loader/Dumper\n    from yaml import Loader as YamlLoader, Dumper as YamlDumper\nimport threading\ntry:\n    import queue\nexcept ImportError:\n    import Queue as queue\n\n\nimport logging\nlogger = logging.getLogger(__name__)\n\nclass PIDFinder(object):\n    \"\"\" PIDFinder base class \"\"\"\n\n    def __init__(self,id,jobid,config):\n        self.id = id\n        self.jobid = jobid\n        self.config = config\n\n    def find(self):\n        raise Exception(\"Not implemented\")\n        # return []\n\nclass Sampler(threading.Thread):\n    \"\"\" Sampler base class \"\"\"\n\n    def __init__(self,id,outQueue,config):\n        super(Sampler,self).__init__()\n        self.id = id\n        self.outQueue = outQueue\n        self.config = config\n        self.jobid = self.config.get(['options','jobid'])\n        self.pidQueue = queue.Queue()\n        self.pids = []\n        self.sampler_interval = self.config.get([self.id,\"sampler_interval\"],60)\n\n    def init(self):\n        pass\n\n    def run(self):\n        try:\n            self.init()\n        except Exception as e:\n            logger.exception(\"Failed to do self.init in %s\" % self.id,e)\n        while True:\n            try:\n                pids = self.pidQueue.get(timeout = self.sampler_interval)\n                if not pids:\n                    self.pidQueue.task_done()\n                    break\n                logger.debug(\"Received new pids: %s\", pids)\n                self.pids.extend(pids)\n                self.pidQueue.task_done()\n            except queue.Empty as e:\n                logger.debug(\"%s queue.Empty timeout\" % self.id)\n                pass\n            try:\n                if self.do_sample():\n                    self.sample()\n            except Exception as e:\n                logger.exception(\"Failed to do self.sample in %s\" % self.id,e)\n        \n        try:\n            self.store(self.final_data(),'final')\n        except Exception as e:\n            logger.exception(\"Failed to do self.final_data in %s\" % self.id,e)\n        self.outQueue.join()\n    \n    def store(self,data,type='now'):\n        self.outQueue.put({\n            'id': self.id,\n            'data': data,\n            'type': type\n        })\n\n    # this should be implemented in the real Sampler..\n    def sample(self):\n        raise Exception(\"Not implemented\")\n\n    def do_sample(self):\n        return len(self.pids) > 0\n\n    def exit(self):\n        logger.debug(\"%s exit\" % self.id)\n        self.pidQueue.put(None)\n\nclass Aggregator(object):\n    \"\"\" Aggregator base class \"\"\"\n\n    def __init__(self,id,config):\n        self.id = id\n        self.config = config\n\n    def aggregate(self,data):\n        raise Exception(\"Not implemented\")\n\nclass Loader(object):\n    \"\"\" Loader base class \"\"\"\n\n    def __init__(self,id,config):\n        self.id = id\n        self.config = config \n\n    def load(self):\n        raise Exception(\"Not implemented\")\n    \n    def next(self):\n        raise Exception(\"Not implemented\")\n\n    def commit(self):\n        raise Exception(\"Not implemented\")\n\nclass Backend(object):\n    \"\"\" Backend base class \"\"\"\n\n    def 
__init__(self,id,config):\n        self.id = id\n        self.config = config\n\n    def update(self,updater):\n        raise Exception(\"Not implemented\")\n\n    def extract(self,xyz):\n        raise Exception(\"Not implemented\")\n\nclass Software(object):\n    \"\"\" Software base class \"\"\"\n\n    def __init__(self,id,config):\n        self.id = id\n        self.config = config\n\n    def update(self):\n        raise Exception(\"Not implemented\")\n    \nclass Output(threading.Thread):\n    \"\"\" Output base class \"\"\"\n    \n    def __init__(self,id,config):\n        super(Output,self).__init__()\n        self.id = id\n        self.config = config\n\n        self.dataQueue = queue.Queue()\n        self.jobid = self.config.get(['options','jobid'])\n\n    def run(self):\n        while True:\n            data = self.dataQueue.get()\n            if data is None:\n                self.dataQueue.task_done()\n                break\n            try:\n                self.store({ data['id']: data['data'] })\n            except Exception as e:\n                logger.exception(\"Failed to store\",e)\n            if 'type' in data and data['type'] == 'final':\n                try:\n                    self.final({ data['id']: data['data'] })\n                except Exception as e:\n                    logger.exception(\"Failed to do self.final in %s\" % self.id,e)\n            self.dataQueue.task_done()\n\n        for t in range(int(self.config.get([self.id,'retry_count'],3))):\n            try:\n                self.write()\n                break\n            except Exception as e:\n                logger.exception(\"Failed to do self.write in %s\" % self.id,e)\n                time.sleep(int(self.config.get([self.id,'retry_sleep'],3)))\n\n    def store(self,data):\n        raise Exception(\"Not implemented\")\n\n    def final(self,data):\n        self.store(data)\n\n    def write(self):\n        raise Exception(\"Not implemented\")\n\n    def exit(self):\n        self.dataQueue.put(None)\n\nclass XMLWriter(object):\n    \"\"\" XMLWriter base class \"\"\"\n\n    def __init__(self,id,config):\n        self.id = id\n        self.config = config\n\n    def write(self,data):\n        raise Exception(\"Not implemented\")\n","sub_path":"sams/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":5301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"124092453","text":"from .utils import PyKEArgumentHelpFormatter\nfrom astropy.io import fits as pyfits\nfrom matplotlib import pyplot as plt\nimport numpy as np\nfrom tqdm import tqdm  # used by the output-writing progress loop\nfrom . import kepio, kepmsg, kepkey, kepfunc, kepstat\n\n\n__all__ = ['kepfilter']\n\n\ndef kepfilter(infile, passband, outfile=None, datacol='SAP_FLUX', function='boxcar',\n              cutoff=1.0, plot=False, overwrite=False, verbose=False,\n              logfile='kepfilter.log'):\n    \"\"\"\n    kepfilter -- bandpass filtering of Kepler light curve data\n\n    ``kepfilter`` applies a bandpass filter to Kepler light curve data. In the\n    low bandpass option, the data is convolved with a function of\n    user-specified width. Choices of convolution function are **boxcar**,\n    **Gaussian** or **sinc**. In the high bandpass option the convolution minus\n    the median of the convolution is subtracted from the original data. The\n    filtered data is copied to a new FITS file with the same structure as the\n    input file.\n\n    Parameters\n    ----------\n    infile : str\n        The name of a MAST standard format FITS file containing Kepler light\n        curve data within the first data extension.\n    passband : str\n        The type of filter to be applied. A low bandpass filter will suppress\n        high-frequency signal shorter than the cutoff. A high bandpass filter\n        will suppress low-frequency signal longer than the cutoff.\n        The options are:\n\n        * low\n        * high\n    outfile : str\n        The name of the output FITS file. The output file is identical in\n        format to the input file. 
The data to be filtered will be overwritten\n in the output file by its filtered version.\n datacol : str\n The name of the data column in the input FITS file to be filtered, e.g.\n SAP_FLUX, PDCSAP_FLUX, MOM_CENTR1 etc. A full list of\n archived data columns is provided in the Kepler Archive Manual.\n function : string\n The functional form of the bandpass convolution function.\n The options are:\n\n * boxcar\n * gauss\n * sinc\n cutoff : float\n The frequency of the bandpass cutoff in units of days-1.\n plot : bool\n Plot the original light curve and the result of the filter?\n overwrite : bool\n Overwrite the output file? if overwrite is **False** and an existing\n file has the same name as outfile then the task will stop with an\n error.\n verbose : bool\n Print informative messages and warnings to the shell and logfile?\n logfile : str\n Name of the logfile containing error and warning messages.\n\n Examples\n --------\n\n .. code-block :: bash\n\n $ kepfilter kplr002436324-2009259160929_llc.fits --datacol 'SAP_FLUX' --function 'boxcar'\n --plot --verbose --overwrite\n\n .. image :: ../_static/images/api/kepfilter.png\n :align: center\n \"\"\"\n if outfile is None:\n outfile = infile.split('.')[0] + \"-{}.fits\".format(__all__[0])\n ## log the call\n hashline = '--------------------------------------------------------------'\n kepmsg.log(logfile, hashline, verbose)\n call = ('KEPFILTER -- '\n + ' infile={}'.format(infile)\n + ' outfile={}'.format(outfile)\n + ' datacol={}'.format(datacol)\n + ' function={}'.format(function)\n + ' cutoff={}'.format(cutoff)\n + ' passband={}'.format(passband)\n + ' plot={}'.format(plot)\n + ' overwrite={}'.format(overwrite)\n + ' verbose={}'.format(verbose)\n + ' logfile={}'.format(logfile))\n kepmsg.log(logfile, call+'\\n', verbose)\n ## start time\n kepmsg.clock('KEPFILTER started at',logfile,verbose)\n ## overwrite output file\n if overwrite:\n kepio.overwrite(outfile, logfile, verbose)\n if kepio.fileexists(outfile):\n errmsg = 'ERROR -- KEPFILTER: {} exists. 
Use --overwrite'.format(outfile)\n        kepmsg.err(logfile, errmsg, verbose)\n\n    ## open input file\n    instr = pyfits.open(infile, 'readonly')\n    tstart, tstop, bjdref, cadence = kepio.timekeys(instr, infile,\n                                                    logfile, verbose)\n    try:\n        work = instr[0].header['FILEVER']\n        cadenom = 1.0\n    except:\n        cadenom = cadence\n\n    ## fudge non-compliant FITS keywords with no values\n    instr = kepkey.emptykeys(instr, infile, logfile, verbose)\n    ## read table structure\n    table = kepio.readfitstab(infile, instr[1], logfile, verbose)\n    # read time and flux columns\n    barytime = kepio.readtimecol(infile, table, logfile, verbose)\n    flux = kepio.readsapcol(infile, table, logfile, verbose)\n    # filter input data table\n    try:\n        nanclean = instr[1].header['NANCLEAN']\n    except:\n        naxis2 = 0\n        for i in range(len(table.field(0))):\n            if (np.isfinite(barytime[i]) and np.isfinite(flux[i])\n                and flux[i] != 0.0):\n                table[naxis2] = table[i]\n                naxis2 += 1\n        instr[1].data = table[:naxis2]\n        kepkey.new('NANCLEAN', True, 'NaN cadences removed from data',\n                   instr[1], outfile, logfile, verbose)\n\n    ## read table columns\n    intime = (kepio.readtimecol(infile, instr[1].data, logfile, verbose)\n              + bjdref)\n    indata = kepio.readfitscol(infile, instr[1].data, datacol, logfile,\n                               verbose) / cadenom\n    ## define data sampling\n    tr = 1.0 / (cadence / 86400)\n    timescale = 1.0 / (cutoff / tr)\n    ## define convolution function\n    if function == 'boxcar':\n        filtfunc = np.ones(int(np.ceil(timescale)))\n    elif function == 'gauss':\n        timescale /= 2\n        dx = np.ceil(timescale * 10 + 1)\n        filtfunc = kepfunc.gauss([1.0, dx / 2 - 1.0, timescale],\n                                 np.linspace(0, dx - 1, dx))\n    elif function == 'sinc':\n        dx = np.ceil(timescale * 12 + 1)\n        fx = (np.linspace(0, dx - 1, dx) - dx / 2 + 0.5) / timescale\n        filtfunc = np.sinc(fx)\n\n    filtfunc /= np.sum(filtfunc)\n    ## pad time series at both ends with noise model\n    ave, sigma = (np.mean(indata[:len(filtfunc)]),\n                  np.std(indata[:len(filtfunc)]))\n    padded = np.append(kepstat.randarray(np.ones(len(filtfunc)) * ave,\n                       np.ones(len(filtfunc)) * sigma), indata)\n    ave, sigma = (np.mean(indata[-len(filtfunc):]),\n                  np.std(indata[-len(filtfunc):]))\n    padded = np.append(padded, kepstat.randarray(np.ones(len(filtfunc)) * ave,\n                       np.ones(len(filtfunc)) * sigma))\n    ## convolve data\n    convolved = np.convolve(padded,filtfunc,'same')\n    ## remove padding from the output array\n    if function == 'boxcar':\n        outdata = convolved[len(filtfunc):-len(filtfunc)]\n    else:\n        outdata = convolved[len(filtfunc):-len(filtfunc)]\n    ## subtract low frequencies\n    if passband == 'high':\n        outmedian = np.median(outdata)\n        outdata = indata - outdata + outmedian\n    ## comment keyword in output file\n    kepkey.history(call, instr[0], outfile, logfile, verbose)\n    ## clean up x-axis unit\n    intime0 = float(int(tstart / 100) * 100.0)\n    if intime0 < 2.4e6: intime0 += 2.4e6\n    ptime = intime - intime0\n    xlab = 'BJD $-$ {}'.format(intime0)\n    ## clean up y-axis units\n    pout = indata * 1.0\n    pout2 = outdata * 1.0\n    nrm = len(str(int(np.nanmax(pout)))) - 1\n    pout = pout / 10 ** nrm\n    pout2 = pout2 / 10 ** nrm\n    ylab = '10$^{}$ {}'.format(nrm, 'e$^-$ s$^{-1}$')\n    ## data limits\n    xmin = ptime.min()\n    xmax = ptime.max()\n    ymin = np.nanmin(pout)\n    ymax = np.nanmax(pout)\n    xr = xmax - xmin\n    yr = ymax - ymin\n    ptime = np.insert(ptime, [0], [ptime[0]])\n    ptime = np.append(ptime, [ptime[-1]])\n    pout = np.insert(pout, [0], [0.0])\n    pout = np.append(pout, 0.0)\n    pout2 = np.insert(pout2, [0], [0.0])\n    pout2 = np.append(pout2, 0.0)\n    ## plot light curve\n    if plot:\n        plt.figure()\n        
plt.clf()\n\n ## plot filtered data\n ax = plt.axes([0.06, 0.1, 0.93, 0.87])\n plt.gca().xaxis.set_major_formatter(plt.ScalarFormatter(useOffset=False))\n plt.gca().yaxis.set_major_formatter(plt.ScalarFormatter(useOffset=False))\n plt.plot(ptime, pout, color='#ff9900', linestyle='-', linewidth=1.0)\n plt.fill(ptime, pout, color='#ffff00', linewidth=0.0, alpha=0.2)\n if passband == 'low':\n plt.plot(ptime[1:-1], pout2[1:-1], color='#0000ff', linestyle='-',\n linewidth=1.0)\n else:\n plt.plot(ptime, pout2, color='#0000ff', linestyle='-',\n linewidth=1.0)\n plt.fill(ptime, pout2, color='#0000ff', linewidth=0.0, alpha=0.2)\n plt.xlabel(xlab, {'color' : 'k'})\n plt.ylabel(ylab, {'color' : 'k'})\n plt.xlim(xmin-xr*0.01,xmax+xr*0.01)\n if ymin >= 0.0:\n plt.ylim(ymin-yr*0.01,ymax+yr*0.01)\n else:\n plt.ylim(1.0e-10,ymax+yr*0.01)\n plt.grid()\n # render plot\n plt.show()\n ## write output file\n print(\"Writing output file {}...\".format(outfile))\n for i in tqdm(range(len(outdata))):\n instr[1].data.field(datacol)[i] = outdata[i]\n instr.writeto(outfile)\n ## close input file\n instr.close()\n ## end time\n kepmsg.clock('KEPFILTER completed at', logfile, verbose)\n\ndef kepfilter_main():\n import argparse\n\n parser = argparse.ArgumentParser(\n description='Low bandpass or high bandpass signal filtering',\n formatter_class=PyKEArgumentHelpFormatter)\n parser.add_argument('infile', help='Name of input file', type=str)\n parser.add_argument('--passband', help='low- or high-bandpass filter',\n type=str, choices=['low','high'])\n parser.add_argument('--outfile',\n help=('Name of FITS file to output.'\n ' If None, outfile is infile-kepfilter.'),\n default=None)\n parser.add_argument('--datacol', default='SAP_FLUX',\n help='Name of data column', type=str)\n parser.add_argument('--function', default='boxcar',\n help='The bandpass convolution function', type=str,\n choices=['boxcar','gauss','sinc'])\n parser.add_argument('--cutoff', default=1.0,\n help='Characteristic frequency cutoff of filter [1/days]',\n type=float)\n parser.add_argument('--plot', action='store_true',\n help='Plot result?')\n parser.add_argument('--overwrite', action='store_true',\n help='Overwrite output file?')\n parser.add_argument('--verbose', action='store_true',\n help='Write to a log file?')\n parser.add_argument('--logfile', help='Name of ascii log file',\n default='kepfilter.log', type=str)\n args = parser.parse_args()\n kepfilter(args.infile, args.passband, args.outfile, args.datacol,\n args.function, args.cutoff, args.plot, args.overwrite,\n args.verbose, args.logfile)\n","sub_path":"pyke/kepfilter.py","file_name":"kepfilter.py","file_ext":"py","file_size_in_byte":11082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"116867457","text":"import os\nimport tempfile\nimport dask.distributed\nimport swot_simulator.launcher\nimport swot_simulator.settings\n\nROOT = os.path.dirname(os.path.abspath(__file__))\n\n\ndef test_launcher():\n with tempfile.TemporaryDirectory() as tmpdir:\n parameters = swot_simulator.settings.Parameters.load_default()\n parameters.ephemeris = os.path.join(\n ROOT, \"..\", \"data\", \"ephemeris_calval_june2015_ell.txt\")\n parameters.nadir = True\n parameters.working_directory = tmpdir\n cluster = dask.distributed.LocalCluster()\n client = dask.distributed.Client(cluster)\n client.wait_for_workers(1)\n swot_simulator.launcher.launch(client, parameters, None)\n client.close()\n 
cluster.close()\n","sub_path":"tests/test_launcher.py","file_name":"test_launcher.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"88498836","text":"# Python 2 server example\nfrom http.server import BaseHTTPRequestHandler, HTTPServer\nimport os\nimport time\n\nhost_name = \"0.0.0.0\"\n# port = 8080\nport = int(os.environ.get('PORT', 17995))\n\nclass my_server(BaseHTTPRequestHandler):\n    def do_GET(self):\n        self.send_response(200)\n        self.send_header(\"Content-type\", \"text/html\")\n        self.end_headers()\n        self.wfile.write(bytes(\"<html><head><title>https://pythonbasics.org</title></head>\", \"utf-8\"))\n        self.wfile.write(bytes(\"<p>Request: %s</p>\" % self.path, \"utf-8\"))\n        self.wfile.write(bytes(\"<body>\", \"utf-8\"))\n        self.wfile.write(bytes(\"<p>This is an example web server.</p>\", \"utf-8\"))\n        self.wfile.write(bytes(\"</body></html>\", \"utf-8\"))\n\nif __name__ == \"__main__\": \n    webServer = HTTPServer((host_name, port), my_server)\n    print(\"Server started http://%s:%s\" % (host_name, port))\n\n    try:\n        webServer.serve_forever()\n    except KeyboardInterrupt:\n        pass\n\n    webServer.server_close()\n    print(\"Server stopped.\")","sub_path":"python2-web-server.py","file_name":"python2-web-server.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
Hard/longestStringChain.py","file_name":"longestStringChain.py","file_ext":"py","file_size_in_byte":2649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"283010903","text":"import random\nimport copy\nimport time\nimport sys\nimport matplotlib.pyplot as plt\nimport os\nfrom multiprocessing import Pool\nimport multiprocessing as multi\nfrom ship import Ship\nfrom tqdm import tqdm\nimport slackweb\n# import own modules #\nsys.path.append('../public')\nsys.path.append('../output')\nfrom constants import *\nfrom my_modules import *\n\nclass GA_Extended:\n\n def __init__(self,oil_price_data,freight_rate_outward,freight_rate_homeward,exchange_rate,demand,supply,newbuilding,secondhand,actionlist=None,generation=None,population_size=None,crossover_rate=None,mutation_rate=None):\n self.oil_price_data = oil_price_data #oil price predicted data\n self.freight_rate_outward_data = freight_rate_outward #freight rate outward predicted data\n self.freight_rate_homeward_data = freight_rate_homeward # freight rate return predicted data\n self.exchange_rate_data = exchange_rate # exchange_rate predicted data\n self.demand_data = demand#ship demand predicted data\n self.supply_data = supply#ship supply predicted data\n self.newbuilding = newbuilding#new building ship price data\n self.secondhand = secondhand#secondhand ship price data\n self.actionlist = None if actionlist else None # decision of action parts.\n self.generation = generation if generation else DEFAULT_GENERATION # the number of generation\n self.population_size = population_size if population_size else DEFAULT_POPULATION_SIZE # the number of individual\n self.mutation_rate = mutation_rate if mutation_rate else DEFAULT_MUTATION_RATE # the rate of mutation\n self.crossover_rate = crossover_rate if crossover_rate else DEFAULT_CROSSOVER_RATE\n self.population = [] # population that has individual\n self.temp = [] #temporary group that has individuals\n self.bestpopulation = [] # group that has the best individuals in each generation\n self.averagepopulation = [] # the average value of fitness in each generation\n self.number_of_train_data = DEFAULT_PREDICT_PATTERN_NUMBER\n self.fitness_dictionary = {}\n\n def adapt_rule(self,oil_data,freight_data,own_ship,time,rule_args):\n rule_integrate = copy.deepcopy(rule_args)\n result = [[False,0],[False,0],[False,0]]\n for which_action in range(DEFAULT_NUM_OF_ACTION_INTEGRATE):\n rule = rule_integrate[which_action]\n oil_start = convert2to10_in_list(rule[2])\n oil_end = convert2to10_in_list(rule[3])\n if oil_start > oil_end:\n oil_start, oil_end = oil_end, oil_start\n freight_start = convert2to10_in_list(rule[6])\n freight_end = convert2to10_in_list(rule[7])\n if freight_start > freight_end:\n freight_start, freight_end = freight_end, freight_start\n average_oil = 0\n if oil_start == oil_end:\n average_oil = oil_data[time - oil_start]['price']\n else:\n for month_oil in range(oil_start,oil_end + 1):\n if time - month_oil < 0:\n average_oil += OIL_PREV[time - month_oil]\n else:\n average_oil += oil_data[time - month_oil]['price']\n average_oil /= (oil_end - oil_start + 1)\n average_freight = 0\n if freight_start == freight_end:\n average_freight = freight_data[time - freight_start]['price']\n else:\n for month_freight in range(freight_start,freight_end + 1):\n if time - month_freight < 0:\n average_freight += FREIGHT_PREV[time - month_freight]\n else:\n average_freight += freight_data[time - month_freight]['price']\n average_freight /= (freight_end - 
freight_start + 1)\n flag = True\n list_1 = [0,4,8]\n list_2 = [1,5,9]\n list_3 = [OIL_PRICE_LIST,FREIGHT_RATE_LIST,OWN_SHIP_LIST]\n list_4 = [average_oil,average_freight,own_ship]\n for cond_1, cond_2, condition_type, data_compare in zip(list_1,list_2,list_3,list_4):\n lower = condition_type[convert2to10_in_list(rule[cond_1])]\n upper = condition_type[convert2to10_in_list(rule[cond_2])]\n if (lower < data_compare or lower == DO_NOT_CARE) and (data_compare < upper or upper == DO_NOT_CARE):\n pass\n else:\n flag = False\n if flag == True:\n result[which_action][0] = True\n result[which_action][1] = + 1\n return result\n \n def generateIndividual_with_wise(self):\n population = []\n for num in range(self.population_size):\n rule_random = []\n for trade in range(DEFAULT_NUM_OF_ACTION_INTEGRATE):\n rule_random.append([])\n for condition in range(DEFAULT_NUM_OF_CONDITION*2):\n rule_random[trade].append([])\n for bit in range(DEFAULT_NUM_OF_BIT):\n rule_random[trade][condition].append(random.randint(0,1))\n rule_random.append([0,0])\n rule_random[-1][0],rule_random[-1][1] = self.fitness_function(rule_random)\n rule_string = self.return_rule_str(rule_random)\n self.fitness_dictionary[rule_string] = copy.deepcopy([rule_random[-1][0],rule_random[-1][1]])\n population.append(copy.deepcopy(rule_random))\n return population\n\n def crossover(self,a_args,b_args):\n a = copy.deepcopy(a_args)\n b = copy.deepcopy(b_args)\n temp1 = []\n temp2 = []\n which_action = random.randint(0,DEFAULT_NUM_OF_ACTION_INTEGRATE-1)\n proportion = [1,1,1,1,1]\n rand = random.randint(0,sum(proportion)-1)\n crossover_block = 0\n for tryal in range(4):\n if rand < proportion[crossover_block]:\n pass\n else:\n rand -= proportion[crossover_block]\n crossover_block += 1\n crossover_block = crossover_block*2 + random.randint(0,1)\n for index in range(DEFAULT_NUM_OF_ACTION_INTEGRATE):\n if index == which_action:\n temp1.append([])\n temp2.append([])\n for condition in range(DEFAULT_NUM_OF_CONDITION*2):\n if condition == crossover_block:\n temp1[index].append([])\n temp2[index].append([])\n length = len(a[index][condition]) - 1\n crossover_point = random.randint(1,length-1)\n for former in range(0,crossover_point):\n temp1[index][condition].append(a[index][condition][former])\n temp2[index][condition].append(b[index][condition][former])\n for latter in range(crossover_point,len(a[index][condition])):\n temp1[index][condition].append(b[index][condition][latter])\n temp2[index][condition].append(a[index][condition][latter])\n else:\n temp1[index].append(a[index][condition])\n temp2[index].append(b[index][condition])\n else:\n temp1.append(a[index])\n temp2.append(b[index])\n temp1.append([0,0])\n temp2.append([0,0])\n return [temp1,temp2]\n\n def mutation(self,individual_args):\n individual = copy.deepcopy(individual_args)\n which_action = random.randint(0,DEFAULT_NUM_OF_ACTION_INTEGRATE-1)\n proportion = [1,1,1,1,1]\n rand = random.randint(0,sum(proportion)-1)\n mutation_block = 0\n for tryal in range(4):\n if rand < proportion[mutation_block]:\n pass\n else:\n rand -= proportion[mutation_block]\n mutation_block += 1\n mutation_block = mutation_block*2 + random.randint(0,1)\n length = len(individual[which_action][mutation_block]) - 1\n point = random.randint(0,length)\n if individual[which_action][mutation_block][point] == 0:\n individual[which_action][mutation_block][point] = 1\n else:\n individual[which_action][mutation_block][point] = 0\n return individual\n\n def fitness_function(self,rule_args):\n rule = 
copy.deepcopy(rule_args)\n Record = []\n for pattern in range(self.number_of_train_data):\n fitness = 0\n ship = Ship(TEU_SIZE,INITIAL_SPEED,ROUTE_DISTANCE)\n for year in range(DEFAULT_PREDICT_YEARS):\n cash_flow = 0\n if year >= PAYBACK_PERIOD and ship.exist_number + ship.order_number <= 0:\n break\n for month in range(0,12,TIME_STEP):\n current_oil_price = self.oil_price_data[pattern][year*12+month]['price']\n current_freight_rate_outward = self.freight_rate_outward_data[pattern][year*12+month]['price']\n current_freight_rate_homeward = self.freight_rate_homeward_data[pattern][year*12+month]['price']\n total_freight = ( current_freight_rate_outward * LOAD_FACTOR_ASIA_TO_EUROPE + current_freight_rate_homeward * LOAD_FACTOR_EUROPE_TO_ASIA)\n current_exchange = self.exchange_rate_data[pattern][year*12+month]['price']\n current_demand = self.demand_data[pattern][year*12+month]['price']\n current_supply = self.supply_data[pattern][year*12+month]['price']\n if year < PAYBACK_PERIOD:\n current_newbuilding = self.newbuilding[pattern][year*12+month]['price']\n current_secondhand = self.secondhand[pattern][year*12+month]['price']\n result = self.adapt_rule(self.oil_price_data[pattern],self.freight_rate_outward_data[pattern],ship.total_number+ship.order_number,year*12+month,rule)\n if result[0][0]:\n cash_flow += ship.buy_new_ship(current_newbuilding,result[0][1])\n if result[1][0]:\n cash_flow += ship.buy_secondhand_ship(current_secondhand,result[1][1])\n if result[2][0]:\n cash_flow += ship.sell_ship(current_secondhand,result[2][1])\n cash_flow += ship.calculate_income_per_time_step_month(current_oil_price,total_freight,current_demand,current_supply)\n cash_flow += ship.add_age()\n DISCOUNT = (1 + DISCOUNT_RATE) ** (year + 1)\n cash_flow *= self.exchange_rate_data[pattern][year*12+11]['price']\n fitness += cash_flow / DISCOUNT\n fitness /= HUNDRED_MILLION\n fitness /= SCALING\n Record.append(fitness)\n e, sigma = calc_statistics(Record)\n return [e,sigma]\n\n def process(self,rule_args,number):\n rule = copy.deepcopy(rule_args)\n e, sigma = self.fitness_function(rule)\n return [e,sigma,self.return_rule_str(rule),number]\n\n def wrapper_process(self,args):\n return self.process(*args)\n\n def selection(self,generation):\n #store last generation's best individual unchanged\n self.population.sort(key=lambda x:x[-1][0],reverse = True)\n #roulette selection and elite storing\n #store the best 5% individual\n self.temp.sort(key=lambda x:x[-1][0],reverse = True)\n elite_number = int(self.population_size * 0.05)\n start = 1 if generation != 0 else 0\n for ith_individual in range(start,elite_number+1):\n self.population[ith_individual] = copy.deepcopy(self.temp[ith_individual])\n min_fit = self.temp[-1][-1][0]\n random.shuffle(self.temp)\n ark = 0 # the number used to roulette in crossing\n probability = 0\n for jth_individual in range(len(self.temp)):\n probability = probability + self.temp[jth_individual][-1][0] + (0.1 - min_fit)#Translation\n roulette = 0\n for kth_individual in range(elite_number+1,self.population_size):\n roulette = random.randint(0,int(probability))\n while roulette > 0:\n roulette = roulette - (self.temp[ark][-1][0] + 0.1 - min_fit)\n ark = (ark + 1) % len(self.temp)\n self.population[kth_individual] = copy.deepcopy(self.temp[ark])\n\n def exchange_rule(self):\n for individual_index in range(len(self.temp)):\n for condition_block in range(DEFAULT_NUM_OF_ACTION_INTEGRATE):\n condition = self.temp[individual_index][condition_block]\n if 
OIL_PRICE_LIST[convert2to10_in_list(condition[0])] > OIL_PRICE_LIST[convert2to10_in_list(condition[1])]:\n condition[0],condition[1] = copy.deepcopy(condition[1]),copy.deepcopy(condition[0])\n if FREIGHT_RATE_LIST[convert2to10_in_list(condition[2])] > FREIGHT_RATE_LIST[convert2to10_in_list(condition[3])]:\n condition[2],condition[3] = copy.deepcopy(condition[3]),copy.deepcopy(condition[2])\n if EXCHANGE_RATE_LIST[convert2to10_in_list(condition[4])] > EXCHANGE_RATE_LIST[convert2to10_in_list(condition[5])]:\n condition[4],condition[5] = copy.deepcopy(condition[5]),copy.deepcopy(condition[4])\n if OWN_SHIP_LIST[convert2to10_in_list(condition[6])] > OWN_SHIP_LIST[convert2to10_in_list(condition[7])]:\n condition[6],condition[7] = copy.deepcopy(condition[7]),copy.deepcopy(condition[6])\n if FREIGHT_RATE_LIST[convert2to10_in_list(condition[8])] > FREIGHT_RATE_LIST[convert2to10_in_list(condition[9])]:\n condition[8],condition[9] = copy.deepcopy(condition[9]),copy.deepcopy(condition[8])\n\n def store_best_and_average(self):\n self.population.sort(key=lambda x:x[-1][0],reverse = True)\n self.bestpopulation.append(self.population[0])\n random.shuffle(self.population)\n total = 0\n for e in range(self.population_size):\n total += self.population[e][-1][0]\n self.averagepopulation.append(total/self.population_size)\n\n def depict_fitness(self,gene):\n x = range(0,len(self.bestpopulation))\n y = []\n z = []\n for i in range(len(self.bestpopulation)):\n y.append(self.bestpopulation[i][-1][0])\n z.append(self.averagepopulation[i])\n plt.plot(x, y, marker='o',label='best')\n plt.plot(x, z, marker='x',label='average')\n plt.title('Transition of fitness', fontsize = 20)\n plt.xlabel('generation', fontsize = 16)\n plt.ylabel('fitness value', fontsize = 16)\n plt.tick_params(labelsize=14)\n plt.grid(True)\n plt.legend(loc = 'lower right')\n save_dir = '../output/rule-discovered'\n plt.savefig(os.path.join(save_dir, 'integrate_fitness_{}.png'.format(gene)))\n plt.close()\n\n def depict_average_variance(self,gene=None):\n x = []\n y = []\n for i in range(self.population_size): \n x.append(self.population[i][-1][0])\n y.append(self.population[i][-1][1])\n plt.scatter(x,y)\n x_min = min(x)\n x_min = x_min*0.9 if x_min>0 else x_min*1.1\n plt.xlim(0,1)\n plt.ylim(0,1)\n plt.title(\"Rule Performance\")\n plt.xlabel(\"Expectation\")\n plt.ylabel(\"Variance\")\n plt.grid(True)\n save_dir = '../output/train/image'\n if gene is not None:\n plt.savefig(os.path.join(save_dir, 'Evaluation_initial.png'))\n else:\n plt.savefig(os.path.join(save_dir, 'Evaluation.png'))\n plt.close()\n\n def check_convergence(self,target,criteria):\n flag = True\n for index in range(1,criteria+1):\n if target[-index][-1] != target[-(index+1)][-1]:\n flag = False\n break\n return flag\n\n def return_rule_str(self,lists_args):\n lists = copy.deepcopy(lists_args)\n rule_string = ''\n for rule_type in range(DEFAULT_NUM_OF_ACTION_INTEGRATE):\n for condition_block in range(DEFAULT_NUM_OF_CONDITION*2):\n block = lists[rule_type][condition_block]\n for e in block:\n rule_string += str(e)\n return rule_string\n\n def execute_GA(self):\n time_record = [0]\n first = time.time()\n #randomly generating individual group\n #for p_size in range(self.population_size):\n # self.population.append(self.generateIndividual())\n self.population = copy.deepcopy(self.generateIndividual_with_wise())\n self.depict_average_variance(0)\n\n #genetic algorithm\n for gene in tqdm(range(self.generation)):\n #crossover\n self.temp = copy.deepcopy(self.population)\n 
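#temp holds a fresh copy of the parents; shuffle it, then recombine adjacent pairs with probability crossover_rate (appending both children doubles temp to 2*population_size before mutation and selection)\n            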
random.shuffle(self.temp)\n for selected in range(0,self.population_size,2):\n if random.random() < self.crossover_rate:\n a,b = self.crossover(self.temp[selected],self.temp[selected+1])\n else:\n a,b = self.temp[selected],self.temp[selected+1]\n self.temp.append(copy.deepcopy(a))\n self.temp.append(copy.deepcopy(b))\n \n #mutation\n for individual_index in range(self.population_size*2):\n if random.random() < self.mutation_rate:\n self.temp[individual_index] = copy.deepcopy(self.mutation(self.temp[individual_index]))\n\n #rule check\n self.exchange_rule()\n \n #fitness calculation\n num_pool = multi.cpu_count()\n num_pool = int(num_pool*0.95)\n tutumimono = []\n for individual_index in range(self.population_size*2):\n rule_string = self.return_rule_str(self.temp[individual_index])\n if rule_string in self.fitness_dictionary:\n self.temp[individual_index][-1][0] = self.fitness_dictionary[rule_string][0]\n self.temp[individual_index][-1][1] = self.fitness_dictionary[rule_string][1]\n else:\n tutumimono.append([copy.deepcopy(self.temp[individual_index]),individual_index])\n #tutumimono = [[self.temp[individual_number], individual_number] for individual_number in range(self.population_size*2)]\n '''\n with Pool(num_pool) as pool:\n p = pool.map(self.wrapper_process, tutumimono)\n #for index in range(self.population_size*2):\n for i in range(len(p)):\n index = p[i][-1]\n self.temp[index][-1][0] = p[i][0]\n self.temp[index][-1][1] = p[i][1]\n self.fitness_dictionary[p[i][2]] = copy.deepcopy([p[i][0],p[i][1]])\n '''\n for index in range(self.population_size*2):\n rule_string = self.return_rule_str(self.temp[index])\n if rule_string in self.fitness_dictionary:\n pass\n else:\n e, sigma = self.fitness_function(self.temp[index])\n self.temp[index][-1][0] = e\n self.temp[index][-1][1] = sigma\n self.fitness_dictionary[rule_string] = [e,sigma]\n #'''\n #selection\n self.selection(gene)\n\n #store best and average individual\n self.store_best_and_average()\n #if gene > 1000 and self.check_convergence(self.bestpopulation,500):\n # break\n time_record.append(time.time()-first)\n\n if gene % 500 == 0:\n export_rules_integrate_csv(self.population,gene)\n export_dictionary(self.fitness_dictionary)\n self.depict_fitness(gene)\n\n x = range(self.generation+1)\n plt.plot(x,time_record)\n save_dir = '../output/train/image'\n plt.savefig(os.path.join(save_dir, 'computationi_time.png'))\n plt.close()\n print('exploranation number ',len(self.fitness_dictionary))\n #for index in range(len(self.population)):\n # self.population[index][-1][0],self.population[index][-1][1] = self.fitness_function(self.population[index])\n self.depict_fitness(gene)\n self.depict_average_variance()\n self.population.sort(key=lambda x:x[-1][0],reverse = True)\n print(self.population[0])\n return self.population\n\n\ndef main():\n oil_data,freight_outward_data,freight_return_data,exchange_data,demand_data,supply_data,newbuilding_data,secondhand_data = load_generated_sinario()\n ga = GA_Extended(oil_data,freight_outward_data,freight_return_data,exchange_data,demand_data,supply_data,newbuilding_data,secondhand_data)\n start = time.time()\n p = ga.execute_GA()\n print(time.time()-start)\n export_rules_integrate_csv(p)\n\nif __name__ == \"__main__\":\n main()\n slack = slackweb.Slack(url=\"############\")\n slack.notify(text=\"program 
end!!!!!!!!!\")\n","sub_path":"models/ga_extended.py","file_name":"ga_extended.py","file_ext":"py","file_size_in_byte":21213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"555781917","text":"\"\"\"Model definition for CNN sentiment training.\"\"\"\n\nimport os\nimport numpy as np\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.initializers import Constant\n\n\ndef keras_model_fn(_, config, args):\n \"\"\"Creates a CNN model for sentiment modeling.\"\"\"\n embedding_matrix = np.zeros((\n config[\"embeddings_dictionary_size\"],\n config[\"embeddings_vector_size\"]))\n\n with tf.io.gfile.GFile(config[\"embeddings_path\"], \"r\") as file:\n input_data = file.read()\n split = input_data.split(\"\\n\")\n\n for index, _ in enumerate(split):\n data = np.asarray(split[index].split()[1:], dtype='float32')\n if len(data) == config[\"embeddings_vector_size\"]:\n embedding_matrix[index + 2] = data\n else:\n padded = np.zeros((config[\"embeddings_vector_size\"]), 'float32')\n padded[:len(data)] = data\n embedding_matrix[index + 2] = padded\n\n cnn_model = keras.Sequential()\n cnn_model.add(layers.Embedding(\n input_dim=config[\"embeddings_dictionary_size\"],\n input_length=config[\"padding_size\"],\n embeddings_initializer=Constant(embedding_matrix),\n output_dim=config[\"embeddings_vector_size\"],\n trainable=True))\n cnn_filters = [\n min(1000,\n max(8, int(\n args.first_filter_size * args.cnn_layer_sizes_scale_factor**i)))\n for i in range(args.num_cnn_layers)\n ]\n for i in range(args.num_cnn_layers):\n cnn_model.add(layers.Conv1D(\n filters=cnn_filters[i],\n kernel_size=2,\n strides=1,\n padding=\"valid\",\n activation=\"relu\"))\n cnn_model.add(layers.GlobalMaxPool1D())\n dense_layers = [\n min(1024,\n max(8, int(\n args.first_layer_size * args.dense_layer_sizes_scale_factor**i)))\n for i in range(args.num_dense_layers)\n ]\n for i in range(args.num_dense_layers):\n cnn_model.add(layers.Dense(dense_layers[i], activation=\"relu\"))\n cnn_model.add(layers.Dense(1, activation=\"sigmoid\"))\n\n cnn_model.compile(\n optimizer=\"adam\",\n loss=\"binary_crossentropy\",\n metrics=[\"accuracy\"])\n return cnn_model\n\n\ndef save_model(model, output):\n \"\"\"Saves models in SaveModel format with signature to support serving.\"\"\"\n tf.saved_model.save(model, os.path.join(output, \"1\"))\n print(\"Model successfully saved at: {}\".format(output))\n","sub_path":"gcp_model_training/sentiment_model_cnn.py","file_name":"sentiment_model_cnn.py","file_ext":"py","file_size_in_byte":2469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"51464278","text":"import argparse\nimport math\nimport os\nimport sys\nfrom linecache import getline\nfrom multiprocessing import Process\n\nimport Constants as c\n\n#In: -i in_decoded_dir -o out_dir [-t time window] [-s slide_interval] [-p num_processes]\n#Out: tab-delim txt w/ header: frame_num\\tts\\tts_delta\\tframe_len\\tip_src\\tip_dst\\thost\n\n\n#is_error is either 0 or 1\ndef print_usage(is_error): \n print(c.SLIDE_SPLIT_USAGE, file=sys.stderr) if is_error else print(c.SLIDE_SPLIT_USAGE)\n exit(is_error)\n\n\ndef get_num(str_num, description):\n bad_num = False\n try:\n num = int(str_num)\n if num < 0:\n bad_num = True\n except ValueError:\n bad_num = True\n\n if bad_num:\n print(c.NON_POS % (description, str_num), file=sys.stderr)\n num = -1\n\n return num\n\n\ndef write_file(start_idx, 
end_idx, in_file, out_file):\n lines = \"frame_num\\tts\\tts_delta\\tframe_len\\tip_src\\tip_dst\\thost\\n\"\n for i in range(start_idx, end_idx):\n lines += getline(in_file, i)\n with open(out_file, \"w\") as f:\n f.write(lines)\n\n\ndef run(pid, files, src, dest, slide_int, time_window):\n for fpath in files:\n with open(fpath, \"r\") as f:\n times = []\n for l in f:\n try:\n times.append(float(l.split(\"\\t\")[1]))\n except (IndexError, ValueError) as e:\n pass\n\n if len(times) == 0:\n print(c.NO_VAL_TS % fpath, file=sys.stderr)\n continue\n\n dest_file = os.path.join(dest, fpath.replace(src, \"\", 1)[:-4] + \"_part_0.txt\")\n if os.path.isfile(dest_file):\n print(\"P%s: %s exists, skipping %s\" % (pid, dest_file, fpath))\n continue\n\n print(\"P%s: IN: %s\" % (pid, fpath)) \n start_int = times[0]\n end_int = start_int + time_window\n last_poss_start = math.ceil((times[len(times) - 1] - time_window) / slide_int) * slide_int\n start_idxes = [0]\n idx = 0\n num = 0\n last_bucket = False\n for t in times:\n while t - start_int >= slide_int and not last_bucket:\n if t > last_poss_start and last_poss_start - start_int < slide_int:\n last_bucket = True\n\n start_int += slide_int\n start_idxes.append(idx)\n\n num_pop = 0\n for i in start_idxes:\n if t > end_int:\n dest_file = dest_file[:dest_file.rfind(\"_\")] + \"_%d.txt\" % num\n print(\"P%s: OUT: %s\" % (pid, dest_file))\n if not os.path.isdir(os.path.dirname(dest_file)):\n os.system(\"mkdir -pv %s\" % os.path.dirname(dest_file))\n #+2: +1 for getline is 1-based index, +1 for skipping header row\n write_file(i + 2, idx + 2, fpath, dest_file)\n num_pop += 1\n num += 1\n end_int += slide_int\n\n [ start_idxes.pop(0) for _ in range(num_pop) ]\n idx += 1\n\n while len(start_idxes) > 0:\n dest_file = dest_file[:dest_file.rfind(\"_\")] + \"_%d.txt\" % num\n print(\"P%s: OUT: %s\" % (pid, dest_file))\n if not os.path.isdir(os.path.dirname(dest_file)):\n os.system(\"mkdir -pv %s\" % os.path.dirname(dest_file))\n write_file(start_idxes[0] + 2, idx + 2, fpath, dest_file)\n start_idxes.pop(0)\n num += 1\n\n\ndef main():\n #parse arguments\n parser = argparse.ArgumentParser(usage=c.SLIDE_SPLIT_USAGE, add_help=False)\n parser.add_argument(\"-i\", dest=\"dec_dir\", default=\"\")\n parser.add_argument(\"-o\", dest=\"dest_dir\", default=\"\")\n parser.add_argument(\"-t\", dest=\"time_window\", default=\"30\")\n parser.add_argument(\"-s\", dest=\"slide_int\", default=\"5\")\n parser.add_argument(\"-p\", dest=\"num_proc\", default=\"1\")\n parser.add_argument(\"-h\", dest=\"help\", action=\"store_true\", default=False)\n args = parser.parse_args()\n\n if args.help:\n print_usage(0)\n\n print(\"Running %s...\" % c.PATH)\n\n #error checking\n errors = False\n #check -i in source\n if args.dec_dir == \"\":\n errors = True\n print(c.NO_SRC_DIR, file=sys.stderr)\n elif not os.path.isdir(args.dec_dir):\n errors = True\n print(c.INVAL % (\"Source directory\", args.dec_dir, \"directory\"), file=sys.stderr)\n else:\n if not os.access(args.dec_dir, os.R_OK):\n errors = True\n print(c.NO_PERM % (\"source directory\", args.dec_dir, \"read\"), file=sys.stderr)\n if not os.access(args.dec_dir, os.X_OK):\n errors = True\n print(c.NO_PERM % (\"source directory\", args.dec_dir, \"execute\"), file=sys.stderr)\n\n #check -o out destination\n if args.dest_dir == \"\":\n errors = True\n print(c.NO_DEST_DIR, file=sys.stderr)\n elif os.path.isdir(args.dest_dir):\n if not os.access(args.dest_dir, os.W_OK):\n errors = True\n print(c.NO_PERM % (\"destination directory\", 
args.dest_dir, \"write\"), file=sys.stderr)\n if not os.access(args.dest_dir, os.X_OK):\n errors = True\n print(c.NO_PERM % (\"destination directory\", args.dest_dir, \"execute\"), file=sys.stderr)\n\n #check -t time window\n time_window = get_num(args.time_window, \"time window\")\n if time_window == -1:\n errors = True\n\n #check -s slide interval\n slide_int = get_num(args.slide_int, \"slide interval\")\n if slide_int == -1:\n errors = True\n\n if slide_int > time_window:\n errors = True\n print(c.INT_GT_TIME_WIN % (slide_int, time_window), file=sys.stderr)\n\n #check -p number processes\n num_proc = get_num(args.num_proc, \"number of processes\")\n if num_proc == -1:\n errors = True\n\n if errors:\n print_usage(1)\n #end error checking\n\n if not os.path.isdir(args.dest_dir):\n os.system(\"mkdir -pv %s\" % args.dest_dir)\n\n files = [[] for _ in range(num_proc)]\n\n index = 0\n for root, dirs, fs in os.walk(args.dec_dir):\n for fname in fs:\n if fname.endswith(\".txt\"):\n files[index].append(os.path.join(root, fname))\n index += 1\n if index >= num_proc:\n index = 0\n else:\n print(c.WRONG_EXT % (\"Decoded file\", \"text (.txt)\", os.path.join(root, fname)),\n file=sys.stderr)\n\n procs = []\n for pid, files in enumerate(files):\n p = Process(target=run, args=(pid, files, args.dec_dir, args.dest_dir, slide_int, time_window))\n procs.append(p)\n p.start()\n\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"model/src/s8_slide_split.py","file_name":"s8_slide_split.py","file_ext":"py","file_size_in_byte":6646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"297140283","text":"# $Id: ScrollBar.py,v 1.46.2.4 2007/01/27 11:10:37 marcusva Exp $\n#\n# Copyright (c) 2004-2007, Marcus von Appen\n# All rights reserved.\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"A widget, which allows scrolling using buttons and a slider.\"\"\"\n\nfrom pygame import K_KP_PLUS, K_PLUS, K_RIGHT, K_DOWN, K_KP_MINUS, K_MINUS\nfrom pygame import K_LEFT, K_UP, K_PAGEUP, K_PAGEDOWN, K_HOME, K_END, Rect\nfrom .Range import Range\nfrom .Constants import *\nfrom .StyleInformation import StyleInformation\nfrom . 
import base\n\n# Timer value for the button press delay.\n_TIMER = 25\n\nclass ScrollBar (Range):\n \"\"\"ScrollBar () -> ScrollBar\n\n An abstract widget class, which is suitable for scrolling.\n\n The ScrollBar widget works much the same like a Scale widget except\n that it supports buttons for adjusting the value and that its\n minimum value always is 0. It is suitable for widgets which need\n scrolling ability and a scrolling logic.\n\n Inheriting widgets have to implement the _get_value_from_coords()\n and _get_coords_from_value() methods, which calculate the value of\n the ScrollBar using a pair of coordinates and vice versa. Example\n implementations can be found in the HScrollBar and VScrollBar widget\n classes. They also need to implement the _get_button_coords()\n method, which has to return a tuple of the both button coordinates\n [(x, y, width, height)].\n \n Default action (invoked by activate()):\n Give the ScrollBar the input focus.\n \n Mnemonic action (invoked by activate_mnemonic()):\n None\n\n Signals:\n SIG_MOUSEDOWN - Invoked, when a mouse button is pressed on the\n ScrollBar.\n SIG_MOUSEUP - Invoked, when a mouse buttor is released on the\n ScrollBar.\n SIG_MOUSEMOVE - Invoked, when the mouse moves over the ScrollBar.\n\n Attributes:\n button_dec - Indicates, if the decrease button is pressed.\n button_inc - Indicates, if the increase button is pressed.\n \"\"\"\n def __init__ (self):\n Range.__init__ (self, 0, 1, 1)\n\n # Signals.\n self._signals[SIG_MOUSEDOWN] = []\n self._signals[SIG_MOUSEMOVE] = []\n self._signals[SIG_MOUSEUP] = []\n self._signals[SIG_KEYDOWN] = None # Dummy for keyboard activation.\n self._signals[SIG_TICK] = None # Dummy for automatic scrolling.\n \n # Internal state handlers for the Events. Those need to be known by\n # the inheritors.\n self._buttondec = False\n self._buttoninc = False\n\n self._timer = _TIMER\n self._click = False\n\n def activate (self):\n \"\"\"S.activate () -> None\n\n Activates the ScrollBar default action.\n\n Activates the ScrollBar default action. This usually means giving\n the ScrollBar the input focus.\n \"\"\"\n if not self.sensitive:\n return\n self.focus = True\n \n def _get_button_coords (self, area):\n \"\"\"S._get_button_coords (...) -> tuple\n\n Gets a tuple with the coordinates of the in- and decrease buttons.\n \n This method has to be implemented by inherited widgets.\n \"\"\"\n raise NotImplementedError\n\n def _get_coords_from_value (self):\n \"\"\"S._get_coords_from_value () -> float\n\n Calculates the slider coordinates for the ScrollBar.\n \n This method has to be implemented by inherited widgets.\n \"\"\"\n raise NotImplementedError\n\n def _get_value_from_coords (self, area, coords):\n \"\"\"S._get_value_from_coords (...) -> float\n\n Calculates the slider coordinates for the ScrollBar.\n \n This method has to be implemented by inherited widgets.\n \"\"\"\n raise NotImplementedError\n\n def _get_slider_size (self):\n \"\"\"S._get_slider_size (...) -> int\n\n Calculates the size of the slider knob.\n \n This method has to be implemented by inherited widgets.\n \"\"\"\n raise NotImplementedError\n \n def _check_collision (self, pos, rect):\n \"\"\"S._check_collirion (...) -> bool\n\n Checks the collision of the given position with the passed rect.\n \"\"\"\n # Rect: (x, y, width, height), pos: (x, y).\n return (pos[0] >= rect[0]) and (pos[0] <= (rect[2] + rect[0])) and \\\n (pos[1] >= rect[1]) and (pos[1] <= (rect[3] + rect[1]))\n \n def set_minimum (self, minimum):\n \"\"\"S.set_minimum (...) 
-> Exception\n\n This method does not have any use.\n \"\"\"\n pass\n\n def notify (self, event):\n \"\"\"S.notify (...) -> None\n\n Notifies the ScrollBar about an event.\n \"\"\"\n if not self.sensitive:\n return\n\n if event.signal in SIGNALS_MOUSE:\n eventarea = self.rect_to_client ()\n collision = eventarea.collidepoint (event.data.pos)\n if event.signal == SIG_MOUSEDOWN and collision:\n self.focus = True\n # Act only on left clicks or scrollwheel Events.\n if event.data.button == 1:\n self.state = STATE_ACTIVE\n self.run_signal_handlers (SIG_MOUSEDOWN, event.data)\n\n if event.data.button == 1:\n buttons = self._get_button_coords (eventarea)\n if self._check_collision (event.data.pos, buttons[0]):\n self._buttondec = True\n self._buttoninc = False\n self._click = False\n self.decrease ()\n elif self._check_collision (event.data.pos, buttons[1]):\n self._buttoninc = True\n self._buttondec = False\n self._click = False\n self.increase ()\n else:\n self._click = True\n self._buttondec = False\n self._buttoninc = False\n val = self._get_value_from_coords (eventarea,\n event.data.pos)\n if val != self.value:\n self.value = val\n # Mouse wheel.\n elif event.data.button == 4:\n self.decrease ()\n elif event.data.button == 5:\n self.increase ()\n event.handled = True\n\n elif event.signal == SIG_MOUSEMOVE:\n dirty = False\n if collision:\n self.focus = True\n if self.state == STATE_NORMAL:\n self.state = STATE_ENTERED\n self.run_signal_handlers (SIG_MOUSEMOVE, event.data)\n\n buttons = self._get_button_coords (eventarea)\n if not self._check_collision (event.data.pos, buttons[0]) \\\n and self._buttondec:\n self._buttondec = False\n dirty = True\n if not self._check_collision (event.data.pos, buttons[1]) \\\n and self._buttoninc:\n self._buttoninc = False\n dirty = True\n if self._click:\n val = self._get_value_from_coords (eventarea,\n event.data.pos)\n if val != self.value:\n self.value = val\n dirty = False\n self.dirty = dirty\n event.handled = True\n\n elif self.state == STATE_ENTERED:\n self.state = STATE_NORMAL\n\n elif event.signal == SIG_MOUSEUP:\n if self._click or self._buttoninc or self._buttondec:\n self._buttondec = False\n self._buttoninc = False\n self._click = False\n\n if collision:\n if event.data.button == 1:\n if self.state == STATE_ACTIVE:\n self.state = STATE_ENTERED\n self.run_signal_handlers (SIG_MOUSEUP, event.data)\n event.handled = True\n else:\n self.state = STATE_NORMAL\n # Reset timer\n self._timer = _TIMER\n\n # The user holds the mouse clicked over one button.\n elif (self._buttondec or self._buttoninc) and \\\n (event.signal == SIG_TICK):\n # Wait half a second before starting to in/decrease.\n if self._timer > 0:\n self._timer -= 1\n else:\n if self._buttondec:\n self.decrease ()\n elif self._buttoninc:\n self.increase ()\n\n # Keyboard activation.\n elif (event.signal == SIG_KEYDOWN) and self.focus:\n if event.data.key in (K_KP_PLUS, K_PLUS, K_RIGHT, K_DOWN):\n self.increase ()\n event.handled = True\n elif event.data.key in (K_KP_MINUS, K_MINUS, K_LEFT, K_UP):\n self.decrease ()\n event.handled = True\n elif event.data.key == K_PAGEUP:\n val = self.value - 10 * self.step\n if val > self.minimum:\n self.value = val\n else:\n self.value = self.minimum\n event.handled = True\n elif event.data.key == K_PAGEDOWN:\n val = self.value + 10 * self.step\n if val < self.maximum:\n self.value = val\n else:\n self.value = self.maximum\n event.handled = True\n elif event.data.key == K_END:\n self.value = self.maximum\n event.handled = True\n elif event.data.key == 
K_HOME:\n self.value = self.minimum\n event.handled = True\n\n Range.notify (self, event)\n\n button_dec = property (lambda self: self._buttondec,\n doc = \"\"\"Indicates, whether the decrease\n button is pressed.\"\"\")\n button_inc = property (lambda self: self._buttoninc,\n doc = \"\"\"Indicates, whether the increase\n button is pressed.\"\"\")\n \nclass HScrollBar (ScrollBar):\n \"\"\"HScrollBar (width, scroll) -> HScrollBar\n\n A horizontal ScrollBar widget.\n\n A ScrollBar widget with a horizontal orientation. By default, its\n height is the sum of the button height (HSCROLLBAR_BUTTON_SIZE) and\n the border drawn around it (2 * SCROLLBAR_BORDER) and has the passed\n width. The scrolling area is the passed scroll value minus the width\n of the ScrollBar.\n\n Thus, if the area to scroll is 200 pixels wide and the ScrollBar is\n about 100 pixels long, the ScrollBar its value range will go from 0\n to 100 (maximum = scroll - width). If the ScrollBar is longer than\n the area to scroll (scroll < width), then the value range will be 0.\n\n Note: The minimum size of the scrollbar is at least twice its\n size[1] parameter. This means, that it can display the both\n scrolling buttons next to each other. This will override the passed\n width value in the constructor, if necessary.\n \"\"\"\n def __init__ (self, width, scroll):\n ScrollBar.__init__ (self)\n # Minimum size for the two scrolling buttons next to each other\n border = base.GlobalStyle.get_border_size \\\n (self.__class__, self.style,\n StyleInformation.get (\"SCROLLBAR_BORDER\")) * 2\n height = StyleInformation.get (\"HSCROLLBAR_BUTTON_SIZE\")[1] + border\n if width < 2 * height:\n width = 2 * height\n\n self.lock ()\n self.minsize = (width, height) # Default size.\n self.maximum = scroll\n self.unlock ()\n\n def set_maximum (self, maximum):\n \"\"\"H.set_maximum (...) -> None\n\n Sets the maximum value to scroll.\n\n The passed maximum value differs from maximum value of the\n slider. The HScrollBar also subtracts its own height from the\n scrolling maximum, so that the real maximum of its value range\n can be expressed in the formula:\n\n real_maximum = maximum - self.minsize[1]\n\n That means, that if the HScrollBar is 100 pixels high and the\n passed maximum value is 200, the scrolling range of the\n HScrollBar will go from 0 to 100 (100 + size = 200).\n\n Raises a ValueError, if the passed argument is smaller than\n the first element of the ScrollBar its size.\n \"\"\"\n if maximum < self.minsize[0]:\n raise ValueError (\"maximum must be greater than or equal to %d\"\n % self.minsize[0])\n ScrollBar.set_maximum (self, maximum - self.minsize[0])\n self.dirty = True\n\n def _get_button_coords (self, area):\n \"\"\"H._get_button_coords (...) 
-> tuple\n\n Gets a tuple with the coordinates of the in- and decrease buttons.\n \"\"\"\n border = base.GlobalStyle.get_border_size \\\n (self.__class__, self.style,\n StyleInformation.get (\"SCROLLBAR_BORDER\"))\n # Respect the set shadow for the ScrollBar.\n button1 = (area.left + border, area.top + border,\n area.height - 2 * border, area.height - 2 * border)\n button2 = (area.left + area.width - area.height - border,\n area.top + border, area.height - 2 * border,\n area.height - 2 * border)\n return (button1, button2)\n\n def _get_slider_size (self):\n \"\"\"H._get_slider_size () -> int\n\n Calculates the size of the slider knob.\n \"\"\"\n border = base.GlobalStyle.get_border_size \\\n (self.__class__, self.style,\n StyleInformation.get (\"SCROLLBAR_BORDER\"))\n \n # Minimum slider size, if the scrollbar is big enough.\n minsize = 10\n fullsize = self.size[0] - 2 * self.size[1]\n if fullsize == 0:\n # If only the both scrolling buttons can be displayed, we will\n # completely skip the slider.\n return 0\n\n # Full size.\n fullsize += 2 * border\n slider_width = fullsize\n if self.maximum != 0:\n slider_width = fullsize / (float (self.maximum) + fullsize) * \\\n fullsize\n if slider_width < minsize:\n slider_width = minsize\n return int (slider_width)\n \n def _get_coords_from_value (self):\n \"\"\"H._get_coords_from_value () -> int\n\n Calculates the slider coordinates for the HScrollBar.\n \"\"\"\n border = base.GlobalStyle.get_border_size \\\n (self.__class__, self.style,\n StyleInformation.get (\"SCROLLBAR_BORDER\"))\n\n val = 0\n if self.maximum > 0:\n slider = self._get_slider_size ()\n # Start offset for scrolling - this is the height\n # (button + 2 * border) - border plus the half of the\n # slider.\n sl_x = self.minsize[1] - border + float (slider) / 2\n\n # Valid sliding range.\n slide = self.minsize[0] - 2 * sl_x\n step = self.maximum / float (slide)\n val = self.value / step + sl_x\n return val\n return self.size[0] / 2\n \n def _get_value_from_coords (self, area, coords):\n \"\"\"H._get_value_from_coords (...) 
-> float\n\n Calculates the slider coordinates for the HScrollBar.\n \"\"\"\n border = base.GlobalStyle.get_border_size \\\n (self.__class__, self.style,\n StyleInformation.get (\"SCROLLBAR_BORDER\"))\n\n val = 0\n if self.maximum > 0:\n slider = self._get_slider_size ()\n sl_x = self.minsize[1] - border + float (slider) / 2\n slide = self.minsize[0] - 2 * sl_x\n n = coords[0] - area.left - sl_x\n step = self.maximum / float (slide)\n val = n * step\n if val > self.maximum:\n val = self.maximum\n elif val < 0:\n val = 0\n return val\n \n def draw_bg (self):\n \"\"\"H.draw_bg () -> Surface\n\n Draws the HScrollBar background surface and returns it.\n\n Creates the visible surface of the HScrollBar and returns it to\n the caller.\n \"\"\"\n return base.GlobalStyle.engine.draw_scrollbar (self,\n ORIENTATION_HORIZONTAL)\n\n def draw (self):\n \"\"\"H.draw () -> None\n\n Draws the HScrollBar surface and places its Buttons and slider on it.\n \"\"\"\n ScrollBar.draw (self)\n\n cls = self.__class__\n style = base.GlobalStyle\n st = self.style or style.get_style (cls)\n rect = self.image.get_rect ()\n draw_rect = style.engine.draw_rect\n draw_border = style.engine.draw_border\n draw_arrow = style.engine.draw_arrow\n\n # Create both buttons.\n border = style.get_border_size \\\n (cls, st, StyleInformation.get (\"SCROLLBAR_BORDER\"))\n button_type = StyleInformation.get (\"SCROLLBAR_BUTTON_BORDER\")\n\n width_button = rect.height - 2 * border\n\n # We use a temporary state here, so that just the buttons will\n # have the typical sunken effect.\n tmp_state = self.state\n if self.state == STATE_ACTIVE:\n tmp_state = STATE_NORMAL\n\n # First button.\n state_button = tmp_state\n if self.button_dec:\n state_button = STATE_ACTIVE\n button1 = draw_rect (width_button, width_button, state_button, cls, st)\n draw_border (button1, state_button, cls, st, button_type)\n rect_button1 = button1.get_rect ()\n\n # Draw the arrow.\n draw_arrow (button1, ARROW_LEFT, state_button, cls, st)\n rect_button1.x = border\n rect_button1.centery = rect.centery\n self.image.blit (button1, rect_button1)\n\n # Second button\n state_button = tmp_state\n if self.button_inc:\n state_button = STATE_ACTIVE\n \n button2 = draw_rect (width_button, width_button, state_button, cls, st)\n draw_border (button2, state_button, cls, st, button_type)\n rect_button2 = button2.get_rect ()\n\n # Draw the arrow.\n draw_arrow (button2, ARROW_RIGHT, state_button, cls, st)\n rect_button2.x = rect.width - width_button - border\n rect_button2.centery = rect.centery\n self.image.blit (button2, rect_button2)\n\n # Create the slider.\n slider_size = self._get_slider_size ()\n if slider_size > 0:\n sl = style.engine.draw_slider (slider_size, width_button,\n tmp_state, cls, st)\n r = sl.get_rect ()\n r.centerx = self._get_coords_from_value ()\n r.centery = rect.centery\n self.image.blit (sl, r)\n\nclass VScrollBar (ScrollBar):\n \"\"\"VScrollBar (height, scroll) -> VScrollBar\n\n A vertical ScrollBar widget.\n\n A ScrollBar widget with a vertical orientation. By default, its\n width is the sum of the button width (VSCROLLBAR_BUTTON_SIZE) and\n the border drawn around it (2 * SCROLLBAR_BORDER) and has the passed\n height. The scrolling area is the passed scroll value minus the\n height of the ScrollBar.\n\n Thus, if the area to scroll is 200 pixels high and the ScrollBar is\n about 100 pixels high, the ScrollBar its value range will go from 0\n to 100 (maximum = scroll - height). 
If the ScrollBar is longer than\n the area to scroll (scroll < height), then the value range will be 0.\n\n Note: The minimum size of the scrollbar is at least twice its\n size[0] parameter. This means, that it can display the both\n scrolling buttons next to each other. This will override the passed\n width value in the constructor, if necessary.\n \"\"\"\n def __init__ (self, height, scroll):\n ScrollBar.__init__ (self)\n # Minimum size for the two scrolling buttons next to each other.\n border = base.GlobalStyle.get_border_size \\\n (self.__class__, self.style,\n StyleInformation.get (\"SCROLLBAR_BORDER\")) * 2\n \n width = StyleInformation.get (\"VSCROLLBAR_BUTTON_SIZE\")[0] + border\n if height < 2 * width:\n height = 2 * width\n\n self.lock ()\n self.minsize = (width, height) # Default size.\n self.maximum = scroll\n self.unlock ()\n\n def set_maximum (self, maximum):\n \"\"\"V.set_maximum (...) -> None\n\n Sets the maximum value to scroll.\n\n The passed maximum value differs from maximum value of the\n slider. The VScrollBar also subtracts its own width from the\n scrolling maximum, so that the real maximum of its value range\n can be expressed in the formula:\n\n real_maximum = maximum - self.minsize[0]\n\n That means, that if the VScrollBar is 100 pixels long and the\n passed maximum value is 200, the scrolling range of the\n VScrollBar will go from 0 to 100 (100 + size = 200).\n\n Raises a ValueError, if the passed argument is smaller than\n the second element of the ScrollBar its size.\n \"\"\"\n if maximum < self.minsize[1]:\n raise ValueError (\"maximum must be greater than or equal to %d\"\n % self.minsize[1])\n ScrollBar.set_maximum (self, maximum - self.minsize[1])\n self.dirty = True\n\n def _get_button_coords (self, area):\n \"\"\"V._get_button_coords (...) 
-> tuple\n\n Gets a tuple with the coordinates of the in- and decrease buttons.\n \"\"\"\n border = base.GlobalStyle.get_border_size \\\n (self.__class__, self.style,\n StyleInformation.get (\"SCROLLBAR_BORDER\"))\n\n # Respect the set shadow for the ScrollBar.\n button1 = (area.left + border, area.top + border,\n area.width - 2 * border, area.width - 2 * border)\n button2 = (area.left + border,\n area.top + area.height - area.width - border,\n area.width - 2 * border, area.width - border)\n return (button1, button2)\n\n def _get_slider_size (self):\n \"\"\"V._get_slider_size () -> int\n\n Calculates the size of the slider knob.\n \"\"\"\n # Minimum slider size.\n minsize = 10\n if (self.size[1] - 2 * self.size[0]) == 0:\n # If only the both scrolling buttons can be displayed, we will\n # completely skip the slider.\n return 0\n \n # Full size.\n fullsize = self.size[1] - 2 * self.size[0]\n slider_height = fullsize\n if self.maximum != 0:\n slider_height = fullsize / (float (self.maximum) + fullsize) * \\\n fullsize\n if slider_height < minsize:\n slider_height = minsize\n return int (slider_height)\n \n def _get_coords_from_value (self):\n \"\"\"V._get_coords_from_value () -> int\n\n Calculates the slider coordinates for the VScrollBar.\n \"\"\"\n border = base.GlobalStyle.get_border_size \\\n (self.__class__, self.style,\n StyleInformation.get (\"SCROLLBAR_BORDER\"))\n \n val = 0\n if self.maximum > 0:\n slider = self._get_slider_size ()\n sl_y = self.minsize[0] - border + float (slider) / 2\n slide = self.minsize[1] - 2 * sl_y\n step = self.maximum / float (slide)\n val = self.value / step + sl_y\n return val\n return self.size[1] / 2\n \n def _get_value_from_coords (self, area, coords):\n \"\"\"V._get_value_from_coords (...) -> float\n\n Calculates the slider coordinates for the VScrollBar.\n \"\"\"\n border = base.GlobalStyle.get_border_size \\\n (self.__class__, self.style,\n StyleInformation.get (\"SCROLLBAR_BORDER\"))\n\n val = 0\n if self.maximum > 0:\n slider = self._get_slider_size ()\n\n # Start offset for scrolling - this is the width\n # (button + 2 * border) - border plus the half of the\n # slider.\n sl_y = self.minsize[0] - border + float (slider) / 2\n\n # Valid sliding range.\n slide = self.minsize[1] - 2 * sl_y\n \n n = coords[1] - area.top - sl_y\n step = self.maximum / float (slide)\n val = n * step\n if val > self.maximum:\n val = self.maximum\n elif val < 0:\n val = 0\n return val\n \n def draw_bg (self):\n \"\"\"V.draw_bg (...) 
-> Surface\n\n Draws the VScrollBar background surface and returns it.\n\n Creates the visible surface of the VScrollBar and returns it to\n the caller.\n \"\"\"\n return base.GlobalStyle.engine.draw_scrollbar (self,\n ORIENTATION_VERTICAL)\n\n def draw (self):\n \"\"\"V.draw () -> None\n\n Draws the VScrollBar surface and places its Buttons and slider on it.\n \"\"\"\n ScrollBar.draw (self)\n cls = self.__class__\n style = base.GlobalStyle\n st = self.style or style.get_style (cls)\n rect = self.image.get_rect ()\n draw_rect = style.engine.draw_rect\n draw_border = style.engine.draw_border\n draw_arrow = style.engine.draw_arrow\n \n # Create both buttons.\n border = style.get_border_size \\\n (cls, st, StyleInformation.get (\"SCROLLBAR_BORDER\"))\n button_type = StyleInformation.get (\"SCROLLBAR_BUTTON_BORDER\")\n \n width_button = rect.width - 2 * border\n\n # We use a temporary state here, so that just the buttons will\n # have the typical sunken effect.\n tmp_state = self.state\n if self.state == STATE_ACTIVE:\n tmp_state = STATE_NORMAL\n\n # First button.\n state_button = tmp_state\n if self.button_dec:\n state_button = STATE_ACTIVE\n button1 = draw_rect (width_button, width_button, state_button, cls, st)\n draw_border (button1, state_button, cls, st, button_type)\n rect_button1 = button1.get_rect ()\n\n # Draw the arrow.\n draw_arrow (button1, ARROW_UP, state_button, cls, st)\n rect_button1.y = border\n rect_button1.centerx = rect.centerx\n self.image.blit (button1, rect_button1)\n\n # Second button\n state_button = tmp_state\n if self.button_inc:\n state_button = STATE_ACTIVE\n \n button2 = draw_rect (width_button, width_button, state_button, cls, st)\n draw_border (button2, state_button, cls, st, button_type)\n rect_button2 = button2.get_rect ()\n\n # Draw the arrow.\n draw_arrow (button2, ARROW_DOWN, state_button, cls, st)\n rect_button2.y = rect.height - width_button - border\n rect_button2.centerx = rect.centerx\n self.image.blit (button2, rect_button2)\n\n # Create the slider.\n slider_size = self._get_slider_size ()\n if slider_size > 0:\n sl = style.engine.draw_slider (width_button, slider_size,\n tmp_state, cls, st)\n r = sl.get_rect ()\n r.centerx = rect.centerx\n r.centery = self._get_coords_from_value ()\n self.image.blit (sl, r)\n","sub_path":"ocempgui/widgets/ScrollBar.py","file_name":"ScrollBar.py","file_ext":"py","file_size_in_byte":28896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"85194947","text":"class SinglyLinkedList:\r\n\tdef __init__(self):\r\n\t\tself.length = 0\r\n\r\n\tdef __str__(self):\r\n\t\toutput = \"[\"\r\n\t\tif(self.length == 0):\r\n\t\t\toutput += \"]\"\r\n\t\telif(self.length == 1):\r\n\t\t\toutput += \"{0}]\".format(self.head.data)\r\n\t\telse:\r\n\t\t\tnode = self.head\r\n\t\t\tfor i in range(self.length):\r\n\t\t\t\tif(i == self.length - 1):\r\n\t\t\t\t\toutput += \"{0}\".format(node.data)\r\n\t\t\t\telse:\r\n\t\t\t\t\toutput += \"{0}, \".format(node.data)\r\n\t\t\t\t\tnode = node.nextNode\r\n\t\t\toutput += \"]\"\r\n\r\n\t\treturn output\r\n\r\n\r\n\tdef access(self, index):\r\n\t\tif(index >= self.length):\r\n\t\t\traise self.InvalidIndexError(\"No element at index {0} in list of length {1}\".format(index, self.length))\r\n\t\telif(index < 0):\r\n\t\t\traise self.InvalidIndexError(\"Negative index of {0}\".format(index))\r\n\t\telse:\r\n\t\t\tnode = self.head\r\n\t\t\tfor i in range(index):\r\n\t\t\t\tnode = node.nextNode\r\n\t\t\treturn node.data\r\n\r\n\tdef insert(self, 
data):\r\n\t\tif(self.length == 0):\r\n\t\t\tself.head = self.Node(data)\r\n\t\t\tself.tail = self.head\r\n\t\telif(self.length == 1):\r\n\t\t\tself.head.nextNode = self.Node(data)\r\n\t\t\tself.tail = self.head.nextNode\r\n\t\telse:\r\n\t\t\tself.tail.nextNode = self.Node(data)\r\n\t\t\tself.tail = self.tail.nextNode\r\n\t\tself.length += 1\r\n\r\n\tdef search(self, data):\r\n\t\tif(self.length == 0):\r\n\t\t\treturn -1\r\n\t\telse:\r\n\t\t\tnode = self.head\r\n\t\t\ti = 0\r\n\t\t\twhile True:\r\n\t\t\t\tif(node.data == data):\r\n\t\t\t\t\treturn i\r\n\t\t\t\tif(node.nextNode == None):\r\n\t\t\t\t\treturn -1\r\n\t\t\t\tnode = node.nextNode\r\n\t\t\t\ti += 1\r\n\r\n\tdef delete(self, index=None):\r\n\t\tif(index == None):\r\n\t\t\t# default to deleting the last element\r\n\t\t\tindex = self.length - 1\r\n\r\n\t\tif(index >= self.length):\r\n\t\t\traise self.InvalidIndexError(\"No element at index {0} in list of length {1}\".format(index, self.length))\r\n\t\telif(index < 0):\r\n\t\t\traise self.InvalidIndexError(\"Negative index of {0}\".format(index))\r\n\t\telif(index == 0):\r\n\t\t\tself.head = self.head.nextNode\r\n\t\telse:\r\n\t\t\tprev = self.head\r\n\t\t\tfor i in range(index - 1):\r\n\t\t\t\tprev = prev.nextNode\r\n\t\t\tprev.nextNode = prev.nextNode.nextNode\r\n\t\tself.length -= 1\r\n\r\n\r\n\tclass InvalidIndexError(Exception):\r\n\t\tdef __init__(self, msg):\r\n\t\t\tself.msg = msg\r\n\t\tdef __str__(self):\r\n\t\t\treturn self.msg\r\n\r\n\tclass Node():\r\n\t\tdef __init__(self, data):\r\n\t\t\tself.data = data\r\n\t\t\tself.nextNode = None\r\n\r\nif __name__ == \"__main__\":\r\n\tmyList = SinglyLinkedList()\r\n\tmyList.insert(1)\r\n\tmyList.insert(2)\r\n\tmyList.insert(3)\r\n\tmyList.insert(4)\r\n\tmyList.delete(3)\r\n\tprint(myList)","sub_path":"data_structures/singlylinkedlist.py","file_name":"singlylinkedlist.py","file_ext":"py","file_size_in_byte":2317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"424960923","text":"import json\nfrom flask import make_response, jsonify\nfrom flask_api import status\nfrom util import jsonDefault\n\n# from dcsmp.util.util import DateTimeEncoder, model_to_map,filter_result\n\n# def get_json(obj, fields = None):\n#     if obj is None:\n#         # Response does not contain any data\n#         response = make_response()\n#         response.status_code = status.HTTP_204_NO_CONTENT\n#     else:\n    # if type(obj) is list:\n    #     result = []\n    #     for item in obj:\n    #         if isinstance(item, Model):\n    #             i = model_to_map(item)\n    #         else:\n    #             i = item\n    #\n    #         if type(i) is dict:\n    #             i = filter_result(i, fields)\n    #         result.append(i)\n    # else:\n    #     if isinstance(obj, Model):\n    #         result = model_to_map(obj)\n    #     else:\n    #         result = obj\n    #\n    #     if type(result) is dict:\n    #         result = filter_result(result, fields)\n    # result = {\n    #     \"status\": status.HTTP_200_OK,\n    #     \"errorMessage\": \"\",\n    #     \"result\": result\n    # }\n    # response = make_response(json.dumps(result, default=jsonDefault))\n    #\n    # response.mimetype = 'application/json'\n    # return response\n\n\ndef created(body=None):\n    result = {\n        \"status\": status.HTTP_201_CREATED,\n        \"errorMessage\": \"\",\n    }\n    if body:\n        result['result'] = body\n    response = make_response(json.dumps(result, default=jsonDefault))\n\n    response.status_code = status.HTTP_201_CREATED\n    response.mimetype = 'application/json'\n    return response\n\n\ndef bad_request(message):\n    response = jsonify({\n        \"status\": status.HTTP_400_BAD_REQUEST,\n        \"errorMessage\": message\n    })\n    response.status_code = status.HTTP_400_BAD_REQUEST\n    return response\n\n\ndef 
not_found(message=None):\n response = make_response()\n response.status_code = status.HTTP_404_NOT_FOUND\n return response\n\n\ndef duplicate(message):\n response = jsonify({\n \"status\": status.HTTP_409_CONFLICT,\n \"errorMessage\": message\n })\n response.status_code = status.HTTP_409_CONFLICT\n return response\n\n\ndef unsupported_media_type(message):\n response = jsonify({\n \"status\": status.HTTP_415_UNSUPPORTED_MEDIA_TYPE,\n \"errorMessage\": message\n })\n response.status_code = status.HTTP_415_UNSUPPORTED_MEDIA_TYPE\n return response\n\n\ndef ok(body=None):\n result = {\n \"status\": status.HTTP_200_OK,\n \"errorMessage\": \"\",\n }\n if body:\n result['result'] = body\n response = make_response(json.dumps(result, default=jsonDefault))\n\n response.status_code = status.HTTP_200_OK\n response.mimetype = 'application/json'\n return response\n","sub_path":"cckm/response.py","file_name":"response.py","file_ext":"py","file_size_in_byte":2803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"620525593","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('w', '0008_auto_20151018_2204'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='archive',\n name='value',\n field=models.FloatField(default=0, null=True, blank=True),\n ),\n migrations.AlterField(\n model_name='rule',\n name='input_attribute',\n field=models.CharField(default=0, max_length=6, choices=[(b'VALUE', b'VALUE'), (b'STATUS', b'STATUS'), (b'NT', b'NT'), (b'IV', b'IV'), (b'W', b'WARNING'), (b'A', b'ALARM')]),\n ),\n migrations.AlterField(\n model_name='rule',\n name='output_attribute',\n field=models.CharField(default=0, max_length=6, choices=[(b'VALUE', b'VALUE'), (b'STATUS', b'STATUS'), (b'NT', b'NT'), (b'IV', b'IV'), (b'W', b'WARNING'), (b'A', b'ALARM')]),\n ),\n ]\n","sub_path":"growmat/w/migrations/0009_auto_20151018_2241.py","file_name":"0009_auto_20151018_2241.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"377063694","text":"# Python imports\nfrom setuptools import setup\n\n#Error in atexit._run_exitfuncs, TypeError: 'NoneType' object is not callable\nfrom multiprocessing import util\n\n# Project imports\nfrom notable import app\n\n# Attributes\nAUTHOR = 'John McFarlane'\nDESCRIPTION = 'A very simple note taking application'\nEMAIL = 'john.mcfarlane@rockfloat.com'\nNAME = 'Notable'\nPYPI = 'http://pypi.python.org/packages/source/N/Notable'\nURL = 'https://github.com/jmcfarlane/Notable'\nCLASSIFIERS = \"\"\"\nDevelopment Status :: 2 - Pre-Alpha\nIntended Audience :: Developers\nLicense :: OSI Approved :: MIT License\nOperating System :: OS Independent\nProgramming Language :: Python\nTopic :: Internet :: WWW/HTTP\nIntended Audience :: End Users/Desktop\nTopic :: Office/Business :: News/Diary\nTopic :: Security :: Cryptography\nTopic :: Utilities\n\"\"\"\n\nsetup(\n author = AUTHOR,\n author_email = EMAIL,\n classifiers = [c for c in CLASSIFIERS.split('\\n') if c],\n description = DESCRIPTION,\n download_url = '%s/Notable-%s.tar.gz' % (PYPI, app.version),\n include_package_data = True,\n name = NAME,\n packages = ['notable'],\n scripts = ['scripts/notable'],\n test_suite='nose.collector',\n url = URL,\n version = 
app.version\n)\n","sub_path":"pypi_install_script/Notable-0.4.2.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"153185783","text":"# coding: utf-8\r\n\r\n#this file is to scrape news title, article link and image link from some major news websites initially\r\n#next, insert scraped content into database and only keep the latest information\r\n#after that, use home made graph structure traversal algorithm to extract key info and remove similar contents\r\n#finally, send html emails including titles, links and images\r\n#for details of scraping, database and outlook manipulation, plz take the following link as a reference\r\n# https://github.com/tattooday/web-scraping/blob/master/Feeds%20from%20Database.py\r\n\r\nimport pandas as pd\r\nfrom bs4 import BeautifulSoup as bs \r\nimport marketanalysis as ma\r\nimport datetime as dt\r\nimport win32com.client as win32 \r\nimport sqlite3\r\nimport os\r\nimport re\r\nimport copy\r\nimport time\r\nos.chdir('d:/')\r\n\r\n#this is a home made special package for text mining\r\n#it is designed to extract key information and remove similar contents\r\n#for details of this graph traversal algorithm plz refer to the following link\r\n# https://github.com/tattooday/graph-theory/blob/master/Text%20Mining%20project/alternative%20bfs.py\r\nimport graph\r\n\r\n#main stuff\r\ndef main():\r\n \r\n ec=scrape('https://www.economist.com/middle-east-and-africa/',economist)\r\n aj=scrape('https://www.aljazeera.com/topics/regions/middleeast.html',aljazeera)\r\n tr=scrape('https://www.reuters.com/news/archive/middle-east',reuters) \r\n bc=scrape('https://www.bbc.co.uk/news/world/middle_east',bbc)\r\n ws=scrape('https://www.wsj.com/news/types/middle-east-news',wsj)\r\n ft=scrape('https://www.ft.com/world/mideast',financialtimes)\r\n bb=scrape('https://www.bloomberg.com/view/topics/middle-east',bloomberg)\r\n cn=scrape('https://edition.cnn.com/middle-east',cnn)\r\n fo=scrape('https://fortune.com/tag/middle-east/',fortune)\r\n \r\n #concat scraped data via append, can use pd.concat as an alternative\r\n #unlike the previous version, current version does not sort information by source\r\n df=ft\r\n for i in [aj,tr,bc,ws,cn,fo,ec,bb]:\r\n df=df.append(i)\r\n \r\n #CRUCIAL!!!\r\n #as we append dataframe together, we need to reset the index\r\n #otherwise, we would not be able to use reindex in database function call\r\n df.reset_index(inplace=True,drop=True)\r\n \r\n #first round, insert into database and remove outdated information\r\n df=database(df)\r\n \r\n #second round, use home made package to remove similar contents\r\n output=graph.remove_similar(df,graph.stopword)\r\n \r\n print(output)\r\n \r\n html='
<h1>Mid East</h1>'\r\n \r\n #there are a few ways to embed an image in an html email\r\n #here, we use the link of the image\r\n #it may be a little slow to load the image but it is the most efficient way\r\n #alternatively, we can use mail.Attachments.add()\r\n #we attach all images, and set the img src to a cid: reference\r\n #the issue with this method is that we have to scrape the website repeatedly to get images\r\n #or we can use < img src='data:image/jpg; base64, [remove the brackets and paste base64]'/>\r\n #but this is blocked by most email clients including outlook 2016\r\n for i in range(len(output)):\r\n \r\n html+="""<a href="%s"><h3>%s</h3></a>\r\n \r\n <img src="%s">\r\n """%(output['link'][i],output['title'][i],output['image'][i])\r\n html+='
'\r\n \r\n send(html)\r\n\r\n\r\n \r\n#send html email via outlook\r\ndef send(html):\r\n \r\n outlook = win32.Dispatch('outlook.application') \r\n mail = outlook.CreateItem(0) \r\n receivers = ['naomi.woods@brazzers.com'] \r\n mail.To = ';'.join(receivers) \r\n mail.Subject ='Mid East Newsfeed %s'%(dt.datetime.now())\r\n mail.BodyFormat=2\r\n mail.HTMLBody=html\r\n \r\n condition=str(input('0/1 for no/yes:'))\r\n if condition=='1':\r\n mail.Send()\r\n print('\\nSENT')\r\n \r\n return\r\n\r\n\r\n#the economist etl\r\ndef economist(page):\r\n \r\n title,link,image=[],[],[]\r\n df=pd.DataFrame()\r\n prefix='https://www.economist.com'\r\n \r\n a=page.find_all('div',class_=\"topic-item-container\")\r\n \r\n for i in a:\r\n \r\n link.append(prefix+i.find('a').get('href'))\r\n title.append(i.find('a').text)\r\n image.append(i.parent.find('img').get('src'))\r\n\r\n df['title']=title\r\n df['link']=link\r\n df['image']=image\r\n \r\n return df\r\n\r\n\r\n\r\n#fortune etl\r\ndef fortune(page):\r\n \r\n title,link,image=[],[],[]\r\n df=pd.DataFrame()\r\n prefix='https://fortune.com'\r\n \r\n a=page.find_all('article')\r\n \r\n for i in a:\r\n \r\n link.append(prefix+i.find('a').get('href'))\r\n \r\n if 'http' in i.find('img').get('src'):\r\n image.append(i.find('img').get('src'))\r\n else:\r\n image.append('')\r\n \r\n temp=re.split('\\s*',i.find_all('a')[1].text)\r\n temp.pop()\r\n temp.pop(0)\r\n title.append(' '.join(temp))\r\n\r\n df['title']=title\r\n df['link']=link\r\n df['image']=image\r\n \r\n return df\r\n\r\n\r\n\r\n#cnn etl\r\ndef cnn(page):\r\n \r\n title,link,image=[],[],[]\r\n df=pd.DataFrame()\r\n \r\n prefix='https://edition.cnn.com'\r\n \r\n a=page.find_all('div', class_='cd__wrapper')\r\n \r\n for i in a:\r\n title.append(i.find('span').text)\r\n link.append(prefix+i.find('a').get('href'))\r\n try:\r\n image.append('https:'+i.find('img').get('data-src-medium'))\r\n except:\r\n image.append('')\r\n \r\n df['title']=title\r\n df['link']=link\r\n df['image']=image\r\n \r\n return df\r\n\r\n\r\n#bloomberg etl\r\ndef bloomberg(page):\r\n c=[]\r\n title,link,image=[],[],[]\r\n df=pd.DataFrame()\r\n prefix='https://www.bloomberg.com'\r\n \r\n a=page.find_all('h1')\r\n for i in a:\r\n try:\r\n link.append(prefix+i.find('a').get('href'))\r\n title.append(i.find('a').text.replace('’','\\''))\r\n except:\r\n pass\r\n \r\n\r\n b=page.find_all('li')\r\n for j in b:\r\n try:\r\n temp=j.find('article').get('style')\r\n \r\n image.append( \\\r\n re.search('(?<=url\\()\\S*(?=\\))', \\\r\n temp).group() \\\r\n )\r\n except:\r\n temp=j.find('article')\r\n \r\n try:\r\n temp2=temp.get('id')\r\n if not temp2:\r\n image.append('')\r\n except:\r\n pass\r\n\r\n\r\n df['title']=title\r\n df['link']=link\r\n df['image']=image\r\n \r\n return df\r\n\r\n\r\n\r\n#financial times etl\r\ndef financialtimes(page):\r\n \r\n title,link,image=[],[],[]\r\n df=pd.DataFrame()\r\n prefix='https://www.ft.com'\r\n\r\n a=page.find_all('a',class_='js-teaser-heading-link')\r\n for i in a:\r\n link.append(prefix+i.get('href'))\r\n temp=i.text.replace('’','\\'').replace('‘','\\'')\r\n title.append(temp.replace('\\n\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t\\t',''))\r\n\r\n for j in a:\r\n temp=j.parent.parent.parent\r\n try:\r\n text=re.search('(?<=\")\\S*(?=next)',str(temp)).group()\r\n image.append(text+'next&fit=scale-down&compression=best&width=210')\r\n except:\r\n image.append('')\r\n \r\n df['title']=title\r\n df['link']=link\r\n df['image']=image\r\n \r\n return df\r\n\r\n\r\n\r\n#wall street journal 
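# The etl functions above (financialtimes, bloomberg) pull attribute values out
# of raw HTML with regex lookarounds. The technique in isolation -- the snippet
# below is invented for illustration, not taken from any of the scraped sites:
import re

snippet = '<h3 class="headline"><a href="/world/article-1">Title one</a></h3>'
# (?<=href=") anchors just after href=", (?=") stops before the closing quote,
# so the match itself is only the URL
links = re.findall(r'(?<=href=")[^"]*(?=")', snippet)
print(links)  # ['/world/article-1']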
etl\r\ndef wsj(page):\r\n \r\n df=pd.DataFrame()\r\n \r\n text=str(page)\r\n\r\n link=re.findall('(?<=headline\">
)',text)\r\n\r\n image=re.findall('(?<=img data-src=\")\\S*(?=\")',text)\r\n\r\n title=[]\r\n for i in link:\r\n try:\r\n temp=re.search('(?<={}\")>(.*?)<'.format(i),text).group()\r\n title.append(temp)\r\n except:\r\n pass\r\n\r\n for i in range(len(title)):\r\n title[i]=title[i].replace('’',\"'\").replace('<','').replace('>','')\r\n \r\n df['title']=title\r\n df['link']=link[:len(title)]\r\n df['image']=image+[''] if (len(image)!=len(title)) else image\r\n \r\n return df\r\n\r\n\r\n#bbc etl\r\ndef bbc(page):\r\n \r\n title,link,image=[],[],[]\r\n df=pd.DataFrame()\r\n \r\n prefix='https://www.bbc.co.uk'\r\n \r\n a=page.find_all('span',class_='title-link__title-text')\r\n \r\n for i in a:\r\n temp=i.parent.parent.parent.parent\r\n b=(re.findall('(?<=src=\")\\S*(?=jpg)',str(temp)))\r\n \r\n if len(b)>0:\r\n b=copy.deepcopy(b[0])+'jpg'\r\n else:\r\n b=''\r\n \r\n image.append(b)\r\n \r\n for j in a:\r\n title.append(j.text)\r\n \r\n for k in a:\r\n temp=k.parent.parent\r\n c=re.findall('(?<=href=\")\\S*(?=\">)',str(temp))\r\n link.append(prefix+c[0])\r\n \r\n df['title']=title\r\n df['link']=link\r\n df['image']=image\r\n \r\n return df\r\n\r\n\r\n\r\n#thompson reuters etl\r\ndef reuters(page):\r\n title,link,image=[],[],[]\r\n df=pd.DataFrame()\r\n \r\n prefix='https://www.reuters.com'\r\n \r\n for i in page.find('div', class_='news-headline-list').find_all('h3'):\r\n temp=i.text.replace('\t\t\t\t\t\t\t\t','')\r\n title.append(temp.replace('\\n',''))\r\n \r\n for j in page.find('div', class_='news-headline-list').find_all('a'):\r\n link.append(prefix+j.get('href'))\r\n link=link[0::2]\r\n \r\n for k in page.find('div', class_='news-headline-list').find_all('img'):\r\n if k.get('org-src'):\r\n image.append(k.get('org-src'))\r\n else:\r\n image.append('')\r\n\r\n \r\n df['title']=title\r\n df['link']=link\r\n df['image']=image\r\n \r\n return df\r\n\r\n\r\n\r\n#al jazeera etl\r\ndef aljazeera(page):\r\n title,link,image=[],[],[]\r\n df=pd.DataFrame()\r\n \r\n prefix='https://www.aljazeera.com'\r\n \r\n a=page.find_all('div',class_='frame-container')\r\n for i in a:\r\n title.append(i.find('img').get('title'))\r\n image.append(prefix+i.find('img').get('src'))\r\n temp=i.find('a').get('href')\r\n link.append(temp if 'www' in temp else (prefix+temp))\r\n \r\n b=page.find_all('div',class_='col-sm-7 topics-sec-item-cont')\r\n c=page.find_all('div',class_='col-sm-5 topics-sec-item-img')\r\n \r\n limit=max(len(b),len(c))\r\n j,k=0,0\r\n while j to kafka\n'''\n\ndef send_value(ip, value):\n requests.post(ip, data=value)\n\n\n\ndef read_temperature_simulator():\n\n while (True):\n try:\n payload = {'value': 'a'*int(random.expovariate(1/mean_length))}\n send_value(server_ips[random.randint(0, len(server_ips)-1)], json.dumps(payload))\n except Exception as e:\n print(e)\n print(\"Error publishing temperature simulator values\")\n sleep(random.expovariate(1/mean_sleep))\n\n\n'''\n\tMain function\n\tProgram is initialized with the thread launching, waiting for the user to exit the program.\n'''\n\ndef main():\n print('Simulators running. 
Press CTRL-C to interrupt...')\n\n thread_temperature = Thread(target=read_temperature_simulator)\n thread_temperature.start()\n\n while (thread_temperature.isAlive()):\n try:\n sleep(1)\n except KeyboardInterrupt:\n e.set()\n\n print('Exiting...')\n\n thread_temperature.join()\n\nif __name__ == '__main__':\n main()\n","sub_path":"data/PC1/n3/sensor_n3.py","file_name":"sensor_n3.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"76905440","text":"import requests\nimport os\nimport json\n\n\ndef get_birthrate_request():\n api_key = '7699e69a226cc8b98ffee27212220c7f'\n url = 'https://apidata.mos.ru/v1/datasets/2008/rows?projection=null&api_key='\n api_url = url + api_key\n\n return requests.get(api_url)\n\n\ndef get_birthrate_json():\n return get_birthrate_request().json()\n\n\ndef read_data_json_from_cache():\n directory_folder = '../Cache/'\n os.chdir(directory_folder)\n with open('birthrate_cache.json', 'r') as file:\n data = json.load(file)\n\n return data\n\n\ndef get_birthrate_statistics():\n birthrate_info = read_data_json_from_cache()\n birthrate_statistics = 'Статистика рождаемости за последние 3 года:\\n'\n\n # Get birthrate info for the last 3 years\n start_range = 6\n for i in range(start_range, len(birthrate_info)):\n year = birthrate_info[i]['Cells']['Year']\n total_number_born = birthrate_info[i]['Cells']['TotalNumber']\n birthrate_statistics += 'Количество родившихся в {} году - {}.\\n'.format(year, total_number_born)\n\n return birthrate_statistics\n","sub_path":"API/birthrate.py","file_name":"birthrate.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"214782400","text":"# 라인차트와 산점도 그리기 - 차트 선 색상, 선 스타일, 범례 세부 설정, 이미지 저장 \r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\n# 다양한 색상과 선 스타일로 sin, cos 곡선을 그려주는 함수\r\ndef chartLineStyle():\r\n\r\n # linspace() 함수는 인수로 지정한 구간을 균등하게 나눠주는 함수\r\n # 아래는 0과 10 사이를 1000개의 구간으로 균등하게 나눈 1차원 배열을 반환 \r\n x = np.linspace(0, 10, 1000) \r\n \r\n # linestyle : solid(-), dashed(--), dashdot(-.), dotted(:)\r\n plt.plot(x, np.sin(x - 0), color=\"blue\", label=\"solid\", linestyle=\"solid\", linewidth=3)\r\n plt.plot(x, np.sin(x - 1), color=\"g\", label=\"dashed\", linestyle=\"dashed\", lw=3)\r\n \r\n # 0과 1사이의 회색조\r\n plt.plot(x, np.sin(x - 2), color=\"0.75\", label=\"gray solid\", linestyle=\"solid\")\r\n \r\n # RGB 색상 지정\r\n plt.plot(x, np.sin(x - 3), color=\"#FFDD44\", label=\"dotted\", linestyle=\"dotted\")\r\n plt.plot(x, np.sin(x - 4), color=(1.0, 0.2, 0.3), label=\"point dashed\", linestyle=\"-.\")\r\n \r\n # -g(g-) : 녹색 실선, :b(b:) : 파란 점선, g-- : 녹색 대쉬선, b-. 
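# The simulator's main() above calls e.set() on KeyboardInterrupt, but no Event
# named e is ever created, so Ctrl-C would raise a NameError there. A minimal
# sketch of the intended shutdown pattern; the worker loop is a placeholder for
# the real read/publish code:
import threading
from time import sleep

stop_event = threading.Event()

def worker():
    while not stop_event.is_set():
        sleep(0.5)  # stand-in for reading one sample and posting it

def run():
    t = threading.Thread(target=worker)
    t.start()
    while t.is_alive():        # note: isAlive() was removed in Python 3.9
        try:
            sleep(1)
        except KeyboardInterrupt:
            stop_event.set()   # ask the worker to leave its loop
    t.join()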
: 파란 1점 쇄선 \r\n plt.plot(x, np.sin(x - 5), \"-g\", label=\"solid green\")\r\n \r\n # axis() 함수를 이용해 x와 y축의 범위나 비율에 대한 값을 지정한다.\r\n # equal : x축과 y축 비율을 같은 비율로 표시\r\n # scaled : x축과 y축 비율을 같은 비율로 표시(여백 축소)\r\n # on/off : 축과 눈금 등을 보이거나 숨김\r\n # [0, 10, -3, 3] : x축과 y축의 범위를 지정\r\n # plt.axis(\"equal\") \r\n plt.axis([0, 10, -1.5, 1.5])\r\n # plt.axis(\"off\")\r\n \r\n # 범례의 위치, 모서리, 그림자, 컬럼 수, x, y, width, height를 지정 \r\n # x, y, width, height는 loc 지정에 따라서 화면에 표시되는 것이 다를 수 있다.\r\n plt.legend(loc=\"upper center\", fancybox=True, shadow=True, \r\n ncol=3, bbox_to_anchor=(0.01, 1.30, 0.98, -0.2))\r\n \r\n plt.show()\r\n \r\n # 다음과 같이 차트를 화면에 띄우지 않고 이미지로 저장할 수 있다.\r\n # 위의 show() 메서드 호출 부분을 주석으로 처리하고 아래를 실행해야 함.\r\n #plt.savefig(\"chart_legend.png\")\r\n\r\n\r\n# 입력받은 데이터를 이용해 산점도를 출력하는 함수\r\ndef scatterChart(x_data, y_data): \r\n\r\n # 산점도 출력, c=\"b\" 파랑색, edgecolors=\"r\" 빨강 테두리\r\n plt.scatter(x_data, y_data, c=\"b\", edgecolors=\"r\")\r\n \r\n # 다음과 같이 plot() 함수를 이용해 산점도를 출력할 수 있다.\r\n #plt.plot(x_data, y_data, \"bo\", markeredgecolor=\"r\")\r\n \r\n plt.show()\r\n\r\n\r\n# 계단 형 라인차트를 출력하는 함수\r\ndef multiLineChart(): \r\n\r\n # 표준편차가 1이고 평균값이 0인 정규분포에서 50개의 난수 생성\r\n data1 = np.random.randn(50)\r\n data2 = np.random.randn(50)\r\n \r\n # 선 스타일을 계단형으로 지정\r\n # drawstyle : default | steps | steps-pre | steps-mid | steps-post\r\n plt.plot(data1, color=\"r\", label=\"step\", drawstyle=\"steps-post\")\r\n plt.plot(data2, color=\"g\", label=\"line\")\r\n \r\n plt.title(\"Multi Line Chart\")\r\n plt.xlabel(\"satge\")\r\n plt.ylabel(\"random num\")\r\n plt.legend(loc=\"best\")\r\n plt.show()\r\n\r\n\r\n# 다양한 색상과 크기로 산점도를 출력하는 함수\r\ndef multiSizeScatter():\r\n\r\n # 다양한 색상과 투명도를 가지는 산점도를 그리기 위해서 메르센느 트위스터(Mersenne Twister)\r\n # 알고리즘을 이용해 난수를 발생해 주는 프로그램용 컨테이너 생성 \r\n rng = np.random.RandomState(0)\r\n \r\n # 앞에서 생성한 컨테이너를 이용해 표준편차 1, 평균이 0인 정규분포에서 100개의 난수 생성\r\n x = rng.randn(100)\r\n y = rng.randn(100)\r\n \r\n # 균등분포에서 0 ~ 1 사이의 실수로된 100개의 표본을 추출\r\n colors = rng.rand(100)\r\n sizes = 1000 * rng.rand(100)\r\n \r\n # 색상, 크기, 투명도를 지정해 산점도를 출력\r\n plt.scatter(x, y, c=colors, s=sizes, alpha=0.3)\r\n \r\n # 색상 척도 차트에 출력\r\n plt.colorbar()\r\n plt.show()\r\n\r\n# 산점도 데이터 준비\r\npoint_list = []\r\nfor i in range(100):\r\n \r\n # 정규분포에서 난수를 추출해 x, y 좌표를 만들어 리스트에 추가\r\n x = np.random.normal(0, 1)\r\n y = x * 0.1 + 0.2 + np.random.normal(0, 1)\r\n point_list.append([x, y]) \r\n\r\nx_data = [x[0] for x in point_list]\r\ny_data = [y[1] for y in point_list]\r\nprint(x_data)\r\nprint(y_data)\r\n\r\n\r\n# 현재 모듈이 최상위에서 실행되면 \r\nif __name__ == \"__main__\": \r\n chartLineStyle() \r\n #scatterChart(x_data, y_data)\r\n #multiLineChart()\r\n #multiSizeScatter()\r\n \r\n\r\n ","sub_path":"add/matplotlib03.py","file_name":"matplotlib03.py","file_ext":"py","file_size_in_byte":4833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"225461534","text":"import requests\nimport http.client\nfrom http import HTTPStatus\nimport xml.etree.ElementTree as ET\n\nfrom web_client import group_client\n\nATTENDANCE_WEB_PATH = \"/rest/attendances/\"\n\n\ndef _get_attendance_response_text(response):\n if response.ok:\n return response.text\n if response.status_code in http.client.responses:\n return http.client.responses[response.status_code]\n return str(response.status_code)\n\n\ndef _forge_attendance_record(attendance_id, student_id, settings, is_present):\n try:\n group_id = group_client.find_group(student_id, settings.tutor_name)\n except requests.exceptions.RequestException as 
e:\n print(e)\n\n if group_id is None:\n return None\n attendance = ET.Element('attendance')\n ET.SubElement(attendance, 'attendance_id').text = str(attendance_id)\n ET.SubElement(attendance, 'student_id').text = str(student_id)\n ET.SubElement(attendance, 'group_id').text = str(group_id)\n ET.SubElement(attendance, 'week_id').text = str(settings.week_id)\n if is_present:\n ET.SubElement(attendance, 'presented').text = \"true\"\n else:\n ET.SubElement(attendance, 'presented').text = \"false\"\n return ET.tostring(attendance).decode()\n\n\ndef register_attendance(attendance_id, student_id, settings, is_present):\n body_message = _forge_attendance_record(attendance_id, student_id, settings, is_present)\n if body_message is None:\n return http.client.responses[HTTPStatus.NOT_FOUND.value]\n try:\n response = requests.post(settings.host + ATTENDANCE_WEB_PATH, data=body_message)\n except requests.exceptions.RequestException as e:\n print(e)\n return e.strerror\n return _get_attendance_response_text(response)\n","sub_path":"Python_software_engineering/Raspberry Pi/raspberry_pi/web_client/attendance_client.py","file_name":"attendance_client.py","file_ext":"py","file_size_in_byte":1728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"193439867","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\n# Language : Python3\n# Database : MySQL 5.7\n# Editor : vim 7.4\n# OS : Ubuntu 16.04 LTS\n\"\"\"\n\nimport re\nimport time\nimport logging\nimport mysql.connector\nimport hashlib\n\nfrom html.parser import HTMLParser\nfrom multiprocessing import Queue\n\nPATH_TXT = './python_docs.txt' # 保存网页的内容,用于后期单词数据的处理\nPATH_WORDS = './python_words.txt' # 保存处理后的单词\nURL_INDEX = 'https://docs.python.org/3/' # 起点,入口\nURL_Q = Queue() # 任务队列\nCOUNT = 0 # 计数,已打开网页的数量\n# 数据库参数\nDB_NAME = 'test'\nDB_USER = 'root'\nDB_PASSWORD = 'mysql'\n\n\ndef get_html_rawdata(url):\n \"\"\"\n # 1st:打开url,获取网页内初始数据\n # ================================\n # | succeed | failed\n # ---------------------------------\n # return | rawdata | False\n # =================================\n # 扩展:\n # 1. 添加浏览器头文件,模仿浏览器登陆\n # 2. cookie\n # 3. 
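# Extension 1 in the crawler docstring above (sending browser-like headers so
# the request is less likely to be blocked) can be done by wrapping the URL in
# a Request object -- a hedged sketch, not part of the original module:
from urllib.request import Request, urlopen

def get_html_with_headers(url):
    req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})
    try:
        with urlopen(req, timeout=10) as html:
            return html.read().decode('utf-8')
    except Exception:
        return False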
添加IP代理\n \"\"\"\n from urllib.request import urlopen\n\n try:\n with urlopen(url, timeout=10) as html:\n rawdata = html.read().decode('utf-8')\n return rawdata\n except:\n return False\n\n\nclass MyHTMLParser(HTMLParser):\n\n def __init__(self):\n # 建立一个列表用于存储数据\n HTMLParser.__init__(self)\n self.parsed_data = ''\n\n def handle_data(self, data):\n if data and re.match(r'\\w+', data): # 排除空行\n self.parsed_data = self.parsed_data + data + ' '\n\n\ndef get_context_and_urls(data):\n \"\"\"2nd: 处理网页数据,提取单词和url\"\"\"\n\n parser = MyHTMLParser()\n parser.feed(data)\n context = parser.parsed_data\n\n # 利用正则获取网页内的链接, 有两类,只取相对路径\n # r'[\\w][\\w/.-]*\\.html|https://[\\w/\\.]*'\n urls = re.findall(r'[\\w/.-]+\\.html', data)\n return context, urls\n\n\ndef save_context(url_current, context):\n \"\"\"\n # 3rd-1: 将网页内文本内容保存至本地文档中,最后再统一处理\n # ----------------------------------------------\n # 扩展: 将context也使用数据库保存\n \"\"\"\n global PATH_TXT\n\n context = ''.join([url_current, ':\\n', context, '\\n'])\n with open(PATH_TXT, 'a') as txt:\n txt.write(context)\n return True\n return False\n\n\ndef handle_urls(url_current, urls):\n \"\"\"3rd-2: 将url加入任务队列,并存入table(urls_all)\"\"\"\n\n from urllib.parse import urljoin\n\n global URL_INDEX, URL_Q\n INDEX_RE = r'%s$|%s[^/]+.html' % (URL_INDEX, URL_INDEX)\n\n # 将url加入Queue(URL_Q)、table(urls_all)\n tb_name = 'urls_all'\n cmd = 'create'\n operate_mysql(url_current, tb_name, cmd)\n try:\n for elem in urls:\n url_full = urljoin(url_current, elem)\n cmd = 'insert'\n # 利用MySQL去重:如果url_ful不存在,插入数据表并返回True\n if operate_mysql(url_full, tb_name, cmd):\n URL_Q.put(url_full)\n return True\n except Exception as e:\n logging.exception(e)\n return False\n\n\ndef get_and_save_words(path_in, path_out):\n \"\"\"\n # 4th: 统计单词频率,降序排列\n # ------------------------------\n # 扩展:对单词的筛选进行优化\n \"\"\"\n\n from collections import Counter\n import operator\n\n global URL_INDEX\n BREAK_WORDS_RE = r'[\\n\\s\\'\\\\\\\"\\‘\\’\\“\\”:()`~!$%^\\*@?\\-\\—–&,\\./\\+=;><\\[\\]\\|#{}]+'\n HTTPS_RE = r'%s' % URL_INDEX\n SIMPLE_WORDS_RE = r'\\w*\\d+\\w*|^\\w$|^(abc|aa|xx)'\n SIMPLE_WORDS = ['', 'a', 'an', 'at', 'as', 'are', 'all', 'be', 'by', 'c', 'do', 'does', 'e',\n 'for', 'g', 'h1', 'hello', 'how', 'if', 'is', 'in', 'it', 'me', 'not', 'or',\n 'of', 'on', 'to', 's', 'so', 'str', 'then', 'that', 'the', 'them', 'until',\n 'were', 'with', 'w3c'\n ]\n\n # 将数据拆分成单词,并存入Counter(words_counter)\n try:\n words_counter = Counter()\n with open(path_in, 'r') as txt:\n content_lines = txt.readlines()\n for line in content_lines:\n if re.match(HTTPS_RE, line): # 去除文件中为注释添加的 url\n continue\n\n break_words = re.split(BREAK_WORDS_RE, line) # 拆分成单词\n for word in break_words:\n if re.match(SIMPLE_WORDS_RE, word) or word.lower() in SIMPLE_WORDS: # 去除数字和简单词\n continue\n else:\n words_counter[word.lower()] += 1\n\n # convert counter to str,并根据频率降序排列\n words = ''\n for key, value in sorted(words_counter.items(), key=operator.itemgetter(1), reverse=True):\n words += ''.join([key, ' : ', str(value), '\\n'])\n\n # 将整理好的单词写入本地文件(path_out)中\n with open(path_out, 'w') as txt_words:\n txt_words.write(words)\n return True\n except Exception as e:\n logging.exception(e)\n return False\n\n\ndef operate_mysql(url, tb_name, cmd):\n \"\"\"\n # | create table | insert url | select diff url\n # ------------------------------------------------------------------\n # return True: | 创建过程正常 | url不存在 | 成功时 return diff_urls\n # -------------------------------------------------------------------\n # return False: | 创建失败 | url已存在数据库中 | 数据不存在\n #\n # 
===================================================================\n # 扩展:\n # 1. 设置表中id自动递增: Failed, 不能设置两个主键,再尝试下其他方案\n # 2. 复习MySQL课程,语法还可以优化\n # 3. 数据库建立连接可以提取出来,使只建立一次连接\n # 4. hash去重: 将url_hash设为主键,关注有没有更好的方案\n \"\"\"\n\n global DB_USER, DB_PASSWORD, DB_NAME\n\n sql_create_tb = 'create table if not exists %s(url_hash varchar(32) primary key, url varchar(80));'\n # sql_insert = 'insert into %s(url_hash, url, id) values(%s, %s, %s);'\n sql_insert = 'insert into %s(url_hash, url) values(\"%s\", \"%s\");'\n sql_select = 'select url from %s;'\n sql_select_diff = 'select url from urls_all where url_hash != all(select url_hash from urls_visited)'\n\n conn = mysql.connector.connect(\n user=DB_USER,\n password=DB_PASSWORD,\n database=DB_NAME\n )\n cursor = conn.cursor()\n\n md5 = hashlib.md5()\n md5.update(url.encode('utf-8'))\n url_hash = md5.hexdigest()\n\n # 根据不同的命令选择不同的操作\n try:\n if cmd == 'create':\n try:\n cursor.execute(sql_create_tb % tb_name)\n except Exception as e:\n logging.exception(e)\n elif cmd == 'insert':\n cursor.execute(sql_insert % (tb_name, url_hash, url))\n conn.commit()\n elif cmd == 'select diff':\n cursor.execute(sql_select_diff)\n diff_data = cursor.fetchall()\n diff_urls = []\n for i in range(len(diff_data)):\n diff_urls.append(diff_data[i][0])\n return diff_urls\n # 下面这条命令已被优化为'select diff'\n elif cmd == 'select':\n cursor.execute(sql_select % tb_name)\n select_data = cursor.fetchall()\n urls = []\n for i in range(len(select_data)):\n urls.append(select_data[i][0])\n return urls\n except Exception as e:\n # logging.exception(e)\n return False\n finally:\n cursor.close()\n conn.close()\n return True\n\n\ndef start_work(url, count):\n\n global URL_Q\n\n # 创建表\n tb_name = 'urls_visited'\n cmd_create = 'create'\n operate_mysql(url, tb_name, cmd_create)\n\n # 将url插入表(urls_visited)并判重:\n # 1. if 不重复: 继续执行,\n # 2. else 重复: 从任务队列中get下一url并再次调用本函数\n cmd_insert = 'insert'\n if operate_mysql(url, tb_name, cmd_insert) and re.match(r'%s' % URL_INDEX, url):\n rawdata = get_html_rawdata(url)\n if rawdata:\n print('第%d个网页正在打开: %s' % (count, url))\n context, urls = get_context_and_urls(rawdata)\n save_context(url, context)\n handle_urls(url, urls)\n else:\n print('第%d个网址访问<失败>: %s' % (count, url))\n else:\n print('当前url: <%s> 已经访问过或者与官方网站无关' % url)\n try:\n url = URL_Q.get(timeout=1)\n start_work(url, count)\n except Exception as e:\n logging.exception(e)\n\n\ndef main():\n \"\"\"使用多线程调用函数start_work()开始抓取网页数据\"\"\"\n\n import threading\n\n global COUNT, URL_INDEX, PATH_TXT, PATH_WORDS, URL_Q\n NUM_THREAD = 30 # 线程数\n\n # 1. (urls_all - urls_visited) ==> URL_Q\n url = ''\n tb_name = ''\n cmd_select_diff = 'select diff'\n urls_unvisited = operate_mysql(url, tb_name, cmd_select_diff)\n\n # 2. 
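# operate_mysql() above interpolates values straight into the SQL text with %,
# which breaks on quotes and invites injection. mysql.connector also accepts
# parameterized queries; a sketch using the same table layout as above:
def insert_url(cursor, tb_name, url_hash, url):
    # table names cannot be parameterized, but the values can
    sql = "insert into {}(url_hash, url) values(%s, %s)".format(tb_name)
    cursor.execute(sql, (url_hash, url))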
如果urls_visited为空,则值为0或set(),说明是第一次运行,需要先执行一次,使任务队列不为空,否则将差集加入任务队列\n if not urls_unvisited:\n url = URL_INDEX\n COUNT += 1\n start_work(url, COUNT)\n else:\n for elem in urls_unvisited:\n URL_Q.put(elem)\n\n while not URL_Q.empty():\n threads = []\n for x in range(NUM_THREAD):\n try:\n url = URL_Q.get(timeout=3)\n COUNT += 1\n thread = threading.Thread(target=start_work, args=(url, COUNT))\n thread.start()\n threads.append(thread)\n except Exception as e:\n logging.exception(e)\n for thread in threads:\n thread.join()\n\n # 最后统计单词词频\n get_and_save_words(PATH_TXT, PATH_WORDS)\n print('访问了%d个网页' % COUNT)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"python3_docs_crawler.py","file_name":"python3_docs_crawler.py","file_ext":"py","file_size_in_byte":10239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"300758579","text":"from hubserver.app.projects.utils import format_code_file\nfrom tests.utils import HubServerTestCase\n\n\nclass TestProjectUtils(HubServerTestCase):\n def test_format_code_file(self):\n name = 'test'\n content = 'print(test)'\n result = format_code_file(name, content)\n self.assertEqual(result, '
{}
'.format(content))\n","sub_path":"tests/app/projects/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"283507902","text":"import os, sys\nsys.path.append('..')\nfrom PyQt5.QtCore import (\n Qt,\n QObject,\n QTimer,\n QByteArray,\n QDir,\n QFile,\n QFileInfo,\n QStandardPaths,\n QUrl,\n QMutex,\n QMutexLocker,\n QStandardPaths,\n pyqtSignal,\n pyqtSlot,\n QDateTime,\n QSettings,\n QVariant,\n QRegExp,\n QThread,\n QIODevice,\n QBuffer,\n QMessageLogger,\n qInstallMessageHandler,\n QCommandLineParser,\n QCommandLineOption,\n QJsonDocument,\n QJsonValue,\n QJsonParseError,\n QCoreApplication,\n)\nfrom PyQt5.QtNetwork import (\n QNetworkInterface,\n QHostAddress,\n QAbstractSocket,\n QUdpSocket,\n QSslConfiguration,\n QSsl,\n QSslCertificate,\n QSslKey,\n QSslSocket,\n )\nfrom PyQt5.QtWebSockets import (\n QWebSocketServer,\n QWebSocket,\n QWebSocketProtocol,\n )\nfrom PyQt5.QtSql import (\n QSqlDatabase,\n QSqlQuery\n )\n\n\nfrom PyQt5.QtSerialPort import (\n QSerialPort, QSerialPortInfo\n )\n\nfrom qtwebapp.httpserver import (\n g_session_store,\n HttpRequest,\n HttpResponse,\n HttpCookie,\n HttpSessionStore,\n HttpListener,\n StaticFileController,\n createSslConfig,\n )\nfrom qtwebapp.util import json_dumps, json_loads\nfrom qtwebapp.mime import MIMETYPE\n\n\nCONFIG = '../push_gateway.ini'\n\ndef getSettingsConfig(settings, settingGroup=''):\n ret = {}\n settings.beginGroup(settingGroup)\n if settingGroup == 'remote':\n ret['url'] = settings.value('url', defaultValue='')\n ret['retryTimes'] = int(settings.value('retryTimes', defaultValue='3'))\n ret['retryInterval'] = int(settings.value('retryInterval', defaultValue='60000'))\n ret['connectTimeout'] = int(settings.value('connectTimeout', defaultValue='5000'))\n\n elif settingGroup == 'local':\n ret['ipStart'] = settings.value('ipStart', defaultValue='192.168.1.101')\n ret['ipEnd'] = settings.value('ipEnd', defaultValue='192.168.1.130')\n ret['username'] = settings.value('username', defaultValue='a')\n ret['password'] = settings.value('password', defaultValue='aaaaaaa')\n ret['scanInterval'] = int(settings.value('scanInterval', defaultValue='3000'))\n ret['scanTimeout'] = int(settings.value('scanTimeout', defaultValue='1500'))\n # ret['pushInterval'] = int(settings.value('pushInterval', defaultValue='5000'))\n # ret['requestTimeout'] = int(settings.value('requestTimeout', defaultValue='2000'))\n elif settingGroup == 'hololens_device_portal_api':\n grps = settings.childGroups()\n for grp in grps:\n settings.beginGroup(grp)\n ret[grp] = {}\n for ks in settings.childKeys():\n ret[grp][ks] = settings.value(ks, defaultValue='')\n settings.endGroup()\n\n settings.endGroup()\n return ret\n\nclass TestServerController(StaticFileController):\n signalService = pyqtSignal(HttpRequest, HttpResponse)\n def __init__(self,\n parent=None,\n enableSession=False,\n unauthorizedPage='',\n loginPage='',\n **kwargs):\n super(TestServerController, self).__init__(parent=parent, **kwargs)\n self._parent = parent\n self.enableSession = enableSession\n self.unauthorizedPage = unauthorizedPage\n self.loginPage = loginPage\n\n\n\n\n def responseStaticPage(self, path, response):\n self.mutex.lock()\n filepath = self.docroot + path\n file = QFile(filepath)\n if file.open(QIODevice.ReadOnly):\n self.setContentType(filepath, response)\n # response.setHeader(b\"Cache-Control\", b\"max-age=\" + str(int(self.maxAge / 
1000)).encode(encoding='utf-8'))\n response.setHeader(b'Cache-Control', b'no-cache')\n # response.setHeader(b'Access-Control-Allow-Origin', b'*')\n # response.setHeader(b'Access-Control-Allow-Methods', b'POST')\n response.write(file.readAll(), lastPart=True)\n file.close()\n self.mutex.unlock()\n else:\n self.mutex.unlock()\n response.setStatus(401, b'Unauthorized')\n msg = '401 forbidden: {} page error'.format(path)\n response.write(msg.encode(), lastPart=True)\n\n def handleRequest(self, path, request, response):\n ret = {}\n not200 = False\n if path == '/api/power/battery':\n ret = {\n \"AcOnline\": 1,\n \"BatteryPresent\": 1,\n \"Charging\": 0,\n \"DefaultAlert1\": 0,\n \"DefaultAlert2\": 1628,\n \"EstimatedTime\": 17295,\n \"MaximumCapacity\": 16275,\n \"RemainingCapacity\": 14749\n }\n elif path == '/api/holographic/mrc/status':\n ret = {\"IsRecording\" : False,\n \"ProcessStatus\" : {\n \"MrcProcess\" : \"Running\"\n }\n }\n elif path == '/api/holographic/mrc/file':\n if request.getMethod().decode().lower() == 'delete':\n response.setStatus(200, b'OK')\n response.write(b'', lastPart=True)\n return\n\n\n filename = request.getParameter(b'filename').decode()\n filename = QByteArray.fromBase64(filename.encode()).data().decode()\n fi = QFileInfo(filename)\n if fi.exists():\n path = fi.absoluteFilePath()\n f = QFile(path)\n ext = path[path.rindex('.'):]\n f.open(QIODevice.ReadOnly)\n ba = f.readAll()\n f.close()\n response.setHeader(b'Content-Type', MIMETYPE[ext].encode())\n # response.setHeader(b'Content-Length', str(len(ba)).encode())\n response.setStatus(200, b'OK')\n response.write(ba, lastPart=True)\n return\n else:\n not200 = True\n ret = {'error':'404 not found'}\n\n elif path == '/api/holographic/mrc/files':\n ret = {\"MrcRecordings\" : [\n {\n \"CreationTime\" : 131423339684371628.0,\n \"FileName\" : \"20170619_161928_HoloLens.mp4\",\n \"FileSize\" : 4713633\n },\n {\n \"CreationTime\" : 131423339609756226.0,\n \"FileName\" : \"20170619_161920_HoloLens.jpg\",\n \"FileSize\" : 301614\n },\n {\n \"CreationTime\" : 131423339870432136.0,\n \"FileName\" : \"20170619_161946_HoloLens.jpg\",\n \"FileSize\" : 357791\n }\n ]}\n\n elif path == '/api/holographic/os/webmanagement/settings/https':\n ret = {\"httpsRequired\" : False}\n\n elif path == '/api/holographic/mrc/thumbnail':\n filename = request.getParameter(b'filename').decode()\n filename = QByteArray.fromBase64(filename.encode()).data().decode()\n fi = QFileInfo('test_base64_json.json')\n l = json_loads(fi)\n ll = list(filter(lambda x:x['filename'] == filename, l))\n ba = b''\n if len(ll):\n ba = QByteArray.fromBase64(ll[0]['thumbnail'].encode()).data()\n response.setHeader(b'Content-Type', (MIMETYPE['.jpg'] + '').encode())\n response.setStatus(200, b'OK')\n response.write(ba, lastPart=True)\n return\n else:\n not200 = True\n response.setHeader(b'Content-Type', (MIMETYPE['.json'] + ';charset=utf-8').encode())\n response.setStatus(200, b'OK')\n if not200:\n response.setStatus(404, b'Not found')\n response.write(json_dumps(ret).encode(), lastPart=True)\n\n def service(self, request, response):\n global g_session_store\n path = request.getPath().decode(encoding='utf-8')\n self.handleRequest(path, request, response)\n\n\ndef main():\n app = QCoreApplication(sys.argv)\n controller = TestServerController(parent=app)\n # th0 = QThread()\n # th0.start()\n # th0 = QCoreApplication.instance().thread()\n httplistener = HttpListener(optParser=None,\n requestHandler=controller,\n parent=None,\n maxRequestSize=1600000000,\n 
maxMultiPartSize=1000000000,\n port=80,\n # host='192.168.1.129',\n )\n\n # httplistener.moveToThread(th0)\n httplistener.start.connect(httplistener.slotListen)\n httplistener.start.emit()\n sys.exit(app.exec_())\n\nif __name__ == '__main__':\n main()\n\n\n","sub_path":"test/test_holo_server.py","file_name":"test_holo_server.py","file_ext":"py","file_size_in_byte":9707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"3933494","text":"#!/bin/python3\n\nimport numpy as np\nfrom field import field\nimport analyzer\n\n\n\ndef solve_sudoku(arr : np.array):\n \"\"\"solves a sudoku for a 9X9 array\"\"\"\n #init:\n ls = field_array_of(arr)\n changing = True\n finished = False\n \n #the actual solving algorithm:\n #tries to solve it by analyzing rows, columns and fields, starts guessing\n #if fixpoint is reached with other technique\n while not finished:\n while changing:\n analyzer.iterate(ls)\n changing = update_knowns(ls)\n if ready(ls):\n finished = True\n else:\n #the case where we have to guess\n a,b = analyzer.fewest_poss(ls)\n for val in ls[a,b].poss:\n arr = to_array(ls)\n arr[a,b] = val\n try:\n res = solve_sudoku(arr)\n except analyzer.NotSolvableException:\n print(\"one branch not solvable\")\n else:\n return res\n raise analyzer.NotSolvableException \n # output the result\n res = to_array(ls)\n return res\n\ndef to_array(ls):\n f = lambda li: list(map(lambda x : x.value, li))\n res = np.array(list(map(f, ls)))\n return res\n\ndef field_array_of(arr : np.array):\n ls = np.array([[None]*9]*9)\n\n #zeroes mean there was no entry\n for i in range(9):\n for j in range(9):\n ls[i,j] = field(arr[i,j],i,j)\n return ls\n\n\ndef update_knowns(ls):\n \"\"\"updates values where possible, and returns True if it was still able to\n change something, to use it for fixpoint iteration\"\"\"\n changing = False\n for i in ls:\n for j in i:\n if j.value == 0:\n if len(j.poss) == 0:\n raise analyzer.NotSolvableException\n elif len(j.poss)==1:\n if j.poss[0] == 0:\n raise analyzer.NotSolvableException\n j.value = j.poss[0]\n j.poss = []\n changing = True\n analyzer.iterate(ls)\n return changing\n\ndef ready(ls):\n\n for i in ls:\n for j in i:\n if j.value == 0:\n return False\n return True\n\n\n\n\n","sub_path":"sudoku/solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":2239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"324118541","text":"from django.db import models\nfrom django.conf import settings\n\n\nclass Category(models.Model):\n name = models.CharField(max_length=50)\n\n def __str__(self):\n return f\"{self.name}\"\n\n\nclass Tag(models.Model):\n name = models.CharField(max_length=30)\n\n def __str__(self):\n return f\"{self.name}\"\n\n\nclass Product(models.Model):\n title = models.CharField(max_length=50, null=False, blank=False)\n description = models.TextField()\n price = models.DecimalField(max_digits=20, decimal_places=2)\n stock = models.IntegerField()\n category = models.ForeignKey(\n to=Category,\n on_delete=models.CASCADE,\n blank=True\n )\n image = models.ImageField()\n tags = models.ManyToManyField(Tag, blank=True)\n\n @property\n def rating_list(self):\n rating_list = []\n objs = self.ratings.all()\n for obj in objs:\n rating_list.append({\n 'user': obj.user.username,\n 'rating': obj.rating,\n 'comment': obj.comment\n })\n return rating_list\n\n def __str__(self):\n return f\"{self.title} : {self.description}\"\n\n\nclass Wishlist(models.Model):\n user = 
models.ForeignKey(\n settings.AUTH_USER_MODEL,\n on_delete=models.CASCADE\n )\n products = models.ManyToManyField(Product, blank=True)\n\n def __str__(self):\n return f\"{self.user.username} : {self.products.all()}\"\n\n\nclass Cart(models.Model):\n user = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n on_delete=models.CASCADE\n )\n products = models.ManyToManyField(Product, blank=True)\n\n def __str__(self):\n return f\"{self.user.username} : {self.products.all()}\"\n\n\nclass Orders(models.Model):\n user = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n on_delete=models.CASCADE,\n related_name=\"orders\")\n total = models.FloatField()\n payment_id = models.CharField(max_length=50)\n order_id = models.CharField(max_length=50, unique=True)\n signature = models.CharField(max_length=250)\n date_ordered = models.DateTimeField(auto_now_add=True)\n\n @property\n def items(self):\n item_list = []\n objs = self.order_items.all()\n for obj in objs:\n item_list.append({\n \"item\": obj.product.title,\n \"id\": obj.product.id,\n \"price\": obj.product.price,\n \"image\": str(obj.product.image),\n \"quantity\": obj.qty,\n })\n return item_list\n\n def __str__(self):\n return f\"{self.order_id}\"\n\n\nclass OrderItem(models.Model):\n order = models.ForeignKey(\n Orders,\n on_delete=models.CASCADE,\n related_name=\"order_items\"\n )\n qty = models.IntegerField()\n product = models.ForeignKey(\n to=Product,\n on_delete=models.CASCADE,\n blank=True\n )\n\n\nRATING = (\n (1, 1),\n (2, 2),\n (3, 3),\n (4, 4),\n (5, 5),\n)\n\n\nclass Rating(models.Model):\n user = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n on_delete=models.CASCADE\n )\n product = models.ForeignKey(\n Product,\n on_delete=models.CASCADE,\n related_name='ratings'\n )\n rating = models.PositiveIntegerField(choices=RATING, default=0)\n comment = models.CharField(max_length=300, default=\"\")\n\n def _str_(self):\n return f\"{self.product} - {self.rating}\"\n","sub_path":"backend/store/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"504758724","text":"# Copyright (c) 2012 The Chromium Authors. 
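# Usage sketch for the store models above (assumes a configured Django
# project; the order id is made up). Orders.items works because OrderItem
# declares related_name="order_items", which creates the reverse FK accessor:
#
#   order = Orders.objects.get(order_id="order_123")
#   for item in order.order_items.all():   # reverse lookup from Orders
#       print(item.product.title, item.qty)
#   print(order.items)                     # same data through the property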
All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\nfrom buildbot.process.properties import WithProperties\n\nfrom master import master_config\nfrom master import master_utils\nfrom master.factory import remote_run_factory\n\nimport master_site_config\n\nActiveMaster = master_site_config.ChromiumWebkit\n\ndefaults = {}\n\nhelper = master_config.Helper(defaults)\nB = helper.Builder\nF = helper.Factory\nT = helper.Triggerable\n\nrevision_getter = master_utils.ConditionalProperty(\n lambda build: build.getProperty('revision'),\n WithProperties('%(revision)s'),\n 'master')\n\ndef m_remote_run_chromium_src(recipe, **kwargs):\n kwargs.setdefault('revision', revision_getter)\n return remote_run_factory.RemoteRunFactory(\n active_master=ActiveMaster,\n repository='https://chromium.googlesource.com/chromium/src.git',\n recipe=recipe,\n factory_properties={'path_config': 'kitchen'},\n use_gitiles=True,\n **kwargs)\n\ndefaults['category'] = 'layout'\n\n\n################################################################################\n## Release\n################################################################################\n\n#\n# Linux Rel Builder/Tester\n#\n\nB('WebKit Linux Precise', 'f_webkit_linux_rel', scheduler='global_scheduler')\nF('f_webkit_linux_rel', m_remote_run_chromium_src('chromium'))\n\nB('WebKit Linux Trusty', 'f_webkit_linux_rel_trusty',\n scheduler='global_scheduler')\nF('f_webkit_linux_rel_trusty', m_remote_run_chromium_src('chromium'))\n\nB('WebKit Linux Precise ASAN', 'f_webkit_linux_rel_asan',\n scheduler='global_scheduler', auto_reboot=True)\nF('f_webkit_linux_rel_asan', m_remote_run_chromium_src('chromium'))\n\nB('WebKit Linux Precise MSAN', 'f_webkit_linux_rel_msan',\n scheduler='global_scheduler', auto_reboot=True)\nF('f_webkit_linux_rel_msan', m_remote_run_chromium_src('chromium'))\n\nB('WebKit Linux Precise Leak', 'f_webkit_linux_leak_rel',\n scheduler='global_scheduler', category='layout')\nF('f_webkit_linux_leak_rel', m_remote_run_chromium_src('chromium'))\n\n\n################################################################################\n## Debug\n################################################################################\n\n#\n# Linux Dbg Webkit builders/testers\n#\n\nB('WebKit Linux Precise (dbg)', 'f_webkit_dbg_tests',\n scheduler='global_scheduler', auto_reboot=True)\nF('f_webkit_dbg_tests', m_remote_run_chromium_src('chromium'))\n\n\ndef Update(_config, _active_master, c):\n return helper.Update(c)\n","sub_path":"masters/master.chromium.webkit/master_linux_webkit_latest_cfg.py","file_name":"master_linux_webkit_latest_cfg.py","file_ext":"py","file_size_in_byte":2575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"47277008","text":"import multiprocessing\nimport os\nimport re\nimport threading\nimport shutil\nimport sys\nfrom typing import Optional\nimport uuid\n\nfrom v2.containers import BuildCfg\nfrom v2.logging_subprocess import call\nfrom v2.workspace import (\n CANNOT_BUILD,\n BENCHMARK_BRANCH_NAME, BENCHMARK_BRANCH_ROOT, BENCHMARK_ENV, BENCHMARK_ENV_BUILT,\n BUILD_LOG_ROOT, BUILD_IN_PROGRESS_ROOT,\n MUTATION_LOCK, REF_REPO_ROOT)\n\n_NAMESPACE_LOCK = threading.Lock()\n_CONDA_ENV_TEMPLATE = \"env_{n:0>2}\"\n_MAX_ACTIVE_ENVS = 50\n\n\nclass OutOfEnvsError(Exception):\n pass\n\n\nclass _Unbuildable:\n def __init__(self):\n self._known_unbuildable = None\n\n def _lazy_init(self):\n if self._known_unbuildable is None:\n 
with open(CANNOT_BUILD, \"at\") as f:\n pass\n\n with open(CANNOT_BUILD, \"rt\") as f:\n self._known_unbuildable = set(f.read().splitlines(keepends=False))\n\n def check(self, checkout: str) -> bool:\n self._lazy_init()\n return checkout in self._known_unbuildable\n\n def update(self, checkout: str) -> None:\n self._lazy_init()\n MUTATION_LOCK.get()\n if checkout not in self._known_unbuildable:\n with open(CANNOT_BUILD, \"at\") as f:\n f.write(f\"{checkout}\\n\")\n self._known_unbuildable.add(checkout)\n\n def reset_unbuildable(self, checkout: str) -> None:\n self._lazy_init()\n MUTATION_LOCK.get()\n if checkout in self._known_unbuildable:\n self._known_unbuildable.remove(checkout)\n with open(CANNOT_BUILD, \"wt\") as f:\n f.write(\"\\n\".join(self._known_unbuildable) + \"\\n\")\n\n_UnbuildableSingleton = _Unbuildable()\ncheck_unbuildable = _UnbuildableSingleton.check\nmark_unbuildable = _UnbuildableSingleton.update\nreset_unbuildable = _UnbuildableSingleton.reset_unbuildable\n\n\ndef make_conda_env(\n env_path: Optional[str] = None,\n build_cfg: BuildCfg = BuildCfg(),\n):\n MUTATION_LOCK.get()\n cleanup = (env_path is None)\n success = False\n try:\n with _NAMESPACE_LOCK:\n if env_path is None:\n active_envs = set(os.listdir(BUILD_IN_PROGRESS_ROOT))\n for i in range(_MAX_ACTIVE_ENVS):\n env_name = _CONDA_ENV_TEMPLATE.format(n=i)\n if env_name not in active_envs:\n break\n else:\n raise OutOfEnvsError(\"Failed to create env. Too many already exist.\")\n\n env_path = os.path.join(BUILD_IN_PROGRESS_ROOT, env_name)\n else:\n env_name = \"custom\"\n\n mkl_spec = f\"=={build_cfg.mkl_version}\" if build_cfg.mkl_version else \"\"\n call(\n f\"conda create --no-default-packages -y --prefix {env_path} python={build_cfg.python_version}\",\n shell=True,\n check=True,\n task_name=f\"Conda env creation: {env_name}\",\n log_dir=BUILD_LOG_ROOT,\n )\n\n call(\n f\"\"\"\n echo ADD_INTEL\n conda config --env --add channels intel\n\n echo MAIN_INSTALL\n conda install -y numpy ninja pyyaml mkl{mkl_spec} mkl-include setuptools cmake cffi hypothesis typing_extensions pybind11 ipython\n\n echo GLOG_INSTALL\n conda install -y -c conda-forge glog\n\n echo INSTALL_VALGRIND\n conda install -y -c conda-forge valgrind\n \"\"\",\n shell=True,\n check=True,\n task_name=f\"Conda env install: {env_name}\",\n conda_env=env_path,\n log_dir=BUILD_LOG_ROOT,\n )\n\n success = True\n return env_path\n\n finally:\n if cleanup and not success and env_path is not None and os.path.exists(env_path):\n shutil.rmtree(env_path)\n\n\ndef _build(\n repo_path: str,\n checkout: Optional[str],\n setup_mode: str,\n conda_env: str,\n build_cfg: BuildCfg,\n show_progress: bool,\n taskset_cores: Optional[str],\n nice: Optional[str],\n max_jobs: Optional[int],\n) -> int:\n assert setup_mode in (\"develop\", \"install\")\n\n no_fbgemm = '-c submodule.\"third_party/fbgemm\".update=none'\n no_xnnpack = '-c submodule.\"third_party/XNNPACK\".update=none'\n no_nervanagpu = '-c submodule.\"third_party/nervanagpu\".update=none'\n call(\n f\"\"\"\n retry () {{ $* || (sleep 1 && $*) || (sleep 2 && $*); }}\n\n git checkout .\n git clean -fd\n git checkout .\n git checkout {checkout}\n git clean -fd\n\n # `git submodule sync` doesn't sync submodule submodules, which can\n # cause build failures. 
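# _Unbuildable above mirrors an append-only text file into an in-memory set:
# the set survives restarts via the file, membership checks stay O(1), and a
# new entry costs one appended line. The idiom in isolation (the path name is
# hypothetical):
def load_seen(path):
    open(path, "at").close()      # make sure the file exists
    with open(path, "rt") as f:
        return set(f.read().splitlines())

def remember(path, seen, item):
    if item not in seen:          # only genuinely new items hit the disk
        with open(path, "at") as f:
            f.write(item + "\n")
        seen.add(item)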
So instead we just start over.\n rm -rf third_party/*\n git checkout third_party\n retry git submodule sync\n\n # History for XNNPack has changed, so this will fail in February/March\n retry git {no_fbgemm} {no_xnnpack} {no_nervanagpu} submodule update --init --recursive\n \"\"\",\n shell=True,\n cwd=repo_path,\n check=True,\n conda_env=conda_env,\n task_name=\"(pre) Build PyTorch\",\n log_dir=BUILD_LOG_ROOT,\n )\n\n call(\n f\"\"\"\n retry () {{ $* || (sleep 1 && $*) || (sleep 2 && $*); }}\n retry git submodule update --init --recursive\n \"\"\",\n shell=True,\n cwd=repo_path,\n check=False,\n conda_env=conda_env,\n task_name=\"(pre) Build PyTorch\",\n log_dir=BUILD_LOG_ROOT,\n )\n\n progress_pattern = re.compile(r\"^\\[[0-9]+/[0-9]+\\]\\s.+$\")\n def per_line_fn(l):\n if progress_pattern.search(l):\n print(f\"\\r{l.strip()[:120]:<120}\", end=\"\")\n sys.stdout.flush()\n\n if \"BUILD_DONE\" in l:\n print(\"\\r\")\n\n taskset_str = f\"taskset --cpu-list {taskset_cores} \" if taskset_cores else \"\"\n nice_str = f\"nice -n {nice} \" if nice is not None else \"\"\n retcode = call(\n f\"\"\"\n # CCACHE variables are generally in `.bashrc`\n source ~/.bashrc\n which c++ | awk '{{print \"which c++: \"$1}}'\n\n {taskset_str}{nice_str}python -u setup.py clean\n {taskset_str}{nice_str}python -u setup.py {setup_mode}\n echo BUILD_DONE\n \"\"\",\n shell=True,\n cwd=repo_path,\n env={\n \"USE_DISTRIBUTED\": \"0\",\n \"BUILD_TEST\": build_cfg.build_tests,\n \"USE_CUDA\": \"0\",\n \"USE_FBGEMM\": \"0\",\n \"USE_NNPACK\": \"0\",\n \"USE_QNNPACK\": \"0\",\n \"USE_PYTORCH_QNNPACK\": \"0\",\n \"USE_XNNPACK\": \"0\",\n \"BUILD_CAFFE2_OPS\": \"0\",\n \"REL_WITH_DEB_INFO\": \"1\",\n \"MKL_THREADING_LAYER\": \"GNU\",\n \"USE_NUMA\": \"0\",\n \"MAX_JOBS\": \"\" if max_jobs is None else str(max_jobs),\n \"CFLAGS\": f\"-Wno-error=stringop-truncation\",\n },\n per_line_fn=per_line_fn if show_progress else None,\n conda_env=conda_env,\n task_name=\"Build PyTorch\",\n log_dir=BUILD_LOG_ROOT,\n )\n\n if not retcode:\n retcode = call(\n 'python -c \"import torch\"',\n shell=True,\n env={\n \"MKL_THREADING_LAYER\": \"GNU\",\n },\n conda_env=conda_env,\n task_name=\"Test PyTorch\",\n log_dir=BUILD_LOG_ROOT,\n )\n\n if retcode:\n mark_unbuildable(checkout)\n shutil.rmtree(conda_env)\n\n return retcode\n\n\ndef build_benchmark_env():\n MUTATION_LOCK.get()\n if os.path.exists(BENCHMARK_ENV_BUILT):\n return\n\n if not os.path.exists(BENCHMARK_ENV):\n shutil.rmtree(BENCHMARK_ENV, ignore_errors=True)\n\n make_conda_env(env_path=BENCHMARK_ENV, build_cfg=BuildCfg())\n call(\"pip install yattag\", shell=True, conda_env=BENCHMARK_ENV)\n\n # By default, build will try to take over all cores. 
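# per_line_fn above redraws a single status line: printing "\r" with end=""
# returns the cursor to column 0, and padding to a fixed width makes a short
# line fully overwrite a longer one. The same trick in isolation:
import sys
import time

for i in range(1, 6):
    msg = "[%d/5] compiling step %d" % (i, i)
    print("\r%-40s" % msg, end="")   # pad to 40 columns to erase leftovers
    sys.stdout.flush()
    time.sleep(0.2)
print()                              # finish with a real newline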
However, this can\n # lead to OOM during some of the memory-intensive parts of compilation.\n max_jobs = max(int(multiprocessing.cpu_count() * 0.9), 1)\n\n retcode = _build(\n BENCHMARK_BRANCH_ROOT,\n BENCHMARK_BRANCH_NAME,\n \"develop\",\n BENCHMARK_ENV,\n build_cfg=BuildCfg(),\n show_progress=True,\n taskset_cores=None,\n nice=None,\n max_jobs=max_jobs,\n )\n\n assert not retcode\n with open(BENCHMARK_ENV_BUILT, \"wt\") as f:\n pass\n\n\ndef build_clean(\n checkout,\n build_cfg: BuildCfg,\n show_progress: bool = True,\n taskset_cores: Optional[str] = None,\n nice: Optional[str] = None,\n max_jobs: Optional[int] = None,\n) -> Optional[str]:\n MUTATION_LOCK.get()\n if check_unbuildable(checkout):\n print(f\"{checkout} is known to be unbuildable.\")\n return\n\n conda_env = None\n retcode = 1\n try:\n repo_path = os.path.join(BUILD_IN_PROGRESS_ROOT, f\"pytorch_{uuid.uuid4()}\")\n shutil.copytree(REF_REPO_ROOT, repo_path)\n conda_env = make_conda_env(build_cfg=build_cfg)\n retcode = _build(\n repo_path,\n checkout,\n \"install\",\n conda_env,\n build_cfg,\n show_progress,\n taskset_cores,\n nice,\n max_jobs,\n )\n\n return None if retcode else conda_env\n\n except KeyboardInterrupt:\n print(f\"Build stopped: {checkout}\")\n raise\n\n finally:\n if retcode and conda_env is not None and os.path.exists(conda_env):\n shutil.rmtree(conda_env)\n\n if os.path.exists(repo_path):\n shutil.rmtree(repo_path)\n","sub_path":"v2/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":9455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"74731744","text":"import numpy as np\nfrom matplotlib import pyplot as plt\n\nx = np.linspace(-3, 3, 200)\n\nfig, ax = plt.subplots()\n\nfor a in range(1, 11):\n y = x*(x + 2)*(x - 2)/a\n ax.plot(x, y)\n fig.savefig('мой график' + str(a))\n\nplt.show()","sub_path":"python/modeling/plottrial.py","file_name":"plottrial.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"188684122","text":"#Taher Dohadwala\n\n#imports\nimport speech_recognition as sr\nimport pyaudio,os,time\nfrom gtts import gTTS\nimport requests,json\nimport wolframalpha\n#------------------------------------------------------------------\n# Rules for adding new commands:\n# -Create a new method in AI Functions\n# -Add the keyword to the variable key_word in the __init__\n# -Add any api keys to the __init__\n# -Add the keyword and method to the commands dictionary\n\n\n#------------------------------------------------------\n#Things to do:\n# 1. use espeak as the voice\n# 2. add description from the weather api\n# 3. add a run() to constantly run the program waiting for keyword (NAME)\n# 4. use amazon cloud for what is command\n# 5. add spotify api\n# 6. 
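# build_clean() above hand-rolls a scratch directory (uuid name plus rmtree in
# a finally block). The standard library gives the same lifecycle with
# tempfile.TemporaryDirectory -- an alternative sketch, not the original code:
import os
import shutil
import tempfile

def with_scratch_repo(src_repo, build_fn):
    # the directory is deleted on exit even if build_fn raises
    with tempfile.TemporaryDirectory(prefix="pytorch_") as workdir:
        repo = shutil.copytree(src_repo, os.path.join(workdir, "repo"))
        return build_fn(repo)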
add various other command...\nclass speech_program():\n \n\n #-----------------------------------------------------------------\n #init()\n def __init__(self):\n self.key_words = [\"weather\",\"what is\",\"end\"]\n self.weather_key = \"APPID=714964c165954619971a23be696deb1a\"\n self.wolfram_key = \"8A86L7-EQW9P7T464\"\n \n #--------------------------------------------------------------------\n #AI functions-- 1 function = 1 command\n\n def weather(self):\n if \"in\" in self.save_audio:\n self.location = self.save_audio[self.save_audio.find(\"in\")+3:]\n else:\n self.location = \"Naperville\"\n response = requests.get(\"http://api.openweathermap.org/data/2.5/weather?q=\"+self.location+\",us&units=imperial&\"+self.weather_key).json() \n a2 = \"The Temperature in\"+self.location+\" is \"+str(response[\"main\"][\"temp\"])+\" degrees fahrenheit\"\n tts = gTTS(text = a2)\n tts.save(\"sample2.mp3\")\n\n os.system(\"sample2.mp3\")\n\n\n def what_is(self):\n client = wolframalpha.Client(self.wolfram_key)\n res = client.query(self.save_audio)\n a2 = self.save_audio+\" the answer is \"+str((next(res.results).text))\n tts = gTTS(text = a2)\n tts.save(\"sample2.mp3\")\n\n os.system(\"sample2.mp3\")\n\n\n def end(self):\n tts = gTTS(text = \"System shutting down\")\n tts.save(\"sample2.mp3\")\n\n os.system(\"sample2.mp3\")\n #--------------------------------------------------------------------\n # AI brain\n def listen(self): \n with sr.Microphone() as source:\n print(\"Say something!\")\n audio = sr.Recognizer().listen(source)\n while audio == None:\n audio = sr.Recognizer().listen(source)\n\n try:\n self.save_audio = sr.Recognizer().recognize_google(audio)\n print (\"You said: \"+ self.save_audio)\n\n except sr.UnknownValueError:\n print(\"Google Speech Recognition could not understand audio\")\n speech_program.listen(self)\n\n except sr.RequestError as e:\n\n print(\"Could not request results from Google Speech Recognition service; {0}\".format(e))\n\n def find_keyword(self):\n #Checks if a key_word is in user said sentence\n for x in range(len(self.key_words)):\n if self.key_words[x] in self.save_audio:\n self.key_word = self.key_words[x]\n print (\"keyword finder worked\")\n break\n ############################# \n commands = {\"weather\":weather,\n \"what is\":what_is,\n \"end\":end}\n #############################\n \n def compute(self):\n\n # using key_word - made in find_keyword\n # matches the key_word to respective key and executes the key value(AI function)\n for x in self.commands:\n if self.key_word in self.save_audio:\n self.commands.get(self.key_word)(self)\n print (\"reached command\")\n break\n#---------------------------------------------------------------------------------------\n\n\ntester = speech_program()\n\ntester.listen()\ntester.find_keyword()\ntester.compute()\ninput\n \n \n","sub_path":"AI-Project/Project Testing/speech_program3.0.py","file_name":"speech_program3.0.py","file_ext":"py","file_size_in_byte":3944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"561360875","text":"# Import everything needed to edit video clips \nfrom moviepy.editor import *\n\nimg = ['output-raspberry-3.jpg']\nclips = [ImageClip(m).set_duration(2)\n for m in img]\nclip1 = concatenate_videoclips(clips, method=\"compose\")\n# loading video dsa gfg intro video \n#clip1 = VideoFileClip(\"output-raspberry-3.mp4\")\nclip2 = VideoFileClip(\"test1.mp4\")\nclip3 = VideoFileClip(\"test2.mp4\")\n#clip4 = 
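# speech_program above dispatches on a class-level dict that maps keywords to
# plain (unbound) functions and calls them as self.commands.get(key)(self).
# The same pattern in a standalone, runnable form (handlers are placeholders):
class Dispatcher:
    def handle_weather(self):
        return "weather handler"

    def handle_end(self):
        return "end handler"

    # defined inside the class body, so the names above are in scope
    commands = {"weather": handle_weather, "end": handle_end}

    def dispatch(self, sentence):
        for key, handler in self.commands.items():
            if key in sentence:
                return handler(self)  # plain function: pass self explicitly
        return None

print(Dispatcher().dispatch("what is the weather in Naperville"))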
VideoFileClip(\"raspberry-influx-temperature-ok-recording-data.mp4\")\n\n\n# speedup clip2\n#clip2_speed= clip2.fx(vfx.speedx, 2)\nclip3_speed= clip3.fx(vfx.speedx, 2)\n\nfinal = concatenate_videoclips([clip1, clip2, clip3_speed], method=\"compose\") \n\n#concat_clip = concatenate_videoclips(clips, method=\"compose\")\nfinal.write_videofile(\"output-rasp-3.mp4\", fps=24)\n","sub_path":"concatenate.py","file_name":"concatenate.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"30247660","text":"\"\"\" Assignment two in the Cteate a Blockchain with Python course. \"\"\"\n\n\nNAMES = list(('Aegir', 'Rosa', 'Hafrun', 'Hronn', 'Oli', 'Thorvaldur', 'Ada'))\n\n\ndef names_is_empty():\n \"\"\" Returns if the NAMES list is empty or not. \"\"\"\n return len(NAMES) < 1\n\n\nprint('Names longer than 5 - contain \\'n\\' or \\'N\\'?')\n\nfor name in NAMES:\n is_n = False\n if len(name) > 5:\n temp = name.lower()\n for letter in temp:\n if letter == 'n':\n is_n = True\n if is_n:\n print('YES - %s' % name)\n else:\n print('NO - %s' % name)\n\nprint('-' * 40)\n\nwhile not names_is_empty():\n print(NAMES.pop())\n","sub_path":"udemy_blockchain/assignment02/two.py","file_name":"two.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"15185487","text":"#!/usr/bin/python\n\nimport os\nimport gtk\nimport pygtk\n\nclass ImageView:\n def __init__(self):\n #self.aspect_frame = gtk.Frame(None)\n\n self.scrolledwindow = gtk.ScrolledWindow()\n self.scrolledwindow.set_policy(gtk.POLICY_NEVER,gtk.POLICY_NEVER)\n\n self.image = gtk.Image()\n self.pixbuf = self.image.get_pixbuf()\n self.image.connect('expose-event', self.on_image_resize, self.scrolledwindow)\n\n self.scrolledwindow.add(self.image)\n\n #self.aspect_frame.add(self.scrolledwindow)\n\n def on_image_resize(self, widget, event, window):\n allocation = self.scrolledwindow.get_allocation()\n\n x, y = self.scale(self.pixbuf.get_width(), self.pixbuf.get_height(), allocation.width, allocation.height)\n pixbuf = self.pixbuf.scale_simple(x, y, gtk.gdk.INTERP_BILINEAR)\n self.image.set_from_pixbuf(pixbuf)\n\n def change_path(self, path):\n self.directory = path\n self.photos = os.listdir(path)\n self.photos.sort()\n self.position = 0\n self.change_photo(None, 0)\n\n def change_photo(self, button, direction):\n self.position = (self.position + direction) % len(self.photos)\n\n self.image.set_from_file(os.path.join(self.directory, self.photos[self.position]))\n self.pixbuf = self.image.get_pixbuf()\n #ratio = self.find_ratio(self.image.get_allocation().width, self.image.get_allocation().height)\n #self.aspect_frame.set(0.5, 0.5, ratio, False)\n\n def get_widget(self):\n return self.scrolledwindow#self.aspect_frame\n\n def scale(self, w, h, x, y, maximum=True):\n nw = y * w / h\n nh = x * h / w\n if maximum ^ (nw >= x):\n return nw or 1, y\n return x, nh or 1\n\n def find_ratio(self, a, b):\n if b == 0:\n return a\n return self.find_ratio(b, a % b)\n\n\n\nif __name__ == '__main__':\n window = gtk.Window()\n\n photo = ImageView()\n photo.change_path('/home/ben/Pictures/Testimages')\n\n window.add(photo.get_widget())\n window.show_all()\n gtk.main()\n","sub_path":"ImageView.py","file_name":"ImageView.py","file_ext":"py","file_size_in_byte":2067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"66841460","text":"from 
django.conf.urls import patterns, include, url\nfrom Scheduler.forms import SchedulesForm\nfrom Indigo7.views import SimpleViewListView, SimpleViewDetailView, SimpleViewCreateView, SimpleViewDeleteView, SimpleViewUpdateView\nfrom Scheduler.models import Schedules\n\n\nurlpatterns = patterns('Scheduler.views', \n url(r'^scheduleddays/', include('Scheduler.urlconf.scheduleddays')),\n \n url(r'^$',\n SimpleViewListView.as_view(model=Schedules), \n name='schedules_list'),\n \n url(r'^create/$',\n SimpleViewCreateView.as_view(model=Schedules, \n form_class=SchedulesForm, \n success_url='schedules_list'), \n name='create_schedules'), \n \n url(r'^update/(?P\\d+)/$',\n SimpleViewUpdateView.as_view(model=Schedules, \n form_class=SchedulesForm, \n success_url='schedules_list'), \n name='update_schedules'), \n\n url(r'^delete/(?P\\d+)/$',\n SimpleViewDeleteView.as_view(model=Schedules, \n success_url='schedules_list'), \n name='delete_schedules'), \n)","sub_path":"Scheduler/urlconf/schedules.py","file_name":"schedules.py","file_ext":"py","file_size_in_byte":1295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"627083366","text":"import tkinter\nimport random\nimport time\nimport pickle\n\n\n\nrunGPIO = False\ntry:\n import RPi.GPIO as GPIO\n runGPIO = True\n print(\"imported GPIO library\")\nexcept:\n print(\"cannot connect to GPIO\")\n \nimport time\n\n\nclass CanvasController:\n def __init__(self, importedGPIO):\n self.motor = Motors(18,33, importedGPIO)\n self.root = tkinter.Tk()\n self.canvas = tkinter.Canvas(self.root,bg=(\"white\"), height= 100, width=100)#9000,width=1200)\n self.root.grid()\n self.canvas.pack()\n self.get_commands()\n\n \n \n def get_commands(self): \n self.root.bind(\"\", self.process_key)\n\n \n def process_key(self, event):\n \n if event.keysym == \"Up\":\n self.motor.y_speed=1\n \n elif event.keysym == \"Down\":\n self.motor.y_speed=-1\n\n elif event.keysym == \"Right\":\n self.motor.x_speed=1\n \n\n elif event.keysym == \"Left\":\n self.motor.x_speed=-1\n \n elif event.keysym == \"space\":\n self.motor.x_speed=0\n self.motor.y_speed=0\n \n elif event.keysym == \"BackSpace\":\n self.motor.destroy()\n else:\n print(\"unrecognized\", event.keysym)\n self.motor.move()\n\nclass Motors:\n def __init__(self, motorPin, steerPin, runGPIO = False):\n self.steerPin = steerPin\n self.motorPin = motorPin\n self.runGPIO = runGPIO\n self.x_speed = 0\n self.y_speed = 0\n \n if self.runGPIO:\n GPIO.setmode(GPIO.BOARD)\n\n # set up motor\n GPIO.setup(self.motorPin, GPIO.OUT)\n GPIO.output(self.motorPin, GPIO.LOW)\n self.motorPWM = GPIO.PWM(self.motorPin, 100) # Set Frequency\n self.motorPWM.start(0) # Set the starting Duty Cycle\n\n # set up steering\n GPIO.setup(self.steerPin, GPIO.OUT)\n GPIO.output(self.steerPin, GPIO.LOW)\n self.steerPWM = GPIO.PWM(self.steerPin, 100) # Set Frequency\n self.steerPWM.start(0) # Set the starting Duty Cycle\n print(\"setup complete\")\n\n \n def move(self):\n if self.runGPIO:\n print(\"moving\", self.x_speed, self.y_speed)\n if self.y_speed>0: \n self.motorPWM.ChangeDutyCycle(20)\n elif self.y_speed<0:\n self.motorPWM.ChangeDutyCycle(10)\n else:\n self.motorPWM.ChangeDutyCycle(0)\n \n if self.x_speed>0: \n self.steerPWM.ChangeDutyCycle(20)\n elif self.x_speed<0:\n self.steerPWM.ChangeDutyCycle(10)\n else:\n self.steerPWM.ChangeDutyCycle(0)\n\n \n def destroy(self):\n if self.runGPIO:\n self.steerPWM.stop()\n self.motorPWM.stop()\n GPIO.output(self.steerPin, GPIO.LOW)\n GPIO.output(self.motorPin, 
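# The schedules.py record above uses django.conf.urls.patterns(), which was
# deprecated in Django 1.8 and removed in 1.10. A sketch of the same URLconf
# in modern (Django 2.0+) style; it assumes the same project modules, and the
# regex group name `pk` is a guess, since the record's `(?P\d+)` groups lost
# their names in extraction (`pk` is the Django generic-view default):
from django.urls import include, path, re_path
from Indigo7.views import (SimpleViewListView, SimpleViewCreateView,
                           SimpleViewUpdateView, SimpleViewDeleteView)
from Scheduler.forms import SchedulesForm
from Scheduler.models import Schedules

urlpatterns = [
    path('scheduleddays/', include('Scheduler.urlconf.scheduleddays')),
    path('', SimpleViewListView.as_view(model=Schedules), name='schedules_list'),
    path('create/',
         SimpleViewCreateView.as_view(model=Schedules, form_class=SchedulesForm,
                                      success_url='schedules_list'),
         name='create_schedules'),
    re_path(r'^update/(?P<pk>\d+)/$',
            SimpleViewUpdateView.as_view(model=Schedules, form_class=SchedulesForm,
                                         success_url='schedules_list'),
            name='update_schedules'),
    re_path(r'^delete/(?P<pk>\d+)/$',
            SimpleViewDeleteView.as_view(model=Schedules,
                                         success_url='schedules_list'),
            name='delete_schedules'),
]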
GPIO.LOW)\n GPIO.cleanup()\n\n\n\n\nif __name__ == '__main__':\n myController = CanvasController(runGPIO)\n\n","sub_path":"simple_controller.py","file_name":"simple_controller.py","file_ext":"py","file_size_in_byte":3088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"646431130","text":"import pygame\nimport math\nimport random\n\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nBLUE = (0, 0, 255)\nGREEN = (0, 255, 0)\nRED = (255, 0, 0)\nWINDOW_WIDTH = 1500\nWINDOW_HEIGHT = 1000\nFONT_SIZE = 32\nTIME_DELAY = 3000\n\ndef main():\n pygame.init()\n font = pygame.font.Font('freesansbold.ttf', FONT_SIZE)\n screen = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))\n pygame.display.set_caption(\"Mouse Sensitivity Calibrator\")\n numTargets = 3\n rectList = []\n running = True\n completed = False\n accurateClicks = 0\n totalClicks = 0\n displayInstructions(screen, font, numTargets)\n pygame.display.update()\n setupTime = pygame.time.get_ticks()\n #pause 3 seconds before starting\n pygame.time.wait(TIME_DELAY)\n clearScreen(screen)\n #draw rectangles\n for i in range(0, numTargets):\n rect = generateRandRect()\n rectList.append(rect)\n pygame.draw.rect(screen, GREEN, rect, 0)\n pygame.display.update()\n start = pygame.time.get_ticks() - setupTime\n while running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n pygame.quit()\n elif not completed:\n displayTimer(screen, font)\n pos = pygame.mouse.get_pos()\n if event.type == pygame.MOUSEBUTTONUP:\n totalClicks += 1\n for rectangle in rectList:\n if rectangle.collidepoint(pos):\n accurateClicks += 1\n rectList.remove(rectangle)\n clearScreen(screen)\n for rect in rectList:\n pygame.draw.rect(screen, GREEN, rect, 0)\n pygame.display.update()\n numTargets -= 1\n if numTargets == 0:\n total_time = (pygame.time.get_ticks() - start)\n completed = True\n accuracy = (accurateClicks / totalClicks) * 100\n stopTimer(screen, font, total_time)\n displayAccuracy(screen, font, accuracy)\n else:\n continue\n \n#returns a random pygame.rect object\ndef generateRandRect():\n width = random.randint(50,100)\n height = width\n left = random.randint(0, WINDOW_WIDTH - width)\n top = random.randint(0, WINDOW_HEIGHT - height)\n\n return pygame.Rect(left, top, width, height)\n\ndef displayInstructions(screen, font, numTargets):\n text1 = font.render('Click on the ' + str(numTargets) + \" targets as fast as you can\", True, GREEN, BLACK)\n text1Rect = text1.get_rect()\n text1Rect.center = (WINDOW_WIDTH / 2, WINDOW_HEIGHT / 2)\n screen.blit(text1, text1Rect)\n \n text2 = font.render('Get ready...', True, GREEN, BLACK)\n text2Rect = text2.get_rect()\n text2Rect.center = (WINDOW_WIDTH / 2, WINDOW_HEIGHT / 2 + FONT_SIZE)\n screen.blit(text2, text2Rect)\n\n return\n\ndef displayTimer(screen, font):\n pygame.draw.rect(screen, BLACK, (WINDOW_WIDTH-100,0,100,50))\n text = font.render(\"%d\"%(pygame.time.get_ticks() - TIME_DELAY), True, WHITE, BLACK)\n text_rect = text.get_rect()\n text_rect.center = (WINDOW_WIDTH-text_rect.width//2, 30)\n screen.blit(text, text_rect)\n pygame.display.update()\n \n return\n\ndef stopTimer(screen, font, time):\n pygame.draw.rect(screen, BLACK, (WINDOW_WIDTH-100,0,100,50))\n text = font.render(str(time), True, WHITE, BLACK)\n text_rect = text.get_rect()\n text_rect.center = (WINDOW_WIDTH-text_rect.width//2, 30)\n screen.blit(text, text_rect)\n pygame.display.update()\n\n return\n\ndef displayAccuracy(screen, font, accuracy):\n text = font.render(\"Accuracy: 
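# The Motors class that ends above drives both pins with RPi.GPIO software
# PWM at 100 Hz and steers by switching duty cycles (0/10/20). That lifecycle
# in isolation, as a sketch that runs only on a Raspberry Pi with RPi.GPIO
# installed (BOARD pin 18 is simply the pin the record uses):
import time
import RPi.GPIO as GPIO

PIN = 18
GPIO.setmode(GPIO.BOARD)
GPIO.setup(PIN, GPIO.OUT)
pwm = GPIO.PWM(PIN, 100)  # 100 Hz software PWM, as in Motors.__init__
pwm.start(0)              # begin at 0% duty cycle
try:
    for duty in (10, 20, 0):  # the three duty cycles Motors.move() toggles
        pwm.ChangeDutyCycle(duty)
        time.sleep(1)
finally:
    pwm.stop()
    GPIO.output(PIN, GPIO.LOW)
    GPIO.cleanup()  # always release the pins, mirroring Motors.destroy()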
{:.2f}%\".format(accuracy), True, GREEN, BLACK)\n textRect = text.get_rect()\n textRect.center = (WINDOW_WIDTH / 2, WINDOW_HEIGHT / 2)\n screen.blit(text, textRect)\n pygame.display.update()\n\ndef clearScreen(screen):\n screen.fill(BLACK)\n\n return\n\nmain()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"494321939","text":"\"\"\"\nPURPOSE:\n\nDESCRIPTION:\n\"\"\"\n\n__pgmname__ = \"SurveyMU\"\n__author__ = \"AJ Reynolds\"\n__email__ = \"ar380v@att.com\"\n\n__maintainer__ = __author__\n\nfrom collections import OrderedDict\nfrom datetime import datetime\nfrom jetCore import logger\nfrom jetCore.errorhandler import errorHandler\nfrom jetCore.db.tables import dbResponseGroup\nfrom jetCore.jetObject.baseobject import *\n\nlog = logger.getLogger(__pgmname__)\n\n\nclass SurveyMU(jetObject):\n def __init__(self, parent=None, **kwargs):\n super(SurveyMU, self).__init__()\n self.ResponseGrp_ID = None\n self.Survey_ID = None\n self.RG_Steward = None\n self.RG_Create_Date = None\n self.RG_Purpose = None\n self.RG_Target_Date = None\n self.RG_ActualEnd_Date = None\n self.Appl_Idnt = None\n\n self._fieldMap = OrderedDict()\n self._fieldMap['ResponseGrp_ID'] = 'ID'\n self._fieldMap['Survey_ID'] = 'Survey'\n self._fieldMap['RG_Steward'] = 'Created By'\n self._fieldMap['RG_Create_Date'] = 'Created On'\n self._fieldMap['RG_Purpose'] = 'Purpose'\n self._key = 'ResponseGrp_ID'\n self._value = 'Survey_ID'\n self._dbResponseGroup = None\n self.loadAttr(dbResponseGroup, **kwargs)\n self.parent = parent\n return\n\n @property\n def URL(self):\n if self.ResponseGrp_ID is None:\n return None\n return 'https://sre.it.att.com:8443/jetonline/survey.xhtml?responseGroupID={}'.format(self.ResponseGrp_ID)\n\n def add(self):\n from jetPy.personnel import userID\n if self.RG_Steward is None:\n self.RG_Steward = userID\n self._dbResponseGroup = dbResponseGroup(Survey_ID=self.Survey_ID,\n RG_Steward=self.RG_Steward,\n RG_Create_Date=datetime.utcnow(),\n RG_Purpose=self.RG_Purpose,\n RG_Target_Date=self.RG_Target_Date,\n Appl_Idnt=self.Appl_Idnt)\n _session = self.session()\n try:\n _session.add(self._dbResponseGroup)\n _session.commit()\n self.updAttr(dbResponseGroup, **self._dbResponseGroup.as_dict())\n _session.flush()\n return self\n except:\n _session.rollback()\n errorHandler()\n return False\n\n def copy(self):\n return False\n\n def delete(self):\n if self._dbResponseGroup is None and self.get() is None:\n return False\n _session = self.session(self._dbResponseGroup)\n try:\n _session.delete(self._dbResponseGroup)\n _session.commit()\n _session.flush()\n self._dbResponseGroup = None\n except:\n _session.rollback()\n errorHandler()\n return\n\n def get(self):\n _session = self.session(self._dbResponseGroup)\n if self._dbResponseGroup is None:\n try:\n self._dbResponseGroup = (_session.query(dbResponseGroup)\n .filter(dbResponseGroup.ResponseGrp_ID == self.ResponseGrp_ID)\n .first())\n if self._dbResponseGroup is None:\n return None\n except:\n _session.rollback()\n errorHandler()\n return self\n self.updAttr(dbResponseGroup, **self._dbResponseGroup.as_dict())\n return self\n\n def update(self, field, value):\n if self._dbResponseGroup is None and self.get() is None:\n return False\n _session = self.session(self._dbResponseGroup)\n try:\n setattr(self, field, value)\n setattr(self._dbResponseGroup, field, decode(value))\n _session.commit()\n _session.flush()\n except:\n 
_session.rollback()\n errorHandler()\n return self\n","sub_path":"jetCore/jetObject/survey.py","file_name":"survey.py","file_ext":"py","file_size_in_byte":4023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"528310620","text":"#The parse function takes as input the filepath of the file (\"Prescription.txt\") \n#and produces as output a dictionary\n\ndef parse(filepath):\n contents = open(filepath, 'r')\n lines = contents.readlines()\n doctors = {}\n for line in lines:\n if line[0:9] == 'Physician':\n if line[11:-1] not in doctors:\n doctors = {line[11:-1]}\n doctor = line[11:-1]\n if line[0:11] == 'Appointment':\n appts = {line[18:-1]}\n appt = line[18:-1]\n if line[0:10] == 'Medication':\n meds = {'Prescribed Medications': line[12:-1]}\n if line[0:4] == 'Dose':\n dosage = {'Dose': line[6:-1]}\n if line[0:10] == 'Directions':\n instructions = {'Directions': line[12:-1]}\n if line[0:8] == 'Pharmacy':\n pharm = {'Pharmacy': line[10:-1]}\n if line[0:7] == 'Address':\n loc = {'Address': line[9:-1]}\n if line[0:7] == 'Pick-up':\n pickup = {'Pick-up Date': line[14:]}\n appts = {appt: [meds, dosage, instructions, pharm, loc, pickup]}\n doctors = {doctor: appts}\n return doctors\n\n\n\n#Below uses the smtplib and email modules to create and send email notifications \n\nimport smtplib\nfrom email.mime.multipart import MIMEMultipart\n\nprescription = parse(filepath)\n\nemail_msg = MIMEMultipart()\nemail_msg['From'] = 'from email'\n\n#If sending a text message notification instead of an email, then 'receiving email' \n#should be the mobile service provider's email to SMS Gateway\n #e.g., '10-digit phone number'@txt.att.net; '10-digit phone number'@vtext.com \n \nemail_msg['To'] = 'receiving email'\nemail_msg['Subject'] = 'subject header'\n\n#An example of what would go in between the single quotes of msg:\n #prescription['Dr. Rodwell']['6/30/2015'][3]['Pharmacy']\n #prescription['Dr. Rodwell']['6/30/2015'][4]['Address']\n #prescription['Dr. 
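# Every mutator in SurveyMU above repeats the same commit-else-rollback
# block. The usual way to factor that out is a context manager; a generic
# SQLAlchemy-style sketch (session_scope is hypothetical, not part of the
# jetCore code above):
from contextlib import contextmanager

@contextmanager
def session_scope(session_factory):
    """Commit on success, roll back on any exception, always close."""
    session = session_factory()
    try:
        yield session
        session.commit()
    except Exception:
        session.rollback()
        raise
    finally:
        session.close()

# usage sketch, assuming a configured SQLAlchemy sessionmaker named Session:
# with session_scope(Session) as s:
#     s.add(some_row)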
Rodwell']['6/30/2015'][5]['Pick-up Date']\n\nmsg = ''\n\nmail = smtplib.SMTP('smtp.gmail.com', 'port') #email service provider and port number\nmail.ehlo_or_helo_if_needed() #greet the smtp server\nmail.starttls() #create tls connection\nmail.login('from email', 'password') #login with email credentials\nmail.sendmail('from email', 'receiving email', msg) #send msg to recipient\n\nmail.close() #logout\n\n","sub_path":"parse_email_text.py","file_name":"parse_email_text.py","file_ext":"py","file_size_in_byte":2302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"404839197","text":"from sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy import Column, Integer, String, DateTime\nfrom sqlalchemy import create_engine\n\nimport datetime\n\n\nengine = create_engine('sqlite:///db.sqlite3')\nSession = sessionmaker(bind=engine)\nsession = Session()\nBase = declarative_base()\n\n\nclass Photo(Base):\n __tablename__ = 'photo'\n id = Column(Integer, primary_key=True)\n vk_id = Column(\"VK photo id \", Integer, unique=True, nullable=False)\n link = Column(\"link\", String(500), nullable=False)\n created_on = Column(\n \"Created on\",\n DateTime,\n default=datetime.datetime.now,\n nullable=False)\n changed_on = Column(\n \"last updated\",\n DateTime,\n default=datetime.datetime.now,\n onupdate=datetime.datetime.now,\n nullable=False\n )\n\n def __repr__(self):\n return \"\".format(self.id, self.vk_id)\n","sub_path":"trunin/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"116294177","text":"import cv2\nimport os\n\ndef assure_path_exists(path):\n dir = os.path.dirname(path)\n if not os.path.exists(dir):\n os.makedirs(dir)\n \nvid_cam = cv2.VideoCapture(1)\nface_detector = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\nassure_path_exists(\"dataset/front dataset/\")\n\nwhile(True):\n name = input(\"Siapa? \")\n count = 0\n if (name == \"q\"):\n break\n ids = input(\"id? \")\n while(True): \n _, image_frame = vid_cam.read()\n gray = cv2.cvtColor(image_frame, cv2.COLOR_BGR2GRAY)\n faces = face_detector.detectMultiScale(gray, 1.3, 5)\n\n for (x,y,w,h) in faces:\n cv2.rectangle(image_frame,(x,y),(x+w,y+h), (255,0,0), 2)\n count += 1\n cv2.imwrite(\"dataset/front dataset/\" + name + '.' + ids + '.' 
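# parse() in the parse_email_text record above rebinds `doctors` and `appts`
# wholesale on every prescription line (and `{x}` literals build sets, not
# dicts), so only the last physician/appointment survives. A corrected sketch
# of the same prefix-driven parser, assuming well-formed 'Label: value' lines
# (which the original's fixed slice offsets such as line[18:] suggest):
def parse(filepath):
    """Collect prescriptions as {doctor: {appointment: {label: value}}}."""
    doctors = {}
    doctor = appt = None
    with open(filepath) as fh:
        for line in fh:
            label, sep, value = line.partition(':')
            if not sep:
                continue
            label, value = label.strip(), value.strip()
            if label.startswith('Physician'):
                doctor = value
                doctors.setdefault(doctor, {})
            elif label.startswith('Appointment') and doctor is not None:
                appt = value
                doctors[doctor][appt] = {}
            elif doctor is not None and appt is not None:
                doctors[doctor][appt][label] = value
    return doctors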
+ str(count) + \".jpg\", gray[y:y+h,x:x+w])\n cv2.imshow('frame', image_frame)\n cv2.waitKey(100)\n print (count)\n if(count > 39):\n break\n \n \nvid_cam.release()\ncv2.destroyAllWindows()\n","sub_path":"face_datasets_front.py","file_name":"face_datasets_front.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"362273991","text":"import traceback\n\nfrom ckan_cloud_operator import logs\n\nimport ckan_cloud_operator.routers.routes.manager as routes_manager\n\n\ndef _add_route(config, domains, route, enable_ssl_redirect):\n route_name = routes_manager.get_name(route)\n logs.info(f'adding route to nginx config: {route_name}')\n logs.debug_verbose(config=config, domains=domains, route=route, enable_ssl_redirect=enable_ssl_redirect)\n backend_url = routes_manager.get_backend_url(route)\n frontend_hostname = routes_manager.get_frontend_hostname(route)\n print(f'F/B = {frontend_hostname} {backend_url}')\n root_domain, sub_domain = routes_manager.get_domain_parts(route)\n domains.setdefault(root_domain, []).append(sub_domain)\n # if route['spec'].get('extra-no-dns-subdomains'):\n # extra_hostnames = ',' + ','.join([f'{s}.{root_domain}' for s in route['spec']['extra-no-dns-subdomains']])\n # else:\n extra_hostnames = ''\n logs.debug_verbose(route_name=route_name, backend_url=backend_url, frontend_hostname=frontend_hostname, root_domain=root_domain,\n sub_domain=sub_domain, domains=domains, extra_hostnames=extra_hostnames)\n if backend_url:\n raise NotImplementedError()\n\n\ndef get(routes, letsencrypt_cloudflare_email, enable_access_log=False, wildcard_ssl_domain=None, external_domains=False, dns_provider=None, force=False):\n assert dns_provider == 'cloudflare'\n logs.info('Generating nginx configuration', routes_len=len(routes) if routes else 0,\n letsencrypt_cloudflare_email=letsencrypt_cloudflare_email, enable_access_log=enable_access_log,\n wildcard_ssl_domain=wildcard_ssl_domain, external_domains=external_domains)\n config = {}\n domains = {}\n enable_ssl_redirect = True\n logs.info('Adding routes')\n i = 0\n errors = 0\n for route in routes:\n try:\n _add_route(config, domains, route, enable_ssl_redirect)\n i += 1\n except Exception as e:\n if force:\n logs.error(traceback.format_exc())\n logs.error(str(e))\n errors += 1\n else:\n raise\n logs.info(f'Added {i} routes')\n if errors > 0:\n logs.warning(f'Encountered {errors} errors')\n return config\n","sub_path":"ckan_cloud_operator/routers/nginx/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"522867970","text":"import random\n\nDEBUG = True\n\nPAGES = lambda n: [i for i in range(n)]\n\nMEM = lambda n: {\n 'fifo': [i for i in range(n)],\n 'rand': [i for i in range(n)],\n 'lru': [(i, 0) for i in range(n)],\n 'approx_lru': [(i, 0) for i in range(n)],\n 'opt': [i for i in range(n)]\n}\n\nPAGE_ERRORS = {\n 'fifo': 0,\n 'rand': 0,\n 'lru': 0,\n 'approx_lru': 0,\n 'opt': 0\n}\n\n\ndef get_random_page(pages, last):\n take_local_page = random.randrange(0, 10) > 3\n if take_local_page:\n next = random.choice(pages[last:last+2])\n else:\n next = random.choice(pages)\n return next\n\n\ndef load_page(memories, page_num, alg, page_loads=None):\n mem = memories[alg.__name__]\n if page_loads is not None:\n alg(page_num, mem, page_loads=page_loads)\n else:\n alg(page_num, mem)\n\n\ndef fifo(page_num, mem):\n if page_num not in 
mem:\n PAGE_ERRORS['fifo'] += 1\n mem.pop(0)\n mem.append(page_num)\n\n\ndef rand(page_num, mem):\n if page_num not in mem:\n PAGE_ERRORS['rand'] += 1\n r = random.choice(mem)\n mem.pop(mem.index(r))\n mem.append(page_num)\n\n\ndef lru(page_num, mem):\n p = [i for i, time in mem]\n if page_num not in p:\n PAGE_ERRORS['lru'] += 1\n tmp = [k for k in mem]\n sort = sorted(tmp, key=lambda k: k[1], reverse=True)\n i = sort.pop(0)\n mem.pop(mem.index(i))\n mem.append((page_num, 0))\n else:\n f = list(filter(lambda k: k[0] == page_num, mem))[0]\n index = mem.index(f)\n mem[index] = (f[0], 0)\n for i in range(len(mem)):\n page = mem[i]\n mem[i] = (page[0], page[1]+1)\n #print(mem)\n #input()\n\n\ndef approx_lru(page_num, mem):\n p = [i for i, time in mem]\n if page_num not in p:\n PAGE_ERRORS['approx_lru'] += 1\n tmp = [k for k in mem]\n sort = sorted(tmp, key=lambda k: k[1])\n i = sort.pop(0)\n mem.pop(mem.index(i))\n for i in mem:\n mem[mem.index(i)] = (i[0], 0)\n mem.append((page_num, 1))\n else:\n f = list(filter(lambda k: k[0] == page_num, mem))[0]\n i = mem.index(f)\n mem[i] = (f[0], 1)\n\n\ndef generate_page_loads(pages, num):\n l = []\n seed = random.choice(pages)\n next = seed\n for i in range(num):\n next = get_random_page(pages, next)\n l.append(next)\n return l\n\n\ndef opt(page_num, mem, page_loads):\n if page_num not in mem:\n PAGE_ERRORS['opt'] += 1\n max_page_index = -1\n to_delete = None\n #print(mem)\n for i in mem:\n if i in page_loads:\n if page_loads.index(i) > max_page_index:\n max_page_index = page_loads.index(i)\n to_delete = i\n else:\n to_delete = i\n mem[mem.index(to_delete)] = page_num\n #print(MEM['opt'])\n #input()\n\n\nif __name__ == '__main__':\n d = 0\n for m in [3, 5, 10, 20, 30, 50, 100]:\n k = m * (2 + d)\n d += 1\n memory = MEM(m)\n pages = PAGES(k)\n page_loads = generate_page_loads(pages, 1000)\n i = 0\n #print(page_loads)\n for next in page_loads:\n load_page(memory, next, fifo)\n load_page(memory, next, rand)\n load_page(memory, next, lru)\n load_page(memory, next, approx_lru)\n i += 1\n load_page(memory, next, opt, page_loads=page_loads[i:])\n print (\"FIFO(%d, %d): %d errors\" % (m, k, PAGE_ERRORS['fifo']))\n print (\"RAND(%d, %d): %d errors\" % (m, k, PAGE_ERRORS['rand']))\n print (\"LRU(%d, %d): %d errors\" % (m, k, PAGE_ERRORS['lru']))\n print (\"APPROX LRU(%d, %d): %d errors\" % (m, k, PAGE_ERRORS['approx_lru']))\n print (\"OPT(%d, %d): %d errors\" % (m, k, PAGE_ERRORS['opt']))\n print (\"*********************************************\")\n for k, v in PAGE_ERRORS.items():\n PAGE_ERRORS[k] = 0\n","sub_path":"zad3/zad3.py","file_name":"zad3.py","file_ext":"py","file_size_in_byte":3920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"89501075","text":"import os\nimport unittest\nimport sys\nfrom PyQt5 import QtGui\nfrom PyQt5 import QtWidgets\n\n\"\"\"\nUnit tests for the QT GUI\n=========================\n\nIn order to run the tests, first install SasView and sasmodels to site-packages\nby running ``python setup.py install`` in both repositories.\n\nThe tests can be run with ``python GUITests.py``, or\n``python GUITests.py suiteName1 suiteName2 ...`` for a subset of tests.\n\nTo get more verbose console output (recommended), use ``python GUITests.py -v``\n\"\"\"\n\n# List of all suite names. 
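# The zad3 simulator above hand-rolls LRU with per-entry age counters. The
# standard-library shortcut is collections.OrderedDict: move_to_end() marks a
# hit as most recently used and popitem(last=False) evicts the least recently
# used. A self-contained equivalent of its fault counting:
from collections import OrderedDict

def lru_faults(frames, reference_string):
    """Count page faults for an LRU cache holding `frames` pages."""
    cache = OrderedDict()
    faults = 0
    for page in reference_string:
        if page in cache:
            cache.move_to_end(page)        # refresh recency on a hit
        else:
            faults += 1
            if len(cache) == frames:
                cache.popitem(last=False)  # evict least recently used
            cache[page] = True
    return faults

print(lru_faults(3, [1, 2, 3, 4, 1, 2, 5, 1, 2, 3, 4, 5]))  # -> 10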
Every time a new suite is added, its name should\n# also be added here\nALL_SUITES = [\n 'calculatorsSuite',\n 'mainSuite',\n 'fittingSuite',\n 'plottingSuite',\n 'utilitiesSuite',\n 'corfuncPerspectiveSuite',\n 'invariantPerspectiveSuite',\n 'inversionPerspectiveSuite',\n ]\n\n# Prepare the general QApplication instance\napp = QtWidgets.QApplication(sys.argv)\n\n# Main Window\nfrom MainWindow.UnitTesting import AboutBoxTest\nfrom MainWindow.UnitTesting import DataExplorerTest\nfrom MainWindow.UnitTesting import WelcomePanelTest\nfrom MainWindow.UnitTesting import DroppableDataLoadWidgetTest\nfrom MainWindow.UnitTesting import GuiManagerTest\nfrom MainWindow.UnitTesting import MainWindowTest\n\n## Plotting\nfrom Plotting.UnitTesting import AddTextTest\nfrom Plotting.UnitTesting import PlotHelperTest\nfrom Plotting.UnitTesting import WindowTitleTest\nfrom Plotting.UnitTesting import ScalePropertiesTest\nfrom Plotting.UnitTesting import SetGraphRangeTest\nfrom Plotting.UnitTesting import LinearFitTest\nfrom Plotting.UnitTesting import PlotPropertiesTest\nfrom Plotting.UnitTesting import PlotUtilitiesTest\nfrom Plotting.UnitTesting import ColorMapTest\nfrom Plotting.UnitTesting import BoxSumTest\nfrom Plotting.UnitTesting import SlicerModelTest\nfrom Plotting.UnitTesting import SlicerParametersTest\nfrom Plotting.UnitTesting import PlotterBaseTest\nfrom Plotting.UnitTesting import PlotterTest\nfrom Plotting.UnitTesting import Plotter2DTest\n\n# Calculators\nfrom Calculators.UnitTesting import KiessigCalculatorTest\nfrom Calculators.UnitTesting import DensityCalculatorTest\nfrom Calculators.UnitTesting import GenericScatteringCalculatorTest\nfrom Calculators.UnitTesting import SLDCalculatorTest\nfrom Calculators.UnitTesting import SlitSizeCalculatorTest\nfrom Calculators.UnitTesting import ResolutionCalculatorPanelTest\nfrom Calculators.UnitTesting import DataOperationUtilityTest\n\n# Utilities\nfrom Utilities.UnitTesting import GuiUtilsTest\nfrom Utilities.UnitTesting import SasviewLoggerTest\nfrom Utilities.UnitTesting import GridPanelTest\nfrom Utilities.UnitTesting import ModelEditorTest\nfrom Utilities.UnitTesting import PluginDefinitionTest\nfrom Utilities.UnitTesting import TabbedModelEditorTest\nfrom Utilities.UnitTesting import AddMultEditorTest\nfrom Utilities.UnitTesting import ReportDialogTest\nfrom Utilities.UnitTesting import FileConverterTest\n\n# Unit Testing\nfrom UnitTesting import TestUtilsTest\n\n# Perspectives\n# Fitting\nfrom Perspectives.Fitting.UnitTesting import FittingWidgetTest\nfrom Perspectives.Fitting.UnitTesting import FittingPerspectiveTest\nfrom Perspectives.Fitting.UnitTesting import FittingLogicTest\nfrom Perspectives.Fitting.UnitTesting import FittingUtilitiesTest\nfrom Perspectives.Fitting.UnitTesting import FitPageTest\nfrom Perspectives.Fitting.UnitTesting import FittingOptionsTest\nfrom Perspectives.Fitting.UnitTesting import MultiConstraintTest\nfrom Perspectives.Fitting.UnitTesting import ComplexConstraintTest\nfrom Perspectives.Fitting.UnitTesting import ConstraintWidgetTest\n\n# Invariant\nfrom Perspectives.Invariant.UnitTesting import InvariantPerspectiveTest\nfrom Perspectives.Invariant.UnitTesting import InvariantDetailsTest\n\n# Inversion\nfrom Perspectives.Inversion.UnitTesting import InversionPerspectiveTest\n\n# Corfunc\nfrom Perspectives.Corfunc.UnitTesting import CorfuncTest\n\ndef plottingSuite():\n suites = (\n # Plotting\n unittest.makeSuite(Plotter2DTest.Plotter2DTest, 'test'),\n unittest.makeSuite(PlotHelperTest.PlotHelperTest, 
'test'),\n unittest.makeSuite(AddTextTest.AddTextTest, 'test'),\n unittest.makeSuite(WindowTitleTest.WindowTitleTest, 'test'),\n unittest.makeSuite(ScalePropertiesTest.ScalePropertiesTest, 'test'),\n unittest.makeSuite(SetGraphRangeTest.SetGraphRangeTest, 'test'),\n unittest.makeSuite(LinearFitTest.LinearFitTest, 'test'),\n unittest.makeSuite(PlotPropertiesTest.PlotPropertiesTest, 'test'),\n unittest.makeSuite(PlotUtilitiesTest.PlotUtilitiesTest, 'test'),\n unittest.makeSuite(ColorMapTest.ColorMapTest, 'test'),\n unittest.makeSuite(BoxSumTest.BoxSumTest, 'test'),\n unittest.makeSuite(SlicerModelTest.SlicerModelTest, 'test'),\n unittest.makeSuite(SlicerParametersTest.SlicerParametersTest, 'test'),\n unittest.makeSuite(PlotterBaseTest.PlotterBaseTest, 'test'),\n unittest.makeSuite(PlotterTest.PlotterTest, 'test'),\n )\n return unittest.TestSuite(suites)\n\ndef mainSuite():\n suites = (\n # Main window\n unittest.makeSuite(DataExplorerTest.DataExplorerTest, 'test'),\n unittest.makeSuite(DroppableDataLoadWidgetTest.DroppableDataLoadWidgetTest, 'test'),\n unittest.makeSuite(MainWindowTest.MainWindowTest, 'test'),\n unittest.makeSuite(GuiManagerTest.GuiManagerTest, 'test'),\n unittest.makeSuite(AboutBoxTest.AboutBoxTest, 'test'),\n unittest.makeSuite(WelcomePanelTest.WelcomePanelTest, 'test'),\n )\n return unittest.TestSuite(suites)\n\ndef utilitiesSuite():\n suites = (\n ## Utilities\n unittest.makeSuite(TestUtilsTest.TestUtilsTest, 'test'),\n unittest.makeSuite(SasviewLoggerTest.SasviewLoggerTest, 'test'),\n unittest.makeSuite(GuiUtilsTest.GuiUtilsTest, 'test'),\n unittest.makeSuite(GuiUtilsTest.DoubleValidatorTest, 'test'),\n unittest.makeSuite(GuiUtilsTest.HashableStandardItemTest, 'test'),\n unittest.makeSuite(GridPanelTest.BatchOutputPanelTest, 'test'),\n unittest.makeSuite(ModelEditorTest.ModelEditorTest, 'test'),\n unittest.makeSuite(PluginDefinitionTest.PluginDefinitionTest, 'test'),\n unittest.makeSuite(TabbedModelEditorTest.TabbedModelEditorTest,'test'),\n unittest.makeSuite(AddMultEditorTest.AddMultEditorTest, 'test'),\n unittest.makeSuite(ReportDialogTest.ReportDialogTest, 'test'),\n unittest.makeSuite(FileConverterTest.FileConverterTest, 'test'),\n )\n return unittest.TestSuite(suites)\n\ndef calculatorsSuite():\n suites = (\n # Calculators\n unittest.makeSuite(KiessigCalculatorTest.KiessigCalculatorTest, 'test'),\n unittest.makeSuite(DensityCalculatorTest.DensityCalculatorTest, 'test'),\n unittest.makeSuite(GenericScatteringCalculatorTest.GenericScatteringCalculatorTest, 'test'),\n unittest.makeSuite(SLDCalculatorTest.SLDCalculatorTest, 'test'),\n unittest.makeSuite(SlitSizeCalculatorTest.SlitSizeCalculatorTest, 'test'),\n unittest.makeSuite(ResolutionCalculatorPanelTest.ResolutionCalculatorPanelTest, 'test'),\n unittest.makeSuite(DataOperationUtilityTest.DataOperationUtilityTest, 'test'),\n )\n return unittest.TestSuite(suites)\n\ndef fittingSuite():\n suites = (\n # Perspectives\n # Fitting\n unittest.makeSuite(FittingPerspectiveTest.FittingPerspectiveTest, 'test'),\n unittest.makeSuite(FittingWidgetTest.FittingWidgetTest, 'test'),\n unittest.makeSuite(FittingLogicTest.FittingLogicTest, 'test'),\n unittest.makeSuite(FittingUtilitiesTest.FittingUtilitiesTest, 'test'),\n unittest.makeSuite(FitPageTest.FitPageTest, 'test'),\n unittest.makeSuite(FittingOptionsTest.FittingOptionsTest, 'test'),\n unittest.makeSuite(MultiConstraintTest.MultiConstraintTest, 'test'),\n unittest.makeSuite(ConstraintWidgetTest.ConstraintWidgetTest, 'test'),\n 
unittest.makeSuite(ComplexConstraintTest.ComplexConstraintTest, 'test'),\n )\n return unittest.TestSuite(suites)\n\ndef perspectivesSuite():\n suites = (\n # Invariant\n unittest.makeSuite(InvariantPerspectiveTest.InvariantPerspectiveTest, 'test'),\n unittest.makeSuite(InvariantDetailsTest.InvariantDetailsTest, 'test'),\n # Inversion\n unittest.makeSuite(InversionPerspectiveTest.InversionTest, 'test'),\n # Corfunc\n unittest.makeSuite(CorfuncTest.CorfuncTest, 'test'),\n )\n return unittest.TestSuite(suites)\n\ndef invariantPerspectiveSuite():\n suites = (\n # Invariant only\n unittest.makeSuite(InvariantPerspectiveTest.InvariantPerspectiveTest, 'test'),\n unittest.makeSuite(InvariantDetailsTest.InvariantDetailsTest, 'test'),\n )\n return unittest.TestSuite(suites)\n\ndef corfuncPerspectiveSuite():\n suites = (\n # Corfunc only\n unittest.makeSuite(CorfuncTest.CorfuncTest, 'test'),\n )\n return unittest.TestSuite(suites)\n\ndef inversionPerspectiveSuite():\n suites = (\n # Inversion only\n unittest.makeSuite(InversionPerspectiveTest.InversionTest, 'test'),\n )\n return unittest.TestSuite(suites)\n\nif __name__ == \"__main__\":\n\n user_suites = ALL_SUITES\n # Check if user asked for specific suites:\n if len(sys.argv) > 1:\n user_suites = sys.argv[1:]\n errors = {}\n for suite in user_suites:\n # create the suite object from name\n try:\n\n suite_instance = globals()[suite]()\n result=unittest.TextTestResult(sys.stdout,True,True)\n print(\"\\nRunning %d test cases for %s\"%(suite_instance.countTestCases(), suite))\n result.buffer=True\n suite_instance.run(result)\n\n if not result.wasSuccessful():\n if len(result.errors) or len(result.failures):\n errors[suite] = (result.errors, result.failures)\n if len(result.errors):\n print(\"\\n============ Errors disovered ===================\")\n if len(result.failures):\n print(\"\\n============ Failures disovered =================\")\n else:\n print(\"\\nAll tests successful\")\n\n except KeyError as ex:\n print(\"Failure : %s \"%str(ex))\n print(\"ERROR: Incorrect suite name: %s \" % suite)\n pass\n\n if len(errors.keys())>0:\n for suite, errors in errors.items():\n for r in errors[0]:\n print(\"\\nSuite: %s had following errors:\\n %s : %s\"%(suite, r[0], r[1]))\n for r in errors[1]:\n print(\"\\nSuite: %s had following failures:\\n %s : %s\"%(suite, r[0], r[1]))\n print(\"=================================================\")\n print(\"Exiting with error\")\n os._exit(1)\n","sub_path":"src/sas/qtgui/GUITests.py","file_name":"GUITests.py","file_ext":"py","file_size_in_byte":11112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"426527105","text":"\n\nfrom xai.brain.wordbase.verbs._bunt import _BUNT\n\n#calss header\nclass _BUNTS(_BUNT, ):\n\tdef __init__(self,): \n\t\t_BUNT.__init__(self)\n\t\tself.name = \"BUNTS\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"bunt\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_bunts.py","file_name":"_bunts.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"588662545","text":"class Iterable_tool:\n @staticmethod #不接收实例变量和类变量\n def find_all(iterable, condition):\n for number in iterable:\n if not condition(number):\n # print(number._dict_)\n yield number\n @staticmethod\n def find_single(iterable,condition,number):\n for item in iterable:\n if condition(item,number):\n yield item\n\n @staticmethod\n def get_count(iterable,condition,number):\n 
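# GUITests.py above assembles every suite via unittest.makeSuite(), which was
# deprecated in Python 3.11 and removed in 3.13. The supported route goes
# through a TestLoader; a small sketch with a hypothetical stand-in test case:
import unittest

class MathTest(unittest.TestCase):
    def test_add(self):
        self.assertEqual(1 + 1, 2)

loader = unittest.TestLoader()
suite = unittest.TestSuite([
    # replaces unittest.makeSuite(MathTest, 'test')
    loader.loadTestsFromTestCase(MathTest),
])
result = unittest.TextTestRunner(verbosity=2).run(suite)
print('successful:', result.wasSuccessful())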
count = 0\n for item in iterable:\n if condition(item,number): #与参数构造调用函数\n count += 1\n return number,count","sub_path":"fancy_month01/day15_fancy/day15_note/common/iterable_tool.py","file_name":"iterable_tool.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"366829657","text":"titles = list()\r\n\r\n# read titles from file\r\n\r\nfilename = 'titles.txt'\r\nwith open (filename) as fin:\r\n for line in fin:\r\n titles.append(line.strip())\r\n\r\n\r\n# sort titles\r\n\r\ntitles.sort(key=len, reverse=True)\r\nprint(titles)\r\n\r\n# write sorted titles to file\r\n\r\nfilename = 'Descending Characters.txt'\r\nwith open(filename, 'w') as fout:\r\n for title in titles:\r\n fout.write(title + '\\n') ","sub_path":"Dots/Scripts/Descending Characters.py","file_name":"Descending Characters.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"609189461","text":"import telegram\nfrom database import push_note, get_note\nimport modules.extract as extract\n\nlock = 0\nmsg_filter = 0\nfilter_text = {}\nnotes_data = {}\n\n\ndef note_check(update, context):\n global notes_data\n res = update.message.text\n try:\n if res.startswith(\"#\") == True:\n\n chat_id = str(update.effective_chat.id)\n text = \"\"\n\n shrt = res[1:]\n\n chat_idd = chat_id[1:]\n n = get_note(chat_id=chat_idd, note_name=shrt)\n if n != -1:\n text = str(n[0])\n update.message.reply_text(text, disable_web_page_preview=True)\n return\n else:\n return\n except:\n return\n\n\ndef notes(update, context):\n global notes_data\n\n msg = update.message\n\n res = update.message.text.split(None, 3)\n chat_id = update.effective_chat.id\n\n text_1 = text_3 = text = \"\"\n\n try:\n text_1 = res[1]\n except:\n try:\n chat_idd = str(chat_id)[1:]\n user_id = str(msg.from_user.id)\n\n u = get_note(chat_id=chat_idd, all_name=1)\n\n if u == -1:\n raise\n\n text = \"Available notes -\\n\\n\"\n\n for i in u:\n text = text + \"• \" + i[0] + \"\\n\"\n text = text + \"\\nUse #notename to view the note\"\n\n update.message.reply_text(\n text=text, parse_mode=\"HTML\", disable_web_page_preview=True)\n return\n except:\n update.message.reply_text(\n text=\"Notes not available..\", parse_mode=\"HTML\", disable_web_page_preview=True)\n return\n\n try:\n text_2 = res[2]\n except:\n text_2 = \"\"\n\n m = extract.sudocheck(update, context)\n if m == 2:\n return\n # ss\n if text_1 == 'remove':\n if text_2 != \"\":\n chat_idd = str(chat_id)[1:]\n user_id = str(msg.from_user.id)\n\n u = push_note(chat_id=chat_idd, note_name=text_2, pop=1)\n if u == 1:\n text = \"Note #\" + text_2 + \" deleted !\"\n elif u == -2:\n text = \"Noet #\" + text_2 + \" does not exist !\"\n else:\n text = \"Error !\"\n\n elif text_1 == 'set':\n try:\n text_2 = res[2]\n except:\n update.message.reply_text(\"Note Name & Content not provided !\")\n return\n try:\n text_3 = res[3]\n except:\n update.message.reply_text(\"Note content not provided !\")\n return\n\n chat_idd = str(chat_id)\n chat_idd = chat_idd[1:]\n user_id = str(msg.from_user.id)\n\n push_note(chat_id=chat_idd, note_name=text_2,\n note=text_3, set_by=user_id)\n text = 'Note #' + str(text_2) + ' - \\n\"' + str(text_3) + '\"'\n\n else:\n text = \"Wrong format !\"\n\n 
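# Despite its name, Iterable_tool.find_all() above yields the items that FAIL
# the condition (note its `if not condition(number)`). The same helpers fall
# out of the standard library; a short sketch of the equivalents:
from itertools import filterfalse

numbers = [1, 4, 7, 10, 13]
is_even = lambda n: n % 2 == 0

# find_all as written (keeps items failing the predicate):
print(list(filterfalse(is_even, numbers)))         # [1, 7, 13]

# find_single / get_count with a two-argument condition:
equals = lambda item, target: item == target
print([n for n in numbers if equals(n, 7)])        # [7]
print(sum(1 for n in numbers if equals(n, 7)))     # 1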
update.message.reply_text(text)\n","sub_path":"modules/notes.py","file_name":"notes.py","file_ext":"py","file_size_in_byte":2894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"188018220","text":"'''\nGiven a collection of distinct integers, return all possible permutations(排列).\n\nExample:\nInput: [1,2,3]\nOutput:\n[\n [1,2,3],\n [1,3,2],\n [2,1,3],\n [2,3,1],\n [3,1,2],\n [3,2,1]\n]\n'''\n\nclass Solution:\n # 耶!一次过!\n # 97.59%\n def permute1(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[List[int]]\n \"\"\"\n if len(nums) == 0: return []\n if len(nums) == 1: return [nums]\n if len(nums) == 2: return [[nums[0], nums[1]],[nums[1], nums[0]]]\n res = []\n for i in range(len(nums)):\n cur = nums[i]\n remain = nums.copy()\n remain.remove(cur)\n lastRes = self.permute(remain)\n for r in lastRes:\n res.append([cur]+r)\n\n return list(set(res))\n\n # 99.81%\n def permute2(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[List[int]]\n \"\"\"\n from itertools import permutations\n return list(permutations(nums, len(nums)))\n\n # 97.59%\n def permute(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[List[int]]\n \"\"\"\n if not nums:\n return []\n\n nums.sort()\n res = [nums[:]]\n n = len(nums)\n i = n - 1\n # 其实就是排序完求全排列\n '''\n 1.首先从最尾端开始往前寻找两个相邻元素,令第一元素为i-1,第二元素为i,且满足i-1 0:\n if nums[i - 1] < nums[i]: # 从后往前循环,找到第一个相邻的递增的位置,i是右边大的那一个,i-1则是左边较小的那个数\n j = n - 1\n while nums[j] < nums[i - 1]: # 再找到从后向前找到要交换的位置\n j -= 1\n nums[i - 1], nums[j] = nums[j], nums[i - 1]\n nums[i:] = sorted(nums[i:]) # 把之后的序列排序\n res.append(nums[:])\n i = n - 1\n else:\n i -= 1\n\n return res\n\nso = Solution()\nnums = [1,2,3,4,5]\nprint(so.permute(nums))\n\n","sub_path":"Algorithm01-50/46_Permutations.py","file_name":"46_Permutations.py","file_ext":"py","file_size_in_byte":2337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"468387605","text":"from p21 import divisors\n\ndef abundant(n):\n return n < sum(divisors(n))\n\nlimit = 28123 \nabundants = filter(abundant, range(1, limit+1))\n\ndef sumsOfAbundants():\n sums = [a+b for a in abundants\n for b in abundants\n if (b >= a) and (b <= limit - a)]\n return list(set(sums))\n \ndef solution():\n s = sum(range(1, limit+1))\n t = sum(sumsOfAbundants())\n return s - t\n\nif __name__ == \"__main__\":\n print(solution())\n \n","sub_path":"src/p23.py","file_name":"p23.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"10401920","text":"\"\"\"\nSingleton class containing all known personality traits/attributes.\n\"\"\"\n\n\nclass Personality(object):\n def __init__(self):\n \"\"\"\n Describes the five traits of the big five personality model.\n A value of 0 denotes e.g. a very introverted person, a value of 100 e.g. 
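# The third permute() in the 46_Permutations record above walks permutations
# with the classic next-permutation step. Isolated here as a standalone
# helper that advances a list to its next lexicographic permutation in place:
def next_permutation(a):
    """Return True and advance `a`; False (and re-sort) if `a` was the last."""
    i = len(a) - 2
    while i >= 0 and a[i] >= a[i + 1]:  # find rightmost ascent a[i] < a[i+1]
        i -= 1
    if i < 0:
        a.reverse()
        return False
    j = len(a) - 1
    while a[j] <= a[i]:                 # rightmost element exceeding a[i]
        j -= 1
    a[i], a[j] = a[j], a[i]
    a[i + 1:] = reversed(a[i + 1:])     # shortest suffix becomes ascending
    return True

perm = [1, 2, 3]
print(perm)
while next_permutation(perm):
    print(perm)  # [1,3,2], [2,1,3], [2,3,1], [3,1,2], [3,2,1]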
a very extroverted person.\n \"\"\"\n self.openness = 0\n self.conscientiousness = 0\n self.extraversion = 0\n self.agreeableness = 0\n self.neuroticism = 0\n self.dict_all_traits = {\"openness\": self.openness, \"conscientiousness\": self.conscientiousness,\n \"extraversion\": self.extraversion, \"agreeableness\": self.agreeableness,\n \"neuroticism\": self.neuroticism}\n\n # Saves the amount of changes/updates that were made to a trait.\n # This is a measure for how sure we are of that trait's prediction.\n self.changes_made = {\"openness\": 0, \"conscientiousness\": 0, \"extraversion\": 0, \"agreeableness\": 0,\n \"neuroticism\": 0}\n\n def update_dict(self):\n self.dict_all_traits = {\"openness\": self.openness, \"conscientiousness\": self.conscientiousness,\n \"extraversion\": self.extraversion, \"agreeableness\": self.agreeableness,\n \"neuroticism\": self.neuroticism}\n\n","sub_path":"PersonalInformationCollection/Personality.py","file_name":"Personality.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"12100188","text":"#!/usr/bin/env python3\n\n# 2020暑期排位5K_生日谜题\n# https://codeforces.com/group/H9K9zY8tcT/contest/286081/problem/K\n# bit-operation? 不适合python? 也能做\n# 还能用dfs来做!?\n\nt = input()\nl = list(map(int,input().split())) \n\nnob = 20 #as ai<10^5\nn = len(l)\ncounts = [sum([(1< 313 h-1 Mpc z = 0.087 -> 257 h-1 Mpc\n\n\n################################################################################\n#\n# OPEN FILES\n#\n################################################################################\n\n\ninfile = Table.read(in_filename, format='ascii.commented_header')\nmaskfile = Table.read(mask_filename, format='ascii.commented_header')\n\n\n################################################################################\n#\n# FILTER GALAXIES\n#\n################################################################################\n\n\n'''\ncoord_min_table, mask, ngrid = filter_galaxies(infile, maskfile, min_dist, max_dist, survey_name, True)\n\ntemp_outfile = open(\"filter_galaxies_output.pickle\", 'wb')\npickle.dump((coord_min_table, mask, ngrid), temp_outfile)\ntemp_outfile.close()\n'''\n\n\n\n################################################################################\n#\n# FIND VOIDS\n#\n################################################################################\n\n\ntemp_infile = open(\"filter_galaxies_output.pickle\", 'rb')\ncoord_min_table, mask, ngrid = pickle.load(temp_infile)\ntemp_infile.close()\n\n\nfind_voids(ngrid, min_dist, max_dist, coord_min_table, mask, out1_filename, out2_filename, survey_name)\n","sub_path":"SDSS_VoidFinder.py","file_name":"SDSS_VoidFinder.py","file_ext":"py","file_size_in_byte":2702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"526733096","text":"from math import sqrt\nfor n in range(1, 60):\n r_org = 2.0\n r = r_org\n for i in range(n):\n r = sqrt(r)\n for i in range(n):\n r = r ** 2\n print ('With {} times sqrt and then {} times **2, the number {} becomes: {:.16f}'.format(n,n,r_org,r))\n\n\n '''\n What this code is doing is take the square root of two and then squaring is again. 
As it does this, it goes through a for loop square rooting and resquaring up to the specified number of times.\n    However, each square root is rounded to a 53-bit float, so the rounding error compounds with every iteration; once r rounds to exactly 1.0, resquaring can never arrive back at our original value.\n    '''\n","sub_path":"homework/3/round_off_errors.py","file_name":"round_off_errors.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"350435438","text":"import sys\nD = {}\ninFile = open(sys.argv[1])\nouFile = open(sys.argv[1]+'.stop','w')\nouFile2 = open(sys.argv[1]+'.stop2','w')\nouFile3 = open(sys.argv[1]+'.not.stop','w')\nfor line in inFile:\n    fields = line.split(',')\n    pep = fields[2]\n    if pep[-1]=='K' or pep[-1]=='R':\n        uniprot=fields[9].split('|')[1]\n        ouFile3.write(pep+'\\t'+uniprot+'\\n')\n    elif pep==' Peptide':\n        D.setdefault(fields[2], 0)\n        D[fields[2]]+=1\ninFile.close()\n\n'''\ndef protein():\n    D2 = {}\n    inFile = open('/netshare1/home1/people/hansun/StopGainProteomics/2.uniprot/human_uniprot_sprot.fa')\n    while True:\n        line1 = inFile.readline().strip()\n        line2 = inFile.readline().strip()\n        if line1:\n            fields = line1.split('|')\n            name = fields[1]\n            D2.setdefault(line2,[])\n            D2[line2].append(name)\n        else:\n            break\n    inFile.close()\n    return D2\n\nD2=protein()\n\nfor k in D:\n    ouFile.write(k+'\\t'+str(D[k])+'\\n')\n    ouFile2.write(k+'\\t')\n    for x in D2:\n        if x[-len(k):]==k:\n            ouFile2.write('\\t'.join(D2[x])+'\\t')\n    ouFile2.write('\\n')\nouFile2.close()\n'''\n","sub_path":"Project/StopGainProteomics/8.omssa/1-stop.py","file_name":"1-stop.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"368101664","text":"# Program de citit ziare online\n# de pe mai multe surse precum\n# Adevarul\n# Program realizat de catre:\n# Olariu Alexandru-Razvan\n# Contact: ufcolonel@gmail.com\n\n\nimport Crawler,Scraper\nfrom datetime import datetime\nfrom tkinter import *\nfrom tkinter import ttk\n\n\n\ndef save_status():\n    global alegeri\n    global interest\n    alegeri = [option1.get(),option2.get(),option3.get(),\n               option4.get(),option5.get(),option6.get(),\n               option7.get(),option8.get(),option9.get()]\n    interest = [search.get()]\n    \ndef check():\n    for source in range(len(alegeri)):\n        https = alegeri[source]\n        if https.startswith(\"https://\"):\n            try:\n                https = Crawler.crawl(https,interest)\n            except Exception as error:\n                with open(\"Reports.txt\",\"w\") as f:\n                    f.write(str(error))\n            finally:\n                interest = Scraper.scrape(https)\n\n\ndef save():\n    alegeri = [option1.get(),option2.get(),option3.get(),\n               option4.get(),option5.get(),option6.get(),\n               option7.get(),option8.get(),option9.get()]\n    for source in range(len(alegeri)):\n        if alegeri[source].startswith(\"https://\"):\n            header = f\"Date din sursa {alegeri[source]} :\\n\"\n            with open(\"Salvate.txt\",\"a\") as g:\n                g.write(datetime.now().strftime(\"\\t%d/%m/%Y <---> %H:%M:%S\\n\"))\n                g.write(header)\n                g.write(interest)\n\n    \n\n# Graphical User Interface initialization - FINISHED\napp = Tk()\napp.title('Ziare online de Olariu Alexandru-Razvan')\napp.geometry('900x500')\napp.resizable(False,False)\n\n\n# Search\nttk.Label(app,text=\"Căutare după termeni:\").place(x=10,y=5)\nsearch = ttk.Entry(app)\nsearch.place(x=150,y=5,width=300,height=23)\n\n# Online News Variables\noption1, option2, option3 = StringVar(), StringVar(), StringVar()\noption4, option5, option6 = StringVar(), StringVar(), StringVar() \noption7, option8, 
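# The round_off_errors record above shows 2.0 drifting after n square roots
# and n squarings: every sqrt() result is rounded to a 53-bit float, and for
# n in the low fifties the chain of roots rounds all the way down to exactly
# 1.0, which squaring can never undo. Re-running the experiment with decimal
# makes the dependence on working precision explicit:
from decimal import Decimal, getcontext

def sqrt_square_cycle(n, precision):
    getcontext().prec = precision
    r = Decimal(2)
    for _ in range(n):
        r = r.sqrt()   # rounded to `precision` significant digits each time
    for _ in range(n):
        r = r * r
    return r

for prec in (16, 50):  # float-like precision vs. generous precision
    print(prec, sqrt_square_cycle(50, prec))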
option9 = StringVar(), StringVar(), StringVar() \n\n# Online News\n\nttk.Checkbutton(app,text=\"Ziarul Libertatea\",variable=option1,\n onvalue='https://www.libertatea.ro/',\n command=save_status).place(x=10,y=50)\nttk.Checkbutton(app,text=\"Ziarul Financiar\",variable=option2,\n onvalue='https://www.zf.ro/',\n command=save_status).place(x=10,y=100)\nttk.Checkbutton(app,text=\"Mediafax\",variable=option3,\n onvalue='https://www.mediafax.ro/',\n command=save_status).place(x=160,y=50)\nttk.Checkbutton(app,text=\"Evenimentul Zilei\",variable=option4,\n onvalue='https://evz.ro/',\n command=save_status).place(x=160,y=100)\nttk.Checkbutton(app,text=\"Ziarul Adevarul\",variable=option5,\n onvalue='https://adevarul.ro/cauta',\n command=save_status).place(x=310,y=50)\nttk.Checkbutton(app,text=\"Jurnalul Zilei\",variable=option6,\n onvalue='https://jurnalulnational.ro/',\n command=save_status).place(x=310,y=100)\nttk.Checkbutton(app,text=\"Digi24\",variable=option7,\n onvalue='https://www.digi24.ro/',\n command=save_status).place(x=470,y=50)\nttk.Checkbutton(app,text=\"Realitatea.NET\",variable=option8,\n onvalue='https://www.realitatea.net/',\n command=save_status).place(x=470,y=100)\n\n\n# Buttons\ncheck_button = ttk.Button(app,text=\"Caută\",command=check)\ncheck_button.place(x=470,y=4)\nsave_button = ttk.Button(app,text=\"Salvare\",command=save)\nsave_button.place(x=570,y=4)\n\n\n\n# Output\noutput = ttk.Notebook(app)\nziar1, ziar2, ziar3 = Text(output), Text(output), Text(output)\nziar4, ziar5, ziar6 = Text(output), Text(output), Text(output)\nziar7, ziar8, ziar9 = Text(output), Text(output), Text(output) \noutput.add(ziar1, text=\"Ziarul Libertatea\")\noutput.add(ziar2, text=\"Ziarul Financiar\")\noutput.add(ziar3, text=\"Gazeta Sporturilor\")\noutput.add(ziar4, text=\"Romania Libera\")\noutput.add(ziar5, text=\"Ziarul Adevarul\")\noutput.add(ziar6, text=\"Jurnalul Zilei\")\noutput.add(ziar7, text=\"Digi24\")\noutput.add(ziar8, text=\"Realitatea.NET\")\noutput.add(ziar9, text=\"Ajutor\")\noutput.place(x=0,y=140,width=710,height=360)\n\n\n# Progress TODO\nprogress_bar = ttk.Progressbar(app,orient=HORIZONTAL,length=185,mode='determinate')\nprogress_bar.place(x=711,y=477)\n\n# ADS\n\nad1_image = PhotoImage(file=\"ad1.png\")\nad1 = Label(app,image=ad1_image)\nad1.place(x=707,y=0)\nttk.Label(app,text=\"Reclama TA, AICI!\").place(x=765,y=231)\nad2_image = PhotoImage(file=\"ad2.png\")\nad2 = Label(app,image=ad2_image)\nad2.place(x=707,y=247)\n\n\n\napp.mainloop()","sub_path":"GUInterface.py","file_name":"GUInterface.py","file_ext":"py","file_size_in_byte":4710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"214647893","text":"import copy\nimport io\nimport sys\nimport warnings\nfrom typing import (\n List, Set, Dict, Any, Optional, Union, Tuple\n\n)\n\nfrom gelidum.collections import frozendict, frozenlist, frozenzet\nfrom gelidum.exceptions import FrozenException\nfrom gelidum.frozen import make_frozen_class, FrozenBase\nfrom gelidum.typing import OnFreezeFuncType, OnUpdateFuncType, T, FrozenType, FrozenList\nfrom gelidum.utils import isbuiltin\n\n\ndef freeze(\n obj: T,\n on_update: Union[str, OnUpdateFuncType] = \"exception\",\n on_freeze: Union[str, OnFreezeFuncType] = \"copy\",\n inplace: Optional[bool] = None,\n ) -> FrozenType:\n\n # inplace argument will be removed from freeze in the next major version (0.5.0)\n if isinstance(inplace, bool):\n warnings.warn(\n DeprecationWarning(\n \"Use of inplace is deprecated and will be removed in 
next major version (0.5.0)\"\n )\n )\n if inplace:\n on_freeze_func: OnFreezeFuncType = __on_freeze_func(on_freeze=\"inplace\")\n else:\n on_freeze_func: OnFreezeFuncType = __on_freeze_func(on_freeze=\"copy\")\n\n else:\n on_freeze_func: OnFreezeFuncType = __on_freeze_func(on_freeze=on_freeze)\n\n on_update_func: OnUpdateFuncType = __on_update_func(on_update=on_update)\n\n return __freeze(obj=obj, on_update=on_update_func, on_freeze=on_freeze_func)\n\n\ndef __freeze(obj: Any, on_update: OnUpdateFuncType,\n on_freeze: OnFreezeFuncType) -> Any:\n\n if isbuiltin(obj):\n return obj\n\n if isinstance(obj, FrozenBase):\n return obj\n\n class_name = type(obj).__name__\n freeze_func_name = f\"__freeze_{class_name}\"\n this_module = sys.modules[__name__]\n if hasattr(this_module, freeze_func_name):\n freeze_func = getattr(this_module, freeze_func_name)\n return freeze_func(obj, on_update=on_update, on_freeze=on_freeze)\n\n if isinstance(obj, object):\n return __freeze_object(obj, on_update=on_update, on_freeze=on_freeze)\n\n # Actually, this code is unreachable\n raise ValueError(f\"object of type {obj.__class__} not frozen\") # pragma: no cover\n\n\ndef __freeze_bytearray(obj: bytearray, *args, **kwargs) -> bytes: # noqa\n return bytes(obj)\n\n\ndef __freeze_dict(obj: Dict, on_update: OnUpdateFuncType,\n on_freeze: OnFreezeFuncType) -> frozendict:\n def freeze_func(item: Any) -> FrozenType:\n return freeze(item, on_update=on_update, on_freeze=on_freeze)\n return frozendict(obj, freeze_func=freeze_func)\n\n\ndef __freeze_list(obj: List, on_update: OnUpdateFuncType,\n on_freeze: OnFreezeFuncType) -> FrozenList:\n def freeze_func(item: Any) -> FrozenType:\n return freeze(item, on_update=on_update, on_freeze=on_freeze)\n return frozenlist(obj, freeze_func=freeze_func)\n\n\ndef __freeze_tuple(obj: Tuple, on_update: OnUpdateFuncType,\n on_freeze: OnFreezeFuncType) -> Tuple:\n return tuple(freeze(item, on_update=on_update, on_freeze=on_freeze)\n for item in obj)\n\n\ndef __freeze_set(obj: Set, on_update: OnUpdateFuncType,\n on_freeze: OnFreezeFuncType) -> frozenzet:\n def freeze_func(item: Any) -> FrozenType:\n return freeze(item, on_update=on_update, on_freeze=on_freeze)\n return frozenzet(obj, freeze_func=freeze_func)\n\n\ndef __freeze_TextIOWrapper(*args, **kwargs) -> None: # noqa\n raise io.UnsupportedOperation(\"Text file handlers can't be frozen\")\n\n\ndef __freeze_BufferedWriter(*args, **kwargs) -> None: # noqa\n raise io.UnsupportedOperation(\"Binary file handlers can't be frozen\")\n\n\ndef __freeze_object(obj: object, on_update: OnUpdateFuncType,\n on_freeze: OnFreezeFuncType) -> FrozenBase:\n\n if hasattr(obj.__class__, \"__slots__\"):\n raise FrozenException(\"gelidum does not support classes with __slots__\")\n\n frozen_obj = on_freeze(obj)\n for attr, value in frozen_obj.__dict__.items():\n attr_value = getattr(frozen_obj, attr)\n setattr(frozen_obj, attr, freeze(attr_value, on_update=on_update, on_freeze=on_freeze))\n\n frozen_class = make_frozen_class(\n klass=obj.__class__,\n attrs=list(obj.__dict__.keys()),\n on_update=on_update\n )\n frozen_obj.__class__ = frozen_class\n return frozen_obj\n\n\ndef __on_freeze_func(on_freeze: Union[str, OnFreezeFuncType]) -> OnFreezeFuncType:\n if isinstance(on_freeze, str):\n if on_freeze == \"copy\":\n return lambda obj: copy.deepcopy(obj)\n elif on_freeze == \"inplace\":\n return lambda obj: obj\n else:\n raise AttributeError(\n f\"Invalid value for on_freeze parameter, '{on_freeze}' found, \"\n f\"only 'copy' and 'inplace' are valid options if 
passed a string\"\n )\n\n elif callable(on_freeze):\n return on_freeze\n\n else:\n raise AttributeError(\n f\"Invalid value for on_freeze parameter, '{on_freeze}' found, \"\n f\"only 'copy', 'inplace' or a function are valid options\"\n )\n\n\ndef __on_update_exception(\n frozen_obj: FrozenBase, message: str, *args, **kwargs # noqa\n) -> None:\n raise FrozenException(message)\n\n\ndef __on_update_warning(\n frozen_obj: FrozenBase, message: str, *args, **kwargs # noqa\n) -> None:\n warnings.warn(message)\n\n\ndef __on_update_func(on_update: OnUpdateFuncType) -> OnUpdateFuncType:\n if isinstance(on_update, str):\n if on_update == \"exception\":\n return __on_update_exception\n elif on_update == \"warning\":\n return __on_update_warning\n elif on_update == \"nothing\":\n return lambda message, *args, **kwargs: None\n else:\n raise AttributeError(\n f\"Invalid value for on_update parameter, '{on_update}' found, \"\n f\"only 'exception', 'warning', and 'nothing' are valid options \"\n f\"if passed a string\"\n )\n\n elif callable(on_update):\n return on_update\n\n else:\n raise AttributeError(\n f\"Invalid value for on_update parameter, '{on_update}' found, \"\n f\"only 'exception', 'warning', 'nothing' or a function are \"\n f\"valid options\"\n )\n","sub_path":"gelidum/freeze.py","file_name":"freeze.py","file_ext":"py","file_size_in_byte":6215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"415403875","text":"import unittest\n\nfrom pesel_generator.exception import PeselGeneratorException\nfrom pesel_generator.pesel import population_description, pesel_generator\n\n\nclass TestPesel(unittest.TestCase):\n\n def test_population_description0(self):\n try:\n with population_description.PopulationDescription() as ep:\n ep.define_population_range(age_start=24, age_stop=25, female_quantity=1200, male_quantity=2400)\n ep.define_population_range(age_start=26, age_stop=28, female_quantity=1200, male_quantity=2400)\n ep.define_population_range(age_start=25, age_stop=26, female_quantity=1200, male_quantity=2400)\n except PeselGeneratorException:\n self.fail()\n\n def test_population_description1(self):\n with self.assertRaises(Exception):\n with population_description.PopulationDescription() as ep:\n ep.define_population_range(age_start=24, age_stop=25, female_quantity=1200, male_quantity=2400)\n ep.define_population_range(age_start=26, age_stop=28, female_quantity=1200, male_quantity=2400)\n ep.define_population_range(age_start=25, age_stop=27, female_quantity=1200, male_quantity=2400)\n\n def test_pesel_generator0(self):\n try:\n with population_description.PopulationDescription() as ep:\n ep.define_population_range(age_start=24, age_stop=25, female_quantity=1200, male_quantity=2400)\n ep.define_population_range(age_start=26, age_stop=28, female_quantity=1200, male_quantity=2400)\n with pesel_generator.PeselGenerator() as pg:\n _ = pg.gen_based_on_desc(ep)\n\n except PeselGeneratorException:\n self.fail()\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"pesel_generator/tests/pesel_generator_tests.py","file_name":"pesel_generator_tests.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"653692996","text":"# -*- coding: utf-8 -*-\n\"\"\"\n功能:本程序旨在实验出少女前线重装部队芯片强化的最优情况\n作者:史学超\n更新日期:2018.10.23\n现在已经实现了一个简单的模型,需要的是不断额往芯片池中添加芯片\n代码变得更简洁,生成的方案更多,也更加容易出现重复的地方\n\"\"\"\n# TODO(sxc): 添加图像化界面\n# TODO(sxc): 芯片数据要能够附带属性\n# TODO(sxc): 
重复结果的删除和结果的评分\n\nimport copy\n\n\nclass Stack:\n def __init__(self):\n self.items = []\n\n def push(self, item):\n self.items.append(item)\n\n def pop(self):\n return self.items.pop()\n\n def clear(self):\n del self.items[:]\n\n def empty(self):\n return self.size() == 0\n\n def size(self):\n return len(self.items)\n\n def top(self):\n return self.items[self.size() - 1]\n\n # this is my add\n def show(self):\n i = 1\n for x in self.items:\n print('this is stack no ', i)\n show_map(x)\n i = i + 1\n\n def rest(self):\n tmp = ''\n for x in self.items:\n tmp = tmp + str(x)\n return tmp\n\n\nmap_stack = Stack() # 用来记录安装芯片后的地图信息\nchip_stack = Stack() # 用来记录安装的芯片在芯片池中的起始序号\nChipList33 = [(0, 0), (0, 1), (0, 2),\n (1, 0), (1, 1), (1, 2)]\nChipList222 = [(0, 0), (0, 1),\n (1, 0), (1, 1),\n (2, 0), (2, 1)]\nChipList6 = [(0, 0), (0, 1), (0, 2), (0, 3), (0, 4), (0, 5)]\nMapLList = [[0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0]]\nChipPoolLists = [ChipList33, ChipList222, ChipList6]\nresultList = []\n\n\n# 用于在屏幕上输出map的图像结果\ndef show_map(ml):\n _MapLList = ml\n _resStr = 'MapInfo:\\n'\n _MapWidth = len(MapLList)\n for x in range(_MapWidth):\n for y in range(_MapWidth):\n if _MapLList[x][y] == -1:\n _resStr = _resStr + 'x' + ' '\n else:\n _resStr = _resStr + str(_MapLList[x][y]) + ' '\n _resStr = _resStr + '\\n'\n print(_resStr)\n\n\n# 传入的_chip是上一次插入的芯片序号\n# 返回0的时候是按预设情况正常结束,返回1则异常结束\ndef install_chip(_map, _chip=-1):\n # 用于中断递归的条件\n if map_stack.empty() and _chip == (len(ChipPoolLists) - 1):\n print('install_chip is completed')\n return\n if _chip != (len(ChipPoolLists) - 1):\n chip_start = _chip + 1 # 记录本次芯片池中插入芯片的起始序号\n maplist = copy.deepcopy(_map)\n _MapWidth = len(maplist)\n for x in range(_MapWidth):\n for y in range(_MapWidth):\n # 这边得到的xy是可插入点坐标,下面的Z是芯片池中的序号\n if maplist[x][y] == 0:\n for z in range(chip_start, len(ChipPoolLists)):\n chiplist = ChipPoolLists[z]\n if insertable(x, y, chiplist, maplist):\n no = map_stack.size() + 1 # 插入位置的数值,用来区分插入的是哪一块芯片\n maplist = inserted(x, y, chiplist, no, maplist)\n chip_start = 0\n # show_map(maplist)\n map_stack.push(maplist)\n chip_stack.push(z)\n # print('==============================')\n break\n else:\n continue\n if isfulll(maplist):\n resultList.append(maplist)\n show_map(maplist)\n print(\"+++有一个方案已经完成+++\")\n map_stack.pop()\n tempc1 = chip_stack.pop()\n if map_stack.empty():\n tempm1 = copy.deepcopy(MapLList)\n else:\n tempm1 = map_stack.top()\n install_chip(tempm1, tempc1)\n return\n\n\ndef insertable(x, y, _chiplist, _map):\n maplist = _map\n for g in _chiplist:\n xp, yp = g\n try:\n gno = maplist[x + xp][y + yp]\n except IndexError:\n return False\n finally:\n pass\n if gno != 0:\n return False\n return True\n\n\n# xy表示插入起始点的坐标,_chiplist表示插入芯片的类型,\n# _no表示插入的数值, _map表示被插入的map信息\n# 返回插入后的map\ndef inserted(x, y, _chiplist, _no, _map):\n maplist = copy.deepcopy(_map)\n for g in _chiplist:\n xp, yp = g\n maplist[x + xp][y + yp] = _no\n # print(_chiplist, 'is inserted')\n return maplist\n\n\ndef rotate90(_chiplist, _time=0):\n list_ = []\n if _time == 0:\n list_ = _chiplist\n return list_\n else:\n for t in range(_time):\n list_ = []\n for item in _chiplist:\n x, y = item\n list_.append((-y, x))\n return list_\n\n\n# 返回False为不满,代表未完全插入;返回True为满,已经完全插入.\ndef isfulll(_map):\n maplist = copy.deepcopy(_map)\n _MapWidth = len(maplist)\n for x in range(_MapWidth):\n for y in range(_MapWidth):\n if _map[x][y] == 0:\n return False\n return True\n\n\ndef writefile(_aimlist):\n 
with open('./resultList.txt', 'w') as f:\n for x in _aimlist:\n f.write(str(x) + '\\n==========\\n')\n print('Successfully wrote the results to', 'resultList.txt')\n\n\nif __name__ == \"__main__\":\n # map_stack.push(copy.deepcopy(MapLList))\n # install_chip(MapLList, -1)\n # writefile(resultList)\n print('There are', len(resultList), 'results in total')\n print('the process has ended')\n","sub_path":"GFHC0.py","file_name":"GFHC0.py","file_ext":"py","file_size_in_byte":5741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} {"seq_id":"153364219","text":"# Learning to use lambda functions\n# The function as originally defined\n\"\"\"\ndef true():\n return True\ntrue()\n\"\"\"\n\n# Simplify further: write the two lines as one\n\"\"\"\ndef true():return True\ntrue()\n\"\"\"\n# If the function name is dropped as well, that is a lambda\n\"\"\"\ndef true():return True\nlambda : True\n\"\"\"\n\"\"\"\nlambda functions:\ndef add(x,y):\n return x+y\nadd(3,5)\n8\nlambda x,y: x+y\n<function <lambda> at 0x00000296DD590BF8>\n\"\"\"\n# Some examples\n\"\"\"\nlambda x:x<=(month,day)\n# the equivalent named function\ndef func1(x):\n return x<=(month,day)\n\nlambda item:item[1]\ndef func2(item):\n return item[1]\n\"\"\"\n\n\n# demonstration of func2\ndef func2(item):\n print(item[1])\n return item[1]\n\n\nadict = {'a': 'aa', 'b': 'bb'}\nfor i in adict.items():\n func2(i)\n","sub_path":"Section4/lesson26/lesson26.py","file_name":"lesson26.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} {"seq_id":"43860304","text":"import json\nfrom pillarsdk import utils\nfrom pillarsdk.users import User\nfrom pillarsdk.nodes import Node\nfrom pillarsdk.nodes import NodeType\nfrom pillarsdk.tokens import Token\n\nfrom flask import Blueprint\nfrom flask import render_template\nfrom flask import flash\nfrom flask import session\nfrom flask import redirect\nfrom flask import request\nfrom flask import url_for\n\nfrom application.modules.users.forms import UserLoginForm\nfrom application.modules.users.forms import UserProfileForm\nfrom application.helpers import Pagination\n\nfrom application import SystemUtility\nfrom application import userClass\n\nfrom flask.ext.login import login_user\nfrom flask.ext.login import logout_user\nfrom flask.ext.login import current_user\nfrom flask.ext.login import login_required\n\n\n# Name of the Blueprint\nusers = Blueprint('users', __name__)\n\n\ndef authenticate(username, password):\n import requests\n import socket\n payload = dict(\n username=username,\n password=password,\n hostname=socket.gethostname())\n try:\n r = requests.post(\"{0}/u/identify\".format(\n SystemUtility.blender_id_endpoint()), data=payload)\n except requests.exceptions.ConnectionError as e:\n raise e\n\n if r.status_code == 200:\n response = r.json()\n else:\n response = None\n return response\n\n\n@users.route(\"/login\", methods=['GET', 'POST'])\ndef login():\n form = UserLoginForm()\n if form.validate_on_submit():\n auth = authenticate(form.email.data, form.password.data)\n if auth and auth['status'] == 'success':\n user = userClass(auth['data']['token'])\n login_user(user)\n flash('Welcome {0}!'.format(form.email.data))\n return redirect('/')\n elif auth:\n flash('{0}'.format(auth['data']))\n return redirect('/')\n return render_template('users/login.html', form=form)\n\n\n@users.route(\"/logout\")\ndef logout():\n logout_user()\n flash('Successfully logged out')\n return redirect('/')\n\n\n@users.route(\"/profile\", methods=['GET', 'POST'])\n@login_required\ndef profile():\n \"\"\"Profile view and edit page. 
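Fetches the current user from the Pillar API, prefills the name form, and saves any submitted changes.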
This is a temporary implementation.\n \"\"\"\n api = SystemUtility.attract_api()\n user = User.find(current_user.objectid, api=api)\n\n form = UserProfileForm(\n first_name = user.first_name,\n last_name = user.last_name)\n\n if form.validate_on_submit():\n user.first_name = form.first_name.data\n user.last_name = form.last_name.data\n user.update(api=api)\n flash(\"Profile updated\")\n\n return render_template('users/profile.html',\n form=form)\n\n\ndef type_names():\n api = SystemUtility.attract_api()\n\n types = NodeType.all(api=api)[\"_items\"]\n names = []\n for node_type in types:\n names.append(str(node_type['name']))\n return names\n\n\n@users.route(\"/tasks\", methods=['GET', 'POST'])\n@login_required\ndef tasks():\n \"\"\"User-assigned tasks\"\"\"\n # Pagination index\n page = request.args.get('page', 1)\n max_results = 50\n\n api = SystemUtility.attract_api()\n node_type_list = NodeType.all({'where': \"name=='task'\"}, api=api)\n\n if len(node_type_list['_items']) == 0:\n return \"Empty NodeType list\", 200\n\n node_type = node_type_list._items[0]\n\n tasks = Node.all({\n 'where': '{\"node_type\" : \"%s\", \"properties.owners.users\": {\"$in\": [\"%s\"]}}'\\\n % (node_type['_id'], current_user.objectid),\n 'max_results': max_results,\n 'page': page,\n 'embedded': '{\"parent\":1, \"picture\":1}',\n 'sort' : \"order\"}, api=api)\n\n # Build the pagination object\n # pagination = Pagination(int(page), max_results, tasks._meta.total)\n\n tasks_datatable = []\n for task in tasks._items:\n cut_in = 0\n cut_out = 0\n if task.parent.properties.cut_in:\n cut_in = task.parent.properties.cut_in\n if task.parent.properties.cut_out:\n cut_out = task.parent.properties.cut_out\n data = {\n 'DT_RowId': \"row_{0}\".format(task._id),\n '_id': task._id,\n 'order': task.order,\n 'picture': None,\n 'name': task.name,\n 'timing': {\n 'cut_in': cut_in,\n 'cut_out': cut_out,\n },\n 'parent': task.parent.to_dict(),\n 'description': task.description,\n 'url_view': url_for('nodes.view', node_id=task._id),\n 'url_edit': url_for('nodes.edit', node_id=task._id, embed=1),\n 'status': task.properties.status,\n }\n\n tasks_datatable.append(data)\n\n return render_template(\n 'users/tasks.html',\n title=\"task\",\n tasks_data=json.dumps(tasks_datatable),\n node_type=node_type)\n","sub_path":"pillar-web/application/modules/users/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} {"seq_id":"404044723","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nimport glob\nimport threading\nfrom PyQt4 import Qt\n\nsys.path = [\"..\"] + sys.path\nfrom ssc32 import SSC32\n#import ssc32\nfrom ssc32yaml import load_yaml, load_config\n\n\nclass MainWindow(Qt.QMainWindow):\n def __init__(self, parent=None):\n super(MainWindow, self).__init__(parent)\n\n self.scripts = {}\n config = load_config()\n\n for scr in glob.glob(os.path.join(os.path.dirname(__file__), 'scripts', '*.yaml')):\n self.scripts[os.path.basename(scr)] = load_yaml(scr)\n \n print(config)\n\n self.ssc = SSC32(config['port'], config['baud'], config=config['config'])\n\n widget = Qt.QWidget()\n layout = Qt.QHBoxLayout()\n self.setCentralWidget(widget)\n widget.setLayout(layout)\n\n self.axis = {}\n\n for name in ['JOINT'+str(x) for x in range(5)] + ['GRIP']:\n slider = Qt.QScrollBar()\n slider.setMaximum(self.ssc[name].max)\n slider.setValue(1500)\n 
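# the scroll bar covers this servo's configured min..max range; 1500 is a typical neutral pulse width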
slider.setMinimum(self.ssc[name].min)\n\n layout.addWidget(slider)\n self.axis[name] = slider\n self.connect(slider, Qt.SIGNAL('valueChanged(int)'),\n self.on_slider(name))\n\n bwidget = Qt.QWidget()\n blayout = Qt.QVBoxLayout()\n bwidget.setLayout(blayout)\n layout.addWidget(bwidget)\n\n self.state = Qt.QLabel(\"Connected\")\n blayout.addWidget(self.state)\n\n for name, script in self.scripts.iteritems():\n button = Qt.QPushButton(name)\n blayout.addWidget(button)\n self.connect(button, Qt.SIGNAL('clicked()'),\n self.on_run_script(script))\n\n def on_movement_done(self, pn, move):\n self.update_state()\n self.update_sliders()\n\n self.state.setText(\n self.state.text() + \n '\\nMovement {0} of {1}\\n'.format(pn[0], pn[1]))\n\n def update_state(self):\n t = ''\n k = self.axis.keys()\n k.sort()\n for name in k:\n t += '{name}:\\tabs: {servo.position}\\t' \\\n 'deg: {servo.degrees:3.3}\\trad: {servo.radians:0.4}\\n'.format(\n name=name, servo=self.ssc[name])\n self.state.setText(t)\n\n def update_sliders(self):\n for name, slider in self.axis.iteritems():\n slider.setValue(self.ssc[name].position)\n\n def on_run_script(self, script):\n def inner():\n def thread_run():\n script.on_movement_done = self.on_movement_done\n script(self.ssc)\n self.update_sliders()\n self.update_state()\n\n threading.Thread(target=thread_run).start()\n return inner\n\n def on_slider(self, name):\n def inner(val_):\n val = self.axis[name].value()\n self.ssc[name].position = val\n print(\"Commit {0}: {1}\".format(name, val))\n self.ssc.commit()\n self.update_state()\n\n return inner\n\n\nif __name__ == '__main__':\n app = Qt.QApplication(sys.argv)\n mw = MainWindow()\n mw.show()\n sys.exit(app.exec_())\n\n","sub_path":"examples/qtarm.py","file_name":"qtarm.py","file_ext":"py","file_size_in_byte":3167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"460177323","text":"import os, re, subprocess\n\n\ndef to_audio(sf2, midi_file, out_dir, out_type='wav', txt_file=None, append=True):\n \"\"\"\n Convert a single midi file to an audio file. If a text file is specified,\n the first line of text in the file will be used in the name of the output\n audio file. For example, with a MIDI file named '01.mid' and a text file\n with 'A major', the output audio file would be 'A_major_01.wav'. If\n append is false, the output name will just use the text (e.g. 'A_major.wav')\n\n Args:\n sf2 (str): the file path for a .sf2 soundfont file\n midi_file (str): the file path for the .mid midi file to convert\n out_dir (str): the directory path for where to write the audio out\n out_type (str): the output audio type (see 'fluidsynth -T help' for options)\n txt_file (str): optional text file with additional information of how to name\n the output file\n append (bool): whether or not to append the optional text to the original\n .mid file name or replace it\n \"\"\"\n fbase = os.path.splitext(os.path.basename(midi_file))[0]\n if not txt_file:\n out_file = out_dir + '/' + fbase + '.' + out_type\n else:\n line = 'out'\n with open(txt_file, 'r') as f:\n line = re.sub(r'\\s', '_', f.readline().strip())\n\n if append:\n out_file = out_dir + '/' + line + '_' + fbase + '.' + out_type\n else:\n out_file = out_dir + '/' + line + '.' 
+ out_type\n\n subprocess.call(['fluidsynth', '-T', out_type, '-F', out_file, '-ni', sf2, midi_file])\n\n# to_audio('Soundfont.sf2','/home/ihor/PycharmProjects/musicmaker/midi/99611.mid','/home/ihor/PycharmProjects/musicmaker/wavs')\n","sub_path":"utils/midi.py","file_name":"midi.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"612552635","text":"import numpy as np\nimport sys\n\nif __name__ == \"__main__\":\n\tif len(sys.argv) !=4:\n\t\tprint(\"usage :\", sys.argv[0], \"datafile labelsfile outputfile\")\n\t\tsys.exit()\n\telse:\n\t\tX = np.genfromtxt(sys.argv[1], delimiter =',', dtype='float')\n\t\ty = np.genfromtxt(sys.argv[2], delimiter =',', dtype='float')\n\t\t\n\t\tmean_vectors = []\n\t\tfor cl in range(1,4):\n\t\t mean_vectors.append(np.mean(X[y==cl], axis=0))\n\t\t print('Mean Vector class %s: %s\\n' %(cl, mean_vectors[cl-1]))\n\t\t\t\n\t\tS_W = np.zeros((4,4))\n\t\tfor cl,mv in zip(range(1,4), mean_vectors):\n\t\t class_sc_mat = np.zeros((4,4)) \n\t\t for row in X[y == cl]:\n\t\t row, mv = row.reshape(4,1), mv.reshape(4,1) \n\t\t class_sc_mat += (row-mv).dot((row-mv).T)\n\t\t S_W += class_sc_mat \n\t\t\t\n\t\tprint('within-class Scatter Matrix:\\n', S_W)\n\t\t\n\t\teigvals, eigvecs = np.linalg.eigh(S_W@S_W.T)\n\t\teigvecs = eigvecs[:,np.argsort(eigvals)]\n\t\teigvecs = eigvecs[:,:2]\n\t\tresult = []\n\t\t\n\t\tfor val in X:\n\t\t\tres = eigvecs.T@val\n\t\t\tresult.append(res)\n\t\t\t\n\t\toutput = np.asarray(result)\n\t\t\n\t\twith open(sys.argv[3], mode = 'w', newline='') as op:\n\t\t\tnp.savetxt(op, output , newline = \" \")\n\t\t\t\n\t\t","sub_path":"Project2/scatter3.py","file_name":"scatter3.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"513284915","text":"import resource\nfrom unittest.mock import patch\nimport multiprocessing\nfrom typing import Callable, Tuple\nfrom autotester.server.utils import resource_management as rm\n\n\ndef enqueue_limit(limit: int, queue: multiprocessing.Queue, func: Callable) -> None:\n config = {\"rlimit_settings\": {\"nproc\": [200, 200], \"cpu\": [10, 10]}}\n adjustments = {\"nproc\": 2}\n with patch.dict(\"autotester.server.utils.resource_management.config._settings\", config):\n with patch.dict(\"autotester.server.utils.resource_management.RLIMIT_ADJUSTMENTS\", adjustments):\n func()\n queue.put(resource.getrlimit(limit))\n\n\ndef run_test(limit: int, func: Callable) -> Tuple[int, int]:\n queue = multiprocessing.Queue()\n proc = multiprocessing.Process(target=enqueue_limit, args=(limit, queue, func))\n proc.start()\n proc.join()\n return queue.get(block=False)\n\n\nclass TestSetRlimitsBeforeTest:\n def test_sets_rlimits_with_adjustments(self):\n \"\"\" Reduces rlimit by the adjustment amount from the config setting \"\"\"\n assert run_test(resource.RLIMIT_NPROC, rm.set_rlimits_before_test) == (198, 198)\n\n def test_sets_rlimits_without_adjustments(self):\n \"\"\" Does not reduce the rlimit from the config setting if no adjustment given \"\"\"\n assert run_test(resource.RLIMIT_CPU, rm.set_rlimits_before_test) == (10, 10)\n","sub_path":"src/autotester/server/utils/tests/resource_management_test.py","file_name":"resource_management_test.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"406772322","text":"#!/usr/bin/python\n\nimport pyscreenshot 
as ImageGrab\n\nwidth = 1920\nheight = 1080\ncelldim = 48, 48\nspriteWidth = 24 # The width of each sprite\n\n\nclass Cell:\n im = None # The image of the cell\n\n def __init__(self, image):\n self.im = image\n\n\ndef getCell(p):\n im = ImageGrab.grab(bbox=(\n int((width / 2) - (celldim[0] * (1 - p[0] * 2))),\n int((height / 2) - (celldim[1] * (1 + p[1] * 2))),\n int((width / 2) + (celldim[0] * (1 + p[0] * 2))),\n int((height / 2) + (celldim[1] * (1 - p[1] * 2)))\n ))\n return im\n\n\ndef getNearCells():\n im_upper = Cell(getCell((0, 1)))\n im_lower = Cell(getCell((0, -1)))\n im_right = Cell(getCell((1, 0)))\n im_left = Cell(getCell((-1, 0)))\n im_right_upper = Cell(getCell((1, 1)))\n im_right_lower = Cell(getCell((1, -1)))\n im_left_upper = Cell(getCell((-1, 1)))\n im_left_lower = Cell(getCell((-1, -1)))\n cells = [im_upper, im_lower, im_right, im_left, im_right_upper,\n im_right_lower, im_left_upper, im_left_lower]\n return cells\n","sub_path":"Modules/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":1047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"78162040","text":"\"\"\"\nSiEPIC Photonics Package \n\nAuthor: Mustafa Hammood\n Mustafa@siepic.com\n\nExample: Application of SiEPIC_PP cutback function\n to extract the porpagation losses from three different length spirals.\n\"\"\"\n\n#%% import package and installed dependent packages\nimport sys, os\n# go up two directories\ndir_path = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(os.path.dirname(os.path.dirname(dir_path)))\n\nimport SiEPIC_Photonics_Package as SiEPIC_PP\nfrom SiEPIC_Photonics_Package.setup import *\n\n#%% download .mat files from GitHub repo and parse it to a variable (data)\n# responses to extract losses from\n# in this example, file name units are in um (microns)\nunit = [0, 5000, 10000, 30000]\n\n# divide by 10000 to see result in dB/cm\nunit_cm = [i/10000 for i in unit]\n\ninput_data_response = []\n\nfor i in unit:\n file_name = 'SpiralWG'+str(i)+'TE'\n file_extension = '.mat'\n url = 'https://github.com/SiEPIC-Kits/SiEPIC_Photonics_Package/blob/master/Examples/'+file_name+file_extension+'?raw=true'\n PORT = 1\n input_data_response.append( SiEPIC_PP.core.download_response(url,PORT) )\n\n#%% apply SiEPIC_PP cutback extraction function\n[insertion_loss_wavelength, insertion_loss_fit, insertion_loss_raw] = SiEPIC_PP.core.cutback( input_data_response, unit_cm, 1550e-9 )\n\n#%% plot responses and save pdf\n\n# plot all cutback structures responses\nmatplotlib.pyplot.figure(2)\nwavelength = input_data_response[0][0]*1e9\nfig0 = matplotlib.pyplot.plot(wavelength,input_data_response[0][1], label='L = 0', color='blue')\nfig1 = matplotlib.pyplot.plot(wavelength,input_data_response[1][1], label='L = 5000 um', color='black')\nfig2 = matplotlib.pyplot.plot(wavelength,input_data_response[2][1], label='L = 10000 um', color='green')\nfig3 = matplotlib.pyplot.plot(wavelength,input_data_response[3][1], label='L = 30000 um', color='red')\nmatplotlib.pyplot.legend(loc=0)\nmatplotlib.pyplot.ylabel('Power (dBm)', color = 'black')\nmatplotlib.pyplot.xlabel('Wavelength (nm)', color = 'black')\nmatplotlib.pyplot.xlim(round(min(wavelength)),round(max(wavelength)))\nmatplotlib.pyplot.title(\"Raw measurement of cutback structures\")\nmatplotlib.pyplot.savefig('cutback_measurement'+'.pdf')\nmatplotlib.rcParams.update({'font.size': 14, 'font.family' : 'Times New Roman', 'font.weight': 'bold'})\n\n# Insertion loss vs wavelength 
plot\nmatplotlib.pyplot.figure(1)\nlinspace = numpy.linspace(unit_cm[0],unit_cm[len(unit_cm)-1], len(insertion_loss_fit))\nfig1 = matplotlib.pyplot.plot(linspace,insertion_loss_raw, label='Insertion loss (raw)', color='blue')\nfig2 = matplotlib.pyplot.plot(linspace,insertion_loss_fit, label='Insertion loss (fit)', color='red')\nmatplotlib.pyplot.legend(loc=0)\nmatplotlib.pyplot.ylabel('Loss (dB/cm)', color = 'black')\nmatplotlib.pyplot.xlabel('Length (cm)', color = 'black')\nmatplotlib.pyplot.setp(fig2, 'linewidth', 4.0)\nmatplotlib.pyplot.xlim(round(min(unit_cm)),round(max(unit_cm)))\nmatplotlib.pyplot.title(\"Insertion losses using the cut-back method\")\nmatplotlib.pyplot.savefig('cutback'+'.pdf')\nmatplotlib.rcParams.update({'font.size': 14, 'font.family' : 'Times New Roman', 'font.weight': 'bold'})\n\n","sub_path":"Examples/cutback/cutback.py","file_name":"cutback.py","file_ext":"py","file_size_in_byte":3148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} {"seq_id":"595942115","text":"import sys\n\nclass ProbBasedTagger:\n def __init__(self):\n self.__hinTaggerOut = []\n self.__engTaggerOut = []\n self.__output = []\n \n def __giveSentences(self, oFile):\n sents = []\n sent = []\n oFile = open(oFile)\n line = oFile.readline()\n while 1:\n if not line:\n if len(sent)!=0:\n sents.append(sent)\n sent = []\n break\n if line.strip() == \"\":\n sents.append(sent)\n sent = []\n else:\n line = line.strip().split() ## Splitting based on \\s rather than a tab\n try:\n sent.append((line[0],line[2],line[3]))\n except:\n sys.stderr.write(\"line[3] doesn't exist:\"+str(line)+\"\\n\")\n dummy = raw_input(\"Temporary Stall!\")\n line = oFile.readline()\n return sents\n\n def loadTaggerOutputs(self, engOutput, hinOutput):\n self.__hinTaggerOut = self.__giveSentences(hinOutput)\n self.__engTaggerOut = self.__giveSentences(engOutput)\n sys.stderr.write(\"Number of Hindi sentences:\"+str(len(self.__hinTaggerOut))+\"\\n\")\n sys.stderr.write(\"Number of English sentences:\"+str(len(self.__engTaggerOut))+\"\\n\")\n \n def __printOutputInFile(self, outFile):\n for sent in self.__output:\n outFile.write('\\n'.join(map(lambda x:'\\t'.join(x), sent))+'\\n\\n')\n outFile.close()\n \n def __fixTags(self):\n if len(self.__hinTaggerOut) != len(self.__engTaggerOut):\n sys.stderr.write(\"__fixTags:Sizes of Hindi and English outputs different.. halting..\\n\")\n sys.exit(0)\n for index in range(len(self.__hinTaggerOut)):\n newOutput = []\n engOutput = self.__engTaggerOut[index]\n hinOutput = self.__hinTaggerOut[index]\n if len(engOutput) != len(hinOutput):\n sys.stderr.write(\"__fixTags:Sizes of Hindi and English sentence different.. 
halting..\\n\")\n sys.exit(0)\n for wordIndex in range(len(engOutput)):\n word = engOutput[wordIndex][0]\n tag = \"\"\n if float(engOutput[wordIndex][2]) > float(hinOutput[wordIndex][2]):\n tag = engOutput[wordIndex][1]\n else:\n tag = hinOutput[wordIndex][1]\n newOutput.append((word,tag))\n self.__output.append(newOutput)\n \n def probBasedTag(self, outFile):\n self.__fixTags()\n outFile = open(outFile,\"w\")\n self.__printOutputInFile(outFile)\n \nif __name__ == \"__main__\":\n engOut = \"/usr0/home/pgadde/Work/CodeSwitching/Hinglish/BaselineTagger/Eng/bigBossTest.crf.eng.out\"\n hinOut = \"/usr0/home/pgadde/Work/CodeSwitching/Hinglish/BaselineTagger/Hin/bigBossTest.crf.hin.out\"\n taggerOutput = \"/usr0/home/pgadde/Work/CodeSwitching/Hinglish/BaselineTagger/Combining/combinedProb\"\n PT = ProbBasedTagger()\n PT.loadTaggerOutputs(engOut, hinOut)\n PT.probBasedTag(taggerOutput)","sub_path":"CSPOSTaggingBackup/src/baselineTagger/probBasedTagger.py","file_name":"probBasedTagger.py","file_ext":"py","file_size_in_byte":2721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} {"seq_id":"207423656","text":"\"\"\"\n Copyright (c) 2020 Intel Corporation\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport logging\nimport os\nimport subprocess\n\nfrom ote import MMDETECTION_TOOLS\n\n\ndef collect_ap(path):\n \"\"\" Collects average precision values from a log file. 
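Lines are matched against the fixed COCO summary prefix; the AP floats are returned in file order.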
\"\"\"\n\n average_precisions = []\n beginning = 'Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = '\n with open(path) as read_file:\n content = [line.strip() for line in read_file]\n for line in content:\n if line.startswith(beginning):\n average_precisions.append(float(line.replace(beginning, '')))\n return average_precisions\n\n\ndef update_outputs(outputs, metric_keys, metric_names, metric_values):\n assert len(metric_values) == len(metric_names) == len(metric_keys), \\\n f'{metric_values} vs {metric_names} vs {metric_keys}'\n for key, name, value in zip(metric_keys, metric_names, metric_values):\n # a value of None means the metric could not be computed; pass it through unscaled\n if value is not None:\n assert 0 <= value <= 1.0, f'{key} = {value}'\n value *= 100\n outputs.append(\n {'key': key, 'value': value, 'unit': '%', 'display_name': name})\n\n\ndef run_test_script(config_path, work_dir, snapshot, update_config, show_dir, metrics):\n res_pkl = os.path.join(work_dir, 'res.pkl')\n test_py_stdout = os.path.join(work_dir, 'test_py_stdout')\n\n update_config = ' '.join([f'{k}={v}' for k, v in update_config.items()])\n update_config = f' --update_config {update_config}' if update_config else ''\n update_config = update_config.replace('\"', '\\\\\"')\n show_dir = f' --show-dir {show_dir}' if show_dir else ''\n\n if snapshot.split('.')[-1] in {'xml', 'bin', 'onnx'}:\n if snapshot.split('.')[-1] == 'bin':\n snapshot = '.'.join(snapshot.split('.')[:-1]) + '.xml'\n tool = 'test_exported.py'\n else:\n tool = 'test.py'\n\n subprocess.run(\n f'python3 {MMDETECTION_TOOLS}/{tool}'\n f' {config_path} {snapshot}'\n f' --out {res_pkl} --eval {metrics}'\n f'{show_dir}{update_config}'\n f' | tee {test_py_stdout}',\n check=True, shell=True\n )\n\n return test_py_stdout\n\n\ndef coco_ap_eval(config_path, work_dir, snapshot, update_config, show_dir='',\n metric_names=('AP @ [IoU=0.50:0.95]', ), metrics='bbox', **kwargs):\n \"\"\" Computes COCO AP. \"\"\"\n\n metric_keys = metrics.split(' ')\n assert len(metric_keys) == len(metric_names), f'{len(metric_keys)} != {len(metric_names)}'\n allowed_metric_keys = {'bbox', 'segm'}\n assert all(x in allowed_metric_keys for x in metric_keys)\n outputs = []\n if not(update_config['data.test.ann_file'] and update_config['data.test.img_prefix']):\n logging.warning('Passed empty path to annotation file or data root folder. 
'\n 'Skipping AP calculation.')\n update_outputs(outputs, metric_keys, metric_names, [None for _ in metric_keys])\n else:\n test_py_stdout = run_test_script(config_path, work_dir, snapshot,\n update_config, show_dir, metrics)\n\n average_precision = collect_ap(test_py_stdout)\n update_outputs(outputs, metric_keys, metric_names, average_precision)\n\n return outputs\n\n\ndef coco_ap_eval_det(config_path, work_dir, snapshot, update_config, show_dir='', **kwargs):\n return coco_ap_eval(config_path, work_dir, snapshot, update_config, show_dir)\n\n\ndef coco_ap_eval_segm(config_path, work_dir, snapshot, update_config, show_dir='', **kwargs):\n return coco_ap_eval(\n config_path, work_dir, snapshot, update_config, show_dir,\n metric_names=['Bbox AP @ [IoU=0.50:0.95]', 'Segm AP @ [IoU=0.50:0.95]'],\n metrics='bbox segm')\n","sub_path":"ote/ote/metrics/detection/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":4130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} {"seq_id":"493503158","text":"# Stacks using a linked list\nclass Node:\n def __init__(self, value):\n self.value = value\n self.next = None\n \nclass Stack:\n def __init__(self):\n self.top = None\n self.bottom = None\n self.length = 0\n\n def peek(self):\n if self.top == None:\n return None\n else:\n return self.top.value\n\n def push(self, value):\n newNode = Node(value)\n if self.length == 0:\n self.top = newNode\n self.bottom = newNode\n else:\n holdingPointer = self.top\n self.top = newNode\n self.top.next = holdingPointer\n \n self.length+=1\n return f\"pushed {value} in stack\"\n \n def pop(self):\n if (self.top == None): \n return None\n else:\n holdingPointer = self.top\n self.top = self.top.next\n self.length-=1\n if self.length == 0:\n self.bottom = None\n return f\"popped {holdingPointer.value} from stack\"\n\n\nmystack = Stack()\n\nprint(mystack.push(23))\nprint(mystack.push(43))\nprint(mystack.push(56))\nprint(mystack.pop())\nprint(mystack.pop())\nprint(mystack.peek())","sub_path":"stacks.py","file_name":"stacks.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} {"seq_id":"292520547","text":"# https://practice.geeksforgeeks.org/problems/form-a-palindrome/0\r\n\r\ndef lcs( X, Y, m, n ) :\r\n\r\n\tL=[[0 for i in range(n+1)] for i in range(m+1)]\r\n\t\r\n\t\r\n\t\r\n\tfor i in range(0, m+1):\r\n\t\tfor j in range(0, n+1):\r\n\r\n\t\t\tif (i == 0 or j == 0) :\r\n\t\t\t\tL[i][j] = 0\r\n\t\t\r\n\t\t\telif (X[i - 1] == Y[j - 1]) :\r\n\t\t\t\tL[i][j] = L[i - 1][j - 1] + 1; \r\n\t\t\r\n\t\t\telse:\r\n\t\t\t\tL[i][j] = max(L[i - 1][j], L[i][j - 1]); \r\n\t\r\n\t\r\n\t\r\n\treturn L[m][n]; \r\n\r\n\r\n\r\n\r\n\r\nfor _ in range(int(input())):\r\n\ts=input()\r\n\ta=[]\r\n\tfor i in s:\r\n\t\ta.append(i)\r\n\ta.reverse()\r\n\ts1=\"\"\r\n\tfor i in a:\r\n\t\ts1+=i\r\n\ta=lcs(s, s1, len(s), len(s))\r\n\tprint(len(s)-a)\r\n","sub_path":"minimum char addition to make it palindrome.py","file_name":"minimum char addition to make it palindrome.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} {"seq_id":"191165650","text":"# File: knights-tour-3.py\n\n# Description: implemented fix to problem caused by starting at 5,3\n\n# Student Name: Russell Kan\n\n# Student UT EID: rjk854\n\n# Course Name: lul\n\n# Unique Number: heh\n\n# Date Created:7-11-17\n\n# Date Last Modified: 7-11-17\n\nfrom graphics import *\n\ndef drawBoard(win):\n board = 
Rectangle(Point(25,75), Point(505,555))\n board.setFill(\"white\")\n board.draw(win)\n # Draw the 64 alternating board tiles\n for row in range(0,8):\n for col in range(0,8):\n top = Point(row*60+25, col*60+75)\n bottom = Point((row+1)*60+25, (col+1)*60+75)\n tile = Rectangle(top,bottom)\n if (row+col)%2 == 0:\n color = color_rgb(251,201,159)\n else:\n color = color_rgb(210,136,70)\n tile.setFill(color)\n tile.draw(win)\n\ndef countMoves(board, numBoard, moves, x, y):\n for d in range(len(board[0])):\n for c in range(len(board[0])):\n count = 0\n for move in moves:\n if isValid(board, move, c, d):\n count += 1\n numBoard[d][c] = count\n\ndef findTile(pt):\n # Change point coordinates into location on board\n x = int((pt.getX() - 25)/60)\n y = int((pt.getY() - 75)/60)\n return (x, y)\n\ndef findPixel(x, y):\n return Point(x*60+55,y*60+105)\n\ndef makeMove(board, win, x, y):\n board[y][x] = 1\n center = findPixel(x, y)\n center.draw(win)\n x1 = Line(Point(center.getX()-15, center.getY()-15), Point(center.getX()+15, center.getY()+15))\n x2 = Line(Point(center.getX()-15, center.getY()+15), Point(center.getX()+15, center.getY()-15))\n x1.setWidth(4)\n x2.setWidth(4)\n x1.draw(win)\n x2.draw(win)\n\ndef isValid(board, move, x, y): # check if a move is valid\n if x+move[0] >= 0 and y+move[1] >= 0 and x+move[0] < 8 and y+move[1] < 8: # check boundaries\n if board[y+move[1]][x+move[0]] != 1: # check if a space has been visited already\n return True\n else:\n return False\n\n\ndef step(board, numBoard, win, moves, x, y, rgb):\n lowest = 8\n for move in moves:\n if isValid(board, move, x, y):\n if numBoard[y+move[1]][x+move[0]] < lowest:\n lowsum = 0\n for m in moves:\n if isValid(board, [move[0]+m[0], move[1]+m[1]], x, y):\n lowsum += numBoard[y+move[1]+m[1]][x+move[0]+m[0]]\n newX = x + move[0]\n newY = y + move[1]\n lowest = numBoard[newY][newX]\n nextMove = move\n\n if numBoard[y+move[1]][x+move[0]] == lowest:\n tempsum = 0\n for n in moves:\n if isValid(board, [move[0]+n[0], move[1]+n[1]], x, y):\n tempsum += numBoard[y+move[1]+n[1]][x+move[0]+n[0]]\n if tempsum <= lowsum:\n newX = x + move[0]\n newY = y + move[1]\n lowest = numBoard[newY][newX]\n nextMove = move\n lowsum = tempsum\n\n oldCenter = findPixel(x, y)\n newCenter = findPixel(newX, newY)\n line = Line(Point(oldCenter.getX(), oldCenter.getY()), Point(newCenter.getX(), newCenter.getY()))\n line.setWidth(2.5)\n color = color_rgb(rgb[0], rgb[1], rgb[2])\n line.setFill(color)\n line.draw(win)\n\n makeMove(board, win, newX, newY)\n countMoves(board, numBoard, moves, x, y)\n\n print(\"board of moves:\")\n for a in numBoard:\n print(a)\n print(\"next move\", nextMove, \"to [\", newX, \",\", newY, \"]\")\n\n input(\"press any key...\")\n return newX, newY\n\ndef main():\n board = [[0 for y in range(8)] for x in range(8)]\n numBoard = [[0 for y in range(8)] for x in range(8)]\n moves = [[-2,1], [-1,2], [1,2], [2,1], [2,-1], [1,-2], [-1,-2], [-2,-1]] # possible moves of a knight\n\n win = GraphWin('Knight\\'s Tour', 530, 580)\n win.setBackground('white')\n message = Text(Point(win.getWidth()/2, 30), 'Knight\\'s Tour Version 2') \n message.setTextColor('red')\n message.setStyle('italic')\n message.setSize(20)\n message.draw(win)\n\n drawBoard(win)\n \n p1 = Point(0,0)\n while p1.getX() < 25 or p1.getX() > 505 or p1.getY() < 75 or p1.getY() > 555:\n p1 = win.getMouse() # returns point of mouse click\n\n x, y = findTile(p1)\n makeMove(board, win, x, y)\n countMoves(board, numBoard, moves, x, y)\n rgb = [0, 0, 0]\n\n while True:\n x, y = step(board, numBoard, win, 
moves, x, y, rgb)\n\n if rgb[2] < 248:\n rgb[2] += 8\n else:\n rgb[0] += 8\n rgb[1] += 8\n \n\n if all(v == 1 for r in board for v in r):\n break\n\n message.setText('Click anywhere to quit') # change text message\n win.getMouse()\n win.close() \n\nmain()\n","sub_path":"knights-tour-3.py","file_name":"knights-tour-3.py","file_ext":"py","file_size_in_byte":4801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"380291695","text":"# Given a square matrix, calculate the absolute difference between the sums of its diagonals.\n# For example, the square matrix is shown below:\n# 1 2 3\n# 4 5 6\n# 9 8 9\n\n# Function description\n# Complete the function in the editor below.\n# diagonalDifference takes the following parameter:\n# int arr[n][m]: an array of integers\n# Return\n# int: the absolute diagonal difference\n# Input Format\n# The first line contains a single integer, n, the number of rows and columns in the square matrix arr.\n# Each of the next lines describes a row, arr[i], and consists of space-separated integers arr[i][j].\n\n# Constraints\n# -100 <= arr[i][j] <= 100\n\n# Output Format\n# Return the absolute difference between the sums of the matrix's two diagonals as a single integer.\n\n# Sample Input\n# 3\n# 11 2 4\n# 4 5 6\n# 10 8 -12\n# Sample Output\n# 15\n\n# Explanation\n\n# The primary diagonal is:\n\n# 11\n# 5\n# -12\n# Sum across the primary diagonal: 11 + 5 - 12 = 4\n\n# The secondary diagonal is:\n\n# 4\n# 5\n# 10\n# Sum across the secondary diagonal: 4 + 5 + 10 = 19\n# Difference: |4 - 19| = 15\n\n# Note: |x| is the absolute value of x\n\n#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n#\n# Complete the 'diagonalDifference' function below.\n#\n# The function is expected to return an INTEGER.\n# The function accepts 2D_INTEGER_ARRAY arr as parameter.\n#\n\ndef diagonalDifference(arr):\n # if the arr is not defined or row count is 0\n if arr is None or len(arr) == 0: return\n\n rows = len(arr)\n\n leftDiagSum = 0\n rightDiagSum = 0\n\n for i in range(rows):\n leftDiagSum += arr[i][i]\n rightDiagSum += arr[i][rows-1-i]\n \n return abs(leftDiagSum-rightDiagSum)\n\n\nif __name__ == '__main__':\n # fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n n = int(input().strip())\n\n arr = []\n\n for _ in range(n):\n arr.append(list(map(int, input().rstrip().split())))\n\n result = diagonalDifference(arr)\n print(result)\n\n # fptr.write(str(result) + '\\n')\n\n # fptr.close()\n","sub_path":"src/HackerRank/7DayPrep/Day 2/diagonalDiff.py","file_name":"diagonalDiff.py","file_ext":"py","file_size_in_byte":2016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"441213340","text":"#!/usr/bin/env python3\n# coding: utf-8\nfrom collections import defaultdict\nfrom string import punctuation\nimport unittest\n\n\nclass WordCloudData:\n def __init__(self, input_string):\n self.sentence = input_string\n self.words_to_count = {}\n for w in self.split_words():\n if w in self.words_to_count:\n self.words_to_count[w] += 1\n elif w.lower() in self.words_to_count:\n self.words_to_count[w.lower()] += 1\n else:\n self.words_to_count[w] = 1\n\n def split_words(self):\n punct = set(punctuation + ' ') - {'-', '\\''}\n ret = []\n next_word = []\n for i in self.sentence:\n if i in punct and len(next_word) > 0:\n ret.append(''.join(next_word))\n next_word = []\n elif (i == '-' and len(next_word) == 0) or i in punct:\n continue\n else:\n next_word.append(i)\n\n if 
len(next_word) > 0:\n ret.append(''.join(next_word))\n\n return ret\n\n\nclass TestWordCloudData(unittest.TestCase):\n def setUp(self):\n self.split_words_test_cases = [\n ('I like cake', ['I', 'like', 'cake']),\n (\n 'Chocolate cake for dinner and pound cake for dessert',\n ['Chocolate', 'cake', 'for', 'dinner', 'and', 'pound', 'cake', 'for', 'dessert']\n ),\n ('Strawberry short cake? Yum!', ['Strawberry', 'short', 'cake', 'Yum']),\n ('Dessert - mille-feuille cake', ['Dessert', 'mille-feuille', 'cake']),\n ('Mmm...mmm...decisions decisions', ['Mmm', 'mmm', 'decisions', 'decisions']),\n ('Allie\\'s Bakery: Sasha\\'s Cakes', ['Allie\\'s', 'Bakery', 'Sasha\\'s', 'Cakes'])\n ]\n\n def test_split_words(self):\n for i, e in self.split_words_test_cases:\n self.assertEqual(WordCloudData(i).split_words(), e)\n\n\nif __name__ == '__main__':\n unittest.main(verbosity=2)\n\n","sub_path":"HashTables/word_cloud.py","file_name":"word_cloud.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} {"seq_id":"441573812","text":"from data import question_data\nfrom question_model import Question\nfrom quiz_brain import QuizBrain\n\nquestion_bank = [] # list of Question objects\nfor question in question_data:\n q_text = question[\"question\"]\n q_answer = question[\"correct_answer\"]\n new_question = Question(q_text, q_answer)\n question_bank.append(new_question)\n\nquiz = QuizBrain(question_bank)\nwhile quiz.still_has_question():\n quiz.next_question()\n\nprint(\"You have completed the challenge\")\nprint(f\"Your final score is {quiz.score}/{len(question_bank)}\")\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} {"seq_id":"219038870","text":"import pandas\nimport vincent\nvincent.core.initialize_notebook()\n\nt_data = pandas.read_csv('./turnstile_data_master_with_weather.csv')\n\nrain = t_data[t_data.rain == 1].groupby('Hour')['ENTRIESn_hourly'].mean()\nclear = t_data[t_data.rain == 0].groupby('Hour')['ENTRIESn_hourly'].mean()\ndata = pandas.DataFrame({'rain': rain, 'clear': clear, 'Hour': rain.index})\n\nline = vincent.Line(data)\nline.axis_titles(x='Hour', y='Mean entries per hour')\nline.legend('weather')\nvincent.core.initialize_notebook()\nline.display()\n","sub_path":"notes/reference/moocs/udacity/ud359-intro-to-data-science/final_project/entries_per_hour_wet_to_dry.py","file_name":"entries_per_hour_wet_to_dry.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} {"seq_id":"136761833","text":"#!/usr/bin/env python\nfrom EPPs.common import StepEPP\n\n\nclass AssignNextStepSeqPico(StepEPP):\n _use_load_config = False # prevent the loading of the config\n\n \"\"\"\n This script assigns the next step for samples in the Sequencing Plate Picogreen step to either \"review\" if they\n have failed or \"complete\" (as the last step in the protocol).\n \"\"\"\n\n def _run(self):\n # Artifacts that are standards: next step is \"remove\".\n # Artifacts where Picogreen passed QC: next step is \"complete\".\n # Artifacts that are not standards and did not pass Picogreen QC: next step is \"review\".\n for next_action in self.process.step.actions.next_actions:\n art = next_action['artifact']\n if art.name.split(' ')[0] == 'SDNA':\n next_action['action'] = 'remove'\n elif str(art.udf.get(\"Picogreen Conc Review\")).find('FAIL') > -1:\n 
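# a FAIL flag in the Picogreen QC review sends the artifact for manual review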
next_action['action'] = 'review'\n else:\n next_action['action'] = 'complete'\n\n self.process.step.actions.put()\n\n\nif __name__ == \"__main__\":\n AssignNextStepSeqPico().run()\n","sub_path":"scripts/next_step_assignment_seq_pico.py","file_name":"next_step_assignment_seq_pico.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} {"seq_id":"168857258","text":"class Account(object):\n\t\"\"\"docstring for Account\"\"\"\n\tdef __init__(self, rate):\n\t\tself._amt = 0\n\t\tself.rate = rate\n\n\t@property\n\tdef amount(self):\n\t return self._amt\n\t\n\t@property\n\tdef cny(self):\n\t return self._amt * self.rate\n\n\t@amount.setter\n\tdef amount(self, value):\n\t\tif value < 0:\n\t\t\tprint(\"Sorry, no negative amount in the account\")\n\t\t\treturn\n\t\tself._amt = value\n\nif __name__ == '__main__':\n\tacc = Account(rate = 6.6)\n\tacc.amount = 20\n\tprint('Dollar amount:', acc.amount)\n\tprint(\"In CNY:\", acc.cny)\n\n\tacc.amount = -100\n\tprint('Dollar amount:', acc.amount)","sub_path":"property.py","file_name":"property.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} {"seq_id":"52131563","text":"\"\"\" FMI 1.0 interface \"\"\"\r\n\r\nimport os\r\nimport pathlib\r\nimport numpy as np\r\nfrom ctypes import *\r\nfrom . import free, freeLibrary, platform, sharedLibraryExtension, calloc\r\n\r\n\r\nfmi1Component = c_void_p\r\nfmi1ValueReference = c_uint\r\nfmi1Real = c_double\r\nfmi1Integer = c_int\r\nfmi1Boolean = c_char\r\nfmi1String = c_char_p\r\n\r\nfmi1True = b'\\x01'\r\nfmi1False = b'\\x00'\r\n\r\nfmi1UndefinedValueReference = -1\r\n\r\nfmi1Status = c_int\r\n\r\nfmi1OK = 0\r\nfmi1Warning = 1\r\nfmi1Discard = 2\r\nfmi1Error = 3\r\nfmi1Fatal = 4\r\n\r\nfmi1CallbackLoggerTYPE = CFUNCTYPE(None, fmi1Component, fmi1String, fmi1Status, fmi1String, fmi1String)\r\nfmi1CallbackAllocateMemoryTYPE = CFUNCTYPE(c_void_p, c_size_t, c_size_t)\r\nfmi1CallbackFreeMemoryTYPE = CFUNCTYPE(None, c_void_p)\r\n# fmi1StepFinishedTYPE = CFUNCTYPE(None, fmi1Component, fmi1Status)\r\nfmi1StepFinishedTYPE = c_void_p\r\n\r\n\r\nclass fmi1CallbackFunctions(Structure):\r\n\r\n _fields_ = [('logger', fmi1CallbackLoggerTYPE),\r\n ('allocateMemory', fmi1CallbackAllocateMemoryTYPE),\r\n ('freeMemory', fmi1CallbackFreeMemoryTYPE),\r\n ('stepFinished', fmi1StepFinishedTYPE)]\r\n\r\n def __str__(self):\r\n return 'fmi1CallbackFunctions(' \\\r\n 'logger=%s, ' \\\r\n 'allocateMemory=%s, ' \\\r\n 'freeMemory=%s, ' \\\r\n 'stepFinished=%s)' % (self.logger,\r\n self.allocateMemory,\r\n self.freeMemory,\r\n self.stepFinished)\r\n\r\n\r\nclass fmi1EventInfo(Structure):\r\n\r\n _fields_ = [('iterationConverged', fmi1Boolean),\r\n ('stateValueReferencesChanged', fmi1Boolean),\r\n ('stateValuesChanged', fmi1Boolean),\r\n ('terminateSimulation', fmi1Boolean),\r\n ('upcomingTimeEvent', fmi1Boolean),\r\n ('nextEventTime', fmi1Real)]\r\n\r\n def __str__(self):\r\n return 'fmi1EventInfo(' \\\r\n 'iterationConverged=%s, ' \\\r\n 'stateValueReferencesChanged=%s, ' \\\r\n 'stateValuesChanged=%s, ' \\\r\n 'terminateSimulation=%s, ' \\\r\n 'upcomingTimeEvent=%s, ' \\\r\n 'nextEventTime=%s)' % (self.iterationConverged,\r\n self.stateValueReferencesChanged,\r\n self.stateValuesChanged,\r\n self.terminateSimulation,\r\n self.upcomingTimeEvent,\r\n self.nextEventTime)\r\n\r\n\r\ndef logger(component, instanceName, status, category, message):\r\n if status == fmi1Warning:\r\n print('[WARNING]', 
message)\r\n elif status > fmi1Warning:\r\n print('[ERROR]', message)\r\n\r\n\r\ndef allocateMemory(nobj, size):\r\n return calloc(nobj, size)\r\n\r\n\r\ndef freeMemory(obj):\r\n free(obj)\r\n\r\n\r\ndef stepFinished(componentEnvironment, status):\r\n pass\r\n\r\n\r\ncallbacks = fmi1CallbackFunctions()\r\ncallbacks.logger = fmi1CallbackLoggerTYPE(logger)\r\ncallbacks.allocateMemory = fmi1CallbackAllocateMemoryTYPE(allocateMemory)\r\ncallbacks.freeMemory = fmi1CallbackFreeMemoryTYPE(freeMemory)\r\n#callbacks.stepFinished = fmi1StepFinishedTYPE(stepFinished)\r\ncallbacks.stepFinished = None\r\n\r\n\r\nclass _FMU(object):\r\n \"\"\" Base class for all FMUs \"\"\"\r\n\r\n def __init__(self, guid, modelIdentifier, unzipDirectory, instanceName, logFMICalls=False):\r\n\r\n self.guid = guid\r\n self.modelIdentifier = modelIdentifier\r\n self.unzipDirectory = unzipDirectory\r\n self.instanceName = instanceName if instanceName is not None else self.modelIdentifier\r\n self.logFMICalls = logFMICalls\r\n\r\n # remember the current working directory\r\n work_dir = os.getcwd()\r\n\r\n library_dir = os.path.join(unzipDirectory, 'binaries', platform)\r\n\r\n # change to the library directory as some DLLs expect this to resolve dependencies\r\n os.chdir(library_dir)\r\n\r\n # load the shared library\r\n library_path = str(os.path.join(library_dir, self.modelIdentifier + sharedLibraryExtension))\r\n self.dll = cdll.LoadLibrary(library_path)\r\n\r\n # change back to the working directory\r\n os.chdir(work_dir)\r\n\r\n self.component = None\r\n\r\n def freeLibrary(self):\r\n # unload the shared library\r\n freeLibrary(self.dll._handle)\r\n\r\n def _print_fmi_args(self, fname, argnames, argtypes, args, restype, res):\r\n\r\n f = '[FMI] ' + fname + '('\r\n\r\n l = []\r\n\r\n for i, (n, t, v) in enumerate(zip(argnames, argtypes, args)):\r\n\r\n a = n + '='\r\n\r\n if t == c_void_p:\r\n # component pointer\r\n a += hex(v)\r\n elif t == POINTER(c_uint):\r\n # value references\r\n a += '[' + ', '.join(map(str, v)) + ']'\r\n elif t == POINTER(c_double):\r\n if hasattr(v, '__len__'):\r\n # c_double_Array_N\r\n a += '[' + ', '.join(map(str, v)) + ']'\r\n else:\r\n # double pointers are always flowed by the size of the array\r\n arr = np.ctypeslib.as_array(v, (args[i+1],))\r\n a += '[' + ', '.join(map(str, arr)) + ']'\r\n elif hasattr(v, '_obj'):\r\n # byref object\r\n if hasattr(v._obj, 'value'):\r\n # pointer (e.g. 
c_char_p)\r\n a += str(v._obj.value)\r\n else:\r\n # struct\r\n a += str(v._obj)\r\n else:\r\n a += str(v)\r\n\r\n l.append(a)\r\n\r\n f += ', '.join(l) + ')'\r\n\r\n if restype == c_int:\r\n\r\n f += ' -> '\r\n\r\n if res == 0:\r\n f += 'OK'\r\n elif res == 1:\r\n f += 'WARNING'\r\n elif res == 2:\r\n f += 'DISCARD'\r\n elif res == 3:\r\n f += 'ERROR'\r\n elif res == 4:\r\n f += 'FATAL'\r\n elif res == 5:\r\n f += 'PENDING'\r\n else:\r\n f += str(res)\r\n elif restype == c_void_p:\r\n f += ' -> ' + hex(res)\r\n\r\n print(f)\r\n\r\n\r\nclass _FMU1(_FMU):\r\n\r\n def __init__(self, **kwargs):\r\n\r\n super(_FMU1, self).__init__(**kwargs)\r\n\r\n # common FMI 1.0 functions\r\n\r\n self._fmi1Function('GetReal',\r\n ['component', 'vr', 'nvr', 'value'],\r\n [fmi1Component, POINTER(fmi1ValueReference), c_size_t, POINTER(fmi1Real)],\r\n fmi1Status)\r\n\r\n self._fmi1Function('GetInteger',\r\n ['component', 'vr', 'nvr', 'value'],\r\n [fmi1Component, POINTER(fmi1ValueReference), c_size_t, POINTER(fmi1Integer)],\r\n fmi1Status)\r\n\r\n self._fmi1Function('GetBoolean',\r\n ['component', 'vr', 'nvr', 'value'],\r\n [fmi1Component, POINTER(fmi1ValueReference), c_size_t, POINTER(fmi1Boolean)],\r\n fmi1Status)\r\n\r\n self._fmi1Function('GetString',\r\n ['component', 'vr', 'nvr', 'value'],\r\n [fmi1Component, POINTER(fmi1ValueReference), c_size_t, POINTER(fmi1String)],\r\n fmi1Status)\r\n\r\n self._fmi1Function('SetReal',\r\n ['component', 'vr', 'nvr', 'value'],\r\n [fmi1Component, POINTER(fmi1ValueReference), c_size_t, POINTER(fmi1Real)],\r\n fmi1Status)\r\n\r\n self._fmi1Function('SetInteger',\r\n ['component', 'vr', 'nvr', 'value'],\r\n [fmi1Component, POINTER(fmi1ValueReference), c_size_t, POINTER(fmi1Integer)],\r\n fmi1Status)\r\n\r\n self._fmi1Function('SetBoolean',\r\n ['component', 'vr', 'nvr', 'value'],\r\n [fmi1Component, POINTER(fmi1ValueReference), c_size_t, POINTER(fmi1Boolean)],\r\n fmi1Status)\r\n\r\n self._fmi1Function('SetString',\r\n ['component', 'vr', 'nvr', 'value'],\r\n [fmi1Component, POINTER(fmi1ValueReference), c_size_t, POINTER(fmi1String)],\r\n fmi1Status)\r\n\r\n def _fmi1Function(self, name, argnames, argtypes, restype):\r\n\r\n f = getattr(self.dll, self.modelIdentifier + '_fmi' + name)\r\n f.argtypes = argtypes\r\n f.restype = restype\r\n\r\n def w(*args, **kwargs):\r\n\r\n res = f(*args, **kwargs)\r\n\r\n if self.logFMICalls:\r\n self._print_fmi_args('fmi' + name, argnames, argtypes, args, restype, res)\r\n\r\n if restype == fmi1Status:\r\n # check the status code\r\n if res > 1:\r\n raise Exception(\"FMI call failed with status %d.\" % res)\r\n\r\n return res\r\n\r\n setattr(self, 'fmi1' + name, w)\r\n\r\n def getReal(self, vr):\r\n vr = (fmi1ValueReference * len(vr))(*vr)\r\n value = (fmi1Real * len(vr))()\r\n self.fmi1GetReal(self.component, vr, len(vr), value)\r\n return list(value)\r\n\r\n def getInteger(self, vr):\r\n vr = (fmi1ValueReference * len(vr))(*vr)\r\n value = (fmi1Integer * len(vr))()\r\n self.fmi1GetInteger(self.component, vr, len(vr), value)\r\n return list(value)\r\n\r\n def getBoolean(self, vr):\r\n vr = (fmi1ValueReference * len(vr))(*vr)\r\n value = (fmi1Boolean * len(vr))()\r\n self.fmi1GetBoolean(self.component, vr, len(vr), value)\r\n return list(map(lambda b: 0 if b == fmi1False else 1, value))\r\n\r\n def getString(self, vr):\r\n vr = (fmi1ValueReference * len(vr))(*vr)\r\n value = (fmi1String * len(vr))()\r\n self.fmi1GetString(self.component, vr, len(vr), value)\r\n return list(value)\r\n\r\n def setReal(self, vr, value):\r\n vr = 
(fmi1ValueReference * len(vr))(*vr)\r\n value = (fmi1Real * len(vr))(*value)\r\n self.fmi1SetReal(self.component, vr, len(vr), value)\r\n\r\n def setInteger(self, vr, value):\r\n vr = (fmi1ValueReference * len(vr))(*vr)\r\n value = (fmi1Integer * len(vr))(*value)\r\n self.fmi1SetInteger(self.component, vr, len(vr), value)\r\n\r\n def setBoolean(self, vr, value):\r\n # convert value to a byte string\r\n s = b''\r\n for v in value:\r\n s += fmi1True if v else fmi1False\r\n\r\n vr = (fmi1ValueReference * len(vr))(*vr)\r\n value = (fmi1Boolean * len(vr))(s)\r\n self.fmi1SetBoolean(self.component, vr, len(vr), value)\r\n\r\n def setString(self, vr, value):\r\n vr = (fmi1ValueReference * len(vr))(*vr)\r\n value = map(lambda s: s.encode('utf-8'), value)\r\n value = (fmi1String * len(vr))(*value)\r\n self.fmi1SetString(self.component, vr, len(vr), value)\r\n\r\n\r\nclass FMU1Slave(_FMU1):\r\n\r\n def __init__(self, **kwargs):\r\n\r\n super(FMU1Slave, self).__init__(**kwargs)\r\n\r\n # FMI 1.0 Co-Simulation functions\r\n self._fmi1Function('InstantiateSlave',\r\n ['instanceName', 'guid', 'fmuLocation', 'mimeType', 'timeout', 'visible', 'interactive', 'functions', 'loggingOn'],\r\n [fmi1String, fmi1String, fmi1String, fmi1String, fmi1Real, fmi1Boolean, fmi1Boolean, fmi1CallbackFunctions, fmi1Boolean],\r\n fmi1Component)\r\n\r\n self._fmi1Function('InitializeSlave',\r\n ['component', 'tStart', 'stopTimeDefined', 'tStop'],\r\n [fmi1Component, fmi1Real, fmi1Boolean, fmi1Real],\r\n fmi1Status)\r\n\r\n self._fmi1Function('DoStep',\r\n ['component', 'currentCommunicationPoint', 'communicationStepSize', 'newStep'],\r\n [fmi1Component, fmi1Real, fmi1Real, fmi1Boolean],\r\n fmi1Status)\r\n\r\n self._fmi1Function('TerminateSlave', ['component'], [fmi1Component], fmi1Status)\r\n\r\n self._fmi1Function('ResetSlave', ['component'], [fmi1Component], fmi1Status)\r\n\r\n self._fmi1Function('FreeSlaveInstance', ['component'], [fmi1Component], None)\r\n\r\n def instantiate(self, mimeType='application/x-fmu-sharedlibrary', timeout=0, visible=fmi1False,\r\n interactive=fmi1False, functions=callbacks, loggingOn=fmi1False):\r\n\r\n fmuLocation = pathlib.Path(self.unzipDirectory).as_uri()\r\n\r\n self.component = self.fmi1InstantiateSlave(self.instanceName.encode('UTF-8'),\r\n self.guid.encode('UTF-8'),\r\n fmuLocation.encode('UTF-8'),\r\n mimeType.encode('UTF-8'),\r\n timeout,\r\n visible,\r\n interactive,\r\n functions,\r\n loggingOn)\r\n\r\n def initialize(self, tStart=0.0, stopTime=None):\r\n stopTimeDefined = fmi1True if stopTime is not None else fmi1False\r\n tStop = stopTime if stopTime is not None else 0.0\r\n return self.fmi1InitializeSlave(self.component, tStart, stopTimeDefined, tStop)\r\n\r\n def terminate(self):\r\n return self.fmi1TerminateSlave(self.component)\r\n\r\n def reset(self):\r\n return self.fmi1ResetSlave(self.component)\r\n\r\n def freeInstance(self):\r\n self.fmi1FreeSlaveInstance(self.component)\r\n self.freeLibrary()\r\n\r\n def doStep(self, currentCommunicationPoint, communicationStepSize, newStep=fmi1True):\r\n return self.fmi1DoStep(self.component, currentCommunicationPoint, communicationStepSize, newStep)\r\n\r\n\r\nclass FMU1Model(_FMU1):\r\n\r\n def __init__(self, **kwargs):\r\n\r\n super(FMU1Model, self).__init__(**kwargs)\r\n\r\n self.eventInfo = fmi1EventInfo()\r\n\r\n self._fmi1Function('InstantiateModel',\r\n ['instanceName', 'guid', 'functions', 'loggingOn'],\r\n [fmi1String, fmi1String, fmi1CallbackFunctions, fmi1Boolean],\r\n fmi1Component)\r\n\r\n 
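# FMI 1.0 Model Exchange functions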
self._fmi1Function('SetTime',\r\n ['component', 'time'],\r\n [fmi1Component, fmi1Real],\r\n fmi1Status)\r\n\r\n self._fmi1Function('Initialize',\r\n ['component', 'toleranceControlled', 'relativeTolerance', 'eventInfo'],\r\n [fmi1Component, fmi1Boolean, fmi1Real, POINTER(fmi1EventInfo)],\r\n fmi1Status)\r\n\r\n self._fmi1Function('GetContinuousStates',\r\n ['component', 'states', 'nx'],\r\n [fmi1Component, POINTER(fmi1Real), c_size_t],\r\n fmi1Status)\r\n\r\n self._fmi1Function('GetDerivatives',\r\n ['component', 'derivatives', 'nx'],\r\n [fmi1Component, POINTER(fmi1Real), c_size_t],\r\n fmi1Status)\r\n\r\n self._fmi1Function('SetContinuousStates',\r\n ['component', 'x', 'nx'],\r\n [fmi1Component, POINTER(fmi1Real), c_size_t],\r\n fmi1Status)\r\n\r\n self._fmi1Function('CompletedIntegratorStep',\r\n ['component', 'callEventUpdate'],\r\n [fmi1Component, POINTER(fmi1Boolean)],\r\n fmi1Status)\r\n\r\n self._fmi1Function('GetEventIndicators',\r\n ['component', 'eventIndicators', 'ni'],\r\n [fmi1Component, POINTER(fmi1Real), c_size_t],\r\n fmi1Status)\r\n\r\n self._fmi1Function('EventUpdate',\r\n ['component', 'intermediateResults', 'eventInfo'],\r\n [fmi1Component, fmi1Boolean, POINTER(fmi1EventInfo)],\r\n fmi1Status)\r\n\r\n self._fmi1Function('Terminate',\r\n ['component'],\r\n [fmi1Component],\r\n fmi1Status)\r\n\r\n self._fmi1Function('FreeModelInstance',\r\n ['component'],\r\n [fmi1Component],\r\n None)\r\n\r\n def instantiate(self, functions=callbacks, loggingOn=fmi1False):\r\n self.component = self.fmi1InstantiateModel(self.instanceName.encode('UTF-8'),\r\n self.guid.encode('UTF-8'),\r\n functions,\r\n loggingOn)\r\n\r\n def setTime(self, time):\r\n return self.fmi1SetTime(self.component, time)\r\n\r\n def initialize(self, toleranceControlled=fmi1False, relativeTolerance=0.0):\r\n return self.fmi1Initialize(self.component, toleranceControlled, relativeTolerance, byref(self.eventInfo))\r\n\r\n def getContinuousStates(self, states, size):\r\n return self.fmi1GetContinuousStates(self.component, states, size)\r\n\r\n def setContinuousStates(self, states, size):\r\n return self.fmi1SetContinuousStates(self.component, states, size)\r\n\r\n def getDerivatives(self, derivatives, size):\r\n return self.fmi1GetDerivatives(self.component, derivatives, size)\r\n\r\n def completedIntegratorStep(self):\r\n stepEvent = fmi1Boolean()\r\n self.fmi1CompletedIntegratorStep(self.component, byref(stepEvent))\r\n return stepEvent.value != fmi1False\r\n\r\n def getEventIndicators(self, eventIndicators, size):\r\n return self.fmi1GetEventIndicators(self.component, eventIndicators, size)\r\n\r\n def eventUpdate(self, intermediateResults=fmi1False):\r\n return self.fmi1EventUpdate(self.component, intermediateResults, byref(self.eventInfo))\r\n\r\n def terminate(self):\r\n return self.fmi1Terminate(self.component)\r\n\r\n def freeInstance(self):\r\n self.fmi1FreeModelInstance(self.component)\r\n self.freeLibrary()\r\n","sub_path":"fmpy/fmi1.py","file_name":"fmi1.py","file_ext":"py","file_size_in_byte":18436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} {"seq_id":"292034802","text":"import math\nimport matplotlib.pyplot as plt\nmath.exp(0)-math.exp(-10)\n\nx=range(0,10)\ny=[math.exp(-xi) for xi in x] # this is called a \"list comprehension\"\nprint(x)\nprint(y)\n\nplt.bar(x,y,color=\"red\",align=\"edge\",width=1)\nx1=[i/100 for i in range(0,1000)]\ny1=[math.exp(-xi) for xi in x1]\nplt.plot(x1,y1)\n#plt.show()\n\nS1 = 0\nxfim = 10\nI_a = 1 - math.exp(-xfim)\nn = 100\n\nwhile 
abs(S1 - I_a)/I_a > 0.01:\n S1 = 0\n dx = xfim/n\n xi = 0\n for i in range(0, n):\n S1 += math.exp(-xi) * dx\n xi += dx\n n += 1\n print(S1)\n\nprint(\"Sum of {0} boxes = {1}\".format(n-1, S1))\n","sub_path":"Aula 10/aula10_exr1.py","file_name":"aula10_exr1.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} {"seq_id":"40589472","text":"\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 12 13:22:51 2017\n\n@author: Chris.Cirelli\n\"\"\"\n\nimport os\nimport pandas as pd\n\n# Directory Commands\n\n# Get current directory\n\n'''os.getcwd()'''\n\n# Change dir\n\n'''os.chdir('C:\\\\Windows')'''\n\n# Create new folder\n\n'''os.makedirs()\n\nfor x in range(0,5):\n os.makedirs(str(x))\n'''\n\n# Finding File Sizes and Folder Contents\n\n'''\nList1 = []\n[List1.append(x) for x in os.listdir('C:\\\\Users\\Chris.Cirelli\\Desktop')]\nprint(pd.DataFrame(List1))\n'''\n\n# Get the total size of all files in a directory\n\n\ndef get_totalsize(directory1):\n totalsize = 0\n for filename in os.listdir(directory1):\n totalsize += os.path.getsize(os.path.join(directory1, filename))\n print(totalsize)\n\n\n\ndef get_all_files():\n List1 = []\n Dir = 'C:\\\\Users\\Chris.Cirelli\\Desktop'\n for x in os.listdir(Dir):\n List1.append(Dir + '\\\\' + x)\n df1 = pd.DataFrame(List1)\n df1.to_clipboard(excel = True, sep = True)\n\n \n\n# Opening and Writing to Files\n\n''' \nStep1: Create File\n\nFilename = open('file.txt', 'w') #name file, open in write format. 'w' = write\nFilename.write('Hello world!')\nFilename.close()\n\nStep2: Append File\n\nFilename = open('file.txt', 'a') # 'a' = append\nFilename.write('Bacon is better than vegetables')\nFilename.close()\n\nStep3: Print Content\n\nFilename = open('file.txt') # reopen in read mode (the default)\ncontent = Filename.read()\nprint(content)\nFilename.close()\n'''\n\n\n# Saving Variables with the shelve Module\n\n''' This module allows you to save variables to a shelf file for later use'''\nimport shelve\n\nshelfFile = shelve.open('mydata') # shelve.open() ***creates a file, pass it a file name\n # shelfFile = ***this is the creation of the variable that references your shelve file. \ncats = ['Mitzy', 'Trixie', 'Shila']# create your values\nshelfFile['cats'] = cats # store the list under the key 'cats'\nshelfFile.close() # close file. 
\n'''You should see three new files created in your directory.'''\n\nshelfFile = shelve.open('mydata')\nprint(shelfFile) # check to see that the file was created properly\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Book - Automate the Boring Stuff w Python/Chapter 8 - Reading & Writing Files.py","file_name":"Chapter 8 - Reading & Writing Files.py","file_ext":"py","file_size_in_byte":2139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"366269056","text":"\"\"\"Simple link checker that will recursively extract URLs from .md files\nfrom the current repository.\n\nAsynchronous features with the help of the following page:\nhttps://stackoverflow.com/a/13530258/8787680\n\"\"\"\nimport multiprocessing as mp\nimport os\nimport subprocess\nimport time\n\n# Third-party imports\nimport requests\nfrom pydub import AudioSegment\nfrom pydub.playback import play\n\n\nFILE_NAME = \"dead.txt\" # Will appear if it contains dead links\nHEADERS = {\n \"User-Agent\": (\n \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 \"\n \"(KHTML, like Gecko) Chrome/73.0.3683.75 Safari/537.36\"\n )\n}\n\nEXCLUDED_WEBSITES = [\n \"calendar.google.com\",\n \"challengerocket.com\",\n \"https://www.khanacademy.org/math/precalculus/x9e81a4f98389efdf\",\n \"https://yukaichou.com/gamification-book/\",\n]\n\n\ndef extract_links(excluded_websites: list) -> list:\n \"\"\"Extract links from all .md files recursively from the current\n repository.\"\"\"\n cmd = (\n 'cat ../../**/*.md | grep -Eo \"(http|https)://[a-zA'\n '-Z0-9./?=_~-]*\" | sort | uniq > links.txt'\n )\n subprocess.run(cmd, shell=True)\n print(\"Links extracted from repository.\")\n\n sites = []\n with open(\"links.txt\") as f:\n content = f.readlines()\n for line in content:\n excluded = False\n for website in excluded_websites:\n if website in line:\n print(f\"Excluding: {website}\")\n excluded = True\n if not excluded:\n line = line.strip()\n sites.append(line)\n\n os.unlink(\"links.txt\")\n return sites\n\n\ndef download_site(url, q):\n \"\"\"Request an URL. 
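A side note on extract_links above: the cat/grep pipeline depends on the shell expanding `../../**/*.md` (globstar) and round-trips through links.txt. A pure-Python sketch of the same extraction, with the regex mirroring the character class used in the shell command:

```python
import glob
import re

URL_RE = re.compile(r"(?:http|https)://[a-zA-Z0-9./?=_~-]*")

def extract_links_pure(pattern="../../**/*.md"):
    # Collect, de-duplicate and sort every URL in the .md files,
    # without invoking a shell or writing a temporary file.
    links = set()
    for path in glob.glob(pattern, recursive=True):
        with open(path, encoding="utf-8") as f:
            links.update(URL_RE.findall(f.read()))
    return sorted(links)
```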
If it can properly be retrieved, add it to queue\n `q` so that it gets written to a file with the job manager.\"\"\"\n global HEADERS\n try:\n with requests.get(url, headers=HEADERS) as response:\n response_length = len(response.content)\n print(f\"Current URL: {url}\")\n if not response_length or not response.ok:\n if response.status_code != 403: # ignore blocked requests\n q.put(f\"[{response.status_code}] {url}\")\n except: # Any kind of error occurring\n try:\n q.put(f\"[{response.status_code}] {url}\")\n except UnboundLocalError: # response doesn't exist → connection error\n q.put(url)\n\n\ndef check_all_ok(dead_file):\n \"\"\"Once we have requested all URLs, check if any was found to be\n dead and print them with their specific error code.\"\"\"\n try:\n with open(dead_file) as f:\n content = f.readlines()\n if not content: # We get an empty list\n print(\"No dead link found.\")\n os.unlink(dead_file)\n else:\n print(\"Dead links found:\\n\")\n for line in content:\n print(line.strip())\n except FileNotFoundError:\n print(f\"<{dead_file}> doesn't exist.\")\n\n\ndef listener(q):\n \"\"\"Listens for messages on the `q`, writes to file.\"\"\"\n\n with open(FILE_NAME, \"w\") as f:\n while True:\n m = q.get()\n\n # String received, signaling we're done\n if m == \"kill\":\n break\n\n f.write(str(m) + \"\\n\")\n f.flush()\n\n\ndef job_manager(sites):\n \"\"\"The purpose of the job manager is to add \"jobs\" in a \"queue\" so\n that at any time only one \"job\" will be written to a file thanks to\n a \"listener\" that receives incoming \"jobs\" from the \"queue\".\"\"\"\n # Must use Manager queue here, or will not work\n manager = mp.Manager()\n q = manager.Queue()\n pool = mp.Pool(mp.cpu_count() + 2)\n\n # Put listener to work first\n _ = pool.apply_async(listener, (q,))\n\n # Fire off workers\n jobs = []\n for site in sites:\n job = pool.apply_async(download_site, (site, q))\n jobs.append(job)\n\n # collect results from the workers through the pool result queue\n for job in jobs:\n job.get()\n\n # Now we are done, kill the listener\n q.put(\"kill\")\n pool.close()\n\n\nif __name__ == \"__main__\":\n SITES = extract_links(EXCLUDED_WEBSITES) # Generate list of links\n START_TIME = time.time() # Start timer right before launching job manager\n\n # Add links as 'jobs' to write dead links to a file\n job_manager(SITES)\n\n DURATION = time.time() - START_TIME\n print(f\"Checked {len(SITES)} in {DURATION:.4f} seconds\")\n\n # Scan through the file that contains dead links. 
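The job_manager/listener pair above is the usual "many workers, one writer" multiprocessing pattern: only the listener touches the output file, a managed queue proxy carries messages to it (the code's own comment notes a plain multiprocessing.Queue will not survive Pool.apply_async), and a sentinel string shuts it down. The same pattern in isolation, as a minimal self-contained sketch:

```python
import multiprocessing as mp

def writer(q, path):
    # The only process that opens the file; stops on the "kill" sentinel.
    with open(path, "w") as f:
        for msg in iter(q.get, "kill"):
            f.write(msg + "\n")
            f.flush()

def work(i, q):
    q.put("result %d" % i)

if __name__ == "__main__":
    manager = mp.Manager()   # keep a reference so the proxy stays alive
    q = manager.Queue()
    with mp.Pool(4) as pool:
        w = pool.apply_async(writer, (q, "out.txt"))
        jobs = [pool.apply_async(work, (i, q)) for i in range(8)]
        for job in jobs:
            job.get()
        q.put("kill")        # tell the writer to finish
        w.get()
```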
If empty, delete it.\n check_all_ok(FILE_NAME)\n\n # Play a nice little sound when the script is done and shows ugly output\n # for the file being played\n song = AudioSegment.from_wav(\"./sounds/done.wav\")\n play(song)\n","sub_path":"_assets/scripts/link_checker.py","file_name":"link_checker.py","file_ext":"py","file_size_in_byte":4710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"85378647","text":"from Estructura.nodo import Nodo\nclass DomObject(Nodo):\n\tdef __init__(self, etiqueta, contenido = \"\"):\n\t\tsuper(DomObject, self).__init__()\n\n\t\tself.etiqueta = etiqueta\n\t\tself.contenido = contenido\n\n\t\tself.atributos = {\n\t\t\t\"accesskey\" : None,\n\t\t\t\"class\" : None,\n\t\t\t\"contenteditable\" : None,\n\t\t\t\"contextmenu\" : None,\n\t\t\t\"data-\" : None,\n\t\t\t\"dir\" : None,\n\t\t\t\"draggable\" : None,\n\t\t\t\"dropzone\" : None,\n\t\t\t\"hidden\" : None,\n\t\t\t\"id\" : None,\n\t\t\t\"itemprop\" : None,\n\t\t\t\"lang\" : None,\n\t\t\t\"spellcheck\" : None,\n\t\t\t\"style\" : None,\n\t\t\t\"tabindex\" : None,\n\t\t\t\"title\" : None,\n\t\t}\n\n\tdef agregarAtributo(self, atributo, valor):\n\t\tif self.atributos[atributo] != None:\n\t\t\tself.atributos[atributo] += \" \" + valor\n\t\telse:\n\t\t\tself.atributos[atributo] = valor\n\n\tdef etiquetaInicio(self):\n\t\tatributos = \" \"\n\t\tfor i in self.atributos:\n\t\t\tif self.atributos[i] != None:\n\t\t\t\tatributos += \" \" + i + '=\"' + self.atributos[i] + '\"'\n\t\treturn \"<\" + self.etiqueta + atributos+ \">\"\n\n\tdef agregarContenido(self, contenido):\n\t\tself.contenido = contenido\n\n\tdef etiquetaFinal(self):\n\t\treturn \"\"\n\n\tdef sumarDiccionario(self,dic):\n\t\tfor i in self.atributos:\n\t\t\tdic[i] = self.atributos[i]\n\t\treturn dic","sub_path":"Componentes/domobject.py","file_name":"domobject.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"166970130","text":"# secret contains keys =\nimport secret\nimport tweepy\nimport unirest\nimport time\nimport os\nimport psycopg2\nimport urlparse\n\ndef get_tweet_id():\n conn = get_conn()\n cur = conn.cursor()\n cur.execute('SELECT * FROM ids;')\n tweet_id = cur.fetchone()[0]\n cur.close()\n conn.commit()\n conn.close()\n return tweet_id\n\ndef update_tweet_id(tweet_id):\n conn = get_conn()\n cur = conn.cursor()\n cur.execute('UPDATE ids SET id=\\'{0}\\';'.format(tweet_id))\n cur.close()\n conn.commit()\n conn.close()\n\ndef get_conn():\n urlparse.uses_netloc.append(\"postgres\")\n url = urlparse.urlparse(os.environ[\"DATABASE_URL\"])\n conn = psycopg2.connect(\n database=url.path[1:],\n user=url.username,\n password=url.password,\n host=url.hostname,\n port=url.port\n )\n return conn\n\n\nif __name__ == '__main__':\n \n # initialise api\n auth = tweepy.OAuthHandler(secret.CONSUMER_KEY, secret.CONSUMER_SECRET)\n auth.set_access_token(secret.ACCESS_TOKEN, secret.ACCESS_TOKEN_SECRET)\n api = tweepy.API(auth)\n \n # headers and url for the request to the Mashape api\n headers = {'X-Mashape-Key': secret.MASHAPE_KEY, 'Accept':'text/plain'}\n url = 'https://montanaflynn-l33t-sp34k.p.mashape.com/encode?text={0}'\n \n\n # program runs forever as it is hosted on heroku\n while True:\n # get most recent tweet (single tweet as count is set as 1)\n tweet = api.user_timeline(id='DalaiLama', count=1)[0]\n last_id = get_tweet_id()\n print(tweet.id_str, last_id)\n if tweet.id_str != last_id: \n\n # log a new tweet\n 
print('L33T L4M4: New Tweet')\n \n # get text and format it to be inserted into the request url\n text = tweet.text\n text = text.replace(' ', '+').encode('utf-8')\n\n # write tweet id to first line\n update_tweet_id(tweet.id_str)\n\n # make request to l33tsp34k api\n url = url.format(text)\n response = unirest.get(url,\n headers={\n \"X-Mashape-Key\": secret.MASHAPE_KEY,\n \"Accept\": \"text/plain\"\n } \n )\n # try tweeting the translation, an exception may occur if the \n # translation api made the text > 140 characters, so that is \n # excepted and the program just continues\n try:\n api.update_status(status=response.body)\n except:\n pass\n else:\n print('L33T L4M4: No new tweets')\n # sleep for five minutes to reduce server load\n time.sleep(300)","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":2646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"106314502","text":"'''4.Write a recursive program which accept number from user and return\r\nsummation of its digits.\r\nInput : 879\r\nOutput : 24'''\r\n\r\nsum = 0;\r\ndef MyFunc(num):\r\n if (num!=0):\r\n global sum;\r\n sum=(sum + (num%10));\r\n num = num//10;\r\n MyFunc(num);\r\n \r\ndef main():\r\n num = int(input(\"Enter a number: \"));\r\n MyFunc(num);\r\n print(sum);\r\n\r\nif __name__ == \"__main__\":\r\n main();\r\n","sub_path":"Assignments/Assignment5/Assignment5_4.py","file_name":"Assignment5_4.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"653167262","text":"class Node(object):\n def __init__(self, item):\n self.item = item\n self.rchild = None\n self.lchild = None\n\n\nclass BinaryTree(object):\n def __init__(self, node=None):\n self.root = node\n\n def add(self, item):\n queue = []\n if self.root == None:\n self.root = Node(item)\n else:\n queue.append(self.root)\n while queue:\n node = queue.pop(0)\n\n if node.lchild:\n queue.append(node.lchild)\n else:\n node.lchild = Node(item)\n return\n if node.rchild:\n queue.append(node.rchild)\n else:\n node.rchild = Node(item)\n return\n\n def travel(self):\n queue = []\n\n queue.append(self.root)\n while queue:\n node = queue.pop(0)\n print(node.item, end=\" \")\n if node.lchild:\n queue.append(node.lchild)\n if node.rchild:\n queue.append(node.rchild)\n\n def preorder_travel(self, root):\n if root is not None:\n print(root.item, end=\" \")\n self.preorder_travel(root.lchild)\n self.preorder_travel(root.rchild)\n\n\nif __name__ == \"__main__\":\n tree = BinaryTree()\n tree.add(0)\n tree.add(1)\n tree.add(2)\n tree.add(3)\n tree.travel()\n print(\"\")\n tree.preorder_travel(tree.root)\n print(\"\")\n","sub_path":"数据结构/binary_tree_01.py","file_name":"binary_tree_01.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"392873106","text":"def compute_deriv(poly):\n \"\"\"\n Computes and returns the derivative of a polynomial function. 
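One bug worth flagging in the bot.py record above: `url` starts out as the template ending in `text={0}`, but `url = url.format(text)` overwrites it, so after the first new tweet every request reuses the first tweet's text (formatting a string with no remaining placeholders is a no-op). One possible fix is to keep the template and the formatted value separate; `request_url` below is an illustrative name:

```python
# Inside the `if tweet.id_str != last_id:` branch of bot.py above:
request_url = url.format(text)   # leave the `url` template untouched
response = unirest.get(
    request_url,
    headers={"X-Mashape-Key": secret.MASHAPE_KEY, "Accept": "text/plain"},
)
```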
If the\n derivative is 0, returns (0.0,).\n Example:\n >>> poly = (-13.39, 0.0, 17.5, 3.0, 1.0)\n # x4 + 3.0x3 + 17.5x2 - 13.39\n >>> print compute_deriv(poly)\n # 4.0x3 + 9.0x2 + 35.0x\n (0.0, 35.0, 9.0, 4.0)\n poly: tuple of numbers, length > 0\n returns: tuple of numbers\n \"\"\"\n\n return tuple([x * i for i, x in enumerate(poly)])[1:]\n\nif __name__ == '__main__':\n assert compute_deriv((-13.39, 0.0, 17.5, 3.0, 1.0)) == (0.0, 35.0, 9.0, 4.0)\n print('Well done!')","sub_path":"ps2/ps2_derivative.py","file_name":"ps2_derivative.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"161366816","text":"# coding=utf-8\n\n# Copyright 2016 MoSeeker\n\nfrom tornado import gen\n\nimport conf.path as path\nfrom handler.base import BaseHandler\nfrom util.common import ObjectDict\nfrom util.common.decorator import handle_response\n\n\nclass LandingHandler(BaseHandler):\n \"\"\"\n 企业搜索页\n \"\"\"\n\n @handle_response\n @gen.coroutine\n def get(self):\n display_locale = self.get_current_locale()\n is_referral = self.params.is_referral\n search_seq = yield self.landing_ps.make_search_seq(self.current_user.company, self.params, self.locale, display_locale, is_referral)\n if is_referral:\n next_url = \"/m\" + path.POSITION_REFERRAL_LIST\n else:\n next_url = \"/m\" + path.POSITION_LIST\n\n self.logger.debug(\"[landing] search_seq: %s\" % search_seq)\n\n data = ObjectDict({\n \"logo\": self.static_url(self.current_user.company.logo),\n \"name\": self.current_user.company.get(\"abbreviation\"),\n \"image\": self.static_url(self.current_user.company.conf_search_img),\n \"search_seq\": search_seq,\n \"next_url\": next_url\n })\n\n yield self._make_share_info(self.current_user.company)\n\n self.render_page(template_name=\"company/dynamic_search.html\", data=data, meta_title=self.locale.translate(\"search_title\"))\n\n @gen.coroutine\n def _make_share_info(self, company_info):\n \"\"\"构建 share 内容\"\"\"\n\n cover = self.share_url(company_info.logo)\n title = \"{}高级搜索\".format(company_info.abbreviation)\n description = \"\"\n\n link = self.make_url(\n path.SEARCH_FILITER,\n self.params,\n recom=self.position_ps._make_recom(self.current_user.sysuser.id),\n escape=[\"pid\"]\n )\n\n self.params.share = ObjectDict({\n \"cover\": cover,\n \"title\": title,\n \"description\": description,\n \"link\": link\n })\n","sub_path":"handler/platform/landing.py","file_name":"landing.py","file_ext":"py","file_size_in_byte":1944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"24388042","text":"import numpy as np\nfrom statistics import mean, mode\nfrom numpy import linalg as LA\nimport matplotlib.pyplot as plt\nimport math\n\n\n\nTrain = open('G:/My Drive/SCHOOL/School/CSCI 5521/HW2/resources/optdigits_train.txt', 'r').read().split()\nTest = open('G:/My Drive/SCHOOL/School/CSCI 5521/HW2/resources/optdigits_test.txt', 'r').read().split()\n\nTrainD = list()\nTrainR = list()\n\nTestD = list()\nTestR = list()\n\nfor i in range(len(Train)):\n Train[i] = np.array(Train[i].split(\",\"), dtype=float)\n TrainD.append(Train[i][0:64])\n TrainR.append(Train[i][64])\n \nfor i in range(len(Test)):\n Test[i] = np.array(Test[i].split(\",\"), dtype=float)\n TestD.append(Test[i][0:64])\n TestR.append(Test[i][64])\n\n\nTrainD_mean = list()\nfor i in range(len(np.transpose(TrainD))):\n TrainD_mean.append(mean(np.transpose(TrainD)[i]))\n\nTrainD_mean = np.transpose(TrainD_mean)\n\ns = 
np.cov(np.transpose(TrainD-np.transpose(TrainD_mean)))\n\neigenvalues, components = LA.eig(s)\n\n\neigenvalues = np.sort(eigenvalues, axis=None)[::-1] \n# eigenvalues = eigenvalues/sum(eigenvalues)\n\neigV = list()\nfor i in components:\n eigV.append(i[0:2]) \n\nZtrain = np.transpose(np.transpose(eigV) @ np.transpose(TrainD))\nZtest = np.transpose(np.transpose(eigV) @ np.transpose(TestD))\n\n\ncolors = ['red','sandybrown','navy','c','purple','pink','Aqua', 'green','blue','orange']\n\nfor i in range(len(Ztrain)):\n plt.scatter(Ztrain[i,0], -Ztrain[i,1], TrainR[i], color=[colors[int(TrainR[i])]])\n \nfor i in range(len(Test)):\n plt.text(Ztrain[i,0], -Ztrain[i,1], str(int(TrainR[i])))\nplt.show() \n\n\nfor i in range(len(Ztest)):\n plt.scatter(Ztest[i,0], -Ztest[i,1], TrainR[i], color=[colors[int(TrainR[i])]])\n \nfor i in range(len(Ztest)):\n plt.text(Ztest[i,0], -Ztest[i,1], str(int(TestR[i])))\nplt.show() \n\n\n\n# inc = [0.0] * 64\n# inc[0] = eigenvalues[0]\n# for i in range(1, len(eigenvalues)):\n# inc[i] = eigenvalues[i] + inc[i-1]\n \n\n# plt.plot(inc)\n# plt.xlabel('Eigenvector')\n# plt.ylabel('Proportion of Variance')\n\n\n# MinK = 0;\n# for i in range(len(inc)):\n# if (inc[i] >= 0.9):\n# MinK = i + 1;\n# break\n\n\n# print(\"Minimum number of eigenvectors that explain at least 90% of the variance = {}\".format(MinK))\n\n\n# TN_mean = list()\n# for i in range(len(np.transpose(TN))):\n# TN_mean.append(mean(np.transpose(TN)[i]))\n\n# TT_mean = list()\n# for i in range(len(np.transpose(TT))):\n# TT_mean.append(mean(np.transpose(TT)[i]))\n\n# components_k = list()\n# for i in range(len(components)):\n# components_k.append(components[i][0:21])\n\n# z_train = (np.transpose(np.transpose(components_k) @ np.transpose((TN - np.transpose(TN_mean))))).tolist()\n# z_test = (np.transpose(np.transpose(components_k) @ np.transpose((TT - np.transpose(TT_mean))))).tolist()\n\n\n# for i in range(len(z_train)):\n# z_train[i].append(Train[i][64])\n\n\n\n# for i in range(len(z_test)):\n# z_test[i].append(Test[i][64])\n\n\n# z_train = np.array(z_train)\n# z_test = np.array(z_test)\n\n\n# for k in range(1,8,2):\n \n# dist = 0.0\n# idx = [[0 for i in range(k)] for j in range(len(z_test))]\n \n# minVal = [[float(\"inf\") for i in range(k)] for j in range(len(z_test))]\n\n# for i in range(len(z_test)):\n# for j in range(len(z_train)):\n# dist = np.linalg.norm(z_train[j][0:21] - z_test[i][0:21])\n \n# for l in range(k):\n# if dist < minVal[i][l]:\n# idx[i][l] = z_train[j][21]\n# minVal[i][l] = dist\n# break\n \n# count = 0\n# for i in range(len(z_test)):\n# if mode(idx[i]) != z_test[i][21]:\n# count+=1\n\n# E = count/len(z_test)\n\n# print(\"k = {} - Error Rate: {}\".format(k, E))\n \n# plt.show() \n","sub_path":"HW4/code/script_2c.py","file_name":"script_2c.py","file_ext":"py","file_size_in_byte":3745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"440871357","text":"from setuptools import find_packages, setup\n\nEXTRAS_REQUIRE = {\n 'tests': ['pytest', 'pytest-cov'],\n 'lint': ['flake8==3.7.7', 'flake8-isort==2.7.0', 'isort==4.3.20', 'black==19.3.b0'],\n}\nEXTRAS_REQUIRE['dev'] = EXTRAS_REQUIRE['tests'] + EXTRAS_REQUIRE['lint'] + ['tox']\n\n\ndef read(fname):\n \"\"\"Read the content of the file `fname`.\"\"\"\n with open(fname) as fp:\n content = fp.read()\n return content\n\n\nsetup(\n name='logparser',\n version='0.0.1',\n description='log parsing utility',\n long_description=read('README.md'),\n long_description_content_type='text/markdown',\n 
author='Szabolcs Antal',\n author_email='antalszabolcs01@gmail.com',\n url='https://github.com/kamforka/logparser',\n packages=find_packages(),\n extras_require=EXTRAS_REQUIRE,\n license='MIT',\n keywords='log parsing processing exporting',\n classifiers=[\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n ],\n python_requires='>=3.5',\n entry_points={'console_scripts': ['logparser=logparser.cli:main']},\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"464237622","text":"import os\nimport time\nimport pprint\nfrom data import init_dict_token_dex\nfrom orfeed import Orfeed\nfrom dotenv import load_dotenv\nload_dotenv()\nimport web3\nfrom web3 import Web3\nfrom tqdm import tqdm\n\ndef getTokenToTokenPrice(orfeed_i, tokenSrc, tokenDst, provider, amount=1, normalized=False):\n if normalized == True:\n res = orfeed_i.getExchangeRateNormalized(tokenSrc, tokenDst, provider)\n else:\n res = orfeed_i.getExchangeRate(tokenSrc, tokenDst, provider, amount)\n\n return {\n \"tokenSrc\" : tokenSrc,\n \"tokenDst\" : tokenDst,\n \"tokenPair\" : tokenSrc + '-' + tokenDst,\n \"provider\" : provider,\n \"price\" : res\n }\n\n\ndef getTokenToTokenPriceFeed(orfeed_i, threshold = 0, verbose = False):\n result = {}\n dict_token_dex = init_dict_token_dex()\n providers_list = [\"UNISWAPBYSYMBOLV2\", \"KYBERBYSYMBOLV1\"]\n for token in tqdm(dict_token_dex, desc=\"finding arbitrage\", total=len(dict_token_dex)):\n tmp_res = {}\n for provider in providers_list:\n buy = getTokenToTokenPrice(orfeed_i, \"ETH\", token, provider, amount=orfeed_i.w3.toWei('1', 'ether'))\n sell = getTokenToTokenPrice(orfeed_i, token, \"ETH\", provider, amount=buy[\"price\"])\n if not(buy[\"price\"] > 0 and sell[\"price\"] > 0):\n continue\n\n tmp_res[provider] = {\n \"buy_price_wei\": buy[\"price\"]/1e18,\n \"sell_price_wei\": sell[\"price\"]*buy[\"price\"]/(1e18*1e18),\n }\n\n if len(tmp_res.keys()) != 2:\n continue\n\n path = {\n \"one\": (tmp_res[providers_list[1]]['sell_price_wei'] - tmp_res[providers_list[0]]['buy_price_wei'])/tmp_res[providers_list[0]]['buy_price_wei'] * 100,\n \"two\": (tmp_res[providers_list[0]]['sell_price_wei'] - tmp_res[providers_list[1]]['buy_price_wei'])/tmp_res[providers_list[1]]['buy_price_wei'] * 100\n }\n if path[\"one\"] > path[\"two\"] and path[\"one\"] > 0: # buy at uniswap and sell at kyber\n result[token] = {\n \"buy_at\": providers_list[0],\n \"buy_price\": tmp_res[providers_list[0]][\"buy_price_wei\"],\n \"sell_at\": providers_list[1],\n \"sell_price\": tmp_res[providers_list[1]][\"sell_price_wei\"],\n \"%\": path[\"one\"]\n }\n elif path[\"two\"] > path[\"one\"] and path[\"two\"] > 0: # buy at kyber and sell at uniswap\n result[token] = {\n \"sell_at\": providers_list[0],\n \"sell_price\": tmp_res[providers_list[0]][\"sell_price_wei\"],\n \"buy_at\": providers_list[1],\n \"buy_price\": tmp_res[providers_list[1]][\"buy_price_wei\"],\n \"%\": path[\"two\"]\n }\n else:\n continue\n\n if verbose:\n print(\"\"+ token + ' : ' + result[token][\"buy_at\"] + \" -> \" + result[token][\"sell_at\"] + ' : ' + str(result[token][\"%\"]))\n pprint.pprint({token: result[token]})\n print(\"\")\n return result\n\ndef simple_getTokenToTokenPrice(orfeed_i, src_token, src_token_infos, 
dst_token, dst_token_infos):\n result = {}\n providers_list = [\"UNISWAPBYSYMBOLV2\", \"KYBERBYSYMBOLV1\"]\n tmp_res = {}\n for provider in providers_list:\n buy = getTokenToTokenPrice(orfeed_i, src_token, dst_token, provider, amount=10**src_token_infos['decimals'])\n sell = getTokenToTokenPrice(orfeed_i, dst_token, src_token, provider, amount=buy[\"price\"])\n if buy[\"price\"] > 0 and sell[\"price\"] > 0:\n tmp_res[provider] = {\n \"buy_price_wei\": buy[\"price\"]/(10**dst_token_infos['decimals']),\n \"sell_price_wei\": sell[\"price\"]*buy[\"price\"]/(10**(dst_token_infos['decimals'] + src_token_infos['decimals'])),\n }\n else:\n return -1\n\n path = {\n \"one\": (tmp_res[providers_list[1]]['sell_price_wei'] - tmp_res[providers_list[0]]['buy_price_wei'])/tmp_res[providers_list[0]]['buy_price_wei'] * 100,\n \"two\": (tmp_res[providers_list[0]]['sell_price_wei'] - tmp_res[providers_list[1]]['buy_price_wei'])/tmp_res[providers_list[1]]['buy_price_wei'] * 100\n }\n\n if path[\"one\"] > path[\"two\"] and path[\"one\"] > 0 and path[\"one\"] < 200: # buy at uniswap and sell at kyber\n result = {\n \"buy asset : \": dst_token,\n \"sell asset : \": src_token,\n \"buy_at\": providers_list[0],\n \"buy_price\": tmp_res[providers_list[0]][\"buy_price_wei\"],\n \"sell_at\": providers_list[1],\n \"sell_price\": tmp_res[providers_list[1]][\"sell_price_wei\"],\n \"%\": path[\"one\"]\n }\n elif path[\"two\"] > path[\"one\"] and path[\"two\"] > 0: # buy at kyber and sell at uniswap\n result = {\n \"buy asset : \": dst_token,\n \"sell asset : \": src_token,\n \"sell_at\": providers_list[0],\n \"sell_price\": tmp_res[providers_list[0]][\"sell_price_wei\"],\n \"buy_at\": providers_list[1],\n \"buy_price\": tmp_res[providers_list[1]][\"buy_price_wei\"],\n \"%\": path[\"two\"]\n }\n else:\n result = -1\n\n return result\n","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":5107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"174391909","text":"#!/usr/bin/env python3\nimport collections\nimport re\n\nimport data\nimport my\n\nwidth = 5\ncount = collections.Counter()\nfor station in my.stations:\n name = data.namefromstation[station]\n href = ''.join(re.split(r'\\W', name))\n count[href] += 1\n if count[href] > 1:\n href = '{}-{}'.format(href, count[href])\n text = ' '.join(re.findall(r'\\w+|\\S', name.replace('-', '–')))\n text = re.sub(r'([\\w])\\s+', r'\\1 ', text)\n print('

{1}

'.format(href, text))\n print('')\n rows = [(sorted(set(data.routesfromstop[stop]) & my.routeset), stop)\n for stop in data.stopsfromstation[station]]\n rows.sort(key=lambda row: sorted((not data.common(row[1],route), route) for route in row[0]))\n rows = [[(stop, route) for route in row] for (row, stop) in rows if row]\n for i in range(2):\n packedrows = []\n for row in rows:\n if packedrows and (\n i or\n (packedrows[-1][-1][1].isdigit() == row[0][1].isdigit())\n ) and len(packedrows[-1]) + len(row) <= width:\n packedrows[-1].extend(row)\n else:\n packedrows.append(row)\n rows = packedrows\n for row in rows:\n print('')\n for stop, route in row:\n print(''.format(\n data.feedidfromroute[route], data.stopidfromstop[stop + 'N'],\n route))\n for i in range(width - len(row)):\n print('')\n print('')\n print('')\n for stop, route in row:\n tdclass = 'route-{0}'.format(route) if data.common(\n stop, route) else 'route-{0}-dark'.format(route)\n print(''.format(tdclass, route))\n for i in range(width - len(row)):\n print('')\n print('')\n print('')\n for stop, route in row:\n print(''.format(\n data.feedidfromroute[route], data.stopidfromstop[stop + 'S'],\n route))\n for i in range(width - len(row)):\n print('')\n print('')\n print('
 
{} 
 
')\n","sub_path":"generate_main.py","file_name":"generate_main.py","file_ext":"py","file_size_in_byte":2301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"357506186","text":"\n\nfrom xai.brain.wordbase.nouns._anathema import _ANATHEMA\n\n#calss header\nclass _ANATHEMAS(_ANATHEMA, ):\n\tdef __init__(self,): \n\t\t_ANATHEMA.__init__(self)\n\t\tself.name = \"ANATHEMAS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"anathema\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_anathemas.py","file_name":"_anathemas.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"253122816","text":"from typing import Union, Type\n\nfrom flexmeasures.data.models.assets import AssetType, Asset, Power\nfrom flexmeasures.data.models.markets import MarketType, Market, Price\nfrom flexmeasures.data.models.weather import WeatherSensorType, WeatherSensor, Weather\n\n\ndef determine_asset_type_by_asset(\n generic_asset: Union[Asset, Market, WeatherSensor]\n) -> Union[AssetType, MarketType, WeatherSensorType]:\n if isinstance(generic_asset, Asset):\n return generic_asset.asset_type\n elif isinstance(generic_asset, Market):\n return generic_asset.market_type\n elif isinstance(generic_asset, WeatherSensor):\n return generic_asset.sensor_type\n else:\n raise TypeError(\"Unknown generic asset type.\")\n\n\ndef determine_asset_value_class_by_asset(\n generic_asset: Union[Asset, Market, WeatherSensor]\n) -> Type[Union[Power, Price, Weather]]:\n if isinstance(generic_asset, Asset):\n return Power\n elif isinstance(generic_asset, Market):\n return Price\n elif isinstance(generic_asset, WeatherSensor):\n return Weather\n else:\n raise TypeError(\"Unknown generic asset type.\")\n","sub_path":"flexmeasures/data/models/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"216111998","text":"# coding: utf-8\n\n\"\"\"\nSuppose you have some texts of news and know their categories.\nYou want to train a system with this pre-categorized/pre-classified\ntexts. 
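The two functions in the flexmeasures utils record above walk identical isinstance chains; a single lookup table keeps the type-to-attribute and type-to-value-class mappings in sync if more asset kinds are added. A sketch using the names imported in that module (this assumes, as the chains themselves do, that the three classes are unrelated, so iteration order does not matter):

```python
# Asset, Market, WeatherSensor, Power, Price, Weather as imported in
# flexmeasures/data/models/utils.py above.
_DISPATCH = {
    Asset: (lambda a: a.asset_type, Power),
    Market: (lambda a: a.market_type, Price),
    WeatherSensor: (lambda a: a.sensor_type, Weather),
}

def determine_type_and_value_class(generic_asset):
    for cls, (get_type, value_class) in _DISPATCH.items():
        if isinstance(generic_asset, cls):
            return get_type(generic_asset), value_class
    raise TypeError("Unknown generic asset type.")
```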
So, you have better call this data your training set.\n\"\"\"\n\n# You need to train the system passing each text one by one to the trainer module.\nnewsSet = [\n {'text': 'Bonjour', 'category': 'hello'},\n {'text': 'Salut', 'category': 'hello'},\n {'text': 'Yo', 'category': 'hello'},\n {'text': 'wesh', 'category': 'hello'},\n {'text': 'hello', 'category': 'hello'},\n {'text': 'Bonjour, je cherche un film à voir ce soir', 'category': 'hello'},\n\n {'text': 'Qui est tu ?', 'category': 'philo'},\n {'text': 'A quoi tu sers ?', 'category': 'philo'},\n {'text': 'A quoi sers tu ?', 'category': 'philo'},\n {'text': \"qui t'es\", 'category': 'philo'},\n {'text': 'quelle age as tu ?', 'category': 'philo'},\n {'text': 'quelle age as tu', 'category': 'philo'},\n {'text': \"t'as quel age ?\", 'category': 'philo'},\n {'text': 'Quelle est le sens de la vie ?', 'category': 'philo'},\n {'text': 'qui suis je', 'category': 'philo'},\n {'text': \"Comment tu t'appelle ?\", 'category': 'philo'},\n \n {'text': 'Je cherche un film', 'category': 'question'},\n {'text': 'je ne sais pas quoi regarder comme film ce soir', 'category': 'question'},\n {'text': 'je ne sais pas quoi regarder comme film ce soir', 'category': 'question'},\n {'text': 'je cherche un film à voir ce soir', 'category': 'question'},\n\n {'text': 'ta gueule', 'category': 'insulte'},\n {'text': 'fils de pute', 'category': 'insulte'},\n {'text': 'connard', 'category': 'insulte'},\n {'text': 'salaud', 'category': 'insulte'},\n {'text': 'encule', 'category': 'insulte'},\n {'text': 'enfoire', 'category': 'insulte'},\n\n {'text': 'Oui', 'category': 'yes'},\n {'text': 'yes', 'category': 'yes'},\n {'text': 'Ouaip', 'category': 'yes'},\n {'text': 'Ouais', 'category': 'yes'},\n\n {'text': 'Je veux voir un autre film', 'category': 'question2'},\n {'text': 'non je ne veux pas le revoir', 'category': 'question2'},\n {'text': \"j'ai pas vraiment envie d'aller voir ce film\", 'category': 'question2'},\n {'text': \"je veux voir un films que je n'ai jamais vu\", 'category': 'question2'},\n\n {'text': 'non', 'category': 'no'},\n {'text': 'nop', 'category': 'no'},\n {'text': 'nan', 'category': 'no'},\n\n {'text': 'Aurevoir', 'category': 'tiao'},\n {'text': 'Merci, bonne journée !', 'category': 'tiao'},\n {'text': 'Top merci pour votre aide', 'category': 'tiao'},\n]","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":2548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"507665067","text":"#!/usr/bin/env python3\n\n\"\"\"Create a progress bar for applications that can keep track of a\ndownload in progress. 
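Before training anything on the newsSet data above, a quick tally of examples per category is worth running — frequency-based classifiers are sensitive to class imbalance, and the categories here are of very different sizes:

```python
from collections import Counter

# Count training examples per category in newsSet (defined above).
counts = Counter(example['category'] for example in newsSet)
print(counts.most_common())
```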
The progress bar will be on a separate thread\nand will communicate with the main thread using delegates.\"\"\"\n\nimport threading\nfrom time import sleep\nimport requests\n\n\ndownloaded = 0\n\n\ndef download(url, filename):\n \"\"\"Downloads the specified URL into filename.\"\"\"\n global downloaded\n req = requests.get(url, stream=True)\n with open(filename, 'wb') as file:\n for chunk in req.iter_content(chunk_size=128):\n file.write(chunk)\n downloaded += 128\n\n\ndef get_size(url):\n \"\"\"Checks the remote file size.\"\"\"\n req = requests.head(url)\n return int(req.headers['Content-Length'])\n\n\ndef check_progress(url):\n \"\"\"Prints the download progress.\"\"\"\n size = get_size(url)\n\n done = 0\n while done < size:\n done = downloaded\n progress = int(done * 100 / size)\n hashes = '#' * int(progress / 2)\n print('Progress: {:s} ({:d}%)'.format(hashes, progress),\n end='\\r', flush=True)\n sleep(1)\n print('')\n\n\ndef download_with_progress(url, filename):\n \"\"\"\n Runs two threads: one to download and another to observe and print\n out the progress.\n \"\"\"\n\n threads = []\n thread1 = threading.Thread(target=download, args=(url, filename))\n threads.append(thread1)\n thread2 = threading.Thread(target=check_progress, args=(url,))\n threads.append(thread2)\n\n thread1.start()\n thread2.start()\n\n\ndef main():\n \"\"\"main function\"\"\"\n\n domain_name = 'distfiles.gentoo.org'\n query_path = '/releases/amd64/autobuilds/20170629/'\n filename = 'install-amd64-minimal-20170629.iso'\n url = 'http://{d}/{q}/{f}'.format(\n d=domain_name,\n q=query_path,\n f=filename\n )\n\n download_with_progress(url, filename)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"python/threading/progressbar_2.py","file_name":"progressbar_2.py","file_ext":"py","file_size_in_byte":1887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"97971139","text":"# -*- coding: utf-8 -*-\nimport scrapy\n\nclass Lnwshop_url_scraper(scrapy.Spider):\n name = 'Lnwshop_url_scraper'\n start_urls = [\n 'https://www.lnwshop.com/shop/all',\n ]\n\n def parse(self, response):\n for links in response.xpath('//div[@class=\"item_pin\"]'):\n yield {\n 'shop_url' : links.xpath(\"a/@href\").extract()\n }\n\n next_page_url = response.xpath(\"//a[@class = 'tosakanth-action']\").re(r'href=\"(.+)\">\n and up to N steps\"\"\"\n xAve = np.zeros(N-3)\n x2Ave = np.zeros(N-3)\n r2Ave = np.zeros(N-3)\n # generate random numbers at one time\n rand_direction = np.random.randint(0, 4, (numWalker, N))\n for j in range(numWalker):\n x = 0\n y = 0\n for i in range(N):\n rwalker = rand_direction[j, i]\n if rwalker == 0:\n x = x + 1 # right\n elif rwalker == 1:\n x = x - 1 # left\n elif rwalker == 2:\n y = y + 1 # up\n else:\n y = y - 1 # down\n # first 3 steps are not averaged\n if i >= 3:\n xAve[i-3] += x\n x2Ave[i-3] += x**2\n r2Ave[i-3] += x**2 + y**2\n xAve /= numWalker\n x2Ave /= numWalker\n r2Ave /= numWalker\n return xAve, x2Ave, r2Ave\n\n# run 10000 parallel walkers for 100 steps\nx, x2, r2 = twodwalker(100, 10000)\nnn = np.linspace(4, 100, 97)\n\n# Plot \nplt.figure()\nplt.plot(nn, x, linewidth=2.0)\nplt.xlabel('$n$', fontsize = 20)\nplt.ylabel('$\\langle x_n \\\\rangle$', fontsize = 20)\nplt.savefig('2d_random_walk_xave.pdf')\nplt.figure()\nplt.plot(nn, x2, linewidth=2.0)\nplt.xlabel('$n$', fontsize = 20)\nplt.ylabel('$\\langle x_n^2 \\\\rangle$', fontsize = 20)\nplt.savefig('2d_random_walk_x2ave.pdf')\nplt.figure()\nplt.plot(nn, r2, linewidth=2.0)\nplt.xlabel('$n$', 
fontsize = 20)\nplt.ylabel('$\\langle r_n^2 \\\\rangle$', fontsize = 20)\nplt.savefig('2d_random_walk_r2ave.pdf')\n######################################################################\n#\n# The result is r^2 = 1.0 * t. For d dimensions, we have\n# r^2 = 2dDt, where D is the diffusion constant.\n# So D = 1.0 / (2*2) = 1/4 in this 2D random walk.\n#\n######################################################################\n\nplt.show()\n","sub_path":"python_and_cython/2d_random_walk.py","file_name":"2d_random_walk.py","file_ext":"py","file_size_in_byte":2054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"521858162","text":"from collections import deque\nfrom itertools import islice\n\nfrom algorithms.algorithm import Algorithm\n\n\nclass AStar(Algorithm):\n    def __init__(self):\n        super().__init__()\n\n    def add_to_frontier(self, node):\n        node.calculate_heuristic(node.depth)\n\n        # Insert before the first node with a larger heuristic value;\n        # the for/else appends when every node compares smaller.\n        for n, frontier_node in enumerate(self.frontier):\n            if frontier_node.heuristic_value > node.heuristic_value:\n                self.frontier.insert(n, node)\n                break\n        else:\n            self.frontier.append(node)\n","sub_path":"algorithms/astar.py","file_name":"astar.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"121327447","text":"### An instance object is different from the class object: assigning to an attribute of a created object turns that reference into an instance attribute, while operating on the class data changes the value seen by the objects created from the class\r\nclass C:\r\n    x = 0\r\n    def D(self):\r\n        print('the method was called')\r\na = C()\r\nb = C()\r\nc = C()\r\nc.x += 10\r\nprint('after operating on a single created object:', a.x, b.x, c.x)\r\nC.x += 100 ### operate on the class object (objects created afterwards change as well)\r\nd = C()\r\nprint('after operating on the class object:', a.x, b.x, c.x)\r\nprint('value of a newly created object:', d.x)\r\n\r\n### if a newly created attribute has the same name as a method of the class, it overrides the method!!\r\nd.D()\r\nd.D = 1\r\ntry:\r\n    d.D()\r\nexcept:\r\n    print(d.D)","sub_path":"类和对象/类,类对象和实例对象.py","file_name":"类,类对象和实例对象.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"12979400","text":"\"\"\"Make a program that filters a list of strings and returns a list with only your friends' names in it.\n\nIf a name has exactly 4 letters in it, you can be sure that it has to be a friend of yours! 
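The class-attribute lesson above becomes concrete once you look at __dict__: `c.x += 10` reads the class attribute but writes an instance attribute that shadows it. A minimal sketch:

```python
class C:
    x = 0

c = C()
c.x += 10            # reads C.x (0), then binds an *instance* attribute
print(C.x, c.x)      # 0 10 -> the class value is unchanged
print(c.__dict__)    # {'x': 10} -> the shadow lives on the instance
del c.x              # removing the instance attribute...
print(c.x)           # 0 -> ...exposes the class attribute again
```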
Otherwise, you can be sure he's not...\n\nEx: Input = [\"Ryan\", \"Kieran\", \"Jason\", \"Yous\"], Output = [\"Ryan\", \"Yous\"]\n\ni.e.\n\nfriend [\"Ryan\", \"Kieran\", \"Mark\"] `shouldBe` [\"Ryan\", \"Mark\"]\nNote: keep the original order of the names in the output.\"\"\"\n\n\ndef friend(x):\n\n out_friend = []\n for i in x:\n if len(i) < 5 and len(i) > 3:\n out_friend.append(i)\n return out_friend\n\n\nfriends = ['y99J', 'xGe1', '4ko', 'J', 'PoO3', 'Sblo']\n\nprint(friend(friends))\n\n# ['y99J', 'xGe1', '4ko', 'J', 'PoO3', 'Sblo'] should equal ['y99J', 'xGe1', 'PoO3', 'Sblo']\n","sub_path":"4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"219635935","text":"\"\"\"\nJobgurus spider created on the top of ATSSpider\n\nscrapy crawl jobgurus -a mining_job_id=9999 -a iteration=1 -a extract=1 -a url=\"http://www.jobgurus.com.ng/jobs\"\n\nSample URL:\n http://www.jobgurus.com.ng/jobs\n\"\"\"\n\nfrom zlib import crc32\nfrom scrapy.http import Request\nfrom scrapy.selector import Selector\n\nfrom brightcorp.base.atsspiders import ATSSpider\nfrom brightcorp.items import BrightcorpItemLoader\nfrom brightcorp.processors import ConvertDateString, NormalizedJoin, Prefix, Replace\n\n\nclass Jobgurus(ATSSpider):\n\n name = 'jobgurus'\n\n def parse(self, response):\n sel = Selector(response)\n for div in sel.xpath(\n '//div[@class=\"main-content-section\"]/div[contains(@class, \"job-post-panel\")]'\n ):\n job_url = div.xpath('.//h4/a/@href').extract()\n if job_url:\n yield Request(\n callback=self.parse_job_callback(),\n url=job_url[0]\n )\n\n next_page = sel.xpath(\n '//ul[@class=\"pagination\"]/li[@class=\"active\"]/following-sibling::li[1]/a/@href'\n ).extract()\n if next_page:\n yield Request(\n callback=self.parse,\n url=next_page[0]\n )\n\n def parse_job(self, response):\n \"\"\"\n Extract all required information.\n \"\"\"\n sel = Selector(response)\n\n loader = BrightcorpItemLoader(selector=sel)\n\n loader.add_xpath(\n 'title',\n '//div[@class=\"main-content-section\"]/h2[@class=\"page-title\"]/text()'\n )\n loader.add_xpath(\n 'location',\n '//li/label[contains(text(), \"Location\")]/../text()',\n NormalizedJoin(', ')\n )\n loader.add_value(\n 'referencenumber',\n str(crc32(response.url)),\n Prefix('%s-' % self.name)\n )\n loader.add_value('url', response.url)\n loader.add_xpath(\n 'description',\n '//div[@class=\"panel-body\"]/div[@id=\"job_content\"]/following-sibling::node()[following-sibling::div/a[contains(text(), \"Click here to Apply\")]]'\n )\n loader.add_xpath(\n 'jobtype',\n '//li/label[contains(text(), \"Job Type:\")]/../text()'\n )\n loader.add_xpath(\n 'company',\n '//li/label[contains(text(), \"Company:\")]/../text()'\n )\n loader.add_xpath(\n 'jobcategory',\n '//li/label[contains(text(), \"Specialization:\")]/../text()'\n )\n loader.add_xpath(\n 'expiration_date',\n '//li/label[contains(text(), \"Deadline Date:\")]/../text()',\n Replace(r\"(\\d+)(st|nd|rd|th)\", r\"\\1\"),\n ConvertDateString('%d %B, %Y')\n )\n loader.add_xpath(\n 'date',\n '//li/label[contains(text(), \"Date Posted:\")]/../text()',\n Replace(r\"(\\d+)(st|nd|rd|th)\", r\"\\1\"),\n ConvertDateString('%d %B, %Y')\n )\n loader.add_xpath(\n 'educationrequirements',\n '//li/label[contains(text(), \"Minimum Qualification:\")]/../text()'\n )\n loader.add_xpath(\n 'experiencerequirements',\n '//li/label[contains(@class, \"Experience:\")]/../text()'\n )\n loader.add_xpath(\n 'baseSalary',\n 
'//li/label[contains(@class, \"Salary Range:\")]/../text()'\n )\n loader.add_value('apply_url', response.url)\n\n yield loader.load_item()\n","sub_path":"brightcorp/brightcorp/spiders/jobgurus.py","file_name":"jobgurus.py","file_ext":"py","file_size_in_byte":3492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"455382711","text":"# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# [START gae_python37_app]\nfrom flask import Flask\nfrom flask import request\nfrom flask import redirect\nfrom flask import url_for\nfrom flask import render_template\nimport cgi\n\n\n# If `entrypoint` is not defined in app.yaml, App Engine will look for an app\n# called `app` in `main.py`.\napp = Flask(__name__)\n\nmonths = ['January',\n 'February',\n 'March',\n 'April',\n 'May',\n 'June',\n 'July',\n 'August',\n 'September',\n 'October',\n 'November',\n 'December']\n\ndef valid_month(month):\n controlmonth = month.capitalize()\n if controlmonth in months:\n return controlmonth\n else: return None\n\ndef escape_html(s):\n return cgi.escape(s,quote=True)\n\n#
\n# \nform = \"\"\"\n\n\n\n\n
\n\n\n
%(error)s
\n\n
\n\"\"\"\ndef strchange(error):\n return (form % {\"error\":error})\n\n# asdsad\n# @app.route('/', methods=['GET', 'POST']) \n# def postform(): \n# \"\"\"Return a friendly HTTP greeting.\"\"\"\n# return strchange('')\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef hello2():\n items = request.args.getlist('food')\n return render_template('form.html',items=items)\n\n@app.route('/thanks', methods=['GET', 'POST'])\ndef thanks():\n # reqforkerem = request.form.get('food')\n reqforkerem = request.form.getlist('foods')\n # foods = ['a','b','c']\n return render_template('thanks.html')\n # return render_template('thanks.html',foods=reqforkerem)\n\n \n# def postform2(): \n# if request.method == 'POST':\n# reqforkerem = request.form.get('month')\n# ret_val = valid_month(reqforkerem)\n# return ret_val\n\n\n # deneme = request\n # \"\"\"Return a friendly HTTP greeting.\"\"\"\n # return deneme\n\n\n\n\nif __name__ == '__main__':\n # This is used when running locally only. When deploying to Google App\n # Engine, a webserver process such as Gunicorn will serve the app. This\n # can be configured by adding an `entrypoint` to app.yaml.\n app.run(host='127.0.0.1', port=8080, debug=True)\n# [END gae_python37_app]\n","sub_path":"appengine/standard_python37/hello_world/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"354474822","text":"from django.contrib import admin\nfrom django.urls import path\nfrom apps.post import views\n# from apps.post.views import AddPost\n\nurlpatterns = [\n path('post/', views.posts, name='posts'),\n path('post/get//', views.comments, name='comments'),\n\n # path('postAdd//', AddPost.as_view()),\n path('hello', views.hello, name='hello'),\n path('chek_username', views.chek_username, name='chek_username'),\n path('check_email', views.check_email, name='check_email'),\n path('post/search/', views.search, name='search'),\n path('post/addlike//', views.addlike, name='addlike'),\n # path('post/addlike/', views.addlike, name='addlike')\n\n\n\n]\n","sub_path":"apps/post/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"280881524","text":"import scrapy\nfrom scrapy import Request\nfrom urllib.parse import urljoin\n\nclass IpAgentSpider(scrapy.Spider):\n name = 'ip_agent'\n allowed_domains = ['www.89ip.cn']\n start_urls = ['https://www.89ip.cn/index_1.html']\n page_url = None\n\n\n def parse(self, response):\n ip_list=response.xpath('//table[@class=\"layui-table\"]/tbody/tr')\n for elem in ip_list:\n ip_url=elem.xpath('normalize-space(./td[1]/text())').extract()\n port=elem.xpath('normalize-space(./td[2]/text())').extract()\n perators=elem.xpath('normalize-space(./td[3]/text())').extract()\n recording_time=elem.xpath('normalize-space(./td[4]/text())').extract()\n item = dict(\n ip_url=ip_url,\n port=port,\n perators=perators,\n recording_time=recording_time\n )\n yield item\n # 获取下一页url\n if item is not None:\n self.page_url = response.xpath('//*[@id=\"layui-laypage-1\"]/a[8]/@href').extract()\n # page_url 是一个数组\n else:\n self.page_url=None\n for next_url in self.page_url:\n\n yield Request(urljoin(\"https://www.89ip.cn\", next_url), 
callback=self.parse)\n\n","sub_path":"ip_agent_89ip/ip_agent_89ip/spiders/ip_agent.py","file_name":"ip_agent.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"478982853","text":"import altair as alt\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nfrom eda_analysis import eda_analysis as eda\n\n\ndef helper_create_data(n=500):\n \"\"\"\n Helper function for creating dataframe for testing\n\n Parameters\n -----------\n n: int (default value = 500)\n Number of rows to be generated for the dataframe\n\n Returns\n --------\n pandas.DataFrame\n Returns a dataframe to be used for testing\n\n Examples\n ---------\n >>> helper_create_data()\n \"\"\"\n N1 = list(np.random.exponential(3, n))\n N2 = list(np.random.normal(2, 2, n))\n N3 = list(np.random.normal(10, 3, n))\n N4 = list(np.random.exponential(2, n))\n C1 = list(np.random.binomial(1, 0.7, n))\n C2 = list(np.random.poisson(1, n))\n C3 = list(np.random.binomial(5, 0.4, n))\n a = ['cat', 'dog', 'lion']\n C4 = list(np.random.choice(a, n))\n df = pd.DataFrame({\n 'C1': C1,\n 'C2': C2,\n 'C3': C3,\n 'N1': N1,\n 'N2': N2,\n 'N3': N3,\n 'N4': N4,\n 'C4': C4\n })\n rows = list(np.random.randint(0, n, 20))\n cols = list(np.random.randint(0, 7, 5))\n df.iloc[rows, cols] = np.nan\n\n return df\n\n\ndef test_generate_report():\n \"\"\"\n Tests the generate_report function to make sure the outputs are correct.\n\n Returns\n --------\n None\n The test should pass and no asserts should be displayed.\n \"\"\"\n # Calling helper function to create data\n data = helper_create_data()\n cat_vars = ['C1', 'C2', 'C3', 'C4']\n num_vars = ['N1', 'N2', 'N3']\n\n # Positive test case: Checking whether the function runs properly or not\n assert eda.generate_report(data, cat_vars, num_vars), \\\n \"Expected True but False returned\"\n\n # Negative test case: Checking whether the function returns False\n # fr wrong output\n assert not eda.generate_report(data, cat_vars, \"String Input\"), \\\n \"Expected False but True returned\"\n\n\ndef test_describe_cat_var():\n \"\"\"\n Tests the describe_cat_var function to make sure the outputs are correct.\n\n Returns\n --------\n None\n The test should pass and no asserts should be displayed.\n \"\"\"\n # Calling helper function to create data\n data = helper_create_data()\n cat_vars = ['C1', 'C2', 'C3', 'C4']\n # Testing data frame exception\n x = [1, 2, 3]\n try:\n eda.describe_cat_var(x, cat_vars)\n assert False, 'Exception must be thorwn for this test case'\n except Exception as ex:\n assert \"The value of the argument 'dataframe' must be of type \" \\\n \"'pandas.DataFrame'\" == str(ex), 'Expected exception not thrown'\n\n # Testing categorical variable exception\n try:\n eda.describe_cat_var(data, x)\n assert False, 'Exception must be thorwn for this test case'\n except Exception as ex:\n assert \"The value of the argument 'cat_vars' must be\" \\\n \" a list of strings\" == str(ex), 'Expected exception not thrown'\n\n # Testing columns subset exception\n try:\n cols = ['Y1', 'Y2']\n eda.describe_cat_var(data, cols)\n assert False, 'Exception must be thorwn for this test case'\n except Exception as ex:\n assert \"The input categorical column names must belong\" \\\n \" to the dataframe\" == str(ex), 'Expected exception not thrown'\n\n # Testing non-zero input is being passed to n_col\n try:\n eda.describe_cat_var(data, cat_vars, 0)\n assert False, 'Exception must be thorwn for this test case'\n except Exception as 
ex:\n assert \"The value of the argument 'n_cols' must be a positive \" \\\n \"non zero integer\" == str(ex), 'Expected exception not thrown'\n\n # testing integer is passed to n_col\n try:\n eda.describe_cat_var(data, cat_vars, \"z\")\n assert False, 'Exception must be thorwn for this test case'\n except Exception as ex:\n assert \"The value of the argument 'n_cols' must be a positive\" + \\\n \" non zero integer\" == str(ex), 'Expected exception not thrown'\n\n # Testing type of returned value\n p = eda.describe_cat_var(data, cat_vars)\n assert isinstance(p, alt.vegalite.v3.api.VConcatChart), \\\n 'The function must return an altair plot'\n\n # Testing if the specified columns has been plotted or not\n p = eda.describe_cat_var(data, cat_vars)\n assert set(p.data.columns) == set(cat_vars), \\\n 'The specified categorical columns were not plotted'\n\n\ndef test_calc_cor():\n \"\"\"\n Tests the correlation function calc_cor to make sure the outputs are\n correctly rendering.\n\n Returns\n --------\n None\n The test should pass and no asserts should be displayed.\n \"\"\"\n\n data = helper_create_data()\n num_vars = [\"N1\", \"N2\", \"N3\"]\n chart = eda.calc_cor(data, num_vars)\n\n # Check the data in the correlation matrix to be between -1 and 1\n for i in range(0, len(chart.data)):\n assert chart.data.iloc[i, 2] >= -1, \\\n \"Out of range values: lower than -1\"\n assert chart.data.iloc[i, 2] <= 1, \\\n \"Out of range values: higher than 1\"\n\n # Tests if the first and last value is 1 since it correlates to itself\n assert chart.data.iloc[0, 2] == 1, \\\n \"The first value should be 1 because it is correlated to itself\"\n assert chart.data.iloc[-1, 2] == 1, \\\n \"The last value should be 1 because it is correlated to itself\"\n\n # Test that Var1 and Var2 are equal in the first and last row\n assert chart.data.iloc[0, 0] == chart.data.iloc[0, 1], \\\n \"The Var1 should equal Var2 in the first row\"\n assert chart.data.iloc[-1, 0] == chart.data.iloc[-1, 1], \\\n \"The Var1 should equal Var2 in the last row\"\n\n # Test if the axes are properly mapped to the correct field\n spec = chart.to_dict()\n assert spec[\"layer\"][1][\"encoding\"][\"x\"][\"field\"] == 'Var1', \\\n \"Plot x-axis should be mapped to Var1\"\n assert spec[\"layer\"][1][\"encoding\"][\"y\"][\"field\"] == 'Var2', \\\n \"Plot y-axis should be mapped to Var2\"\n\n # Tests if the plot type is correct\n assert \"altair\" in str(type(chart)), \"Plot type is not an Altair object\"\n\n # Tests the exception is correctly raised when columns are not numeric\n with pytest.raises(Exception) as e:\n assert eda.calc_cor(data, [\"C4\"])\n assert str(e.value) == \"Columns are not all numeric\"\n\n # Tests the exception is correctly raised when\n # 'columns are not numeric 'dataframe' is not the correct type.\n with pytest.raises(Exception) as e:\n assert eda.calc_cor([\"N1\"], [\"N1\"])\n assert str(e.value) == \"Input 'dataframe' is not a dataframe\"\n\n # Tests the exception is correctly raised when 'num_vars' is not a string.\n with pytest.raises(Exception) as e:\n assert eda.calc_cor(data, ['N1', 1])\n assert str(e.value) == \"The value of the argument 'num_vars' should be\" \\\n \" a list of strings.\"\n\n # Tests the exception is correctly raised when 'num_vars' is not a string\n with pytest.raises(Exception) as e:\n assert eda.calc_cor(data, 'N1')\n assert str(e.value) == \"The value of the argument 'num_vars' should be\" \\\n \" a list of strings.\"\n\n # Tests the exception is correctly raised when\n # elements in 'num_vars' 
are not unique.\n with pytest.raises(Exception) as e:\n assert eda.calc_cor(data, ['N1', 'N1'])\n assert str(e.value) == \"The elements in the argument 'num_vars' \" \\\n \"should be unique.\"\n\n # Test the Exception is correctly raised when 'num_vars' argument\n # is not a subset of the column names of the dataframe\n with pytest.raises(Exception) as e:\n assert eda.calc_cor(data, [\"N1\", \"abc\"])\n assert str(e.value) == \"The argument 'num_vars' should be a subset \" \\\n \"of the column names from the dataframe.\"\n # Generate test data from the helper function.\n\n\n# noinspection PyBroadException\ndef test_describe_na_value():\n \"\"\"\n Tests the test_describe_na_value function\n to make sure the outputs are correct.\n\n Returns\n --------\n None\n The test should pass and no asserts should be displayed.\n \"\"\"\n no_na_dataframe = pd.DataFrame({\"col_1\": [0, 2],\n \"col_2\": [0.5, 0.1],\n \"col_3\": [\"a\", \"b\"]})\n\n na_numerical_dataframe = pd.DataFrame({\"col_1\": [0, 2],\n \"col_2\": [np.nan, 0.1],\n \"col_3\": [\"a\", \"b\"]})\n\n na_categorical_dataframe = pd.DataFrame({\"col_1\": [0, 2],\n \"col_2\": [0.5, 0.1],\n \"col_3\": [np.nan, \"b\"]})\n\n not_a_dataframe = [[0, 2],\n [0.5, 0.1],\n [\"a\", \"b\"]]\n\n # Tests that the correct error message is displayed if a non-dataframe\n # object is passed to 'dataframe'.\n try:\n eda.describe_na_values(not_a_dataframe)\n except Exception:\n pass\n else:\n raise Exception(\"expected an Exception, but none were raised\")\n\n # Tests that the output for a none NA tibble is correct.\n assert isinstance(eda.describe_na_values(no_na_dataframe),\n pd.DataFrame)\n assert np.array_equiv(eda.describe_na_values(no_na_dataframe),\n pd.DataFrame([[1, 1],\n [1, 1],\n [1, 1]],\n index=no_na_dataframe.columns))\n\n # Tests that the output for a tibble with an NA value in numeric\n # columns is correct.\n assert isinstance(eda.describe_na_values(na_numerical_dataframe),\n pd.DataFrame)\n assert np.array_equiv(eda.describe_na_values(na_numerical_dataframe),\n pd.DataFrame([[1, 1],\n [0, 1],\n [1, 1]],\n index=na_numerical_dataframe.columns))\n\n # Tests that the output for a tibble with an NA value in categorical\n # columns is correct.\n assert isinstance(eda.describe_na_values(na_categorical_dataframe),\n pd.DataFrame)\n assert np.array_equiv(eda.describe_na_values(na_categorical_dataframe),\n pd.DataFrame([[1, 1],\n [1, 1],\n [0, 1]],\n index=na_categorical_dataframe.columns))\n\n\ndef test_describe_num_var():\n \"\"\"\n Tests the describe_num_var function to make sure the outputs are correct.\n\n Returns\n --------\n None\n The test should pass and no asserts should be displayed.\n \"\"\"\n # Generate test data from the helper function.\n test_data = helper_create_data()\n test_col = test_data['N1']\n\n # Test the results when the input is correct.\n num_col1 = ['N1', 'N2']\n summary, plot = eda.describe_num_var(test_data, num_col1)\n\n # Test if the statistical summary is correctly calculated.\n assert summary['N1'][0] == np.nanquantile(test_col, 0.25), \\\n \"25% quantile is not correctly calculated.\"\n assert summary['N1'][1] == np.nanquantile(test_col, 0.75), \\\n \"75% quantile is not correctly calculated.\"\n assert summary['N1'][2] == np.nanmin(test_col), \\\n \"Minimal value is not correctly calculated.\"\n assert summary['N1'][3] == np.nanmax(test_col), \\\n \"Maximal value is not correctly calculated.\"\n assert summary['N1'][4] == np.nanmedian(test_col), \\\n \"Median value is not correctly calculated.\"\n assert 
summary['N1'][5] == np.nanmean(test_col), \\\n \"Mean value is not correctly calculated.\"\n assert summary['N1'][6] == np.nanstd(test_col), \\\n \"Standard deviation is not correctly calculated.\"\n\n # Test the plot type is correct.\n assert \"altair\" in str(type(plot)), \"Plot type is not an Altair object\"\n assert plot.to_dict()['hconcat'][0]['mark']['type'] == 'bar',\\\n \"The plot should be a bar chart.\"\n\n # Test the data of the plot is correct.\n assert len(plot.data) == len(test_data), \\\n 'Some rows are missing when plotting'\n\n num_col2 = ['N1', 'N2', 'N3', 'N4']\n summary, plot1 = eda.describe_num_var(test_data, num_col2)\n\n # Testing if the specified columns has been plotted or not\n assert set(plot1.data.columns) == set(num_col2), \\\n 'The specified numerical columns were not plotted'\n\n # Test the Exception is correctly raised when the type of `dataframe`\n # argument is wrong.\n with pytest.raises(Exception) as e:\n assert eda.describe_num_var('abc', ['N1', 'N2'])\n assert str(e.value) == \"The value of the argument 'dataframe'\" \\\n \" should be of type pandas dataframe.\"\n\n # Test the Exception is correctly raised when the type of `num_vars`\n # argument is wrong.\n with pytest.raises(Exception) as e:\n assert eda.describe_num_var(test_data, ['N1', 1])\n assert str(e.value) == \"The value of the argument 'num_vars' \" \\\n \"should be a list of strings.\"\n\n # Test the Exception is correctly raised when the type of `num_vars`\n # argument is wrong.\n with pytest.raises(Exception) as e:\n assert eda.describe_num_var(test_data, 'N1')\n assert str(e.value) == \"The value of the argument 'num_vars' \" \\\n \"should be a list of strings.\"\n\n # Test the Exception is correctly raised when the elements in\n # `num_vars` argument are not unique.\n with pytest.raises(Exception) as e:\n assert eda.describe_num_var(test_data, ['N1', 'N1'])\n assert str(e.value) == \"The elements in the argument 'num_vars' \" \\\n \"should be unique.\"\n\n # Test the Exception is correctly raised when `num_vars` argument\n # is not a subset of\n # the column names of the dataframe.\n with pytest.raises(Exception) as e:\n assert eda.describe_num_var(test_data, ['N1', 'abc'])\n assert str(e.value) == \"The argument 'num_vars' should be \" \\\n \"a subset of the column names from the dataframe.\"\n\n # Test the Exception is correctly raised when `num_vars` argument\n # contains categorical columns of the dataframe.\n with pytest.raises(Exception) as e:\n assert eda.describe_num_var(test_data, ['N1', 'C4'])\n assert str(e.value) == \"Only numeric columns expected,\" \\\n \" please check the input.\"\n","sub_path":"tests/test_eda_analysis.py","file_name":"test_eda_analysis.py","file_ext":"py","file_size_in_byte":14585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"188170750","text":"\"\"\"Combinatorix\"\"\"\nimport os\nfrom operator import itemgetter\nfrom time import time\n\n\nclass CombinatorixException(Exception):\n pass\n\n\nclass ParseFailure(CombinatorixException):\n\n def __init__(self, stream):\n self.stream = stream\n\n\nclass EndOfStream(ParseFailure):\n # Here we inherit ParseFailure to streamline the code\n # and avoid to check everywhere for EndOfStream (EOS).\n # Instead, if EOS is not expected it's considered an error.\n pass\n\n\nEND_OF_STREAM = object()\n\n\nclass Stream(object):\n\n def __init__(self, string, position=0):\n self.string = string\n self.position = position\n\n def next(self):\n return 
type(self)(self.string, self.position + 1)\n\n    def peek(self):\n        try:\n            return self.string[self.position]\n        except IndexError:\n            raise EndOfStream(self)\n\n    def __repr__(self):\n        return '<Stream %s>' % self.position\n\n# combinators\n\ndef each(*parsers):\n    \"\"\"Return a closure that takes as argument a ``Stream``\n    that will be parsed by each of the ``parsers``\n    in sequence.\"\"\"\n    def closure(stream):\n        out = list()\n        for parser in parsers:\n            item, stream = parser(stream)\n            out.append(item)\n        return out, stream\n    return closure\n\n\ndef either(*parsers):\n    \"\"\"Return a closure that takes as argument a ``Stream``\n    that will be parsed by the first parser of ``parsers``\n    that succeeds.\"\"\"\n    def closure(stream):\n        for parser in parsers:\n            try:\n                out, stream = parser(stream)\n            except ParseFailure:\n                continue\n            else:\n                break\n        # if no parser succeeds, the combinator fails\n        else:\n            raise ParseFailure(stream)\n        return out, stream\n    return closure\n\n\ndef zero_or_more(parser):\n    \"\"\"Return a closure that takes as argument a ``Stream``\n    that will be parsed by ``parser`` *zero or more* times\n\n    .. warning: this can run forever!\n    \"\"\"\n\n    def closure(stream):\n        out = list()\n        while True:\n            try:\n                item, stream = parser(stream)\n            except ParseFailure:\n                return out, stream\n            else:\n                out.append(item)\n\n    return closure\n\n\ndef one_or_more(parser):\n    \"\"\"Return a closure that takes as argument a ``Stream``\n    that will be parsed by ``parser`` *one or more* times\n\n    .. warning: this can run forever!\n    \"\"\"\n    def closure(stream):\n        combined = each(parser, zero_or_more(parser))\n        out, stream = combined(stream)\n        # linearize the output\n        head, tail = out\n        tail.insert(0, head)\n        return tail, stream\n    return closure\n\n\ndef when(predicate, parser):\n    \"\"\"Return a closure that takes as argument a ``Stream``\n    that will be parsed using `parser` only if `predicate` succeeds.\n\n    `predicate` doesn't consume the stream.\n    \"\"\"\n    def closure(stream):\n        if predicate(stream.peek()):\n            return parser(stream)\n        else:\n            raise ParseFailure(stream)\n    return closure\n\n\ndef otherwise(predicate, parser):\n    \"\"\"Return a closure that takes as argument a ``Stream``\n    that will be parsed using `parser` only if `predicate` fails.\n\n    `predicate` doesn't consume the stream.\n    \"\"\"\n    def closure(stream):\n        try:\n            predicate(stream)\n        except ParseFailure:\n            return parser(stream)\n        else:\n            raise ParseFailure(stream)\n\n    return closure\n\n# parsers\n\n\ndef nop(stream):\n    return '', stream.next()\n\n\ndef anything(stream):\n    return stream.peek(), stream.next()\n\n\ndef space(stream):\n    char = stream.peek()\n    if char.isspace():\n        return char, stream.next()\n    else:\n        raise ParseFailure(stream)\n\n\ndef is_end_of_stream(stream):\n    \"\"\"Parser that succeeds if the stream is fully consumed\"\"\"\n    try:\n        stream.peek()\n    except EndOfStream:\n        return True\n    else:\n        return False\n\n\ndef char(char):\n    def parser(stream):\n        other = stream.peek()\n        if other == char:\n            return char, stream.next()\n        else:\n            raise ParseFailure(stream)\n    return parser\n\n\ndef lift(func, parser):\n    \"\"\"Apply ``func`` on the output of ``parser``\"\"\"\n    def pseudoparser(stream):\n        out, stream = parser(stream)\n        return func(out), stream\n    return pseudoparser\n\n\ndef string(string):\n    def parser(stream):\n        parser = each(*map(char, list(string)))\n        out, stream = parser(stream)\n        return ''.join(out), stream\n    return parser\n\n\ndef combinatorix(string, parser):\n    stream = Stream(string)\n    out, stream = parser(stream)\n    return out, 
stream\n\n# sexp parser\n\n\ndef pk(*args):\n print(*args)\n return args[-1]\n\n\nSPACE = {'space'}\n\n\ndef const(value):\n def wrapper(*args):\n return value\n return wrapper\n\n\ndef isspace(char):\n return char.isspace()\n\n\nparse_whitespace = lift(const(SPACE), when(isspace, anything))\n\n\ndef to_string(x):\n return '\"' + ''.join(x[1]) + '\"'\n\n\nparse_string = lift(to_string,\n each(char('\"'),\n zero_or_more(\n either(\n lift(itemgetter(1), string('\\\\\\\"')),\n otherwise(char('\"'), anything))),\n char('\"')))\n\n\nparse_boolean = either(\n lift(const(True), string('#t')),\n lift(const(False), string('#f')))\n\n\ndef is_symbol(char):\n if char.isspace():\n return False\n elif char in {'(', ')', '[', ']', '{', '}'}:\n return False\n else:\n return True\n\ndef to_symbol(x):\n return ''.join(x)\n\nparse_symbol = lift(to_symbol, one_or_more(when(is_symbol, anything)))\n\n\ndef isdigit(x):\n return x.isdigit()\n\n\ndef to_float(a):\n return int(''.join(a[0])) / int(''.join(a[2]))\n\n\nparse_rational = lift(to_float,\n each(one_or_more(when(isdigit, anything)),\n char('/'),\n one_or_more(when(isdigit, anything))))\n\ndef toint(out):\n return int(''.join(out))\n\n\nparse_number = lift(toint, one_or_more(when(isdigit, anything)))\n\n\ndef nospace(x):\n return [e for e in x if e is not SPACE]\n\n\ndef parse_sexp(stream):\n parser = lift(itemgetter(1),\n each(char('('),\n lift(nospace,\n zero_or_more(\n either(\n parse_sexp,\n parse_boolean,\n parse_rational,\n parse_number,\n parse_string,\n parse_symbol,\n parse_whitespace))),\n char(')')))\n return parser(stream)\n\n\nparse_ignored = lift(const(SPACE),\n zero_or_more(\n either(char('\\n'),\n each(char(';'),\n zero_or_more(\n otherwise(char('\\n'), anything)\n ),\n char('\\n')))))\n\nsexp = either(lift(itemgetter(1), each(parse_ignored, parse_sexp)),\n parse_ignored)\n\n\nif __name__ == '__main__':\n with open('../fixture.scm') as f:\n fixture = f.read()\n ROUNDS = int(os.environ['ROUNDS'])\n # XXX: we use no \"trick\" to compute the elapsed time since no\n # trick is used in scheme\n start = time()\n for i in range(ROUNDS):\n stream = Stream(fixture)\n while not is_end_of_stream(stream):\n out, stream = sexp(stream)\n print(time() - start)\n","sub_path":"cpython-cx-read/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"546616515","text":"import sys\nimport os\nimport re\nimport argparse\nimport pyexcel\nimport numpy\nfrom datetime import date, datetime\nimport pyexcel\nimport logging\nimport re\nimport demjson\nimport json\nimport copy\nimport LocationRange as lr\nimport LocationParser as lp\n\nlogging.basicConfig(format='%(name)s - %(levelname)s - %(message)s', level=logging.WARN)\n\n\nclass TimeSeriesRegion(object):\n def __init__(self, orientation='row', series_range=None, data_range=None, metadata_spec = None,\n time_coordinates=None, global_metadata=None, granularity=None, provenance=None):\n self.orientation = orientation\n self.series_range = series_range\n self.data_range = data_range\n self.granularity = granularity\n self.metadata_spec = metadata_spec\n self.time_coordinates = time_coordinates\n self.global_metadata = global_metadata\n self.provenance = provenance\n self.time_series = []\n\n def parse(self,data, sheet_name):\n metadata = self.parse_global_metadata(data, sheet_name)\n self.parse_ts(data, metadata)\n return self.time_series\n\n def data_to_string(self, data):\n if type(data) is 
unicode:\n            return data\n        if type(data) is str:\n            return unicode(data, errors='replace')\n        else:\n            return unicode(str(data), errors='replace')\n\n\n    def parse_global_metadata(self,data, sheet_name):\n        metadata = {}\n        for mdname, mdspec in self.global_metadata.iteritems():\n            if mdspec['source'] == 'sheet_name':\n                metadata[mdname] = sheet_name\n            elif mdspec['source'] == 'cell':\n                metadata[mdname] = data[mdspec['row']][mdspec['col']]\n            elif mdspec['source'] == 'const':\n                metadata[mdname] = mdspec['val']\n            else:\n                logging.warn(\"Unknown metadata source %s\", mdspec['source'])\n        metadata['provenance'] = self.provenance\n        return metadata\n\n    def parse_tsr_metadata(self,metadata,data,tsidx):\n        mds = self.metadata_spec\n        md_modes = {}\n        all_blank = True\n        for md_name in mds:\n            if mds[md_name]['mode'] == 'normal':\n                if mds[md_name]['source'] == 'cell':\n                    metadata[md_name] = data[mds[md_name]['loc'][0]][mds[md_name]['loc'][1]]\n                    if not self.is_blank(metadata[md_name]):\n                        all_blank = False\n                elif mds[md_name]['source'] == 'const':\n                    metadata[md_name] = mds[md_name]['val']\n                else:\n                    md_vals = []\n                    for idx in mds[md_name]['loc']:\n                        coords = self.orient_coords(tsidx, idx)\n                        val = self.data_to_string(data[coords[0]][coords[1]])\n                        md_vals.append(val)\n                        if not self.is_blank(val):\n                            all_blank = False\n                    metadata[md_name] = \" \".join(md_vals)\n            else:\n                md_modes[mds[md_name]['mode']] = True\n        # .get() tolerates specs that define no inline-mode metadata\n        if all_blank and not md_modes.get(\"inline\"):\n            raise IndexError(\"All metadata values blank\")\n        return md_modes\n\n    def parse_inline_tsr_metadata(self,metadata,data,dataidx):\n        mds = self.metadata_spec\n        for md_name in mds:\n            if mds[md_name]['mode'] == 'inline':\n                md_vals = []\n                for idx in mds[md_name]['loc']:\n                    coords = self.orient_coords(idx, dataidx)\n                    md_vals.append(self.data_to_string(data[coords[0]][coords[1]]))\n                metadata[md_name] = \" \".join(md_vals)\n\n    def orient_coords(self, tsidx, dataidx):\n        if self.orientation == 'row':\n            return (tsidx, dataidx)\n        else:\n            return (dataidx, tsidx)\n\n    def generate_time_label(self, data, d_idx):\n        time_labels = []\n        for tc in self.time_coordinates['locs']:\n            coords = self.orient_coords(tc, d_idx)\n            val = self.data_to_string(data[coords[0]][coords[1]])\n            if self.is_blank(val) and self.time_coordinates['mode'] == 'backfill':\n                t_idx = d_idx - 1\n                while t_idx > 0 and self.is_blank(val):\n                    coords = self.orient_coords(tc, t_idx)\n                    val = self.data_to_string(data[coords[0]][coords[1]])\n                    t_idx -= 1\n            time_labels.append(val)\n        time_label = \" \".join(time_labels)\n        if self.time_coordinates['post_process']:\n            func = eval('lambda v: ' + self.time_coordinates['post_process'])\n            time_label = func(time_label)\n        return time_label\n\n    def parse_ts(self, data, metadata):\n        self.time_series = []\n        for ts_idx in self.series_range:\n            timeseries = []\n            ts_metadata = copy.deepcopy(metadata)\n            ts_metadata['provenance'][self.orientation]=ts_idx\n\n            try:\n                md_modes = self.parse_tsr_metadata(ts_metadata, data, ts_idx)\n            except IndexError as ie:\n                if type(self.series_range.curr_component()) is lr.LocationRangeInfiniteIntervalComponent:\n                    logging.info(\"all blank metadata cells in infinite interval\")\n                    break\n                else:\n                    logging.error(\"metadata specification indexing error for time series index {}\".format(ts_idx))\n                    raise ie\n\n            inline_md_curr = {}\n            inline_md_prev = None\n\n            for d_idx in self.data_range:\n                time_label = ''\n                try:\n                    time_label = self.generate_time_label(data, d_idx)\n                except IndexError as ie:\n                    if type(self.data_range.curr_component()) is lr.LocationRangeInfiniteIntervalComponent:\n                        
break\n                    else:\n                        logging.error(\"metadata specification indexing error for data point index {}\".format(d_idx))\n                        raise ie\n\n                if type(self.data_range.curr_component()) is lr.LocationRangeInfiniteIntervalComponent and self.is_blank(time_label):\n                    logging.info(\"blank cell in infinite interval\")\n                    break\n\n                # if inline metadata has changed (in auto-detect mode)\n                # merge previous metadata\n                # output old time series\n                # re-initialize time series array\n                if 'inline' in md_modes:\n                    self.parse_inline_tsr_metadata(inline_md_curr, data, d_idx)\n                    if inline_md_prev:\n                        md_changed = False\n                        for md_name in inline_md_prev:\n                            if inline_md_curr[md_name] != inline_md_prev[md_name]:\n                                md_changed = True\n\n                        if md_changed:\n                            new_metadata = dict(ts_metadata)\n                            for md_name in inline_md_prev:\n                                new_metadata[md_name] = inline_md_prev[md_name]\n\n                            self.time_series.append({\n                                'metadata': new_metadata,\n                                'ts': timeseries\n                            })\n                            timeseries = []\n\n                    inline_md_prev = inline_md_curr\n                    inline_md_curr = {}\n\n                else:\n                    inline_md_prev = inline_md_curr\n                    inline_md_curr = {}\n\n                coords = self.orient_coords(ts_idx, d_idx)\n                timeseries.append((time_label,data[coords[0]][coords[1]]))\n\n            self.time_series.append(dict(metadata=ts_metadata, ts=timeseries))\n\n    def is_blank(self, data):\n        return len(data.strip()) == 0\n\n\nclass SpreadsheetAnnotation(object):\n    def __init__(self, annotation, fn):\n        self.locparser = lp.LocationParser()\n\n        self.properties = annotation['Properties']\n        self.sheet_indices = self.locparser.parse_range(annotation['Properties']['sheet_indices'])\n\n        self.metadata = self.parse_md(annotation['GlobalMetadata'])\n        self.provenance = dict(filename=fn)\n\n        self.timeseries_regions = []\n        for tsr in annotation['TimeSeriesRegions']:\n            self.timeseries_regions.append(self.parse_tsr(tsr))\n\n    def parse_md(self, md_json):\n        md_dict = {}\n        for mdspec in md_json:\n            mdname = mdspec['name']\n            md_dict[mdname] = {}\n            md_dict[mdname]['source'] = mdspec['source']\n            if mdspec['source'] == 'cell':\n                (md_dict[mdname]['row'], md_dict[mdname]['col']) = self.locparser.parse_coords(mdspec['loc'])\n            if mdspec['source'] == 'const':\n                md_dict[mdname]['val'] = mdspec['val']\n        return md_dict\n\n    def parse_tsr(self, tsr_json):\n        orientation = tsr_json['orientation']\n        series_range = None\n        if orientation == 'row':\n            series_range = self.locparser.parse_range(tsr_json['rows'])\n        else:\n            series_range = self.locparser.parse_range(tsr_json['cols'])\n\n        data_range = self.locparser.parse_range(tsr_json['locs'])\n\n        time_coords = {}\n        time_coords['locs'] = self.locparser.parse_range(tsr_json['times']['locs'])\n        if 'mode' in tsr_json['times']:\n            time_coords['mode'] = tsr_json['times']['mode']\n        else:\n            time_coords['mode'] = None\n\n        time_coords['post_process'] = tsr_json['times'].get('post_process')\n\n        mdspec = self.parse_tsr_metadata(tsr_json['metadata'], orientation)\n\n        return TimeSeriesRegion(orientation=orientation, series_range=series_range, data_range=data_range,\n                                metadata_spec=mdspec, time_coordinates=time_coords, global_metadata=self.metadata,\n                                provenance = self.provenance)\n\n    def parse_tsr_metadata(self, md_json, orientation):\n        md_dict = {}\n        reverse_orientation = {'row': 'col', 'col': 'row'}\n        for md_sec in md_json:\n            name = md_sec['name']\n            md_dict[name] = {}\n\n            if 'source' in md_sec:\n                md_dict[name]['source'] = md_sec['source']\n            else:\n                md_dict[name]['source'] = reverse_orientation[orientation]\n\n            loc = None\n            if 'loc' in md_sec:\n                loc = md_sec['loc']\n\n            if md_dict[name]['source'] == 'cell':\n                md_dict[name]['loc'] = 
self.locparser.parse_coords(loc)\n\n elif md_dict[name]['source'] == 'row':\n md_dict[name]['loc'] = self.locparser.parse_range(loc)\n\n elif md_dict[name]['source'] == 'col':\n md_dict[name]['loc'] = self.locparser.parse_range(loc)\n\n elif md_dict[name]['source'] == 'const':\n md_dict[name]['val'] = md_sec['val']\n\n if 'mode' in md_sec:\n md_dict[name]['mode'] = md_sec['mode']\n else:\n md_dict[name]['mode'] = 'normal'\n\n return md_dict\n\n\nclass ExtractSpreadsheet(object):\n\n def __init__(self, spreadsheet_fn, annotations_fn):\n self.normalized_source_file = os.path.basename(spreadsheet_fn)\n self.book = pyexcel.get_book(file_name=spreadsheet_fn, auto_detect_datetime=False)\n self.annotations = self.load_annotations(annotations_fn)\n\n def process(self):\n timeseries = []\n for annotation in self.annotations:\n ssa = SpreadsheetAnnotation(annotation, self.normalized_source_file)\n parsed = []\n for anidx in ssa.sheet_indices:\n sheet = self.book.sheet_by_index(anidx)\n data = sheet.to_array()\n for tsr in ssa.timeseries_regions:\n tsr.provenance['sheet']=anidx\n for parsed_tsr in tsr.parse(data, sheet.name):\n parsed.append(parsed_tsr)\n logging.debug(\"%s\",parsed)\n timeseries.append(parsed)\n return timeseries\n def load_annotations(self,annotations_fn):\n anfile = open(annotations_fn)\n annotations_decoded = demjson.decode(anfile.read(), return_errors=True)\n for msg in annotations_decoded[1]:\n if msg.severity == \"error\":\n logging.error(msg.pretty_description())\n return annotations_decoded[0]\n\ndef main():\n ap = argparse.ArgumentParser()\n ap.add_argument(\"annotation\", help='Annotation of time series in custom JSON format')\n ap.add_argument(\"spreadsheet\", help='Excel spreadsheet file')\n ap.add_argument(\"outfile\", help='file to write results')\n args = ap.parse_args()\n es = ExtractSpreadsheet(args.spreadsheet, args.annotation)\n timeseries = es.process()\n \n with open(args.outfile, 'w') as outfile:\n json.dump(timeseries, outfile)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"utilities/timeseries/extractSpreadsheet.py","file_name":"extractSpreadsheet.py","file_ext":"py","file_size_in_byte":12948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"144318174","text":"import requests\nfrom bs4 import BeautifulSoup\nimport time\nimport random\nimport csv\nfrom dreams_scrape import csv_reader\n\nwith open(\"all_city_pages.csv\", 'w', newline='') as f:\n csvWriter = csv.writer(f, delimiter=',')\n\n headers = [['url']]\n\n csvWriter.writerows(headers)\n\ndata = csv_reader(\"city_links.csv\")\n\nlinks = []\n\nfor link in data:\n links.append(link[0])\n\nfor link in links:\n wait = random.uniform(5, 15)\n print(\"Sleeping for {0} seconds.\".format(wait))\n time.sleep(wait)\n\n response = requests.get(link)\n soup = BeautifulSoup(response.text)\n\n city_links = []\n\n print(link)\n\n try:\n pages = soup.find_all('a', {'class': 'page_no'})\n page_list = []\n for page in pages:\n page = int(page.text)\n page_list.append(page)\n pages = max(page_list)\n except:\n pages = 1\n pages = range(1, pages + 1)\n\n for page in pages:\n page_link = link + \"/\" + str(page) \n city_links.append([page_link])\n\n with open(\"all_city_pages.csv\", 'a') as f:\n csvWriter = csv.writer(f, delimiter=',')\n\n for url in city_links:\n 
csvWriter.writerows([url])\n","sub_path":"city_link_scrape.py","file_name":"city_link_scrape.py","file_ext":"py","file_size_in_byte":1167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"616045128","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Feb 1 13:52:54 2023\r\n\r\n@author: thomas.grandjean@developpement-durable.gouv.fr\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nfrom functools import lru_cache\r\nimport os\r\n\r\nfrom pynsee.utils._request_insee import _request_insee\r\nfrom pynsee.utils._create_insee_folder import _create_insee_folder\r\nfrom pynsee.utils._hash import _hash\r\n\r\nimport logging\r\nlogger = logging.getLogger(__name__)\r\n\r\n\r\n@lru_cache(maxsize=None)\r\ndef get_descending_area(\r\n area: str,\r\n code: str,\r\n date: str = None,\r\n type: str = None,\r\n update: bool = False,\r\n):\r\n \"\"\"\r\n Get information about areas contained in a given area\r\n\r\n Args:\r\n area (str): case sensitive, area type, any of ('aireDAttractionDesVilles2020', 'arrondissement', 'collectiviteDOutreMer', 'commune', 'departement', 'region', 'uniteUrbaine2020', 'zoneDEmploi2020')\r\n\r\n code (str): area code\r\n\r\n type (str) : case insensitive, any of 'Arrondissement', 'Departement', 'Region', 'UniteUrbaine2020', 'ZoneDEmploi2020', ...\r\n\r\n date (str, optional): date used to analyse the data, format : 'AAAA-MM-JJ'. If date is None, by default the current date is used/\r\n\r\n update (bool): locally saved data is used by default. Trigger an update with update=True.\r\n\r\n Examples:\r\n >>> from pynsee.localdata import get_area_descending\r\n >>> df = get_descending_area(\"commune\", code='59350', date='2018-01-01')\r\n >>> df = get_descending_area(\"departement\", code='59', date='2018-01-01')\r\n >>> df = get_descending_area(\"zoneDEmploi2020\", code='1109')\r\n \"\"\"\r\n\r\n areas = {\r\n \"aireDAttractionDesVilles2020\",\r\n \"arrondissement\",\r\n \"collectiviteDOutreMer\",\r\n \"commune\",\r\n \"departement\",\r\n \"region\",\r\n \"uniteUrbaine2020\",\r\n \"zoneDEmploi2020\",\r\n }\r\n if area not in areas:\r\n msg = f\"area must be one of {areas} \" f\"- found '{area}' instead\"\r\n raise ValueError(msg)\r\n\r\n params_hash = [\"get_descending_area\", area, code, date, type]\r\n params_hash = [x if x else \"_\" for x in params_hash]\r\n filename = _hash(\"\".join(params_hash))\r\n insee_folder = _create_insee_folder()\r\n file_data = insee_folder + \"/\" + filename\r\n\r\n if (not os.path.exists(file_data)) or update:\r\n INSEE_localdata_api_link = \"https://api.insee.fr/metadonnees/V1/geo/\"\r\n\r\n api_link = INSEE_localdata_api_link + area + f\"/{code}/descendants?\"\r\n\r\n params = []\r\n if date is not None:\r\n params.append(f\"date={date}\")\r\n if type is not None:\r\n params.append(f\"type={type}\")\r\n\r\n api_link = api_link + \"&\".join(params)\r\n\r\n request = _request_insee(\r\n api_url=api_link, file_format=\"application/json\"\r\n )\r\n\r\n try:\r\n data = request.json()\r\n\r\n list_data = []\r\n\r\n for i in range(len(data)):\r\n df = pd.DataFrame(data[i], index=[0])\r\n list_data.append(df)\r\n\r\n data_final = pd.concat(list_data).reset_index(drop=True)\r\n\r\n data_final.to_pickle(file_data)\r\n print(f\"Data saved: {file_data}\")\r\n\r\n except Exception:\r\n logger.error(\"No data found !\")\r\n data_final = None\r\n else:\r\n try:\r\n data_final = pd.read_pickle(file_data)\r\n except Exception:\r\n os.remove(file_data)\r\n data_final = get_descending_area(\r\n area=area, 
code=code, date=date, type=type, update=True\r\n            )\r\n        else:\r\n            logger.info(\r\n                \"Locally saved data has been used\\n\"\r\n                \"Set update=True to trigger an update\"\r\n            )\r\n\r\n    return data_final\r\n","sub_path":"pynsee/localdata/get_descending_area.py","file_name":"get_descending_area.py","file_ext":"py","file_size_in_byte":3735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"645884063","text":"import requests, re, sys\n\nif sys.version_info[0] == 3:\n    from urllib.request import urlretrieve\nelse:\n    from urllib import urlretrieve\nlinks = sys.argv[1:]\ndownloadlink = \"https://clients2.google.com/service/update2/crx?response=redirect&prodversion=47.0&x=id%3D{}%26installsource%3Dondemand%26uc\"\nextensionids = []\n# named group captures the extension id at the end of the store URL\nregexx = re.compile(\"http[s]?.+\\/(?P<lastpart>.+)\")\nrequests.packages.urllib3.disable_warnings()\nfor link in links:\n    extensionids.append(regexx.search(link).group(\"lastpart\"))\nfor extensionid in extensionids:\n    link = downloadlink.format(extensionid)\n    extensiondownload = requests.get(link,verify=False,headers={\"User-Agent\":\"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.73 Safari/537.36\"}).url\n    urlretrieve(extensiondownload,\"{}.crx\".format(extensionid))","sub_path":"downloader.py","file_name":"downloader.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"190054533","text":"# system imports\nimport json\n# Third party imports\nfrom twisted.web import resource\nfrom twisted.application import service, internet\nfrom twisted.web.server import Site\n\n\nclass Client(object):\n    \n    def __init__(self, name):\n        self.name = name\n        self.inbox = []\n\n    def spunge(self):\n        messages = self.inbox\n        self.inbox = []\n        return messages\n\n\nclass ChatRoom(resource.Resource):\n    isLeaf = True\n    clients = {}\n\n    def add_client(self, name):\n        if name not in self.clients:\n            self.clients[name] = Client(name)\n\n    def render_POST(self, request):\n        request.setHeader('Content-Type', 'application/json')\n        request.setHeader('Access-Control-Allow-Origin', '*')\n        request.setHeader('Access-Control-Allow-Methods', 'GET, POST')\n        request.setHeader('Access-Control-Allow-Headers', '*')\n        request.setHeader('Access-Control-Max-Age', 2520)\n\n        args = request.args\n        if not 'name' in args:\n            errMsg = 'Must provide a name'\n            return self.to_response(request, {'status': 'error', 'errMsg': errMsg})\n\n        name = args['name'][0]\n        self.add_client(name)\n\n        if 'message' in args:\n            self.queue_message(name, args['message'][0])\n        return self.to_response(request, {'status': 'success'})\n\n    def queue_message(self, name, message):\n        for c in self.clients:\n            self.clients[c].inbox.append((name, message))\n    \n    def render_GET(self, request):\n        request.setHeader('Content-Type', 'application/json')\n        args = request.args\n        if not 'name' in args:\n            errMsg = 'Must provide a name'\n            return self.to_response(request, {'status': 'error', 'errMsg': errMsg})\n        if 'callback' in args:\n            request.jsonpcallback = args['callback'][0]\n        \n        name = args['name'][0]\n        self.add_client(name)\n        return self.to_response(request, {'status': 'success', 'messages': self.clients[name].spunge()})\n    \n    def to_response(self, request, data):\n        response = json.dumps(data)\n        if hasattr(request, 'jsonpcallback'):\n            return request.jsonpcallback+'('+response+')'\n        else:\n            return response\n\nchat_room = ChatRoom()\nfactory = 
Site(chat_room)\nroot = resource.Resource()\nroot.putChild(\"\",chat_room)\napplication = service.Application(\"Chat Room\")\ninternet.TCPServer(8888, Site(root)).setServiceParent(application)\n","sub_path":"chatty/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"417375750","text":"class Solution(object):\n solution_count = 0\n\n def check(self, row, column, array, N):\n for i in range(row):\n if array[i][column] == 1:\n return False\n for j in range(column):\n if array[row][j] == 1:\n return False\n\n i = row - 1\n j = column - 1\n while i >= 0 and j >= 0:\n if array[i][j] == 1:\n return False\n\n i = i - 1\n j = j - 1\n\n i = row + 1\n j = column -1\n\n while i < N and j >= 0:\n if array[i][j] == 1:\n return False\n i = i + 1\n j = j - 1\n\n\n return True\n\n def findNQueens(self, current_index, N, array):\n if current_index >= N:\n self.solution_count += 1\n\n else:\n for k in range(N):\n if self.check(k, current_index, array, N):\n array[k][current_index] = 1\n self.findNQueens(current_index + 1, N, array)\n array[k][current_index] = 0\n\n def totalNQueens(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n array = [[0] * n for i in range(n)]\n self.findNQueens(0, n, array)\n return self.solution_count\n\ns = Solution()\nprint (s.totalNQueens(8))\n\n\n\n\n\n\n","sub_path":"NQueen.py","file_name":"NQueen.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"524659692","text":"import pandas as pd\nimport numpy as np\nimport warnings\nimport gc,os\nfrom time import time\nimport datetime,random\nfrom tqdm.notebook import tqdm\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import KFold, StratifiedKFold,GroupKFold\nfrom sklearn.cluster import KMeans\nfrom sklearn.decomposition import PCA\nfrom sklearn.feature_selection import VarianceThreshold\nfrom sklearn.preprocessing import QuantileTransformer\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data import Dataset,TensorDataset, DataLoader,RandomSampler\nfrom iterstrat.ml_stratifiers import MultilabelStratifiedKFold\nimport argparse\n\ndef Parse_args():\n args = argparse.ArgumentParser()\n args.add_argument('--input_dir',\n default='./data', help='input data path of dataset')\n args = args.parse_args()\n return args\n\nargs = Parse_args()\n\nwarnings.simplefilter('ignore')\n\nncompo_genes = 600\nncompo_cells = 50\n\ndef Seed_everything(seed=42):\n random.seed(seed)\n os.environ['PYTHONHASHSEED'] = str(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.backends.cudnn.deterministic = True\n\nSeed_everything(seed=42)\n\ndef Metric(labels,preds):\n labels = np.array(labels)\n preds = np.array(preds)\n metric = 0\n for i in range(labels.shape[1]):\n metric += (-np.mean(labels[:,i]*np.log(np.maximum(preds[:,i],1e-15))+(1-labels[:,i])*np.log(np.maximum(1-preds[:,i],1e-15))))\n return metric/labels.shape[1]\n\nfiles = ['%s/test_features.csv'%args.input_dir,\n '%s/train_targets_scored.csv'%args.input_dir,\n '%s/train_features.csv'%args.input_dir,\n '%s/train_targets_nonscored.csv'%args.input_dir,\n '%s/train_drug.csv'%args.input_dir,\n '%s/sample_submission.csv'%args.input_dir]\n\ntest = pd.read_csv(files[0])\ntrain_target = pd.read_csv(files[1])\ntrain = pd.read_csv(files[2])\ntrain_nonscored = 
pd.read_csv(files[3])\ntrain_drug = pd.read_csv(files[4])\nsub = pd.read_csv(files[5])\n\ngenes = [col for col in train.columns if col.startswith(\"g-\")]\ncells = [col for col in train.columns if col.startswith(\"c-\")]\n\nfeatures = genes + cells\ntargets = [col for col in train_target if col!='sig_id']\n\nori_train = train.copy()\nctl_train = train.loc[train['cp_type']=='ctl_vehicle'].append(test.loc[test['cp_type']=='ctl_vehicle']).reset_index(drop=True)\nctl_train2 = train.loc[train['cp_type']=='ctl_vehicle'].reset_index(drop=True)\n\nori_test = test.copy()\nctl_test = test.loc[test['cp_type']=='ctl_vehicle'].reset_index(drop=True)\n\ndef Feature(df):\n transformers={}\n for col in tqdm(genes+cells):\n transformer = QuantileTransformer(n_quantiles=100,random_state=0, output_distribution='normal')\n transformer.fit(df[:train.shape[0]][col].values.reshape(-1,1))\n df[col] = transformer.transform(df[col].values.reshape(-1,1)).reshape(1,-1)[0]\n transformers[col]=transformer\n gene_pca = PCA(n_components = ncompo_genes,\n random_state = 42).fit(df[genes])\n pca_genes = gene_pca.transform(df[genes])\n cell_pca = PCA(n_components = ncompo_cells,\n random_state = 42).fit(df[cells])\n pca_cells = cell_pca.transform(df[cells])\n pca_genes = pd.DataFrame(pca_genes, columns = [f\"pca_g-{i}\" for i in range(ncompo_genes)])\n pca_cells = pd.DataFrame(pca_cells, columns = [f\"pca_c-{i}\" for i in range(ncompo_cells)])\n df = pd.concat([df, pca_genes, pca_cells], axis = 1)\n\n nor_var_col = [col for col in df.columns if col in ['sig_id','cp_type','cp_time','cp_dose'] or '_gt_' in col or '_lt_' in col]\n\n var_thresh = VarianceThreshold(0.8)\n var_cols = [col for col in df.columns if col not in ['sig_id','cp_type','cp_time','cp_dose'] and '_gt_' not in col and '_lt_' not in col]\n var_data = var_thresh.fit_transform(df[var_cols])\n df = pd.concat([df[nor_var_col],pd.DataFrame(var_data)],axis=1)\n for col in ['cp_time','cp_dose']:\n tmp = pd.get_dummies(df[col],prefix=col)\n df = pd.concat([df,tmp],axis=1)\n df.drop([col],axis=1,inplace=True)\n return df,transformers,gene_pca,cell_pca,var_thresh\n\ntt = train.append(test).reset_index(drop=True)\ntt,transformers,gene_pca,cell_pca,var_thresh = Feature(tt)\ntrain = tt[:train.shape[0]]\ntest = tt[train.shape[0]:].reset_index(drop=True)\n\nif 1:\n train_target = train_target.loc[train['cp_type']!='ctl_vehicle'].reset_index(drop=True)\n train_nonscored = train_nonscored.loc[train['cp_type']!='ctl_vehicle'].reset_index(drop=True)\n train_drug = train_drug.loc[train['cp_type']!='ctl_vehicle'].reset_index(drop=True)\n ori_train = ori_train.loc[train['cp_type']!='ctl_vehicle'].reset_index(drop=True)\n train = train.loc[train['cp_type']!='ctl_vehicle'].reset_index(drop=True)\n\nclass Model(nn.Module):\n def __init__(self, num_features, num_targets, hidden_size):\n super(Model, self).__init__()\n self.batch_norm1 = nn.BatchNorm1d(num_features)\n self.dense1 = nn.utils.weight_norm(nn.Linear(num_features, hidden_size))\n\n self.batch_norm2 = nn.BatchNorm1d(hidden_size)\n self.dropout2 = nn.Dropout(0.2619422201258426)\n self.dense2 = nn.utils.weight_norm(nn.Linear(hidden_size, hidden_size))\n\n self.batch_norm4 = nn.BatchNorm1d(hidden_size)\n self.dropout4 = nn.Dropout(0.2619422201258426)\n self.dense4 = nn.utils.weight_norm(nn.Linear(hidden_size, hidden_size))\n\n self.batch_norm3 = nn.BatchNorm1d(hidden_size)\n self.dropout3 = nn.Dropout(0.2619422201258426)\n self.dense3 = nn.utils.weight_norm(nn.Linear(hidden_size, num_targets))\n\n def forward(self, x):\n x = 
self.batch_norm1(x)\n x = F.leaky_relu(self.dense1(x))\n\n x = self.batch_norm2(x)\n x = self.dropout2(x)\n x = F.leaky_relu(self.dense2(x))\n\n x = self.batch_norm4(x)\n x = self.dropout4(x)\n x = F.leaky_relu(self.dense4(x))\n\n x = self.batch_norm3(x)\n x = self.dropout3(x)\n x = self.dense3(x)\n\n return x\n\nclass LabelSmoothingLoss(nn.Module):\n def __init__(self, classes, smoothing=0.0, dim=-1):\n super(LabelSmoothingLoss, self).__init__()\n self.confidence = 1.0 - smoothing\n self.smoothing = smoothing\n self.cls = classes\n self.dim = dim\n\n def forward(self, pred, target):\n pred = pred.log_softmax(dim=self.dim)\n with torch.no_grad():\n # true_dist = pred.data.clone()\n true_dist = torch.zeros_like(pred)\n true_dist.fill_(self.smoothing / (self.cls - 1))\n true_dist.scatter_(1, target.data.unsqueeze(1), self.confidence)\n return torch.mean(torch.sum(-true_dist * pred, dim=self.dim))\n\nclass resnetModel(nn.Module):\n def __init__(self, num_features, num_targets, hidden_size,ispretrain=False):\n super(resnetModel, self).__init__()\n self.ispretrain=ispretrain\n self.batch_norm1 = nn.BatchNorm1d(num_features)\n self.dense1 = nn.utils.weight_norm(nn.Linear(num_features, hidden_size))\n\n\n self.batch_norm2 = nn.BatchNorm1d(num_features+hidden_size)\n self.dropout2 = nn.Dropout(0.2619422201258426)\n self.dense2 = nn.utils.weight_norm(nn.Linear(num_features+hidden_size, hidden_size))\n self.batch_norm20 = nn.BatchNorm1d(hidden_size)\n self.dropout20 = nn.Dropout(0.2619422201258426)\n self.dense20 = nn.utils.weight_norm(nn.Linear(hidden_size, hidden_size))\n\n\n self.batch_norm3 = nn.BatchNorm1d(2*hidden_size)\n self.dropout3 = nn.Dropout(0.2619422201258426)\n self.dense3 = nn.utils.weight_norm(nn.Linear(2*hidden_size, hidden_size))\n self.batch_norm30 = nn.BatchNorm1d(hidden_size)\n self.dropout30 = nn.Dropout(0.2619422201258426)\n self.dense30 = nn.utils.weight_norm(nn.Linear(hidden_size, hidden_size))\n\n #self.batch_norm6 = nn.BatchNorm1d(2*hidden_size)\n #self.dropout6 = nn.Dropout(0.2619422201258426)\n #self.dense6 = nn.utils.weight_norm(nn.Linear(2*hidden_size, hidden_size))\n #self.batch_norm60 = nn.BatchNorm1d(hidden_size)\n #self.dropout60 = nn.Dropout(0.2619422201258426)\n #self.dense60 = nn.utils.weight_norm(nn.Linear(hidden_size, hidden_size))\n\n\n self.batch_norm4 = nn.BatchNorm1d(2*hidden_size)\n self.dropout4 = nn.Dropout(0.2619422201258426)\n if self.ispretrain:\n self.dense4 = nn.utils.weight_norm(nn.Linear(2*hidden_size, num_targets))\n else:\n self.dense5 = nn.utils.weight_norm(nn.Linear(2*hidden_size, num_targets))\n\n def forward(self, x):\n x1 = self.batch_norm1(x)\n x1 = F.leaky_relu(self.dense1(x1))\n x = torch.cat([x,x1],1)\n\n x2 = self.batch_norm2(x)\n x2 = self.dropout2(x2)\n x2 = F.leaky_relu(self.dense2(x2))\n x2 = self.batch_norm20(x2)\n x2 = self.dropout20(x2)\n x2 = F.leaky_relu(self.dense20(x2))\n x = torch.cat([x1,x2],1)\n\n x3 = self.batch_norm3(x)\n x3 = self.dropout3(x3)\n x3 = F.leaky_relu(self.dense3(x3))\n x3 = self.batch_norm30(x3)\n x3 = self.dropout30(x3)\n x3 = F.leaky_relu(self.dense30(x3))\n x3 = torch.cat([x2,x3],1)\n\n #x4 = self.batch_norm3(x)\n #x4 = self.dropout3(x4)\n #x4 = F.leaky_relu(self.dense3(x4))\n #x4 = self.batch_norm30(x4)\n #x4 = self.dropout30(x4)\n #x4 = F.leaky_relu(self.dense30(x4))\n #x4 = torch.cat([x3,x4],1)\n\n x4 = self.batch_norm4(x3)\n x4 = self.dropout4(x4)\n if self.ispretrain:\n x4 = self.dense4(x4)\n else:\n x4 = self.dense5(x4)\n return x4\n\nclass resnetsimpleModel(nn.Module):\n def __init__(self, 
num_features, num_targets, hidden_size,ispretrain=False):\n super(resnetsimpleModel, self).__init__()\n self.ispretrain=ispretrain\n self.batch_norm1 = nn.BatchNorm1d(num_features)\n self.dense1 = nn.utils.weight_norm(nn.Linear(num_features, hidden_size))\n\n\n self.batch_norm2 = nn.BatchNorm1d(num_features+hidden_size)\n self.dropout2 = nn.Dropout(0.2619422201258426)\n self.dense2 = nn.utils.weight_norm(nn.Linear(num_features+hidden_size, hidden_size))\n\n self.batch_norm3 = nn.BatchNorm1d(2*hidden_size)\n self.dropout3 = nn.Dropout(0.2619422201258426)\n self.dense3 = nn.utils.weight_norm(nn.Linear(2*hidden_size, hidden_size))\n\n self.batch_norm4 = nn.BatchNorm1d(2*hidden_size)\n self.dropout4 = nn.Dropout(0.2619422201258426)\n if self.ispretrain:\n self.dense4 = nn.utils.weight_norm(nn.Linear(2*hidden_size, num_targets))\n else:\n self.dense5 = nn.utils.weight_norm(nn.Linear(2*hidden_size, num_targets))\n\n def forward(self, x):\n x1 = self.batch_norm1(x)\n x1 = F.leaky_relu(self.dense1(x1))\n x = torch.cat([x,x1],1)\n\n x2 = self.batch_norm2(x)\n x2 = self.dropout2(x2)\n x2 = F.leaky_relu(self.dense2(x2))\n x = torch.cat([x1,x2],1)\n\n x3 = self.batch_norm3(x)\n x3 = self.dropout3(x3)\n x3 = self.dense3(x3)\n x3 = torch.cat([x2,x3],1)\n\n x3 = self.batch_norm4(x3)\n x3 = self.dropout4(x3)\n if self.ispretrain:\n x3 = self.dense4(x3)\n else:\n x3 = self.dense5(x3)\n return x3\n\nclass transModel(nn.Module):\n def __init__(self, num_features, num_targets, hidden_size):\n super(transModel, self).__init__()\n d_model = 20\n self.batch_norm1 = nn.BatchNorm1d(num_features)\n self.dropout1 = nn.Dropout(0.2619422201258426)\n self.dense1 = nn.utils.weight_norm(nn.Linear(num_features, hidden_size))\n\n encoder_layer = nn.TransformerEncoderLayer(d_model=d_model, nhead=4, dropout=0.75)\n # self.transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=3, norm=nn.BatchNorm1d(hidden_size // d_model, eps=1e-5))\n self.transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=2, norm=nn.LayerNorm(d_model, eps=1e-5))\n #self.transformer_encoder2 = nn.TransformerEncoder(encoder_layer, num_layers=2, norm=nn.LayerNorm(d_model, eps=1e-5))\n\n self.batch_norm2 = nn.BatchNorm1d(hidden_size)\n self.dropout2 = nn.Dropout(0.2619422201258426)\n self.dense2 = nn.utils.weight_norm(nn.Linear(hidden_size, hidden_size))\n\n self.batch_norm3 = nn.BatchNorm1d(hidden_size)\n self.dropout3 = nn.Dropout(0.2619422201258426)\n self.dense3 = nn.utils.weight_norm(nn.Linear(hidden_size, num_targets))\n\n def forward(self, x):\n x = self.batch_norm1(x)\n x = self.dropout1(x)\n x = F.leaky_relu(self.dense1(x))\n x = x.view(x.shape[0], -1, 20)\n x = F.leaky_relu(self.transformer_encoder(x))\n x = x.view(x.shape[0], -1)\n\n x = self.batch_norm2(x)\n x = self.dropout2(x)\n x = F.leaky_relu(self.dense2(x))\n\n x = self.batch_norm3(x)\n x = self.dropout3(x)\n x = self.dense3(x)\n\n return x\n\nimport torch\nfrom torch.nn.modules.loss import _WeightedLoss\nimport torch.nn.functional as F\n\nclass SmoothBCEwLogits(_WeightedLoss):\n def __init__(self, weight=None, reduction='mean', smoothing=0.0):\n super().__init__(weight=weight, reduction=reduction)\n self.smoothing = smoothing\n self.weight = weight\n self.reduction = reduction\n\n @staticmethod\n def _smooth(targets:torch.Tensor, n_labels:int, smoothing=0.0):\n assert 0 <= smoothing < 1\n with torch.no_grad():\n targets = targets * (1.0 - smoothing) + 0.5 * smoothing\n return targets\n\n def forward(self, inputs, targets):\n targets = 
SmoothBCEwLogits._smooth(targets, inputs.size(-1),\n self.smoothing)\n loss = F.binary_cross_entropy_with_logits(inputs, targets,self.weight)\n\n if self.reduction == 'sum':\n loss = loss.sum()\n elif self.reduction == 'mean':\n loss = loss.mean()\n\n return loss\n\ndef Ctl_augment(train,target,include_test=0):\n if include_test==0:\n ctl_aug=ctl_train2.copy()\n if include_test==1:\n ctl_aug=ctl_train.copy()\n aug_trains = []\n aug_targets = []\n for t in [24,48,72]:\n for d in ['D1','D2']:\n for _ in range(2):\n train1 = train.loc[(train['cp_time']==t)&(train['cp_dose']==d)]\n target1 = target.loc[(train['cp_time']==t)&(train['cp_dose']==d)]\n ctl1 = ctl_aug.loc[(ctl_aug['cp_time']==t)&(ctl_aug['cp_dose']==d)].sample(train1.shape[0],replace=True)\n ctl2 = ctl_aug.loc[(ctl_aug['cp_time']==t)&(ctl_aug['cp_dose']==d)].sample(train1.shape[0],replace=True)\n train1[genes+cells] = train1[genes+cells].values + ctl1[genes+cells].values - ctl2[genes+cells].values\n aug_train = train1.merge(target1,how='left',on='sig_id')\n aug_trains.append(aug_train[['cp_time','cp_dose']+genes+cells])\n aug_targets.append(aug_train[targets])\n df = pd.concat(aug_trains).reset_index(drop=True)\n target = pd.concat(aug_targets).reset_index(drop=True)\n for col in tqdm(genes+cells):\n df[col] = transformers[col].transform(df[col].values.reshape(-1,1)).reshape(1,-1)[0]\n pca_genes = gene_pca.transform(df[genes])\n pca_cells = cell_pca.transform(df[cells])\n pca_genes = pd.DataFrame(pca_genes, columns = [f\"pca_g-{i}\" for i in range(ncompo_genes)])\n pca_cells = pd.DataFrame(pca_cells, columns = [f\"pca_c-{i}\" for i in range(ncompo_cells)])\n df = pd.concat([df, pca_genes, pca_cells], axis = 1)\n for col in ['cp_time','cp_dose']:\n tmp = pd.get_dummies(df[col],prefix=col)\n df = pd.concat([df,tmp],axis=1)\n df.drop([col],axis=1,inplace=True)\n xs = df[train_cols].values\n ys = target[targets]\n #ys_ns = target[targets_ns]\n return xs,ys#,ys_ns\n\ndef Ctl_augment2(train,target,include_test=0):\n if include_test==0:\n ctl_aug=ctl_train2.copy()\n if include_test==1:\n ctl_aug=ctl_train.copy()\n aug_trains = []\n aug_targets = []\n for t in [24,48,72]:\n for d in ['D1','D2']:\n for _ in range(1):\n train1 = train.loc[(train['cp_time']==t)&(train['cp_dose']==d)]\n target1 = target.loc[(train['cp_time']==t)&(train['cp_dose']==d)]\n ctl1 = ctl_aug.loc[(ctl_aug['cp_time']==t)&(ctl_aug['cp_dose']==d)].sample(train1.shape[0],replace=True)\n ctl2 = ctl_aug.loc[(ctl_aug['cp_time']==t)&(ctl_aug['cp_dose']==d)].sample(train1.shape[0],replace=True)\n ctl3 = ctl_aug.loc[(ctl_aug['cp_time']==t)&(ctl_aug['cp_dose']==d)].sample(train1.shape[0],replace=True)\n ctl4 = ctl_aug.loc[(ctl_aug['cp_time']==t)&(ctl_aug['cp_dose']==d)].sample(train1.shape[0],replace=True)\n train1[genes+cells] = train1[genes+cells].values + ctl1[genes+cells].values + ctl2[genes+cells].values -ctl3[genes+cells].values - ctl4[genes+cells].values\n aug_train = train1.merge(target1,how='left',on='sig_id')\n aug_trains.append(aug_train[['cp_time','cp_dose']+genes+cells])\n aug_targets.append(aug_train[targets])\n df = pd.concat(aug_trains).reset_index(drop=True)\n target = pd.concat(aug_targets).reset_index(drop=True)\n for col in tqdm(genes+cells):\n df[col] = transformers[col].transform(df[col].values.reshape(-1,1)).reshape(1,-1)[0]\n pca_genes = gene_pca.transform(df[genes])\n pca_cells = cell_pca.transform(df[cells])\n pca_genes = pd.DataFrame(pca_genes, columns = [f\"pca_g-{i}\" for i in range(ncompo_genes)])\n pca_cells = pd.DataFrame(pca_cells, columns = 
[f\"pca_c-{i}\" for i in range(ncompo_cells)])\n df = pd.concat([df, pca_genes, pca_cells], axis = 1)\n for col in ['cp_time','cp_dose']:\n tmp = pd.get_dummies(df[col],prefix=col)\n df = pd.concat([df,tmp],axis=1)\n df.drop([col],axis=1,inplace=True)\n xs = df[train_cols].values\n ys = target[targets]\n #ys_ns = target[targets_ns]\n return xs,ys#,ys_ns\n\ndef Ctl_augment_new(train,target,include_test=0):\n if include_test==0:\n ctl_aug=ctl_train2.copy()\n if include_test==1:\n ctl_aug=ctl_train.copy()\n aug_trains = []\n aug_targets = []\n for _ in range(3):\n train1 = train.copy()\n target1 = target.copy()\n ctl1 = ctl_train.sample(train1.shape[0],replace=True).reset_index(drop=True)#.loc[(ctl_train['cp_time']==t)&(ctl_train['cp_dose']==d)]\n ctl2 = ctl_train.sample(train1.shape[0],replace=True).reset_index(drop=True)\n\n ctl3 = ctl_train.sample(train1.shape[0],replace=True).reset_index(drop=True)#.loc[(ctl_train['cp_time']==t)&(ctl_train['cp_dose']==d)]\n ctl4 = ctl_train.sample(train1.shape[0],replace=True).reset_index(drop=True)\n mask_index1 = list(np.random.choice(ctl3.index.tolist(),int(ctl3.shape[0]*0.4),replace=False))\n ctl3.loc[mask_index1,genes+cells] = 0.0\n ctl4.loc[mask_index1,genes+cells] = 0.0\n\n ctl5 = ctl_train.sample(train1.shape[0],replace=True).reset_index(drop=True)#.loc[(ctl_train['cp_time']==t)&(ctl_train['cp_dose']==d)]\n ctl6 = ctl_train.sample(train1.shape[0],replace=True).reset_index(drop=True)\n mask_index2 = list(np.random.choice(list(set(ctl5.index)-set(mask_index1)),int(ctl5.shape[0]*0.3),replace=False))\n ctl5.loc[mask_index1+mask_index2,genes+cells] = 0.0\n ctl6.loc[mask_index1+mask_index2,genes+cells] = 0.0\n\n train1[genes+cells] = train1[genes+cells].values + ctl1[genes+cells].values - ctl2[genes+cells].values \\\n + ctl3[genes+cells].values - ctl4[genes+cells].values + ctl5[genes+cells].values - ctl6[genes+cells].values\n\n aug_train = train1.merge(target1,how='left',on='sig_id')\n aug_trains.append(aug_train[['cp_time','cp_dose']+genes+cells])\n aug_targets.append(aug_train[targets])\n\n df = pd.concat(aug_trains).reset_index(drop=True)\n target = pd.concat(aug_targets).reset_index(drop=True)\n for col in tqdm(genes+cells):\n df[col] = transformers[col].transform(df[col].values.reshape(-1,1)).reshape(1,-1)[0]\n pca_genes = gene_pca.transform(df[genes])\n pca_cells = cell_pca.transform(df[cells])\n pca_genes = pd.DataFrame(pca_genes, columns = [f\"pca_g-{i}\" for i in range(ncompo_genes)])\n pca_cells = pd.DataFrame(pca_cells, columns = [f\"pca_c-{i}\" for i in range(ncompo_cells)])\n df = pd.concat([df, pca_genes, pca_cells], axis = 1)\n\n nor_var_col = [col for col in df.columns if col in ['sig_id','cp_type','cp_time','cp_dose'] or '_gt_' in col or '_lt_' in col]\n var_cols = [col for col in df.columns if col not in ['sig_id','cp_type','cp_time','cp_dose'] and '_gt_' not in col and '_lt_' not in col]\n var_data = var_thresh.transform(df[var_cols])\n df = pd.concat([df[nor_var_col],pd.DataFrame(var_data)],axis=1)\n\n for col in ['cp_time','cp_dose']:\n tmp = pd.get_dummies(df[col],prefix=col)\n df = pd.concat([df,tmp],axis=1)\n df.drop([col],axis=1,inplace=True)\n xs = df[train_cols].values\n ys = target[targets]\n #ys_ns = target[targets_ns]\n return xs,ys#,ys_ns\n\nclass MoADataset:\n def __init__(self, features, targets,noise=0.1,val=0):\n self.features = features\n self.targets = targets\n self.noise = noise\n self.val = val\n\n def __len__(self):\n return (self.features.shape[0])\n\n def __getitem__(self, idx):\n sample = self.features[idx, 
:].copy()\n\n if 0 and np.random.rand()<0.3 and not self.val:\n sample = self.swap_sample(sample)\n\n dct = {\n 'x' : torch.tensor(sample, dtype=torch.float),\n 'y' : torch.tensor(self.targets[idx, :], dtype=torch.float)\n }\n return dct\n\n def swap_sample(self,sample):\n #print(sample.shape)\n num_samples = self.features.shape[0]\n num_features = self.features.shape[1]\n if len(sample.shape) == 2:\n batch_size = sample.shape[0]\n random_row = np.random.randint(0, num_samples, size=batch_size)\n for i in range(batch_size):\n random_col = np.random.rand(num_features) < self.noise\n #print(random_col)\n sample[i, random_col] = self.features[random_row[i], random_col]\n else:\n batch_size = 1\n\n random_row = np.random.randint(0, num_samples, size=batch_size)\n\n\n random_col = np.random.rand(num_features) < self.noise\n #print(random_col)\n #print(random_col)\n\n sample[ random_col] = self.features[random_row, random_col]\n\n return sample\n\nclass TestDataset:\n def __init__(self, features):\n self.features = features\n\n def __len__(self):\n return (self.features.shape[0])\n\n def __getitem__(self, idx):\n dct = {\n 'x' : torch.tensor(self.features[idx, :], dtype=torch.float)\n }\n return dct\n\ndevice = ('cuda' if torch.cuda.is_available() else 'cpu')\nEPOCHS1 = 29\nEPOCHS = 23\ntrn_loss_=[]\ndef train_and_predict(features, sub, aug, folds=5, seed=817119,lr=1/90.0/3.5*3,weight_decay=1e-5/3):\n oof = train[['sig_id']]\n for t in targets:\n oof[t] = 0.0\n preds = []\n test_X = test[features].values\n test_data_loader = DataLoader(dataset=TensorDataset(torch.Tensor(test_X)),batch_size=1024,shuffle=False)\n eval_train_loss = 0.0\n for fold, (trn_ind, val_ind) in enumerate(MultilabelStratifiedKFold(n_splits = folds, shuffle=True, random_state=seed)\\\n .split(train, train_target[targets])):\n train_X = train.loc[trn_ind,features].values\n train_Y = train_target.loc[trn_ind,targets].values\n eval_train_Y = train_target.loc[trn_ind,targets].values\n eval_train_dataset = MoADataset(train_X, eval_train_Y)\n eval_train_data_loader = torch.utils.data.DataLoader(eval_train_dataset, batch_size=128, shuffle=False)\n\n valid_X = train.loc[val_ind,features].values\n valid_Y = train_target.loc[val_ind,targets].values\n valid_dataset = MoADataset(valid_X, valid_Y,val=1)\n valid_data_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=1024, shuffle=False)\n\n aug_X,aug_Y = Ctl_augment_new(ori_train.loc[trn_ind],train_target.loc[trn_ind],include_test=1)\n train_X_ = np.concatenate([train_X,aug_X],axis=0)\n train_Y_ = np.concatenate([train_Y,aug_Y],axis=0)\n\n train_dataset = MoADataset(train_X_, train_Y_)\n train_data_loader = torch.utils.data.DataLoader(train_dataset, batch_size=128, shuffle=True)\n\n model = resnetModel(len(features),len(targets),1500)\n model.to(device)\n\n\n optimizer = torch.optim.Adam(model.parameters(),betas=(0.9, 0.99), lr=1e-3, weight_decay=weight_decay,eps=1e-5)\n scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer=optimizer, pct_start=0.1, div_factor=1e3,\n max_lr=lr, epochs=EPOCHS1, steps_per_epoch=len(train_data_loader))\n\n loss_fn = nn.BCEWithLogitsLoss()\n loss_tr = SmoothBCEwLogits(smoothing =0.001)\n\n best_valid_metric = 1e9\n not_improve_epochs = 0\n for epoch in range(EPOCHS1):\n # train\n train_loss = 0.0\n train_num = 0\n for data in (train_data_loader):\n optimizer.zero_grad()\n x,y = data['x'].to(device),data['y'].to(device)\n outputs = model(x)\n loss = loss_tr(outputs, y)\n loss.backward()\n optimizer.step()\n scheduler.step()\n train_num += 
x.shape[0]\n train_loss += (loss.item()*x.shape[0])\n\n train_loss /= train_num\n # eval\n model.eval()\n valid_loss = 0.0\n valid_num = 0\n for data in (valid_data_loader):\n x,y = data['x'].to(device),data['y'].to(device)\n outputs = model(x)\n loss = loss_fn(outputs, y)\n valid_num += x.shape[0]\n valid_loss += (loss.item()*x.shape[0])\n valid_loss /= valid_num\n t_preds = []\n for data in (test_data_loader):\n x = data[0].to(device)\n with torch.no_grad():\n outputs = model(x)\n t_preds.extend(list(outputs.sigmoid().cpu().detach().numpy()))\n pred_mean = np.mean(t_preds)\n if valid_loss < best_valid_metric:\n torch.save(model.state_dict(),'./model/model_resnet2_fold%s'%fold+'_'+str(seed)+'.ckpt')\n not_improve_epochs = 0\n best_valid_metric = valid_loss\n print('[epoch %s] lr: %.6f, train_loss: %.6f, valid_metric: %.6f, pred_mean:%.6f'%(epoch,optimizer.param_groups[0]['lr'],train_loss,valid_loss,pred_mean))\n trn_loss_.append(train_loss)\n else:\n not_improve_epochs += 1\n print('[epoch %s] lr: %.6f, train_loss: %.6f, valid_metric: %.6f, pred_mean:%.6f, NIE +1 ---> %s'%(epoch,optimizer.param_groups[0]['lr'],train_loss,valid_loss,pred_mean,not_improve_epochs))\n if not_improve_epochs >= 30 and epoch>15:\n break\n model.train()\n if epoch!=28:\n aug_X,aug_Y = Ctl_augment_new(ori_train.loc[trn_ind],train_target.loc[trn_ind],include_test=1)\n train_X_ = np.concatenate([train_X,aug_X],axis=0)\n train_Y_ = np.concatenate([train_Y,aug_Y],axis=0)\n train_dataset = MoADataset(train_X_, train_Y_)\n train_data_loader = torch.utils.data.DataLoader(train_dataset, batch_size=128, shuffle=True)\n\n state_dict = torch.load('./model/model_resnet2_fold%s'%fold+'_'+str(seed)+'.ckpt', torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") )\n model.load_state_dict(state_dict)\n model.eval()\n\n valid_preds = []\n for data in tqdm(valid_data_loader):\n x,y = data['x'].to(device),data['y'].to(device)\n with torch.no_grad():\n outputs = model(x)\n valid_preds.extend(list(outputs.cpu().detach().numpy()))\n oof.loc[val_ind,targets] = 1 / (1+np.exp(-np.array(valid_preds)))\n t_preds = []\n for data in tqdm(test_data_loader):\n x = data[0].to(device)\n with torch.no_grad():\n outputs = model(x)\n t_preds.extend(list(outputs.sigmoid().cpu().detach().numpy()))\n print(np.mean(t_preds))\n preds.append(t_preds)\n train_preds=[]\n\n for data in (eval_train_data_loader):\n x = data['x'].to(device)\n with torch.no_grad():\n outputs = model(x)\n train_preds.extend(list(outputs.sigmoid().cpu().detach().numpy()))\n train_loss = Metric(eval_train_Y,train_preds)\n eval_train_loss += train_loss\n print('eval_train_loss:',train_loss)\n\n sub[targets] = np.array(preds).mean(axis=0)\n return oof,sub\n\n\n\ntrain_cols = [col for col in train.columns if col not in ['sig_id','cp_type']]\n\nSeed_everything(0)\noof,sub = train_and_predict(train_cols,sub.copy(),aug=True,seed=0,lr=1/90.0/2,weight_decay=1e-5/2.7)\n\noutputs = []\nfor seed in [1,2,3]:\n Seed_everything(seed)\n outputs.append(train_and_predict(train_cols,sub.copy(),aug=True,seed=seed,lr=1/90.0/2,weight_decay=1e-5/2.7))\n\nfor output in outputs:\n oof[targets] += output[0][targets]\n sub[targets] += output[1][targets]\noof[targets] /= (1+len(outputs))\nsub[targets] /= (1+len(outputs))\n\nvalid_metric = Metric(train_target[targets].values,oof[targets].values)\nprint('oof mean:%.6f,sub mean:%.6f,valid metric:%.6f'%(oof[targets].mean().mean(),sub[targets].mean().mean(),valid_metric))\nsub.loc[test['cp_type']=='ctl_vehicle',targets] = 
0.0\nsub.to_csv('./shiji_submission2.csv',index=False)\n","sub_path":"shiji_solution2.py","file_name":"shiji_solution2.py","file_ext":"py","file_size_in_byte":30060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"515896538","text":"import platform\nfrom datetime import datetime\nimport asyncio\n\nimport discord\n\nfrom utils.consts import FOOTER_BOT\nfrom utils.consts import TZ\nfrom utils.games import HuskerSchedule\nimport cogs.fap as FAP\n\n\ndef build_image_embed(title, image):\n    embed = discord.Embed(title=title, color=0xD00000)\n    embed.set_author(name=\"Bot Frost\", url=\"https://github.com/refekt/Husker-Bot\", icon_url=\"https://i.imgur.com/Ah3x5NA.png\")\n    embed.set_footer(text=FOOTER_BOT)\n    embed.set_image(url=image)\n    return embed\n\n\ndef build_embed(title, **kwargs):\n    timestamp = datetime.now().astimezone(tz=TZ)\n\n    if \"color\" in kwargs.keys():\n        if \"description\" in kwargs.keys():\n            embed = discord.Embed(title=title, description=kwargs[\"description\"], color=kwargs[\"color\"], timestamp=timestamp)\n        else:\n            embed = discord.Embed(title=title, color=kwargs[\"color\"], timestamp=timestamp)\n    else:\n        if \"description\" in kwargs.keys():\n            embed = discord.Embed(title=title, description=kwargs[\"description\"], color=0xD00000)\n        else:\n            embed = discord.Embed(title=title, color=0xD00000)\n\n    embed.set_author(name=\"Bot Frost\", url=\"https://github.com/refekt/Husker-Bot\", icon_url=\"https://i.imgur.com/Ah3x5NA.png\")\n\n    if \"footer\" in kwargs.keys():\n        embed.set_footer(text=kwargs[\"footer\"])\n    else:\n        embed.set_footer(text=FOOTER_BOT)\n\n    if \"url\" in kwargs.keys():\n        embed.url = kwargs[\"url\"]\n\n    if \"image\" in kwargs.keys():\n        embed.set_image(url=kwargs[\"image\"])\n\n    if \"thumbnail\" in kwargs.keys():\n        embed.set_thumbnail(url=kwargs[\"thumbnail\"])\n    else:\n        embed.set_thumbnail(url=\"https://ucomm.unl.edu/images/brand-book/Our-marks/nebraska-n.jpg\")\n\n    try:\n        for field in kwargs[\"fields\"]:\n            if \"inline\" in kwargs:\n                embed.add_field(name=field[0], value=field[1], inline=kwargs[\"inline\"])\n            else:\n                embed.add_field(name=field[0], value=field[1])\n    except KeyError:\n        pass\n\n    return embed\n\n\ndef build_recruit_embed(rec): # rec == recruit\n    def predictions_pretty():\n        pretty = \"\"\n        for item in rec.predictions:\n            pretty += f\"{item}\\n\"\n        return pretty\n\n    def experts_pretty():\n        pretty = \"\"\n        for item in rec.experts:\n            pretty += f\"{item}\\n\"\n        return pretty\n\n    def offers_pretty():\n        pretty = \"\"\n        for index, item in enumerate(rec.recruit_interests):\n            if index > 9:\n                return pretty + f\"[View remaining offers...]({rec.recruit_interests_url})\"\n\n            pretty += f\"{item.school}{' - ' + item.status if not item.status == 'None' else ''}\\n\"\n\n        return pretty\n\n    def fap_predictions(recruit):\n        fap_preds = FAP.get_faps(recruit)\n        if fap_preds is None:\n            return \"There are no predictions for this recruit.\"\n        else:\n            init_string = f'''\n            Team: Percent (Avg Confidence)'''\n            for p in fap_preds:\n                init_string += f\"\\n{p['team']}: {p['percent']:.0f}% ({p['confidence']:.1f})\"\n            init_string += f\"\\nTotal Predictions: {fap_preds[0]['total']}\"\n            return init_string\n\n    nl = \"\\n\"\n    embed = build_embed(\n        title=f\"{rec.name}, {str(rec.rating_stars) + '⭐ ' if rec.rating_stars else ''}{rec.year} {rec.position}\",\n        description=f\"{rec.committed if rec.committed is not None else ''}{': ' + rec.committed_school if rec.committed_school is not None else ''} {': ' + str(rec.commitment_date.strftime('%b %d, %Y')) 
if rec.commitment_date is not None else ''}\",\n        fields=[\n            [\"**Biography**\", f\"{rec.city}, {rec.state}\\n\"\n                              f\"School: {rec.school}\\n\"\n                              f\"School Type: {rec.school_type}\\n\"\n                              f\"Height: {rec.height}\\n\"\n                              f\"Weight: {rec.weight}\\n\"],\n\n            [\"**Social Media**\", f\"{'[@' + rec.twitter + '](' + 'https://twitter.com/' + rec.twitter + ')' if not rec.twitter == 'N/A' else 'N/A'}\"],\n\n            [\"**Highlights**\", f\"{'[247Sports](' + rec.x247_highlights + ')' if rec.x247_highlights else '247Sports N/A'}\\n\"\n                               f\"{'[Rivals](' + rec.rivals_highlights + ')' if rec.rivals_highlights else 'Rivals N/A'}\\n\"],\n\n            [\"**Recruit Info**\", f\"[247Sports Profile]({rec.x247_profile})\\n\"\n                                 f\"[Rivals Profile]({rec.rivals_profile})\\n\"\n                                 f\"Comp. Rating: {rec.rating_numerical if rec.rating_numerical else 'N/A'} \\n\"\n                                 f\"Nat. Ranking: [{rec.national_ranking:,}](https://247sports.com/Season/{rec.year}-Football/CompositeRecruitRankings/?InstitutionGroup\"\n                                 f\"={rec.school_type.replace(' ', '')})\\n\"\n                                 f\"State Ranking: [{rec.state_ranking}](https://247sports.com/Season/{rec.year}-Football/CompositeRecruitRankings/?InstitutionGroup={rec.school_type.replace(' ', '')}&State\"\n                                 f\"={rec.state_abbr})\\n\"\n                                 f\"Pos. Ranking: [{rec.position_ranking}](https://247sports.com/Season/{rec.year}-Football/CompositeRecruitRankings/?InstitutionGroup=\"\n                                 f\"{rec.school_type.replace(' ', '')}&Position\"\n                                 f\"={rec.pos_abbr})\\n\"\n                                 f\"{'All Time Ranking: [' + rec.all_time_ranking + '](https://247sports.com/Sport/Football/AllTimeRecruitRankings/)' + nl if rec.all_time_ranking else ''}\"\n                                 f\"{'Early Enrollee' + nl if rec.early_enrollee else ''}\"\n                                 f\"{'Early Signee' + nl if rec.early_signee else ''}\"\n                                 f\"{'Walk-On' + nl if rec.walk_on else ''}\"],\n\n            [\"**Expert Averages**\", f\"{predictions_pretty() if rec.predictions else 'N/A'}\"],\n\n            [\"**Lead Expert Picks**\", f\"{experts_pretty() if rec.experts else 'N/A'}\"],\n\n            [\"**Offers**\", f\"{offers_pretty() if rec.recruit_interests else 'N/A'}\"],\n\n            [\"**FAP Predictions**\", f\"{fap_predictions(rec)}\"]\n        ]\n    )\n\n    if (rec.committed.lower() if rec.committed is not None else None) not in ['signed', 'enrolled']:\n        if ((FAP.get_croot_predictions(rec)) is not None):\n            embed.set_footer(text=FOOTER_BOT + \"\\nClick the 🔮 to predict what school you think this recruit will commit to.\"\n                                  + \"\\nClick the 📜 to get the individual predictions for this recruit.\")\n        else:\n            embed.set_footer(text=FOOTER_BOT + \"\\nClick the 🔮 to predict what school you think this recruit will commit to.\")\n    else:\n        if ((FAP.get_croot_predictions(rec)) is not None):\n            embed.set_footer(text=FOOTER_BOT + \"\\nClick the 📜 to get the individual predictions for this recruit.\")\n        else:\n            embed.set_footer(text=FOOTER_BOT)\n\n    if not rec.thumbnail == \"/.\":\n        embed.set_thumbnail(url=rec.thumbnail)\n    return embed\n\n\ndef build_schedule_embed(year, **kwargs):\n    scheduled_games, season_stats = HuskerSchedule(year=year)\n\n    ARROW = \"» \"\n    _NL = \"\\n\"\n\n    embed = build_embed(\n        title=f\"Nebraska's {year} Schedule ({season_stats.wins} - {season_stats.losses})\",\n    )\n\n    if \"week\" in kwargs:\n        game = scheduled_games[int(kwargs[\"week\"]) - 1]\n\n        value_string = f\"{ARROW + ' ' + game.outcome + _NL if not game.outcome == '' else ''}\" \\\n                       f\"{ARROW}{'B1G Game' if game.opponent.conference == 'Big Ten' else 'Non-Con Game'}{_NL}\" \\\n                       f\"{ARROW}{game.opponent.date_time}{_NL}\" \\\n                       f\"{ARROW}{game.location}\"\n\n        embed.add_field(\n            name=f\"**#{game.week}: {game.opponent.name}**\",\n            
value=value_string\n )\n\n embed.set_image(url=game.opponent.icon)\n else:\n for index, game in enumerate(scheduled_games):\n value_string = f\"{ARROW + ' ' + game.outcome + _NL if not game.outcome == '' else ''}\" \\\n f\"{ARROW}{'B1G Game' if game.opponent.conference == 'Big Ten' else 'Non-Con Game'}{_NL}\" \\\n f\"{ARROW}{game.opponent.date_time}{_NL}\" \\\n f\"{ARROW}{game.location}\"\n\n embed.add_field(\n name=f\"**#{game.week}: {game.opponent.name}**\",\n value=value_string\n )\n\n return embed\n","sub_path":"utils/embed.py","file_name":"embed.py","file_ext":"py","file_size_in_byte":8523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"339937741","text":"import kerastuner as kt\nfrom tensorflow import keras\nfrom tensorflow.keras.preprocessing import image\n\n\n#############################################################################################\n# Parameters\n#############################################################################################\nclass canshu:\n wenjianjia_xunlian = '/home/c/Desktop/CODE/shachepian_shuju/20200612'\n wenjianjia_ceshi = '/home/c/Desktop/CODE/shachepian_shuju/ceshi'\n\n\n#############################################################################################\n# Training data\n#############################################################################################\n\nxunlian_shuju = image.DirectoryIterator(\n directory=canshu.wenjianjia_xunlian,\n image_data_generator=image.ImageDataGenerator(\n data_format='channels_first',\n dtype='float64'\n )\n)\n
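\n# Added note: DirectoryIterator infers one class per sub-folder, so the\n# training folder above is expected to look roughly like this (the class\n# folder names are placeholders):\n#\n#     20200612/\n#         class_a/  img_001.png ...\n#         class_b/  img_002.png ...\n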
\nprint('------ Data info ----------------------------------------------------------------------')\nprint(\"xunlian_shuju.image_shape: {}\".format(xunlian_shuju.image_shape))\nprint(\"len(xunlian_shuju): {}\".format(len(xunlian_shuju)))\nprint(\"xunlian_shuju.batch_size: {}\".format(xunlian_shuju.batch_size))\nprint(\"xunlian_shuju.n: {}\".format(xunlian_shuju.n))\nprint('----------------------------------------------------------------------------')\n\n#############################################################################################\n# Training\n#############################################################################################\nqianyimoxing = keras.applications.MobileNetV2(\n input_shape=xunlian_shuju.image_shape,\n include_top=False,\n weights='imagenet',\n)\nqianyimoxing.trainable = False\n\n\ndef shengchengmoxing(hp):\n shuru = keras.layers.Input(shape=xunlian_shuju.image_shape)\n x = keras.layers.experimental.preprocessing.Rescaling(1. / 127.5)(shuru)\n x = x - 1\n x = keras.layers.experimental.preprocessing.RandomFlip()(x)\n x = keras.layers.experimental.preprocessing.RandomRotation(0.1)(x)\n x = qianyimoxing(x, training=False)\n x = keras.layers.GlobalAveragePooling2D()(x)\n hp_units = hp.Int('units', min_value=32, max_value=512, step=32)\n x = keras.layers.Dense(hp_units, activation=keras.activations.relu)(x)\n x = keras.layers.Dropout(0.2)(x)\n shuchu = keras.layers.Dense(xunlian_shuju.num_classes, activation='softmax')(x)\n\n moxing = keras.Model(shuru, shuchu)\n\n hp_learning_rate = hp.Choice('learning_rate', values=[1e-2, 1e-3, 1e-4])\n\n moxing.compile(\n optimizer=keras.optimizers.Adam(learning_rate=hp_learning_rate),\n loss=keras.losses.CategoricalCrossentropy(),\n metrics=['accuracy']\n )\n return moxing\n\n\ntuner = kt.Hyperband(shengchengmoxing,\n objective='val_accuracy',\n max_epochs=10,\n factor=3,\n directory='my_dir',\n project_name='intro_to_kt')\n\nceshi_shuju = image.DirectoryIterator(\n directory=canshu.wenjianjia_ceshi,\n image_data_generator=image.ImageDataGenerator(data_format='channels_first', dtype='float64'),\n)\n\ntuner.search(\n xunlian_shuju,\n epochs=10,\n validation_data=ceshi_shuju\n)\n\nbest_hps = tuner.get_best_hyperparameters(num_trials=1)[0]\n\nprint(f\"\"\"\nThe hyperparameter search is complete. The optimal number of units in the first densely-connected\nlayer is {best_hps.get('units')} and the optimal learning rate for the optimizer\nis {best_hps.get('learning_rate')}.\n\"\"\")\n\nmodel = tuner.hypermodel.build(best_hps)\nmodel.fit(xunlian_shuju, epochs=10, validation_data=ceshi_shuju)\n","sub_path":"lianxi01.py","file_name":"lianxi01.py","file_ext":"py","file_size_in_byte":3585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"348906998","text":"import ftplib\nimport os\nimport json\nimport datetime\nfrom datetime import datetime, date\nimport time\nimport shutil\n\n
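\n# Example config.json expected by main() below (added; the values are\n# placeholders, only the key names come from the code):\n#\n#     {\n#         \"host\": \"ftp.example.com\",\n#         \"user\": \"username\",\n#         \"passw\": \"secret\",\n#         \"origem\": \"/local/source/dir/\",\n#         \"destino\": \"/remote/target/dir\",\n#         \"backup\": \"/local/backup/dir/\"\n#     }\n\n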
def main():\n global arquivo\n conf=open('config.json').read()\n configuracao=json.loads(conf)\n host=str(configuracao['host'])\n user=configuracao['user']\n passw=configuracao['passw']\n origem=configuracao['origem']\n destino=configuracao['destino']\n backup=configuracao['backup']\n\n for _, _, arquivo in os.walk(origem):\n print(arquivo)\n\n try:\n ftp = ftplib.FTP(host)\n ftp.login(user, passw)\n print('Conectado com sucesso!')\n except:\n # bug fix: on a failed connection 'ftp' does not exist, so the old\n # 'finally' block (and the upload loop below) would raise NameError;\n # bail out instead and only change directory on success\n print(\"Falha de conexao FTP!\")\n return\n else:\n ftp.cwd(destino)\n\n for i in range(len(arquivo)):\n data_e_hora_atuais = datetime.now()\n data_e_hora_em_texto = data_e_hora_atuais.strftime('%Y-%m-%d %H-%M-%S')\n #shutil.copy(origem+arquivo[i], backup)\n\n with open(origem+arquivo[i],'rb') as file:\n print('Arquivo de origem aberto!')\n try:\n ftp.storbinary('STOR '+arquivo[i] , file)\n print(\"Arquivo enviado com sucesso!!\")\n except:\n print(\"Erro ao enviar o arquivo!\")\n os.rename(origem + arquivo[i], backup + data_e_hora_em_texto + ' ' + arquivo[i])\n\n ftp.quit()\n\nif __name__ == '__main__':\n main()","sub_path":"MarcacaoFTP.py","file_name":"MarcacaoFTP.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"22745707","text":"import donkeypart_xbox_1s_controller as xbox\nfrom inspect import *\n\ndef test_device_read_loop(input_device):\n print('reading loop')\n # assumed fix: BluetoothGameController was referenced unqualified in the\n # original and is presumably the class exported by the imported xbox package\n ctlr = xbox.BluetoothGameController()\n ctlr.device.read_loop()\n\ndef main():\n classes = getmembers(xbox, isclass)\n for name in classes:\n print(name)\n\nmain()\n","sub_path":"test_xbox_package.py","file_name":"test_xbox_package.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"315549351","text":"\"\"\"Command to create models.\"\"\"\nimport click\n\nfrom inv.asset_manufacturer import AssetManufacturer\nfrom inv.asset_model import AssetModel\nfrom inv.cli.custom_types import ASSET_MANUFACTURER\nfrom inv.cli.env import get_inv\n\n\n@click.command()\n@click.option('--name', prompt=True, type=click.STRING)\n@click.option('--container', prompt=True, type=click.BOOL)\n@click.option('--manufacturer', prompt=True, type=ASSET_MANUFACTURER())\ndef create(name: str, container: bool, manufacturer: AssetManufacturer) -> None:\n \"\"\"Create a new model.\"\"\"\n inv = get_inv()\n\n model = AssetModel.create_instance(\n name=name,\n container=container,\n manufacturer=manufacturer,\n )\n\n model.save(inv)\n","sub_path":"inv/cli/model/create.py","file_name":"create.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"95314511","text":"import os\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.nn.parallel import DistributedDataParallel as DDP\nos.environ['MASTER_ADDR'] = 'localhost'\nos.environ['MASTER_PORT'] = '12355'\n\ndef example(rank, world_size):\n # initialize the process group\n dist.init_process_group(\"gloo\", rank=rank, world_size=world_size)\n # create the model\n model = nn.Linear(10, 10).to(rank)\n # wrap it in DDP\n ddp_model = DDP(model, device_ids=[rank])\n loss_fn = nn.MSELoss()\n optimizer = optim.SGD(ddp_model.parameters(), lr=0.001)\n # run forward and backward passes\n for i in range(1000):\n outputs = ddp_model(torch.randn(20, 10).to(rank))\n labels = torch.randn(20, 10).to(rank)\n loss_fn(outputs, labels).backward()\n optimizer.step()\n\ndef main():\n nprocs = 2\n mp.spawn(example,\n args=(nprocs,),\n nprocs=nprocs,\n join=True)\n\nif __name__==\"__main__\":\n main()","sub_path":"mp.py","file_name":"mp.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"144423453","text":"# -*- encoding: utf-8 -*-\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom mock import call\nfrom mock import patch\n\nfrom tuskar.cmd import load_role\nfrom tuskar.cmd import load_roles\nfrom tuskar.cmd import load_seed\nfrom tuskar.tests.base import TestCase\n\n\nclass LoadRoleTests(TestCase):\n\n ROLES = \"\"\" -r role_name1.yaml -r /path/role_name2.yaml\n --role /path1/role_name3.yaml \"\"\"\n\n ROLE_EXTRA = \"\"\" --role-extra /path/metadata/compute_data.yaml\n -re /path/metadata/common_data.yaml \"\"\"\n\n ENV_DATA = \"\"\"\nresource_registry:\n OS::TripleO::Another: required_file.yaml\n \"\"\"\n\n @patch('tuskar.storage.load_utils.load_file', return_value=\"YAML\")\n @patch('tuskar.cmd.load_roles._print_names')\n def test_main(self, mock_print, mock_read):\n main_args = \" --master-seed=seed.yaml %s %s\" % (\n self.ROLES, self.ROLE_EXTRA)\n expected_res = ['role_name1', 'role_name2', 'role_name3',\n 'extra_compute_data_yaml', 'extra_common_data_yaml']\n\n # test\n load_roles.main(argv=(main_args).split())\n\n # verify\n self.assertEqual([call('Created', expected_res)],\n mock_print.call_args_list)\n\n def test_load_seed_invalid_args(self):\n main_args = \"tuskar-load-seed\"\n self.assertRaises(SystemExit, load_seed.main, main_args.split())\n\n main_args = \"tuskar-load-seed --master-seed=seed.yaml\"\n self.assertRaises(SystemExit, load_seed.main, main_args.split())\n\n main_args = \"tuskar-load-seed --resource-registry=registry.yaml\"\n self.assertRaises(SystemExit, load_seed.main, main_args.split())\n\n @patch('tuskar.storage.load_utils.load_file', return_value=\"YAML\")\n @patch('tuskar.storage.load_roles.load_file', return_value=ENV_DATA)\n @patch('tuskar.cmd.load_seed._print_names')\n def test_load_seed(self, mock_print, mock_read, mock_read2):\n main_args = (\"tuskar-load-seed --master-seed=seed.yaml\"\n \" --resource-registry=registry.yaml\")\n expected_created = ['_master_seed', '_registry', 'required_file.yaml']\n\n load_seed.main(argv=(main_args).split())\n\n self.assertEqual([call('Created', expected_created)],\n mock_print.call_args_list)\n\n @patch('tuskar.storage.load_utils.load_file', return_value=\"YAML\")\n @patch('tuskar.cmd.load_role._print_names')\n def test_load_role(self, mock_print, mock_read):\n main_args = (\" tuskar-load-role -n Compute\"\n \" --filepath /path/to/puppet/compute-puppet.yaml \"\n \" --extra-data /path/to/puppet/hieradata/compute.yaml \"\n \" --extra-data /path/to/puppet/hieradata/common.yaml \")\n expected_res = ['extra_compute_yaml', 'extra_common_yaml', 'Compute']\n\n load_role.main(argv=(main_args).split())\n\n self.assertEqual([call('Created', expected_res)],\n mock_print.call_args_list)\n\n @patch('tuskar.storage.load_utils.load_file', return_value=\"YAML\")\n @patch('tuskar.cmd.load_role._print_names')\n def test_load_role_no_name(self, mock_print, mock_read):\n main_args = (\" tuskar-load-role\"\n \" -f /path/to/puppet/compute-puppet.yaml \"\n \" --extra-data /path/to/puppet/hieradata/compute.yaml \"\n \" --extra-data /path/to/puppet/hieradata/common.yaml \")\n expected_res = ['extra_compute_yaml', 'extra_common_yaml',\n 'compute-puppet']\n\n load_role.main(argv=(main_args).split())\n\n self.assertEqual([call('Created', expected_res)],\n mock_print.call_args_list)\n\n @patch('tuskar.storage.load_utils.load_file', return_value=\"YAML\")\n @patch('tuskar.cmd.load_role._print_names')\n def test_load_role_no_path(self, mock_print, mock_read):\n main_args = (\" tuskar-load-role\"\n \" --extra-data 
/path/to/puppet/hieradata/compute.yaml \"\n \" --extra-data /path/to/puppet/hieradata/common.yaml \")\n self.assertRaises(SystemExit, load_role.main, (main_args.split()))\n","sub_path":"tuskar/tests/cmd/test_load_roles.py","file_name":"test_load_roles.py","file_ext":"py","file_size_in_byte":4637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"429661227","text":"# pylint:disable=missing-docstring,line-too-long\n\"\"\"\nCrawler for MD Energy Administration spider\n\"\"\"\n__author__ = 'Afroze Khan'\n__modified__ = 'Kiran Koduru'\n__created__ = '12/4/2014'\n\nfrom scrapy.spider import Spider\nfrom scrapy import Request\nfrom helpers.string_processor import process_string, remove_tags\nfrom dateutil import parser\nfrom helpers.items import BaseItem\nfrom urlparse import urljoin\n\nclass PressReleasesSpider(Spider):\n name = \"md-ea-pressrelease\"\n allowed_domains = \"energy.maryland.gov\",\n start_urls = (\"http://energy.maryland.gov/ARCHIVEPRESS.HTML\",)\n\n def parse(self, response):\n sites = response.css(\"#Column800 > div > ul > li > a::attr(href)\").extract()\n for url in sites:\n navigate = urljoin(response.url, url)\n yield Request(navigate, dont_filter=True, callback=parse_document)\n\ndef parse_document(response):\n sites = response.css(\"div#Column800 > div.boxBlank p\")\n for site in sites:\n if site.css(\"p::text\").extract():\n item = BaseItem()\n item['title'] = process_string(remove_tags(site.css(\"a\").extract()[0]))\n item['publishdate'] = parser.parse(process_string(site.css(\"p::text\").extract()[0]))\n url = site.css(\"a::attr(href)\").extract()[0]\n item['url'] = urljoin(response.url, url)\n yield item\n","sub_path":"spiders/md/ea/PressReleasesSpider.py","file_name":"PressReleasesSpider.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"385754171","text":"import cv2\nimport numpy as np\nfrom matplotlib import pyplot\n\n# Scales a symbol within an image\ndef scale_symbol(img, contour, new_size):\n\t# Get the bounding rect of the contour\n\t[x, y, w, h] = cv2.boundingRect(contour)\n\n\t# Get rectangle as it's own image\n\tsymbol = img[y:y+h, x:x+w]\n\tnew_w = new_size[0]\n\tnew_h = new_size[1]\n\n\t# If the image is scaled too small\n\tif 0 in symbol.shape:\n\t\treturn None\n\n\t# Resize the symbol\n\tscaled = cv2.resize(symbol, (new_w, new_h), interpolation=cv2.INTER_AREA)\n\n\t# Return the scaled symbol\n\treturn scaled\n\n# Blit src image onto dst at position (x,y)\n# Kind of like copying/pasting src onto dst\ndef blit_image(src, dst, x, y):\n\t# Get the image height, width\n\th, w = src.shape[0], src.shape[1]\n\t# Blit src onto dst\n\tdst[y:y+h, x:x+w] = src\n\treturn dst\n\n# Get the contours from an image\ndef get_all_contours(img):\n\t# Get grayscale img\n\tgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\t# Find threshed automatically, good for symbol detection\n\tret2, th2 = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)\n\n\t# Morphological operation to ensure smaller portions are part of bigger character\n\tkernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))\n\tthresh = cv2.morphologyEx(th2, cv2.MORPH_CLOSE, kernel)\n\n\t# Only find external contours, characters (probably) won't be nested\n\tcontours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n\treturn contours\n\n# Main program to test\nif __name__ == '__main__':\n\tfrom sys import argv\n\n\t# argv 
check / usage info\n\tif len(argv) < 2:\n\t\tprint(\"Usage: {} image_path\".format(argv[0]))\n\t\texit(1)\n\n\t# Read the image\n\timg = cv2.imread(argv[1])\n\tif img is None:\n\t\tprint(\"Error loading image\")\n\t\texit(1)\n\n\t# Scale all symbols down to 9x9\n\tnew_w, new_h = 9, 9\n\n\t# Get the contours\n\tcontours = get_all_contours(img)\n\tfor contour in contours:\n\t\tx, y, w, h = cv2.boundingRect(contour)\n\t\tsymbol = scale_symbol(img, contour, (new_w, new_h))\n\t\t\n\t\t# Clear the area and blit the resized symbol\n\t\tcv2.rectangle(img, (x, y), (x+w, y+h), 255, cv2.FILLED)\n\t\tblit_image(symbol, img, x, y)\n\t\n\t# Show the image\n\tpyplot.imshow(img)\n\tpyplot.show()\n\n\tprint(\"All done\")\n","sub_path":"symbols.py","file_name":"symbols.py","file_ext":"py","file_size_in_byte":2152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"28480858","text":"from app import db\nfrom flask_login import current_user\nfrom flask import request\nfrom app.data import ProjectData, IssueData\n\ndef get_project_choices(idToken):\n if current_user.is_admin():\n projects = db.child(\"Projects\").get(token=idToken).each()\n if projects is None:\n projects = []\n else:\n all_projects = db.child(\"Projects\").get(token=idToken).each()\n projects = []\n if all_projects is not None:\n for current in all_projects:\n if int(current_user.get_id()) in list(db.child(\"Projects\").child(current.key()).child(\"Assignees\").get(token=idToken).val()):\n projects.append(current)\n\n project_choices = []\n for x in range(len(projects)):\n projects[x] = projects[x].key()\n project_choices.append(tuple([projects[x], db.child(\"Projects\").child(projects[x]).child(\"Name\").get(token=idToken).val()]))\n\n return project_choices\n\ndef get_user_choices(idToken):\n if current_user.is_admin():\n users = db.child(\"Users\").get(token=idToken).each()\n else:\n return []\n \n user_choices = []\n for x in range(len(users)):\n users[x] = users[x].key()\n first_name = db.child(\"Users\").child(users[x]).child(\"First Name\").get(token=idToken).val()\n last_name = db.child(\"Users\").child(users[x]).child(\"Last Name\").get(token=idToken).val()\n user_type = db.child(\"Users\").child(users[x]).child(\"Type\").get(token=idToken).val()\n text = \"{} {} ({})\".format(first_name, last_name, user_type)\n user_choices.append(tuple([users[x], text]))\n\n return user_choices\n\ndef get_user_issues(idToken, project_choices, open_only=True):\n if current_user.is_admin():\n admin_issues = []\n issues = db.child(\"Issues\").get(token=idToken).each()\n if issues is None:\n issues = []\n for issue in issues:\n issue_key = int(issue.key())\n if isinstance(db.child(\"Issues\").child(issue_key).child(\"Assignees\").get(token=idToken).val(), list):\n if int(current_user.get_id()) in db.child(\"Issues\").child(issue_key).child(\"Assignees\").get(token=idToken).val():\n all_projects = db.child(\"Projects\").get(token=idToken).each()\n project_id = -1\n for project in all_projects:\n if issue_key in project.val()[\"Issues\"]:\n project_id = int(project.key())\n break\n if not open_only:\n admin_issues.append(IssueData(idToken, issue_key, request.host_url, project_id))\n elif db.child(\"Issues\").child(issue_key).child(\"Status\").get(token=idToken).val() == \"OPEN\":\n admin_issues.append(IssueData(idToken, issue_key, request.host_url, project_id))\n return admin_issues\n elif current_user.is_developer():\n dev_issues = []\n issues = db.child(\"Issues\").get(token=idToken).each()\n if 
issues is None:\n issues = []\n for issue in issues:\n issue_key = int(issue.key())\n if isinstance(db.child(\"Issues\").child(issue_key).child(\"Assignees\").get(token=idToken).val(), list):\n if int(current_user.get_id()) in db.child(\"Issues\").child(issue_key).child(\"Assignees\").get(token=idToken).val():\n if not open_only:\n dev_issues.append(IssueData(idToken, issue_key, request.host_url, project_choices[0][0]))\n elif db.child(\"Issues\").child(issue_key).child(\"Status\").get(token=idToken).val() == \"OPEN\":\n dev_issues.append(IssueData(idToken, issue_key, request.host_url, project_choices[0][0]))\n return dev_issues\n else:\n return None\n\ndef get_user_projects(idToken, project_choices):\n if current_user.is_admin():\n projs = []\n for current_project in project_choices:\n projs.append(ProjectData(idToken, current_project[0], request.host_url))\n return projs\n elif current_user.is_client():\n if len(project_choices) > 0:\n project = ProjectData(idToken, project_choices[0][0], request.host_url)\n else:\n project = None\n return project\n elif current_user.is_developer():\n dev_projects = []\n for current_project in project_choices:\n dev_projects.append(ProjectData(idToken, current_project[0], request.host_url))\n return dev_projects\n\ndef check_project_exists(idToken, project_id):\n if db.child(\"Projects\").child(project_id).get(token=idToken).val() is None:\n return False\n return True\n\ndef check_if_user_is_project_member(idToken, user_id, project_id):\n if current_user.is_admin():\n return True\n else:\n project_assignees = db.child(\"Projects\").child(project_id).child(\"Assignees\").get(token=idToken).val()\n if not isinstance(project_assignees, list) or user_id not in project_assignees:\n return False\n return True\n\ndef get_project_issues(idToken, project_id, open_only=True):\n all_issues = db.child(\"Projects\").child(project_id).child(\"Issues\").get(token=idToken).val()\n # bug fix: this used to test a freshly created empty list instead of the\n # value fetched from the database, and it never filled 'issues' when\n # open_only was False\n if not isinstance(all_issues, list):\n all_issues = []\n issues = []\n project_issues = []\n if open_only:\n for current_issue in all_issues:\n if db.child(\"Issues\").child(current_issue).child(\"Status\").get(token=idToken).val() == \"OPEN\":\n issues.append(current_issue)\n else:\n issues = list(all_issues)\n for issue in issues:\n project_issues.append(IssueData(idToken, issue, request.host_url, project_id))\n for x in range(len(issues)):\n issues[x] = tuple([issues[x], db.child(\"Issues\").child(issues[x]).child(\"Name\").get(token=idToken).val()])\n return project_issues, issues\n\ndef get_project_devs(idToken, project_id):\n all_dev_choices = db.child(\"Projects\").child(project_id).child(\"Assignees\").get(token=idToken).val()\n if not isinstance(all_dev_choices, list):\n all_dev_choices = []\n dev_choices = []\n for choice in all_dev_choices:\n if db.child(\"Users\").child(choice).child(\"Type\").get(token=idToken).val() in [\"admin\", \"developer\"]:\n dev_choices.append(choice)\n # note: this 'else' belongs to the 'for' loop above; since the loop has no\n # 'break', the formatting pass below always runs after the filtering pass\n else:\n for x in range(len(dev_choices)):\n first_name = db.child(\"Users\").child(dev_choices[x]).child(\"First Name\").get(token=idToken).val()\n last_name = db.child(\"Users\").child(dev_choices[x]).child(\"Last Name\").get(token=idToken).val()\n user_type = db.child(\"Users\").child(dev_choices[x]).child(\"Type\").get(token=idToken).val()\n entry = \"{} {} ({})\".format(first_name, last_name, user_type)\n dev_choices[x] = tuple([dev_choices[x], entry])\n return dev_choices\n\ndef check_issue_exists(idToken, issue_id):\n if db.child(\"Issues\").child(issue_id).get(token=idToken).val() is None:\n return False\n return 
True\n\ndef check_if_issue_is_project_member(idToken, issue_id, project_id):\n project_issues = db.child(\"Projects\").child(project_id).child(\"Issues\").get(token=idToken).val()\n if not isinstance(project_issues, list) or issue_id not in project_issues:\n return False\n return True","sub_path":"app/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":7284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"127068736","text":"# -*- coding: utf-8 -*-\nimport os\nimport yaml\nimport logging\nimport smtplib\nfrom logging.handlers import RotatingFileHandler\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nRUN_LOG_FILE = os.path.join(BASE_DIR, 'log', 'check_device_stats.txt')\nERROR_LOG_FILE = os.path.join(BASE_DIR, 'log', 'check_device_stats.error.log')\nconfig_file = os.path.join(BASE_DIR, 'config.yml')\nconfig = yaml.load(open(config_file, 'rb'))['config']\n\n\nclass Logger(object):\n __instance = None\n\n def __init__(self):\n self.run_log_file = RUN_LOG_FILE\n self.error_log_file = ERROR_LOG_FILE\n self.run_logger = None\n self.error_logger = None\n self.initialize_run_log()\n self.initialize_error_log()\n\n def __new__(cls, *args, **kwargs):\n if not cls.__instance:\n cls.__instance = object.__new__(cls, *args, **kwargs)\n return cls.__instance\n\n @staticmethod\n def check_path_exist(log_abs_file):\n log_path = os.path.split(log_abs_file)[0]\n if not os.path.exists(log_path):\n os.makedirs(log_path)\n\n def initialize_run_log(self):\n self.check_path_exist(self.run_log_file)\n file_1_1 = RotatingFileHandler(filename=self.run_log_file, maxBytes=1024 * 1024 * 2, backupCount=15,\n encoding='utf-8')\n fmt = logging.Formatter(fmt=\"%(message)s\")\n file_1_1.setFormatter(fmt=fmt)\n logger1 = logging.Logger('run_log', level=logging.INFO)\n logger1.addHandler(file_1_1)\n self.run_logger = logger1\n\n def initialize_error_log(self):\n self.check_path_exist(self.error_log_file)\n file_1_1 = RotatingFileHandler(filename=self.error_log_file, maxBytes=1024 * 1024 * 2, backupCount=15,\n encoding='utf-8')\n fmt = logging.Formatter(fmt=\"%(asctime)s - %(levelname)s : %(message)s\")\n file_1_1.setFormatter(fmt=fmt)\n logger1 = logging.Logger('run_log', level=logging.ERROR)\n logger1.addHandler(file_1_1)\n self.error_logger = logger1\n\n def log(self, message, mode=True):\n if mode:\n self.run_logger.info(message)\n else:\n self.error_logger.error(message, exc_info=True)\n\n\nclass SendMail(object):\n def __init__(self):\n self._smtp_server = config['smtp']['smtp_server']\n self._mail_user = config['smtp']['user']\n self._mail_passwd = config['smtp']['passwd']\n self._type = 'plain'\n self.server = smtplib.SMTP_SSL(self._smtp_server, 465)\n self.server.login(self._mail_user, self._mail_passwd)\n\n def send_plain(self, to, subject, msg):\n message = MIMEText(msg, self._type, 'utf-8')\n message['Subject'] = subject\n message['From'] = self._mail_user\n message['TO'] = ';'.join(to)\n self.server.sendmail(self._mail_user, to, message.as_string())\n\n def send_file(self,to,subject,text,files):\n message = MIMEMultipart()\n message['Subject'] = subject\n message['From'] = self._mail_user\n message['TO'] = ';'.join(to)\n message.attach(MIMEText(text,'plain','utf-8'))\n att = MIMEText(open(files,'rb').read(),'base64','utf-8')\n att[\"Content-Type\"] = 'application/octet-stream'\n att[\"Content-Disposition\"] = 'attachment; 
filename=\"check_device_stats.txt\"'\n message.attach(att)\n self.server.sendmail(self._mail_user,to,message.as_string())","sub_path":"python/tools/IermuStats/server/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":3528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"56220653","text":"import numpy as np\nimport array\n\nimport dynet_config\n# Set some parameters manualy\ndynet_config.set(mem=500,random_seed=1)\n# Initialize dynet import using above configuration in the current scope\nimport dynet as dy\n\nclass BiLstmModel:\n\n def __init__(self, vocab_size, output_size, embed_size = 86, hidden_size = 8, embeddings = None):\n self.name = self.__class__.__name__\n self.model = dy.ParameterCollection()\n self.trainer = dy.SimpleSGDTrainer(self.model)\n\n # Embedding\n if embeddings is None:\n self.lookup = self.model.add_lookup_parameters((vocab_size, embed_size))\n else:\n self.lookup = self.model.lookup_parameters_from_numpy(embeddings)\n (embed_size, vocab_size), _ = self.lookup.dim()\n\n\n self.bilstm = dy.BiRNNBuilder(\n num_layers = 1,\n input_dim = embed_size,\n hidden_dim = hidden_size * 2,\n model = self.model,\n rnn_builder_factory = dy.LSTMBuilder)\n\n # Dense layer\n self.w = self.model.add_parameters((output_size, hidden_size * 2))\n self.b = self.model.add_parameters(output_size)\n \n def _calculate_loss(self, sentence, sentence_labels):\n # Embedding + Bi-LSTM + Linear layer\n embeddings = [self.lookup[w] for w in sentence]\n bilstm_output = self.bilstm.transduce(embeddings)\n probs = [dy.softmax(self.w * o + self.b) for o in bilstm_output]\n losses = [-dy.log(dy.pick(dist, label)) for dist, label in zip(probs, sentence_labels)]\n return dy.esum(losses)\n\n def fit(self, data, labels):\n \"\"\"\n Expects the inputs and labels to be transformed to integers beforehand\n \"\"\"\n for sentence, sentence_labels in zip(data, labels):\n dy.renew_cg()\n loss = self._calculate_loss(sentence, sentence_labels)\n\n loss.value()\n loss.backward()\n self.trainer.update()\n\n def fit_auto_batch(self, data, labels, mini_batch_size = 1, epochs = 1):\n train_pairs = list(zip(data, labels))\n loss_progression = []\n\n for epoch in range(epochs):\n np.random.shuffle(train_pairs)\n mini_batches = [train_pairs[x:x+mini_batch_size] for x in range(0, len(train_pairs), mini_batch_size)]\n\n for batch in mini_batches:\n dy.renew_cg()\n losses = []\n for sentence, sentence_labels in batch:\n loss = self._calculate_loss(sentence, sentence_labels)\n losses.append(loss)\n loss = dy.esum(losses)\n\n loss_value = loss.value()\n loss.backward()\n self.trainer.update()\n\n loss_progression.append(loss_value)\n\n return loss_progression\n\n def fit_batch(self, inputs, labels, mini_batch_size = 1, epochs = 1):\n pass\n\n def predict(self, sentence):\n dy.renew_cg()\n # Embedding + Bi-LSTM + Linear layer\n embeddings = [self.lookup[w] for w in sentence]\n bi_lstm_output = self.bilstm.transduce(embeddings)\n score_vecs = [dy.softmax(self.w * o + self.b) for o in bi_lstm_output]\n return [np.argmax(props.value()) for props in score_vecs]\n\n def predict_auto_batch(self, sentences):\n pass\n\n def predict_batch(self, sentences):\n pass\n\n def save(self, filepath):\n self.model.save(filepath)\n\n def load(self, filepath):\n 
self.model.populate(filepath)\n\n","sub_path":"code/dynet_sequence_labeling/bi_lstm_model.py","file_name":"bi_lstm_model.py","file_ext":"py","file_size_in_byte":3573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"402158786","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\n\"\"\"\n@version: 0.1\n@author: wufeng\n@contact: pipcoo@pipcoo.com\n@site: http://www.pipcoo.com\n@software: PyCharm\n@file: key_handle.py\n@time: 2017/4/9 22:56\n\"\"\"\n\nfrom . import dbfile_handle\n\n\ndef check_tabname(tabname):\n # assumed fix: the original referenced the undefined names 'dbf_manager'\n # and a bare 'tablelist'; the imported dbfile_handle module and a string\n # key are presumably what was meant\n db = dbfile_handle.dbf_load()\n if tabname in db['tablelist']:\n return True\n else:\n return False\n\n\ndef get_tablename(command_key,command):\n command_list = command.split(' ')\n if command_key == 'select':\n tablename = command_list[command_list.index('from') + 1]\n elif command_key == 'insert':\n tablename = command_list[command_list.index('into') + 1]\n elif command_key == 'update':\n tablename = command_list[command_list.index('update') + 1]\n elif command_key == 'delete':\n tablename = command_list[command_list.index('from') + 1]\n elif command_key == 'drop':\n tablename = command_list[command_list.index('table') + 1]\n return tablename\n\ndef get_colname(current_database,tabname,command):\n \"\"\"\n Get the list of column names to query.\n :param command:\n :return:\n \"\"\"\n colsname=[]\n command_list = command.split(' ')\n cols = command_list[command_list.index('from') - 1]\n tablename = command_list[command_list.index('from') + 1]\n if cols == '*':\n tabdata = dbfile_handle.read_tbf(current_database, tabname)\n columns = tabdata['columns']\n for dict in columns:\n for k in dict:\n colsname.append(k)\n else:\n colsname = cols.split(',')\n\n return colsname\n\n#print(get_colname('emp','staff_table','select * from staff_table'))\n\ndef judge_key_split(current_database,tabname,in_wkey,judge_key):\n \"\"\"\n Split a single where condition on its comparison operator.\n :param current_database:\n :param tabname:\n :param in_wkey: one where condition\n :param judge_key: the comparison operator\n :return: a list containing [column name, operator, value]\n \"\"\"\n wkey = []\n wkey_list = in_wkey.split(judge_key)\n wkey_colname = wkey_list[0].replace(' ','')\n colname_check_result = dbfile_handle.check_colname(current_database,tabname,wkey_colname)\n if colname_check_result[0]:\n wkey.append(wkey_colname)\n wkey.append(judge_key)\n if colname_check_result[1] == 'int':\n wkey_value = int(wkey_list[1].replace(' ', '').replace('\\'', '').replace('\\\"', ''))\n wkey.append(wkey_value)\n else:\n wkey_value = str(wkey_list[1].replace(' ', '').replace('\\'', '').replace('\\\"', ''))\n wkey.append(wkey_value)\n return wkey\n else:\n print('column does not exist', in_wkey)\n return False\n
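\n# Illustrative call (added; the database, table and column names are\n# placeholders):\n#\n#     judge_key_split('emp', 'staff_table', 'age > 30', '>')\n#     # -> ['age', '>', 30]  (value cast to int when the column type is int)\n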
\ndef get_where_key(current_database,tabname,command):\n \"\"\"\n Collect every where condition from the command.\n :param current_database:\n :param tabname:\n :param command:\n :return: a list with all where conditions, eg:[[x],[x],...]\n \"\"\"\n where_key = []\n command_list = command.split('where')\n where_list = command_list[1].replace(' ','').split('and')\n for i in where_list:\n if '=' in i and '>' not in i and '<' not in i:\n if judge_key_split(current_database,tabname,i,'='):\n where_key.append(judge_key_split(current_database,tabname,i,'='))\n else:\n break\n elif '>' in i and '=' not in i and '<' not in i:\n if judge_key_split(current_database,tabname,i,'>'):\n where_key.append(judge_key_split(current_database,tabname,i,'>'))\n else:\n break\n elif '<' in i and '=' not in i and '>' not in i:\n if judge_key_split(current_database,tabname,i,'<'):\n where_key.append(judge_key_split(current_database,tabname,i,'<'))\n else:\n break\n elif '<>' in i and '=' not in i :\n if judge_key_split(current_database,tabname,i,'<>'):\n where_key.append(judge_key_split(current_database,tabname,i,'<>'))\n else:\n break\n elif '<=' in i :\n if judge_key_split(current_database,tabname,i,'<='):\n where_key.append(judge_key_split(current_database,tabname,i,'<='))\n else:\n break\n elif '>=' in i :\n if judge_key_split(current_database,tabname,i,'>='):\n where_key.append(judge_key_split(current_database,tabname,i,'>='))\n else:\n break\n elif 'like' in i:\n if judge_key_split(current_database,tabname,i,'like'):\n where_key.append(judge_key_split(current_database,tabname,i,'like'))\n else:\n break\n else:\n print('invalid where condition!')\n break\n else:\n return where_key\n\n\ndef _update_set_key_handle(command):\n set_value = []\n set_value.append(command.split('=')[0].rstrip().split(' ')[-1])\n set_value.append(command.split('=')[1].lstrip().split(' ')[0])\n return set_value\n\n# a= \"select * from staff_table where staff_id = 1 and phone like '133'\"\n# print (get_where_key('emp','staff_table',a))","sub_path":"level2/ATM/ext/DB/core/key_handle.py","file_name":"key_handle.py","file_ext":"py","file_size_in_byte":5245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"525222631","text":"#!/usr/bin/env python3\n\nfrom __future__ import print_function\nimport os\nimport pwd\nimport time\nimport sys\nimport json\nimport ssl\nimport subprocess\nfrom shlex import quote\nfrom optparse import OptionParser\nfrom LPCScriptsConfig import *\nfrom FERRYTools import *\nfrom EOSTools import *\nfrom EmailTools import *\n\n\n\n\"\"\"\nThe meat of the thing\n\"\"\"\n\ndef main(argv):\n\n\n \"\"\"\n Options\n \"\"\"\n usage = \"Usage: %prog [options] thing\\n\"\n parser = OptionParser(usage=usage)\n\n parser.add_option(\"-s\", \"--server\", action=\"store\", type=\"string\",\n default=FERRYHOSTURL, dest=\"hosturl\",\n help=\"Server host URL\")\n\n parser.add_option(\"-p\", \"--capath\", action=\"store\", type=\"string\",\n default=CAPATH, dest=\"capath\",\n help=\"CA Path\")\n\n defaultcertloc = \"/tmp/x509up_u\"+str(os.getuid())\n\n parser.add_option(\"-c\", \"--cert\", action=\"store\", type=\"string\",\n default=defaultcertloc, dest=\"cert\",\n help=\"full path to cert\")\n\n parser.add_option(\"-u\", \"--username\", action=\"store\", type=\"string\",\n default=None, dest=\"username\",\n help=\"username to force create (other info must be in FERRY)\")\n\n parser.add_option(\"-d\", \"--debug\", action=\"store_true\", dest=\"debug\",\n default=False,\n help=\"debug output\")\n\n parser.add_option(\"-n\", \"--nothing\", action=\"store_true\", dest=\"donothing\",\n default=False,\n help=\"check only -- don't perform any action\")\n\n adayago = time.time()-(60.0*60.0*24.0)\n # so that we don't go before the BIG RESET of Apr 8 2019\n aweekago = max(1554730693, time.time()-(60.0*60.0*24.0*7.0))\n\n\n\n\n parser.add_option(\"-t\", \"--timesince\", action=\"store\", type=\"int\",\n dest=\"timesince\",default=aweekago,\n help=\"timestamp of earliest quota\")\n\n\n (options,args) = parser.parse_args()\n\n\n \"\"\"\n And here we go...\n \"\"\"\n\n\n\n path, execname = os.path.split(sys.argv[0])\n if len(execname) == 0:\n execname=\"checkRecentUsers.py\"\n\n\n# setting up logging -- end up passing this to *Tools so everybody logs to the same place\n\n logger = logging.getLogger(execname)\n\n logformatter = logging.Formatter('%(levelname)s %(message)s')\n filelogformatter = logging.Formatter('%(asctime)s %(name)s 
%(levelname)s %(message)s')\n\n\n if not options.debug:\n logger.setLevel(logging.INFO)\n else:\n logger.setLevel(logging.DEBUG)\n logsh = logging.StreamHandler()\n\n logsh.setFormatter(logformatter)\n logger.addHandler(logsh)\n\n\n if not os.path.exists(LOGDIR):\n logger.debug(\"Log dir %s doesn't exist -- creating!\", LOGDIR)\n os.mkdir(LOGDIR)\n logfilename=execname+\".log\"\n logpath = os.path.join(LOGDIR,logfilename)\n logfh = logging.FileHandler(logpath)\n\n logfh.setFormatter(filelogformatter)\n logger.addHandler(logfh)\n\n logger.info (\"New user checking beginning...\")\n\n logger.debug(\"Parsing Options\")\n\n\n\n if options.debug:\n logger.debug(\"server: %s\", options.hosturl)\n logger.debug(\"capath: %s\", options.capath)\n logger.debug(\"cert: %s\", options.cert)\n logger.debug(\"username: %s\", options.username)\n logger.debug(\"donothing: %s\", options.donothing)\n logger.debug(\"debug: %s\", options.debug)\n logger.debug(\"timesince: %s\", options.timesince)\n\n\n if not os.path.exists(options.cert):\n print(\"cert: \", options.cert,\n \" not found -- proceeding to assume host is in whitelist...\")\n options.cert=None\n\n Ferry=FERRYTools(hosturl=options.hosturl, cert=options.cert, capath=options.capath,\n logobj=logger)\n\n # hacked this in to force account check/creation for a known user that the time based\n # query doesn't pull up. getUserInfo returns an array of users so next line does\n # a minor tweak to make same format as what comes back from getRecentUsers -- then\n # the rest behaves the same:\n\n replyJson=[];\n \n # if options.username is not None:\n \n# normally this will be a list, so tacking the one user onto it\n# replyJson.append(Ferry.getUserInfo(username=options.username, \n# debug=options.debug))\n# replyJson[0]=options.username\n \n\n# else:\n\n replyJson = Ferry.getRecentUsers(timestamp=options.timesince, debug=options.debug)\n\n\n \n\n logger.debug(replyJson)\n\n if len(replyJson) == 0:\n logger.critical(\"Empty reply from FERRY, aborting!\")\n sys.exit(3)\n\n if \"ferry_error\" in replyJson:\n # means something is wrong, so we don't want to mess with the existing gridmap\n # without some human eyes somewhere.\n logger.critical(\"FERRY came back with an error, aborting!\")\n logger.critical(\"Returned json:\")\n logger.critical(replyJson)\n sys.exit(2)\n\n\n userlist = []\n useruidlist = []\n userfullnamemap = {}\n\n for user in replyJson:\n \n# if we've set the username option, we skip everybody except the username in question.\n \n logger.debug(\"anybody: %s\", user)\n \n if (options.username is not None):\n if (user['username']==options.username):\n pass\n else:\n logger.debug(\"skipping: %s\", user)\n continue\n \n \n if Ferry.isInCMS(username=user['username'], debug=options.debug):\n logger.info(\"New cms user: \" + str(user['username']) + \" \" +\n str(user['uid']) + \" \" + str(user['gecos'])) # + \" \" \n# str(user['expirationdate'])) # not in passwd file \n userfullnamemap[user['username']]=user['gecos']\n\n try:\n pwd.getpwnam(user['username'])\n userlist.append(user['username'])\n useruidlist.append(user['uid'])\n except KeyError:\n logger.info(\"User %s does not exist on the system yet, skipping for now\" %\n user['username'])\n\n\n\n\n\n logger.info(\"Walking through new users:\")\n\n# Here we go through the latest users and start checking that the relevant physical\n# bits and pieces are where they need to be.\n\n# get ready to do stuff with EOS if necessary\n\n eos = EOSTools(mgmnode=EOSMGMHOST, logobj=logger, 
debug=options.debug)\n\n\n# also set up mail bits\n\n email = EmailTools(logobj=logger, debug=options.debug)\n\n\n for user in userlist:\n\n replyJson = {}\n\n replyJson = Ferry.getUserShellandHomedir(user, options.debug)\n homedir = replyJson['homedir']\n\n if os.path.exists(homedir):\n\n logger.info(\"Homedir: %s exists\", homedir)\n\n else:\n \n logger.info(\"Homedir: %s DOES NOT EXIST\", homedir)\n \n sanitizedusername=quote(user)\n\n logger.debug(\"Sanitizing username %s into: %s\" % (user,sanitizedusername))\n \n j = scriptexec(command=[\"ssh\",\"cmseosmgm01.fnal.gov\",\"id\", sanitizedusername], debug=options.debug,\n logobj=logger)\n \n j = scriptexec(command=[\"ssh\",\"cmsnfs2.fnal.gov\", \"id\", sanitizedusername], debug=options.debug,\n logobj=logger)\n \n \n\n if not options.donothing:\n\n# sanitizedusername=quote(user)\n\n# logger.debug(\"Sanitizing username %s into: %s\" % (user,sanitizedusername))\n\n# We're not going to rely on discovering these things from FERRY just yet\n# First make the /uscms/homes guy, then make the link.\n\n# Historically:\n\n# mkdir /uscms/homes/u/username\n# ln -s /uscms/homes/u/username /uscms/home/username\n# mkdir -p /uscms/homes/u/username/work\n# mkdir -p /uscms/homes/u/username/private\n# mkdir -p /uscms/homes/u/username/.globus\n#\n# mkdir -p /uscms/data/d3/username\n# ln -s /uscms_data/d3/user /uscms_data/d1/username\n# ln -s /uscms_data/d1/user /uscms/homes/u/username/nobackup\n\n# chown -R user.us_cms /uscms/homes/u/username\n# chown -R user.us_cms /uscms_data/d3/username\n# chown -R user.us_cms /uscms_data/d1/username\n\n# chmod 700 /uscms/homes/u/username/.globus\n# chmod 755 /uscms/homes/u/username\n# chmod 755 /uscms/homes/u/username/work\n# chmod 700 /uscms/homes/u/username/private\n# chmod 755 /uscms_data/d3/username\n\n\n\n\n# EOS things:\n\n\n# /usr/bin/eos -b mkdir /eos/uscms/store/user/username\n# /usr/bin/eos -b chown username:us_cms /eos/uscms/store/user/username\n# ls -ald /eos/uscms/store/user/username\n# /usr/bin/eos -b quota set -u username -v 4TB -i 500000 /eos/uscms/store/user/\n\n\n\n usernamefirstchar = sanitizedusername[0]\n realhomedir = \"/uscms/homes/\" + usernamefirstchar + \"/\"\n realhomedir = realhomedir + sanitizedusername\n j = scriptexec(command=[\"mkdir\", realhomedir], debug=options.debug,\n logobj=logger)\n\n oldhomedir = \"/uscms/home/\" + sanitizedusername\n j = scriptexec(command=[\"ln\", \"-s\", realhomedir, oldhomedir],\n debug=options.debug, logobj=logger)\n\n j = scriptexec(command=[\"mkdir\", \"-p\", realhomedir+\"/work\"],\n debug=options.debug, logobj=logger)\n\n j = scriptexec(command=[\"mkdir\", \"-p\", realhomedir+\"/private\"],\n debug=options.debug, logobj=logger)\n\n j = scriptexec(command=[\"mkdir\", \"-p\", realhomedir+\"/.globus\"],\n debug=options.debug, logobj=logger)\n\n# NFS stuff\n\n nfsdir = \"/uscms_data/d3/\" + sanitizedusername\n\n linknfsdir = \"/uscms_data/d1/\" + sanitizedusername\n\n j = scriptexec(command=[\"mkdir\", \"-p\", nfsdir],\n debug=options.debug, logobj=logger)\n\n# this is hardwired at the moment\n quotastring = \"limit -u bsoft=100g bhard=120g \" + sanitizedusername\n\n j = scriptexec (command=[\"xfs_quota\", \"-x\", \"-c\", '\"'+quotastring+'\"',\n \"/uscms_data/d3\"],\n debug=options.debug, logobj=logger)\n\n\n j = scriptexec(command=[\"ln\", \"-s\", nfsdir, linknfsdir],\n debug=options.debug, logobj=logger)\n\n j = scriptexec(command=[\"ln\", \"-s\", linknfsdir, realhomedir+\"/nobackup\"],\n debug=options.debug, logobj=logger)\n\n# setting permissions\n\n j = 
scriptexec(command=[\"chown\", \"-R\", sanitizedusername+\".us_cms\",\n realhomedir],\n debug=options.debug, logobj=logger)\n\n j = scriptexec(command=[\"chown\", \"-R\", sanitizedusername+\".us_cms\",\n nfsdir],\n debug=options.debug, logobj=logger)\n\n j = scriptexec(command=[\"chown\", \"-R\", sanitizedusername+\".us_cms\",\n linknfsdir],\n debug=options.debug, logobj=logger)\n\n j = scriptexec(command=[\"chmod\", \"755\", realhomedir],\n debug=options.debug, logobj=logger)\n\n j = scriptexec(command=[\"chmod\", \"700\", realhomedir+\"/.globus\"],\n debug=options.debug, logobj=logger)\n\n j = scriptexec(command=[\"chmod\", \"755\", realhomedir+\"/work\"],\n debug=options.debug, logobj=logger)\n\n j = scriptexec(command=[\"chmod\", \"700\", realhomedir+\"/private\"],\n debug=options.debug, logobj=logger)\n\n j = scriptexec(command=[\"chmod\", \"755\", nfsdir],\n debug=options.debug, logobj=logger)\n\n# skeleton .bash_profile also committed along with this code -- copy to homedir\n\n j = scriptexec(command=[\"cp\", \"./proto_bash_profile\",\n realhomedir+\"/.bash_profile\"],\n debug=options.debug, logobj=logger)\n\n j = scriptexec(command=[\"chown\", sanitizedusername+\".us_cms\",\n realhomedir+\"/.bash_profile\"],\n debug=options.debug, logobj=logger)\n\n j = scriptexec(command=[\"chmod\", \"644\",\n realhomedir+\"/.bash_profile\"],\n debug=options.debug, logobj=logger)\n\n\n# then EOS -- relies being able to log into the MGM\n\n rawoutput=\"\"\n\n eosdir = \"/eos/uscms/store/user/\" + sanitizedusername\n eosexecstring = \"mkdir \" + eosdir\n logger.debug(eosexecstring)\n rawoutput = eos.mgmexec(execstring=eosexecstring, debug=options.debug)\n logger.info (\"EOS returns: %s\" % rawoutput)\n\n eosexecstring = \"chown \" + sanitizedusername + \":us_cms \" + eosdir\n logger.debug(eosexecstring)\n rawoutput = eos.mgmexec(execstring=eosexecstring, debug=options.debug)\n logger.info (\"EOS returns: %s\" % rawoutput)\n\n eosexecstring = \"quota set -u \" + sanitizedusername + \" -v 4TB -i 500000 \"\n eosexecstring = eosexecstring + \"/eos/uscms/store/user\"\n logger.debug(eosexecstring)\n rawoutput = eos.mgmexec(execstring=eosexecstring, debug=options.debug)\n logger.info (\"EOS returns: %s\" % rawoutput)\n\n\n# done with the physical stuff, finally do the email bits\n\n j = email.userAccountMadeMail(user=sanitizedusername, BCC=True)\n\n fullname=''\n if user in userfullnamemap:\n fullname=userfullnamemap[user]\n j = email.addToUAFList(user=user, userfullname=fullname)\n\n\n else:\n\n\n logger.info (\"donothing option set -- not taking any action...\")\n\n\n\n\n\n\n\n# feeds subprocess and logs results\n\ndef scriptexec(command = [], debug=False, logobj=None):\n\n if logobj is not None and isinstance(logobj,logging.getLoggerClass()):\n logger=logobj\n else:\n print (\"scriptexec called without logobj -- skipping doing:\")\n print (\"%s\" % command)\n return 1\n\n# sticking this in here for debugging\n# command = [\"echo\"] + command\n\n logger.debug(\"Command Array: %s\" % command)\n\n commandstring = \"\"\n\n for a in command:\n commandstring = commandstring + a + \" \"\n\n logger.info (\"Executing: %s\" % commandstring)\n\n output = \"\"\n try:\n output = subprocess.check_output(command, stderr=subprocess.STDOUT)\n logger.info(\"Output: %s\" % str(output))\n except subprocess.CalledProcessError as e:\n logger.info(\"Exec Error: %s\" % e.output)\n return 2\n except Exception as e:\n logger.info(\"Other error: %s\" % e)\n return 3\n\n\n return 0\n\n\n\n\nif __name__ == '__main__':\n\n# 
\"\"\"\n# Lets get this out of the way in the beginning. Don't want to screw with the urllib changes\n# pre python2.7, so we just go for python3 off the bat.\n# \"\"\"\n# if (sys.version_info < (3, 0)):\n# print (\"run as: python3 detailsCMSUser.py ...\")\n# sys.exit()\n\n\n main(sys.argv[1:])\n","sub_path":"checkRecentUsers.py","file_name":"checkRecentUsers.py","file_ext":"py","file_size_in_byte":15840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"622285159","text":"# -*- coding: utf-8 -*-\n\n# Copyright 2018, IBM.\n#\n# This source code is licensed under the Apache License, Version 2.0 found in\n# the LICENSE.txt file in the root directory of this source tree.\n\n# pylint: disable=invalid-name,missing-docstring\n\nfrom test.common import QiskitSympyTestCase\n\nimport unittest\n\nfrom sympy import sqrt\n\nfrom qiskit import (load_qasm_file, execute, QuantumRegister,\n ClassicalRegister, QuantumCircuit, wrapper)\nfrom qiskit_addon_sympy import SympyProvider\n\n\nclass SympyStatevectorSimulatorTest(QiskitSympyTestCase):\n \"\"\"Test local statevector simulator.\"\"\"\n\n def setUp(self):\n\n self.qasm_filename = self._get_resource_path('simple.qasm')\n self.q_circuit = load_qasm_file(self.qasm_filename)\n\n def test_sympy_statevector_simulator(self):\n \"\"\"Test final state vector for single circuit run.\"\"\"\n SyQ = SympyProvider()\n backend = SyQ.get_backend('statevector_simulator')\n\n result = execute(self.q_circuit, backend).result()\n actual = result.get_statevector(self.q_circuit)\n\n self.assertEqual(result.get_status(), 'COMPLETED')\n self.assertEqual(actual[0], sqrt(2)/2)\n self.assertEqual(actual[1], 0)\n self.assertEqual(actual[2], 0)\n self.assertEqual(actual[3], sqrt(2)/2)\n\n\nclass TestQobj(QiskitSympyTestCase):\n \"\"\"Check the objects compiled for this backend create names properly\"\"\"\n\n def setUp(self):\n\n qr = QuantumRegister(2, name=\"qr2\")\n cr = ClassicalRegister(2, name=None)\n qc = QuantumCircuit(qr, cr, name=\"qc10\")\n qc.h(qr[0])\n qc.measure(qr[0], cr[0])\n self.qr_name = qr.name\n self.cr_name = cr.name\n self.circuits = [qc]\n\n def test_qobj_sympy_statevector_simulator(self):\n SyQ = SympyProvider()\n backend = SyQ.get_backend('statevector_simulator')\n qobj = wrapper.compile(self.circuits, backend)\n cc = qobj.experiments[0].as_dict()\n ccq = qobj.experiments[0].header.compiled_circuit_qasm\n self.assertIn(self.qr_name, map(lambda x: x[0], cc['header']['qubit_labels']))\n self.assertIn(self.qr_name, ccq)\n self.assertIn(self.cr_name, map(lambda x: x[0], cc['header']['clbit_labels']))\n self.assertIn(self.cr_name, ccq)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test/test_sympy_statevector_simulator.py","file_name":"test_sympy_statevector_simulator.py","file_ext":"py","file_size_in_byte":2334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"468863327","text":"'''..................................................................................................\nAuthor: Diego Inácio\nGitHub: github.com/diegoinacio\n Date: AUG/28/2018\n.....................................................................................................\n\"Drop Window\" style script for scattering instances over a surface.\n- Supports probability and ID control through weights. (Python node)\n- The number of instances must be at least 2. Exactly one surface must be selected.\n- Variations of scale, rotation and so on. (Random node)\n- Density and scale control. (Strength node)\n..................................................................................................'''\nimport MASH.api as mapi\nimport flux.core as fx\nimport maya.cmds as cmds\n\n
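# Added note on the weighting scheme used by SCRIPT below: with weights\n# p = [2, 1, 1] and count = 100 points, IDs 0/1/2 are handed out in roughly\n# a 50/25/25 split; any rounding remainder is added to the largest bucket\n# before the ID list is shuffled.\n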
SCRIPT = '''# script for probability-based randomization of instance IDs\nimport openMASH\nimport random\n\n# MASH system initialization\nmd = openMASH.MASHData(thisNode)\ncount = md.count()\n\n# read the attribute values from the python node\ncountID = cmds.getAttr(thisNode + '.countID')\nseed = cmds.getAttr(thisNode + '.seed')\n\n##############\n# parameters #\n##############\nrandom.seed(seed)\n# number of ids\nids = countID\n# list of weights\np = [1]*ids\n\n# probability setup\n{0}\n# probability computation\np = [float(e)/sum(p) for e in p]\np = [int(e*count + 0.5) for e in p]\nif sum(p) != count:\n\tdiff = count - sum(p)\n\targmax = p.index(max(p))\n\tp[argmax] += diff\nID = sum([[i]*p[i] for i in range(ids)], [])\nrandom.shuffle(ID)\n\nfor i in range(count):\n\tmd.outId[i] = ID[i]\n\nmd.setData()\n'''\n\ndef runPreset():\n\tcmds.select(clear=True)\n\tcmds.promptDialog(message='Name of the MASH system:')\n\tmashName = cmds.promptDialog(query=True, text=True)\n\tif not mashName:\n\t\tmashName = '#'\n\telse:\n\t\tmashName = '_' + mashName\n\t############################################################################################\n\tsteps = [\t'Select the instances | n >= 2 (drag with the middle mouse button)',\n\t\t\t\t'Select the surface | n = 1 (drag with the middle mouse button)']\n\taccepts = [['mesh'], ['mesh']]\n\t############################################################################################\n\tfx.DropWindow.getDrop(\tsteps,\n\t\t\t\t\t\t\tlambda data: smartPreset.send(data),\n\t\t\t\t\t\t\taccepts=accepts)\n\tnode = yield\n\tnodes = node.split('\\n')\n\t############################################################################################\n\tcmds.promptDialog(message='Number of points:')\n\tpointCount = cmds.promptDialog(query=True, text=True)\n\ttry:\n\t\tpointCount = int(pointCount)\n\texcept:\n\t\tpointCount = 10\n\t\n\tmash = mapi.Network()\n\tmash.createNetwork(name='MASH' + mashName, geometry='Instancer')\n\n\tnode = yield\n\tnode = node.split('\\n')[0]\n\n\tmash.meshDistribute(node)\n\t############################################################################################\n\tdistNodeName = mash.waiter + '_Distribute'\n\tcmds.setAttr(distNodeName + '.pointCount', pointCount)\n\n\tidNodeName = mash.waiter + '_ID'\n\tcmds.setAttr(idNodeName + '.idtype', 2)\n\tcount = cmds.getAttr(idNodeName + '.numObjects')\n\n\tpyNode = mash.addNode('MASH_Python')\n\tcmds.addAttr(pyNode.name, longName='countID', attributeType='long', defaultValue=count)\n\texpr = '{0}.countID = {1}.numObjects'.format(pyNode.name, idNodeName)\n\tcmds.expression(s=expr)\n\tcmds.addAttr(pyNode.name, longName='seed', attributeType='long', defaultValue=1234)\n\tscriptID = ''.join(['p[{0}] = 1 # {1} prob.\\n'.format(i, e) for i, e in enumerate(nodes)])\n\tcmds.setAttr(pyNode.name + '.pyScript', SCRIPT.format(scriptID), type='string')\n\n\trandNode = mash.addNode('MASH_Random')\n\tcmds.setAttr(randNode.name + '.positionX', 0)\n\tcmds.setAttr(randNode.name + '.positionY', 0)\n\tcmds.setAttr(randNode.name + '.positionZ', 0)\n\tcmds.setAttr(randNode.name + '.rotationY', 180)\n\tcmds.setAttr(randNode.name + '.scaleX', 1)\n\tcmds.setAttr(randNode.name + '.uniformRandom', 1)\n\n\tstrengthNode = mash.addNode('MASH_Strength')\n\n\tyield\n\nsmartPreset = 
runPreset()\nsmartPreset.next()","sub_path":"Maya/MASH-Scripts/MASH_probScatter.py","file_name":"MASH_probScatter.py","file_ext":"py","file_size_in_byte":3950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"188349898","text":"# -*- coding: utf-8 -*-\n\n# Visigoth: A lightweight Python3 library for rendering data visualizations in SVG\n# Copyright (C) 2020 Niall McCarroll\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software\n# and associated documentation files (the \"Software\"), to deal in the Software without \n# restriction, including without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies or \n# substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING\n# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,\n# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nimport html\n\nfrom visigoth.svg import foreign_object\nfrom visigoth.common import DiagramElement, EmbeddedHtml\n\nstart = \"\"\"
<fieldset>\n<legend>%(title)s</legend>\"\"\"\n\n# NOTE: the markup of the start/control_template/end strings was lost when\n# this file was extracted; the tags below are a minimal reconstruction\n# inferred from the element ids and format parameters that js_template and\n# the fieldset css below rely on.\ncontrol_template = \"\"\"\n<div>\n<input type=\"checkbox\" id=\"%(control_id)s\"/>\n<label for=\"%(control_id)s\">%(control_label)s</label>\n<input type=\"range\" id=\"%(control_id)s_range\" min=\"0\" max=\"1\" step=\"0.05\" value=\"1\"/>\n</div>\n\"\"\"\n\njs_template = \"\"\"\ndocument.getElementById(\"%(control_id)s\").checked = %(visible)s;\nvar cb = function(evt) {\n var control_id = \"%(control_id)s\";\n var control_ele = document.getElementById(control_id);\n var range_ele = document.getElementById(control_id+\"_range\");\n var opacity = Number.parseFloat(range_ele.value);\n if (!control_ele.checked) {\n var payload = {\"layer\":\"%(layer_id)s\",\"value\":false};\n } else {\n var payload = {\"layer\":\"%(layer_id)s\",\"value\":opacity};\n }\n pubsubs_publish(\"%(id)s\",payload,\"manage_layers\");\n};\ndocument.getElementById(\"%(control_id)s\").onclick = cb;\ndocument.getElementById(\"%(control_id)s_range\").onchange = cb;\ndocument.getElementById(\"%(control_id)s_range\").oninput = cb;\n\"\"\"\n\nend = \"\"\"\n</fieldset>
\"\"\"\n\ncss = \"\"\"\nfieldset {\n border-width: 2px;\n border-radius: 5px;\n}\"\"\"\n\nclass MapLayerManager(EmbeddedHtml):\n \"\"\"\n Create an embedded HTML map layer manager\n\n Arguments:\n layers(list): list of dicts containg \"label\" and \"id\" keys\n\n Keyword Arguments:\n title: a title to display above the layer controls\n width(int): width of the embedded HTML\n height(int): height of the embedded HTML\n \"\"\"\n\n def __init__(self,map_layers,title=\"Layer Controls\",width=512,height=512):\n EmbeddedHtml.__init__(self,\"\",css,width,height)\n html_content = start%({\"id\":self.getId(),\"title\":html.escape(title,True)})\n js_content = \"\"\n for map_layer in map_layers:\n control_label = map_layer[\"label\"]\n layer_obj = map_layer[\"layer\"]\n layer_id = layer_obj.getId()\n visible = \"false\"\n if layer_obj.getVisible():\n visible = \"true\"\n control_id = DiagramElement.getNextId()\n html_content += control_template%({\n \"id\":self.getId(),\n \"control_label\":control_label,\n \"control_id\":control_id,\n \"visible\":visible,\n \"layer_id\":layer_id})\n js_content += js_template%({\n \"id\":self.getId(),\n \"control_label\":control_label,\n \"control_id\":control_id,\n \"visible\":visible,\n \"layer_id\":layer_id})\n\n html_content += end\n self.setHtml(html_content)\n self.setJs(js_content)\n","sub_path":"visigoth/common/map_layer_manager/map_layer_manager.py","file_name":"map_layer_manager.py","file_ext":"py","file_size_in_byte":4110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"388952012","text":"from latex import *\nfrom bits import *\n\ndef binary_to_unsigned_decimal_general(doc=vdoc):\n doc.line(r\"Given a binary number as $b_{n}b_{n-1}\\hdots b_{1}b_{0}$,\")\n doc.line(r\"we can convert it to decimal using the sum\")\n doc.line(r\"\\[\")\n doc.line(r\" \\sum_{i=0}^{n} b_{i} \\cdot 2^{i}\")\n doc.line(r\"\\]\")\n\ndef binary_to_unsigned_decimal(bits, doc=vdoc):\n doc.line(r\"Converting $%s$ to decimal, we get\" % str(bits))\n doc.line(r\"\\[\")\n bits_reversed = bits[::-1]\n res = 0\n for i,bit in enumerate(bits_reversed):\n plus = \"+\" if i < len(bits_reversed)-1 else \"\"\n doc.line(r\" %d \\cdot 2^{%d} %s\" % (bit, i, plus))\n res += bit * 2**i\n doc.line(r\" = %d\" % res)\n doc.line(r\"\\]\")\n return res\n\n\ndef add_twos_complement(a, b, doc=vdoc):\n r = a.reverse() + b.reverse()\n return r.reverse()\n\ndef negate_twos_complement(bits, doc=vdoc):\n cs = \"c\" * (len(bits) + 1)\n def bits_table_line(bits):\n return \" & \".join(map(lambda x: \"$%s$\" % x, str(bits)))\n normal = bits_table_line(bits)\n flipped = bits_table_line(bits.flip())\n one = Bits(\"1\").reverse().width(len(bits)).reverse()\n one_line = bits_table_line(one)\n result_reversed = bits.flip().reverse() + Bits(\"1\").reverse()\n result = result_reversed.reverse()\n result_line = bits_table_line(result)\n doc.line(r\"\\begin{table}[H]\")\n doc.line(r\"\\centering\")\n doc.line(r\"\\begin{tabular}{%s}\" % cs)\n doc.line(r\" & %s \\\\\" % normal)\n doc.line(r\"$\\neg$ & %s \\\\\" % flipped)\n doc.line(r\"$+$ & %s \\\\\" % one_line)\n doc.line(r\"\\hline \\\\\")\n doc.line(r\" & %s \\\\\" % result_line)\n doc.line(r\"\\end{tabular}\")\n doc.line(r\"\\end{table}\")\n return result\n\n\ndef binary_to_twos_complement(bits, doc=vdoc):\n bits_reversed = bits[::-1]\n msb = bits_reversed.msb()\n doc.line(r\"To convert a binary number to decimal in two's complement\")\n doc.line(r\"we first look at the MSB. 
\")\n doc.skip()\n doc.line(r\"In case of our $%s$, the MSB is $%d$.\" % (str(bits), msb))\n if msb == True:\n doc.line(r\"This means that the number is negative.\")\n doc.line(r\"Therefore we convert it to positive by flipping every bit and adding $1$ to it:\")\n doc.skip()\n neg = negate_twos_complement(bits, doc)\n doc.skip()\n dec = binary_to_unsigned_decimal(neg, doc)\n doc.skip()\n doc.line(r\"As we concluded the original number was negative, therefore this must be too.\")\n doc.line(r\"Therefore the final result is: $%d$\" % (-dec))\n else:\n doc.line(r\"This means that the number is positive.\")\n doc.line(r\"Now we simply convert it to decimal as usual:\")\n return binary_to_unsigned_decimal(bits, doc)\n \n\n\ndef binary_to_hexidecimal(bits, doc=vdoc):\n bits_reversed = bits[::-1]\n def hex_(i):\n s = hex(i)\n r = s[2:].upper()\n return r\n chunks = bits_reversed.chunks(4)\n rev_chunks = chunks[::-1]\n doc.line(r\"To convert binary to hexidecimal,\")\n doc.line(r\"we split the number up in chunks of 4: \")\n doc.skip()\n doc.line(r\"%s\" % \" \".join(map(str, rev_chunks)))\n doc.skip()\n doc.line(r\"Now, for every chunk we convert it to decimal,\")\n doc.line(r\"and numbers above 9 will be represented by the letters A-F.\")\n doc.skip()\n result = []\n for c in chunks:\n rc = c.reverse()\n i = int(str(c), 2)\n h = hex_(i)\n doc.line(r\"%s $\\rightarrow$ %d $\\rightarrow$ %s \\\\\" % (str(c), i, h))\n result.append(h)\n rev_result = result[::-1]\n st = \"\".join(rev_result)\n doc.line(r\"Finally, we can write our result as: %s\" % st)\n return st\n\n\ndef decimal_to_binary(d, doc=vdoc):\n res = []\n n = d\n doc.line(r\"To convert decimal to binary,\")\n doc.line(r\"we divide the number by 2,\")\n doc.line(r\"and find the quotient and remainder.\")\n doc.line(r\"Then we divide the quotient with 2 and continue until\")\n doc.line(r\"the result is zero\")\n\n doc.line(r\"\\begin{table}[H]\")\n doc.line(r\"\\centering\")\n doc.line(r\"\\begin{tabular}{cccccc}\")\n doc.line(r\"$q_{old}$ & & & & $q_{new}$ & Bit \\\\\")\n doc.line(r\"\\hline \\\\\")\n while n > 0:\n n2, b = divmod(n, 2)\n doc.line(r\"%d & div & 2 & = & %d & \\\\\" % (n, n2))\n doc.line(r\"%d & mod & 2 & = & & %d \\\\\" % (n, b))\n n = n2\n res.append(bool(b))\n doc.line(r\"\\end{tabular}\")\n doc.line(r\"\\end{table}\")\n\n bits = Bits(res).reverse()\n doc.line(r\"The final result is: %s.\" % str(bits))\n return bits\n\ndef decimal_to_twos_complement(d, doc=vdoc):\n doc.line(r\"When converting a decimal number to twos complement,\")\n doc.line(r\"we first look at the sign.\")\n doc.line(r\"In our case of $%d$,\" % d)\n if d == 0:\n doc.line(r\"we know that the result is simple zero,\")\n doc.line(r\"so we don't need to look at the sign.\")\n elif d > 0:\n doc.line(r\"the number is positive,\")\n doc.line(r\"so we simply convert to binary unsigned:\")\n bits = decimal_to_binary(d, doc)\n doc.line(r\"Then we add a zero as the MSB to indicate that it is positive.\")\n bits.pad_left(1)\n doc.line(r\"The final result is: %s.\" % str(bits))\n else: # d < 0\n doc.line(r\"the number is negative.\")\n doc.line(r\"Therefore we remove the sign and convert it to binary:\")\n pos_bits = decimal_to_binary(-d, doc)\n doc.line(r\"As we know that the result is negative, we negate it:\")\n neg_bits = negate_twos_complement(pos_bits, doc)\n doc.line(r\"Then we add a one as the MSB to indicate that it is negative.\")\n res = neg_bits.pad_left(1, True)\n doc.line(r\"The final result is: %s.\" % str(res))\n return res\n\n# TODO: Implement 
elementary school multiplication with binary\n# Note: Might not be implemented. Use Booth algorithm.\ndef binary_multiply(a, b, doc=vdoc):\n pass\n\n# Implementation of booth algorithm described as a flow chart\n# on page 39 in slide 2.\ndef binary_multiply_booth(a_, b_, doc=vdoc):\n l = max(len(a_), len(b_))\n a = Bits().width(l) # All zeros\n m = a_.reverse().width(l).reverse() # M <- Multiplicand\n q = b_.reverse().width(l).reverse() # Q <- Multiplier\n q_1 = Bits(\"0\") # Q_-1 <- 0\n count = l # Count <- n\n doc.line(r\"We want to find the result of %s $\\cdot$ %s.\" % (m, q))\n\n doc.line(r\"\\begin{align*}\")\n doc.line(r\"A &\\gets %s \\\\\" % a)\n doc.line(r\"Q &\\gets %s \\\\\" % q)\n doc.line(r\"Q_{-1} &\\gets %s \\\\\" % q_1)\n doc.line(r\"M &\\gets %s \\\\\" % m)\n doc.line(r\"\\end{align*}\")\n\n doc.line(r\"\\begin{table}[H]\")\n doc.line(r\"\\begin{tabular}{ccccl}\")\n doc.line(r\" $A$ & $Q$ & $Q_{-1}$ & $M$ & Notes \\\\ \\hline \\\\\")\n doc.line(r\" %s & %s & %s & %s & %s \\\\\" % (a, q, q_1, m, \"Initial values\"))\n while count > 0:\n # Q0, Q-1\n qq = (q[-1], q_1[0])\n if qq == (True, False):\n # Q0, Q-1 = 1,0\n # A <- A - M\n a = add_twos_complement(a, negate_twos_complement(m))\n note = \"$1,0 \\Rightarrow A \\gets A - M$ \"\n doc.line(r\" %s & %s & %s & %s & %s \\\\\" % (a, q, q_1, m, note))\n elif qq == (False, True):\n # Q0, Q-1 = 0,1\n # A <- A + M\n a = add_twos_complement(a, m)\n note = \"$0,1 \\Rightarrow A \\gets A + M$ \"\n doc.line(r\" %s & %s & %s & %s & %s \\\\\" % (a, q, q_1, m, note))\n else:\n #Q0, Q-1 = 0,0 or 1,1 - do nothing\n qq_str = \"0,0\" if qq == (False, False) else \"1,1\"\n note = \"$%s$ -- Only shift\" % qq_str\n doc.line(r\" %s & %s & %s & %s & %s \\\\\" % (a, q, q_1, m, note))\n\n # Arithmetic shift right A,Q,Q-1\n aqq = Bits(a.bits + q.bits + q_1.bits)\n aqq.shift_right(arithmetic=True)\n\n (a, q, q_1) = tuple(aqq.chunks(l))\n q_1.width(1)\n\n note = \"Arithmetic shift right\"\n doc.line(r\" %s & %s & %s & %s & %s \\\\\" % (a, q, q_1, m, note))\n\n # Count <- Count - 1\n count -= 1\n doc.line(r\"\\end{tabular}\")\n doc.line(r\"\\end{table}\")\n\n \n aq = Bits(a.bits + q.bits)\n doc.line(r\"The result is in A,Q: %s\" % (aq))\n return aq\n\n# TODO: Implement elementary school long division with binary\n# Note: Might not be implemented.
Use division alg below.\ndef binary_division(a, b, doc=vdoc):\n pass\n\n# Implementation of algorithm described as a flow chart\n# on page 98 in slide 2.\ndef binary_division_alg(a_, b_, doc=vdoc):\n l = max(len(a_), len(b_))\n a = Bits().width(l) # All zeros\n q = a_.reverse().width(l).reverse() # Q <- Dividend\n m = b_.reverse().width(l).reverse() # M <- Divisor\n count = l # Count <- n\n doc.line(r\"We want to find the quotient and remainder of %s / %s.\" % (q, m))\n\n doc.line(r\"\\begin{align*}\")\n doc.line(r\"A &\\gets %s \\\\\" % a)\n doc.line(r\"Q &\\gets %s \\\\\" % q)\n doc.line(r\"M &\\gets %s \\\\\" % m)\n doc.line(r\"\\end{align*}\")\n\n doc.line(r\"\\begin{table}[H]\")\n doc.line(r\"\\begin{tabular}{cccl}\")\n doc.line(r\" $A$ & $Q$ & $M$ & Notes \\\\ \\hline \\\\\")\n doc.line(r\" %s & %s & %s & %s \\\\\" % (a, q, m, \"Initial values\"))\n while count > 0:\n # Combine A,Q, shift left and update A,Q\n aq = Bits(a.bits + q.bits)\n aq.shift_left()\n a, q = tuple(aq.chunks(l))\n\n note = \"Shift left\"\n doc.line(r\" %s & %s & %s & %s \\\\\" % (a, q, m, note))\n\n # A <- A - M\n a = add_twos_complement(a, negate_twos_complement(m))\n note = \"$A \\gets A - M$\"\n doc.line(r\" %s & %s & %s & %s \\\\\" % (a, q, m, note))\n\n # If A < 0\n if a.lsb() == True:\n # Q0 <- 0\n # A <- A + M\n q.bits[-1] = False\n a = add_twos_complement(a, m)\n note = \"$A < 0$, $Q_{0} \\gets 0$, $A \\gets A + M$\"\n doc.line(r\" %s & %s & %s & %s \\\\\" % (a, q, m, note))\n else:\n # Q0 <- 1\n q.bits[-1] = True\n note = \"$A \\geq 0$, $Q_{0} \\gets 1$\"\n doc.line(r\" %s & %s & %s & %s \\\\\" % (a, q, m, note))\n\n # Count <- Count - 1\n count -= 1\n doc.line(r\"\\end{tabular}\")\n doc.line(r\"\\end{table}\")\n\n doc.line(r\"Quotient in Q: %s\" % (q))\n doc.skip()\n doc.line(r\"Remainder in A: %s\" % (a))\n return (q, a)\n\n\n\ndef float_to_binary(f, doc=vdoc):\n pass\n\ndef binary_to_float(bits, doc=vdoc):\n pass\n\ndef float_decimal_addition(a, b, doc=vdoc):\n pass\n\ndef float_decimal_multiplication(a, b, doc=vdoc):\n pass\n","sub_path":"scripts/binary.py","file_name":"binary.py","file_ext":"py","file_size_in_byte":10510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"117137299","text":"\n\n\n\nimport libtcodpy as libtcod\n\nfrom entity import Entity\nfrom fov_functions import initialize_fov, recompute_fov\nfrom input_handlers import handle_keys\nfrom map_objects.game_map import GameMap\nfrom render_functions import clear_all, render_all\n\ndef load_customfont():\n #The index of the first custom tile in the file\n a = 256\n\n #The \"y\" is the row index, here we load the sixth row in the font file. 
Increase the \"6\" to load any new rows from the file\n for y in range(5,6):\n libtcod.console_map_ascii_codes_to_font(a, 32, 0, y)\n a += 32\n\n\ndef main():\n screen_width = 40\n screen_height = 30\n\n # Size of the map\n map_width = 40\n map_height = 30\n\n # Some variables for the rooms in the map\n room_max_size = 10\n room_min_size = 6\n max_rooms = 30\n\n fov_algorithm = 0\n fov_light_walls = True\n fov_radius = 10\n\n colors = {\n 'dark_wall': libtcod.Color(0, 0, 100),\n 'dark_ground': libtcod.Color(50, 50, 150),\n 'light_wall': libtcod.Color(130, 110, 50),\n 'light_ground': libtcod.Color(200, 180, 50)\n }\n\n libtcod.console_init_root(screen_width, screen_height, 'python/libtcod tutorial', False, libtcod.RENDERER_SDL)\n\n # libtcod.console_set_custom_font('arial8x8.png', libtcod.FONT_TYPE_GREYSCALE | libtcod.FONT_LAYOUT_TCOD)\n libtcod.console_set_custom_font('TiledFontMed.png', libtcod.FONT_TYPE_GREYSCALE | libtcod.FONT_LAYOUT_TCOD, 32, 10)\n # libtcod.console_set_custom_font('TiledFontTiny.png', libtcod.FONT_TYPE_GREYSCALE | libtcod.FONT_LAYOUT_TCOD, 32, 10)\n load_customfont()\n\t\t\n wall_tile = 256 \n floor_tile = 257\n player_tile = 258\n orc_tile = 259\n troll_tile = 260\n scroll_tile = 261\n healingpotion_tile = 262\n sword_tile = 263\n shield_tile = 264\n stairsdown_tile = 265\n dagger_tile = 266\n nullbot_tile = 267\n \n player = Entity(int(screen_width / 2), int(screen_height / 2), '@', libtcod.white)\n npc = Entity(int(screen_width / 2 - 4), int(screen_height / 2), '@', libtcod.yellow)\n entities = [npc, player]\n\n \n\n\n\n con = libtcod.console_new(screen_width, screen_height)\n\n game_map = GameMap(map_width, map_height)\n game_map.make_map(max_rooms, room_min_size, room_max_size, map_width, map_height, player)\n\n fov_recompute = True\n\n fov_map = initialize_fov(game_map)\n\n key = libtcod.Key()\n mouse = libtcod.Mouse()\n\n while not libtcod.console_is_window_closed():\n libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS, key, mouse)\n\n if fov_recompute:\n recompute_fov(fov_map, player.x, player.y, fov_radius, fov_light_walls, fov_algorithm)\n\n render_all(con, entities, game_map, fov_map, fov_recompute, screen_width, screen_height, colors)\n\n fov_recompute = False\n\n libtcod.console_flush()\n\n clear_all(con, entities)\n\n action = handle_keys(key)\n\n move = action.get('move')\n exit = action.get('exit')\n fullscreen = action.get('fullscreen')\n\n if move:\n dx, dy = move\n\n if not game_map.is_blocked(player.x + dx, player.y + dy):\n player.move(dx, dy)\n\n fov_recompute = True\n\n if exit:\n return True\n\n if fullscreen:\n libtcod.console_set_fullscreen(not libtcod.console_is_fullscreen())\n\n\nif __name__ == '__main__':\n main()","sub_path":"engine_safe.py","file_name":"engine_safe.py","file_ext":"py","file_size_in_byte":3369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"415612550","text":"import json\nimport STORM_Samurai as sam\n\n# CONVERTING JSON TO PYTHON:\n# some JSON:\nx = '{ \"filename\": \"/Users/RajSeehra/University/Masters/Semester 2/Teaching_python-master/Week 2/Data/640.tif\", \"filter\":\"kernel\"}'\n\n# parse x:\ny = json.loads(x)\n\n# the result is a Python dictionary:\nprint(y[\"filename\"])\n\n# CONVERTING JSON TO PYTHON:\n\n\n# CONVERTING PYTHON TO JSON:\n# a Python object (dict):\nx = {\n \"filename\": \"/Users/RajSeehra/University/Masters/Semester 2/Teaching_python-master/Week 2/Data/640.tif\",\n \"filter\": \"kernel\",\n}\n\n# convert into JSON:\ny = json.dumps(x)\n\n# the 
result is a JSON string:\nprint(y)\n\n# Saving with json.\nwith open(\"data_file.json\", \"w\") as write_file:\n json.dump(x, write_file)\n# CONVERTING PYTHON TO JSON:\n\n\nwith open(\"data_file.json\", \"r\") as read_file:\n data = json.load(read_file)\n\nprint(data[\"filename\"])\n\nx = sam.filter_switcher(data[\"filename\"], data[\"filter\"])\nprint(x)","sub_path":"Return1/Raj/STORM/Settings_file.py","file_name":"Settings_file.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"502172383","text":"from torch.utils.data.dataset import Dataset\nfrom config import config\nfrom torch.utils.data import Dataset\nimport pandas as pd\n\nclass NewsSummaryDataset(Dataset):\n def __init__(self, \n data : pd.DataFrame,\n tokenizer,\n text_max_token_len : int = config.text_token_max_length,\n summary_max_token_len : int = config.summary_token_max_length):\n \n self.tokenizer = tokenizer\n self.data = data\n self.text_max_token_len = text_max_token_len\n self.summary_max_token_len = summary_max_token_len\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, index : int):\n data_row = self.data.iloc[index]\n # Encode text\n text = data_row['text']\n text_encoding = self.tokenizer(text,\n max_length = self.text_max_token_len,\n padding = \"max_length\",\n truncation = True,\n return_attention_mask = True,\n add_special_tokens = True,\n return_tensors = \"pt\")\n\n \n \n # Encode summary\n summary = data_row['summary']\n summary_encoding = self.tokenizer(summary,\n max_length = self.summary_max_token_len,\n padding = \"max_length\",\n truncation = True,\n return_attention_mask = True,\n add_special_tokens = True,\n return_tensors = \"pt\")\n \n # Replace 0's with -100 to let the transformer understand\n labels = summary_encoding['input_ids']\n labels[labels == 0] = -100\n\n return dict(\n text = text,\n summary = summary,\n text_input_ids = text_encoding['input_ids'].flatten(),\n text_attention_mask = text_encoding['attention_mask'].flatten(),\n labels = labels.flatten(),\n labels_attention_mask = summary_encoding['attention_mask'].flatten()\n )","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":2237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"505392096","text":"# vim: tabstop=4 shiftwidth=4 softtabstop=4\n\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport webtest\n\nimport pecan.testing\n\nfrom keystone.openstack.common import jsonutils\nfrom keystone.tests.contrib.kds import base\n\n\ndef urljoin(*args):\n return \"/%s/\" % \"/\".join([a.strip(\"/\") for a in args])\n\n\ndef method_func(method):\n def func(self, url, **kwargs):\n kwargs['method'] = method\n return self.request(url, **kwargs)\n\n return func\n\n\nclass BaseTestCase(base.BaseTestCase):\n\n METHODS = {'get': webtest.TestApp.get,\n 'post': webtest.TestApp.post,\n 'put': webtest.TestApp.put,\n 'patch': webtest.TestApp.patch,\n 'delete': webtest.TestApp.delete,\n 'options': webtest.TestApp.options,\n 'head': webtest.TestApp.head}\n\n def setUp(self):\n super(BaseTestCase, self).setUp()\n root = 'keystone.contrib.kds.api.root.RootController'\n\n self.app_config = {\n 'app': {\n 'root': root,\n 'modules': ['keystone.contrib.kds.api']\n },\n }\n\n self.app = pecan.testing.load_test_app(self.app_config)\n self.addCleanup(pecan.set_config, {}, overwrite=True)\n\n def request(self, url, method, **kwargs):\n try:\n json = kwargs.pop('json')\n except KeyError:\n pass\n else:\n kwargs['content_type'] = 'application/json'\n kwargs['params'] = jsonutils.dumps(json)\n\n try:\n func = self.METHODS[method.lower()]\n except KeyError:\n self.fail(\"Unsupported HTTP Method: %s\" % method)\n else:\n return func(self.app, url, **kwargs)\n\n get = method_func('get')\n post = method_func('post')\n put = method_func('put')\n delete = method_func('delete')\n options = method_func('options')\n head = method_func('head')\n","sub_path":"keystone/tests/contrib/kds/api/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"219557501","text":"# _*_ coding:utf-8 _*_\nimport sys\n\nfrom Task import TK_papaandroid, TK_TerminalHandle\nfrom common import dataprovide, main_package,stringHelper\nfrom element_locator import papaandroid\nimport imp\nfrom common import script_ultils\nsys.path.append(\"..\")\nimport unittest\nfrom time import sleep\nfrom Task import TK_TerminalHandle\n\n\nclass Case(TK_papaandroid.Task, TK_TerminalHandle.TerminalHandle, unittest.TestCase):\n #'My' page: verify page element navigation\n def test_nologin_my(self):\n \"\"\"'My' page: verify page element navigation\"\"\"\n self.pop_close()\n self.click_button(papaandroid.b_My)\n self.click_button(papaandroid.b_login)\n self.assertTrue(self.isElement(papaandroid.edit_iphone))\n print(\"Clicked the login button, successfully entered the login page\")\n self.back()\n self.click_button(papaandroid.b_goods_address)\n self.assertTrue(self.isElement(papaandroid.edit_iphone))\n print(\"Clicked shipping address, successfully entered the login page\")\n self.back()\n self.click_button(papaandroid.b_credit_recycling)\n # self.assertTrue(self.isElement(papaandroid.edit_iphone))\n print(\"Clicked credit recycling, successfully entered the login page\")\n self.back()\n self.click_button(papaandroid.b_about)\n self.assertTrue(self.isElement(papaandroid.tx_assertAbout))\n print(\"Clicked About Us, successfully entered the About Us page\")\n\n\n\n\n\nif __name__ == '__main__':\n main_package.main_package(Case(\"test_nologin_my\"),\"/Users/xygjzgs/selenium/\")\n","sub_path":"UIAutoTestCase_papa-master-318dedd5f9b3bd112147717b3e654d000e216775/UIAutoTestCase_papa-master-318dedd5f9b3bd112147717b3e654d000e216775/TestCase/TC_ppandroid2_nologin_my.py","file_name":"TC_ppandroid2_nologin_my.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"456223717","text":"import numpy as np\nfrom matplotlib import pyplot as plt\nfrom scipy.stats import beta\n###########################\n##### plot functions #####\n###########################\ndef plot(x,y,y1):\n plot_dist(x,y,y1)\n plt.show()\n plt.close()\ndef plot_dist(x,y,y1):\n plt.plot(x,y,'k.')\n plt.plot(x,y1)\n###################################################\n##### functions for random data distribution #####\n###################################################\ndef create_data(l,n,typ='lin'):\n ''' Plot data in range 0 to l with n random distributed data points. Chose between\n lin, sqrt for typ of data. Default is linear'''\n if typ=='lin':\n x,y1,y2=c_sqrt(l,n)\n elif typ=='sqrt':\n x,y1,y2=c_lin(l,n)\n plt.plot(x,y1)\n plt.plot(x,y2,'k.')\n plt.show()\ndef c_line(d,n):\n '''Create linear distributed data for fit function '''\n x=np.linspace(0.1,d,n).reshape(n,1)\n y1=2*x+2\n return x,y1,np.random.normal(y1)\ndef c_sqrt(d,n):\n '''Create sqrt distributed data for fit function '''\n x=np.linspace(0.1,d,n).reshape(n,1)\n y1=x**2+0.5*x-5\n return x,y1,np.random.normal(y1,scale=13)\ndef c_sin(d,n):\n '''Create poly distributed data for fit function '''\n x=np.linspace(0.1,d,n).reshape(n,1)\n y1=np.sin(2*x)\n return x,y1,np.random.normal(y1,scale=0.35)\ndef c_beta(d,n,dis=0.05):\n '''Create cum-beta distributed data for fit function '''\n x=np.linspace(0.1,d,n).reshape(n,1)\n b=beta(4,3,scale=d)\n y1=b.cdf(x)\n y2=np.zeros(y1.shape)\n for i in range(y2.shape[0]):\n y2[i]=np.random.normal(y1[i],scale=dis+dis*y1[i])\n y2[np.where(y2<0)]=0\n y2[np.where(y2>1)]=1\n return x,y1,y2\n####################################################################################\n##### Bootstrap and normal function to solve eqaution for data distribution #####\n####################################################################################\ndef normal_f2(X,y):\n '''Normal fuction to slove the given equations. Input is X matrix and y vector. \n b=(X^T*X)^-1*X^T*y '''\n try:\n return np.linalg.inv(X.transpose().dot(X)).dot(X.transpose()).dot(y)\n except:\n return np.zeros(X.shape[1])\ndef normal_f(x,y):\n '''Normal fuction to slove the given equations. Input is X matrix and y vector. 
\n b=(X^T*X)^-1*X^T*y '''\n X=np.ones((x.shape[0],x.shape[1]+1))\n X[:,1:]=x\n return np.linalg.inv(X.transpose().dot(X)).dot(X.transpose()).dot(y)\ndef boost(x,y,n,dim=3,frac=0.75):\n ''' Bootstrap method for randomly picking variables '''\n size=int(x.shape[0]*frac)\n length=x.shape[0]\n b=np.zeros((n,dim))\n for i in range(n):\n s=np.random.randint(0,length,length)\n b[i]=normal_f(x[s],y[s]).transpose() \n return b\ndef boost2(x,y,n):\n ''' Bootstrap method for randomly picking variables '''\n length=x.shape[0]\n b=np.zeros((n,x.shape[1]))\n b=[]\n for i in range(n):\n s=np.random.randint(0,length,length)\n X=x[s]\n Y=y[s]\n try:\n b.append(np.linalg.inv(X.transpose().dot(X)).dot(X.transpose()).dot(Y))\n except:\n continue\n return np.array(b)\n###################################################\n##### linear and poly regression functions #####\n###################################################\ndef mlinear(d,n):\n '''Plot the linear regression function created of n randomly distributed points in R2 space '''\n x,y1,y=c_line(d,n)\n b=normal_f(x,y)\n plot_dist(x,y,y1)\n plt.plot(x,b[0]+b[1]*x,'r')\n plt.show()\ndef msqrt(d,n):\n '''Plot the quadratic regression function created of n randomly distributed points in R2 space '''\n x,y1,y=c_sqrt(d,n)\n X=np.append(x,x**2,axis=1)\n y2=np.zeros(y1.shape[0])\n b0=normal_f(X,y)\n B=boost(X,y,2000)\n plot_dist(x,y,y1)\n for b in B:\n dy=b[0]+b[1]*X[:,0]+b[2]*X[:,1]\n plt.plot(x,dy,'g',alpha=0.01)\n b=B.sum(axis=0)/B.shape[0]\n plt.plot(x,b[0]+b[1]*X[:,0]+b[2]*X[:,1],'k')\n plt.plot(x,b0[0]+b0[1]*X[:,0]+b0[2]*X[:,1],'r')\n plt.show()\n plt.close()\ndef msin(d,n,frac):\n '''Plot the quartic regression function created of n randomly distributed points in R2 space '''\n x,y1,y=c_sin(d,n)\n X=np.append(x,x**2,axis=1)\n X=np.append(X,x**3,axis=1)\n X=np.append(X,x**4,axis=1)\n b0=normal_f(X,y)\n B=boost(X,y,20000,dim=5,frac=frac)\n plot_dist(x,y,y1)\n# for b in B:\n# plt.plot(x,b[0]+b[1]*X[:,0]+b[2]*X[:,1]+b[3]*X[:,2]+b[4]*X[:,3],'g',alpha=0.05)\n b=B.sum(axis=0)/B.shape[0]\n plt.plot(x,b[0]+b[1]*X[:,0]+b[2]*X[:,1]+b[3]*X[:,2]+b[4]*X[:,3],'k')\n plt.plot(x,b0[0]+b0[1]*X[:,0]+b0[2]*X[:,1]+b0[3]*X[:,2]+b0[4]*X[:,3],'r')\n plt.show()\n plt.close()\n######################################\n##### piecewise linear functions #####\n######################################\ndef create_y(X,b):\n y=b[0]+b[1]*X[:,1]\n for j in range(2,X.shape[1]):\n y+=b[j]*X[:,j]\n return y\ndef piecewice_lin(d,n):\n '''Plot the piecewise linear regression function created of n randomly distributed points in R2 space '''\n x,y1,y=c_sin(d,n)# y is the data points and y1 is the line\n plot_dist(x,y,y1)\n xs=x.reshape(x.shape[0])\n X=np.ones(n).reshape(n,1)\n for i in range(d):\n X=np.append(X,x-i,axis=1)\n X[np.where(X<0)]=0\n b=normal_f2(X,y)\n plt.plot(x,create_y(X,b),'g')\n B=boost2(X,y,20000)\n b=B.sum(axis=0)/B.shape[0]\n plt.plot(x,create_y(X,b),'r')\n plt.show()\ndef piecewice_sqrt(d,n,dis):\n '''Plot the piecewise cubic regression function created of n randomly distributed points in R2 space '''\n x,y1,y=c_beta(d,n,dis)# y is the data points and y1 is the line\n plot_dist(x,y,y1)\n xs=x.reshape(x.shape[0])\n X=np.ones(n).reshape(n,1)\n X=np.append(X,x,axis=1)\n X=np.append(X,x**2,axis=1)\n for i in range(d):\n X=np.append(X,(x-i)**3,axis=1)\n X[np.where(X<0)]=0\n b=normal_f2(X,y)\n B=boost2(X,y,2000)\n for b in B:\n plt.plot(x,create_y(X,b),'g',alpha=0.01)\n b=B.sum(axis=0)/B.shape[0]\n plt.plot(x,create_y(X,b),'r')\n plt.axis([0,d,0,1.4])\n plt.show()\nif __name__=='__main__':\n 
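# demo: bootstrap piecewise-cubic fit to noisy cumulative-beta data\n 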
piecewice_sqrt(7,110,0.03)\n# msin(8,1000,0.75)\n\n","sub_path":"boots.py","file_name":"boots.py","file_ext":"py","file_size_in_byte":6134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"427528239","text":"def solve(arr, target):\n if len(arr) == 0 or len(arr[0]) == 0:\n return False\n\n low = 0\n high = len(arr) - 1\n\n while low <= high:\n mid = (low + high) // 2\n\n if arr[mid][0] == target:\n return True\n elif target < arr[mid][0]:\n high = mid - 1\n else:\n if target > arr[mid][-1]:\n low = mid + 1\n else:\n low = mid\n break\n\n if low > high:\n return False\n\n row = arr[low]\n\n low = 0\n high = len(row) - 1\n while low <= high:\n mid = (low + high) // 2\n\n if row[mid] == target:\n return True\n elif target < row[mid]:\n high = mid - 1\n else:\n low = mid + 1\n\n return False\n\n\nA = [\n [1]\n]\nB = 2\nprint(solve(A, B))\n","sub_path":"src/arrays/search-a-2d-matrix.py","file_name":"search-a-2d-matrix.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"29461548","text":"#! /usr/bin/env python3\nimport argparse\nimport time\nfrom datetime import datetime\nfrom functools import update_wrapper\nfrom time import sleep\n\nfrom rich import box, print\nfrom rich.align import Align\nfrom rich.bar import Bar\nfrom rich.console import Console, RenderGroup\nfrom rich.layout import Layout\nfrom rich.live import Live\nfrom rich.panel import Panel\nfrom rich.progress import BarColumn, Progress, SpinnerColumn, TextColumn\nfrom rich.progress_bar import ProgressBar\nfrom rich.syntax import Syntax\nfrom rich.table import Table\nfrom rich.text import Text\n\nfrom pfxbrick import *\n\nconsole = Console()\n\n\ndef make_layout() -> Layout:\n \"\"\"Define the layout.\"\"\"\n layout = Layout(name=\"root\")\n layout.split(\n Layout(name=\"header\", size=3),\n Layout(name=\"brightvol\", size=3),\n Layout(name=\"status\", size=4),\n Layout(name=\"filesys\", size=4),\n Layout(name=\"bluetooth\", size=4),\n Layout(name=\"lights\", size=11),\n Layout(name=\"motors\", size=5),\n Layout(name=\"motor_rate\", size=5),\n Layout(name=\"audio\", size=7),\n )\n layout[\"brightvol\"].split_row(\n Layout(name=\"bright\"),\n Layout(name=\"vol\", ratio=1),\n )\n layout[\"status\"].split_row(\n Layout(name=\"status1\"),\n Layout(name=\"status2\"),\n )\n layout[\"audio\"].split_row(\n Layout(name=\"audio_ch\"),\n Layout(name=\"audio_state\"),\n )\n layout[\"audio_state\"].split_column(\n Layout(name=\"audio_peak\"),\n Layout(name=\"audio_idx\"),\n )\n return layout\n\n\nclass Header:\n \"\"\"Display header with clock.\"\"\"\n\n def __init__(self, brick):\n self.brick = brick\n\n def __rich__(self) -> Panel:\n grid = Table.grid(expand=False)\n grid.add_column(justify=\"left\", min_width=22, ratio=1)\n grid.add_column(justify=\"left\", min_width=15, ratio=1)\n grid.add_column(justify=\"left\", min_width=42, ratio=1)\n grid.add_column(justify=\"right\", ratio=1)\n grid.add_row(\n \"[light_slate_blue]%s [bold cyan]%s\"\n % (self.brick.product_id, self.brick.product_desc),\n \"S/N: [yellow]%s\" % (self.brick.serial_no),\n \"[white]Firmware [green]v.%s build %s [white]ICD [green]v.%s\"\n % (self.brick.firmware_ver, self.brick.firmware_build, self.brick.icd_rev),\n \"\",\n \"[bold yellow]%s\" % (self.brick.name),\n # datetime.now().ctime().replace(\":\", \"[blink]:[/]\"),\n )\n return Panel(grid)\n\n\ndef update_status(brick):\n st = brick.state\n panel = Table.grid(padding=0)\n 
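# one narrow centered column per status field shown in the rows below\n 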
for _ in range(8):\n panel.add_column(ratio=1, min_width=7, justify=\"center\")\n panel.add_row(\"State\", \"Err\", \"sec\", \"msec\", \"Script\", \"Line\", \"L2\")\n es = (\n \"[bold red]0x%02X\" % (brick.error)\n if brick.error > 0\n else \"[green]0x%02X\" % (brick.error)\n )\n ss = (\n \"[magenta]%02X\" % (st.script_state)\n if st.script_state == 0\n else console.status(\"[bold magenta]0x%02X\" % (st.script_state))\n )\n panel.add_row(\n \"0x%02X\" % (brick.status),\n es,\n \"[cyan]%5d\" % (st.slow_count),\n \"[cyan]%5d\" % (st.millisec_count),\n ss,\n \"[magenta]%3d\" % (st.script_line),\n \"%02X\" % (st.status_latch2),\n )\n return Panel(panel)\n\n\ndef update_fs_status(st):\n panel = Table.grid(padding=0)\n for i in range(9):\n width = 7 if i < 6 else 19\n panel.add_column(ratio=1, min_width=width, justify=\"center\")\n panel.add_row(\n \"\", \"State\", \"Flags\", \"Files\", \"Open\", \"Erase\", \"Capacity\", \"Free\", \"Empty\"\n )\n panel.add_row(\n \"[bold white]Filesystem \",\n \"[cyan]0x%02X\" % (st.filesys.task_state),\n \"[cyan]0x%02X\" % (st.filesys.flags),\n \"[magenta]%2d\" % (st.filesys.file_count),\n \"[magenta]%2d\" % (st.filesys.open_files),\n \"[cyan]0x%04X\" % (st.filesys.erase_sector),\n \"%d sec [b]%d kB[/b]\"\n % (st.filesys.sector_capacity, st.filesys.sector_capacity * 4096 / 1000),\n \"%d sec [b]%d kB[/b]\"\n % (st.filesys.free_sectors, st.filesys.free_sectors * 4096 / 1000),\n \"%d sec [b]%d kB[/b]\"\n % (st.filesys.empty_sectors, st.filesys.empty_sectors * 4096 / 1000),\n )\n return Panel(panel)\n\n\ndef update_bt_status(st):\n panel = Table.grid(padding=0)\n for i in range(9):\n justify = \"center\" if i < 7 else \"right\"\n panel.add_column(ratio=1, min_width=9, justify=justify)\n panel.add_row(\n \"\", \"State\", \"Flags\", \"Error\", \"Services\", \"Features\", \"Auth\", \"Tx\", \"Rx\"\n )\n es = (\n \"[bold red]0x%04X\" % (st.bt.error)\n if st.bt.error > 0\n else \"[green]0x%04X\" % (st.bt.error)\n )\n stx = \"[orange1]%d\" % (st.bt.tx_count)\n srx = \"[green]%d\" % (st.bt.rx_count)\n if st.status_latch1 & 0x40:\n stx = console.status(stx)\n srx = console.status(srx)\n\n panel.add_row(\n \"[bold white]Bluetooth \",\n \"[cyan]0x%02X\" % (st.bt.state),\n \"[cyan]0x%04X\" % (st.bt.flags),\n es,\n \"0x%04X\" % (st.bt.services),\n \"0x%04X\" % (st.bt.features),\n \"0x%04X\" % (st.bt.auth),\n stx,\n srx,\n )\n return Panel(panel)\n\n\ndef update_audio(st, brick):\n panel = Table.grid(expand=True)\n panel.title = \"Audio Channels\"\n panel.title_style = \"bold white\"\n panel.add_column(ratio=1, justify=\"left\", min_width=4)\n panel.add_column(ratio=2, justify=\"left\", min_width=10)\n panel.add_column(ratio=5, min_width=24)\n for i in range(4):\n fid = st.audio_ch[i].file_id\n fn = \"\"\n if not fid == 0xFF:\n fn = brick.filedir.get_file_dir_entry(fid)\n if fn is not None:\n fn = fn.name\n fs = \"File: --- \" if fid == 0xFF else \"File: [aquamarine3]0x%02X %s\" % (fid, fn)\n panel.add_row(\n \"Ch %d \" % (i),\n \"Mode: [cyan]0x%02X \" % (st.audio_ch[i].mode),\n fs,\n )\n return Panel(panel)\n\n\ndef update_audio_state(st):\n panel = Table.grid()\n level_bar = ProgressBar(\n total=255,\n complete_style=\"aquamarine3\",\n finished_style=\"aquamarine3\",\n completed=st.audio_peak,\n )\n for i in range(3):\n panel.add_column(ratio=1)\n panel.add_row(\n \"Audio peak: \",\n level_bar,\n \"[aquamarine3]0x%02X\" % (st.audio_peak),\n )\n return Panel(panel)\n\n\ndef update_audio_idx(brick):\n panel = Table.grid()\n panel.add_column(ratio=2)\n for _ in 
range(brick.config.settings.notchCount):\n panel.add_column(ratio=1)\n notches = [\"0x%02X \" % x for x in brick.config.settings.notchBounds]\n n = brick.state.audio_notch\n if n < len(brick.config.settings.notchBounds):\n notches[n] = \"[black on white]0x%02X \" % (brick.config.settings.notchBounds[n])\n panel.add_row(\"Curr Notch \", \"%d\" % (brick.state.audio_notch + 1))\n panel.add_row(\"Notch\", *notches)\n return Panel(panel)\n\n\ndef update_brightness(value) -> Panel:\n \"\"\"Render the global brightness setting as a bar.\"\"\"\n panel = Table.grid(padding=1)\n bright_bar = ProgressBar(\n total=255,\n complete_style=\"light_slate_blue\",\n finished_style=\"light_slate_blue\",\n completed=value,\n )\n panel.add_column(ratio=1)\n panel.add_column(ratio=1)\n panel.add_column(ratio=1)\n panel.add_row(\"Brightness: \", bright_bar, \"[light_slate_blue]%3d\" % (value))\n return Panel(panel)\n\n\ndef update_volume(value) -> Panel:\n \"\"\"Render the global volume setting as a bar.\"\"\"\n panel = Table.grid()\n volume_bar = ProgressBar(\n total=255,\n complete_style=\"aquamarine3\",\n finished_style=\"aquamarine3\",\n completed=value,\n )\n panel.add_column(ratio=1)\n panel.add_column(ratio=1)\n panel.add_column(ratio=1)\n panel.add_row(\"Volume: \", volume_bar, \"[aquamarine3]%3d\" % (value))\n return Panel(panel)\n\n\ndef update_motors(st):\n panel = Table.grid()\n panel.title = \"Motor Channels\"\n panel.title_style = \"bold white\"\n panel.add_column(ratio=1)\n panel.add_column(ratio=1)\n panel.add_column(ratio=1)\n panel.add_column(ratio=1)\n panel.add_column(ratio=1)\n panel.add_column(ratio=1)\n panel.add_column(ratio=1)\n panel.add_column(ratio=1)\n panel.add_column(ratio=1)\n panel.add_column(ratio=1)\n for ch in range(0, 2):\n motor = st.motors[ch]\n tgtspd = motor.target_speed\n curspd = motor.current_speed\n pwmspd = motor.pwm_speed\n chactive = (ch == 0 and st.status_latch2 & 0x03) or (\n ch == 1 and st.status_latch2 & 0x0C\n )\n style = \"green\"\n if not chactive:\n s = \":black_medium_square:\"\n if chactive and motor.dir == \"Forward\":\n s = \":arrow_forward:\"\n style = \"green\"\n elif chactive:\n s = \":arrow_backward:\"\n tgtspd = (~tgtspd + 1) & 0xFF\n curspd = (~curspd + 1) & 0xFF\n style = \"red\"\n tgt = ProgressBar(\n total=255, completed=tgtspd, complete_style=style, finished_style=style\n )\n curr = ProgressBar(\n total=255, completed=curspd, complete_style=style, finished_style=style\n )\n pwm = ProgressBar(\n total=255, completed=pwmspd, complete_style=style, finished_style=style\n )\n panel.add_row(\n \"Ch %d %s Tgt: \" % (ch + 1, s),\n tgt,\n \" [cyan]%3d \" % (tgtspd),\n \"Curr: \",\n curr,\n \" [cyan]%3d \" % (curspd),\n \"PWM: \",\n pwm,\n \" [cyan]0x%02X\" % (pwmspd),\n )\n return Panel(panel)\n\n\ndef update_lights(st):\n panel = Table.grid()\n panel.title = \"Light Channels\"\n panel.title_style = \"bold white\"\n panel.add_column(ratio=1)\n panel.add_column(ratio=1)\n panel.add_column(ratio=1)\n panel.add_column(ratio=1)\n panel.add_column(ratio=1)\n panel.add_column(ratio=1)\n panel.add_column(ratio=1)\n style = \"light_goldenrod3\"\n for ch in range(0, 8):\n light = st.lights[ch]\n tgt = ProgressBar(\n total=255,\n completed=light.target_level,\n complete_style=style,\n finished_style=style,\n )\n curr = ProgressBar(\n total=255,\n completed=light.current_level,\n complete_style=style,\n finished_style=style,\n )\n if light.active:\n s = \":black_square_button: \"\n else:\n s = \":black_large_square: \"\n panel.add_row(\n \"Ch %d %s Tgt: \" % (ch + 1, s),\n tgt,\n \" [cyan]%3d \" % 
(light.target_level),\n \"Curr: \",\n curr,\n \" [cyan]%3d \" % (light.current_level),\n )\n return Panel(panel)\n\n\ndef update_bitfield(byte, size, labels):\n panel = Table.grid(expand=True)\n for _ in range(size):\n panel.add_column(width=8, justify=\"center\", ratio=1)\n bits = []\n for x in range(size):\n mask = 1 << x\n if byte & mask:\n s = \" :black_square_button: \"\n else:\n s = \" :black_large_square: \"\n bits.append(s)\n panel.add_row(*bits)\n panel.add_row(*labels)\n return Panel(panel)\n\n\ndef update_motor_rates(st, b):\n panel = Table.grid(expand=True)\n panel.title = \"Triggered Sound State\"\n panel.title_style = \"bold white\"\n for _ in range(8):\n panel.add_column(width=8, justify=\"center\", ratio=1)\n panel.add_row(\n \"Motor Spd\",\n \"Motor PWM\",\n \"Motor Rate\",\n \"[Change Dir]\",\n \"[Set Off]\",\n \"[Rapid Acc]\",\n \"[Rapid Dec]\",\n \"[Brake]\",\n )\n if st.motor_rate_ptr & 0x80:\n rate = -((~st.motor_rate_ptr + 1) & 0xFF)\n else:\n rate = st.motor_rate_ptr\n if st.trig_change_dir_state > 0:\n s1 = \"[bold magenta]%2d\" % (st.trig_change_dir_state)\n else:\n s1 = \"[bold black]%2d\" % (st.trig_change_dir_state)\n if st.trig_set_off_state > 0:\n s2 = \"[bold magenta]%2d\" % (st.trig_set_off_state)\n else:\n s2 = \"[bold black]%2d\" % (st.trig_set_off_state)\n if st.trig_rapid_accel_state > 0:\n s3 = \"[bold magenta]%2d\" % (st.trig_rapid_accel_state)\n else:\n s3 = \"[bold black]%2d\" % (st.trig_rapid_accel_state)\n if st.trig_rapid_decel_state > 0:\n s4 = \"[bold magenta]%2d\" % (st.trig_rapid_decel_state)\n else:\n s4 = \"[bold black]%2d\" % (st.trig_rapid_decel_state)\n if st.trig_brake_state > 0:\n s5 = \"[bold magenta]%2d\" % (st.trig_brake_state)\n else:\n s5 = \"[bold black]%2d\" % (st.trig_brake_state)\n panel.add_row(\n \"[cyan]%3d\" % (st.motor_ptr),\n \"[cyan]0x%02X\" % (st.motor_pwm_ptr),\n \"[green]%2d\" % (rate),\n s1,\n s2,\n \"[green]%2d %s\" % (b.config.settings.rapidAccelThr, s3),\n \"[green]%2d %s\" % (b.config.settings.rapidDecelThr, s4),\n \"[green]%2d [cyan]%2d %s\"\n % (\n b.config.settings.brakeDecelThr,\n b.config.settings.brakeSpeedThr,\n s5,\n ),\n )\n return Panel(panel)\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description=\"PFx Brick real time monitoring utility. 
Press Ctrl-C to exit monitor.\"\n )\n parser.add_argument(\n \"-s\",\n \"--serialno\",\n default=None,\n help=\"Perform monitoring on PFx Brick with specified serial number\",\n )\n args = parser.parse_args()\n argsd = vars(args)\n\n b = get_one_pfxbrick(argsd[\"serialno\"])\n r = b.open()\n if not r:\n exit()\n b.get_status()\n b.get_config()\n icd = b.get_icd_rev()\n name = b.get_name()\n b.refresh_file_dir()\n\n layout = make_layout()\n layout[\"header\"].update(Header(b))\n with Live(layout, refresh_per_second=8, screen=True):\n flip = True\n while True:\n st = b.get_current_state()\n b.get_status()\n if flip:\n b.get_fs_state()\n flip = False\n else:\n if b.has_bluetooth:\n b.get_bt_state()\n flip = True\n layout[\"bright\"].update(update_brightness(st.brightness))\n layout[\"vol\"].update(update_volume(st.volume))\n layout[\"status1\"].update(\n update_bitfield(\n st.status_latch1,\n 8,\n [\n \"USB:link:\",\n \"USB\",\n \"IR\",\n \"IR:lock:\",\n \":speaker:\",\n \"BLE:link:\",\n \"BLE\",\n \"FS\",\n ],\n )\n )\n layout[\"filesys\"].update(update_fs_status(st))\n layout[\"status2\"].update(update_status(b))\n if b.has_bluetooth:\n layout[\"bluetooth\"].update(update_bt_status(st))\n layout[\"lights\"].update(update_lights(st))\n layout[\"motors\"].update(update_motors(st))\n layout[\"motor_rate\"].update(update_motor_rates(st, b))\n layout[\"audio_ch\"].update(update_audio(st, b))\n layout[\"audio_peak\"].update(update_audio_state(st))\n layout[\"audio_idx\"].update(update_audio_idx(b))\n\n time.sleep(0.15)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"pfxbrick/scripts/pfxmonitor.py","file_name":"pfxmonitor.py","file_ext":"py","file_size_in_byte":15276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"437875696","text":"# -*- coding: utf-8 -*-\n#\n# Update an article via Qiita API\n#\nimport sys\nimport os\nimport logging\nimport json\nimport requests\nimport qiita_api\n\nQIITA_URL = 'https://qiita.com/api/v2/items'\n\nTAGS = [\"id\", \"title\", \"tags\"]\n\ndef parse(item):\n return {\n 'title': item['title'],\n 'qiita_id': item['id'],\n 'tags': item['tags'],\n 'body': f'''\n この記事はGitHub Pagesへ移動しました。\\n\n \"https://perpouh.github.io/blog/qiita/{item['title']}.html\"\n ''',\n 'tweet': False,\n 'private': False,\n }\n\ndef submit(item, token, url=QIITA_URL, article_id=None):\n u'''Submit to Qiita v2 API'''\n headers = {\n 'Content-Type': 'application/json',\n 'Authorization': 'Bearer {}'.format(token)\n }\n\n if article_id is None or article_id == '':\n return\n else:\n url = \"{}/{}\".format(url, article_id)\n res = requests.patch(url, headers=headers, json=parse(item))\n if res.status_code >= 400:\n logging.error(res.json())\n res.raise_for_status()\n\n logging.info(json.dumps(res.json(), indent=2))\n\n return res\n\ndef execute(item, token):\n if item is None:\n logging.warning(\"SKIP. 
No qiita_id tag found.\")\n return\n item_id = item['id']\n\n res = submit(item, token=token, article_id=item_id)\n\nif __name__ == \"__main__\":\n argvs = sys.argv\n token = argvs[1]\n user = argvs[2]\n qiitaApi = qiita_api.QiitaApi(token)\n\n items = qiitaApi.query_user_items(user)\n for item in items:\n execute(item, token)\n break","sub_path":"moved_message.py","file_name":"moved_message.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"338060765","text":"# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport contextlib\nimport mmap\nimport os\nimport tarfile\n\nfrom StringIO import StringIO\n\n\nclass FileStream(object):\n def __init__(self):\n self.buffer = StringIO()\n self.offset = 0\n\n def write(self, s):\n self.buffer.write(s)\n self.offset += len(s)\n\n def close(self):\n self.buffer.close()\n\n def tell(self):\n return self.offset\n\n def pop(self):\n s = self.buffer.getvalue()\n self.buffer.close()\n self.buffer = StringIO()\n return s\n\n\ndef _tar_stream(in_file, tar, buf_size=1048576):\n try:\n info = tarfile.TarInfo(in_file) # create a tar info object\n stat = os.stat(in_file) # OS meta data about the file\n info.uid = stat.st_uid # user id\n info.gid = stat.st_gid # group id of\n info.size = stat.st_size # size of file, in bytes\n info.mtime = stat.st_mtime # content modification time\n # info.ctime = stat.st_ctime # metadata change time on Unix\n # time of creation on Windows\n info.mode = stat.st_mode # file permissions\n tar.addfile(info) # add info to tar without the object\n\n yield\n\n with open(in_file, 'rb') as in_fp: # open in 'rb' to work on Windows\n\n with contextlib.closing(mmap.mmap(in_fp.fileno(), 0,\n access=mmap.ACCESS_READ)) as m:\n\n while True:\n s = m.read(buf_size)\n\n if s:\n tar.fileobj.write(s)\n\n yield\n\n if len(s) < buf_size:\n blocks, remainder = divmod(info.size,\n tarfile.BLOCKSIZE)\n\n if remainder > 0:\n tar.fileobj.write(tarfile.NUL *\n (tarfile.BLOCKSIZE - remainder))\n\n yield\n\n blocks += 1\n\n tar.offset += blocks * tarfile.BLOCKSIZE\n break\n\n yield\n\n except (OSError, IOError):\n # happens when a syslink is broken\n print('Error processing file {0}'.format(in_file))\n\n\ndef tar_stream(path_or_file, exclude_dirs=[], exclude_files=()):\n streaming = FileStream() # stream object to write the chunks\n tar = tarfile.TarFile.open(mode='w:', fileobj=streaming)\n\n # check if the path_or_file is a file\n if os.path.isfile(path_or_file):\n for i in _tar_stream(path_or_file, tar):\n s = streaming.pop()\n if s:\n yield s\n else:\n # if the path_or_file is a path chdir to path\n os.chdir(path_or_file)\n # iterate on the top dir to exclude directories and files\n for root, dirs, files in os.walk('.', topdown=True):\n dirs[:] = [d for d in dirs if d not in exclude_dirs]\n files[:] = [f for f in files if not f.endswith(exclude_files)]\n\n for file_ in files:\n file_ = os.path.join(root, file_)\n for i in _tar_stream(file_, tar):\n s = 
streaming.pop()\n if s:\n yield s\n tar.close()\n\n\ndef untar_stream(restore_path, tar_path):\n # TODO: extract from tar in stream\n # TODO: accept input in chunks\n try:\n tar_ = tarfile.open(tar_path, mode='r:')\n tar_.extractall(path=restore_path)\n except (tarfile.TarError, Exception) as error:\n print('tar error: ', error)\n","sub_path":"tar/tar.py","file_name":"tar.py","file_ext":"py","file_size_in_byte":4167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"286025920","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/solan/core.py\n# Compiled at: 2019-05-04 10:50:47\n# Size of source mod 2**32: 797 bytes\nimport click, http.server, os\n\n@click.command()\n@click.option('-p', '--port', type=int, default=8000, show_default=True)\n@click.argument('directory', type=click.Path(exists=True))\ndef run(directory, port):\n click.echo('Sharing {}'.format(directory))\n os.chdir(directory)\n click.echo('Running the server')\n solan_server = http.server.HTTPServer\n solan_handler = http.server.SimpleHTTPRequestHandler\n solan_server_address = ('', port)\n httpd = solan_server(solan_server_address, solan_handler)\n click.echo('Service on {url}:{port}'.format(url='XXXXXX',\n port=port))\n httpd.serve_forever()","sub_path":"pycfiles/solan-0.1.0-py3.6/core.cpython-36.py","file_name":"core.cpython-36.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"311158578","text":"from magnetic_field_theory import *\n\n\nclass PSO(object):\n def __init__(self, x0):\n self.x0 = x0\n # search space dimension\n self.dim = 3\n # swarm size\n self.size = 300\n # maximum number of iterations\n self.steps = 300\n # acceleration factors\n self.c1 = self.c2 = 2\n # initial values\n self.x = self.produce_x()\n self.v = np.random.rand(self.size, self.dim)\n fitness = self.fitness()\n self.p = self.x\n self.pg = self.x[np.argmin(fitness)]\n self.individual_best_fitness = fitness\n self.global_best_fitness = np.min(fitness)\n\n def produce_x(self):\n dim = self.dim\n size = self.size\n x_bound = np.zeros(shape=(dim, 2))\n # position range\n x_bound[0] = [-300, 300]\n x_bound[1] = [150, 250]\n x_bound[2] = [-10, 10]\n # # magnetic moment range\n # x_bound[3] = []\n # x_bound[4] = []\n # x_bound[5] = []\n x = np.zeros(shape=(size, dim))\n for i in range(0, dim):\n yy = np.random.uniform(x_bound[i][0], x_bound[i][1], (size, 1))\n x[:, i] = yy.reshape(-1)\n return x\n\n def fitness(self):\n x = self.x\n x0 = self.x0\n sub = np.zeros(shape=(self.size, 6))\n for i in range(0, self.size):\n sub[i] = Theory(x[i]).db - Simulation(x0).db\n fitness = np.sum(np.square(sub), axis=1)\n return fitness\n\n def solution(self):\n for step in range(self.steps):\n r1 = np.random.rand(self.size, self.dim)\n r2 = np.random.rand(self.size, self.dim)\n # inertia factor\n w = 0.9 - 0.3 / (self.steps - 1) * step\n self.v = w * self.v + self.c1 * r1 * (self.p - self.x) + self.c2 * r2 * (self.pg - self.x)\n self.x = self.v + self.x\n # plt.clf()\n # plt.scatter(self.x[:, 0], self.x[:, 1], s=30, color='k')\n # plt.pause(10)\n fitness = self.fitness()\n update_id = np.greater(self.individual_best_fitness, fitness)\n self.p[update_id] = self.x[update_id]\n self.individual_best_fitness[update_id] = fitness[update_id]\n if np.min(fitness) < self.global_best_fitness:\n self.pg = self.x[np.argmin(fitness)]\n self.global_best_fitness = np.min(fitness)\n print(self.pg)\n 
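# debug trace: also report the fitness of the newly found global best\n 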
print(self.global_best_fitness)\n print(\"\\n\")\n print(self.global_best_fitness)\n print(\"\\n\")\n return self.pg\n\n\nif __name__ == \"__main__\":\n a = np.array([125, 200, 0])\n print(PSO(a).solution())\n # plt.show()\n\n","sub_path":"magnetic_field_PSO.py","file_name":"magnetic_field_PSO.py","file_ext":"py","file_size_in_byte":2630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"602757697","text":"import matplotlib.pyplot as plt\n\nlog_path = 'C:\\\\Users\\\\Andrew\\\\Documents\\\\GitHub\\\\3dcnn\\\\log\\\\2016-09-09-info.txt'\n\nepoch=range(100)\n\ndef read_data(file_path):\n with open(file_path, 'r') as T:\n lines = T.readlines()\n loss = []\n acc = []\n val_loss = []\n val_acc = []\n for line in lines:\n sp_loss = line.split(' loss:')\n loss.append((sp_loss[1].split(' '))[0])\n sp_acc = line.split(' acc:')\n acc.append((sp_acc[1].split(' '))[0])\n sp_val_loss = line.split(' val_loss:')\n val_loss.append((sp_val_loss[1].split(' '))[0])\n sp_val_acc = line.split(' val_acc:')\n val_acc.append((sp_val_acc[1].split(' '))[0])\n print(loss)\n print(acc)\n print(val_loss)\n print(val_acc)\n return loss, acc, val_loss, val_acc\n\ndef plt_data(epoch, loss, acc, val_loss, val_acc):\n plt.figure(1,figsize=(7,5))\n plt.plot(epoch,loss)\n plt.plot(epoch,val_loss)\n plt.xlabel('num of Epochs')\n plt.ylabel('loss')\n plt.title('train_loss vs val_loss')\n plt.grid(True)\n plt.legend(['train','val'])\n print(plt.style.available) # use bmh, classic,ggplot for big pictures\n plt.style.use(['classic'])\n\n plt.figure(2,figsize=(7,5))\n plt.plot(epoch,acc)\n plt.plot(epoch,val_acc)\n plt.xlabel('num of Epochs')\n plt.ylabel('accuracy')\n plt.title('train_acc vs val_acc')\n plt.grid(True)\n plt.legend(['train','val'],loc=4)\n #print plt.style.available # use bmh, classic,ggplot for big pictures\n plt.style.use(['classic'])\n plt.show()\n\nif __name__ == '__main__':\n loss, acc, val_loss, val_acc = read_data(log_path)\n plt_data(epoch, loss, acc, val_loss, val_acc)","sub_path":"util/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"615085122","text":"from django.contrib.auth import get_user_model\nfrom rest_framework.decorators import action\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom rest_framework.permissions import IsAuthenticatedOrReadOnly, IsAuthenticated\n\n\nclass ScoreManagerMixin():\n\n @action(methods=['POST'], detail=True, url_path='upvote', permission_classes=(IsAuthenticated,))\n def upvote(self, request, pk=None):\n user = request.user if not request.user.is_superuser else get_user_model().objects.random_dummy()\n\n argument = self.get_object()\n argument_author = argument.user\n points = argument_author.points\n\n # USER ALREADY UPVOTED OR DOWNVOTED\n prev_upvoted = user in argument.upvotes.all()\n prev_downvoted = user in argument.downvotes.all()\n\n if prev_downvoted:\n argument.downvotes.remove(user)\n points += 1\n if not prev_upvoted:\n points += 1\n argument.upvotes.add(user)\n else:\n points -= 1\n argument.upvotes.remove(user)\n\n argument_author.points = points if not points < 0 else 0\n argument_author.save()\n return Response(argument.score)\n\n @action(methods=['POST'], detail=True, url_path='downvote', permission_classes=(IsAuthenticated,))\n def downvote(self, request, pk=None):\n user = request.user if not request.user.is_superuser else 
get_user_model().objects.random_dummy()\n\n argument = self.get_object()\n argument_author = argument.user\n points = argument_author.points\n\n # CHECK IF USER ALREADY UPVOTED OR DOWNVOTED\n prev_upvoted = user in argument.upvotes.all()\n prev_downvoted = user in argument.downvotes.all()\n\n if prev_upvoted:\n argument.upvotes.remove(user)\n points -= 1\n if not prev_downvoted:\n points -= 1\n argument.downvotes.add(user)\n else:\n points += 1\n argument.downvotes.remove(user)\n\n argument_author.points = points if not points < 0 else 0\n argument_author.save()\n return Response(argument.score)\n","sub_path":"server/core/mixins/score_manager.py","file_name":"score_manager.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"16084544","text":"def kaartnummer():\n infile = open('kaartnummers.txt')\n kaartlezen = infile.readlines()\n infile.close()\n\n kaartlijst = kaartlezen\n\n for name in kaartlijst:\n fl = name.split(',')\n print('{} heeft kaartnummer : {}'.format(fl[1].strip(), fl[0]))\n\nkaartnummer()","sub_path":"les5/pe5_2.py","file_name":"pe5_2.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"430595190","text":"from dataclasses import dataclass\nfrom base.common.models.request import SimpleRequestModel\n\n\n@dataclass\nclass GetImageThumbnailRequestParams:\n LOAN_NUMBER_ID: str = \"LoanNumberID\"\n STATUS_ID: str = \"StatusID\"\n IMAGE_ID: str = \"ImageID\"\n PAGE_NUMBER: str = \"PageNumber\"\n HEIGHT: str = \"Height\"\n WIDTH: str = \"Width\"\n\n\nclass GetImageThumbnailRequest(SimpleRequestModel):\n def __init__(self, loan_number_id, status_id, image_id, page_number, height, width, session_id, nonce,\n pretty_print):\n self.loan_number_id = loan_number_id\n self.status_id = status_id\n self.image_id = image_id\n self.page_number = page_number\n self.height = height\n self.width = width\n super().__init__(session_id=session_id, nonce=nonce, pretty_print=pretty_print)\n\n def to_params(self):\n args = super().to_params()\n args[GetImageThumbnailRequestParams.LOAN_NUMBER_ID] = self.loan_number_id\n args[GetImageThumbnailRequestParams.STATUS_ID] = self.status_id\n args[GetImageThumbnailRequestParams.IMAGE_ID] = self.image_id\n args[GetImageThumbnailRequestParams.PAGE_NUMBER] = self.page_number\n args[GetImageThumbnailRequestParams.HEIGHT] = self.height\n args[GetImageThumbnailRequestParams.WIDTH] = self.width\n return args\n","sub_path":"APIs/task_items/requests/get_image_thumbnail.py","file_name":"get_image_thumbnail.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"189502274","text":"# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n\nimport numpy as 
","sub_path":"APIs/task_items/requests/get_image_thumbnail.py","file_name":"get_image_thumbnail.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"189502274","text":"# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n\nimport numpy as onp\nimport warnings\n\nfrom .. import lax\nfrom .. import lax_linalg\nfrom .lax_numpy import _not_implemented\nfrom .lax_numpy import _wraps\nfrom . import lax_numpy as np\nfrom ..util import get_module_functions\n\n_EXPERIMENTAL_WARNING = \"numpy.linalg support is experimental and may cause silent failures or wrong outputs\"\n\n_T = lambda x: np.swapaxes(x, -1, -2)\n\n@_wraps(onp.linalg.cholesky)\ndef cholesky(a):\n warnings.warn(_EXPERIMENTAL_WARNING)\n return lax_linalg.cholesky(a)\n\n\n@_wraps(onp.linalg.slogdet)\ndef slogdet(a):\n dtype = lax._dtype(a)\n a_shape = np.shape(a)\n if len(a_shape) < 2 or a_shape[-1] != a_shape[-2]:\n msg = \"Argument to slogdet() must have shape [..., n, n], got {}\"\n raise ValueError(msg.format(a_shape))\n lu, pivot = lax_linalg.lu(a)\n diag = np.diagonal(lu, axis1=-2, axis2=-1)\n is_zero = np.any(diag == np.array(0, dtype=dtype), axis=-1)\n parity = np.count_nonzero(pivot != np.arange(a_shape[-1]), axis=-1)\n if np.iscomplexobj(a):\n # reduce over the diagonal axis so batched [..., n, n] inputs work too\n sign = np.prod(diag / np.abs(diag), axis=-1)\n else:\n sign = np.array(1, dtype=dtype)\n parity = parity + np.count_nonzero(diag < 0, axis=-1)\n sign = np.where(is_zero,\n np.array(0, dtype=dtype),\n sign * np.array(-2 * (parity % 2) + 1, dtype=dtype))\n logdet = np.where(\n is_zero, np.array(-np.inf, dtype=dtype),\n np.sum(np.log(np.abs(diag)), axis=-1))\n return sign, np.real(logdet)\n\n\n@_wraps(onp.linalg.det)\ndef det(a):\n sign, logdet = slogdet(a)\n return sign * np.exp(logdet)\n\n\n@_wraps(onp.linalg.inv)\ndef inv(a):\n warnings.warn(_EXPERIMENTAL_WARNING)\n if np.ndim(a) < 2 or a.shape[-1] != a.shape[-2]:\n raise ValueError(\"Argument to inv must have shape [..., n, n], got {}.\"\n .format(np.shape(a)))\n q, r = qr(a)\n return lax_linalg.triangular_solve(r, _T(q), lower=False, left_side=True)\n\n\n@_wraps(onp.linalg.qr)\ndef qr(a, mode=\"reduced\"):\n warnings.warn(_EXPERIMENTAL_WARNING)\n if mode in (\"reduced\", \"r\", \"full\"):\n full_matrices = False\n elif mode == \"complete\":\n full_matrices = True\n else:\n raise ValueError(\"Unsupported QR decomposition mode '{}'\".format(mode))\n q, r = lax_linalg.qr(a, full_matrices)\n if mode == \"r\":\n return r\n return q, r\n\nfor func in get_module_functions(onp.linalg):\n if func.__name__ not in globals():\n globals()[func.__name__] = _not_implemented(func)\n
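\n\n# Added illustrative note (not part of the original module): for a concrete\n# 2x2 input, slogdet(np.array([[1., 2.], [3., 4.]])) returns sign=-1 and\n# logabsdet=log(2), so sign * np.exp(logabsdet) recovers det = -2.\n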
","sub_path":"jax/numpy/linalg.py","file_name":"linalg.py","file_ext":"py","file_size_in_byte":3024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"449575470","text":"# parse crimes relevant to the given statistics from Project: Murder Accountability dataset of homicides\r\nfrom numpy import save\r\nfrom pandas import read_csv\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n data = read_csv('./database.csv', low_memory=False)\r\n\r\n # we want murders only\r\n murders = data.loc[ data['Crime Type'] == 'Murder or Manslaughter' ]\r\n\r\n unique_cities = murders['City'].unique()\r\n\r\n i = 0\r\n # save the data, filed by (state, city) region\r\n for city in unique_cities:\r\n records = murders.loc[ murders['City'] == city ]\r\n unique_states = records['State'].unique()\r\n\r\n for state in unique_states:\r\n # 'records' is already city-filtered, so only the state filter is needed\r\n this_log = records.loc[ records['State'] == state ]\r\n save('./by_state/{},{}'.format(state.replace(' ', '_'), city), this_log)\r\n\r\n i += 1\r\n print(i)\r\n\r\n print('\\nFinished processing records for all %d cities.' % i)\r\n","sub_path":"parse_crimes.py","file_name":"parse_crimes.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"209810364","text":"#a function that computes the highest possible index of each block (the block of 1-digit numbers, the block of 2-digit numbers, ...)\n#this function also computes the first number of each block and returns it along with the indices\ndef check_number(nr):\n\t#number of iterations we will need\n\tblock_iterations = 9\n\t#initialise the list used to return both blocks\n\tblock_list = []\n\t#list for all the indices we compute (i.e. the highest index per block: 9, 189, 2889, ...)\n\tamount_numbers = []\n\t#list for the first number of each block\n\tlowest_number_per_block = []\n\t#append 1 to the list of first numbers right away, since it is the first digit of the first block and falls outside the iteration\n\tlowest_number_per_block.append(1)\n\t#initialise the variable used to compute the indices and the lowest numbers\n\tnine_block = \"\"\n\t#initialise the list used to compute the indices\n\tnines = []\n\n\t#start the iteration that computes the indices and the first numbers\n\tfor l in range(block_iterations):\n\t\tnine_block = nine_block + \"9\"\n\t\tif l == 0:\n\t\t\t#on the first iteration we simply append a 9 to both lists (the index list and the nines list); this is the highest index of the first block\n\t\t\tamount_numbers.append(nine_block)\n\t\t\tnines.append(nine_block)\n\t\telse:\n\t\t\t#on later iterations we compute the highest index for this block\n\t\t\tamount_numbers.append(int(amount_numbers[l-1]) + (l+1) * (int(nine_block) - int(nines[l-1])))\n\t\t\tnines.append(nine_block)\n\t\t\t\n\t\t#on every iteration we append the lowest number of this block\n\t\tlowest_number_per_block.append(str(int(nine_block)+1))\n\n\t#after all iterations, append both lists to the container list so both can be returned together\n\tblock_list.append(amount_numbers)\n\tblock_list.append(lowest_number_per_block)\n\t#return both lists so they can be used in further computations\n\treturn block_list\n
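\n#Added worked example (not in the original): in the concatenated digit string\n#123456789101112... index 15 falls inside the two-digit number 12, so the\n#digit found at index 15 is 2.\n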
cijfers\n\t\tnumber = amount_indexes_left // digits\n\t\t#het getal waarin onze index bevind is het laagste cijfer van dit block + de uitkomst van de bewerking hierboven\n\t\tknown = str(int(lowest) + number)\n\t\t#als de rest van aantal resterende indices / het aantal cijfers 0 is dan is het eerste cijfer van dit getal het cijfer dat we moeten hebben\n\t\tif amount_indexes_left%digits == 0:\n\t\t\tresult = known[0]\n\t\telse:\n\t\t\t#als de rest van het aantal resterende indices / het aantal cijfers hoger dan 0 is, dan is de rest de index (van een cijfer in dit getal) de index van het cijfer dat we zoeken\n\t\t\tresult = known[amount_indexes_left%digits]\n\n\t#we returnen het resultaat zodat we dit op het scherm kunnen tonen\t\n\treturn result\n\n#hier gaan we het getal opvragen\nk = int(input(\"Geef een getal op: \"))\n\n#in deze loop gaan we checken of het getal correct is, zoniet vragen we het opnieuw op\nwhile 1 > k or k > (2**31-1):\n\tk = int(input(\"Uw getal is te groot. Gelieve een nieuw getal op te geven: \"))\n\n#hier gaan we de nodige lijsten berekenen\nget_block_lists = check_number(k)\n\n#hier halen we het uiteindelijke resultaat op\nend_result = get_number_on_index(get_block_lists[0], k, get_block_lists[1])\n\n#hier schrijven we het resultaat weg naar het scherm\nprint(\"Het cijfer op index [\" + str(k) + \"] is: \" + str(end_result))","sub_path":"hw1.py","file_name":"hw1.py","file_ext":"py","file_size_in_byte":4463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"69795644","text":"#!/usr/bin/env python3\n\n\"\"\"\nCreated on 19 Jan 2022\n\n@author: Bruno Beloff (bruno.beloff@southcoastscience.com)\n\nsource repo: scs_analysis\n\nDESCRIPTION\nThe organisation_devices utility is used to\n\nSYNOPSIS\norganisation_devices.py [-c CREDENTIALS] { -F { -l ORG_LABEL | -t DEVICE_TAG } | \\\n-C -l ORG_LABEL -t DEVICE_TAG -p PATH_ROOT GROUP LOCATION -d DEPLOYMENT_LABEL | \\\n-E -l ORG_LABEL -t DEVICE_TAG -p PATH_ROOT GROUP LOCATION | \\\n-D -t DEVICE_TAG } \\\n[-i INDENT] [-v]\n\nEXAMPLES\norganisation_devices.py -F -l NARA\n\nDOCUMENT EXAMPLE\n{\"DeviceTag\": \"scs-bgx-401\", \"OrgID\": 1, \"DevicePath\": \"south-coast-science-demo/brighton/loc/1/\",\n\"EnvironmentPath\": \"south-coast-science-demo/brighton/device/praxis-000401/\",\n\"StartDatetime\": \"2022-01-17T10:40:04Z\", \"EndDatetime\": null,\n\"DeploymentLabel\": \"Preston Circus\"}\n\nSEE ALSO\nscs_analysis/cognito_credentials\nscs_analysis/cognito_devices\n\"\"\"\n\nimport sys\n\nfrom scs_analysis.cmd.cmd_organisation_devices import CmdOrganisationDevices\n\nfrom scs_core.aws.config.project import Project\n\nfrom scs_core.aws.security.cognito_client_credentials import CognitoClientCredentials\nfrom scs_core.aws.security.cognito_device import CognitoDeviceCredentials\nfrom scs_core.aws.security.cognito_login_manager import CognitoLoginManager\n\nfrom scs_core.aws.security.organisation import Organisation, OrganisationPathRoot, OrganisationDevice\nfrom scs_core.aws.security.organisation_manager import OrganisationManager\n\nfrom scs_core.client.http_exception import HTTPException\n\nfrom scs_core.data.datetime import LocalizedDatetime\nfrom scs_core.data.datum import Datum\nfrom scs_core.data.json import JSONify\n\nfrom scs_core.sys.logging import Logging\n\nfrom scs_host.sys.host import Host\n\n\n# --------------------------------------------------------------------------------------------------------------------\n\nif __name__ == '__main__':\n\n logger = None\n org = 
None\n report = []\n\n try:\n # ------------------------------------------------------------------------------------------------------------\n # cmd...\n\n cmd = CmdOrganisationDevices()\n\n if not cmd.is_valid():\n cmd.print_help(sys.stderr)\n exit(2)\n\n Logging.config('organisation_devices', verbose=cmd.verbose)\n logger = Logging.getLogger()\n\n logger.info(cmd)\n\n if cmd.org_label is not None and not Organisation.is_valid_label(cmd.org_label):\n logger.error(\"the organisation label '%s' is not valid.\" % cmd.org_label)\n exit(2)\n\n if cmd.device_tag is not None and not CognitoDeviceCredentials.is_valid_tag(cmd.device_tag):\n logger.error(\"the device tag '%s' is not valid.\" % cmd.device_tag)\n exit(2)\n\n if cmd.project_organisation is not None and \\\n not OrganisationPathRoot.is_valid_path_root(cmd.project_organisation):\n logger.error(\"the path root '%s' is not valid.\" % cmd.project_organisation)\n exit(2)\n\n if cmd.project_location is not None and not Datum.is_int(cmd.project_location):\n logger.error(\"the project location '%s' must be an integer.\" % cmd.project_location)\n exit(2)\n\n if cmd.deployment_label is not None and not OrganisationDevice.is_valid_deployment_label(cmd.deployment_label):\n logger.error(\"the deployment label '%s' is not valid.\" % cmd.deployment_label)\n exit(2)\n\n\n # ------------------------------------------------------------------------------------------------------------\n # authentication...\n\n credentials = CognitoClientCredentials.load_for_user(Host, name=cmd.credentials_name)\n\n if not credentials:\n exit(1)\n\n gatekeeper = CognitoLoginManager()\n auth = gatekeeper.user_login(credentials)\n\n if not auth.is_ok():\n logger.error(\"login: %s.\" % auth.authentication_status.description)\n exit(1)\n\n\n # ------------------------------------------------------------------------------------------------------------\n # resources...\n\n manager = OrganisationManager()\n\n\n # ------------------------------------------------------------------------------------------------------------\n # validate...\n\n if cmd.org_label is not None:\n org = manager.get_organisation_by_label(auth.id_token, cmd.org_label)\n\n if org is None:\n logger.error(\"no organisation found for label: '%s'.\" % cmd.org_label)\n exit(1)\n\n\n # ------------------------------------------------------------------------------------------------------------\n # run...\n\n if cmd.find:\n if cmd.org_label is not None:\n report = manager.find_devices_by_organisation(auth.id_token, org.org_id)\n else:\n report = manager.find_devices_by_tag(auth.id_token, cmd.device_tag)\n\n if cmd.create:\n project = Project.construct(cmd.project_organisation, cmd.project_group, cmd.project_location)\n device_path = project.device_path + '/'\n location_path = project.location_path + '/'\n\n now = LocalizedDatetime.now()\n\n report = OrganisationDevice(cmd.device_tag, org.org_id, device_path, location_path, now, None,\n cmd.deployment_label)\n\n manager.assert_device(auth.id_token, report)\n\n if cmd.delete:\n manager.delete_device(auth.id_token, cmd.device_tag)\n\n\n # ----------------------------------------------------------------------------------------------------------------\n # end...\n\n if report is not None:\n print(JSONify.dumps(report, indent=cmd.indent))\n\n if cmd.find:\n logger.info(\"found: %s\" % len(report))\n\n except KeyboardInterrupt:\n print(file=sys.stderr)\n\n except HTTPException as ex:\n logger.error(ex.error_report)\n 
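# non-zero exit so that calling shell scripts can detect the API failure\n 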
exit(1)\n","sub_path":"src/scs_analysis/organisation_devices.py","file_name":"organisation_devices.py","file_ext":"py","file_size_in_byte":5945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"309756866","text":"from datetime import date\nimport re\nimport os\n\nfrom core.helpers.decorator import Cached\n\n\n@Cached\ndef get_cd(string):\n # the original pattern '\\cd[0-9]' is an invalid regex escape; match an\n # optionally parenthesised 'cd' marker instead\n regex = re.search(r'\\(?cd[0-9]+\\)?', string, re.IGNORECASE)\n\n if regex:\n _cd = regex.group().replace('(', '').replace(')', '')\n else:\n _cd = ''\n\n _cd = _cd.lower().replace('cd', '').strip()\n\n return _cd\n\n\n@Cached\ndef get_movie(title):\n def remove_brackets(string):\n string = re.sub(r\"\\([^)]*\\)\", \"\", string)\n string = re.sub(r\"\\[[^)]*\\]\", \"\", string)\n return string\n\n def remove_double_spaces(string):\n while '  ' in string:\n string = string.replace('  ', ' ')\n return string\n\n def remove_dots(string):\n return string.replace('.', ' ')\n\n def remove_year(string):\n rx = re.search('[0-9+]{4}', string)\n if rx and 1889 < int(rx.group()) < date.today().year + 2:\n string = string.replace(rx.group(), '')\n\n return string\n\n def get_year(string):\n expressions = [re.search('\\\\([0-9]{4}\\\\)', string),\n re.search('\\\\[[0-9]{4}\\\\]', string),\n re.search(' [0-9]{4} ', string),\n re.search(' [0-9]{4}', string),\n re.search('.[0-9]{4}.', string),\n re.search('.[0-9]{4}', string),\n re.search('^(?!a-Z0-9|\\\\.).[0-9]{4}', string)]\n\n _year = ''\n for rx in expressions:\n if rx is not None:\n rx_result = rx.group().replace('(', '').replace(')', '')\n rx_result = rx_result.replace('[', '').replace(']', '')\n rx_result = rx_result.replace('.', '')\n rx_result = rx_result.replace(' ', '')\n\n if rx_result.isdigit() and 1889 < int(rx_result) < date.today().year + 1:\n _year = rx_result\n break\n\n return _year\n\n def get_imdb_id(string):\n rx_parentheses = re.search('\\\\(tt[0-9]{7}\\\\)', string)\n rx_square_brackets = re.search('\\\\[tt[0-9]{7}\\\\]', string)\n rx_square_dots = re.search('.tt[0-9]{7}.', string)\n\n imdb = ''\n if rx_parentheses:\n imdb = rx_parentheses.group().replace('(', '').replace(')', '')\n elif rx_square_brackets:\n imdb = rx_square_brackets.group().replace('[', '').replace(']', '')\n elif rx_square_dots:\n imdb = rx_square_dots.group().replace('.', '')\n\n return imdb\n\n year = get_year(title)\n imdb_id = get_imdb_id(title)\n\n title = os.path.basename(os.path.normpath(title))\n _title = remove_brackets(title)\n _title = remove_year(_title)\n _title = remove_dots(_title)\n _title = remove_double_spaces(_title)\n\n if _title == '.':\n _title = ''\n\n result = {'title': _title.strip(), 'year': year.strip(), 'imdbID': imdb_id.strip()}\n return result\n\n\n@Cached\ndef get_episode(title):\n orig_title = title\n title = title.upper() # makes it easier to split matches\n regex_schema1 = re.findall('S[0-9]{1,3}E[0-9]{1,3}', title, re.IGNORECASE)\n regex_schema2 = re.findall('S[0-9]{1,3} E[0-9]{1,3}', title, re.IGNORECASE)\n regex_schema3 = re.findall('[0-9]{1,3}X[0-9]{1,3}', title, re.IGNORECASE)\n regex_schema4 = re.findall('EP[0-9]{1,3}', title, re.IGNORECASE)\n regex_schema5 = re.findall('EP_[0-9]{1,3}', title, re.IGNORECASE)\n\n regex_schema6 = re.findall('S[0-9+]{1,3}[._\\\\-]E[0-9+]{1,3}', title, re.IGNORECASE)\n results = []\n\n if regex_schema1:\n for match in regex_schema1:\n match = match.split('E')\n season = match[0].replace('S', '')\n episode = match[1]\n results.append({'season': season, 'episode': episode, 'filename': orig_title})\n elif regex_schema2:\n
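 # same split logic as schema 1, tolerating a space between the season and episode tokens\n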
 for match in regex_schema2:\n match = match.split('E')\n season = match[0].replace('S', '').strip()\n episode = match[1].strip()\n results.append({'season': season, 'episode': episode, 'filename': orig_title})\n elif regex_schema3:\n for match in regex_schema3:\n match = match.split('X')\n season = match[0]\n episode = match[1]\n results.append({'season': season, 'episode': episode, 'filename': orig_title})\n elif regex_schema4:\n for match in regex_schema4:\n season = ''\n episode = match.replace('EP', '')\n results.append({'season': season, 'episode': episode, 'filename': orig_title})\n elif regex_schema5:\n for match in regex_schema5:\n season = ''\n episode = match.replace('EP_', '')\n results.append({'season': season, 'episode': episode, 'filename': orig_title})\n elif regex_schema6:\n for match in regex_schema6:\n match = match.split('E')\n season = match[0].replace('S', '').replace('.', '').replace('-', '').replace('_', '')\n episode = match[1].strip()\n results.append({'season': season, 'episode': episode, 'filename': orig_title})\n\n return results
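\n\n# Added usage note (illustration): get_episode('Show.S02E05.720p.mkv') returns\n# [{'season': '02', 'episode': '05', 'filename': 'Show.S02E05.720p.mkv'}]\n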
","sub_path":"core/helpers/regex.py","file_name":"regex.py","file_ext":"py","file_size_in_byte":5073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"67598551","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nFunctions to calculate moments of areas\r\n\r\n\"\"\"\r\n__version__ = '2.0'\r\n__author__ = 'Noemie Fedon'\r\n\r\nimport numpy as np\r\n\r\n\r\ndef calc_mom_of_areas(constraints, targets, ply_order, n_plies_in_groups):\r\n \"\"\"\r\n calculates ply moments of areas\r\n\r\n OUTPUTS\r\n\r\n - mom_areas[ply_index, 0]: signed area of ply of index 'ply_index'\r\n - mom_areas[ply_index, 1]: signed first moment of area of ply of index\r\n 'ply_index'\r\n - mom_areas[ply_index, 2]: signed second moment of area of ply of index\r\n 'ply_index'\r\n\r\n - cummul_mom_areas[:, 0/1/2]: cumulated areas/first/second moments of\r\n areas of the plies in the order in which plies are optimised\r\n\r\n - group_mom_areas[:, 0/1/2]: cumulated areas/first/second moments of\r\n areas of ply groups in the order in which plies are optimised\r\n\r\n INPUTS\r\n\r\n - constraints: lay-up design guidelines\r\n - targets: target lamination parameters and ply counts\r\n - ply_order: ply indices sorted in the order in which plies are optimised\r\n - n_plies_in_groups: number of plies in each group of plies\r\n \"\"\"\r\n group_mom_areas = np.zeros((n_plies_in_groups.size, 3), float)\r\n\r\n if constraints.sym:\r\n\r\n ply_indices = np.arange(targets.n_plies // 2 + targets.n_plies % 2)\r\n mom_areas = np.zeros((\r\n targets.n_plies // 2 + targets.n_plies % 2, 3), float)\r\n\r\n pos_bot = (2 / targets.n_plies) * ply_indices - 1\r\n pos_top = (2 / targets.n_plies) * (ply_indices + 1) - 1\r\n\r\n if targets.n_plies % 2:\r\n pos_top[-1] = 0\r\n\r\n mom_areas[:, 0] = pos_top - pos_bot\r\n mom_areas[:, 1] = pos_top**2 - pos_bot**2\r\n mom_areas[:, 2] = pos_top**3 - pos_bot**3\r\n\r\n n_plies_in_group = 0\r\n ind_ply_group = 0\r\n mom_areas_ply_group = np.zeros((3,), float)\r\n\r\n cummul_mom_areas = np.zeros((\r\n targets.n_plies // 2 + targets.n_plies % 2, 3), float)\r\n\r\n for ply_index in range(targets.n_plies // 2 + targets.n_plies % 2):\r\n\r\n cummul_mom_areas[ply_index:, :] += abs(mom_areas[ply_index, :])\r\n\r\n n_plies_in_group += 1\r\n mom_areas_ply_group += abs(mom_areas[ply_index, :])\r\n\r\n if n_plies_in_group == n_plies_in_groups[ind_ply_group]:\r\n group_mom_areas[ind_ply_group, :] = mom_areas_ply_group\r\n ind_ply_group += 1\r\n n_plies_in_group = 0\r\n mom_areas_ply_group = np.zeros((3,), float)\r\n\r\n else:\r\n mom_areas = np.zeros((targets.n_plies, 3), float)\r\n cummul_mom_areas = np.zeros((targets.n_plies, 3), float)\r\n\r\n ply_indices = np.arange(targets.n_plies)\r\n pos_bot = ((2 / targets.n_plies) * ply_indices - 1)[ply_order]\r\n pos_top = ((2 / targets.n_plies) * (ply_indices + 1) - 1)[ply_order]\r\n\r\n mom_areas[:, 0] = pos_top - pos_bot\r\n mom_areas[:, 1] = pos_top**2 - pos_bot**2\r\n mom_areas[:, 2] = pos_top**3 - pos_bot**3\r\n mom_areas /= 2\r\n\r\n n_plies_in_group = 0\r\n ind_ply_group = 0\r\n mom_areas_ply_group = np.zeros((3,), float)\r\n\r\n for ply_index in range(targets.n_plies - 1):\r\n\r\n cummul_mom_areas[ply_index:, :] += abs(mom_areas[ply_index, :])\r\n\r\n n_plies_in_group += 1\r\n mom_areas_ply_group[:] += abs(mom_areas[ply_index, :])\r\n\r\n if n_plies_in_group == n_plies_in_groups[ind_ply_group]:\r\n group_mom_areas[ind_ply_group, :] = mom_areas_ply_group\r\n ind_ply_group += 1\r\n n_plies_in_group = 0\r\n mom_areas_ply_group = np.zeros((3,), float)\r\n\r\n pos_mom_areas = np.array([\r\n (abs(pos_top[-1]) + abs(pos_bot[-1])) / 2,\r\n (abs(pos_top[-1]**2) + abs(pos_bot[-1]**2)) / 2,\r\n (abs(pos_top[-1]**3) + abs(pos_bot[-1]**3)) / 2])\r\n\r\n cummul_mom_areas[-1, :] += pos_mom_areas\r\n mom_areas_ply_group += pos_mom_areas\r\n group_mom_areas[-1, :] += mom_areas_ply_group\r\n\r\n return mom_areas, cummul_mom_areas, group_mom_areas\r\n\r\n\r\nif __name__ == \"__main__\":\r\n print('*** Test for the functions calc_moment_of_areas ***\\n')\r\n import sys\r\n sys.path.append(r'C:\\BELLA_and_LAYLA')\r\n\r\n from src.LAYLA_V02.constraints import Constraints\r\n from src.LAYLA_V02.targets import Targets\r\n from src.LAYLA_V02.ply_order import calc_ply_order\r\n constraints = Constraints(sym=True)\r\n targets = Targets(n_plies=21)\r\n ply_order = calc_ply_order(constraints, targets)\r\n n_plies_in_groups = np.array([5, 6])\r\n mom_areas, cummul_mom_areas, group_mom_areas = calc_mom_of_areas(\r\n constraints, targets, ply_order, n_plies_in_groups)\r\n print(mom_areas)\r\n print(cummul_mom_areas)\r\n print(group_mom_areas, sum(group_mom_areas))\r\n","sub_path":"src/LAYLA_V02/moment_of_areas.py","file_name":"moment_of_areas.py","file_ext":"py","file_size_in_byte":4787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"292133794","text":"#!/usr/bin/env python\n#\n# This implements the website logic; this is where you would do any dynamic\n# programming for the site pages and render them from templates.\n#\n# NOTE: This file will need heavy customizations. 
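See the sketch below.\n#\n# Added sketch (not part of the original skeleton): a typical page handler\n# wires a route to a template and returns its locals as template context:\n#\n#   @app.route('/hello/<name>', name='hello')\n#   @view('hello')\n#   def hello(name):\n#       return locals()\n#\n# 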
Search for \"XXX\".\n#\n# See the README.md for more information\n#\n# Written by Sean Reifschneider, 2013\n#\n# Part of the python-bottle-skeleton project at:\n#\n# https://github.com/linsomniac/python-bottle-skeleton\n#\n# I hereby place this work, python-bottle-wrapper, into the public domain.\n\n# XXX Remove these two lines if you aren't using a database\nfrom bottledbwrap import dbwrap\nimport model\nimport bottle\n# XXX Remove these lines and the next section if you aren't processing forms\nfrom wtforms import (Form, TextField, SelectField, PasswordField, DateField, BooleanField, DateTimeField, FileField, validators)\n\n# XXX Form validation example\nclass NewUserFormProcessor(Form):\n\n name = TextField('Username', [validators.Length(min=4, max=25)])\n sex = SelectField('Sex', choices=[('m','Male'), ('f', 'Female')])\n full_name = TextField('Full Name', [validators.Length(min=4, max=60)])\n email_address = TextField('Email Address', [validators.Email()])\n #email_address = TextField('Email Address', [validators.Length(min=4, max=60)])\n password = PasswordField(\n 'New Password',\n [validators.Required(),\n validators.EqualTo('confirm',\n message='Passwords must match')\n ])\n confirm = PasswordField('Repeat Password')\n\n birthday = DateField(u'Your birthday')\n now = DateTimeField(u'Current time', description='...for no particular reason')\n sample_file = FileField(u'Your favorite file')\n eula = BooleanField(u'I did not read the terms and conditions',\n validators=[validators.Required('You must agree to not agree!')])\n\ndef build_application():\n from bottle import (view, TEMPLATE_PATH, Bottle, static_file, request,\n redirect, BaseTemplate, template)\n\n # XXX Define application routes in this class\n\n app = Bottle()\n\n # Pretty much this entire function needs to be written for your application\n\n BaseTemplate.defaults['app'] = app # XXX Template global variable\n TEMPLATE_PATH.insert(0, 'views') # XXX Location of HTML templates\n\n # XXX Routes to static content\n #@app.route('/')\n @app.route('/static/<path:path>')\n def static(path):\n 'Serve static content.'\n return static_file(path, root='static/')\n\n # bootstrap test\n ##################################################\n @app.route('/bootstrap', name='bootstrap')\n @view('bootstrap-starter')\n def bootstrap():\n return locals()\n\n @app.route('/bootstrap/hello', name='bootstrap_hello')\n @view('bootstrap-hello')\n def bootstrap_hello():\n return locals()\n\n @app.route('/bootstrap/theme', name='bootstrap_theme')\n @view('bootstrap-theme')\n def bootstrap_theme():\n return locals()\n\n @app.route('/bootstrap/starter', name='bootstrap_starter')\n #@view('bootstrap-starter')\n def bootstrap_starter():\n return template('bootstrap-starter')\n ####################################################\n\n # XXX Index page\n @app.route('/', name='index') # XXX URL to page\n @view('index') # XXX Name of template\n def index():\n 'A simple form that shows the date'\n\n import datetime\n now = datetime.datetime.now()\n\n # any local variables can be used in the template\n return locals()\n\n @app.route('/question/all', name='question_list')\n @view('question-list')\n def question_list():\n 'A simple page from a database.'\n\n db = dbwrap.session()\n\n questions = db.query(model.ChoiceQuestion).order_by(model.ChoiceQuestion.id)\n\n # any local variables can be used in the template\n return locals()\n
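 # NB: returning locals() hands every local variable to the template as its\n # rendering context; the handlers below rely on the same idiom.\n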
\n @app.route('/question/<qid>', name='question') # XXX URL to page\n @view('question') # XXX Name of template\n def question_info(qid):\n 'A simple page from a database.'\n\n q = model.question_by_id(qid)\n choices = q.choice_list.split(\"@\")\n\n # any local variables can be used in the template\n return locals()\n\n @app.get('/question/new-choice', name='new_choice')\n @app.post('/question/new-choice')\n @view('new-choice')\n def new_choice():\n 'input choice question'\n\n if request.method == 'POST':\n db = dbwrap.session()\n\n multi = 0\n if len(request.POST.type.strip()) > 0:\n multi = 1\n\n choice_list = request.POST.itema.strip() \\\n + '@' + request.POST.itemb.strip() \\\n + '@' + request.POST.itemc.strip() \\\n + '@' + request.POST.itemd.strip()\n q = model.ChoiceQuestion(\n descr=request.POST.descr.strip(),\n multi=multi, choice_list=choice_list,\n note=request.POST.note.strip())\n db.add(q)\n db.commit()\n\n redirect(app.get_url('question', qid=q.id))\n\n # any local variables can be used in the template\n return locals()\n\n @app.get('/question/new-essay', name='new_essay')\n @app.post('/question/new-essay')\n @view('new-essay')\n def new_essay():\n 'input essay question'\n\n if request.method == 'POST':\n db = dbwrap.session()\n\n descr = request.POST.descr.strip()\n q = model.EssayQuestion(\n descr=descr,\n note=request.POST.note.strip())\n db.add(q)\n db.commit()\n\n # any local variables can be used in the template\n return locals()\n\n @app.get('/question/new-truefalse', name='new_truefalse')\n @app.post('/question/new-truefalse')\n @view('new-truefalse')\n def new_truefalse():\n 'input true-false question'\n\n if request.method == 'POST':\n db = dbwrap.session()\n\n # any local variables can be used in the template\n return locals()\n\n @app.get('/question/new-snapshot', name='new_snapshot')\n @app.post('/question/new-snapshot')\n @view('new-snapshot')\n def new_snapshot():\n 'input snapshot question'\n\n if request.method == 'POST':\n db = dbwrap.session()\n\n # any local variables can be used in the template\n return locals()\n\n # XXX User list page\n @app.route('/users', name='user_list') # XXX URL to page\n @view('users') # XXX Name of template\n def user_list():\n 'A simple page from a database.'\n\n db = dbwrap.session()\n\n users = db.query(model.User).order_by(model.User.name)\n\n # any local variables can be used in the template\n return locals()\n\n # XXX User list page\n @app.route('/users/all', name='user_table')\n @view('user-table')\n def user_table():\n 'A simple page from a database.'\n\n db = dbwrap.session()\n\n users = db.query(model.User).order_by(model.User.id)\n\n # any local variables can be used in the template\n return locals()\n\n # XXX User details dynamically-generated URL\n @app.route('/users/<username>', name='user') # XXX URL to page\n @view('user') # XXX Name of template\n def user_info(username):\n 'A simple page from a database.'\n\n user = model.user_by_name(username)\n\n # any local variables can be used in the template\n return locals()\n\n # XXX A simple form example, not used on the demo site\n @app.get('/form', name='form') # XXX URL to page\n @app.post('/form')\n @view('form') # XXX Name of template\n def form():\n 'A simple form processing example'\n\n form = NewUserFormProcessor(request.forms.decode())\n if request.method == 'POST' and form.validate():\n # XXX Do something with form fields here\n\n # if successful\n redirect('/users/%s' % form.name.data)\n\n # any local variables can be used in the template\n return locals()\n
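\n # (Added note) The POST handlers above follow the post/redirect/get pattern:\n # a successful submission redirects, so refreshing never resubmits the form.\n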
\n # XXX Create a new user, form processing, including GET and POST\n @app.get('/new-user', name='user_new') # XXX GET URL to page\n @app.post('/new-user') # XXX POST URL to page\n @view('user-new') # XXX Name of template\n def new_user():\n 'A sample of interacting with a form and a database.'\n\n form = NewUserFormProcessor(request.forms.decode())\n\n if request.method == 'POST' and form.validate():\n db = dbwrap.session()\n\n sean = model.User(\n full_name=form.full_name.data, name=form.name.data,\n email_address=form.email_address.data)\n db.add(sean)\n db.commit()\n\n redirect(app.get_url('user', username=form.name.data))\n\n # any local variables can be used in the template\n return locals()\n\n # REQUIRED: return the application handle here\n return app\n","sub_path":"website.py","file_name":"website.py","file_ext":"py","file_size_in_byte":9242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"3987659","text":"import functools\nfrom math import gcd\n\nif __name__ == '__main__':\n N = int(input())\n\n for __ in range(N):\n size = int(input())\n s = list(map(int, input().split()))\n\n # fractions.gcd was removed in Python 3.9; use math.gcd instead\n g = functools.reduce(gcd, s)\n print('YES' if g == 1 else 'NO')\n","sub_path":"algorithms/python/sherlock_and_gcd.py","file_name":"sherlock_and_gcd.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"479865899","text":"import abc\nimport copy\nimport importlib\nimport importlib.util\nimport inspect\nimport itertools\nimport os.path\nimport pathlib\nimport pkgutil\nimport re\nimport sys\nimport warnings\nfrom collections import OrderedDict, defaultdict\nfrom dataclasses import dataclass, field\nfrom functools import partial\nfrom inspect import Parameter\nfrom types import ModuleType\nfrom typing import Type, Callable, Dict, List, Optional, OrderedDict as OrderedDictT, TextIO, Tuple, NamedTuple, Any, \\\n MutableMapping, Generator, Union\n\nimport torch\nimport yaml\nfrom sympy import parse_expr\nfrom torch import nn\nfrom torch.nn import Module\nfrom torch.utils.data import DataLoader\n\nfrom loss.uncertainty import UncertaintyWeightedLoss\nfrom utilities.neptune import NeptuneRun\nfrom utilities.path import config_path, src_path, checkpoints_path, root_path\nfrom utilities.train import TrainingSetup\n\n\nclass IArgResolver(metaclass=abc.ABCMeta):\n CACHE: Dict[str, Dict[str, str]] = {}\n\n @abc.abstractmethod\n def resolve(self, class_name: str, instance: nn.Module) -> str or None:\n raise NotImplementedError\n\n def cache(self, cls_name: str, arg: str, arg_cls_name: str) -> None:\n if cls_name not in self.__class__.CACHE.keys():\n self.__class__.CACHE[cls_name] = {}\n if arg not in self.__class__.CACHE[cls_name].keys():\n self.__class__.CACHE[cls_name][arg] = arg_cls_name\n\n @abc.abstractmethod\n def state_dict_params(self) -> dict:\n raise NotImplementedError\n\n def state_dict(self) -> dict:\n return {\n 'class': self.__class__.__name__,\n 'init_kwargs': self.state_dict_params()\n }\n\n @staticmethod\n def from_sd(sd: dict) -> 'IArgResolver':\n cls_name = sd['class']\n cls = None\n for sc in IArgResolver.__subclasses__():\n if sc.__name__ == cls_name:\n cls = sc\n break\n assert cls is not None, f'Subclass named \"{cls_name}\" is not a subclass of IArgResolver'\n # noinspection PyArgumentList\n return cls(**sd['init_kwargs'])\n\n\nclass IClassResolver(metaclass=abc.ABCMeta):\n CACHE: Dict[str, Type[nn.Module]] = {}\n CONDE_ENV_PATH: str = pathlib.Path(os.sep.join(sys.exec_prefix.split(os.sep)))\n\n def cache(self, class_name: str, cls: Type[nn.Module] or Callable[[str], Type[nn.Module]]):\n if class_name not in 
self.__class__.CACHE.keys():\n if callable(cls):\n cls = cls(class_name)\n self.__class__.CACHE[class_name] = cls\n else:\n print(f'\\t[IClassResolver::cache] Cache hit for class \"{class_name}\"')\n\n def can_resolve(self, class_name) -> bool:\n try:\n return self.resolve(class_name) is not None\n except:\n pass\n return False\n\n @abc.abstractmethod\n def resolve(self, class_name: str) -> nn.Module or None:\n raise NotImplementedError\n\n @abc.abstractmethod\n def state_dict_params(self) -> dict:\n raise NotImplementedError\n\n def state_dict(self) -> dict:\n return {\n 'class': self.__class__.__name__,\n 'init_kwargs': self.state_dict_params()\n }\n\n @staticmethod\n def fix_abs_path(p: str) -> str:\n project_name = root_path.stem\n p_parts = p.split('/' if p.startswith('/') else '\\\\')\n return os.path.join(str(root_path), *p_parts[p_parts.index(project_name) + 1:])\n\n @staticmethod\n def fix_conda_path(p: str) -> str or None:\n p_parts = p.split('/' if p.startswith('/') else '\\\\')\n new_path = os.path.join(str(IClassResolver.CONDE_ENV_PATH), *p_parts[p_parts.index('envs') + 2:])\n return new_path if os.path.exists(new_path) else None\n\n @staticmethod\n def fix_abs_path_rec(d: dict) -> dict:\n for k in d.keys():\n v = d[k]\n if type(v) == str:\n if 'conda' in v.lower():\n d[k] = IClassResolver.fix_conda_path(v)\n elif v.startswith('/') or ':\\\\' in v.lower():\n d[k] = IClassResolver.fix_abs_path(v)\n return d\n\n @staticmethod\n def from_sd(sd: dict) -> 'IClassResolver':\n cls_name = sd['class']\n cls = None\n for sc in IClassResolver.__subclasses__():\n if sc.__name__ == cls_name:\n cls = sc\n break\n assert cls is not None, f'Subclass named \"{cls_name}\" is not a subclass of IClassResolver'\n init_kwargs = IClassResolver.fix_abs_path_rec(sd['init_kwargs'])\n # noinspection PyArgumentList\n return cls(**init_kwargs)\n\n\n# ==================================\n\n\nclass ConfigArgResolver(IArgResolver):\n def __init__(self, mapping: Dict[str, Type[nn.Module] or str] or None = None):\n self.__class__.CACHE = {}\n if mapping is None:\n mapping = {}\n for cls_dot_arg, arg_cls_name in mapping.items():\n cls_name, arg_name = cls_dot_arg.split('.', maxsplit=1)\n if type(arg_cls_name) != str:\n arg_cls_name = arg_cls_name.__name__ if hasattr(arg_cls_name, '__name__') else str(arg_cls_name)\n self.cache(cls_name, arg_name, arg_cls_name)\n self.mapping = mapping\n\n def resolve(self, class_name: str, instance: nn.Module) -> str or None:\n if class_name not in self.__class__.CACHE.keys():\n return None\n for arg_name, arg_class_name in self.__class__.CACHE[class_name].items():\n if arg_class_name == instance.__class__.__name__ or (\n arg_class_name.startswith('*') and\n instance.__class__.__name__.endswith(arg_class_name.replace('*', ''))\n ):\n return arg_name\n return None\n\n def state_dict_params(self):\n return {'mapping': self.mapping}\n\n\nclass GlobalClassResolver(IClassResolver):\n def resolve(self, class_name: str) -> Type[nn.Module] or None:\n try:\n self.cache(class_name, lambda cn: globals()[cn])\n return IClassResolver.CACHE[class_name]\n except KeyError:\n print(f'[GlobalClassResolver::resolve] Class resolution FAILed (class_name=\"{class_name}\")',\n file=sys.stderr)\n return None\n\n def state_dict_params(self):\n return {}\n\n\nclass ImportedModuleClassResolver(IClassResolver):\n # noinspection PyBroadException\n def __init__(self, module_name: Optional[str] = None, module_file: Optional[str] = None,\n module: Optional[ModuleType] = None):\n if module is None and 
module_file is None:\n # all hopes go to import_module(\"module_name\")\n module = importlib.import_module(module_name)\n if module is not None:\n module_name, module_file = module.__name__, module.__file__\n spec = importlib.util.spec_from_file_location(module_name, module_file)\n self.module = importlib.util.module_from_spec(spec)\n try:\n spec.loader.exec_module(self.module)\n except:\n pass\n self.module_name, self.module_file = module_name, module_file\n\n def resolve(self, class_name: str) -> Type[nn.Module] or None:\n if hasattr(self.module, class_name):\n return getattr(self.module, class_name)\n return None\n\n def state_dict_params(self):\n return {'module_name': self.module_name, 'module_file': self.module_file}\n\n\nclass RecursiveModuleClassResolver(IClassResolver):\n def __init__(self, module_name: str = os.path.join(src_path, 'model')):\n self.module_name = module_name\n self.modules = [importlib.import_module('.' + name, os.path.basename(module_name))\n for (_, name, _) in pkgutil.iter_modules([module_name])]\n\n def resolve(self, class_name: str) -> Type[nn.Module] or None:\n for module in self.modules:\n if hasattr(module, class_name):\n return getattr(module, class_name)\n return None\n\n def state_dict_params(self):\n return {'module_name': self.module_name}\n\n\n# ==================================\n\n@dataclass\nclass ConfigEntry:\n class_name: str\n cls: Type[nn.Module]\n init_kwargs: dict = field(default_factory=dict)\n instance: Optional[nn.Module] = None\n overridden_kwargs: Optional[dict] = None\n\n @property\n def cls_args(self) -> Dict[str, Parameter]:\n # noinspection PyTypeChecker\n return inspect.signature(getattr(self.cls, '__init__')).parameters\n\n @property\n def original_args(self) -> Dict[str, int or float or str or bool]:\n return {k: v for k, v in self.init_kwargs.items() if not hasattr(v, '__dict__')}\n\n @property\n def resolved(self) -> bool:\n return self.instance is not None\n\n @property\n def unresolved_args(self) -> List[str]:\n return [p.name for p in self.cls_args.values()\n if inspect.isclass(p.annotation) and issubclass(p.annotation, nn.Module)\n and p.name not in self.init_kwargs.keys()]\n\n def clone(self) -> 'ConfigEntry':\n return ConfigEntry(self.class_name, self.cls, self.init_kwargs, self.instance)\n\n def collect_overridden(self, od: dict) -> Optional[Dict[str, Any]]:\n found_k = None\n for k in od.keys():\n if re.search(k, self.class_name):\n found_k = k\n break\n collected = None\n if found_k is not None:\n self.overridden_kwargs = od.pop(found_k)\n collected = {found_k: copy.deepcopy(self.overridden_kwargs)}\n self._process_overridden_args()\n if self.overridden_kwargs is not None and len(self.overridden_kwargs):\n print(f'\\t[ConfigEntry::collect_overridden] collecting \"{found_k}\": {str(self.overridden_kwargs)}')\n return collected\n\n def _recursively_resolved(self, d: dict or Any) -> bool:\n if type(d) != dict:\n if type(d) == ConfigEntry:\n return d.resolved\n return True\n for k, v in d.items():\n if k.startswith('__'):\n continue\n if type(v) == ConfigEntry:\n if not v.resolved:\n return False\n d[k] = v.instance\n if type(v) == dict and not self._recursively_resolved(v):\n return False\n elif type(v) == list:\n for i, vi in enumerate(v):\n if not self._recursively_resolved(vi):\n return False\n elif type(vi) == ConfigEntry:\n d[k][i] = vi.instance\n return True\n\n def resolvable(self) -> bool:\n return (self.unresolved_args is None or len(self.unresolved_args) == 0) \\\n and 
self._recursively_resolved(self.init_kwargs)\n\n def _parse_deferred_args(self, deferred_args: dict or None) -> dict:\n if deferred_args is None or type(deferred_args) != dict:\n return {}\n for dk in deferred_args.keys():\n if dk in self.cls_args.keys() and dk not in self.init_kwargs.keys():\n self.init_kwargs[dk] = deferred_args[dk]\n\n def _process_overridden_args(self):\n if self.overridden_kwargs is not None and type(self.overridden_kwargs) == dict:\n for dk in list(self.overridden_kwargs.keys()):\n if dk in self.cls_args.keys() and dk in self.init_kwargs.keys():\n if type(self.overridden_kwargs[dk]) == dict:\n child_ce = self.init_kwargs[dk]\n if type(child_ce) != ConfigEntry:\n child_ce = child_ce[list(child_ce.keys())[-1]]\n assert isinstance(child_ce, ConfigEntry)\n child_ce.collect_overridden({str(child_ce.class_name): self.overridden_kwargs.pop(dk)})\n\n def _override_init_kwargs(self):\n if self.overridden_kwargs is not None:\n for k, v in self.overridden_kwargs.items():\n if k in self.init_kwargs.keys():\n self.init_kwargs[k] = v\n\n def _process_init_kwargs(self):\n if self.init_kwargs is None:\n self.init_kwargs = {}\n self.init_kwargs = {k: v for k, v in self.init_kwargs.items() if not k.startswith('__')}\n for k in self.init_kwargs.keys():\n v = self.init_kwargs[k]\n if type(v) == dict and len(v.keys()) in [1, 2]:\n for vk, vv in v.items():\n if vk == '__resolved':\n continue\n if type(vv) == ConfigEntry and vk == vv.class_name and vv.resolved:\n self.init_kwargs[k] = vv.instance\n break\n if isinstance(vv, object) and vk == vv.__class__.__name__:\n self.init_kwargs[k] = vv\n break\n\n # noinspection PyArgumentList,PyTypeChecker,PyBroadException\n def resolve(self, deferred_args=None):\n # noinspection PyArgumentList\n self._process_init_kwargs()\n self._override_init_kwargs()\n self._parse_deferred_args(deferred_args)\n try:\n self.instance = self.cls(**self.init_kwargs)\n except Exception as e:\n print(f'\\t[ConfigEntry::resolve] {str(e)}', file=sys.stderr)\n print(f'\\t[ConfigEntry::resolve] Initializing {self.class_name} as partial with args: {self.init_kwargs}')\n self.instance = partial(self.cls, **self.init_kwargs)\n return self.instance\n\n @staticmethod\n def from_cloned(ce: 'ConfigEntry') -> 'ConfigEntry':\n cn = ConfigEntry('', nn.Module)\n cn.class_name = ce.class_name\n cn.cls = ce.cls\n cn.init_kwargs = ce.init_kwargs\n cn.instance = ce.instance\n return cn\n\n\nclass ConfigFullLoader(yaml.FullLoader):\n def __init__(self, stream):\n super().__init__(stream)\n self.add_constructor(tag='!eval', constructor=self.evaluate)\n\n @staticmethod\n def flatten_dict(d: MutableMapping, parent_key: str = '', sep: str = '.') -> MutableMapping:\n items = []\n for k, v in d.items():\n new_key = parent_key + sep + k if parent_key else k\n if isinstance(v, MutableMapping):\n items.extend(ConfigFullLoader.flatten_dict(v, new_key, sep=sep).items())\n else:\n items.append((new_key, v))\n return dict(items)\n\n @staticmethod\n def evaluate(loader: yaml.Loader, node: yaml.nodes.MappingNode):\n # noinspection PyTypeChecker\n expr = loader.construct_scalar(node)\n try:\n fd = ConfigFullLoader.flatten_dict(list(loader.constructed_objects.values())[0])\n val = round(float(parse_expr(expr, local_dict={k.split('.')[-1]: v for k, v in fd.items()})), 6)\n if val == int(val):\n val = int(val)\n except (ValueError, TypeError):\n return expr\n return val\n\n\nCLAR = Tuple[nn.Module, TrainingSetup, Optional[DataLoader], Optional[DataLoader], Optional[DataLoader], 'ConfigReader']\nBPD = 
Tuple[List[ConfigEntry], int, List[Tuple[str, int or float or bool or dict]], dict, dict, str]\n\n\nclass ConfigReader:\n INSTANCES: List['ConfigReader'] = []\n\n def __init__(self, cls_resolvers: List[IClassResolver] or IClassResolver or None = None,\n arg_resolver: IArgResolver or None = None, override_keys: dict or None = None, chkpt_fpath=None):\n if cls_resolvers is None:\n self.cls_resolvers = [GlobalClassResolver()]\n else:\n self.cls_resolvers = cls_resolvers if type(cls_resolvers) is list else [cls_resolvers]\n self.arg_resolver = ConfigArgResolver(mapping=dict()) if arg_resolver is None else arg_resolver\n self.override_keys = {} if override_keys is None else ConfigReader._to_nested_dict(override_keys)\n self.entries: List[ConfigEntry] = []\n self.n_unresolved = 0\n self.unused = []\n self._yaml_dict = None # model config\n self.train_config = None # train config\n self.chkpt_fpath = chkpt_fpath\n if len(ConfigReader.INSTANCES) > 0:\n warnings.warn('\\t[ConfigReader::__init__] trying to re-instantiate class object (should be singleton)')\n ConfigReader.INSTANCES.append(self)\n\n @property\n def unused_dict(self) -> OrderedDictT:\n return OrderedDict(self.unused)\n\n @property\n def yaml_dict(self) -> dict or OrderedDictT:\n return self._yaml_dict\n\n @yaml_dict.setter\n def yaml_dict(self, yd: dict or OrderedDictT) -> None:\n self.entries: List[ConfigEntry] = []\n self.n_unresolved = 0\n self.unused = []\n self._yaml_dict = yd\n\n def capture(self) -> BPD:\n return [e.clone() for e in self.entries], self.n_unresolved, copy.deepcopy(self.unused), \\\n copy.deepcopy(self._yaml_dict), self.train_config, self.chkpt_fpath\n\n def get_config_of_instance(self, instance: Any) -> dict or None:\n for entry in self.entries:\n if entry.instance is not None and entry.instance == instance:\n return entry.original_args\n return None\n\n @staticmethod\n def _replace_in_dict(inp: dict, key: str, value: dict or Any) -> dict:\n if type(value) == dict and re.search(list(value.keys())[0], key):\n value = value[list(value.keys())[0]]\n if key not in inp.keys():\n for key_, value_ in inp.items():\n if type(value_) == dict and key in value_.keys():\n inp = inp[key_]\n break\n else:\n return inp\n for k, v in value.items():\n if k in inp[key].keys():\n if type(v) == dict and type(inp[key]) == dict:\n inp[key] = ConfigReader._replace_in_dict(inp[key], k, v)\n else:\n inp[key][k] = v\n elif type(inp[key][list(inp[key].keys())[0]]) == dict and k in inp[key][list(inp[key].keys())[0]].keys():\n inp[key][list(inp[key].keys())[0]][k] = v\n else:\n raise AttributeError(f'\\t[ConfigReader::_replace_in_dict] {key}-{value}: Not found in dict. 
({inp})')\n return inp\n\n def _load_dict(self, d: dict or Any, depth: int = 0, unpack: bool = False) -> dict or Any:\n if type(d) == dict and '__resolved' in d.keys() and d['__resolved']:\n for dk, dv in d.items():\n if type(dv) == ConfigEntry:\n dv.instance = None # force re-resolution\n self.entries.append(dv)\n return d\n elif type(d) != dict:\n if type(d) == str:\n # Class Resolution\n for resolver in self.cls_resolvers:\n if resolver.can_resolve(d):\n cls = resolver.resolve(class_name=d)\n ce = ConfigEntry(class_name=d, cls=cls, init_kwargs={})\n self.entries.append(ce)\n return ce\n return d\n\n d_out = {'__resolved': True} if not unpack else []\n for k, v in d.items():\n # Recursive Resolution\n if type(v) == list:\n v = [self._load_dict(vi, depth + 1, unpack=True) for vi in v]\n else:\n v = self._load_dict(v, depth + 1)\n\n # Class Resolution\n for resolver in self.cls_resolvers:\n if resolver.can_resolve(k):\n cls = resolver.resolve(class_name=k)\n break\n else:\n if not unpack:\n d_out[k] = v\n else:\n d_out.append(v)\n if depth == 0:\n self.unused.append((k, v))\n continue\n ce = ConfigEntry(class_name=k, cls=cls, init_kwargs=v)\n if self.override_keys is not None:\n collected = ce.collect_overridden(self.override_keys)\n if collected is not None:\n ConfigReader._replace_in_dict(self._yaml_dict, k, collected)\n if self.train_config is not None:\n ConfigReader._replace_in_dict(self.train_config, k, collected)\n self.entries.append(ce)\n if not unpack:\n d_out[k] = ce\n else:\n d_out.append(ce)\n if unpack and len(d_out) == 1:\n return d_out[0]\n return d_out\n\n def load(self, yaml_fp_or_stream_or_dict: Union[str, dict, TextIO]) -> None:\n if type(yaml_fp_or_stream_or_dict) in [dict, OrderedDict, NamedTuple]:\n yaml_dict = yaml_fp_or_stream_or_dict\n else:\n yaml_dict = yaml.load(yaml_fp_or_stream_or_dict, Loader=ConfigFullLoader)\n try:\n self.yaml_dict = copy.deepcopy(yaml_dict)\n except TypeError:\n self.yaml_dict = yaml_dict\n\n self._load_dict(self.yaml_dict, depth=0)\n self.n_unresolved = len(self.entries)\n\n def resolved(self, resolved_entry: ConfigEntry):\n print(f'\\t[ConfigReader::resolved] module resolved'\n f'{\" (partially)\" if type(resolved_entry.instance) == partial else \"\"}'\n f': {resolved_entry.class_name}')\n for entry in [e for e in self.entries if not e.resolved and len(e.unresolved_args) > 0]:\n entry_arg = self.arg_resolver.resolve(entry.class_name, resolved_entry.instance)\n if entry_arg is not None:\n entry.init_kwargs[entry_arg] = resolved_entry.instance\n self.n_unresolved -= 1\n\n def resolve(self, only_keys=None, deferred_args=None) -> OrderedDictT[str, nn.Module]:\n n_iters, instances = 0, []\n while self.n_unresolved > 0 and n_iters < 10:\n for entry_i, entry in enumerate(self.entries):\n if entry.resolvable() and not entry.resolved:\n entry.resolve(deferred_args=deferred_args)\n self.resolved(entry)\n if only_keys is None or entry.class_name in only_keys or entry.class_name.lower() in only_keys:\n instances.append((entry.class_name, entry.instance))\n n_iters += 1\n return ConfigReader._to_ordered_dict(instances)\n\n def resolve_singleton(self, config: dict or OrderedDictT, return_top: bool = False, **resolve_kwargs):\n backup = self.capture()\n cls_name = [k for k in config.keys() if not k.startswith('__')][0]\n # config[cls_name] = {**config[cls_name], **resolve_kwargs.pop('deferred_args', {})}\n self.load(config)\n resolved = self.resolve(**resolve_kwargs)\n if cls_name in resolved.keys() or return_top:\n if cls_name not in 
resolved.keys():\n cls_name = list(resolved.keys())[0]\n resolved = resolved[cls_name]\n self.restore(backup)\n return resolved\n\n def restore(self, data: BPD) -> None:\n self.entries = [ConfigEntry.from_cloned(e) for e in data[0]]\n self.n_unresolved = data[1]\n self.unused = copy.deepcopy(data[2])\n self._yaml_dict = copy.deepcopy(data[3])\n # self.train_config = copy.deepcopy(data[4]) DO NOT RESTORE TRAIN CONFIG WHEN DOING GRID SEARCH\n self.chkpt_fpath = data[5]\n\n def state_dict(self) -> dict:\n return {\n 'config': self.yaml_dict,\n 'train_config': self.train_config,\n 'cls_resolver': [cr.state_dict() for cr in self.cls_resolvers],\n 'arg_resolver': self.arg_resolver.state_dict()\n }\n\n # ---------------------------------\n\n # def\n\n # ---------------------------------\n\n @staticmethod\n def _to_ordered_dict(items: list) -> OrderedDictT:\n items_uk, items_uv = [], [] # unique keys and corresponding merged values\n for item in items:\n item_key, item_value = item\n if item_key not in items_uk:\n items_uk.append(item_key)\n items_uv.append(item_value)\n else:\n uv_item = items_uv[items_uk.index(item_key)]\n if type(uv_item) == list:\n uv_item.append(item_value)\n else:\n items_uv[items_uk.index(item_key)] = [uv_item, item_value]\n return OrderedDict(zip(items_uk, items_uv))\n\n @staticmethod\n def _to_nested_dict(orig_dict: dict) -> dict:\n def deep_dict():\n return defaultdict(deep_dict)\n\n result = deep_dict()\n\n def deep_insert(key, value):\n d = result\n keys = key.split(\".\")\n for subkey in keys[:-1]:\n d = d[subkey]\n d[keys[-1]] = value\n\n for orig_dict_k, orig_dict_v in orig_dict.items():\n deep_insert(orig_dict_k, orig_dict_v)\n\n def cast_to_dict(input_dict: dict or defaultdict):\n for k in input_dict.keys():\n if type(input_dict[k]) == defaultdict:\n input_dict[k] = cast_to_dict(input_dict[k])\n return dict(input_dict)\n\n return cast_to_dict(result)\n\n @staticmethod\n def from_sd(sd: dict) -> 'ConfigReader':\n cr = ConfigReader()\n cr.cls_resolvers = [\n *[RecursiveModuleClassResolver(os.path.join(src_path, module_name))\n for module_name in ['dataset', 'loss', 'model', ]],\n ImportedModuleClassResolver(module=torch.nn),\n ImportedModuleClassResolver(module=torch.nn.init),\n ImportedModuleClassResolver(module=torch.optim),\n ImportedModuleClassResolver(module=torch.optim.lr_scheduler),\n ]\n cr.arg_resolver = IArgResolver.from_sd(sd['arg_resolver'])\n cr.load(sd['config'])\n cr.train_config = sd.pop('train_config', None)\n return cr\n\n @staticmethod\n def from_config(cfg: Union[str, dict, TextIO, None] = None, cr: Optional['ConfigReader'] = None,\n deferred_args: dict or None = None, args_mapping: dict or None = None,\n **cr_kwargs) -> Tuple[OrderedDictT[str, Module], 'ConfigReader']:\n if cr is None:\n cr = ConfigReader(\n cls_resolvers=[\n *[RecursiveModuleClassResolver(os.path.join(src_path, module_name))\n for module_name in ['dataset', 'loss', 'model', ]],\n ImportedModuleClassResolver(module=torch.nn),\n ImportedModuleClassResolver(module=torch.nn.init),\n ImportedModuleClassResolver(module=torch.optim),\n ImportedModuleClassResolver(module=torch.optim.lr_scheduler),\n ],\n arg_resolver=ConfigArgResolver(args_mapping),\n **cr_kwargs\n )\n cr.load(cfg)\n else:\n assert isinstance(cr, ConfigReader)\n if 'override_keys' in cr_kwargs is not None:\n cr.override_keys = cr_kwargs['override_keys']\n resolved = cr.resolve(deferred_args=deferred_args)\n return resolved, cr\n\n @staticmethod\n def load_model(model_key: str = None, model_config: str = 'default', 
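 # (added note) args_mapping maps 'ClassName.arg' keys to the classes injected for those args\n 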
args_mapping: dict or None = None,\n cr: 'ConfigReader' or None = None, override_keys: dict or None = None,\n **unused_kwargs) -> Tuple[nn.Module, 'ConfigReader']:\n assert model_key is not None or cr is not None, 'Either model_key or cr must be provided.'\n cfg, chkpt_fpath = None, None\n if model_key is not None:\n yaml_fpath = os.path.join(config_path, 'model', (model_key + '_' + model_config).lower() + '.yaml')\n assert os.path.exists(yaml_fpath), yaml_fpath\n cfg = open(yaml_fpath)\n chkpt_fpath = f'{model_key}_{model_config}.pth'.lower()\n resolved, cr = ConfigReader.from_config(\n cfg,\n cr=cr,\n args_mapping=args_mapping,\n override_keys=override_keys,\n chkpt_fpath=chkpt_fpath,\n )\n cr.unused += [(k, v) for k, v in unused_kwargs.items()]\n return resolved.popitem()[-1], cr\n\n @staticmethod\n def load_all_grid_search(grid: Dict[str, list], load_kwargs):\n def _to_iterable(obj: Any):\n return obj if type(obj) in [list, tuple] else [obj]\n\n gvs = [_to_iterable(v) for v in grid.values()]\n for grid_values in itertools.product(*gvs):\n hparams_dict = dict(zip(grid.keys(), grid_values))\n load_kwargs['override_keys'] = hparams_dict\n hp_indices = tuple([hpv.index(v) for hpv, v in zip(gvs, hparams_dict.values())])\n yield ConfigReader.load_all(**load_kwargs), hparams_dict, hp_indices\n\n @staticmethod\n def load_all(gs_dict: Optional[Dict[str, list]] = None, **load_kwargs) -> CLAR or Generator[CLAR, None, None]:\n if gs_dict is not None:\n return ConfigReader.load_all_grid_search(gs_dict, load_kwargs)\n\n device = load_kwargs.pop('device', 'cpu')\n # Instantiate Model\n model, cr = ConfigReader.load_model(**load_kwargs)\n unused_keys = copy.deepcopy(cr.unused_dict)\n model = model.to(device)\n\n # Load train config\n train_config_arg = load_kwargs.pop('train_config', None)\n if cr.train_config is not None and len(cr.train_config.keys()) > 0 and train_config_arg is None:\n # train_config = cr.train_config\n ...\n else:\n if train_config_arg is None:\n train_config_arg = ''\n train_config_fpath = os.path.join(str(config_path), 'train', f'{train_config_arg}.yaml')\n if os.path.exists(train_config_fpath) and os.path.isfile(train_config_fpath):\n with open(train_config_fpath) as tyfp:\n train_config = yaml.load(tyfp, Loader=ConfigFullLoader)\n else:\n train_config = {}\n cr.train_config = copy.deepcopy(train_config)\n if sys.version_info[0] >= 3 and sys.version_info[1] >= 9:\n unused_keys |= cr.train_config\n else:\n unused_keys = {**unused_keys, **cr.train_config}\n\n # Instantiate TrainingSetup\n ts = TrainingSetup(num_epochs=unused_keys.pop('num_epochs', 1), seed=unused_keys.pop('seed', 0),\n device=device, train_config=cr.train_config)\n for key in ['init_fn', 'optim', 'scheduler', 'loss']:\n if key in unused_keys.keys():\n key_dict = unused_keys.pop(key)\n deferred_args = {\n 'optim': {'params': model.parameters()},\n 'scheduler': {'optimizer': ts.optimizer},\n }.get(key, {})\n cls = cr.resolve_singleton(key_dict, deferred_args=deferred_args)\n\n if 'init_fn' == key:\n def weight_initializer(m: nn.Module):\n if isinstance(m, (nn.Conv2d, nn.Conv1d, nn.ConvTranspose1d, nn.ConvTranspose2d)):\n cls(m.weight)\n elif isinstance(m, (nn.BatchNorm2d, nn.BatchNorm1d, nn.GroupNorm, nn.LayerNorm,\n nn.InstanceNorm1d, nn.InstanceNorm2d, nn.Linear)):\n if hasattr(m, 'weight'):\n if m.weight.dim() == 1:\n nn.init.constant_(m.weight, 1.0)\n else:\n cls(m.weight)\n if hasattr(m, 'bias'):\n nn.init.constant_(m.bias, 0.0)\n\n model.apply(weight_initializer)\n print('\\t[ConfigReader::load_all] model 
initialized')\n elif key == 'loss' and isinstance(cls, partial):\n loss = cls(device=device)\n setattr(ts, key, loss)\n elif hasattr(ts, key):\n setattr(ts, key, cls)\n\n if isinstance(ts.loss_function, UncertaintyWeightedLoss):\n ts.optimizer.add_param_group({\n 'params': [ts.loss_function.log_var1, ts.loss_function.log_var2],\n 'lr': ts.optimizer.param_groups[0]['lr']\n })\n\n # Instantiate Dataloaders\n dl_train, dl_val, dl_test = None, None, None\n if 'dataloader' in unused_keys:\n resolved_dls = cr.resolve_singleton(unused_keys['dataloader'], return_top=True,\n deferred_args=dict(device=device))\n resolved_dls = resolved_dls if type(resolved_dls) == list else [resolved_dls]\n # noinspection PyTypeChecker\n dl_train, dl_test, dl_val = resolved_dls + [None] * (3 - len(resolved_dls))\n\n # Return everything\n return model, ts, dl_train, dl_val, dl_test, cr\n\n @staticmethod\n def load_all_from_checkpoint_grid_search(grid: Dict[str, list], load_kwargs):\n def _to_iterable(obj: Any):\n return obj if type(obj) in [list, tuple] else [obj]\n\n gvs = [_to_iterable(v) for v in grid.values()]\n for grid_values in itertools.product(*gvs):\n hparams_dict = dict(zip(grid.keys(), grid_values))\n load_kwargs['override_keys'] = hparams_dict\n hp_indices = tuple([hpv.index(v) for hpv, v in zip(gvs, hparams_dict.values())])\n load_kwargs['chkpt_suffix'] = f'_{\"\".join([str(_) for _ in hp_indices])}'\n yield ConfigReader.load_all_from_checkpoint(**load_kwargs), hparams_dict, hp_indices\n\n @staticmethod\n def load_model_from_checkpoint(model_key: str, model_config: str, device: str = 'cpu', chkpt_suffix: str = ''):\n # 1) Open checkpoint and load config\n chkpt_fname = f'{model_key}_{model_config}{chkpt_suffix}.pth'.lower()\n chkpt_fpath = os.path.join(checkpoints_path, chkpt_fname.replace(str(checkpoints_path), ''))\n if not os.path.isfile(chkpt_fpath) or not os.path.exists(chkpt_fpath):\n raise FileNotFoundError(chkpt_fpath)\n # return ConfigReader.load_all(model_key=model_key, model_config=model_config, train_config=train_config,\n # device=device, **load_kwargs)\n chkpt = torch.load(chkpt_fpath)\n assert 'config' in chkpt.keys(), \\\n '\\t[ConfigReader::load_all_from_checkpoint] checkpoint error: No \"config\" key found'\n config_dict = chkpt.pop('config')\n cr = ConfigReader.from_sd(config_dict)\n cr.chkpt_fpath = chkpt_fpath\n\n # 2) Initialize model/optim/sched/dls from config\n # Instantiate Model\n model, cr = ConfigReader.load_model(cr=cr)\n model.load_state_dict(chkpt.pop('model'))\n model.trained = True\n return model.to(device)\n\n @staticmethod\n def load_all_from_checkpoint(model_key: str, model_config: str, device: str = 'cpu', chkpt_suffix: str = '',\n train_config: Optional[str] = None, gs_dict: Optional[Dict[str, list]] = None,\n **load_kwargs) -> CLAR or Generator[CLAR, None, None]:\n if gs_dict is not None:\n extra_kwargs = {'model_key': model_key, 'model_config': model_config, 'device': device,\n 'chkpt_suffix': chkpt_suffix, 'train_config': train_config}\n return ConfigReader.load_all_from_checkpoint_grid_search(gs_dict, {**load_kwargs, **extra_kwargs})\n\n # 1) Open checkpoint and load config\n chkpt_fname = f'{model_key}_{model_config}{chkpt_suffix}.pth'.lower()\n chkpt_fpath = os.path.join(checkpoints_path, chkpt_fname.replace(str(checkpoints_path), ''))\n if not os.path.isfile(chkpt_fpath) or not os.path.exists(chkpt_fpath):\n raise FileNotFoundError(chkpt_fpath)\n # return ConfigReader.load_all(model_key=model_key, model_config=model_config, train_config=train_config,\n # 
device=device, **load_kwargs)\n chkpt = torch.load(chkpt_fpath)\n assert 'config' in chkpt.keys(), \\\n '\\t[ConfigReader::load_all_from_checkpoint] checkpoint error: No \"config\" key found'\n config_dict = chkpt.pop('config')\n cr = ConfigReader.from_sd(config_dict)\n cr.chkpt_fpath = chkpt_fpath\n\n # 2) Initialize model/optim/sched/dls from config\n load_kwargs.setdefault('device', device)\n load_kwargs.setdefault('train_config', train_config)\n model, ts, dl_train, dl_val, dl_test, cr = ConfigReader.load_all(cr=cr, **load_kwargs)\n\n # 3) Load states\n # 3.1) model\n model.load_state_dict(chkpt.pop('model'))\n model.trained = True\n loaded_keys = ['model']\n # 3.2) optimizer\n try:\n loaded_keys.extend(\n ts.load_state(chkpt)\n )\n except ValueError:\n ts.optimizer.add_param_group({\n 'params': [torch.tensor(0.), torch.tensor(0.)],\n 'lr': ts.optimizer.param_groups[0]['lr']\n })\n loaded_keys.extend(\n ts.load_state(chkpt)\n )\n # 3.3) dataloaders\n # Nothing, since we are using stateless dataloaders.\n # 3.4) loss\n # if 'loss' in chkpt.keys():\n # ts.loss_function.load_state_dict(chkpt.pop('loss'))\n # loaded_keys.append('loss')\n print(f'\\t[ConfigReader::load_all_from_checkpoint] Loaded state dicts from: {os.path.basename(chkpt_fpath)}')\n print(f'\\t[ConfigReader::load_all_from_checkpoint] State dict keys used: {loaded_keys}')\n\n # Final tasks\n model = model.to(device)\n return model, ts, dl_train, dl_val, dl_test, cr\n\n\n# ==================================\n\n\n# noinspection PyUnusedLocal\ndef capture_checkpoint(model: nn.Module, ts: TrainingSetup, cr: ConfigReader, chkpt_suffix,\n neptune_run: NeptuneRun) -> dict:\n # TODO alter this to account for separate pretrained/fine-tuned chkpts\n os.makedirs(checkpoints_path, exist_ok=True)\n # 1) Capture state dicts\n chkpt = ts.state_dict()\n chkpt['model'] = model.state_dict()\n chkpt['loss'] = ts.loss_function.state_dict()\n # 2) Retrieve config\n chkpt['config'] = cr.state_dict()\n # 3) Create final file\n filename = os.path.basename(cr.chkpt_fpath)\n filename_with_path = os.path.join(str(checkpoints_path), filename.replace('.pth', chkpt_suffix + '.pth'))\n torch.save(chkpt, filename_with_path)\n print(f'[capture_checkpoint] {filename_with_path}')\n neptune_run.upload_checkpoint(filename_with_path)\n return chkpt\n\n\nif __name__ == '__main__':\n vit = ConfigReader.load_all(\n model_key='ViT',\n model_config='12-16',\n train_config='default'\n )[0]\n print(vit)\n","sub_path":"src/utilities/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":38747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"489297734","text":"#!/usr/bin/python\nimport boto\nimport time\nimport sys\nimport glob\nimport os\nimport string\nimport subprocess\nimport os.path\nimport argparse\nimport logging\nimport ConfigParser\n\nclass NextAvailableDeviceFinder:\n \"\"\"Instances of this class return the next available storage device, /dev/xvdX, where\n X is a lower case letter a-z (in reverse order, starting from z). 
\"\"\"\n def __init__(self):\n self.available_letters = []\n\n for letter in string.lowercase:\n self.available_letters.append(letter)\n\n os.chdir(\"/dev\")\n\n for existing_device in glob.glob(\"xvd?\"):\n device_letter = existing_device[-1]\n self.available_letters.remove(device_letter)\n\n def find_next_device(self):\n return \"/dev/xvd%s\" % (self.available_letters.pop())\n\nclass Sleep:\n \"\"\"Sleep with a 1.5x exponential backoff\"\"\"\n def __init__(self, seconds=3):\n self.seconds = seconds\n\n def sleep(self):\n logging.debug(\"Sleeping for %d seconds\\n\" % (int(self.seconds)))\n time.sleep(int(self.seconds))\n self.seconds *= 1.5\n\nclass OpendedupWrangler:\n def __init__(self, sdfs_volume_name):\n self.sdfs_volume_name = sdfs_volume_name\n config = ConfigParser.ConfigParser()\n config.read('/usr/local/etc/snapdirector.cfg')\n self.bucketname = config.get('general', 'bucketname')\n self.aws_access_key_id = config.get('general', 'aws_access_key_id')\n self.aws_secret_access_key = config.get('general', 'aws_secret_access_key')\n\n\n def ensure_sdfs_volume_exists(self):\n \"\"\"This is supposed to be idempotent\"\"\"\n if not os.path.isfile(\"/etc/sdfs/%s-volume-cfg.xml\" % (self.sdfs_volume_name)):\n logging.info(\"No /etc/sdfs/%s-volume-cfg.xml, attempting to sync from s3\" % (self.sdfs_volume_name))\n command = \"aws s3 sync s3://%s/etc/sdfs /etc/sdfs\" % (self.bucketname)\n output = subprocess.check_output(command.split(\" \"))\n logging.debug(\"Output of '%s': %s\" % (command, output))\n if not os.path.isfile(\"/etc/sdfs/%s-volume-cfg.xml\" % (self.sdfs_volume_name)):\n logging.info(\"Still no /etc/sdfs/%s-volume-cfg.xml so it probably doesn't exist yet. Creating.\" % (self.sdfs_volume_name))\n output = subprocess.check_output(command.split(\" \"))\n command = \"bash /sbin/mkfs.sdfs\"\n command += \" --volume-name=%s\" % (self.sdfs_volume_name)\n command += \" --volume-capacity=256TB\"\n command += \" --aws-enabled=true\"\n command += \" --cloud-access-key=%s\" % (self.aws_access_key_id)\n command += \" --cloud-bucket-name=%s\" % (self.bucketname)\n command += \" --cloud-secret-key=%s\" % (self.aws_secret_access_key)\n command += \" --chunk-store-encrypt=true\"\n command += \" --aws-bucket-location=US\"\n output = subprocess.check_output(command.split(\" \"))\n logging.debug(\"Output of '%s': %s\" % (command, output))\n else:\n logging.info(\"Looks like there's an sdfs on S3. 
Got /opt/sdfs already, going to also get /etc/sdfs and /var/log/sdfs\")\n            command = \"aws s3 sync s3://%s/opt/sdfs /opt/sdfs\" % (self.bucketname)\n            output = subprocess.check_output(command.split(\" \"))\n            logging.debug(\"Output of '%s': %s\" % (command, output))\n            command = \"aws s3 sync s3://%s/var/log/sdfs /var/log/sdfs\" % (self.bucketname)\n            output = subprocess.check_output(command.split(\" \"))\n            logging.debug(\"Output of '%s': %s\" % (command, output))\n\n    def start_sdfs(self):\n        command = \"bash /sbin/mount.sdfs %s /media/%s/\" % (self.sdfs_volume_name, self.sdfs_volume_name)\n        self.mount_process = subprocess.Popen(command.split(\" \"))\n        time.sleep(10)\n\n    def stop_and_sync_sdfs(self):\n        command = \"killall java\" # a bit ham-handed\n        try:\n            output = subprocess.check_output(command.split(\" \"))\n            logging.debug(\"Output of '%s': %s\" % (command, output))\n            time.sleep(30)\n        except subprocess.CalledProcessError:\n            logging.exception(\"killall java failed, which is probably just fine\")\n\n        command = \"aws s3 sync /opt/sdfs s3://%s/opt/sdfs\" % (self.bucketname)\n        output = subprocess.check_output(command.split(\" \"))\n        logging.debug(\"Output of '%s': %s\" % (command, output))\n\n        command = \"aws s3 sync /etc/sdfs s3://%s/etc/sdfs\" % (self.bucketname)\n        output = subprocess.check_output(command.split(\" \"))\n        logging.debug(\"Output of '%s': %s\" % (command, output))\n\n        command = \"aws s3 sync /var/log/sdfs s3://%s/var/log/sdfs\" % (self.bucketname)\n        output = subprocess.check_output(command.split(\" \"))\n        logging.debug(\"Output of '%s': %s\" % (command, output))\n\ndef get_settings_dict_from_string(settings_string):\n    settings = {}\n    for setting in settings_string.split(\",\"):\n        [key, value] = setting.split(\"=\")\n        settings[key] = value\n\n    return settings\n\nclass SnapDirector:\n    def __init__(self, c, volume_id):\n        self.c = c\n        self.volume_id = volume_id\n\n    def create_snapshot(self):\n        self.volume = self.c.get_all_volumes(filters={'volume_id': self.volume_id})[0]\n        skip = False\n        if 'snapshot-director-settings' in self.volume.tags.keys():\n            self.volume_settings = get_settings_dict_from_string(self.volume.tags['snapshot-director-settings'])\n            if 'original-volume-id' in self.volume_settings.keys(): skip = True\n            try:\n                if self.volume_settings['skip-me'] == \"True\": skip = True\n            except KeyError:\n                pass\n        if not skip:\n            self.snapshot = self.c.create_snapshot(self.volume.id)\n            logging.info(\"Creating snapshot for volume %s\" % (self.volume.id))\n        else:\n            logging.info(\"Will not create snapshot for volume %s because it's not an original or has 'skip-me' turned on\" % (self.volume.id))\n            return\n\n        sleep = Sleep()\n        while True:\n            self.snapshot.update()\n            if self.snapshot.status == 'completed':\n                logging.info(\"Finished creating snapshot %s for volume %s\" % (self.snapshot, self.snapshot.volume_id))\n                return\n            sleep.sleep()\n\n\n    def create_volume(self, availability_zone):\n        logging.info(\"Creating volume...\")\n\n        self.volume = self.snapshot.create_volume(zone=availability_zone, volume_type=\"io1\", iops=self.snapshot.volume_size * 30)\n        tag_value = \"original-volume-id=%s,from-snapshot-id=%s,from-snapshot-start-time=%s\" % (self.snapshot.volume_id, self.snapshot.id, self.snapshot.start_time)\n        self.volume.add_tag('snapshot-director-settings', tag_value)\n        self.volume.add_tag('original-volume-id', self.snapshot.volume_id)\n        self.volume.add_tag('from-snapshot-id', self.snapshot.id)\n        self.volume.add_tag('from-snapshot-start-time', self.snapshot.start_time)\n\n        sleep = Sleep()\n        while True:\n
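            # Poll with exponential backoff until EBS reports the new volume as available.\n            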
self.volume.update()\n            if self.volume.status == 'available':\n                self.volume_settings = get_settings_dict_from_string(self.volume.tags['snapshot-director-settings'])\n                logging.info(\"Finished creating new volume %s based on volume %s's snapshot\" % (self.volume.id, self.volume_settings['original-volume-id']))\n                return\n            sleep.sleep()\n\n    def attach_volume(self, instance_id):\n        next_available_device_finder = NextAvailableDeviceFinder()\n\n        device = next_available_device_finder.find_next_device()\n        self.c.attach_volume(self.volume.id, instance_id, device)\n\n        sleep = Sleep()\n        while True:\n            self.volume.update()\n            if self.volume.status == 'in-use':\n                if os.path.exists(self.volume.attach_data.device):\n                    volume_settings = get_settings_dict_from_string(self.volume.tags['snapshot-director-settings'])\n                    logging.info(\"%s mounted on %s is a copy of %s from snapshot %s started at %s\" % (\n                        self.volume.id,\n                        self.volume.attach_data.device,\n                        volume_settings['original-volume-id'],\n                        volume_settings['from-snapshot-id'],\n                        volume_settings['from-snapshot-start-time']\n                        )\n                    )\n                    return\n            sleep.sleep()\n\n    def add_volume_to_dedup_catalog(self):\n        logging.info(\"Backing up volume\")\n        volume_settings = get_settings_dict_from_string(self.volume.tags['snapshot-director-settings'])\n        try:\n            logging.info(\"Going to try dd if=%s of=/media/s3backed0/%s__%s__%s\" % (\n                self.volume.attach_data.device,\n                volume_settings['original-volume-id'],\n                volume_settings['from-snapshot-id'],\n                volume_settings['from-snapshot-start-time']\n                )\n            )\n            output = subprocess.check_output([\"ls\", \"-l\", self.volume.attach_data.device])\n            logging.debug(\"Output of 'ls -l %s': %s\" % (self.volume.attach_data.device, output))\n            output = subprocess.check_output([\n                \"dd\",\n                \"bs=128k\",\n                \"if=%s\" % (self.volume.attach_data.device),\n                \"of=/media/s3backed0/%s__%s__%s\" % (volume_settings['original-volume-id'], volume_settings['from-snapshot-id'], volume_settings['from-snapshot-start-time'])\n                ])\n            logging.debug(\"Output of 'dd': %s\" % (output))\n            logging.info(\"AUDIT_LOG Backup complete for %s\" % (volume_settings['original-volume-id']))\n        except:\n            logging.exception(\"AUDIT_LOG Backup FAILED for %s\" % (volume_settings['original-volume-id']))\n\n    def detach_and_delete_volume(self):\n        logging.info(\"Deleting snapshot\")\n        self.snapshot.delete()\n\n        logging.info(\"Detaching volume\")\n        self.volume.detach()\n\n        sleep = Sleep()\n        while True:\n            self.volume.update()\n            if self.volume.status == 'available':\n                logging.info(\"Volume detached\")\n                self.volume.delete()\n                logging.info(\"Volume deleted\")\n                return\n            sleep.sleep()\n\nif __name__ == \"__main__\":\n    pass\n\n#touch \"/mnt/`date`\"\n#time python snapdirector.py\n#time dd if=/dev/xvdu of=/media/s3backed0/vol-7db9f696__snap-f800a492__2015-06-19_13:48:05\n#python2.6 /usr/bin/s3cmd du s3://290093585298-snapdirector\n#du -hs /opt/sdfs/volumes/s3backed0/*\n#aws s3 ls s3://290093585298-snapdirector | wc -l\n\n","sub_path":"snapdirector.py","file_name":"snapdirector.py","file_ext":"py","file_size_in_byte":10818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"464485831","text":"# Function definitions and lambda usage\n\"\"\" \n- Function declaration\ndef function_name(parameter):\n    code\n\n\n- Various ways to use functions\n- Various return values\n- *args, **kwargs\n- Lambda functions\n\"\"\"\n\n# Where a function is declared matters\n\n# Example 1\ndef hello(world):\n    print(\"Hello\", world)\n\nhello('Python!')\nhello(777)\n\n# Example 2\ndef hello_return(world):\n    val = \"Hello \" + str(world)\n    return val\n\ngreeting = hello_return(\"Python!!!!\")\nprint(greeting)\n\n# Example 3 (multiple return values)\ndef func_mul(x):\n    y1 = x * 100\n    y2 = x * 200\n    y3 = x * 300\n    return y1, y2, y3\n\nval1, val2, val3 = func_mul(100)\nprint(val1, val2, val3)\n\n# Example 3 (returning a tuple)\ndef func_mul2(x):\n    y1 = x * 100\n    y2 = x * 200\n    y3 = x * 300\n    return (y1, y2, y3)\n\nlt = func_mul2(100)\nprint(lt, type(lt))\n\n# Example 4\n# *args, **kwargs\n\ndef args_func(*args):\n    # print(type(args), args)\n    for i, v in enumerate(args):\n        print(i, v)\n\nargs_func('kim')\nargs_func('kim', 'Park')\nargs_func('kim', 'Park', 'Lee')\n\n# kwargs\ndef kwargs_func(**kwargs):\n    for k, v in kwargs.items():\n        print(k, v)\n\nkwargs_func(name1='Kim', name2='Park', name3='Lee')\n\n# Mixing all argument kinds\ndef example_mul(arg1, arg2, *args, **kwargs):\n    print(arg1, arg2, args, kwargs)\n\nexample_mul(10, 20)\nexample_mul(10, 20, 'park', 'kim', age1=24, age2=35)\n\n# Example 5\n# Nested functions (closures)\n# decorator\ndef nested_func(num):\n    def func_in_func(num):\n        print(num)\n    print(\"in func\")\n    func_in_func(num + 1000)\n\nnested_func(1000)\n\n# Example 6 (type hints)\ndef func_mul3(x : int) -> tuple:\n    y1 = x * 100\n    y2 = x * 200\n    y3 = x * 300\n    return (y1, y2, y3)\n\nprint(func_mul3(5))\n\n# Lambda expression examples\n\"\"\"\n- Lambda expressions: save memory, improve readability, keep code concise\n- A function creates an object -> resources (memory) are allocated\n- A lambda is evaluated immediately -> memory is reclaimed\n\"\"\"\n\n# A regular function -> assigned to a variable\ndef mul_10(num : int) -> int:\n    return num * 10\n\nvar_func = mul_10\nprint(var_func)\nprint(var_func(10))\n\nlambda_mul_10 = lambda num: num * 10\n\nprint(lambda_mul_10(10))\n\ndef func_final(x, y, func):\n    print(x * y * func(10))\n\nfunc_final(10, 10, lambda_mul_10)\n\nprint(func_final(10, 10, lambda x : x * 1000))\n\n\n\"\"\"\nClass declarations and understanding self\n\n** The difference between a class and an instance matters\n** Namespace: the space where an object is stored when it is instantiated\n** Class variables: usable directly, created before any object\n** Instance variables: exist separately for each object\n\n- Class declaration\nclass ClassName:\n    method\n    method\n    method\n\n- Class namespaces and self\n- Class and instance variables\n- self\n\n\nClass inheritance and multiple inheritance\n- Class inheritance\n- Class inheritance example code\n- Multiple inheritance\n\"\"\"\n\n# Declaration\n# Example 1\nclass UserInfo:\n    # attributes and methods\n    def __init__(self, name):\n        self.name = name\n    def user_info_p(self):\n        print(\"Name : \", self.name)\n\n# Namespaces\nuser1 = UserInfo(\"Kim\")\nuser1.user_info_p()\nuser2 = UserInfo(\"Park\")\nuser2.user_info_p()\n\nprint(id(user1))\nprint(id(user2))\nprint(user1.__dict__)\nprint(user2.__dict__)\n\n# Example 2\n# Understanding self\nclass SelfTest:\n    def function1():\n        print('function1 called!')\n    def function2(self):\n        print('function2 called!')\n\n# self_test = SelfTest()\n# self_test.function1()\n\nclass WareHouse:\n    # class variable\n    stock_num = 0\n    def __init__(self, name):\n        self.name = name\n        WareHouse.stock_num += 1\n    def __del__(self):\n        WareHouse.stock_num -= 1\n\nuser1 = WareHouse('Kim')\nuser2 = WareHouse('Park')\nuser3 = WareHouse('Lee')\n\nprint(user1.__dict__)\nprint(user2.__dict__)\nprint(user3.__dict__)\nprint(WareHouse.__dict__)\n\n\n# Inheritance and multiple inheritance\n# Example 1\n# Inheritance basics\n# A superclass (parent) and a subclass (child) -> all attributes and methods are available\n\n# e.g. ramen -> attributes (kind, company, flavor, noodle type, name) : parent\nclass Car:\n    \"\"\"Parent Class\"\"\"\n    def __init__(self, tp, color):\n        self.type = tp\n        self.color = color\n    \n    def show(self):\n        return 'Car Class \"show Method!\"'\n\nclass BmwCar(Car):\n    \"\"\"Sub Class\"\"\"\n    def __init__(self, car_name, tp, color):\n        super().__init__(tp, color)\n        self.car_name = car_name\n    \n    def show_model(self) -> str:\n        return \"Your Car Name : %s\" % self.car_name\n\nclass BenzCar(Car):\n    \"\"\"Sub Class\"\"\"\n    def __init__(self, car_name, tp, color):\n        super().__init__(tp, color)\n        self.car_name = car_name\n    \n    def show_model(self) -> str:\n        return \"Your Car Name : %s\" % self.car_name\n    \n    def show(self):\n        print(super().show())\n        return \"Car Info : %s %s %s\" % (self.car_name, self.type, self.color)\n\n\n# Basic usage\nmodel1 = BmwCar('520d', 'sedan', 'red')\n\nprint(model1.color)  # Super\nprint(model1.type)  # Super\nprint(model1.car_name)  # Sub\nprint(model1.show())  # Super\nprint(model1.show_model())  # Sub\nprint(model1.__dict__)\n\n# Method overriding\nmodel2 = BenzCar(\"220d\", \"suv\", \"black\")\nprint(model2.show())\n\n# Parent Method Call\nmodel3 = BenzCar(\"350s\", 'sedan', 'silver')\nprint(model3.show())\n\n# Inheritance info (returned as a list)\nprint(BmwCar.mro())\nprint(BenzCar.mro())\n\n# Example 2\n# Multiple inheritance\n\nclass X():\n    pass\n\nclass Y():\n    pass\n\nclass Z():\n    pass\n\nclass A(X, Y):\n    pass\n\nclass B(Y, Z):\n    pass\n\nclass M(B, A, Z):\n    pass\n\nprint(M.mro())\nprint(A.mro())","sub_path":"pythonLectureCode/summary.py","file_name":"summary.py","file_ext":"py","file_size_in_byte":5510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"457891308","text":"#!/usr/bin/env python\n\nglobal CellInfo_live, CellInfo_cached\nCellInfo_live=None\nCellInfo_cached=None\n\nimport unittest\nimport sys, os\nfrom ConfigParser import ConfigParser\n\nsys.path.append(\"..\")\n\nfrom afs.util.AfsConfig import setupDefaultConfig\nfrom afs.util.options import define, options\nfrom afs.service import CellService\n\nclass TestCellServiceMethods(unittest.TestCase):\n    \"\"\"\n    Tests CellService Methods\n    \"\"\"\n    \n    def setUp(self):\n        \"\"\"\n        setup\n        \"\"\"\n        self.CellService = CellService.CellService()\n        self.TestCfg=ConfigParser()\n        self.TestCfg.read(options.setup)\n        self.numFSs=int(self.TestCfg.get(\"CellService\", \"numFSs\"))\n        self.allDBs=self.TestCfg.get(\"CellService\", \"allDBs\").split(\",\")\n        self.minUbikDBVersion=self.TestCfg.get(\"general\",\"minUbikDBVersion\")\n        self.allDBs.sort()\n        self.FS=self.TestCfg.get(\"CellService\", \"FS\")\n        self.FsUUID=self.TestCfg.get(\"CellService\", \"FsUUID\")\n        return\n    \n    def test_getDBList_live(self) :\n        DBList=CellInfo_live.DBServers\n        DB_IPs=[]\n        for db in DBList :\n            DB_IPs.append(db['ipaddrs'][0])\n        DB_IPs.sort()\n        self.assertEqual(self.allDBs, DB_IPs)\n        return\n\n    def test_getDBList_cached(self) :\n        DBList=CellInfo_cached.DBServers\n        DB_IPs=[]\n        for db in DBList :\n            DB_IPs.append(db['ipaddrs'][0])\n        DB_IPs.sort()\n        self.assertEqual(self.allDBs, DB_IPs)\n        return\n    \n    def test_getFSServers_live(self) :\n        FSList=CellInfo_live.FileServers\n        self.assertEqual(self.numFSs, len(FSList))\n        return\n\n    def test_getFSServers_cached(self) :\n        FSList=CellInfo_cached.FileServers\n        self.assertEqual(self.numFSs, len(FSList))\n        return\n    \n    def test_getFsUUID_live(self) :\n        uuid=self.CellService.getFsUUID(self.FS, cached=False)\n        self.assertEqual(self.FsUUID, uuid)\n        return\n\n    def test_getFsUUID_cached(self) :\n        uuid=self.CellService.getFsUUID(self.FS,cached=True)\n        self.assertEqual(self.FsUUID, uuid)\n        return\n    \n    def test_PTDBVersion_cached(self):\n        DBVersion=CellInfo_cached.PTDBVersion\n        self.assertTrue((DBVersion>self.minUbikDBVersion))\n        return\n    \n    def test_PTDBVersion_live(self):\n        DBVersion = CellInfo_live.PTDBVersion\n        self.assertTrue((DBVersion>self.minUbikDBVersion))\n        return\n\n    def test_PTDBSyncSite_cached(self):\n        DBSyncSite=CellInfo_cached.PTDBSyncSite\n        self.assertTrue((DBSyncSite in self.allDBs))\n        return\n    \n    def test_PTDBSyncSite_live(self):\n        DBSyncSite=CellInfo_live.PTDBSyncSite\n
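        # The live sync site must be one of the configured DB servers.\n        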
self.assertTrue((DBSyncSite in self.allDBs))\n return\n\n def test_VLDBVersion_cached(self):\n DBVersion=CellInfo_cached.VLDBVersion\n self.assertTrue((DBVersion>self.minUbikDBVersion))\n return\n \n def test_VLDBVersion_live(self):\n DBVersion=CellInfo_live.VLDBVersion\n self.assertTrue((DBVersion>self.minUbikDBVersion))\n return\n\n def test_VLDBSyncSite_cached(self):\n DBSyncSite=CellInfo_cached.VLDBSyncSite\n self.assertTrue((DBSyncSite in self.allDBs))\n return\n \n def test_VLDBSyncSite_live(self):\n DBSyncSite=CellInfo_live.VLDBSyncSite\n self.assertTrue((DBSyncSite in self.allDBs))\n return\n\nif __name__ == '__main__' :\n define(\"setup\", default=\"./Test.cfg\", help=\"path to Testconfig\")\n setupDefaultConfig()\n if not os.path.exists(options.setup) :\n sys.stderr.write(\"Test setup file %s does not exist.\\n\" % options.setup)\n sys.exit(2)\n suite = unittest.TestLoader().loadTestsFromTestCase(TestCellServiceMethods)\n CS=CellService.CellService()\n CellInfo_live=CS.getCellInfo(cached=False)\n CellInfo_cached=CS.getCellInfo(cached=True)\n unittest.TextTestRunner(verbosity=2).run(suite)\n","sub_path":"tests/CellServiceTest.py","file_name":"CellServiceTest.py","file_ext":"py","file_size_in_byte":3954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"299817410","text":"r\"\"\"\nDefinition\n----------\n\nParameters for this model are the core axial ratio $X_{core}$ and a shell\nthickness $t_{shell}$, which are more often what we would like to determine\nand make the model better behaved, particularly when polydispersity is\napplied, than the four independent radii used in the original parameterization\nof this model.\n\n\n.. figure:: img/core_shell_ellipsoid_geometry.png\n\nThe geometric parameters of this model are shown in the diagram above, which\nshows (a) a cut through at the circular equator and (b) a cross section through\nthe poles, of a prolate ellipsoid.\n\nWhen $X_{core}$ < 1 the core is oblate; when $X_{core}$ > 1 it is prolate.\n$X_{core}$ = 1 is a spherical core.\n\nFor a fixed shell thickness $X_{polar shell}$ = 1, to scale $t_{shell}$\npro-rata with the radius set or constrain $X_{polar shell}$ = $X_{core}$.\n\n.. note::\n\n When including an $S(q)$, the radius in $S(q)$ is calculated to be that of\n a sphere with the same 2nd virial coefficient of the outer surface of the\n ellipsoid. This may have some undesirable effects if the aspect ratio of the\n ellipsoid is large (ie, if $X << 1$ or $X >> 1$), when the $S(q)$\n - which assumes spheres - will not in any case be valid. Generating a\n custom product model will enable separate effective volume fraction and\n effective radius in the $S(q)$.\n\nIf SAS data are in absolute units, and the SLDs are correct, then scale should\nbe the total volume fraction of the \"outer particle\". When $S(q)$ is introduced\nthis moves to the $S(q)$ volume fraction, and scale should then be 1.0, or\ncontain some other units conversion factor (for example, if you have SAXS data).\n\nThe calculation of intensity follows that for the solid ellipsoid, but\nwith separate terms for the core-shell and shell-solvent boundaries.\n\n.. math::\n\n P(q,\\alpha) = \\frac{\\text{scale}}{V} F^2(q,\\alpha) + \\text{background}\n\nwhere\n\n.. In following equation SK changed radius\\_equat\\_core to R_e\n.. 
math::\n :nowrap:\n\n \\begin{align*}\n F(q,\\alpha) = &f(q,R_e,R_e.x_{core},\\alpha) \\\\\n &+ f(q,R_e + t_{shell},\n R_e.x_{core} + t_{shell}.x_{polar shell},\\alpha)\n \\end{align*}\n\nwhere\n\n.. math::\n\n f(q,R_e,R_p,\\alpha) = \\frac{3 \\Delta \\rho V (\\sin(qr)\n - qr\\cos(qr)}\n {(qr)^3}\n\nfor\n\n.. math::\n\n r = \\left[ R_e^2 \\sin^2 \\alpha + R_p^2 \\cos^2 \\alpha \\right]^{1/2}\n\n\n$\\alpha$ is the angle between the axis of the ellipsoid and $\\vec q$,\n$V = (4/3)\\pi R_pR_e^2$ is the volume of the ellipsoid , $R_p$ is the\npolar radius along the rotational axis of the ellipsoid, $R_e$ is the\nequatorial radius perpendicular to the rotational axis of the ellipsoid,\n$t_{shell}$ is the thickness of the shell at the equator,\nand $\\Delta \\rho$ (the contrast) is the scattering length density difference,\neither $(\\rho_{core} - \\rho_{shell})$ or $(\\rho_{shell} - \\rho_{solvent})$.\n\nFor randomly oriented particles:\n\n.. math::\n\n F^2(q)=\\int_{0}^{\\pi/2}{F^2(q,\\alpha)\\sin(\\alpha)d\\alpha}\n\nFor oriented ellipsoids the *theta*, *phi* and *psi* orientation parameters\nwill appear when fitting 2D data, see the :ref:`elliptical-cylinder` model\nfor further information.\n\nReferences\n----------\nsee for example:\n\n#. Kotlarchyk, M.; Chen, S.-H. *J. Chem. Phys.*, 1983, 79, 2461\n\n#. Berr, S. *J. Phys. Chem.*, 1987, 91, 4760\n\nAuthorship and Verification\n----------------------------\n\n* **Author:** NIST IGOR/DANSE **Date:** pre 2010\n* **Last Modified by:** Richard Heenan (reparametrised model) **Date:** 2015\n* **Last Reviewed by:** Steve King **Date:** March 27, 2019\n\"\"\"\n\nimport numpy as np\nfrom numpy import inf, sin, cos, pi\n\nname = \"core_shell_ellipsoid\"\ntitle = \"Form factor for an spheroid ellipsoid particle with a core shell structure.\"\ndescription = \"\"\"\n [core_shell_ellipsoid] Calculates the form factor for an spheroid\n ellipsoid particle with a core_shell structure.\n The form factor is averaged over all possible\n orientations of the ellipsoid such that P(q)\n = scale*/Vol + bkg, where f is the\n single particle scattering amplitude.\n [Parameters]:\n radius_equat_core = equatorial radius of core,\n x_core = ratio of core polar/equatorial radii,\n thick_shell = equatorial radius of outer surface,\n x_polar_shell = ratio of polar shell thickness to equatorial shell thickness,\n sld_core = SLD_core\n sld_shell = SLD_shell\n sld_solvent = SLD_solvent\n background = Incoherent bkg\n scale =scale\n Note:It is the users' responsibility to ensure\n that shell radii are larger than core radii.\n oblate: polar radius < equatorial radius\n prolate : polar radius > equatorial radius - this new model will make this easier\n and polydispersity integrals more logical (as previously the shell could disappear).\n \"\"\"\ncategory = \"shape:ellipsoid\"\n\n# pylint: disable=bad-whitespace, line-too-long\n# [\"name\", \"units\", default, [lower, upper], \"type\", \"description\"],\nparameters = [\n [\"radius_equat_core\",\"Ang\", 20, [0, inf], \"volume\", \"Equatorial radius of core\"],\n [\"x_core\", \"None\", 3, [0, inf], \"volume\", \"axial ratio of core, X = r_polar/r_equatorial\"],\n [\"thick_shell\", \"Ang\", 30, [0, inf], \"volume\", \"thickness of shell at equator\"],\n [\"x_polar_shell\", \"\", 1, [0, inf], \"volume\", \"ratio of thickness of shell at pole to that at equator\"],\n [\"sld_core\", \"1e-6/Ang^2\", 2, [-inf, inf], \"sld\", \"Core scattering length density\"],\n [\"sld_shell\", \"1e-6/Ang^2\", 1, [-inf, inf], \"sld\", \"Shell scattering 
length density\"],\n [\"sld_solvent\", \"1e-6/Ang^2\", 6.3, [-inf, inf], \"sld\", \"Solvent scattering length density\"],\n [\"theta\", \"degrees\", 0, [-360, 360], \"orientation\", \"elipsoid axis to beam angle\"],\n [\"phi\", \"degrees\", 0, [-360, 360], \"orientation\", \"rotation about beam\"],\n ]\n# pylint: enable=bad-whitespace, line-too-long\n\nsource = [\"lib/sas_3j1x_x.c\", \"lib/gauss76.c\", \"core_shell_ellipsoid.c\"]\nhave_Fq = True\nradius_effective_modes = [\n \"average outer curvature\", \"equivalent volume sphere\",\n \"min outer radius\", \"max outer radius\",\n ]\n\ndef random():\n \"\"\"Return a random parameter set for the model.\"\"\"\n volume = 10**np.random.uniform(5, 12)\n outer_polar = 10**np.random.uniform(1.3, 4)\n outer_equatorial = np.sqrt(volume/outer_polar) # ignore 4/3 pi\n # Use a distribution with a preference for thin shell or thin core\n # Avoid core,shell radii < 1\n thickness_polar = np.random.beta(0.5, 0.5)*(outer_polar-2) + 1\n thickness_equatorial = np.random.beta(0.5, 0.5)*(outer_equatorial-2) + 1\n radius_polar = outer_polar - thickness_polar\n radius_equatorial = outer_equatorial - thickness_equatorial\n x_core = radius_polar/radius_equatorial\n x_polar_shell = thickness_polar/thickness_equatorial\n pars = dict(\n #background=0, sld=0, sld_solvent=1,\n radius_equat_core=radius_equatorial,\n x_core=x_core,\n thick_shell=thickness_equatorial,\n x_polar_shell=x_polar_shell,\n )\n return pars\n\nq = 0.1\n# tests had in old coords theta=0, phi=0; new coords theta=90, phi=0\nqx = q*cos(pi/6.0)\nqy = q*sin(pi/6.0)\n# 11Jan2017 RKH sorted tests after redefinition of angles\ntests = [\n # Accuracy tests based on content in test/utest_coreshellellipsoidXTmodel.py\n [{'radius_equat_core': 200.0,\n 'x_core': 0.1,\n 'thick_shell': 50.0,\n 'x_polar_shell': 0.2,\n 'sld_core': 2.0,\n 'sld_shell': 1.0,\n 'sld_solvent': 6.3,\n 'background': 0.001,\n 'scale': 1.0,\n }, 1.0, 0.00189402],\n\n # Additional tests with larger range of parameters\n [{'background': 0.01}, 0.1, 11.6915],\n\n [{'radius_equat_core': 20.0,\n 'x_core': 200.0,\n 'thick_shell': 54.0,\n 'x_polar_shell': 3.0,\n 'sld_core': 20.0,\n 'sld_shell': 10.0,\n 'sld_solvent': 6.0,\n 'background': 0.0,\n 'scale': 1.0,\n }, 0.01, 8688.53],\n\n # 2D tests\n [{'background': 0.001,\n 'theta': 90.0,\n 'phi': 0.0,\n }, (0.4, 0.5), 0.00690673],\n\n [{'radius_equat_core': 20.0,\n 'x_core': 200.0,\n 'thick_shell': 54.0,\n 'x_polar_shell': 3.0,\n 'sld_core': 20.0,\n 'sld_shell': 10.0,\n 'sld_solvent': 6.0,\n 'background': 0.01,\n 'scale': 0.01,\n 'theta': 90.0,\n 'phi': 0.0,\n }, (qx, qy), 0.01000025],\n]\n","sub_path":"sasmodels/models/core_shell_ellipsoid.py","file_name":"core_shell_ellipsoid.py","file_ext":"py","file_size_in_byte":8485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"361957184","text":"import pyglet\nfrom pyglet.window import key\nimport random\n\n\ndef center_image(image):\n \"\"\"center image\"\"\"\n image.anchor_x = image.width / 2\n image.anchor_y = image.height / 2\n\n\n# init program\nscreen_width = 760\nscreen_height = 600\nwindow = pyglet.window.Window(screen_width, screen_height, \"Hello World\")\nbatch = pyglet.graphics.Batch()\n# load images\nfighter_image = pyglet.resource.image(\"fighter.png\")\nmissile_image = pyglet.resource.image(\"missile.png\")\nunit_image = pyglet.resource.image(\"fighter_2.png\")\n# load music\nbackground_music = 
pyglet.resource.media('background.mp3')\nbackground_music.play()\n\ncenter_image(fighter_image)\ncenter_image(missile_image)\ncenter_image(unit_image)\n\n\nclass BasicObject(pyglet.sprite.Sprite):\n def __init__(self, *args, **kwargs):\n super(BasicObject, self).__init__(*args, **kwargs)\n self.velocity_x, self.velocity_y = 0.0, 0.0\n\n def update(self, dt):\n self.x += self.velocity_x * dt\n self.y += self.velocity_y * dt\n self.check_bounds()\n\n def check_bounds(self):\n min_x = -self.image.width / 2\n min_y = -self.image.height / 2\n max_x = 800 + self.image.width / 2\n max_y = 600 + self.image.height / 2\n if self.x < min_x:\n self.x = max_x\n elif self.x > max_x:\n self.x = min_x\n if self.y < min_y:\n self.y = max_y\n elif self.y > max_y:\n self.y = min_y\n\nclass Fighter(BasicObject):\n def __init__(self, *args, **kwargs):\n super(Fighter, self).__init__(*args, **kwargs)\n\n\n# units\ndef units(num, batch=None):\n units = []\n for i in range(num):\n unit_x = random.randint(0, screen_width)\n unit_y = random.randint(0, screen_height)\n new_unit = BasicObject(img=unit_image, x=unit_x, y=unit_y, batch=batch)\n new_unit.rotation = random.randint(0, 360)\n new_unit.velocity_x = random.random() * 40\n new_unit.velocity_y = random.random() * 40\n units.append(new_unit)\n return units\n\n\nfighter_x = screen_width // 2\nfighter_y = 10 + fighter_image.height // 2\n# create components\nscore_label = pyglet.text.Label(text=\"Score: {0}\".format(0), x=10, y=screen_height - 20,\n batch=batch) # positive direction of y axis is upward\ntime_label = pyglet.text.Label(text=\"Time: {0}\".format(0), x=10, y=screen_height - 40, batch=batch)\nfighter = pyglet.sprite.Sprite(img=fighter_image, x=fighter_x, y=fighter_y, batch=batch)\n\n\n@window.event\ndef on_draw():\n window.clear()\n\n batch.draw()\n\n\nenemy_units = units(10, batch)\n\ndef update(dt):\n for unit in enemy_units:\n unit.update(dt)\n\n\nif __name__ == '__main__':\n pyglet.clock.schedule_interval(update, 1 / 120.0)\n pyglet.app.run()\n","sub_path":"Python/Pyglet/hello_world.py","file_name":"hello_world.py","file_ext":"py","file_size_in_byte":2754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"355293571","text":"from MiroClasses.MiroAPI_selector import SelectedAPI as MiroAPI\nfrom MiroClasses.MiroAPI_agx import agxVecify as agxVec\nimport numpy as np\n\nfrom MiroClasses import MiroModule as MM\nfrom MiroClasses import MiroComponent as MC\n\nimport sys\n\ntry:\n import agx\nexcept:\n print(\"Could not import AGX to run Robotcourse\")\n\nimport agxCollide\nimport agxOSG\nimport agxSDK\nimport agxPython\nimport agxIO\nimport agxModel\nimport agxRender\n\nimport time as TIME\nimport math\nimport random\n\nwidth = 8\n\nclass Timer(agxSDK.ContactEventListener):\n def __init__(self, trigger_object):\n super().__init__(agxSDK.ContactEventListener.ALL)\n self.trigger = trigger_object\n self.start = TIME.time()\n self.checkpoints = []\n self.checks = []\n\n def addCheckpoint(self, checkpoint_object):\n self.checkpoints.append(checkpoint_object)\n self.checks.append(False)\n\n def impact(self, time, contact):\n # Check if all checkpoints have been reached\n complete = True\n for i in range(len(self.checks)):\n if(not self.checks[i] and contact.contains(self.checkpoints[i]) >= 0):\n self.checks[i] = True\n print('Checkpoint '+str(i+1)+' reached!')\n if not self.checks[i]:\n complete = False\n \n if(complete and contact.contains(self.trigger) >= 0):\n if TIME.time() - self.start > 10:\n timenum = 
TIME.time() - self.start\n                seconds = str(round(timenum % 60, 2))\n                minutes = round(np.floor(timenum/60))\n                if minutes < 10:\n                    minutes = '0'+str(minutes)\n                else:\n                    minutes = str(minutes)\n                print('Time: '+minutes+':'+seconds)\n                self.start = TIME.time()\n                for i in range(len(self.checks)):\n                    self.checks[i] = False\n        return agxSDK.ContactEventListener.KEEP_CONTACT\n\ndef buildArena(arena_pos):\n    sim = agxPython.getContext().environment.getSimulation()\n    app = agxPython.getContext().environment.getApplication()\n    root = agxPython.getContext().environment.getSceneRoot()\n\n    arena_size = [width, width, 0.2]\n    h = 0.35\n    \n    floor = agx.RigidBody( agxCollide.Geometry( agxCollide.Box(arena_size[0]/2, arena_size[1]/2, arena_size[2]/2)))\n    floor.setPosition(arena_pos[0], arena_pos[1], arena_pos[2]-arena_size[2]/2)\n    floor.setMotionControl(1)\n    sim.add(floor)\n    agxOSG.setDiffuseColor(agxOSG.createVisual(floor, root), agxRender.Color.Gray())\n\n    # Octagon sides\n    sides = 8\n    skip_sides = [9]\n    side_len = width/(1+np.sqrt(2)) + arena_size[2]/2/np.sqrt(2)\n    base_pos = agx.Vec3(arena_pos[0], arena_pos[1], arena_pos[2]-arena_size[2]/2+h/2)\n    for w in range(sides):\n        if w not in skip_sides:\n            theta = -w*np.pi/4\n            rot = agx.Quat(theta, agx.Vec3(0,0,1))\n            rot_pos = agx.Vec3(np.sin(theta)*width/2, -np.cos(theta)*width/2, 0)\n\n            wall = agx.RigidBody( agxCollide.Geometry( agxCollide.Box(side_len/2, arena_size[2]/2, h/2)))\n            wall.setPosition(base_pos + rot_pos)\n            wall.setMotionControl(1)\n            wall.setRotation(rot)\n            sim.add(wall)\n            agxOSG.setDiffuseColor(agxOSG.createVisual(wall, root), agxRender.Color.DarkGray())\n\n    # Ramp up to the course\n    ramp_dim = [1.4, side_len, 0.2] # *np.cos(np.pi/4)\n    ramp = agx.RigidBody( agxCollide.Geometry( agxCollide.Box(ramp_dim[0]/2, ramp_dim[1]/2, ramp_dim[2]/2)))\n    theta = -np.arcsin(ramp_dim[2]/ramp_dim[0])/2\n    ramp.setPosition(arena_pos[0]-arena_size[0]/2-ramp_dim[0]/2*np.cos(theta)-ramp_dim[2]/2*np.sin(theta), arena_pos[1], arena_pos[2]-arena_size[2]*3/4) # +arena_size[1]/2-ramp_dim[1]/2\n    ramp.setRotation(agx.Quat(theta, agx.Vec3(0,1,0)))\n    ramp.setMotionControl(1)\n    sim.add(ramp)\n    agxOSG.setDiffuseColor(agxOSG.createVisual(ramp, root), agxRender.Color.Gray())\n\n    obstacles(sim, root, arena_pos)\n\ndef obstacles(sim, root, arena_pos):\n    myObstacles = [] # list containing all obstacle objects\n    h = arena_pos[2]\n    # start platform\n    start_dims = [1.5*(width/14), 1.5*(width/14), 0.06]\n    start_pos = [-(arena_pos[1]+width/4),arena_pos[0], h+start_dims[2]/2]\n    start_box = addboxx(sim, root, start_dims, start_pos)\n    myObstacles.append(start_box)\n\n    # dims = [0.1*(width/14), 1.5*(width/14), 0.3]\n    # pos = [start_pos[0]+start_dims[0]/2, arena_pos[0], h+dims[2]/2]\n    # addboxx(sim, root, dims, pos)\n    \n    # dims = [0.1*(width/14), 1.5*(width/14), 0.3]\n    # pos = [start_pos[0]-start_dims[0]/2, arena_pos[0], h+dims[2]/2]\n    # addboxx(sim, root, dims, pos)\n\n    # timer (list.append returns None, so pass the box itself as the trigger)\n    timer = Timer(start_box)\n    sim.add(timer)\n    \n\n\n    # # Random stuff in the first quarter\n    # dims = [0.2*(width/14), 0.2*(width/14), 0.8]\n    # pos = [-4*(width/14), 0, h+dims[2]/2]\n    # addboxx(sim, root, dims, pos)\n    \n    # dims = [0.4*(width/14), 0.25*(width/14), 0.2]\n    # pos = [-3*(width/14), 1.5*(width/14), h+dims[2]/2]\n    # addboxx(sim, root, dims, pos)\n\n\n    # for i in range(30):\n    #     x = -0.5*(width/14) - random.random()*4.75*(width/14)\n    #     y = -0.5*(width/14) + random.random()*6*(width/14)\n    #     dims = [random.random()*0.6*(width/14), random.random()*0.6*(width/14), random.random()*0.6]\n    #     pos = agx.Vec3(x, y, 0)\n    
# if pos.length() < 1.5*(width/14):\n # pos.setLength(1.5*(width/14)+random.random()*3.75*(width/14))\n # if pos.length() > 7.0*(width/14):\n # pos.setLength(1.5*(width/14)+random.random()*5.5*(width/14))\n # pos.set(h+dims[2]/2, 2)\n # addboxx(sim, root, dims, pos)\n \n # Pole in the middle with walls around\n # dims = [0.28*(width/14), 1.4*(width/14)]\n # pos = [0, 0, h+dims[1]/2]\n # can = addcylinder(sim, root, dims, pos, texture='textures/schrodbull.png')\n # can.setRotation(agx.Quat(np.pi/2, agx.Vec3(1,0,0)))\n # pos = [0, 0, h+dims[1]]\n # dims = [0.28*(width/14), 0.025*(width/14)]\n # pos[2] = pos[2] + dims[1]/2\n # lid = addcylinder(sim, root, dims, pos, texture='textures/sodacan_lid.png')\n # lid.setRotation(agx.Quat(np.pi/2, agx.Vec3(1,0,0)))\n\n\n # indelningsvägg\n dims = [width,0.3*(width/14) , 0.4]\n pos = [arena_pos[0], arena_pos[2], h+dims[2]/2]\n section_wall = addboxx(sim, root, dims, pos)\n myObstacles.append(section_wall)\n\n dims = [0.3*(width/14), width, 0.4]\n pos = [arena_pos[0], arena_pos[1], h+dims[2]/2]\n section_wall = addboxx(sim, root, dims, pos)\n myObstacles.append(section_wall)\n # # Seesaw board\n # dims = [2.1*(width/14), 0.25*(width/14), 0.3]\n # pos = [2.1*(width/14), 0.4*(width/14), h+dims[2]/2]\n # addboxx(sim, root, dims, pos)\n # seesaw(sim, root, [3.75*(width/14),0.9*(width/14),h*(width/14)], -0.85*np.pi, h=0.1)\n # dims = [0.5*(width/14), 3.8*(width/14), 0.18]\n # pos = [4.8*(width/14), 3.15*(width/14), h+dims[2]/2]\n # addboxx(sim, root, dims, pos)\n\n # # Ballroom walls\n # dims = [0.25*(width/14), 4.3*(width/14), 0.3]\n # pos = [6.0*(width/14), 0.0, h+dims[2]/2]\n # addboxx(sim, root, dims, pos)\n\n # dims = [0.25*(width/14), 1.0*(width/14), 0.3]\n # pos = [2.1*(width/14), -1.0*(width/14), h+dims[2]/2]\n # addboxx(sim, root, dims, pos)\n\n # dims = [0.25*(width/14), 1.4*(width/14), 0.3]\n # pos = [3.0*(width/14), -0.4*(width/14), h+dims[2]/2]\n # addboxx(sim, root, dims, pos)\n\n # dims = [0.25*(width/14), 2.4*(width/14), 0.3]\n # pos = [2.5*(width/14), -3.0*(width/14), h+dims[2]/2]\n # addboxx(sim, root, dims, pos)\n\n # dims = [1.1*(width/14), 0.25*(width/14), 0.3]\n # pos = [3.1*(width/14), -3.1*(width/14), h+dims[2]/2]\n # addboxx(sim, root, dims, pos)\n\n # dims = [0.66*(width/14), 0.25*(width/14), 0.3]\n # pos = [2.3*(width/14), -1.65*(width/14), h+dims[2]/2]\n # wall_1 = addboxx(sim, root, dims, pos)\n # dims = [2.8*(width/14), 0.25*(width/14), 0.3]\n # pos = [3.95*(width/14), -2.0*(width/14), h+dims[2]/2]\n # wall_2 = addboxx(sim, root, dims, pos)\n # dims = [3.91*(width/14), 0.25*(width/14), 0.3]\n # pos = [4.65*(width/14), -3.45*(width/14), h+dims[2]/2]\n # wall_3 = addboxx(sim, root, dims, pos)\n # wall_1.setRotation(agx.Quat(-np.pi/4, agx.Vec3(0,0,1)))\n # wall_2.setRotation(agx.Quat(-np.pi/4, agx.Vec3(0,0,1)))\n # wall_3.setRotation(agx.Quat( np.pi/4, agx.Vec3(0,0,1)))\n\n # # Ballroom balls\n # for i in range(200):\n # x = 4.0*(width/14) + random.random()*2.8*(width/14)\n # y = 0.75*(width/14) - random.random()*2.5*(width/14)\n # rad = 0.025*(width/14) + random.random()*0.075*(width/14)\n # pos = agx.Vec3(x, y, h+rad+3*random.random()*rad)\n # addball(sim, root, rad, pos, Fixed=False)\n\n # Climbing ramp\n # dx = 0.8\n # bot_tilt = 0.0445\n # dims = [dx*(width/14), 2.5*(width/14), 0.6]\n # bot_tilt = np.arcsin(0.45/(2.5*4))\n # dif = dims[1]/2*np.cos(bot_tilt)-0.002*np.sin(bot_tilt)\n # dh = 2*np.sin(bot_tilt)*dif\n # for i in range(4):\n # angle = (i+1)*np.pi\n # pos = [2*(width/14)-dx*i*(width/14), 
-4.7*(width/14)-0.015*(-1)**i*(width/14), -0.3+h+(i+1/2)*dh]\n # hip = addboxx(sim, root, dims, pos)\n # hip.setRotation(agx.Quat( bot_tilt, agx.Vec3(1,0,0)))\n # hip.setRotation(hip.getRotation()*agx.Quat(angle, agx.Vec3(0,0,1)))\n # addboxx(sim, root, [2*dx*(width/14), 1.1*(width/14), dims[2]], [2*(width/14)-dx*(i+1/2)*(width/14), -4.7*(width/14)-1.8*((-1)**i)*(width/14), -0.3+h+(i+1)*dh])\n \n # # Bridge boxes\n # dims = [1.5*(width/14), 1.8*(width/14), 0.45*(width/14)]\n # pos = [-1.5*(width/14), -3.25*(width/14), h+dims[2]/2]\n # addboxx(sim, root, dims, pos)\n\n # dims = [1.5*(width/14), 1.8*(width/14), 0.45*(width/14)]\n # pos = [-4.5*(width/14), -3.25*(width/14), h+dims[2]/2]\n # addboxx(sim, root, dims, pos)\n \n # # Bridge part\n # bridge = addboxx(sim, root, [1.5*(width/14), 0.5*(width/14), 0.08], [-3.0*(width/14), -3.25*(width/14), h+0.45*(width/14)-0.04])\n # timer.addCheckpoint(bridge)\n\n # # final ramp\n # addboxx(sim, root, [0.1*(width/14), 2.5*(width/14), 0.5*(width/14)], [-6.5*(width/14), -2.05*(width/14), h+0.5*(width/14)/2])\n # addboxx(sim, root, [0.1*(width/14), 2.5*(width/14), 0.5*(width/14)], [-5.5*(width/14), -2.05*(width/14), h+0.5*(width/14)/2])\n\n # hip = addboxx(sim, root, [0.9*(width/14), 1.5*(width/14), 0.1*(width/14)], [-6.0*(width/14), -1.53*(width/14), h+0.026*(width/14)+np.sin(0.15)*1.5/2])\n # hip.setRotation(agx.Quat(0.15, agx.Vec3(1,0,0)))\n\n # addboxx(sim, root, [0.9*(width/14), 2.0*(width/14), 0.2*(width/14)], [-6.0*(width/14), -2.25*(width/14), h+0.075*(width/14)])\n # addboxx(sim, root, [1.4*(width/14), 0.8*(width/14), 0.2*(width/14)], [-5.8*(width/14), -3.7*(width/14), h+0.45*(width/14)-0.1])\n\n # hip = addboxx(sim, root, [0.9*(width/14), 1.5*(width/14), 0.1*(width/14)], [-6.0*(width/14), -3.0*(width/14), h+0.026+np.sin(0.15)*1.5/2*(width/14)])\n # hip.setRotation(agx.Quat(-0.15, agx.Vec3(1,0,0)))\n\n \n\n# def seesaw(sim, root, pos, angle, h=0.08):\n# d = 0.8\n# # Sides\n# dims = [0.6*(width/14), 0.15*(width/14), h*3/2]\n# pos_s = [pos[0]+(d/2+0.3)*np.cos(angle)*(width/14), pos[1]+(d/2+0.3)*np.sin(angle)*(width/14), pos[2]+h/2]\n# sideP = addboxx(sim, root, dims, pos_s)\n# dims = [0.6*(width/14), 0.15*(width/14), h*3/2]\n# pos_s = [pos[0]-(d/2+0.3)*np.cos(angle)*(width/14), pos[1]-(d/2+0.3)*np.sin(angle)*(width/14), pos[2]+h/2]\n# sideN = addboxx(sim, root, dims, pos_s)\n# # Main board\n# dims = [d*(width/14), 0.9*(width/14), 0.004]\n# pos_s = [pos[0]+0.06*np.sin(angle)*(width/14), pos[1]-0.06*np.cos(angle)*(width/14), pos[2]+h]\n# board = addboxx(sim, root, dims, pos_s, Fixed=False)\n\n# sideP.setRotation(agx.Quat(angle, agx.Vec3(0,0,1)))\n# sideN.setRotation(agx.Quat(angle, agx.Vec3(0,0,1)))\n# board.setRotation(agx.Quat(angle, agx.Vec3(0,0,1)))\n\n #Some stops under\n # bot_tilt = 0.17\n # dims = [d, 0.43, 0.004]\n # dif = 0.215*np.cos(bot_tilt)*(width/14)-0.002*np.sin(bot_tilt)*(width/14)\n # pos_s = [pos[0]+(0.06+dif)*np.sin(angle)*(width/14), pos[1]-(0.06+dif)*np.cos(angle)*(width/14), pos[2]+np.sin(bot_tilt)*dif]\n # bottom1 = addboxx(sim, root, dims, pos_s, color=agxRender.Color.DarkGray())\n # pos_s = [pos[0]+(0.06-dif)*np.sin(angle)*(width/14), pos[1]-(0.06-dif)*np.cos(angle)*(width/14), pos[2]+np.sin(bot_tilt)*dif]\n # bottom2 = addboxx(sim, root, dims, pos_s, color=agxRender.Color.DarkGray())\n\n \n # bottom1.setRotation(agx.Quat( bot_tilt, agx.Vec3(1,0,0)))\n # bottom1.setRotation(bottom1.getRotation()*agx.Quat(angle, agx.Vec3(0,0,1)))\n \n # bottom2.setRotation(agx.Quat(-bot_tilt, agx.Vec3(1,0,0)))\n # 
bottom2.setRotation(bottom2.getRotation()*agx.Quat(angle, agx.Vec3(0,0,1)))\n \n\n # hf = agx.HingeFrame()\n # hf.setAxis(agx.Vec3( np.cos(angle),np.sin(angle),0))\n # hf.setCenter(agx.Vec3(pos[0]+(d/2)*np.cos(angle)*(width/14), pos[1]+(d/2)*np.sin(angle)*(width/14), pos[2]+h))\n # axleP = agx.Hinge(hf, board, sideP)\n # sim.add(axleP)\n\n # hf = agx.HingeFrame()\n # hf.setAxis(agx.Vec3( np.cos(angle),np.sin(angle),0))\n # hf.setCenter(agx.Vec3(pos[0]*(width/14)-(d/2)*np.cos(angle)*(width/14), pos[1]*(width/14)-(d/2)*np.sin(angle)*(width/14), pos[2]+h))\n # axleN = agx.Hinge(hf, board, sideN)\n # sim.add(axleN)\n\n\n\n\n\n\n\n\n\n \n\ndef addboxx(sim, root, dims, pos, Fixed=True, color = agxRender.Color.Red()):\n if type(pos) == type([]):\n pos = agx.Vec3(pos[0], pos[1], pos[2])\n boxx = agx.RigidBody( agxCollide.Geometry( agxCollide.Box(dims[0]/2, dims[1]/2, dims[2]/2)))\n boxx.setPosition(pos)\n if(Fixed):\n boxx.setMotionControl(1)\n sim.add(boxx)\n agxOSG.setDiffuseColor(agxOSG.createVisual(boxx, root), color)\n return boxx\n\ndef addcylinder(sim, root, dims, pos, Fixed=True, color = agxRender.Color.Red(), texture=False):\n if type(pos) == type([]):\n pos = agx.Vec3(pos[0], pos[1], pos[2])\n cyl = agx.RigidBody( agxCollide.Geometry( agxCollide.Cylinder(dims[0], dims[1])))\n cyl.setPosition(pos)\n if(Fixed):\n cyl.setMotionControl(1)\n sim.add(cyl)\n vis_body = agxOSG.createVisual(cyl, root)\n if texture:\n agxOSG.setTexture(vis_body, texture, True, agxOSG.DIFFUSE_TEXTURE, 1.0, 1.0)\n else:\n agxOSG.setDiffuseColor(vis_body, color)\n \n return cyl\n\ndef addball(sim, root, rad, pos, Fixed=True):\n if type(pos) == type([]):\n pos = agx.Vec3(pos[0], pos[1], pos[2])\n ball = agx.RigidBody( agxCollide.Geometry( agxCollide.Sphere(rad)))\n ball.setPosition(pos)\n if(Fixed):\n ball.setMotionControl(1)\n sim.add(ball)\n agxOSG.setDiffuseColor(agxOSG.createVisual(ball, root), agxRender.Color.Red())\n return ball\n","sub_path":"src/Racetrack.py","file_name":"Racetrack.py","file_ext":"py","file_size_in_byte":14784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"457731595","text":"import numpy as np \nimport seaborn as sns\nimport matplotlib.pyplot as plt \nsns.set(color_codes = True)\nimport pandas as pd \n\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import classification_report\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.metrics import accuracy_score\nfrom sklearn import preprocessing\n# %matplotlib inline\n\ntrain = pd.read_csv('D:\\Stream\\Stream_DC_SM10\\Road to Data Scientist\\dataTitanic\\set_train.csv')\ntest = pd.read_csv('D:\\Stream\\Stream_DC_SM10\\Road to Data Scientist\\dataTitanic\\set_test.csv')\n\nmeanAge = np.mean(train.Age)\ntrain.Age = train.Age.fillna(meanAge)\n\nmeanAge = np.mean(test.Age)\ntest.Age = test.Age.fillna(meanAge)\nmeanFare = np.mean(test.Fare)\ntest.Fare = test.Fare.fillna(meanFare)\n\n# print(train.info())\n# print(test.info())\n\ntrain = train.drop(['PassengerId', 'Name', 'Cabin', 'Ticket', 'Embarked', 'Pclass'], axis = 1)\ntrain = train.dropna(axis = 0)\n\ntest = test.drop(['PassengerId', 'Name', 'Cabin', 'Ticket', 'Embarked', 'Pclass'], axis = 1)\ntest = test.dropna(axis = 0)\n\n\n# print(test.describe())\ntrain['Sex'] = train['Sex'].map({'female': 1, 'male': 0})\n# train['Embarked'] = train['Embarked'].map({'Q':0,'S':1,'C':2})\n\n# meanEmbarked = np.mean(train.Embarked)\n# train.Embarked = train.Embarked.fillna(meanEmbarked)\n\ntest['Sex'] = 
test['Sex'].map({'female': 1, 'male': 0})\n# test['Embarked'] = test['Embarked'].map({'Q':0,'S':1,'C':2})\n\ntrain['Sex'] = pd.to_numeric(train['Sex'], errors='ignore')\n# train['Embarked'] = pd.to_numeric(train['Embarked'], errors='ignore')\n\ntest['Sex'] = pd.to_numeric(test['Sex'], errors='ignore')\n# test['Embarked'] = pd.to_numeric(test['Embarked'], errors='ignore')\n\n# print(train.info())\n# print(test.info())\n\nx = train.values #returns a numpy array\nmin_max_scaler = preprocessing.MinMaxScaler()\nx_scaled = min_max_scaler.fit_transform(x)\ntrain = pd.DataFrame(x_scaled)\n\nx = test.values #returns a numpy array\nmin_max_scaler = preprocessing.MinMaxScaler()\nx_scaled = min_max_scaler.fit_transform(x)\ntest = pd.DataFrame(x_scaled)\n\n\nx_train = train.iloc[:, :-1].values\ny_train = train.iloc[:,-1].values\n\n\nx_test = test.iloc[:,:-1].values\ny_test = test.iloc[:,-1].values\n\n# print(train)\n\n# train.to_csv('D:\Stream\Stream_DC_SM10\Road to Data Scientist\dataTitanic\iamlkate_titanic_cleaned_data.csv')\n\n#LogisticRegression\nprint('----------*****LogisticRegression*****----------')\nfrom sklearn.linear_model import LogisticRegression\nclassifier = LogisticRegression()\nclassifier.fit(x_train, y_train)\n\ny_pred = classifier.predict(x_test)\n# print(y_pred)\nprint('classification_report:')\nprint(classification_report(y_test, y_pred))\nprint('confusion_matrix:')\nprint(confusion_matrix(y_test, y_pred))\n\n#Accuracy score\nprint ('accuracy is', accuracy_score(y_pred, y_test))\nprint('')\n\nprint('----------*****GaussianNB*****----------')\nfrom sklearn.naive_bayes import GaussianNB\nclassifier = GaussianNB()\nclassifier.fit(x_train, y_train)\n\ny_pred = classifier.predict(x_test)\nprint('classification_report:')\nprint(classification_report(y_test, y_pred))\nprint('confusion_matrix:')\nprint(confusion_matrix(y_test, y_pred))\n\nprint ('accuracy is', accuracy_score(y_pred, y_test))\nprint('')\n\nprint('----------*****support vector machine\'s*****----------')\nfrom sklearn.svm import SVC\nclassifier = SVC()\nclassifier.fit(x_train, y_train)\n\ny_pred = classifier.predict(x_test)\nprint('classification_report:')\nprint(classification_report(y_test, y_pred))\nprint('confusion_matrix:')\nprint(confusion_matrix(y_test, y_pred))\n\nprint ('accuracy is', accuracy_score(y_pred, y_test))\nprint('')\n\nprint('----------*****K-Nearest Neighbours*****----------')\nfrom sklearn.neighbors import KNeighborsClassifier\nclassifier = KNeighborsClassifier(n_neighbors=3)\nclassifier.fit(x_train, y_train)\n\ny_pred = classifier.predict(x_test)\nprint('classification_report:')\nprint(classification_report(y_test, y_pred))\nprint('confusion_matrix:')\nprint(confusion_matrix(y_test, y_pred))\n\nprint ('accuracy is', accuracy_score(y_pred, y_test))\nprint('')\n\nprint('----------*****Decision Tree\'s*****----------')\nfrom sklearn.tree import DecisionTreeClassifier\nclassifier = DecisionTreeClassifier()\nclassifier.fit(x_train, y_train)\n\ny_pred = classifier.predict(x_test)\nprint('classification_report:')\nprint(classification_report(y_test, y_pred))\nprint('confusion_matrix:')\nprint(confusion_matrix(y_test, y_pred))\n\nprint ('accuracy is', accuracy_score(y_pred, y_test))\n\n# np.savetxt(\"foo.csv\", y_pred, delimiter=\",\")","sub_path":"titanicModel.py","file_name":"titanicModel.py","file_ext":"py","file_size_in_byte":4481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"337407442","text":"# -*- coding: utf-8 -*-\nfrom ..utils.builtins import 
*\nfrom ..utils import collections\n\nfrom ..__past__.api07_comp import CompareDict\nfrom ..__past__.api07_comp import CompareSet\nfrom ..utils.misc import _is_nsiterable\n\nfrom .base import BaseSource\n\n\nclass _FilterValueError(ValueError):\n \"\"\"Used by AdapterSource. This error is raised when attempting to\n unwrap a filter that specifies an inappropriate (non-missing) value\n for a missing column.\"\"\"\n pass\n\n\nclass AdapterSource(BaseSource):\n \"\"\"A wrapper class that adapts a data *source* to an *interface* of\n column names. The *interface* should be a sequence of 2-tuples where\n the first item is the existing column name and the second item is\n the desired column name. If column order is not important, the\n *interface* can, alternatively, be a dictionary.\n\n For example, a CSV file that contains the columns 'AAA', 'BBB',\n and 'DDD' can be adapted to behave as if it has the columns\n 'AAA', 'BBB', 'CCC' and 'DDD' with the following::\n\n source = CsvSource('mydata.csv')\n interface = [\n ('AAA', 'AAA'),\n ('BBB', 'BBB'),\n (None, 'CCC'),\n ('DDD', 'DDD'),\n ]\n subject = AdapterSource(source, interface)\n\n An :class:`AdapterSource` can be thought of as a virtual source that\n renames, reorders, adds, or removes columns of the original\n *source*.\n\n To add a column that does not exist in original, use None in place\n of a column name (see column 'CCC', above). Columns mapped to None\n will contain *missing* values (defaults to empty string). To remove\n a column, simply omit it from the interface.\n\n The original source can be accessed via the :attr:`__wrapped__`\n property.\n \"\"\"\n def __init__(self, source, interface, missing=''):\n if not isinstance(interface, collections.Sequence):\n if isinstance(interface, dict):\n interface = interface.items()\n interface = sorted(interface)\n\n source_columns = source.columns()\n interface_cols = [x[0] for x in interface]\n for c in interface_cols:\n if c != None and c not in source_columns:\n raise KeyError(c)\n\n self._interface = list(interface)\n self._missing = missing\n self.__wrapped__ = source\n\n def __repr__(self):\n self_class = self.__class__.__name__\n wrapped_repr = repr(self.__wrapped__)\n interface = self._interface\n missing = self._missing\n if missing != '':\n missing = ', missing=' + repr(missing)\n return '{0}({1}, {2}{3})'.format(self_class, wrapped_repr, interface, missing)\n\n def columns(self):\n return [new for (old, new) in self._interface if new != None]\n\n def __iter__(self):\n interface = self._interface\n missing = self._missing\n for row in self.__wrapped__.__iter__():\n yield dict((new, row.get(old, missing)) for old, new in interface)\n\n def filter_rows(self, **kwds):\n try:\n unwrap_kwds = self._unwrap_filter(kwds)\n except _FilterValueError:\n return # <- EXIT! 
Raises StopIteration to signify empty generator.\n\n interface = self._interface\n missing = self._missing\n for row in self.__wrapped__.filter_rows(**unwrap_kwds):\n yield dict((new, row.get(old, missing)) for old, new in interface)\n\n def distinct(self, columns, **kwds_filter):\n unwrap_src = self.__wrapped__ # Unwrap data source.\n unwrap_cols = self._unwrap_columns(columns)\n try:\n unwrap_flt = self._unwrap_filter(kwds_filter)\n except _FilterValueError:\n return CompareSet([]) # <- EXIT!\n\n if not unwrap_cols:\n iterable = iter(unwrap_src)\n try:\n next(iterable) # Check for any data at all.\n length = 1 if isinstance(columns, str) else len(columns)\n result = [tuple([self._missing]) * length] # Make 1 row of *missing* vals.\n except StopIteration:\n result = [] # If no data, result is empty.\n return CompareSet(result) # <- EXIT!\n\n results = unwrap_src.distinct(unwrap_cols, **unwrap_flt)\n rewrap_cols = self._rewrap_columns(unwrap_cols)\n return self._rebuild_compareset(results, rewrap_cols, columns)\n\n def sum(self, column, keys=None, **kwds_filter):\n return self._aggregate('sum', column, keys, **kwds_filter)\n\n def count(self, column, keys=None, **kwds_filter):\n return self._aggregate('count', column, keys, **kwds_filter)\n\n def _aggregate(self, method, column, keys=None, **kwds_filter):\n \"\"\"Call aggregation method ('sum' or 'count'), return result.\"\"\"\n unwrap_src = self.__wrapped__\n unwrap_col = self._unwrap_columns(column)\n unwrap_keys = self._unwrap_columns(keys)\n try:\n unwrap_flt = self._unwrap_filter(kwds_filter)\n except _FilterValueError:\n if keys:\n result = CompareDict({}, keys)\n else:\n result = 0\n return result # <- EXIT!\n\n # If all *columns* are missing, build result of missing values.\n if not unwrap_col:\n distinct = self.distinct(keys, **kwds_filter)\n result = ((key, 0) for key in distinct)\n return CompareDict(result, keys) # <- EXIT!\n\n # Get method ('sum' or 'count') and perform aggregation.\n aggregate = getattr(unwrap_src, method)\n result = aggregate(unwrap_col, unwrap_keys, **unwrap_flt)\n\n rewrap_col = self._rewrap_columns(unwrap_col)\n rewrap_keys = self._rewrap_columns(unwrap_keys)\n return self._rebuild_comparedict(result, rewrap_col, column,\n rewrap_keys, keys, missing_col=0)\n\n def mapreduce(self, mapper, reducer, columns, keys=None, **kwds_filter):\n unwrap_src = self.__wrapped__\n unwrap_cols = self._unwrap_columns(columns)\n unwrap_keys = self._unwrap_columns(keys)\n try:\n unwrap_flt = self._unwrap_filter(kwds_filter)\n except _FilterValueError:\n if keys:\n result = CompareDict({}, keys)\n else:\n result = self._missing\n return result # <- EXIT!\n\n # If all *columns* are missing, build result of missing values.\n if not unwrap_cols:\n distinct = self.distinct(keys, **kwds_filter)\n if isinstance(columns, str):\n val = self._missing\n else:\n val = (self._missing,) * len(columns)\n result = ((key, val) for key in distinct)\n return CompareDict(result, keys) # <- EXIT!\n\n result = unwrap_src.mapreduce(mapper, reducer,\n unwrap_cols, unwrap_keys, **unwrap_flt)\n\n rewrap_cols = self._rewrap_columns(unwrap_cols)\n rewrap_keys = self._rewrap_columns(unwrap_keys)\n return self._rebuild_comparedict(result, rewrap_cols, columns,\n rewrap_keys, keys,\n missing_col=self._missing)\n\n def _unwrap_columns(self, columns, interface_dict=None):\n \"\"\"Unwrap adapter *columns* to reveal hidden adaptee columns.\"\"\"\n if not columns:\n return None # <- EXIT!\n\n if not interface_dict:\n interface_dict = dict((new, old) for old, 
new in self._interface)\n\n if isinstance(columns, str):\n return interface_dict[columns] # <- EXIT!\n\n unwrapped = (interface_dict[k] for k in columns)\n return tuple(x for x in unwrapped if x != None)\n\n def _unwrap_filter(self, filter_dict, interface_dict=None):\n \"\"\"Unwrap adapter *filter_dict* to reveal hidden adaptee column\n names. An unwrapped filter cannot be created if the filter\n specifies that a missing column equals a non-missing value--if\n this condition occurs, a _FilterValueError is raised.\n \"\"\"\n if not interface_dict:\n interface_dict = dict((new, old) for old, new in self._interface)\n\n translated = {}\n for k, v in filter_dict.items():\n tran_k = interface_dict[k]\n if tran_k != None:\n translated[tran_k] = v\n else:\n if v != self._missing:\n raise _FilterValueError('Missing column can only be '\n 'filtered to missing value.')\n return translated\n\n def _rewrap_columns(self, unwrapped_columns, rev_dict=None):\n \"\"\"Take unwrapped adaptee column names and wrap them in adapter\n column names (specified by _interface).\n \"\"\"\n if not unwrapped_columns:\n return None # <- EXIT!\n\n if rev_dict:\n interface_dict = dict((old, new) for new, old in rev_dict.items())\n else:\n interface_dict = dict(self._interface)\n\n if isinstance(unwrapped_columns, str):\n return interface_dict[unwrapped_columns]\n return tuple(interface_dict[k] for k in unwrapped_columns)\n\n def _rebuild_compareset(self, result, rewrapped_columns, columns):\n \"\"\"Take CompareSet from unwrapped source and rebuild it to match\n the CompareSet that would be expected from the wrapped source.\n \"\"\"\n normalize = lambda x: x if (isinstance(x, str) or not x) else tuple(x)\n rewrapped_columns = normalize(rewrapped_columns)\n columns = normalize(columns)\n\n if rewrapped_columns == columns:\n return result # <- EXIT!\n\n missing = self._missing\n def rebuild(x):\n lookup_dict = dict(zip(rewrapped_columns, x))\n return tuple(lookup_dict.get(c, missing) for c in columns)\n return CompareSet(rebuild(x) for x in result)\n\n def _rebuild_comparedict(self,\n result,\n rewrapped_columns,\n columns,\n rewrapped_keys,\n keys,\n missing_col):\n \"\"\"Take CompareDict from unwrapped source and rebuild it to\n match the CompareDict that would be expected from the wrapped\n source.\n \"\"\"\n normalize = lambda x: x if (isinstance(x, str) or not x) else tuple(x)\n rewrapped_columns = normalize(rewrapped_columns)\n rewrapped_keys = normalize(rewrapped_keys)\n columns = normalize(columns)\n keys = normalize(keys)\n\n if rewrapped_keys == keys and rewrapped_columns == columns:\n if isinstance(result, CompareDict):\n key_names = (keys,) if isinstance(keys, str) else keys\n result.key_names = key_names\n return result # <- EXIT!\n\n try:\n item_gen = iter(result.items())\n except AttributeError:\n item_gen = [(self._missing, result)]\n\n if rewrapped_keys != keys:\n def rebuild_keys(k, missing):\n if isinstance(keys, str):\n return k\n key_dict = dict(zip(rewrapped_keys, k))\n return tuple(key_dict.get(c, missing) for c in keys)\n missing_key = self._missing\n item_gen = ((rebuild_keys(k, missing_key), v) for k, v in item_gen)\n\n if rewrapped_columns != columns:\n def rebuild_values(v, missing):\n if isinstance(columns, str):\n return v\n if not _is_nsiterable(v):\n v = (v,)\n value_dict = dict(zip(rewrapped_columns, v))\n return tuple(value_dict.get(v, missing) for v in columns)\n item_gen = ((k, rebuild_values(v, missing_col)) for k, v in item_gen)\n\n return CompareDict(item_gen, 
key_names=keys)\n","sub_path":"datatest/sources/adapter.py","file_name":"adapter.py","file_ext":"py","file_size_in_byte":11939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"325486477","text":"from random import randint\n\nimport os\nfrom FileReader import FileReader\n\nclass DataLoader(object):\n def __init__(self, dataPath):\n self.dataPath = dataPath\n\n def __get_files(self):\n folders = [self.dataPath + folder + '/' for folder in os.listdir(self.dataPath)]\n class_titles = os.listdir(self.dataPath)\n files = {}\n for folder, title in zip(folders, class_titles):\n files[title] = [folder + f for f in os.listdir(folder)]\n self.files = files\n\n def get_json(self):\n self.__get_files()\n data = []\n for topic in self.files:\n i = 0\n for file in self.files[topic]:\n content = FileReader(filePath=file).content()\n data.append({\n 'category': topic,\n 'content': content\n })\n if i == 1000:\n break\n else:\n i += 1\n return data\n \n","sub_path":"DataMining/Final/source/source_code/DataLoader.py","file_name":"DataLoader.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"390558690","text":"# This file contains an example Flask-User application.\n# To keep the example simple, we are applying some unusual techniques:\n# - Placing everything in one file\n# - Using class-based configuration (instead of file-based configuration)\n# - Using string-based templates (instead of file-based templates)\n\nimport datetime\nfrom flask import Flask, request, render_template_string, render_template\nfrom flask_babelex import Babel\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_user import current_user, login_required, roles_required, UserManager, UserMixin\n\n\n# Class-based application configuration\nclass ConfigClass(object):\n \"\"\" Flask application config \"\"\"\n\n # Flask settings\n SECRET_KEY = 'This is an INSECURE secret!! DO NOT use this in production!!'\n\n # Flask-SQLAlchemy settings\n SQLALCHEMY_DATABASE_URI = 'sqlite:///basic_app.sqlite' # File-based SQL database\n SQLALCHEMY_TRACK_MODIFICATIONS = False # Avoids SQLAlchemy warning\n\n # Flask-Mail SMTP server settings\n MAIL_SERVER = 'smtp.gmail.com'\n MAIL_PORT = 465\n MAIL_USE_SSL = True\n MAIL_USE_TLS = False\n MAIL_USERNAME = 'mattandlukeandpython@gmail.com'\n MAIL_PASSWORD = 'calldrip1234'\n MAIL_DEFAULT_SENDER = '\"MyApp\" '\n\n # Flask-User settings\n USER_APP_NAME = \"Flask-User Basic App\" # Shown in and email templates and page footers\n USER_ENABLE_EMAIL = True # Enable email authentication\n USER_ENABLE_USERNAME = False # Disable username authentication\n USER_EMAIL_SENDER_NAME = USER_APP_NAME\n USER_EMAIL_SENDER_EMAIL = \"noreply@example.com\"\n\n\ndef execute_sql(param, commit):\n pass\n\n\ndef create_app():\n \"\"\" Flask application factory \"\"\"\n\n # Create Flask app load app.config\n app = Flask(__name__)\n app.config.from_object(__name__ + '.ConfigClass')\n\n # Initialize Flask-BabelEx\n babel = Babel(app)\n\n # Initialize Flask-SQLAlchemy\n db = SQLAlchemy(app)\n\n # Define the User data-model.\n # NB: Make sure to add flask_user UserMixin !!!\n class User(db.Model, UserMixin):\n __tablename__ = 'users'\n id = db.Column(db.Integer, primary_key=True)\n active = db.Column('is_active', db.Boolean(), nullable=False, server_default='1')\n\n # User authentication information. 
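The AdapterSource docstring above shows the list form of an interface; the dict form is run through sorted(), so it loses the given order, and under Python 3 a None key cannot be sorted against strings, which makes the list form the safe choice when adding columns. A usage sketch reusing the docstring's CsvSource example (the 'B2' rename is hypothetical, and both classes are assumed importable from the surrounding datatest package):

# Sketch following the docstring above; CsvSource and 'mydata.csv' come from
# that example, the 'B2' rename is hypothetical.
interface = [
    ('AAA', 'AAA'),   # keep a column as-is
    ('BBB', 'B2'),    # rename BBB -> B2
    (None,  'CCC'),   # add CCC, filled with the *missing* value
]
subject = AdapterSource(CsvSource('mydata.csv'), interface, missing='')
print(subject.columns())   # -> ['AAA', 'B2', 'CCC']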
The collation='NOCASE' is required\n # to search case insensitively when USER_IFIND_MODE is 'nocase_collation'.\n email = db.Column(db.String(255, collation='NOCASE'), nullable=False, unique=True)\n email_confirmed_at = db.Column(db.DateTime())\n password = db.Column(db.String(255), nullable=False, server_default='')\n\n # User information\n first_name = db.Column(db.String(100, collation='NOCASE'), nullable=False, server_default='')\n last_name = db.Column(db.String(100, collation='NOCASE'), nullable=False, server_default='')\n\n # Define the relationship to Role via UserRoles\n roles = db.relationship('Role', secondary='user_roles')\n\n # Define the Role data-model\n class Role(db.Model):\n __tablename__ = 'roles'\n id = db.Column(db.Integer(), primary_key=True)\n name = db.Column(db.String(50), unique=True)\n\n # Define the UserRoles association table\n class UserRoles(db.Model):\n __tablename__ = 'user_roles'\n id = db.Column(db.Integer(), primary_key=True)\n user_id = db.Column(db.Integer(), db.ForeignKey('users.id', ondelete='CASCADE'))\n role_id = db.Column(db.Integer(), db.ForeignKey('roles.id', ondelete='CASCADE'))\n\n # Setup Flask-User and specify the User data-model\n user_manager = UserManager(app, db, User)\n\n # Create all database tables\n db.create_all()\n\n # Create 'member@example.com' user with no roles\n if not User.query.filter(User.email == 'member@example.com').first():\n user = User(\n email='member@example.com',\n email_confirmed_at=datetime.datetime.utcnow(),\n password=user_manager.hash_password('Password1'),\n )\n db.session.add(user)\n db.session.commit()\n\n # Create 'admin@example.com' user with 'Admin' and 'Agent' roles\n if not User.query.filter(User.email == 'admin@example.com').first():\n user = User(\n email='admin@example.com',\n email_confirmed_at=datetime.datetime.utcnow(),\n password=user_manager.hash_password('Password1'),\n )\n user.roles.append(Role(name='Admin'))\n user.roles.append(Role(name='Agent'))\n db.session.add(user)\n db.session.commit()\n\n # The Home page is accessible to anyone\n @app.route('/')\n def home_page():\n return render_template('index.html')\n\n #New Route\n @app.route('/admin')\n @roles_required('Admin')\n def admin_page():\n return render_template('admin.html')\n\n @app.route('/seedDB')\n @roles_required('Admin')\n def seedDB():\n sqlQ = db.engine.execute('DROP TABLE IF EXISTS Book', commit=True)\n\n sqlQuery = db.engine.execute(' CREATE TABLE Book (Author TEXT,title TEXT, description TEXT)', commit=True)\n\n sqlQuery2 = db.engine.execute('INSERT INTO BOOK (author, title, description) VALUES (\"Marry Shelly\", '\n '\"Frankenstein\", \"My Mother-in-law in the morning\")', commit=True)\n sqlQuery2 = db.engine.execute('INSERT INTO BOOK (author, title, description) VALUES (\"Henry James\", '\n '\"The Turn of the Screw\", \"My Mother-in-law in the morning2\")', commit=True)\n sqlQuery2 = db.engine.execute('INSERT INTO BOOK (author, title, description) VALUES (\"Max Weber\", '\n '\"The Protestant Work Ethic and the Spirit of Capitalism\", '\n '\"My Mother-in-law in the mornings\")', commit=True)\n sqlQuery2 = db.engine.execute('INSERT INTO BOOK (author, title, description) VALUES (\"Robert Putnam\", '\n '\"Bowling Alone\", \"My Mother-in-law in the morning4\")', commit=True)\n\n booksQuery = db.engine.execute('SELECT rowid, * FROM Book')\n for book in booksQuery:\n print(book['rowid'])\n print(book['Author'])\n print(book['title'])\n print(book['description'])\n\n return '
DB Seeded!
'\n\n @app.route('/erase_db')\n @roles_required('Admin')\n def eraseDB():\n sqlQ = db.engine.execute('DELETE FROM Book', commit=True)\n return '
DB ERASED!
'\n\n @app.route('/all_books')\n @login_required\n def all_books():\n books = db.engine.execute('SELECT * FROM Book')\n my_list_of_books = [row for row in books]\n return render_template('all_books.html', books=my_list_of_books)\n\n @app.route('/add_book', methods={'GET', 'POST'})\n @login_required\n def addbook():\n if request.method == 'POST':\n author = request.form['author']\n title = request.form['title']\n description = request.form['description']\n\n returnStatus = db.engine.execute('INSERT INTO Book (author, title, description) VALUES (?, ?, ?)',\n (author, title, description), commit=True)\n\n return render_template('add_book.html', book_title=title)\n return render_template('add_book.html', book_title=\"\")\n\n\n @app.context_processor\n def utility_processor():\n def isAdmin(user):\n sqlStatement = \"SELECT roles.name FROM roles JOIN user_roles ON roles.id=user_roles.role_id JOIN users ON users.id=user_roles.user_id WHERE users.email='\" + user + \"'AND roles.name='Admin' \"\n roleName = db.engine.execute(sqlStatement)\n roleName = [row for row in roleName]\n if len(roleName) > 0 and roleName[0]['name'] == 'Admin':\n returnValue = 1\n else:\n returnValue = 0\n return returnValue\n return dict(isAdmin=isAdmin)\n\n return app\n\n\n# Start development web server\nif __name__ == '__main__':\n app = create_app()\n app.run(host='0.0.0.0', port=5000, debug=True)","sub_path":"basic_app.py","file_name":"basic_app.py","file_ext":"py","file_size_in_byte":8051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"557147269","text":"from datetime import datetime\nfrom domainmodel.movie import Movie\n\n\nclass Review:\n def __init__(self, movie=Movie(), review_text=\"\", rating=int()):\n if type(rating) == int and rating >= 1 and rating <= 10:\n self.rating = rating\n else:\n self.rating = None\n self.movie = movie\n self.review_text = review_text.strip()\n self.timestamp = datetime.today()\n\n def __repr__(self):\n return \"{}, Review: {}, Rating: {}, Time: {}\".format(str(self.movie), str(self.review_text), str(self.rating),\n str(self.timestamp))\n\n def __eq__(self, other):\n string1 = str(self.movie) + self.review_text + str(self.rating) + str(self.timestamp)\n string2 = str(other.movie) + other.review_text + str(other.rating) + str(other.timestamp)\n return string1 == string2\n\n\n\n","sub_path":"domainmodel/review.py","file_name":"review.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"289072857","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 22 11:27:15 2019\n\n@author: Alex\nClassification Template, will be used for Neural Networks\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 20 15:44:48 2019\n\n@author: Alex\n\nLogistic Regression Part - The Data preprocessing from this section will be used in Deep Learning.\n\"\"\"\n\n#Import libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom data_preprocessing import PreprocessingData\n\n#Preprocessing the data\npreprocessing = PreprocessingData()\npreprocessing.import_dataset(\"Social_Network_Ads.csv\")\npreprocessing.choose_which_column_to_dataset(indVariables = 2, depVariables = 4)\npreprocessing.seperate_training_test_set(test_size_training = 0.25, random_state_training = 0)\npreprocessing.feature_scaling()\ndataset, independantVariables, dependantVariables = 
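The isAdmin helper above builds its SQL by concatenating the email address into the query string, so a stray quote breaks the statement and user-controlled input can inject SQL. A sketch of the same lookup with a bound parameter via SQLAlchemy's text() construct; since the WHERE clause already pins roles.name to 'Admin', a non-empty result is enough:

# Same role lookup with a bound parameter instead of string concatenation.
from sqlalchemy import text

def is_admin(db, user_email):
    stmt = text(
        "SELECT roles.name FROM roles "
        "JOIN user_roles ON roles.id = user_roles.role_id "
        "JOIN users ON users.id = user_roles.user_id "
        "WHERE users.email = :email AND roles.name = 'Admin'"
    )
    rows = db.engine.execute(stmt, email=user_email).fetchall()
    return 1 if rows else 0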
preprocessing.get_imported_variables()\nindependantVariables_TrainingSet, independantVariables_TestSet, dependantVariables_TrainingSet, dependantVariables_TestSet = preprocessing.get_trainingSet_variables()\n\n#Fitting the classigier to the Training set\n#Create your classifier here\n\n#Predicting the Test set results\nindependantVariables_prediction = classifier.predict(independantVariables_TestSet)\n\n#Making the Confusion Matrix\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(dependantVariables_TestSet, independantVariables_prediction)\n\n# Visualising the Training set results\nfrom matplotlib.colors import ListedColormap\nX_set, y_set = independantVariables_TrainingSet, dependantVariables_TrainingSet\nX1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),\n np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))\nplt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),\n alpha = 0.75, cmap = ListedColormap(('red', 'green')))\nplt.xlim(X1.min(), X1.max())\nplt.ylim(X2.min(), X2.max())\nfor i, j in enumerate(np.unique(y_set)):\n plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],\n c = ListedColormap(('red', 'green'))(i), label = j)\nplt.title('Logistic Regression (Training set)')\nplt.xlabel('Age')\nplt.ylabel('Estimated Salary')\nplt.legend()\nplt.show()\n\n# Visualising the Test set results\nfrom matplotlib.colors import ListedColormap\nX_set, y_set = independantVariables_TestSet, dependantVariables_TestSet\nX1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),\n np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))\nplt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),\n alpha = 0.75, cmap = ListedColormap(('red', 'green')))\nplt.xlim(X1.min(), X1.max())\nplt.ylim(X2.min(), X2.max())\nfor i, j in enumerate(np.unique(y_set)):\n plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],\n c = ListedColormap(('red', 'green'))(i), label = j)\nplt.title('Logistic Regression (Training set)')\nplt.xlabel('Age')\nplt.ylabel('Estimated Salary')\nplt.legend()\nplt.show()","sub_path":"Part 3 - Classification/Classification_Template/classification_template.py","file_name":"classification_template.py","file_ext":"py","file_size_in_byte":3162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"389507849","text":"import sys\nimport os\nfrom PyQt4 import QtGui, QtCore\n\nclass Converter(QtGui.QMainWindow):\n def __init__(self):\n super(Converter, self).__init__()\n self.setGeometry(400, 400, 280, 100)\n self.setWindowTitle(\"PyQt Designer Converter\") \n self.setWindowIcon(QtGui.QIcon(r\"C:\\Program Files (x86)\\ArcGIS\\Desktop10.4\\bin\\Icons\\GenericRefresh_B_32.png\"))\n self.setFixedSize(280, 100)\n #sadece kapatma dugmesini birakacak sekilde minimize, maximize dugmelerinin kapatilmasi\n self.setWindowFlags(QtCore.Qt.Window | \n QtCore.Qt.CustomizeWindowHint | \n QtCore.Qt.WindowTitleHint | \n QtCore.Qt.WindowCloseButtonHint | \n QtCore.Qt.WindowStaysOnTopHint) \n\n #tum bilesenleri iceren home fonksiyonunun cagirilmasi\n self.home() \n\n\n def home(self):\n #cizgi objesinin ekrana eklenmesi, once centralwidgetin tanimlanmasi gerekiyor\n centralwidget = QtGui.QWidget(self) \n line = QtGui.QFrame(centralwidget)\n line.setGeometry(0, 65, 280, 3)\n 
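The classification template above imports confusion_matrix but deliberately leaves the classifier slot empty, so classifier.predict fails as written. A minimal completion, with LogisticRegression as a hypothetical stand-in (any scikit-learn estimator with fit/predict fits the slot):

# One way to fill the "create your classifier here" slot; the variable names
# match the template above.
from sklearn.linear_model import LogisticRegression

classifier = LogisticRegression()
classifier.fit(independantVariables_TrainingSet, dependantVariables_TrainingSet)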
line.setFrameShape(QtGui.QFrame.HLine)\n line.setFrameShadow(QtGui.QFrame.Sunken)\n self.setCentralWidget(centralwidget)\n\n #Ui'yi yukleyecek dugmenin tanimlanmasi\n self.btnUiChooser = QtGui.QPushButton(\"Choose Ui\", self) \n self.btnUiChooser.setGeometry(20, 20, 75, 23)\n self.btnUiChooser.setToolTip(\"Choose Ui to Convert\")\n #basilinca calisacak fonksiyonun tanimlanmasi\n self.btnUiChooser.clicked.connect(self.uiChoose) \n\n #Py'i kayit yerini belirleyecek dugmenin tanimlanmasi\n self.btnPySaver = QtGui.QPushButton(\"Py Location\", self) \n self.btnPySaver.setGeometry(185, 10, 75, 23)\n self.btnPySaver.setToolTip(\"Choose Py file location\")\n #basilinca calisacak fonksiyonun tanimlanmasi\n self.btnPySaver.clicked.connect(self.pySave) \n\n self.lblUiLocation = QtGui.QLabel(\"Save in Ui Folder\", self)\n self.lblUiLocation.setGeometry(185, 40, 80, 23)\n\n #Ui'in belirtilecek yere mi yoksa Ui ile ayni yere mi convert edileceginin belirlenmesini \n #saglayacak secim aracinin tanimlanmasi. Basildiginda diger secim pasif olmaktadir.\n self.radPySaver = QtGui.QRadioButton(\"\", self) \n self.radPySaver.setGeometry(165, 10, 15, 23)\n self.radPySaver.setChecked(True)\n self.lblUiLocation.setEnabled(False)\n #basilinca calisacak fonksiyonun tanimlanmasi\n self.radPySaver.toggled.connect(self.button_state)\n\n #Yukarida bahsedilen pasif olan diger secim aracinin tanimlanmasi\n radUiLocation = QtGui.QRadioButton(\"\", self) \n radUiLocation.setGeometry(165, 40, 75, 23)\n\n #Convert islemini yapacak dugmenin tanimlanmasi\n self.btnConvert = QtGui.QPushButton(\"Convert Ui to Py\", self) \n self.btnConvert.setGeometry(20, 73, 240, 23)\n self.btnConvert.setIcon(QtGui.QIcon(r\"C:\\Program Files (x86)\\ArcGIS\\Desktop10.4\\bin\\Icons\\GenericRefresh_B_32.png\"))\n #dugmenin renginin degistirilmesi\n color = QtGui.QColor(247, 149, 89) \n self.btnConvert.setStyleSheet(\"background-color: %s\" %color.name())\n #basilinca calisacak fonksiyonun tanimlanmasi\n self.btnConvert.clicked.connect(self.convertAction) \n\n #tum arayuzun temasinin degistirilmesi\n QtGui.QApplication.setStyle(QtGui.QStyleFactory.create('Plastique')) \n #bilesenleri iceren fonksiyonun gosterilmesi\n self.show() \n\n\n def uiChoose(self):\n #convert edilecek dosyayi secen aracin ve girilebilecek formatin tanimlanmasi\n #eger bir dosya secildiyse dosyanin konumunu ve dosya adini ayristiran, \n #dugmenin ismini dosya ismi ile degistiren aracin tanimlanmasi\n uiFile = QtGui.QFileDialog.getOpenFileNameAndFilter(self, \"Please choose Ui file\", \"\", \"*.ui\")\n if not uiFile[0] == \"\":\n self.uiFilePath, self.uiFileName = os.path.split(str(uiFile[0]))\n self.btnUiChooser.setText(self.uiFileName)\n\n\n def pySave(self):\n #kayit yapilacak yeri secen aracin ve girilebilecek formatin tanimlanmasi\n #eger bir dosya secildiyse dosyanin konumunu ve dosya adini ayristiran, \n #dugmenin ismini dosya ismi ile degistiren aracin tanimlanmasi\n saveFile = QtGui.QFileDialog.getSaveFileNameAndFilter(self, \"Please choose save location\", \"\", \"*.py\") \n if not saveFile[0] == \"\":\n self.saveFilePath, self.saveFileName = os.path.split(str(saveFile[0]))\n self.btnPySaver.setText(self.saveFileName)\n\n\n def button_state(self):\n #radyo dugmeleri secildikce calisacak kodun tanimlanmasi. 
Label ve diger secimin dugmeleri pasif olmaktadir\n if self.radPySaver.isChecked() == True:\n self.lblUiLocation.setEnabled(False)\n self.btnPySaver.setEnabled(True)\n else:\n self.lblUiLocation.setEnabled(True)\n self.btnPySaver.setEnabled(False)\n\n\n def convertAction(self):\n #donusturme dugmesine basilinca calisacak kodun tanimlanmasi. Donusum islemini yapacak bat dosyasinin konumu belirlenir, \n #ui dosyasinin konumu ve dosya adi+formati tutulur\n bat = r\"C:\\Python27\\ArcGIS10.4\\Lib\\site-packages\\PyQt4\\pyuic4.bat\"\n ui = (self.uiFilePath + \"/\" + self.uiFileName)\n \n #Py dosyasi olarak kaydedilme secenegi secildiyse py'in konumunun, \n #isaretlenmediyse ui dosyasinin konumunun ve adinin belirlenmesi, sonuna .py yazilmasi\n if self.radPySaver.isChecked() == True:\n pay = (self.saveFilePath + \"/\" + self.saveFileName)\n else:\n pay = (self.uiFilePath + \"/\" + (self.uiFileName).split(\".\")[0] + \".py\")\n \n os.system((\"%s -x %s -o %s\") %(bat, ui, pay))\n\n\n#tum kodun calistirilmasi, sinifin orneklenmesi, kod basariyla calistiysa bilgi mesajinin sys'ye donulmesi\ndef run():\n app = QtGui.QApplication(sys.argv)\n GUI = Converter()\n sys.exit(app.exec_())\n\nrun()\n","sub_path":"Python2/PyQt4/PyUiConverter.py","file_name":"PyUiConverter.py","file_ext":"py","file_size_in_byte":6032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"194919961","text":"try:\n\tfrom celery.decorators import task\nexcept ImportError:\n\tdef task(ignore_result=None):\n\t\treturn lambda x: x\n\n\n@task(ignore_result=True)\ndef update_all_partner_posts(celery=True):\n\treturn update_all_partner_posts_task(celery=celery)\n\t\ndef update_all_partner_posts_task(celery=True):\n\t\"\"\"\n\tFetch all partners, and for each one, pass the feed_url to update_posts_for_feed\n\t\"\"\"\n\tfrom partner_feeds.models import Partner\n\tfrom datetime import datetime\n\n\tpartners = Partner.objects.all()\n\tfor partner in partners:\n\t\t# find all the posts in the current partner feeds and update them\n\t\tif celery:\n\t\t\tupdate_posts_for_feed.delay(partner)\n\t\telse:\n\t\t\tupdate_posts_for_feed(partner)\n\t\t\t\n\t\t# Set the current time as when the partner feed was last retrieved\n\t\tPartner.objects.filter(pk=partner.pk).update(date_feed_updated=datetime.now())\n\t\t\n@task(ignore_result=True)\ndef update_posts_for_feed(partner):\n\treturn update_posts_for_feed_task(partner)\n\t\ndef update_posts_for_feed_task(partner):\n\t\"\"\"\n\tLoad and parse the RSS or ATOM feed associated with the given feed url, and for each entry, parse out the individual\n\tentries and save each one as a partner_feeds.\n\t\"\"\"\n\tfrom feedparser import parse\n\tfrom partner_feeds.models import Post\n\timport timelib, re, time\n\n\tfeed = parse(partner.feed_url)\n\n\tfor entry in feed.entries:\n\t\tp = Post()\n\t\ttry:\n\t\t\t\n\t\t\tp.partner_id = partner.id\n\t\t\tp.title = entry.title\n\n\t\t\tp.subheader = entry.summary\n\t\t\t\n\t\t\ttry:\n\t\t\t\tp.author = entry.author\n\t\t\texcept AttributeError:\n\t\t\t\tpass\n\n\t\t\ttry:\n\t\t\t\tp.guid = entry.id\n\t\t\texcept AttributeError:\n\t\t\t\tp.guid = entry.link\n\n\t\t\tp.url = entry.link\n\n\t\t\t# try to get the date of the entry, otherwise, try the date of the feed\n\t\t\ttry:\n\t\t\t\tentry_date = re.sub('\\|','', entry.date)\n\t\t\t\tentry_date = timelib.strtotime(entry_date) # convert to a timestamp\n\t\t\t\tentry_date = time.localtime(entry_date) # converts to a time.struct_time (with regards to local 
timezone)\n\t\t\t\tentry_date = time.strftime(\"%Y-%m-%d %H:%M:%S\", entry_date) # converts to mysql date format\n\t\t\t\tp.date = entry_date\n\t\t\texcept AttributeError:\n\t\t\t\tp.date = time.strftime(\"%Y-%m-%d %H:%M:%S\", feed.date)\n\n\t\t\tp.save()\n\t\texcept AttributeError:\n\t\t\t# needs logging\n\t\t\tpass\n\n@task(ignore_result=True)\ndef delete_old_posts(num_posts_to_keep=20, celery=True):\n\t\"\"\" \n\tFetch all partners, and for each partner,\n\tdelete all but `num_posts_to_keep` number of posts\n\t\"\"\"\n\tfrom partner_feeds.models import Partner\n\n\tpartners = Partner.objects.all()\n\t\n\tfor partner in partners:\n\t\tif celery: \n\t\t\tdelete_old_posts_for_partner.delay(partner, num_posts_to_keep)\n\t\telse:\n\t\t\tdelete_old_posts_for_partner(partner, num_posts_to_keep)\n\t\t\t\n@task(ignore_result=True)\ndef delete_old_posts_for_partner(partner, num_posts_to_keep=20):\n\t\"\"\"\n\tDeletes all posts except for the most recent `num_posts_to_keep`\n\tBecause Django won't let us do a delete of a query with an offset, we first find\n\tthe IDs of the posts that we want to keep and then exclude them from the delete.\t\n\t\"\"\"\n\tfrom partner_feeds.models import Post\n\t\n\trecent_posts = list(Post.objects.filter(partner=partner).values_list('id', flat=True)[:num_posts_to_keep])\n\t\n\tPost.objects.filter(partner=partner).exclude(pk__in=recent_posts).delete()","sub_path":"partner_feeds/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":3190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"34561989","text":"#!/usr/bin/python3\n\nimport copy\nimport getopt\nimport json\nimport logging\nfrom mpi4py import MPI\nimport os\nimport socket\nimport subprocess\nimport sys\n\nANALYSIS = \"analysis\"\nCMDLINE = \"cmd\"\nLOGFILE = \"logfile\"\nOUTPUT_PATH = \"results\"\n\n\nclass MPIMain:\n \"\"\"\n A wrapper class used by the MPIPortfolioAlgorithm in CPAchecker. It uses MPI to\n start additional CPAchecker instances in which the actual analyses are concurrently\n executed on different processes. The command line for the subprocess is decided by\n the number of the MPI rank.\n \"\"\"\n\n comm = MPI.COMM_WORLD\n input_args = {}\n analysis_param = {}\n\n main_node_network_config = None\n\n def __init__(self, argv):\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s: %(message)s\",\n datefmt=\"%Y-%d-%m %I:%M:%S\",\n level=logging.DEBUG,\n ) # TODO change logging level to INFO\n\n # Name of the processor\n self.name = MPI.Get_processor_name()\n\n # Number of total processes\n self.size = self.comm.Get_size()\n # Rank of the this process\n self.rank = self.comm.Get_rank()\n\n try:\n logging.debug(\"Input of user args: %s\", str(argv))\n opts, args = getopt.getopt(argv, \"di:\", [\"input=\"])\n except getopt.GetoptError:\n logging.critical(\n \"Unable to parse user input. 
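The feed-date handling above strips a '|' character and round-trips through timelib and time.localtime; feedparser itself already parses dates into struct_time fields where it can, which a short fallback chain can use directly (entry_datetime is a hypothetical helper name):

import time

def entry_datetime(entry):
    # feedparser exposes *_parsed struct_time fields when it can parse a date.
    parsed = getattr(entry, 'published_parsed', None) or \
             getattr(entry, 'updated_parsed', None)
    if parsed is None:
        return None
    return time.strftime('%Y-%m-%d %H:%M:%S', parsed)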
Usage: %s -d -i \", __file__\n )\n sys.exit(2)\n\n for opt, arg in opts:\n if opt == \"-d\":\n logging.basicConfig(level=logging.DEBUG)\n elif opt in (\"-i\", \"--input\"):\n if isinstance(arg, str):\n self.input_args = eval(arg)\n elif isinstance(arg, dict):\n self.input_args = arg\n else:\n logging.critical(\"Input has an invalid type: %s\", type(arg))\n sys.exit(2)\n\n self.main_node_network_config = self.input_args.get(\n \"main_node_network_settings\"\n )\n if self.main_node_network_config is not None:\n aws_main_ip = os.environ.get(\"AWS_BATCH_JOB_MAIN_NODE_PRIVATE_IPV4_ADDRESS\")\n if (\n aws_main_ip is not None\n and self.main_node_network_config[\"main_node_ipv4_address\"]\n == aws_main_ip\n ):\n logging.critical(\"Inconsistent ip addresses for main node received.\\n\")\n sys.exit(2)\n\n logging.debug(json.dumps(self.input_args, sort_keys=True, indent=4))\n\n def print_self_info(self):\n \"\"\"Print an info about the proccesor name, the rank of the executed process, and\n the number of total processes.\"\"\"\n logging.info(\n \"Executing process on '{}' (rank {} of {})\".format(\n self.name, self.rank, self.size\n )\n )\n\n def execute_verifier(self):\n logging.debug(\"Running script %s from dir '%s'\", __file__, os.getcwd())\n cmdline = self.analysis_param[CMDLINE]\n if cmdline is None:\n logging.info(\n \"Cmdline does not contain any input. Will not do anything (Rank %d).\",\n self.rank,\n )\n else:\n logging.info(\"executing cmd: %s\", cmdline)\n # Redirect all output from the errorstream in the child CPAchecker\n # instances, such that the output log stays consistent\n process = subprocess.run(cmdline, stderr=sys.stdout.buffer)\n logging.info(\"Process exited with status code %d\", process.returncode)\n\n def prepare_cmdline(self):\n logging.info(\"Running analysis with number: %d\", self.rank)\n if self.rank <= len(self.input_args) - 1:\n analysis_args = self.input_args.get(\"Analysis_{}\".format(self.rank))\n if analysis_args is None:\n logging.warning(\"No arguments for the analysis found.\")\n else:\n\n def replace_escape_chars(d):\n for key, value in d.items():\n if isinstance(value, str):\n d[key] = value.replace(\"\\\\\", \"\")\n elif isinstance(value, list):\n d[key] = [w.replace(\"\\\\\", \"\") for w in value]\n\n self.analysis_param = copy.deepcopy(analysis_args)\n replace_escape_chars(self.analysis_param)\n\n logging.debug(\"Running analysis: %s\", self.analysis_param[ANALYSIS])\n logging.debug(\"Running cmd: %s\", self.analysis_param[CMDLINE])\n logging.debug(\"Writing log in file: %s\", self.analysis_param[LOGFILE])\n logging.debug(\"Storing output in dir: %s\", self.analysis_param[OUTPUT_PATH])\n\n def push_results_to_master(self):\n if self.main_node_network_config is None:\n logging.info(\n \"Already on main node. Result files are already \"\n \"in the correct location.\"\n )\n else:\n # Get the local ip adress and compare it with the address from the main node\n # If they differ, create an ssh-connection and push all result files to the\n # main node. 
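Passing the raw -i argument to eval executes arbitrary Python from the command line; ast.literal_eval parses dict literals without evaluating code, and json.loads covers a JSON payload. A sketch of the same parse (parse_input is a hypothetical name):

import ast
import json

def parse_input(arg):
    if isinstance(arg, dict):
        return arg
    try:
        return ast.literal_eval(arg)   # Python dict literal, no code execution
    except (ValueError, SyntaxError):
        return json.loads(arg)         # JSON fallback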
Otherwise, do nothing.\n hostname = socket.gethostname()\n local_ip_address = socket.gethostbyname(hostname)\n if (\n local_ip_address\n != self.main_node_network_config[\"main_node_ipv4_address\"]\n ):\n logging.info(\"Copy result files via scp to the main-node\")\n scp_cmd = [\n \"scp\",\n \"-r\",\n self.analysis_param[OUTPUT_PATH],\n \"{}@{}:{}/{}\".format(\n self.main_node_network_config[\"user_name_main_node\"],\n self.main_node_network_config[\"main_node_ipv4_address\"],\n self.main_node_network_config[\"project_location_main_node\"],\n self.analysis_param[OUTPUT_PATH],\n ),\n ]\n subprocess.run(scp_cmd)\n\n\ndef main():\n mpi = MPIMain(sys.argv[1:])\n mpi.print_self_info()\n mpi.prepare_cmdline()\n mpi.execute_verifier()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"scripts/mpi_portfolio.py","file_name":"mpi_portfolio.py","file_ext":"py","file_size_in_byte":6224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"512750252","text":"import csv\nfrom django.core.management.base import BaseCommand\nfrom phones.models import Phone\n\n\nclass Command(BaseCommand):\n def add_arguments(self, parser):\n pass\n\n def handle(self, *args, **options):\n with open('phones.csv', 'r') as csvfile:\n phone_reader = csv.DictReader(csvfile, delimiter=';')\n for line in phone_reader:\n slug = line['name'].lower().replace(' ', '_')\n line['slug'] = slug\n model = Phone(id = line['id'],\n name = line['name'],\n image = line['image'],\n price = line['price'],\n release_date = line['release_date'],\n lte_exists = line['lte_exists'],\n slug = line['slug']\n )\n model.save()\n\n","sub_path":"databases/work_with_database/phones/management/commands/import_phones.py","file_name":"import_phones.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"5829143","text":"#! python2\n\n#### Andy Bowling\n#### June 29, 2015\n#### Magic Numbers challenge from https://www.codeeval.com/open_challenges/193/\n\n#### A magic number is a number that has two characteristics:\n#### 1. No digits repeat.\n####\n#### 2. Beginning with the leftmost digit, take the value of the digit and move\n#### that number of digits to the right. Repeat the process again using the\n#### value of the current digit to move right again. Wrap back to the leftmost\n#### digit as necessary. A magic number will visit every digit exactly once and\n#### end at the leftmost digit.\n####\n#### Given two numbers per line, find all magic numbers between them (inclusive)\n\n\nimport sys\n\nwith open(sys.argv[1]) as a:\n\tfor line in a:\n\t\tif not line == '\\n':\n\t\t\t# initialize count_magic. We use this later to print -1 if no\n\t\t\t# magic numbers are in our range\n\t\t\tcount_magic = 0\n\t\t\t\n\t\t\t# read file for bottom and top numbers\n\t\t\tbottom = int(line.split()[0])\n\t\t\ttop = int(line.split()[1])\n\t\t\t\n\t\t\tfor i in range(bottom, top + 1):\n\t\t\t# loop through integers, adding each to match_array to check for duplicates\n\t\t\t\tnum_as_str = str(i)\n\t\t\t\tnum_length = len(num_as_str)\n\t\t\t# Boolean magic var. If breaks any rule, value goes to 0\n\t\t\t\tmagic = 1\n\t\t\t\tcur_index = 0\n\t\t\t\tmatch_array = []\n\t\t\t\tfor i in range(num_length):\n\t\t\t\t# iterate through the number. 
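The digit-walk test that the loop above performs can be packaged as a stand-alone predicate, which makes the wrap-around and the end-at-index-0 condition easier to see (is_magic is a hypothetical name; the logic mirrors the script, including treating a single digit that wraps back to index 0 as magic):

def is_magic(n):
    s = str(n)
    length = len(s)
    seen = set()
    idx = 0
    for _ in range(length):
        digit = int(s[idx])
        if digit in seen:             # a repeated digit disqualifies it
            return False
        seen.add(digit)
        idx = (idx + digit) % length  # move right, wrapping as needed
    return idx == 0                   # must finish back at the leftmost digit

assert is_magic(8)                    # a step of 8 wraps straight back to index 0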
Index is added to value and modulo used\n\t\t\t\t# to keep index in bounds\n\t\t\t\t\tcur_number = int(num_as_str[cur_index])\n\t\t\t\t\tif cur_number in match_array:\n\t\t\t\t\t\tmagic = 0\n\t\t\t\t\telse:\n\t\t\t\t\t\tmatch_array.append(cur_number)\n\t\t\t\t\t\tcur_index = (cur_index + cur_number) % num_length\n\t\t\t\t\n\t\t\t\t# make sure the final number takes us back to index 0\n\t\t\t\tif not cur_index == 0:\n\t\t\t\t\tmagic = 0\n\t\t\t\t\n\t\t\t\t# print if valid, add to count so -1 isn't printed\n\t\t\t\tif magic == 1:\n\t\t\t\t\tprint(num_as_str),\n\t\t\t\t\tcount_magic += 1\n\t\t\t\t\t\n\t\t\tif count_magic == 0:\n\t\t\t\tprint('-1'),\n\t\t\t\n\t\t\tprint('')","sub_path":"MagicNum/magicnum.py","file_name":"magicnum.py","file_ext":"py","file_size_in_byte":1886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"208473980","text":"import sys\n\ninstruction_codes = [\n\t('mov', 0x1),\n\t('jmp', 0x2),\n\t('add', 0x3),\n\t('sht', 0x4),\n\t('out', 0x5),\n\t('in' , 0x6)\n]\n\ndef main():\n\tinput_path = sys.argv[1];\n\toutput_path = sys.argv[2];\n\n\tin_f = open(input_path, 'r')\n\tou_f = open(output_path, 'wb')\n\n\tfor line in in_f:\n\t\tou_f.write(asm_line(line))\n\ndef asm_line(line):\n\tbinary = bytearray()\n\telements = line.split(' ')\n\telements = [e.replace('\\n', '') for e in elements]\n\tinstruction_code = get_instruction_code(elements[0])\n\toperands = asm_operands(elements[1:])\n\n\tbinary.append(instruction_code)\n\tbinary += operands\n\treturn binary\n\ndef get_instruction_code(word):\n\tfor instruction in instruction_codes:\n\t\tif instruction[0] == word:\n\t\t\treturn instruction[1]\n\tprint('[ERROR] Unknown instruction: {0}'.format(word))\n\ndef asm_operands(operands):\n\tarray = bytearray()\n\tfor operand in operands:\n\t\tarray += asm_operand(operand.replace(',', ''))\n\treturn array\n\ndef asm_operand(operand_str):\n\tarray = bytearray()\n\tif operand_str[0] == 'r':\n\t\tarray.append(0)\n\t\tarray += bytearray(int32_to_int8(int(operand_str[1:])))\n\telif operand_str.startswith('0x'):\n\t\toperand_str = operand_str.replace('0x', '')\n\t\tarray.append(2)\n\t\tarray += bytearray(int32_to_int8(int(operand_str, 16)))\n\telse:\n\t\tprint('[ERROR] Bad operand: {0}'.format(operand_str))\n\treturn array\n\ndef int32_to_int8(int32):\n\tmask = (1 << 8) - 1\n\treturn [(int32 >> k) & mask for k in range(0, 32, 8)]\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"asm.py","file_name":"asm.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"111156009","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Jun 14 17:31:42 2020\r\n\r\n@author: DHRUV\r\n\"\"\"\r\n\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport cv2\r\n\r\ny=np.array([1,2,3,4,5])\r\nx=y**2\r\nplt.plot(x,y)\r\nplt.show()\r\n\r\nimg=cv2.imread('dhruv.jpeg')\r\nrgb_img=cv2.cvtColor(img,cv2.COLOR_BGR2RGB)\r\nplt.imshow(rgb_img)\r\nplt.axis('off') \r\nprint(img.shape)\r\nplt.imshow(img[0:950,200:950,:])","sub_path":"DS QuickstartMode/pythonrevision3.py","file_name":"pythonrevision3.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"415877773","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom scipy.stats import ttest_ind, ttest_rel\r\nfrom skimage.util import random_noise\r\nfrom scipy.ndimage import gaussian_filter\r\nfrom 
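The assembler's int32_to_int8 above splits a 32-bit value into four little-endian bytes; for non-negative values this is exactly struct.pack('<I', ...) from the standard library, which makes a handy cross-check:

import struct

def int32_to_int8(int32):
    # Same shift-and-mask byte split as the assembler above.
    mask = (1 << 8) - 1
    return [(int32 >> k) & mask for k in range(0, 32, 8)]

# The four bytes match struct's little-endian unsigned packing.
assert int32_to_int8(0x12345678) == list(struct.pack('<I', 0x12345678))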
joblib import Parallel\r\n\r\n\r\nimageSize = (128,128)\r\n\r\nregionCentre = (64,64)\r\nregionSize = (16,16)\r\n\r\nNEvent = 50\r\nNNoEvent = 50\r\n\r\nNperm = 1000\r\n\r\ndef genSDImage(loc, scale, amp):\r\n sd = np.zeros(imageSize)\r\n gaus = lambda x,mu,s : np.exp(-(x - mu)**2/(2.0*s**2))\r\n for i in range(0,imageSize[0]):\r\n sd[i,:] = gaus(i, loc, scale)\r\n\r\n return amp * sd\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n\r\n sd = genSDImage(32, 32, 10)\r\n meanDoseE = np.zeros(imageSize)\r\n meanDoseE[:,:] = 60.0\r\n meanDoseN = np.zeros(imageSize)\r\n meanDoseN[:,:] = 60.0\r\n\r\n meanDoseE[regionCentre[0]-regionSize[0]:regionCentre[0]+regionSize[0], regionCentre[1]-regionSize[1]:regionCentre[1]+regionSize[1] ] = 58.0\r\n meanDoseE = random_noise(meanDoseE, clip=False, var=1.0)\r\n\r\n meanDoseN[regionCentre[0]-regionSize[0]:regionCentre[0]+regionSize[0], regionCentre[1]-regionSize[1]:regionCentre[1]+regionSize[1] ] = 60.0\r\n meanDoseN = random_noise(meanDoseN, clip=False, var=1.0)\r\n\r\n\r\n\r\n\r\n simDoseE = np.zeros((NEvent, imageSize[0], imageSize[1])) \r\n simDoseN = np.zeros((NNoEvent, imageSize[0], imageSize[1]))\r\n\r\n print(simDoseE.shape)\r\n for i in range(0, NEvent):\r\n simDoseE[i, :,:] = gaussian_filter(np.random.normal(loc=meanDoseE, scale=sd, size=(imageSize[0], imageSize[1])), 5)\r\n for i in range(0, NNoEvent):\r\n simDoseN[i, :,:] = gaussian_filter(np.random.normal(loc=meanDoseN, scale=sd, size=(imageSize[0], imageSize[1])), 5)\r\n\r\n fig = plt.figure()\r\n ax1 = fig.add_subplot(121)\r\n ax2 = fig.add_subplot(122)\r\n \r\n # ax1.imshow(meanDoseN)\r\n # ax2.imshow(meanDoseE)\r\n\r\n ax1.imshow(simDoseE[0,:,:])\r\n ax1.set_title(\"Events\")\r\n ax2.imshow(simDoseN[0,:,:])\r\n ax2.set_title(\"No events\")\r\n plt.show()\r\n\r\n\r\n t_true, p_analytical = ttest_ind(simDoseE, simDoseN, axis=0)\r\n print(t_true.shape, p_analytical.shape)\r\n\r\n fig2 = plt.figure(2)\r\n ax2_1 = fig2.add_subplot(121)\r\n ax2_2 = fig2.add_subplot(122)\r\n ax2_1.imshow(t_true)\r\n # ax2_2.imshow(simDoseE[:,:,0])\r\n ax2_2.contour(p_analytical, levels=[0.05])\r\n\r\n plt.show()\r\n\r\n stackedDoses = np.vstack((simDoseE, simDoseN))\r\n\r\n print(stackedDoses.shape)\r\n\r\n permutedT = np.zeros((Nperm, *imageSize))\r\n perm_p = np.ones(imageSize)\r\n\r\n for n in range(0, Nperm):\r\n permuted = np.random.permutation(stackedDoses)\r\n permutedT[n,:,:], _ = ttest_ind(permuted[:NEvent], permuted[NNoEvent:], axis=0)\r\n perm_p[np.where(np.abs(permutedT[n,:,:]) > np.abs(t_true))] += 1\r\n\r\n perm_p /= Nperm\r\n\r\n fig3 = plt.figure(3)\r\n ax3_1 = fig3.add_subplot(121)\r\n ax3_2 = fig3.add_subplot(122)\r\n ax3_1.imshow(t_true)\r\n ax3_2.contour(perm_p, levels=[0.05], colors='r')\r\n ax3_2.contour(p_analytical, levels=[0.05], colors='b')\r\n plt.show()\r\n\r\n","sub_path":"generateDoses.py","file_name":"generateDoses.py","file_ext":"py","file_size_in_byte":2994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"348239663","text":"'''\nReference - https://datascienceschool.net/view-notebook/ae35a40deb884cf88e85135b4b5a1130/\nbayesian update\n'''\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.stats import beta, bernoulli\n\n# prior dist.\n'''\nConsider coin toss game\nupside is winning\n'''\n\ndef getBetaMode(a, b):\n '''\n to get most frequent value\n from parameter a and b of beta dist.\n '''\n return (a - 1) / (a + b - 2)\n\nmu0 = 0.65 # initial parameter (mu= prob. 
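The permutation loop above pools both dose stacks, reshuffles them Nperm times, recomputes the t-map, and counts how often the permuted |t| beats the observed one, starting the count at one (perm_p is initialised with np.ones). The same idea in one dimension, assuming SciPy (perm_test is a hypothetical name):

import numpy as np
from scipy.stats import ttest_ind

def perm_test(a, b, n_perm=1000, seed=0):
    rng = np.random.default_rng(seed)
    t_obs, _ = ttest_ind(a, b)
    pooled = np.concatenate([a, b])
    count = 1                                  # add-one start, as in the loop above
    for _ in range(n_perm):
        perm = rng.permutation(pooled)
        t, _ = ttest_ind(perm[:len(a)], perm[len(a):])
        if abs(t) > abs(t_obs):
            count += 1
    return count / n_perm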
of success )\na = [1]\nb = [1]\nz = []\nprint(\"Initial Inf. Mode = ???\")\n\nx_simulation = np.linspace(0, 1, 1000)\nmodel0 = beta(a[-1], b[-1])\nz0 = model0.pdf(x_simulation)\nz.append(z0)\n\nplt.plot(x_simulation, z0, ls=\":\", label=\"Initial\")\n\n'''\nbernoulli parameter inference by using non-informative (beta(1, 1))\n-> the parametric method\n'''\ntrial_tot = 3\nsampling = 50 # size of each bernoulli sampling\n\n# to save results\nfor i in range(trial_tot):\n x_bern = bernoulli(mu0).rvs(sampling)\n num_fail, num_success = np.bincount(x_bern, minlength=2)\n a_ = a[-1] + num_success\n b_ = b[-1] + num_fail\n\n model_ = beta(a_, b_)\n z_ = model_.pdf(x_simulation)\n\n a.append(a_)\n b.append(b_)\n z.append(z_)\n\n# to visualize\ntrial = 0\nfor aa, bb, zz in zip(a, b, z):\n trial += 1\n plt.plot(x_simulation, zz, ls=\"-.\", label=\"Inference {}\".format(trial))\n # print(\"Inf. {}: Mode = {:4.2f}\".format(trial, getMode(aa, bb)))\n plt.vlines(x=0.65, ymin=0, ymax=12)\n plt.ylim(0, 12)\n plt.legend()\n plt.title(\"Bayesian Inference for Bernoulli Mu\")\n plt.show()\n\n# to get mode(most frequent mu)\ntrial = 0\nfor aa, bb in zip(a, b):\n trial += 1\n if aa != 1 or bb != 1:\n print(\"Inf. {}: Mode = {:4.2f}\".format(trial, getBetaMode(aa, bb)))\n","sub_path":"bayes_bernoulli.py","file_name":"bayes_bernoulli.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"106920057","text":"# -*- coding: utf-8 -*-\n# @Time : 2018/07/16 21:01:14\n# @Author : Izumi Sakai\n# @File : logging_module.py\n# @Software: PyCharm\nimport logging\n\n\n# logging.basicConfig(level=logging.DEBUG,\n# format='%(asctime)s %(filename)s [line:%(lineno)d] %(levelname)s %(message)',\n# datefmt='%a %d %b %Y %H:%M:%S',\n# filename='test.log',\n# filemode='w',\n# )\n#\n\n# 模块级别的函数\nlogger = logging.getLogger() # 拿到一个日志对象\nfh = logging.FileHandler('./test.log') # 文件对象\n\nch = logging.StreamHandler() # 屏幕对象\n\nformatter = logging.Formatter('%(asctime)s %(filename)s [line:%(lineno)d] %(levelname)s %(message)') # 标准流对象\nfh.setFormatter(formatter) # 取来格式\n\nch.setFormatter(formatter)\n\nlogging.debug('1') # 日志级别上到下递增\nlogging.info('2') #级别可修改\nlogging.warning('3')\nlogging.error('4')\nlogging.critical('5')\n\nlogger.addHandler(fh)\nlogger.addHandler(ch)\n\n\n\n","sub_path":"模块和包/logging_module.py","file_name":"logging_module.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"314983398","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\nimport os\r\n\r\ncount = 0\r\nbirds = ''\r\nasks = ''\r\narr_bird = []\r\narr_asks = []\r\narr_price_green = []\r\narr_price_green_2 = []\r\narr_price_red = []\r\narr_price_red_2 = []\r\nsum_bird = 0\r\nsum_asks = 0\r\n\r\nval = \"HOT\"\r\nurl = \"https://www.binance.com/api/v1/depth?symbol=\" + val + \"ETH\"\r\nresponse = requests.get(url)\r\nsoup = str(BeautifulSoup(response.text, \"html.parser\"))\r\nsoup += \"&\"\r\n# print(soup)\r\n# print(\"________________\")\r\n\r\nwhile soup[count] != '&':\r\n if soup[count] == ',' and soup[count+1] == '\"' and soup[count+2] == 'a':\r\n break\r\n else:\r\n count += 1\r\nbirds = str(soup[7 + 27:3289]) + ']&'\r\n# print(birds)\r\nasks = str(soup[3291+8:-3]) + ']&'\r\n\r\n\r\n\r\ncount = 0\r\nfor i in range(12):\r\n while birds[count] != '[':\r\n count += 1\r\n m_str = birds[count + 2: count + 12]\r\n 
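The inference loop above is the standard conjugate Beta-Bernoulli update: each batch adds its success count to a and its failure count to b, and getBetaMode then reads off (a-1)/(a+b-2). The update in isolation, with a tiny hypothetical batch:

import numpy as np

def update(a, b, samples):
    # Successes add to a, failures add to b; mode is (a-1)/(a+b-2) for a, b > 1.
    successes = int(np.sum(samples))
    failures = len(samples) - successes
    return a + successes, b + failures

a, b = 1, 1                           # non-informative Beta(1, 1) prior
a, b = update(a, b, [1, 0, 1, 1])     # hypothetical batch: 3 successes, 1 failure
print((a - 1) / (a + b - 2))          # posterior mode, here 0.75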
arr_price_green.append(\"{:.8f}\".format(float(m_str)))\r\n count += 1\r\n# print(arr_price_green)\r\n\r\ncount = 0\r\nfor i in range(len(birds)):\r\n if birds[count] == '\"' and birds[count + 1] == ',' and birds[count + 2] == '\"':\r\n m_str = birds[count + 3:count + 12]\r\n arr_price_green_2.append(m_str)\r\n count += 1\r\n else:\r\n count += 1\r\n if len(arr_price_green_2) == 12:\r\n break\r\n# print(arr_price_green_2)\r\n\r\nfor i in range(len(arr_price_green)):\r\n a = float(arr_price_green[i])\r\n b = float(arr_price_green_2[i])\r\n arr_bird.append(a * b)\r\nsum_bird = sum(arr_bird)\r\n\r\n# -----------------------------\r\n\r\n\r\ncount = 0\r\nfor i in range(12):\r\n while asks[count] != '[':\r\n count += 1\r\n m_str = asks[count+2: count+12]\r\n arr_price_red.append(\"{:.8f}\".format(float(m_str)))\r\n count += 1\r\n# print(arr_price_green)\r\n\r\ncount = 0\r\nfor i in range(len(asks)):\r\n if asks[count] == '\"' and asks[count+1] == ',' and asks[count+2] == '\"':\r\n m_str = asks[count+3:count+12]\r\n arr_price_red_2.append(m_str)\r\n count += 1\r\n else:\r\n count += 1\r\n if len(arr_price_red_2) == 12:\r\n break\r\n# print(arr_price_green_2)\r\n\r\nfor i in range(len(arr_price_green)):\r\n a = float(arr_price_red[i])\r\n b = float(arr_price_red_2[i])\r\n arr_asks.append(a * b)\r\nsum_asks = sum(arr_asks)\r\n\r\n# -----------------------------------\r\n# print(sum_bird)\r\n# print(sum_asks)\r\nprint(\"------------------------------\")\r\n\r\nurl = \"https://www.binance.com/api/v3/ticker/price?symbol=HOTETH\"\r\nprice_old = 0\r\ninc = 1\r\n\r\nresponse = requests.get(url)\r\nsoup = str(BeautifulSoup(requests.get(url).text, \"html.parser\"))\r\nprint('Текущий курс по монете -', soup[28:38])\r\nprice = float(soup[28:38])\r\n\r\nprint(\"-------------------------------\")\r\nanswer = int(input('Вы собираетесь покупать монеты или же хотите продать?\\n1 - покупка, 2 - продажа,3 - получить совет, 4 - выход\\n'))\r\nos.system('cls')\r\nprint(\"Данная программа не покупает и не продает никакие валюты, а лишь просчитывает возможный курс после возможной транзакции.\\nНаша организация ответственности за ущер и крах не несет. Спасибо, что выбираете 'SiBears New'!\")\r\nc = price\r\nif answer == 1:\r\n k = sum_bird\r\n n = int(input(\"Введите ваш возможный баланс, на который вы хотите закупить монету Money\\n\"))\r\n for i in range(10):\r\n c = c * (1 - k / (k + c * n / 10)) + c\r\n print(i, c)\r\n print(\"Курс изменится на такой\", \"{:.10f}\".format(float(c)), 'после вашего возможного трейда')\r\nelif answer == 2:\r\n k = sum_asks\r\n n = int(input(\"Введите вашу возможную сумму на которую вы хотите продать монеты(монету) Money\\n\"))\r\n for i in range(10):\r\n c = (k / (k + c * n / 10)) * c\r\n c1 = \"{:.8f}\".format(float(c))\r\n # print(i, c1)\r\n print(\"Курс изменится на такой\", c1, 'после вашего возможного трейда')\r\nelif answer == 3:\r\n os.system('info.py')\r\nelif answer == 4:\r\n exit()\r\nprint('Хотите начать все сначала? 
Тогда нажмите 1, иначе любую другую клавишу')\r\nanswer = int(input())\r\nif answer == 1:\r\n os.system('start_programm.py')\r\n","sub_path":"run_hot.py","file_name":"run_hot.py","file_ext":"py","file_size_in_byte":4469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"18684801","text":"\"\"\"features table\n\nRevision ID: d672f2011872\nRevises: \nCreate Date: 2019-07-21 15:51:47.864168\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'd672f2011872'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('features',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('key', sa.String(length=64), nullable=True),\n sa.Column('value', sa.String(length=64), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_features_key'), 'features', ['key'], unique=False)\n op.create_index(op.f('ix_features_value'), 'features', ['value'], unique=False)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_index(op.f('ix_features_value'), table_name='features')\n op.drop_index(op.f('ix_features_key'), table_name='features')\n op.drop_table('features')\n # ### end Alembic commands ###\n","sub_path":"server/migrations/versions/d672f2011872_features_table.py","file_name":"d672f2011872_features_table.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"201850701","text":"import cv2\nimport os\nimport numpy as np\nimport math\nfrom utils import *\n\n\ndef rotate_img(img_src):\n\n img_gray = cv2.cvtColor(img_src, cv2.COLOR_BGR2GRAY)\n img_mdb = cv2.medianBlur(img_gray, 5)\n edges = cv2.Canny(img_mdb, 150, 200)\n\n # lines = cv2.HoughLines(edges, 1, np.pi / 180, 100)\n # if lines is None:\n # return\n # print(len(lines))\n\n # # This part is for drawing rotate lines and showing\n # drawing = img_src.copy()\n # for line in lines:\n # rho, theta = line[0]\n # # print(line)\n # # print(\"hahah\")\n # a = np.cos(theta)\n # b = np.sin(theta)\n # x0 = a * rho\n # y0 = b * rho\n # x1 = int(x0 + 1000 * (-b))\n # y1 = int(y0 + 1000 * (a))\n # x2 = int(x0 - 1000 * (-b))\n # y2 = int(y0 - 1000 * (a))\n # cv2.line(drawing, (x1, y1), (x2, y2), (0, 0, 255), 2)\n\n # 霍夫曼变换取直线段\n lines = cv2.HoughLinesP(edges, 1.1, np.pi / 180, 90, minLineLength=100, maxLineGap=15)\n if lines is None:\n return img_src\n\n angles_ave = 0\n angles = []\n\n drawing = img_src.copy()\n for line in lines:\n x1, y1, x2, y2 = line[0]\n if x2 - x1 == 0:\n continue\n kslop = (y2 - y1) / (x2 - x1)\n angle = math.atan(kslop) * 180 / math.pi\n if angle >= 25 or angle <= -25:\n continue\n angles.append(angle)\n angles_ave += angle\n cv2.line(drawing, (x1, y1), (x2, y2), (0, 0, 255), 2, lineType=cv2.LINE_AA)\n\n if len(angles) == 0:\n return img_src\n\n angles_ave /= len(angles)\n count_angles = len(angles)\n print(angles_ave, 'ori')\n\n # # roughly screen lines\n # n=0\n # while n < count_angles and count_angles > 1:\n # if abs(angles[n] - angles_ave) > 3:\n # angles_ave = (angles_ave * count_angles - angles[n]) / (count_angles - 1)\n # angles.remove(angles[n])\n # print(\"xx\")\n # count_angles -= 1\n # n = 0\n # n += 1\n\n if angles_ave > 3 and count_angles <= 3:\n return img_src\n\n # screen lines //still need to be 
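The order-book script above recovers prices and quantities by slicing the raw response at hard-coded character offsets, which breaks whenever the payload length shifts; response.json() exposes the bids and asks arrays directly. A sketch of the same price-times-quantity totals (book_notionals is a hypothetical name; the field names follow Binance's documented depth payload):

import requests

def book_notionals(symbol, levels=12):
    url = 'https://www.binance.com/api/v1/depth?symbol=' + symbol + 'ETH'
    book = requests.get(url).json()
    # Each level is [price, quantity, ...]; sum price*quantity as the script
    # does for its arr_bird / arr_asks totals.
    bid_sum = sum(float(lvl[0]) * float(lvl[1]) for lvl in book['bids'][:levels])
    ask_sum = sum(float(lvl[0]) * float(lvl[1]) for lvl in book['asks'][:levels])
    return bid_sum, ask_sum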
optimized\n angles.sort()\n while count_angles > 1:\n max_angle = angles[count_angles - 1]\n min_angle = angles[0]\n if abs(max_angle - angles_ave) > abs(min_angle - angles_ave) and abs(max_angle - angles_ave) >= 2:\n remove_angle = max_angle\n elif abs(min_angle - angles_ave) >= 2:\n remove_angle = min_angle\n else:\n break\n angles_ave = (angles_ave * count_angles - remove_angle) / (count_angles - 1)\n angles.remove(remove_angle)\n count_angles -= 1\n\n print(angles_ave, 'res_angel_ave')\n print('////////')\n height, width = img_src.shape[:2]\n mat = cv2.getRotationMatrix2D((width / 2, height / 2), angles_ave, 1)\n ro_img = cv2.warpAffine(img_src, mat, (width, height))\n # cv2.imshow('rotate', ro_img)\n # cv2.imshow('ori_drawlines', drawing)\n # cv2.waitKey(0)\n return ro_img\n # cv2.imshow('testshow', imgsrcc)\n\n\ndef pre_process_img(img_dir, img_output_dir):\n if file_process.img_is_exist(img_dir, 'jpg'):\n print('img not exists')\n return\n img_src = cv2.imread(img_dir)\n\n img_rotate = rotate_img(img_src)\n # img_blf = cv2.bilateralFilter(img_rotate, 15, 15 * 2, 15 / 2)\n # img_gauss = cv2.GaussianBlur(img_blf, (3, 3), 3)\n # img_resize = cv2.resize(img_gauss, (500, 500))\n\n # img_out = file_process.output_file(img_output_dir, img_dir, '.jpg')\n img_out_ori = file_process.output_file(img_output_dir + 'ori/', img_dir, '.jpg')\n # cv2.imwrite(img_out, img_gauss)\n cv2.imwrite(img_out_ori, img_rotate)\n\n\nimg_output_dir = '/Users/lizhengyang/Desktop/myrecoimg/'\nimg_root_dir = '/Users/lizhengyang/Desktop/xml/'\nimgnames = os.listdir(img_root_dir)\nfor imgname in imgnames:\n img_dir = img_root_dir + imgname\n pre_process_img(img_dir, img_output_dir)\n","sub_path":"img_preprocess/img_rotate.py","file_name":"img_rotate.py","file_ext":"py","file_size_in_byte":3916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"621130681","text":"import cv2\nimport numpy as np\n\n\ndef OcclusionDetector(frame, size=(150, 100), super_pixel_set=(3, 2)):\n \"\"\" OcclusionDetector\n Input:\n Required:(but you do not necessarily need to use)\n frame\n size\n super_pixel_set\n Optinal: (please set a default value)\n ...\n Output:\n score within 0 and 1\n \"\"\"\n\n # resize\n frame = cv2.resize(frame, size)\n # cv2.imshow(\"winname\", frame)\n # cv2.waitKey(0)\n frame = cv2.GaussianBlur(frame, (5, 5), 0)\n # edge detection\n sobelX = cv2.Sobel(frame, cv2.CV_16S, 1, 0, ksize=3)\n sobelY = cv2.Sobel(frame, cv2.CV_16S, 0, 1, ksize=3)\n absX = cv2.convertScaleAbs(sobelX) # 转回uint8\n absY = cv2.convertScaleAbs(sobelY)\n dst = cv2.addWeighted(absX, 0.5, absY, 0.5, 0)\n # thresholding\n ret, thresh = cv2.threshold(dst, 50, 1, cv2.THRESH_BINARY)\n # super pixel setting\n w_num_pixel = int(size[0]/super_pixel_set[0])\n h_num_pixel = int(size[1]/super_pixel_set[1])\n score_mat = np.zeros(super_pixel_set[::-1])\n for w in range(super_pixel_set[0]):\n for h in range(super_pixel_set[1]):\n score = np.mean(thresh[h_num_pixel*h:h_num_pixel *\n (h+1), w_num_pixel*w:w_num_pixel*(w+1)])\n score_mat[h, w] = score\n divider = np.mean(score_mat)\n score = np.std(score_mat) / divider / \\\n 2 if divider > 0 else 1\n # print(score)\n return min(1, score)\n\n\nif __name__ == \"__main__\":\n\n frame = cv2.imread('../data/Occluded/309.jpg', 0)\n score = OcclusionDetector(frame)\n 
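    # The score is the spread of per-block edge density (std/mean, halved,
    # clipped to [0, 1]): values near 1 mean edges are concentrated in a few
    # super-pixels, which the detector treats as likely occlusion, and a
    # frame with no edges at all scores 1 outright.
    # Hypothetical usage; the 0.5 threshold is not taken from the source:
    if score > 0.5:
        print('possible occlusion')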
print(score)\n","sub_path":"src/OcclusionDetector.py","file_name":"OcclusionDetector.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"366948598","text":"from datetime import datetime, timedelta\n\n\ndef daily_mail_stats():\n #trigger at 9:00PM\n now = datetime.now()\n now = datetime.today() - timedelta(days=1)\n now = now.replace(hour=21)\n print(now)\n #DailyMailCount.objects.filter(datetime>=now-1)\n\ndaily_mail_stats()","sub_path":"DjangoRestApi/email_api/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"626920082","text":"import jsonpickle\nfrom flask import Flask, request\nfrom paperbroker import PaperBroker\nfrom paperbroker.orders import Order\n\n# initialize a PaperBroker with defaults\nbroker = PaperBroker()\n\n# set the project root directory as the static folder, you can set others.\napp = Flask(__name__, static_url_path='')\n\n# helper function to return pickled json\ndef json(data):\n global app\n response = app.response_class(\n response=jsonpickle.encode(data, unpicklable=False),\n status=200,\n mimetype='application/json'\n )\n return response\n\n# begin routes\n\n@app.route(\"/quotes/\", methods=['GET'])\ndef get_quote(asset:str):\n return json(broker.get_quote(asset))\n\n\n@app.route(\"/quotes//options/\", methods=['GET'])\ndef get_options(asset=None, expiration_date=None):\n return json(broker.get_options(asset, expiration_date))\n\n\n@app.route(\"/expirations/\", methods=['GET'])\ndef get_expiration_dates(asset=None):\n return json(broker.get_expiration_dates(asset))\n\n\n@app.route(\"/accounts\", methods=['POST'])\n@app.route(\"/accounts/create\", methods=['GET'])\ndef open_account():\n return json(broker.open_account())\n\n\n@app.route(\"/accounts/\", methods=['GET'])\ndef get_account(account_id: str = None):\n return json(broker.get_account(account_id=account_id))\n\n\n@app.route(\"/accounts//orders/buy_to_open/\", methods=['POST'])\ndef buy_to_open(account_id:str = None, asset:str = None):\n quantity = int(request.args.get('quantity', 1))\n simulate = not ( not (request.args.get('simulate', False) ) )\n return json(broker.buy_to_open(account = broker.get_account(account_id=account_id), asset=asset, quantity=quantity, simulate=simulate))\n\n\n@app.route(\"/accounts//orders/sell_to_open/\", methods=['POST'])\ndef sell_to_open(account_id:str = None, asset:str = None):\n quantity = int(request.args.get('quantity', 1))\n simulate = not ( not (request.args.get('simulate', False) ) )\n return json(broker.sell_to_open(account = broker.get_account(account_id=account_id), asset=asset, quantity=quantity, simulate=simulate))\n\n\n@app.route(\"/accounts//orders/buy_to_close/\", methods=['POST'])\ndef buy_to_close(account_id:str = None, asset:str = None):\n quantity = int(request.args.get('quantity', 1))\n simulate = not ( not (request.args.get('simulate', False) ) )\n return json(broker.buy_to_close(account = broker.get_account(account_id=account_id), asset=asset, quantity=quantity, simulate=simulate))\n\n\n@app.route(\"/accounts//orders/sell_to_close/\", methods=['POST'])\ndef sell_to_close(account_id:str = None, asset:str = None):\n quantity = int(request.args.get('quantity', 1))\n simulate = not ( not (request.args.get('simulate', False) ) )\n return json(broker.sell_to_close(account = broker.get_account(account_id=account_id), asset=asset, 
quantity=quantity, simulate=simulate))\n\n\n\n@app.route(\"/accounts/<account_id>/positions/liquidate\", methods=['POST'])\ndef liquidate_account_positions(account_id:str, positions=None, simulate=None):\n simulate = (not ( not (request.args.get('simulate', False) ) )) if simulate is None else simulate\n account = broker.get_account(account_id=account_id)\n return json(broker.close_positions(account=account, positions=account.positions, simulate=simulate))\n\n\n@app.route(\"/accounts/<account_id>/orders/simulate\", methods=['POST'])\n@app.route(\"/accounts/<account_id>/orders/create/simulate\", methods=['GET'])\ndef simulate_order(account_id: str):\n return enter_order(account_id, simulate=True)\n\n\n@app.route(\"/accounts/<account_id>/orders\", methods=['POST'])\n@app.route(\"/accounts/<account_id>/orders/create\", methods=['GET'])\ndef enter_order(account_id: str, simulate=None):\n simulate = (not ( not (request.args.get('simulate', False) ) )) if simulate is None else simulate\n\n order = Order()\n for x in range(4):\n if request.args.get('asset[{}]'.format(x), None) is not None:\n asset = request.args.get('asset[{}]'.format(x), None)\n order_type = request.args.get('order_type[{}]'.format(x), None)\n quantity = request.args.get('quantity[{}]'.format(x), None)\n\n if order_type is None:\n raise Exception('order_type is a required field')\n\n if quantity is None:\n raise Exception('quantity is a required field')\n\n order.add_leg(asset=asset, order_type=order_type, quantity=quantity)\n\n return json(broker.enter_order(account = broker.get_account(account_id=account_id), order=order, simulate=simulate))\n\n\n\n\"\"\"\n\n@app.route(\"/live/sell_otm_vertical_spread_quick_order_page/\", methods=['GET'])\ndef simulate_order(account: Account, order: Order, estimator: Estimator = None):\n account_after = self.market_adapter.simulate_order(account=account, order=order, estimator=estimator)\n validate_account(account_after)\n return account_after\n\n\n@app.route(\"/live/sell_otm_vertical_spread_quick_order_page/\", methods=['GET'])\ndef close_position(account: Account, position=None):\n return self.close_positions(account, [position])\n\n\n@app.route(\"/live/sell_otm_vertical_spread_quick_order_page/\", methods=['GET'])\ndef close_positions(account: Account, positions=None):\n if positions is None:\n positions = []\n\n btc = {}\n stc = {}\n assets_by_symbol = {}\n for p in positions:\n assets_by_symbol[p.asset.symbol] = p.asset\n if p.quantity > 0:\n stc[p.asset.symbol] = stc.get(p.asset.symbol, 0) + p.quantity\n else:\n btc[p.asset.symbol] = btc.get(p.asset.symbol, 0) + p.quantity\n\n o = Order()\n for s in stc.keys():\n o.add_leg(order_type='stc', asset=assets_by_symbol[s], quantity=-1 * stc[s])\n for b in btc.keys():\n o.add_leg(order_type='btc', asset=assets_by_symbol[b], quantity=abs(btc[b]))\n\n self.enter_order(account, o)\n\n\"\"\"\n\nif __name__ == \"__main__\":\n port = 8231\n app.debug = False\n print(\"PaperBroker Flask Server is starting on localhost:{}\".format(port))\n app.run(host = \"127.0.0.1\", port = port, debug=False)\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":6149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"52348599","text":"from keras.layers import Layer\nfrom keras import backend as K\nfrom keras.utils import np_utils\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom sklearn.gaussian_process import GaussianProcessRegressor\nfrom sklearn.gaussian_process.kernels import PairwiseKernel\nimport pandas as pd\nimport 
numpy as np\nimport os\nfrom keras.layers import Dense, Flatten,Dropout,Conv2D,MaxPooling2D\nfrom keras.models import Sequential\nfrom keras.optimizers import Adam\nimport matplotlib.pyplot as plt\nfrom sklearn.cluster import KMeans\nfrom keras.losses import binary_crossentropy\nfrom keras.callbacks import ModelCheckpoint\nimport h5py\ndef isGroup(obj):\n if isinstance(obj, h5py.Group):\n return True\n return False\n\n\ndef isDataset(obj):\n if isinstance(obj, h5py.Dataset):\n return True\n return False\n\n\ndef getdatastefromgroup(dataset, obj):\n if (isGroup(obj)):\n for key in obj:\n x = obj[key]\n getdatastefromgroup(dataset, x)\n else:\n dataset.append(obj)\n\n\ndef geWeightforlayer(layername, filename):\n weight = []\n with h5py.File(filename, mode='r') as f:\n for keys in f:\n if layername in keys:\n obj = f[keys]\n datasets = []\n getdatastefromgroup(datasets, obj)\n for dataset in datasets:\n w = np.array(dataset)\n weight.append(w)\n return weight\n\n\ndef printWeightforlayer(layername,filename):\n with h5py.File(filename,mode='r') as f:\n for keys in f:\n print(keys,f[keys])\n o=f[keys]\n for key1 in o:\n print(key1,o[key1])\n r=o[key1]\n for key2 in r:\n print(key2,r[key2])\ndef read_data(data_dirname_path):\n Trainx = np.load(os.path.join(data_dirname_path,\"trainx.npy\"))\n Trainy = np.load(os.path.join(data_dirname_path,\"trainy.npy\"))\n # Vaildx = np.load(os.path.join(data_dirname_path,\"Vaildx.npy\"))\n # Vaildy = np.load(os.path.join(data_dirname_path,\"Vaildy.npy\"))\n Testx = np.load(os.path.join(data_dirname_path,\"testx.npy\"))\n Testy = np.load(os.path.join(data_dirname_path,\"testy.npy\"))\n\n\n Trainx =Trainx.reshape(-1,28,28,1)\n # Vaildx = Vaildx.reshape(-1,166, 586, 1)\n Testx = Testx.reshape(-1, 28,28, 1)\n\n\n Trainy =np_utils.to_categorical(Trainy,num_classes=10)\n # Vaildy =np_utils.to_categorical(Vaildy,num_classes=12)\n Testy =np_utils.to_categorical(Testy,num_classes=10)\n return Trainx, Trainy,Testx, Testy\nclass RBFLayer(Layer):\n def __init__(self, units, gamma, **kwargs):\n super(RBFLayer, self).__init__(**kwargs)\n self.units = units\n self.gamma = K.cast_to_floatx(gamma)\n\n def build(self, input_shape):\n# print(input_shape)\n# print(self.units)\n self.mu = self.add_weight(name='mu',\n shape=(int(input_shape[1]), self.units),\n initializer='uniform',\n trainable=True)\n super(RBFLayer, self).build(input_shape)\n\n def call(self, inputs):\n diff = K.expand_dims(inputs) - self.mu\n l2 = K.sum(K.pow(diff, 2), axis=1)\n res = K.exp(-1 * self.gamma * l2)\n # res = 1/K.sqrt(1 + self.gamma * l2)\n return res\n\n def compute_output_shape(self, input_shape):\n return (input_shape[0], self.units)\n\ndef RBF():\n model = Sequential()\n model.add(Dense(128, input_dim=(28*28), activation='relu'))\n # model.add(Conv2D(filters=6,input_dim=(28*28),activation='sigmoid',padding=\"valid\",strides=(1, 1),kernel_size=(5,5)))\n # model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(RBFLayer(64, 0.5))\n # model.add(Dropout(0.2))\n model.add(Dense(10, activation='softmax'))\n model.summary()\n # save the model\n model.save('RBF.h5')\n return model\ndef NN():\n\n model = Sequential()\n # model.save_weights(checkpoint_path.format(epoch=0))\n model.add(Dense(10, input_dim=(28*28), activation='softmax'))\n\n # model.add(Dense(256, activation='relu'))\n # model.add(Dropout(0.5))\n # model.add(Dense(128, activation='relu'))\n # model.add(Dense(64, activation='relu'))\n # model.add(Dense(10, activation='softmax'))\n # view the network structure\n model.summary()\n # save the model\n return model\ndef RBF_CNN():\n 
model = Sequential()\n model.add(Conv2D(filters=6,input_shape=(28,28,1),activation='relu',padding=\"valid\",strides=(1, 1),kernel_size=(5,5),data_format='channels_last'))\n model.add(MaxPooling2D(pool_size=(2, 2),strides=(2, 2),padding=\"valid\"))\n # model.add(Conv2D(filters=12,activation='relu',padding=\"valid\",strides=(1, 1),kernel_size=(5,5),data_format='channels_last'))\n # model.add(MaxPooling2D(pool_size=(2, 2),strides=(2, 2),padding=\"valid\"))\n model.add(Flatten())\n model.add(RBFLayer(128, 0.5))\n # model.add(Dropout(0.2))\n model.add(Dense(10, activation='softmax'))\n model.summary()\n # save the model\n model.save('RBF_CNN.h5')\n return model\ndef CNN():\n model = Sequential()\n model.add(Conv2D(filters=6,input_shape=(28,28,1),activation='relu',padding=\"valid\",strides=(1, 1),kernel_size=(5,5),data_format='channels_last'))\n model.add(MaxPooling2D(pool_size=(2, 2),strides=(2, 2),padding=\"valid\"))\n model.add(Conv2D(filters=12,activation='relu',padding=\"valid\",strides=(1, 1),kernel_size=(5,5),data_format='channels_last'))\n model.add(MaxPooling2D(pool_size=(2, 2),strides=(2, 2),padding=\"valid\"))\n model.add(Flatten())\n model.add(Dense(10,activation='softmax'))\n # model.add(Dropout(0.2))\n # model.add(Dense(10, activation='softmax'))\n model.summary()\n # save the model\n model.save('CNN.h5')\n return model\nif __name__ == \"__main__\":\n config = tf.compat.v1.ConfigProto(gpu_options=tf.compat.v1.GPUOptions(allow_growth=True))\n sess = tf.compat.v1.Session(config=config)\n Trainx, Trainy,Testx, Testy=read_data(os.path.join(os.path.dirname(__file__),'mnist_dataset'))\n print(Trainy.shape)\n print(Trainx.shape)\n print(Testy.shape)\n print(Testx.shape)\n print(np.max(Trainx[1,15,:,0]))\n # os.system('pause')\n # model_name='NN_mnist_weight'\n # model_name='NN'\n model_name='RBF'\n if model_name=='RBF_CNN':\n # print(Trainx.shape)\n # os.system('pause')\n model = RBF_CNN()\n model.compile(optimizer=Adam(lr=0.005, beta_1=0.9, beta_2=0.999, epsilon=1e-08),loss='categorical_crossentropy', metrics=['accuracy'])\n History = model.fit(Trainx, Trainy, batch_size=300, epochs=60, verbose=2,validation_data=(Testx,Testy))\n pre = model.evaluate(Testx, Testy, batch_size=300, verbose=2)\n print('test_loss:', pre[0], '- test_acc:', pre[1])\n plt.figure(figsize=(15, 5))\n plt.subplot(1, 2, 1)\n plt.plot(History.history['accuracy'])\n plt.plot(History.history['val_accuracy'])\n plt.title('RBF accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='upper left')\n plt.subplot(1, 2, 2)\n plt.plot(History.history['loss'])\n plt.plot(History.history['val_loss'])\n plt.title('RBF loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='upper left')\n plt.show()\n plt.show()\n if model_name=='CNN':\n # Trainx = Trainx.reshape(Trainx.shape[0], (28*28))\n # Testx = Testx.reshape(Testx.shape[0], (28*28))\n # print(Trainx.shape)\n # os.system('pause')\n model = CNN()\n model.compile(optimizer=Adam(lr=0.00025, beta_1=0.9, beta_2=0.999, epsilon=1e-08),loss='categorical_crossentropy', metrics=['accuracy'])\n History = model.fit(Trainx, Trainy, batch_size=300, epochs=40, verbose=2,validation_data=(Testx,Testy))\n pre = model.evaluate(Testx, Testy, batch_size=100, verbose=2)\n print('test_loss:', pre[0], '- test_acc:', pre[1])\n plt.figure(figsize=(15, 5))\n plt.subplot(1, 2, 1)\n plt.plot(History.history['accuracy'])\n plt.plot(History.history['val_accuracy'])\n plt.title('RBF accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n 
plt.legend(['train', 'test'], loc='upper left')\n plt.subplot(1, 2, 2)\n plt.plot(History.history['loss'])\n plt.plot(History.history['val_loss'])\n plt.title('RBF loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='upper left')\n plt.show()\n plt.show()\n if model_name=='RBF':\n Trainx = Trainx.reshape(Trainx.shape[0], (28*28))\n Testx = Testx.reshape(Testx.shape[0], (28*28))\n # print(Trainx.shape)\n # os.system('pause')\n model = RBF()\n model.compile(optimizer=Adam(lr=0.005, beta_1=0.9, beta_2=0.999, epsilon=1e-08),loss='categorical_crossentropy', metrics=['accuracy'])\n History = model.fit(Trainx, Trainy, batch_size=300, epochs=40, verbose=2,validation_data=(Testx,Testy))\n pre = model.evaluate(Testx, Testy, batch_size=100, verbose=2)\n print('test_loss:', pre[0], '- test_acc:', pre[1])\n plt.figure(figsize=(15, 5))\n plt.subplot(1, 2, 1)\n plt.plot(History.history['accuracy'])\n plt.plot(History.history['val_accuracy'])\n plt.title('RBF accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='upper left')\n plt.subplot(1, 2, 2)\n plt.plot(History.history['loss'])\n plt.plot(History.history['val_loss'])\n plt.title('RBF loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='upper left')\n plt.show()\n plt.show()\n if model_name=='NN':\n checkpoint_path = \"mnist_model/cp_{epoch:04d}.hdf5\"\n\n checkpoint=ModelCheckpoint(filepath=checkpoint_path,\n verbose=0,\n save_best_only=False,\n save_weights_only=True,\n mode=\"auto\",\n period=1)\n Trainx = Trainx.reshape(Trainx.shape[0], (28 * 28))\n Testx = Testx.reshape(Testx.shape[0], (28 * 28))\n # print(Trainx.shape)\n # os.system('pause')\n model = NN()\n model.compile(optimizer=Adam(lr=0.05, beta_1=0.9, beta_2=0.999, epsilon=1e-08),loss='categorical_crossentropy', metrics=['accuracy'])\n History = model.fit(Trainx, Trainy, batch_size=300, epochs=20, verbose=2,validation_data=(Testx,Testy),callbacks=[checkpoint])\n pre = model.evaluate(Testx, Testy, batch_size=100, verbose=2)\n print('test_loss:', pre[0], '- test_acc:', pre[1])\n\n from tensorflow.python import pywrap_tensorflow\n plt.figure(figsize=(15, 5))\n plt.subplot(1, 2, 1)\n plt.plot(History.history['accuracy'])\n plt.plot(History.history['val_accuracy'])\n plt.title('NN accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='upper left')\n plt.subplot(1, 2, 2)\n plt.plot(History.history['loss'])\n plt.plot(History.history['val_loss'])\n plt.title('NN loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='upper left')\n plt.show()\n plt.show()\n if model_name=='NN_mnist_weight':\n\n\n path=os.path.join(os.path.dirname(__file__),'mnist_model')\n\n allFileList = os.listdir(path)\n # printWeightforlayer(layername='',filename=os.path.join(path,'cp_0001.hdf5'))\n epoch_weight=[]\n epoch_bias=[]\n for file in allFileList:\n weight = geWeightforlayer(layername=\"dense_1\", filename=os.path.join(path,file))\n layerweight=weight[1].flatten()\n layerbias=weight[0]\n epoch_weight.append(layerweight)\n epoch_bias.append(layerbias)\n epoch_weight=np.array(epoch_weight)\n epoch_bias=np.array(epoch_bias)\n\n plt.figure(figsize=(7, 5))\n # set the plot range; if not set, the system decides automatically\n plt.xlim(1, 10)\n plt.ylim(-0.25,0.25)\n # write the x-axis and y-axis labels and the title as needed\n plt.xlabel(\"epoch\")\n plt.ylabel(\"weight_value\")\n plt.title(\"The Title\")\n x=[i+1 for i in range(epoch_weight.shape[0])]\n for line_num in range(epoch_weight.shape[1]):\n 
single_weight=epoch_weight[:,line_num]\n print(single_weight)\n plt.plot(x,single_weight)\n\n # os.system('pause')\n plt.show()","sub_path":"RBF_CNN.py","file_name":"RBF_CNN.py","file_ext":"py","file_size_in_byte":12666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"449035972","text":"# how can we print every value of every column in every row of a list of lists?\n\ndata = [\n ['O', 'X', 'O'],\n ['X', 'X', 'O'],\n ['X', 'O', '.'],\n]\n\n# L1 How can we place a letter at the empty bottom right position?\n\ndata[2][2] = \"X\"\n\nfor row in data:\n for column in row:\n print(column, end=\" \")\n print(\"\")\nprint(\"\")\n\n\n# L2 How can we make the above into a function we can re-use?\n\ndef print_board(data):\n for row in data:\n for column in row:\n print(column, end=\" \")\n print(\"\")\n print(\"\")\n\n\nprint_board(data)\n\n\n# L3 How can we combine the data and the function into a Board class?\n\n\nclass Board:\n def __init__(self, data = None):\n self.boardview = data if data is not None else [\n ['X', 'X', 'O'],\n ['X', 'X', 'O'],\n ['X', 'O', 'X'],\n ]\n\n def print_board(self):\n for row in self.boardview:\n for column in row:\n print(column, end=\" \")\n print(\"\")\n print(\"\")\n\n def make_move(self, x, y, value):\n self.x = x\n self.y = y\n self.boardview[x][y] = value\n\n\ncurrent_game=Board(data)\ncurrent_game.make_move(2, 2, \"X\")\ncurrent_game.print_board()\n\n\n# L4 How can we say if X, O, or no-one has won? Can we put this in a Game class that uses the Board class?\n\nclass Game:\n def __init__(self):\n self.board = Board()\n\n def who_has_won(self):\n\n symbols = [\"X\", \"O\"]\n\n for symbol in symbols:\n for row in self.board.boardview:\n if row[0] == symbol and row[1] == symbol and row[2] == symbol:\n return symbol\n\n for index in range(3):\n count = 0\n for row in self.board.boardview:\n if row[index] == symbol:\n count += 1\n if count == 3:\n return symbol\n\n if self.board.boardview[0][0] == symbol and self.board.boardview[1][1] == symbol and \\\n self.board.boardview[2][2] == symbol:\n return symbol\n\n if self.board.boardview[0][2] == symbol and self.board.boardview[1][1] == symbol and \\\n self.board.boardview[2][0] == symbol:\n return symbol\n\n return None\n\n\nnewgame = Game()\nnewgame.board.print_board()\nprint(newgame.who_has_won())\n","sub_path":"tictactoe.py","file_name":"tictactoe.py","file_ext":"py","file_size_in_byte":2365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"506428989","text":"import pygame\r\nfrom pygame import *\r\nimport os\r\nfrom Lib import inspect\r\nimport math\r\n\r\nclass Gimage:\r\n im = None # Image\r\n pixels = None # Pixels data\r\n w = h = 0\r\n\r\n def setIm (self, x):\r\n self.im = x\r\n\r\n def get (self, x, y):\r\n return self.im.get_at ((x, y))\r\n\r\n def set (self, x, y, c):\r\n self.im.set_at ((x, y), c)\r\n\r\n def save (self, s):\r\n pygame.image.save(self.im, (s))\r\n\r\n def draw (self, canvas, x, y):\r\n canvas.blit(self.im,(x,y))\r\n\r\n\r\n_loaded = False\r\nclass Gvideo:\r\n def __init__ (self, name):\r\n global canvas\r\n self.name = name\r\n m = self.loadVideo(name)\r\n self.vid = m\r\n self.size = m.get_size()\r\n self.surf = canvas\r\n m.set_display(canvas, \r\n (0,0,self.size[0], self.size[1]))\r\n self.outer = canvas\r\n self.x = 0\r\n self.y = 0\r\n self._paused = False\r\n self.__DEBUG = True\r\n self.loaded = False\r\n\r\n def loadVideo (self, s):\r\n global _loaded\r\n if not 
_loaded:\r\n mixer.quit()\r\n _loaded = True\r\n try:\r\n m = movie.Movie (s)\r\n except:\r\n print (\"Problem loading the movie '\", s, \"'\")\r\n m = None\r\n return m \r\n\r\n def play(self):\r\n self.vid.play ()\r\n\r\n def locVideo (self, x, y, w, h):\r\n self.x = x\r\n self.y = y\r\n self.size = (w,h)\r\n self.surf = Surface(self.size)\r\n self.vid.set_display(self.surf, (0,0,w,h))\r\n\r\n def draw (self):\r\n global canvas\r\n canvas.blit (self.surf, (self.x, self.y))\r\n\r\n def copy (self, x, y, w, h):\r\n inst = Gvideo (self.name)\r\n inst.locVideo (x, y, w, h)\r\n return inst\r\n\r\n def stop (self):\r\n self.vid.stop()\r\n\r\n def rewind (self):\r\n self.vid.rewind()\r\n\r\n def is_playing (self):\r\n if self.vid.get_busy(): \r\n return True\r\n return False\r\n\r\n def length (self):\r\n return self.vid.get_length()\r\n\r\n def where (self):\r\n if self.vid.get_busy():\r\n t = self.vid.get_time()\r\n return t\r\n else:\r\n return self.vid.get_length()\r\n\r\n def get_frame (self): \r\n return self.surf.copy()\r\n\r\n def pause(self):\r\n if not self._paused:\r\n self._paused = True\r\n fvid = self.vid.get_frame()\r\n self.vid.pause()\r\n else:\r\n self._paused = False\r\n self.vid.pause()\r\n\r\n def get_frame_number(self):\r\n return self.vid.get_frame()\r\n\r\n def draw_frame(self, f, iplay=0):\r\n x = self.vid.render_frame(f)\r\n if not (self.surf == self.outer):\r\n x = self.outer.blit (self.surf, (self.x, self.y))\r\n return\r\n if iplay == 0:\r\n self.draw()\r\n elif iplay == 1:\r\n self.play()\r\n\r\n\r\n\r\n def get_pixel(self, x, y):\r\n if (x<0) or (x>self.size[0]): \r\n return (-1, -1, -1)\r\n if (y<0) or (y>self.size[1]):\r\n return (-1, -1, -1)\r\n try:\r\n p = tuple(self.surf.get_at((x, y)))\r\n except:\r\n return (-1, -1, -1)\r\n return p\r\n\r\n def auto_play (self):\r\n self.vid.set_display(canvas, \r\n (self.x, self.y,self.size[0], self.size[1]))\r\n self.vid.play()\r\n\r\n def set_volume(self, v):\r\n return self.vid.set_volume(v)\r\n\r\n\r\n\r\n# To-Do: 1. 
Symbolic names for special characters (LEFT, etc)\r\n# These are global variables that are used by the user \r\n# or are phantoms\r\n\r\nK_BACKSPACE = pygame.K_BACKSPACE\r\nK_TAB = pygame.K_TAB\r\nK_CLEAR = pygame.K_CLEAR\r\nK_RETURN = pygame.K_RETURN\r\nK_PAUSE = pygame.K_PAUSE\r\nK_ESCAPE = pygame.K_ESCAPE\r\nK_SPACE = pygame.K_SPACE\r\nK_EXCLAIM = pygame.K_EXCLAIM\r\nK_QUOTEDBL = pygame.K_QUOTEDBL\r\nK_HASH = pygame.K_HASH\r\nK_DOLLAR = pygame.K_DOLLAR\r\nK_AMPERSAND = pygame.K_AMPERSAND\r\nK_QUOTE = pygame.K_QUOTE \r\nK_LEFTPAREN = pygame.K_LEFTPAREN \r\nK_RIGHTPAREN = pygame.K_RIGHTPAREN\r\nK_ASTERISK = pygame.K_ASTERISK \r\nK_PLUS = pygame.K_PLUS \r\nK_COMMA = pygame.K_COMMA \r\nK_MINUS = pygame.K_MINUS \r\nK_PERIOD = pygame.K_PERIOD \r\nK_SLASH = pygame.K_SLASH \r\nK_0 = pygame.K_0 \r\nK_1 = pygame.K_1 \r\nK_2 = pygame.K_2 \r\nK_3 = pygame.K_3 \r\nK_4 = pygame.K_4 \r\nK_5 = pygame.K_5\r\nK_6 = pygame.K_6 \r\nK_7 = pygame.K_7 \r\nK_8 = pygame.K_8 \r\nK_9 = pygame.K_9 \r\nK_COLON = pygame.K_COLON\r\nK_SEMICOLON = pygame.K_SEMICOLON \r\nK_LESS = pygame.K_LESS \r\nK_EQUALS = pygame.K_EQUALS \r\nK_GREATER = pygame.K_GREATER \r\nK_QUESTION = pygame.K_QUESTION \r\nK_AT = pygame.K_AT \r\nK_LEFTBRACKET= pygame.K_LEFTBRACKET\r\nK_BACKSLASH = pygame.K_BACKSLASH \r\nK_RIGHTBRACKET= pygame.K_RIGHTBRACKET\r\nK_CARET = pygame.K_CARET \r\nK_UNDERSCORE = pygame.K_UNDERSCORE \r\nK_BACKQUOTE = pygame.K_BACKQUOTE \r\n#K_a = pygame.K_a \r\nK_b = pygame.K_b \r\nK_c = pygame.K_c \r\nK_d = pygame.K_d \r\nK_e = pygame.K_e \r\nK_f = pygame.K_f \r\nK_g = pygame.K_g \r\nK_h = pygame.K_h \r\nK_i = pygame.K_i \r\nK_j = pygame.K_j \r\nK_k = pygame.K_k \r\nK_l = pygame.K_l \r\nK_m = pygame.K_m \r\nK_n = pygame.K_n \r\nK_o = pygame.K_o \r\nK_p = pygame.K_p \r\nK_q = pygame.K_q \r\nK_r = pygame.K_r \r\nK_s = pygame.K_s \r\nK_t = pygame.K_t \r\nK_u = pygame.K_u \r\nK_v = pygame.K_v \r\nK_w = pygame.K_w \r\nK_x = pygame.K_x \r\nK_y = pygame.K_y \r\nK_z = pygame.K_z \r\nK_DELETE = pygame.K_DELETE \r\nK_KP0 = pygame.K_KP0 \r\nK_KP1 = pygame.K_KP1 \r\nK_KP2 = pygame.K_KP2 \r\nK_KP3 = pygame.K_KP3 \r\nK_KP4 = pygame.K_KP4 \r\nK_KP5 = pygame.K_KP5 \r\nK_KP6 = pygame.K_KP6 \r\nK_KP7 = pygame.K_KP7 \r\nK_KP8 = pygame.K_KP8 \r\nK_KP9 = pygame.K_KP9 \r\nK_KP_PERIOD = pygame.K_KP_PERIOD \r\nK_KP_DIVIDE = pygame.K_KP_DIVIDE \r\nK_KP_MULTIPLY= pygame.K_KP_MULTIPLY\r\nK_KP_MINUS = pygame.K_KP_MINUS \r\nK_KP_PLUS = pygame.K_KP_PLUS \r\nK_KP_ENTER = pygame.K_KP_ENTER \r\nK_KP_EQUALS = pygame.K_KP_EQUALS\r\nK_UP = pygame.K_UP \r\nK_DOWN = pygame.K_DOWN \r\nK_RIGHT = pygame.K_RIGHT \r\nK_LEFT = pygame.K_LEFT \r\nK_INSERT = pygame.K_INSERT \r\nK_HOME = pygame.K_HOME \r\nK_END = pygame.K_END \r\nK_PAGEUP = pygame.K_PAGEUP \r\nK_PAGEDOWN = pygame.K_PAGEDOWN \r\nK_F1 = pygame.K_F1\r\nK_F2 = pygame.K_F2\r\nK_F3 = pygame.K_F3\r\nK_F4 = pygame.K_F4\r\nK_F5 = pygame.K_F5\r\nK_F6 = pygame.K_F6\r\nK_F7 = pygame.K_F7\r\nK_F8 = pygame.K_F8\r\nK_F9 = pygame.K_F9\r\nK_F10 = pygame.K_F10\r\nK_F11 = pygame.K_F11\r\nK_F12 = pygame.K_F12\r\nK_F13 = pygame.K_F13\r\nK_F14 = pygame.K_F14\r\nK_F15 = pygame.K_F15\r\nK_NUMLOCK = pygame.K_NUMLOCK \r\nK_CAPSLOCK = pygame.K_CAPSLOCK \r\nK_SCROLLOCK = pygame.K_SCROLLOCK \r\nK_RSHIFT = pygame.K_RSHIFT \r\nK_LSHIFT = pygame.K_LSHIFT \r\nK_RCTRL = pygame.K_RCTRL \r\nK_LCTRL = pygame.K_LCTRL \r\nK_RALT = pygame.K_RALT \r\nK_LALT = pygame.K_LALT \r\nK_RMETA = pygame.K_RMETA \r\nK_LMETA = pygame.K_LMETA \r\nK_LSUPER = pygame.K_LSUPER \r\nK_RSUPER = pygame.K_RSUPER \r\nK_MODE = pygame.K_MODE \r\nK_HELP = pygame.K_HELP 
\r\nK_PRINT = pygame.K_PRINT \r\nK_SYSREQ = pygame.K_SYSREQ \r\nK_BREAK = pygame.K_BREAK \r\nK_MENU = pygame.K_MENU \r\nK_POWER = pygame.K_POWER \r\nK_EURO = pygame.K_EURO\r\nK_A = 65 \r\nK_B = 66\r\nK_C = 67\r\nK_D = 68\r\nK_E = 69\r\nK_F = 70\r\nK_G = 71\r\nK_H = 72\r\nK_I = 73\r\nK_J = 74\r\nK_K = 75\r\nK_L = 76\r\nK_M = 77\r\nK_N = 78\r\nK_O = 79\r\nK_P = 80\r\nK_Q = 81\r\nK_R = 82\r\nK_S = 83\r\nK_T = 84\r\nK_U = 85\r\nK_V = 86\r\nK_W = 87\r\nK_X = 88\r\nK_Y = 89\r\nK_Z = 90\r\nmousex = 9 # X position of the mouse right now\r\nmousey = 10 # Y position of the mouse right now\r\n_user = None\r\ndrawOK = False\r\n_keyp = False\r\n_keyr = False\r\n_mousep = False\r\n_mouser = False\r\n_uppercase = False\r\n_fvid = 0\r\n_xvid = 0\r\n_yvid = 0\r\n_xwid = 0\r\n_ywid = 0\r\n_time = 0\r\n_pausedv = False\r\nsavedFrame = None\r\nwidth = 100 # Default canvas width\r\nheight = 100 # Default canvas height\r\n_fillcol = (255, 255, 255) # Current fill color\r\n_strokecol = (0, 0, 0) # Current stroke color\r\n_bgcol = (200, 200, 200) # Current background color\r\n_ELLIPSEMODE = 0 # Mode for drawing an ellipse\r\nCENTER = 0 # 0 = center\r\nRADIUS = 1 # 1 = radius\r\nCORNER = 2 # 2 = corner\r\nCORNERS = 3 # 3 = corners\r\n_RECTMODE = CORNER # Mode for drawing rectangles\r\n\r\n_buttons = (False, False, False) # Button presses.\r\nkey = \"\" # Last key that was pressed\r\n_noloop = False # Does DRAW loop?\r\n_dofill = True # Should polygons be filled?\r\n_dostroke = True # Should polygons be outlined?\r\n_linewidth = 1 # Line width in pixels\r\n_framerate = 30\r\nclock = pygame.time.Clock()\r\n_hex = [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"A\", \"B\", \"C\", \"D\", \"E\", \"F\"]\r\n_font_family = \"helvetica\"\r\n_font_size = 12\r\n_font_weight = \"normal\"\r\n_font_slant = \"roman\"\r\n\r\nBLACK = (0, 0, 0)\r\nWHITE = (255, 255, 255)\r\nRED = (255, 0, 0)\r\nGREEN = (0, 255, 0)\r\nBLUE = (0, 0, 255)\r\nBACKSPACE = K_BACKSPACE\r\nPIESLICE = \"pieslice\"\r\nfont = None\r\ncanvas = 0\r\n\r\ndef mouseX():\r\n return mousex\r\ndef mouseY():\r\n return mousey\r\n\r\ndef Width():\r\n global width\r\n return width\r\ndef Height():\r\n global height\r\n return height\r\n\r\n# Turn on filling. Set the fill color for polygons, **\r\n# text color too. **\r\ndef fill(r, g=1000, b=1000, a=255):\r\n global _fillcol, _dofill\r\n _dofill = True\r\n if g==1000:\r\n _fillcol = (r,r,r,a)\r\n else:\r\n _fillcol = (r,g,b,a)\r\n \r\n# Turn filling off. **\r\ndef nofill():\r\n global _dofill\r\n _dofill = False\r\n\r\n# Set the line and outline color. **\r\ndef stroke(r, g=1000, b=1000, a=255):\r\n global _strokecol, _dostroke\r\n if g==1000:\r\n _strokecol=(r,r,r,a)\r\n else:\r\n _strokecol=(r,g,b,a)\r\n _dostroke = True\r\n\r\n# Turn off outline drawing. **\r\ndef nostroke():\r\n global _dostroke\r\n _dostroke = False\r\n _strokecol = \"\"\r\n\r\n# Set the mode for drawing ellipses. **\r\ndef ellipsemode (z):\r\n global _ELLIPSEMODE\r\n _ELLIPSEMODE = z\r\n\r\n# Draw an ellipse. Also used for circles. Four modes as described in doc. 
**\r\ndef ellipse (xpos, ypos, width, height):\r\n global canvas, _ELLIPSEMODE\r\n ccanvas = canvas.copy()\r\n if _ELLIPSEMODE == CENTER: # Mode 0 is CENTER\r\n if _dofill:\r\n pygame.draw.ellipse (ccanvas, _fillcol, (xpos-width/2, ypos-height/2, width, height), 0)\r\n if _dostroke:\r\n if width//2<_linewidth:\r\n width = _linewidth*2\r\n if height//2 < _linewidth:\r\n height = _linewidth*2\r\n pygame.draw.ellipse (ccanvas, _strokecol, (xpos-width/2, ypos-height/2, width, height), _linewidth)\r\n elif _ELLIPSEMODE == RADIUS: # Mode 1 is Radius\r\n if _dofill:\r\n pygame.draw.ellipse (ccanvas, _fillcol, (xpos-width, ypos-height, width*2, height*2), 0)\r\n if _dostroke:\r\n pygame.draw.ellipse (ccanvas, _strokecol, (xpos-width, ypos-height, width*2, height*2), _linewidth)\r\n elif _ELLIPSEMODE == CORNER:\r\n if _dofill:\r\n pygame.draw.ellipse (ccanvas, _fillcol, (xpos, ypos, width, height), 0)\r\n if _dostroke:\r\n pygame.draw.ellipse (ccanvas, _strokecol, (xpos, ypos, width, height), _linewidth)\r\n elif _ELLIPSEMODE == CORNERS:\r\n if _dofill:\r\n pygame.draw.ellipse (ccanvas, _fillcol, (xpos, ypos, width-xpos, height-ypos), 0)\r\n if _dostroke:\r\n pygame.draw.ellipse (ccanvas, _strokecol, (xpos, ypos, width-xpos, height-ypos), _linewidth)\r\n else:\r\n print (\"Error: Illegal value for ELLIPSEMODE\", _ELLIPSEMODE) # 101\r\n ccanvas.set_alpha(_fillcol[3])\r\n canvas.blit (ccanvas, (0,0))\r\n \r\ndef arc2 (x0, y0, x1, y1, a1, ap, mode=\"PIESLICE\"):\r\n global canvas, _strokecol, _linewidth, _fillcol\r\n pygame.draw.arc (canvas, pygame.Rect(x0,y0,x1-x0,y1-y0), a1, ap, _linewidth)\r\n\r\ndef arc (x0, y0, x1, y1, a1, ap, mode=\"PIESLICE\"):\r\n global canvas, _strokecol, _linewidth, _fillcol\r\n ccanvas = canvas.copy()\r\n conv = 3.1415/180.0\r\n r = (x1-x0)/2\r\n xc = x0+r\r\n yc = y0+r\r\n xs = x = xc + r*math.cos(-a1*conv)\r\n ys = y = yc + r*math.sin(-a1*conv)\r\n xe = xc + r*math.cos(-(a1+ap)*conv)\r\n ye = yc + r*math.sin(-(a1+ap)*conv)\r\n a = a1*1.0\r\n pts = ((x, y),)\r\n while a <= a1+ap:\r\n xx = xc + r*math.cos(-a*conv)\r\n yy = yc + r*math.sin(-a*conv)\r\n line (x, y, xx, yy)\r\n pts = pts + ((x, y),)\r\n x = xx\r\n y = yy\r\n a = a + 1\r\n if (a1+ap)-a<1:\r\n break\r\n xx = xc + r*math.cos(-(a1+ap)*conv)\r\n yy = yc + r*math.sin(-(a1+ap)*conv)\r\n pts = pts + ((xx, yy),)\r\n line (x, y, xx, yy)\r\n x = xx\r\n y = yy\r\n if mode == \"CHORD\":\r\n line (xe, ye, xs, ys)\r\n pts = pts + ((xs, ys),)\r\n pygame.draw.polygon (ccanvas, _fillcol, pts, 0)\r\n pygame.draw.polygon (ccanvas, _strokecol, pts, _linewidth) \r\n elif mode == \"PIESLICE\":\r\n line (x, y, xc, yc)\r\n pts = pts + ((xc, yc),)\r\n line (xc, yc, xs, ys)\r\n pts = pts + ((xs, ys),)\r\n pygame.draw.polygon (ccanvas, _fillcol, pts, 0)\r\n pygame.draw.polygon (ccanvas, _strokecol, pts, _linewidth)\r\n elif mode == \"ARC\":\r\n pygame.draw.polygon (ccanvas, _strokecol, pts, _linewidth)\r\n ccanvas.set_alpha(_fillcol[3])\r\n canvas.blit (ccanvas, (0,0))\r\n\r\n# Draw a line **\r\ndef line (x0, y0, x1, y1):\r\n global canvas, _strokecol, _linewidth\r\n ccanvas = canvas.copy ()\r\n pygame.draw.line(ccanvas, _strokecol, (x0,y0), (x1,y1), _linewidth)\r\n ccanvas.set_alpha(_strokecol[3])\r\n canvas.blit (ccanvas, (0,0))\r\n\r\n# Draw a point. 
**\r\ndef point (x, y):\r\n global _fillcol, canvas\r\n #ccanvas = canvas.copy()\r\n ccanvas = pygame.Surface((1,1), 0, 32)\r\n ccanvas.fill (_fillcol)\r\n # pygame.draw.line(ccanvas, _fillcol, (x,y), (x,y), 1)\r\n ccanvas.set_alpha(_fillcol[3])\r\n canvas.blit (ccanvas, (x,y))\r\n\r\n# Draw a rectangle. Same 4 modes as ellipse. **\r\ndef rect (xpos, ypos, x2, y2):\r\n global canvas, _RECTMODE\r\n ccanvas = canvas.copy()\r\n if _RECTMODE == CENTER: # Mode 0 is CENTER\r\n if _dofill:\r\n pygame.draw.rect (ccanvas, _fillcol, (xpos-x2/2, ypos-y2/2, x2, y2), 0)\r\n if _dostroke:\r\n pygame.draw.rect (ccanvas, _strokecol, (xpos-x2/2, ypos-y2/2, x2, y2), _linewidth)\r\n elif _RECTMODE == RADIUS: # RADIUS mode\r\n if _dofill:\r\n pygame.draw.rect (ccanvas, _fillcol, (xpos-x2, ypos-y2, x2+x2, y2+y2), 0)\r\n if _dostroke:\r\n pygame.draw.rect (ccanvas, _strokecol, (xpos-x2, ypos-y2, x2+x2, y2+y2), _linewidth)\r\n elif _RECTMODE == CORNER: # CORNER mode\r\n if _dofill:\r\n pygame.draw.rect (ccanvas, _fillcol, (xpos, ypos, x2, y2), 0)\r\n if _dostroke:\r\n pygame.draw.rect (ccanvas, _strokecol, (xpos, ypos, x2, y2), _linewidth)\r\n elif _RECTMODE == CORNERS: #CORNERS\r\n if _dofill:\r\n pygame.draw.rect (ccanvas, _fillcol, (xpos, ypos, x2-xpos, y2-ypos), 0)\r\n if _dostroke:\r\n pygame.draw.rect (ccanvas, _strokecol, (xpos, ypos, x2-xpos, y2-ypos), _linewidth)\r\n ccanvas.set_alpha(_fillcol[3])\r\n canvas.blit (ccanvas, (0,0))\r\n \r\n# Draw a triangle specified by three points. **\r\ndef triangle (x0,y0, x1,y1, x2,y2):\r\n global canvas, _fillcol\r\n ccanvas = canvas.copy()\r\n if _dofill:\r\n pygame.draw.polygon (ccanvas, _fillcol, ((x0,y0), (x1,y1), (x2,y2)), 0)\r\n if _dostroke:\r\n pygame.draw.polygon (ccanvas, _strokecol, ((x0,y0), (x1,y1), (x2,y2)), _linewidth)\r\n ccanvas.set_alpha(_fillcol[3])\r\n canvas.blit (ccanvas, (0,0))\r\n\r\n# Set the frame rate **\r\ndef frameRate (r):\r\n global _framerate\r\n _framerate = r\r\n clock.tick(_framerate)\r\n \r\ndef quad (x0,y0, x1,y1, x2,y2, x3,y3):\r\n global canvas, _fillcol\r\n ccanvas = canvas.copy()\r\n canvas.create_polygon (x0,y0, x1,y1, x2,y2, x3,y3, fill=_fillcol, tags='_P')\r\n ccanvas.set_alpha(_fillcol[3])\r\n canvas.blit (ccanvas, (0,0))\r\n\r\ndef strokeweight(s):\r\n global _linewidth\r\n _linewidth = s\r\n\r\ndef cvtColor (z): # Convert an integer to a color (grey)\r\n return (z, z, z, 255)\r\n\r\ndef cvtColor3 (r,g=0,b=0, a=255):\r\n if hasattr(r, \"len\") and len(r) == 3:\r\n return (r[0], r[1], r[2])\r\n elif hasattr(r, \"len\") and len(r) == 4: \r\n return (r[0], r[1], r[2], r[3])\r\n return (r, g, b, a)\r\n\r\ndef noloop():\r\n global _noloop\r\n _noloop = True\r\n\r\ndef rectmode (z):\r\n global _RECTMODE\r\n _RECTMODE = z\r\n\r\ndef background(r,g=1000,b=1000, a=255):\r\n global canvas, width, height, _xvid, _yvid, _xwid, _ywid\r\n ccanvas = canvas.copy()\r\n if g>=1000:\r\n f = cvtColor(r)\r\n else:\r\n f = cvtColor3(r, g, b)\r\n #if _pausedv:\r\n # canvas.fill (f, (0, 0, width, _yvid))\r\n # canvas.fill (f, (0, 0, _xvid, height))\r\n # canvas.fill (f, (_xvid+_xwid, 0, width-_xvid, height))\r\n # canvas.fill (f, (0, _yvid+_ywid, width, height-(_yvid+_ywid)))\r\n #else:\r\n# pygame.draw.rect (canvas, f, (0, 0, width, height), 0)\r\n ccanvas.fill (f, (0, 0, width, height))\r\n ccanvas.set_alpha(f[3])\r\n canvas.blit (ccanvas, (0,0))\r\n\r\ndef setfont(s):\r\n global _font_family, _font_size\r\n font = pygame.font.SysFont(_font_family, _font_size)\r\n\r\ndef textsize (n):\r\n global _font_family, _font_size, _font_weight, 
_font_slant, font\r\n _font_size = n\r\n font = pygame.font.SysFont(_font_family, _font_size)\r\n\r\n# Draw a text string at the given point. **\r\ndef text (s, x, y):\r\n global canvas,font\r\n\r\n if font == None: # Create a font if needed\r\n font = pygame.font.Font(None, 18)\r\n text = font.render(s, 1, _fillcol) # Render the string in the fill color\r\n textpos = text.get_rect() # Get the rectangle that encloses the text\r\n textpos.bottomleft = [x,y]\r\n canvas.blit(text, textpos)\r\n\r\ndef _draw():\r\n global _user\r\n global mousex, mousey\r\n global i,j,canvas, _buttons, mp\r\n\r\n mp = pygame.mouse.get_pos() # Get mouse coordinates\r\n mousex = mp[0]\r\n mousey = mp[1]\r\n mb = pygame.mouse.get_pressed() # Get mouse buttons.\r\n for i in range(0,3):\r\n if mb[i] and not _buttons[i]: # Button i pressed.\r\n _mousePressed(i)\r\n elif not mb[i] and _buttons[i]: # Button i released\r\n _mouseReleased(i)\r\n _buttons = mb\r\n if drawOK:\r\n _user.draw() # Call the user's draw() function if it exists\r\n\r\ndef _keyPressed (k):\r\n global _user, _keyp\r\n if _keyp:\r\n # if len(k)> 0:\r\n # _user.keyPressed(ord(k[0]))\r\n _user.keyPressed (k)\r\n\r\ndef _keyReleased (k):\r\n global _user, _keyr\r\n if _keyr:\r\n _user.keyReleased (k)\r\n\r\ndef _mouseReleased (b):\r\n global _user, _mouser\r\n if _mouser:\r\n _user.mouseReleased(b) # Method exists, and was used. \r\n\r\ndef _mousePressed (b):\r\n global _user, _mousep\r\n if _mousep:\r\n _user.mousePressed(b)\r\n\r\n\r\n\r\n# Equivalent of Processing function size, which sets up the display window.\r\ndef size (xs, ys):\r\n global width, height, canvas\r\n width = xs\r\n height = ys\r\n canvas = pygame.display.set_mode( (xs, ys), pygame.DOUBLEBUF, 32) # Make the sketch window\r\n pygame.display.set_caption('Drawing')\r\n\r\n# ---------------------- Images ---------------------------\r\ndef loadImage (s):\r\n try:\r\n myImage = pygame.image.load (s)\r\n except pygame.error:\r\n return None\r\n gim = Gimage ()\r\n gim.setIm (myImage)\r\n return (gim)\r\n\r\ndef image (s, x, y):\r\n global canvas\r\n if hasattr(s, \"draw\"):\r\n s.draw(canvas, x, y)\r\n elif hasattr(s, \"blit\"):\r\n canvas.blit (s, (x, y))\r\n\r\ndef getpixel (im, x, y):\r\n return im.get(x,y)\r\n\r\ndef setpixel (im, i, j, c):\r\n global canvas\r\n im.set (i, j, c)\r\n\r\ndef save(im, s):\r\n im.save (s)\r\n\r\ndef red (c):\r\n return c[0]\r\ndef green(c):\r\n return c[1]\r\ndef blue (c):\r\n return c[2]\r\n\r\ndef grey (c):\r\n return (c[0]+c[1]+c[2])/3\r\n\r\n# --------------- Video -----------------------------------\r\ndef loadVideo (s):\r\n try:\r\n m = Gvideo(s)\r\n except:\r\n print (\"Problem loading the movie '\", s, \"'\")\r\n m = None\r\n print (\"LoadVideo type \", type(m))\r\n return m \r\n\r\ndef playVideo (m):\r\n m.play ()\r\n\r\ndef pauseVideo(m):\r\n m.pause()\r\n\r\ndef stopVideo (m):\r\n m.stop()\r\n\r\ndef rewindVideo (m):\r\n m.rewind()\r\n\r\ndef isVideoPlaying (m):\r\n return m.is_playing()\r\n\r\ndef setVideoVolume(m, v):\r\n return m.set_volume(v)\r\n\r\ndef lengthVideo (m):\r\n return m.length()\r\n\r\ndef whereVideo(m):\r\n return m.where()\r\n\r\ndef getVideoFrame(m):\r\n return m.get_frame_number()\r\n\r\ndef setVideoFrame(m, f):\r\n m.set_video_frame(f)\r\n\r\ndef getVideoPixel(m, x, y):\r\n return m.get_pixel (x, y)\r\n\r\ndef sizeVideo (m):\r\n return m.size\r\n\r\ndef locVideo (m, x, y, w, h):\r\n global canvas\r\n if m.surf == canvas:\r\n m.vid.set_display(canvas, (x,y,w,h))\r\n else:\r\n m.locVideo (x,y,w,h)\r\n \r\n\r\ndef videoSize 
(s):\r\n pygame.init() \r\n m = movie.Movie (s)\r\n x = m.get_size()\r\n m = None\r\n return x\r\n\r\n# ------------------- Audio --------------------------------\r\ndef loadSound (s):\r\n if mixer.get_init() == False:\r\n mixer.pre_init(buffersize=512)\r\n mixer.init()\r\n try:\r\n m = mixer.Sound(s)\r\n except:\r\n print (\"Problem loading the sound file '\", s, \"'\")\r\n m = None\r\n return m \r\n\r\ndef playSound(a, loop=0):\r\n a.play(loops=loop)\r\ndef stopSound(a):\r\n a.stop()\r\ndef volumeSound (a, v):\r\n a.set_volume (v)\r\ndef durationSound (a):\r\n return a.get_length()\r\n\r\n#def soundData (a):\r\n# return a.get_raw()\r\n#def playSoundData (a):\r\n# m = mixer.Sound(buffer=a)\r\n# m.play()\r\n# return m\r\n \r\n \r\n# --------------------- Interaction ------------------------\r\n\r\ndef mouse ():\r\n global mousex, mousey\r\n mp = pygame.mouse.get_pos() # Get mouse coordinates\r\n mousex = mp[0]\r\n mousey = mp[1]\r\n return mp\r\n\r\ndef modulename(s):\r\n for i in range(len(s)-1,0, -1):\r\n if s[i]=='/' or s[i]=='\\\\':\r\n return s[i+1:len(s)-3]\r\n\r\ndef capture (s):\r\n pygame.image.save (canvas, s)\r\n\r\ndef startdraw(w=50, h=50):\r\n global font, _framerate, clock, _user, drawOK\r\n global _keyp, _keyr, _mousep, _mouser\r\n\r\n username = inspect.stack()[1][1] # Get the path of the caller's source\r\n username = modulename(username) # extract the file name\r\n pygame.init() # Initialize pygame, obviously\r\n\r\n size (w, h)\r\n ellipsemode(CENTER)\r\n rectmode (CORNER)\r\n stroke (0)\r\n fill (0)\r\n _user = __import__ (username) # Import names from user source\r\n if hasattr (_user, \"initialize\"):\r\n if callable(_user.initialize): \r\n _user.initialize() # Call the user's SETUP() function if it exists\r\n if hasattr (_user, \"keyPressed\"):\r\n if callable(_user.keyPressed): \r\n _keyp = True\r\n if hasattr (_user, \"keyReleased\"):\r\n if callable(_user.keyReleased): \r\n _keyr = True\r\n if hasattr (_user, \"mouseReleased\"):\r\n if callable(_user.mouseReleased): \r\n _mouser = True\r\n if hasattr (_user, \"mousePressed\"):\r\n if callable(_user.mousePressed): \r\n _mousep = True\r\n\r\n background( 200 ) # Initial empty window\r\n # ico = loadImage ('c:/pyp/ico/pyp.ico')\r\n # pygame.display.set_icon(ico.im)\r\n pygame.display.set_caption(\"Glib (Dynamic)\")\r\n font = pygame.font.Font(None, 18)\r\n clock.tick(_framerate)\r\n if hasattr (_user, \"draw\"):\r\n if callable(_user.draw): \r\n drawOK = True\r\n\r\ndef enddraw():\r\n global mousex, mousey\r\n while True:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n exit()\r\n if event.type == pygame.KEYDOWN:\r\n try:\r\n _keyPressed(event.key)\r\n except:\r\n continue\r\n elif event.type == pygame.KEYUP:\r\n try:\r\n _keyReleased(event.key)\r\n except Exception as e:\r\n print (\"Problem\", e)\r\n continue\r\n if _noloop == False:\r\n _draw()\r\n clock.tick(_framerate)\r\n pygame.display.update()\r\n\r\n\r\n\r\n\r\n","sub_path":"275-Langs/Python/pythonprogramming/Code/CH12/game/Glib.py","file_name":"Glib.py","file_ext":"py","file_size_in_byte":26040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"211366567","text":"'''\nCreated on 16.1.2013\n\n@author: Sampo\n'''\n\n#!flask/bin/python\nfrom app import app\ndebug=0\nif debug==1:\n app.run(debug = True) #i.e. shows debug messages in the browser if it crashes\nelse:\n app.run(debug = 
False)","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"573977954","text":"from collections import namedtuple\n\nnew_system = 'gold'\n\nInstance = namedtuple('Instance', 'children_words heads_words gold_head_word new_system_head_word preps_words'.split(' '), verbose=True)\n\n# load the data scattered in different files, one preposition per line.\ndef read_one_preposition_per_line(children_words_filename,\n heads_words_filename,\n labels_filename,\n new_system_predictions_filename,\n preps_words_filename):\n instances=[]\n with open(children_words_filename) as children_words_file, \\\n open(heads_words_filename) as heads_words_file, \\\n open(labels_filename) as labels_file, \\\n open(new_system_predictions_filename) as new_system_predictions_file, \\\n open(preps_words_filename) as preps_words_file:\n while True:\n heads_words_line = heads_words_file.readline().strip()\n if heads_words_line == '': break\n heads_words = heads_words_line.split(' ')\n instance = Instance(children_words=children_words_file.readline().strip(), \\\n heads_words=heads_words, \\\n gold_head_word=heads_words[int(labels_file.readline().strip())-1], \\\n new_system_head_word=heads_words[int(new_system_predictions_file.readline().strip())-1], \\\n preps_words=preps_words_file.readline().strip())\n instances.append(instance)\n return instances\n\nprint('before defining train_instances')\ntrain_instances = read_one_preposition_per_line(\"wsj.2-21.txt.dep.pp.children.words\",\n \"wsj.2-21.txt.dep.pp.heads.words\",\n \"wsj.2-21.txt.dep.pp.labels\",\n \"wsj.2-21.txt.dep.pp.\" + new_system + \".predictions\",\n \"wsj.2-21.txt.dep.pp.preps.words\")\n\nprint('before defining test_instances')\ntest_instances = read_one_preposition_per_line(\"wsj.23.txt.dep.pp.children.words\",\n \"wsj.23.txt.dep.pp.heads.words\",\n \"wsj.23.txt.dep.pp.labels\",\n \"wsj.23.txt.dep.pp.\" + new_system + \".predictions\",\n \"wsj.23.txt.dep.pp.preps.words\")\n\nprint('before defining get_instance_indeices')\ndef get_instance_index(instances, likely_index, preps_words, gold_head_word,\n heads_words_candidates):\n preps_words = preps_words.lower()\n gold_head_word = gold_head_word.lower()\n heads_words_candidates = [word.lower() for word in heads_words_candidates]\n attempts = 0\n for i in range(likely_index, len(instances)):\n if i != likely_index:\n pass\n attempts += 1\n if attempts > 10:\n import pdb; pdb.set_trace()\n instance = instances[i]\n if preps_words.lower() != instance.preps_words: continue\n if gold_head_word.lower() != instance.gold_head_word: continue\n compatible = True\n for head_candidate in instance.heads_words:\n if head_candidate not in heads_words_candidates:\n compatible = False\n break\n if not compatible: continue\n # any instance which hasn't been excluded at this point is compatible.\n return i\n assert(False)\n\nprint('before defining yonatan_conll_filenames')\nyonatan_conll_filenames = ['wsj.2-21.txt.dep.pp.yonatan.predictions.conll',\n 'wsj.23.txt.dep.pp.yonatan.predictions.conll']\n\nprint('before processing conll files')\nfor conll_filename in yonatan_conll_filenames:\n instances = train_instances if conll_filename.startswith('wsj.2-21') else test_instances \n new_system_conll_filename = conll_filename.replace('yonatan', new_system) + '.fix'\n with open(conll_filename) as conll_file, open(new_system_conll_filename, mode='w') as new_system_conll_file:\n sent_buffer = []\n 
preps_counter = -1\n for line in conll_file:\n if len(line.strip()) > 1:\n # add token to sentence buffer.\n sent_buffer.append(line.strip())\n else:\n # process sentence.\n for token_index, token_line in enumerate(sent_buffer):\n if token_line.endswith('_'): \n new_system_conll_file.write(token_line + '\\n')\n continue\n preps_counter += 1\n fields = token_line.split('\\t')\n assert(token_index == int(fields[0])-1)\n preps_words = fields[1].lower()\n gold_head_word = sent_buffer[int(fields[6])-1].split('\\t')[1].lower()\n children_words_candidates = [sent_buffer[i].split('\\t')[1].lower() \\\n for i \\\n in range(token_index+1, len(sent_buffer))]\n heads_words_candidates = [sent_buffer[i].split('\\t')[1].lower() \\\n for i \\\n in range(0, token_index)]\n # instead of searching for a compatible index, check instances[preps_counter].\n preps_counter = get_instance_index(instances,\n preps_counter,\n preps_words, \n gold_head_word, \n heads_words_candidates)\n # write next prediction.\n new_system_head_word = instances[preps_counter].new_system_head_word.lower()\n new_system_head_id = None\n for head_index in reversed(range(0, token_index)):\n if sent_buffer[head_index].split('\\t')[1].lower() == new_system_head_word:\n new_system_head_id = head_index + 1\n break\n if new_system_head_id == None:\n print('ERROR: couldn\\'t find the head word predicted by new_system in the same sentence')\n import pdb; pdb.set_trace()\n assert(False)\n # replace yonatan's prediction with new_system prediction.\n fields[-1] = str(new_system_head_id)\n new_system_conll_file.write('\\t'.join(fields) + '\\n')\n sent_buffer.clear()\n new_system_conll_file.write('\\n')\n","sub_path":"data/pp-data-english/replace_yonatan_with_ontolstm_predictions.py","file_name":"replace_yonatan_with_ontolstm_predictions.py","file_ext":"py","file_size_in_byte":6287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"370581742","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Dec 23 19:48:49 2019\r\n\r\n@author: boonping\r\n\"\"\"\r\n\r\nimport cv2\r\nimport os,sys\r\nimport logging as log\r\nimport datetime as dt\r\nfrom time import sleep\r\nimport numpy as np\r\nfrom matplotlib import pyplot as plt\r\n\r\n'''\r\nfrom keras.preprocessing.image import ImageDataGenerator\r\nfrom keras.preprocessing.image import img_to_array\r\nfrom keras.preprocessing.image import load_img\r\n'''\r\n\r\nfrom tensorflow.keras.callbacks import ModelCheckpoint,CSVLogger,LearningRateScheduler\r\nfrom tensorflow.keras.models import Model\r\nfrom tensorflow.keras.models import load_model\r\nfrom tensorflow.keras.layers import Input\r\nfrom tensorflow.keras.layers import Dense\r\nfrom tensorflow.keras.layers import Flatten,Dropout\r\nfrom tensorflow.keras.layers import Conv2D\r\nfrom tensorflow.keras.layers import BatchNormalization\r\nfrom tensorflow.keras.layers import Activation\r\nfrom tensorflow.keras.layers import AveragePooling2D,MaxPooling2D,UpSampling2D\r\nfrom tensorflow.keras.layers import add,Lambda\r\nfrom tensorflow.keras.regularizers import l2\r\nfrom tensorflow.keras.utils import to_categorical,plot_model\r\n#from tensorflow.keras.datasets import cifar10\r\nfrom tensorflow.keras import optimizers\r\nfrom tensorflow.keras import backend\r\nfrom tensorflow.keras.models import model_from_json\r\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator,img_to_array,load_img\r\nimport IPython\r\nfrom scipy import ndimage\r\nfrom scipy.ndimage.interpolation import 
shift\r\nfrom numpy import savetxt,loadtxt\r\n#savetxt('data.csv', data, delimiter=',')\r\n#data = loadtxt('data.csv', delimiter=',')\r\nimport gc\r\nfrom skimage.transform import resize\r\n\r\n\r\ndef moving_average(a, n=3) :\r\n ret = np.cumsum(a, dtype=float)\r\n ret[n:] = ret[n:] - ret[:-n]\r\n return ret[n - 1:] / n\r\n\r\ndef grayplt(img,title=''):\r\n '''\r\n plt.axis('off')\r\n if np.size(img.shape) == 3:\r\n plt.imshow(img[:,:,0],cmap='gray',vmin=0,vmax=1)\r\n else:\r\n plt.imshow(img,cmap='gray',vmin=0,vmax=1)\r\n plt.title(title, fontproperties=prop)\r\n '''\r\n \r\n fig,ax = plt.subplots(1)\r\n ax.set_aspect('equal')\r\n \r\n\r\n\r\n # Show the image\r\n if np.size(img.shape) == 3:\r\n ax.imshow(img[:,:,0],cmap='hot',vmin=0,vmax=1)\r\n else:\r\n ax.imshow(img,cmap='hot',vmin=0,vmax=1)\r\n \r\n plt.show()\r\n\r\ndef adjust_gamma(image, gamma=1.0):\r\n\t# build a lookup table mapping the pixel values [0, 255] to\r\n\t# their adjusted gamma values\r\n\tinvGamma = 1.0 / gamma\r\n\ttable = np.array([((i / 255.0) ** invGamma) * 255 for i in np.arange(0, 256)]).astype(\"uint8\")\r\n \r\n\t# apply gamma correction using the lookup table\r\n\treturn cv2.LUT(image, table) \r\n\r\n\r\nmodel = load_model('facenet/facenet_keras.h5')\r\nmodel.summary()\r\nprint(model.inputs)\r\nprint(model.outputs)\r\n\r\nmodel.load_weights(\"facenet/facenet_keras_weights.h5\")\r\n\r\nimages = ['frame1.jpg','frame2.jpg','frame3.jpg','frame4.jpg','frame6.jpg']\r\n#images = ['frame6.jpg']\r\n#p2 = 'image2/frame3.jpg'\r\n#a=np.array([23,12,15])\r\n#print( a[a<16].size )\r\n#raise\r\n\r\nos.popen(\"del *merged_representation*\")\r\njjj=-1\r\nfor img in images: #def preprocess_image(img):\r\n jjj+=1\r\n imgs =np.array([])\r\n\r\n imag=cv2.imread(img)\r\n training=np.array([])\r\n #training.append(img.tolist())\r\n\r\n res = cv2.resize(imag,(160, 160), interpolation = cv2.INTER_CUBIC)\r\n grayplt(res/255)\r\n imgs=res #np.expand_dims(res,axis=0)\r\n print(imgs.shape)\r\n \r\n res=adjust_gamma(res, gamma=1.2)\r\n imgs=np.append(imgs,res,axis=0) \r\n print(imgs.shape)\r\n \r\n '''\r\n res = cv2.resize(imag,(160, 160), interpolation = cv2.INTER_CUBIC)\r\n res = cv2.rectangle(res, (0, 110), (160, 160), (0, 0, 0), -1)\r\n grayplt(res/255)\r\n imgs=np.append(imgs,res,axis=0) \r\n print(imgs.shape)\r\n res=adjust_gamma(res, gamma=1.2)\r\n imgs=np.append(imgs,res,axis=0) \r\n print(imgs.shape)\r\n \r\n \r\n res = cv2.resize(imag,(160, 160), interpolation = cv2.INTER_CUBIC)\r\n res = cv2.rectangle(res, (0, 0), (160, 110), (0, 0, 0), -1)\r\n grayplt(res/255)\r\n imgs=np.append(imgs,res,axis=0) \r\n print(imgs.shape)\r\n res=adjust_gamma(res, gamma=1.2)\r\n imgs=np.append(imgs,res,axis=0) \r\n print(imgs.shape)\r\n \r\n \r\n res = cv2.resize(imag,(160, 160), interpolation = cv2.INTER_CUBIC)\r\n res = cv2.rectangle(res, (0, 0), (75, 110), (0, 0, 0), -1)\r\n grayplt(res/255)\r\n imgs=np.append(imgs,res,axis=0) \r\n print(imgs.shape)\r\n res=adjust_gamma(res, gamma=1.2)\r\n imgs=np.append(imgs,res,axis=0) \r\n print(imgs.shape)\r\n \r\n\r\n res = cv2.resize(imag,(160, 160), interpolation = cv2.INTER_CUBIC)\r\n res = cv2.rectangle(res, (80, 0), (160, 110), (0, 0, 0), -1)\r\n print(res.shape)\r\n grayplt(res/255)\r\n imgs=np.append(imgs,res,axis=0) \r\n print(imgs.shape)\r\n res=adjust_gamma(res, gamma=1.2)\r\n imgs=np.append(imgs,res,axis=0) \r\n print(imgs.shape)\r\n '''\r\n imgs=imgs.reshape(int(imgs.shape[0]/160),160,160,3)\r\n print(imgs.shape)\r\n \r\n \r\n #raise\r\n \r\n #res=resize(imag,(160,160))\r\n 
##print(res.shape)\r\n #raise\r\n #im2=res\r\n ##############################\r\n '''\r\n grayplt(res/255)\r\n cascPath = \"haarcascade_eye_tree_eyeglasses.xml\"\r\n #cascPath = \"haarcascade_mcs_mouth.xml\"\r\n faceCascade = cv2.CascadeClassifier(cascPath)\r\n log.basicConfig(filename='webcam.log',level=log.INFO)\r\n gray = cv2.cvtColor(res, cv2.COLOR_RGB2GRAY)\r\n\r\n faces = faceCascade.detectMultiScale(\r\n gray,\r\n scaleFactor=1.1,\r\n minNeighbors=5,\r\n minSize=(30, 30)\r\n )\r\n\r\n # Draw a rectangle around the faces\r\n for (x, y, w, h) in faces:\r\n resized = cv2.resize(res[y:y+h,x:x+w], (160,160), interpolation = cv2.INTER_AREA)\r\n grayplt(resized/255)\r\n raise\r\n '''\r\n ############################## \r\n \r\n for iii in range(imgs.shape[0]):\r\n #print(img1)\r\n img_temp=imgs[iii]/255\r\n \r\n #cv2.imshow('frame', adjusted)\r\n img=imgs[iii]\r\n print(\"9981\",imgs[iii].shape)\r\n grayplt(img/255)\r\n #print(img[140][25])\r\n #print(img[25][140])\r\n\r\n\r\n #continue\r\n ###############\r\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\r\n #print(hsv[140][25])\r\n #print(hsv[25][140])\r\n \r\n # define range of blue color in HSV\r\n lower_blue= np.array([0,10,45])\r\n lower_blue= np.array([0,10,45])\r\n upper_blue = np.array([55,180,255])\r\n upper_blue = np.array([55,180,255])\r\n \r\n \r\n # Threshold the HSV image to get only blue colors\r\n mask = cv2.inRange(hsv, lower_blue, upper_blue)\r\n \r\n # Bitwise-AND mask and original image\r\n res = cv2.bitwise_and(img,img, mask= mask)\r\n \r\n imgray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)\r\n ret, thresh = cv2.threshold(255-imgray, 127, 255, 0)\r\n im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\r\n cnt = contours[4]\r\n cv2.drawContours(im2, [cnt], 0, (255,255,255), 3)\r\n im2=255-im2 \r\n img_temp2=np.expand_dims(img_temp,axis=0)\r\n #grayplt(img_temp2[0])\r\n \r\n img=np.expand_dims(img,axis=0)/255\r\n res=np.expand_dims(res,axis=0)/255\r\n grayplt(im2)\r\n im2=im2/255\r\n im3=np.fliplr(im2)\r\n im4=np.flipud(im2)\r\n im5=np.fliplr(im4)\r\n #print(im2[im2<0.1].size,160*160*3*0.95)\r\n #print(im2[im2>0.1].size,160*160*3*0.95)\r\n \r\n for i in range(15):\r\n im2=ndimage.maximum_filter(im2, size=2)\r\n im3=ndimage.maximum_filter(im3, size=2)\r\n im4=ndimage.maximum_filter(im4, size=2)\r\n im5=ndimage.maximum_filter(im5, size=2)\r\n \r\n im2=ndimage.maximum_filter(im2, size=2)\r\n im3=ndimage.maximum_filter(im3, size=2)\r\n im4=ndimage.maximum_filter(im4, size=2)\r\n im5=ndimage.maximum_filter(im5, size=2)\r\n \r\n #im2=scipy.ndimage.gaussian_filter(im2, sigma=1.1)\r\n #im2=ndimage.minimum_filter(im2, size=2)\r\n #im3=ndimage.minimum_filter(im3, size=2)\r\n #im4=ndimage.minimum_filter(im4, size=2)\r\n #im5=ndimage.minimum_filter(im5, size=2)\r\n #print(im2[im2<0.1].size,160*160*3*0.95)\r\n #print(im2[im2>0.1].size,160*160*3*0.95)\r\n \r\n im3=np.fliplr(im3)\r\n im4=np.flipud(im4)\r\n im5=np.fliplr(im5)\r\n im5=np.flipud(im5)\r\n #grayplt(im2)\r\n #grayplt(im3)\r\n #grayplt(im4)\r\n #grayplt(im5)\r\n \r\n #raise\r\n im2=im2*im3*im4*im5\r\n print(im2[im2<0.1].size,160*160*3*0.95)\r\n print(im2[im2>0.1].size,160*160*3*0.95)\r\n \r\n img2 = np.zeros( ( np.array(im2).shape[0], np.array(im2).shape[1], 3 ) )\r\n img2[:,:,0] = im2 # same value in each channel\r\n img2[:,:,1] = im2\r\n img2[:,:,2] = im2\r\n \r\n im22=img2\r\n grayplt(im22)\r\n #raise\r\n print(im22[im22<0.1].size,160*160*3*0.95)\r\n print(im22[im22>0.1].size,160*160*3*0.95)\r\n #raise\r\n if 
(im22[im22<0.1].size)>160*160*3*0.95:\r\n print(\"skip\")\r\n continue\r\n im22=img2*img_temp\r\n print(\"im22\")\r\n #grayplt(im22)\r\n im2=im22*255\r\n #print(im2.shape)\r\n \r\n res5=im2\r\n \r\n #grayplt(res5/255)\r\n \r\n \r\n res=np.expand_dims(im2,axis=0)\r\n training=np.append(training,res)\r\n \r\n \r\n \r\n for sc in range(80,160,20):\r\n print(\"999:\",sc)\r\n #res7 = cv2.resize(res5,(sc, sc), interpolation = cv2.INTER_CUBIC)\r\n res7=resize(res5,(sc,sc))\r\n sc1=160-sc\r\n sc1/=2\r\n sc1=int(sc1)\r\n sc2=80-sc1\r\n #print(sc1)\r\n res1=np.zeros((sc1,sc1,3))\r\n res2=np.zeros((160,sc1,3)) #np.concatenate((res1,res1,res1,res1))\r\n res3=np.zeros((sc1,sc2*2,3)) #np.concatenate((res1,res1),axis=1)\r\n #print(res.shape)\r\n #print(res2.shape)\r\n #print(res3.shape) \r\n #print(res5.shape) \r\n res4=np.concatenate((res3,res7,res3))\r\n res6=np.concatenate((res2,res4,res2),axis=1)\r\n \r\n #training.append(res.tolist())\r\n training=np.append(training,res6)\r\n #grayplt(res6/255)\r\n ##############\r\n \r\n \r\n \r\n for ang in [-45,-30,-15,0,15,30,45]:\r\n img = ndimage.rotate(res6, ang, mode='nearest')\r\n #print(img.shape)\r\n trim1=(img.shape[0]-160)/(2)\r\n trim1=int(trim1)\r\n trim2=(img.shape[1]-160)/(2)\r\n trim2=int(trim2)\r\n res1=img[trim1:trim1+160,trim2:trim2+160]\r\n training=np.append(training,res1) \r\n \r\n shi=20 #int( 30-(sc-80)/2 )\r\n for sh in [-20,-10,0,10,20]: #range(-shi,shi,10):\r\n for sh2 in [-20,-10,0,10,20]: #range(-shi,shi,10):\r\n res9 = np.roll(res1, sh, axis=0)\r\n res9 = np.roll(res9, sh2, axis=1)\r\n training=np.append(training,res9)\r\n \r\n print(\"shape:\",training.shape)\r\n training=training.reshape( int(training.shape[0]/76800),160,160,3)\r\n \r\n img1_representation = model.predict(training)\r\n #savetxt('img%i_representation_%s_%s.csv' % (jjj,iii,sc), img1_representation, delimiter=',')\r\n with open('img%i_merged_representation_%s.csv' % (jjj,iii), \"ab\") as f:\r\n savetxt(f, img1_representation, delimiter=',')\r\n \r\n training=np.array([])\r\n res5=im2\r\n res=np.expand_dims(im2,axis=0)\r\n training=np.append(training,res)\r\n \r\n for sc in range(180,220,20):\r\n \r\n #res1 = cv2.resize(res5,(sc, sc), interpolation = cv2.INTER_CUBIC)\r\n res1=resize(res5,(sc,sc))\r\n sc1=(sc-160)/2\r\n sc1=int(sc1)\r\n res1=res1[sc1:sc1+160,sc1:sc1+160]\r\n print(\"998\",sc)\r\n grayplt(res1/255)\r\n \r\n #training.append(res.tolist())\r\n training=np.append(training,res1)\r\n for ang in [-45,-30,-15,0,15,30,45]:\r\n img = ndimage.rotate(res1, ang, mode='nearest')\r\n #print(img.shape)\r\n trim1=(img.shape[0]-160)/(2)\r\n trim1=int(trim1)\r\n trim2=(img.shape[1]-160)/(2)\r\n trim2=int(trim2)\r\n res2=img[trim1:trim1+160,trim2:trim2+160]\r\n training=np.append(training,res2) \r\n \r\n shi=30 #int( 30-(sc-80)/2 )\r\n for sh in [-20,-10,0,10,20]: #range(-shi,shi,10):\r\n for sh2 in [-20,-10,0,10,20]: #range(-shi,shi,10):\r\n res9 = np.roll(res2, sh, axis=0)\r\n res9 = np.roll(res9, sh2, axis=1)\r\n training=np.append(training,res9)\r\n \r\n print(\"shape:\",training.shape)\r\n training=training.reshape( int(training.shape[0]/76800),160,160,3)\r\n \r\n img1_representation = model.predict(training)\r\n #savetxt('img%i_representation_%s_%s.csv' % (jjj,iii,sc), img1_representation, delimiter=',')\r\n with open('img%i_merged_representation_%s.csv' % (jjj,iii), \"ab\") as ff:\r\n savetxt(ff, img1_representation, delimiter=',')\r\n \r\n training=np.array([])\r\n res5=im2\r\n res=np.expand_dims(im2,axis=0)\r\n training=np.append(training,res)\r\n \r\n\r\n 
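# Aside: a compact, standalone sketch of the augmentation pattern used above
# (rotate with ndimage.rotate, centre-crop back to 160x160, then wrap-shift
# with np.roll). The dummy image stands in for the script's masked face crops.
import numpy as np
from scipy import ndimage

def rotate_and_crop(img, angle, size=160):
    # ndimage.rotate grows the canvas, so crop the centre back out
    rotated = ndimage.rotate(img, angle, mode='nearest')
    top = (rotated.shape[0] - size) // 2
    left = (rotated.shape[1] - size) // 2
    return rotated[top:top + size, left:left + size]

dummy = np.random.rand(160, 160, 3)
for ang in (-15, 0, 15):
    crop = rotate_and_crop(dummy, ang)
    shifted = np.roll(np.roll(crop, 10, axis=0), -10, axis=1)
    assert shifted.shape == (160, 160, 3)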
#os.popen(\"copy img%i_representation_*.csv img%i_merged_representation.csv\" % (jjj,jjj) ) \r\n    '''\r\n    for i in range(training.shape[0]):\r\n        grayplt(training[i]/255)\r\n    raise\r\n    '''\r\n    #res=np.expand_dims(res,axis=0)\r\n    #return training\r\n\r\n\r\n#import tensorflow as tf\r\n\r\n'''\r\nwith open('jsonmodel.json') as json_file:\r\n    json_config = json_file.read()\r\nmodel = model_from_json(json_config)\r\n\r\n#Pre-trained OpenFace weights: https://bit.ly/2Y34cB8\r\nmodel.load_weights(\"openface_weights.h5\")\r\n'''\r\n#p2 = 'image2/frame2.jpg'\r\n#preprocess_image(p1)\r\n#raise \r\n#img1_representation = model.predict(preprocess_image(p1))[0,:]\r\n#img2_representation = model.predict(preprocess_image(p2))[0,:]\r\n#training1=preprocess_image(p1)\r\n#savetxt('training1.csv', training1, delimiter=',')\r\n\r\n#img2_representation = model.predict(preprocess_image(p2))\r\n\r\n\r\n\r\ndef findCosineDistance(source_representation, test_representation):\r\n    a = np.matmul(np.transpose(source_representation), test_representation)\r\n    b = np.sum(np.multiply(source_representation, source_representation))\r\n    c = np.sum(np.multiply(test_representation, test_representation))\r\n    return 1 - (a / (np.sqrt(b) * np.sqrt(c)))\r\n    \r\ndef l2_normalize(x, axis=-1, epsilon=1e-10):\r\n    output = x / np.sqrt(np.maximum(np.sum(np.square(x), axis=axis, keepdims=True), epsilon))\r\n    return output\r\n    \r\ndef findEuclideanDistance(source_representation, test_representation):\r\n    euclidean_distance = source_representation - test_representation\r\n    euclidean_distance = np.sum(np.multiply(euclidean_distance, euclidean_distance))\r\n    euclidean_distance = np.sqrt(euclidean_distance)\r\n    #euclidean_distance = l2_normalize(euclidean_distance )\r\n    return euclidean_distance\r\n    \r\n'''\r\ncosine = findCosineDistance(img1_representation, img2_representation)\r\neuclidean = findEuclideanDistance(img1_representation, img2_representation)\r\n\r\nif cosine <= 0.02:\r\n    print(\"these are same\")\r\nelse:\r\n    print(\"these are different\")\r\n'''\r\n#RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini',max_depth=None, max_features='auto', max_leaf_nodes=None,min_impurity_split=1e-07, min_samples_leaf=1,min_samples_split=2, min_weight_fraction_leaf=0.0,n_estimators=10, n_jobs=2, oob_score=False, random_state=0,verbose=0, warm_start=False)","sub_path":"facenet_predict2.py","file_name":"facenet_predict2.py","file_ext":"py","file_size_in_byte":16064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"430118903","text":"import socket\n\nsockfd = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\nsockfd.bind((\"127.0.0.1\", 8888))\n\nwhile True:\n    try:\n        data, addr = sockfd.recvfrom(2048)\n\n        if data == \"file\".encode():\n            f = open(\"haha.jpeg\", \"wb\")\n            while True:\n                data, addr = sockfd.recvfrom(2048)\n                if data == \"#file\".encode():\n                    break\n                else:\n                    f.write(data)\n\n            f.close()\n\n\n\n        print(\"Connected to client {}, message: {}\".format(addr,data.decode()))\n        sockfd.sendto(\"OK\".encode(),addr)\n    except KeyboardInterrupt:\n        break\n\nsockfd.close()\n","sub_path":"test_UDP_server.py","file_name":"test_UDP_server.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"214571249","text":"# Team -- Theodore Peters & Mai Rachelevsky\n# Sotdev pd7\n# K06 -- mongo to the polls\n# 2019-02-28\n\nimport pymongo\nimport json\n\nSERVER_ADDR = \"jayy.mooo.com\"\nconnection = pymongo.MongoClient(SERVER_ADDR)
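# Aside: hedged usage sketch for the distance helpers defined in
# facenet_predict2.py above. Embeddings from model.predict are compared
# pairwise; a small cosine distance means "same face". The 0.02 threshold
# follows the commented-out block in that file, and the vectors are made up.
import numpy as np

emb1 = np.array([0.10, 0.90, 0.30])
emb2 = np.array([0.10, 0.80, 0.35])
cosine = 1 - emb1.dot(emb2) / (np.linalg.norm(emb1) * np.linalg.norm(emb2))
print("these are same" if cosine <= 0.02 else "these are different")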
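# Aside: a hypothetical client-side counterpart to test_UDP_server.py above,
# matching its tiny protocol: send the literal token "file", stream the file
# in 2048-byte datagrams, then send "#file" as the end marker and wait for the
# server's reply. The file path and address here are illustrative assumptions.
import socket

def send_file(path, addr=("127.0.0.1", 8888), chunk=2048):
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.sendto("file".encode(), addr)
    with open(path, "rb") as f:
        while True:
            data = f.read(chunk)
            if not data:
                break
            s.sendto(data, addr)
    s.sendto("#file".encode(), addr)   # terminator the server's inner loop waits for
    reply, _ = s.recvfrom(2048)        # server acknowledges after writing the file
    s.close()
    return reply.decode()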
\ndb = connection.computerInteraction\ncollection = db.movies\n\nf=open(\"movies.json\",\"r\")\ndata=json.loads(f.read())\nf.close()\n\ncollection.insert_many(data)\n","sub_path":"07_mongo/that.py","file_name":"that.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"341780964","text":"def getHTMX_response(url):\n    try:\n        # Build a disguised (browser-like) request header\n        rhead = {'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:66.0) Gecko/20100101 Firefox/66.0'}\n        r = requests.get(url ,headers=rhead)\n        r.raise_for_status()    # Raise an error if the returned status is not 200\n        r.encoding = r.apparent_encoding  # Parse the response with the encoding detected by apparent_encoding\n\n        return r\n    except:\n        return None\n\ndef get_Image(url):\n    url2 = url\n    r = getHTMX_response(url2)\n    if r != None :\n        listurl = pattern.findall(r.text)\n        for u in listurl:\n            Image_url = root_url + u\n            r = getHTMX_response(Image_url)\n            if r is None:   # skip images whose request failed instead of crashing on r.content\n                continue\n\n            path = root + 'page' +str(i) + u.split('/')[-1]\n\n            with open(path,'wb') as file:\n                file.write(r.content)\n                print('ok')\n    else:\n        print('Request failed')\n\n    \nimport requests\nimport re \nroot_url = 'http://www.lhjb.net/'\nnext_url = 'http://www.lhjb.net/xinggan'\nroot = \"C:\\\\Users\\\\Jianlong Jin\\\\Pictures\\\\mm131\\\\\"\npattern = re.compile('/uploads.+jpg')\n\nfor i in range(1,4):\n    if i == 1:\n        Page_url = next_url\n        get_Image(Page_url)\n    else:\n        Page_url = next_url +'/' + str(i) + \".html\"\n        get_Image(Page_url)\n    print('Finished one page')\nprint('Done')\n","sub_path":"learn_python/LearnPython/mm131.py","file_name":"mm131.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"547705861","text":"from tkinter import *\r\nfrom PIL import ImageTk, Image\r\nimport mysql.connector\r\nfrom tkinter import messagebox\r\nfrom add_book import *\r\nfrom view_books import *\r\nfrom delete_book import *\r\nfrom issue_book import *\r\nfrom return_book import *\r\nfrom view_issued_books import *\r\n\r\nroot = Tk()\r\nroot.title('Library')\r\nroot.minsize(width=400, height=400)\r\nroot.geometry('650x550')\r\n\r\nsame = True\r\nn = 0.9\r\n\r\n# Adding a background image\r\nbackground_image = Image.open('D:/nebula.jpg')\r\n[imageSizeWidth, imageSizeHeight] = background_image.size\r\n\r\nnewImageSizeWidth = int(imageSizeWidth * n)\r\nif same:\r\n    newImageSizeHeight = int(imageSizeHeight * n)\r\n    # print('if part ',newImageSizeHeight)\r\nelse:\r\n    newImageSizeHeight = int(imageSizeHeight / n)\r\n    # print('else part ',newImageSizeHeight)\r\n\r\n\r\nbackground_image = background_image.resize((newImageSizeWidth, newImageSizeHeight), Image.ANTIALIAS)\r\nimg = ImageTk.PhotoImage(background_image)\r\nCanvas1 = Canvas(root)\r\nCanvas1.create_image(325, 340, image=img)\r\nCanvas1.config(bg=\"white\", width=newImageSizeWidth, height=newImageSizeHeight)\r\nCanvas1.pack(expand=True, fill=BOTH)\r\n\r\nheadingFrame1 = Frame(root,bg=\"#FFBB00\",bd=5)\r\nheadingFrame1.place(relx=0.2,rely=0.1,relwidth=0.6,relheight=0.16)\r\nheadingLabel = Label(headingFrame1, text=\"Welcome to \\n D.Y.Patil Library\", bg='black', fg='white', font=('Courier',15))\r\nheadingLabel.place(relx=0,rely=0, relwidth=1, relheight=1)\r\n\r\n\r\nadd_book_btn = Button(root, text='Add Book Details', bg='black', fg='white', command=add_book)\r\nadd_book_btn.place(relx=0.28, rely=0.4, relwidth=0.45, relheight=0.1)\r\n\r\ndelete_book_btn = Button(root, text='Delete Book', bg='black', fg='white', command=delete)\r\ndelete_book_btn.place(relx=0.28, rely=0.5, relwidth=0.45, relheight=0.1)
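# Aside: the six Button/place pairs in this script differ only in label,
# callback, and rely (which steps by 0.1), so an equivalent loop-based
# construction is sketched here for comparison. It reuses the names defined
# above and is commented out so it does not duplicate the widgets.
#
#   button_specs = [('Add Book Details', add_book), ('Delete Book', delete),
#                   ('View Book List', view_books), ('Issue Book', issue),
#                   ('View Issued Books', view_issued_books),
#                   ('Return Book', return_book_templete)]
#   for row, (label, callback) in enumerate(button_specs):
#       btn = Button(root, text=label, bg='black', fg='white', command=callback)
#       btn.place(relx=0.28, rely=0.4 + 0.1 * row, relwidth=0.45, relheight=0.1)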
\r\n\r\nview_book_btn = Button(root, text='View Book List', bg='black', fg='white', command=view_books)\r\nview_book_btn.place(relx=0.28, rely=0.6, relwidth=0.45, relheight=0.1)\r\n\r\nissue_book_btn = Button(root, text='Issue Book', bg='black', fg='white', command=issue)\r\nissue_book_btn.place(relx=0.28, rely=0.7, relwidth=0.45, relheight=0.1)\r\n\r\nview_issued_book_btn = Button(root, text='View Issued Books', bg='black', fg='white', command=view_issued_books)\r\nview_issued_book_btn.place(relx=0.28, rely=0.8, relwidth=0.45, relheight=0.1)\r\n\r\nreturn_book_btn = Button(root, text='Return Book', bg='black', fg='white', command=return_book_templete)\r\nreturn_book_btn.place(relx=0.28, rely=0.9, relwidth=0.45, relheight=0.1)\r\n# 'D:/nebula.jpg'\r\n\r\n\r\nroot.mainloop()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"343248943","text":"\"\"\"\nwith stdout_redirected():\n    CODE\nSuppresses output from CODE.\n\"\"\"\n\nimport os\nimport sys\nimport contextlib\ndef fileno(file_or_fd):\n    \"\"\"\n    Return file number.\n    \"\"\"\n    fds = getattr(file_or_fd, 'fileno', lambda: file_or_fd)()\n    if not isinstance(fds, int):\n        raise ValueError(\"Expected file or file descriptor\")\n    return fds\n\n@contextlib.contextmanager\ndef stdout_redirected(too=os.devnull, stdout=None):\n    \"\"\"\n    http://stackoverflow.com/a/22434262/190597 (J.F. Sebastian)\n    \"\"\"\n    if stdout is None:\n        stdout = sys.stdout\n\n    stdout_fd = fileno(stdout)\n    # copy stdout_fd before it is overwritten\n    #NOTE: `copied` is inheritable on Windows when duplicating a standard stream\n    with os.fdopen(os.dup(stdout_fd), 'wb') as copied:\n        stdout.flush()  # flush library buffers that dup2 knows nothing about\n        try:\n            os.dup2(fileno(too), stdout_fd)  # $ exec >&to\n        except ValueError:  # filename\n            with open(too, 'wb') as to_file:\n                os.dup2(to_file.fileno(), stdout_fd)  # $ exec > to\n        try:\n            yield stdout  # allow code to be run with the redirected stdout\n        finally:\n            # restore stdout to its previous value\n            #NOTE: dup2 makes stdout_fd inheritable unconditionally\n            stdout.flush()\n            os.dup2(copied.fileno(), stdout_fd)  # $ exec >&copied\n","sub_path":"bbh_processing/stdout_redirected.py","file_name":"stdout_redirected.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"80208478","text":"\n# https://www.udemy.com/complete-guide-to-tensorflow-for-deep-learning-with-python/learn/lecture/8695996#overview\n\nimport numpy as np\nimport tensorflow as tf\nimport pandas as pd\nimport matplotlib.pylab as plt\n\n# Generate the full data set\nx_data = np.linspace(0,10.0, 1000000)\nnoise = np.random.randn(len(x_data))\ny_data = (5.1 * x_data) + 2.5 + noise\n\nx_df = pd.DataFrame(data=x_data, columns=['X data'])\ny_df = pd.DataFrame(data=y_data, columns=['Y'])\nmy_data = pd.concat([x_df,y_df],axis=1)\n\n\n# Create training and evaluation data\n# Not needed if the data is already prepared.\nfrom sklearn.model_selection import train_test_split\nx_train, x_eval, y_train, y_eval = train_test_split(x_data, y_data, test_size=0.3, random_state=101)\nprint(type(x_train), x_train.shape)\nprint(type(x_eval), x_eval.shape)\n\n\n# To make defining the model and the input shape convenient,\n# the model is picked from the estimator models and\n# the input shape is defined as a feature_column (acting like a placeholder).\nfeat_cols = [tf.feature_column.numeric_column('x',shape=[1])]\nestimator = tf.estimator.LinearRegressor(feature_columns=feat_cols)
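# Aside: a hedged usage sketch for the stdout_redirected context manager from
# the stdout_redirected.py module above. Because it swaps the underlying file
# descriptor with os.dup2, it also silences output written by C extensions,
# not just Python-level print. Shown commented out; names are illustrative.
#
#   with stdout_redirected(too=os.devnull):
#       print('suppressed')                              # goes to /dev/null
#       os.write(sys.stdout.fileno(), b'suppressed\n')   # raw fd writes too
#   print('visible again')                               # stdout restored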
\n\n# Feed data into the model through input functions\n# batch_size is specified where the input function is defined.\ninput_func = tf.estimator.inputs.numpy_input_fn({'x': x_train}, y_train, batch_size=8, num_epochs=None, shuffle=True)\ntrain_input_func = tf.estimator.inputs.numpy_input_fn({'x': x_train}, y_train, batch_size=8, num_epochs=1000, shuffle=False)\neval_input_func = tf.estimator.inputs.numpy_input_fn({'x': x_eval}, y_eval, batch_size=8, num_epochs=1000, shuffle=False)\n\nestimator.train(input_fn=input_func, steps=1000)\ntrain_metric = estimator.evaluate(input_fn=train_input_func, steps=1000)\neval_metric = estimator.evaluate(input_fn=eval_input_func, steps=1000)\nprint(train_metric)\nprint(eval_metric)\n\n# Data for drawing the resulting regression line\nbrand_new_data = np.linspace(0,10,10)\ninput_func_predict = tf.estimator.inputs.numpy_input_fn({'x':brand_new_data}, shuffle=False)\npredictions = []\nfor pred in estimator.predict(input_fn=input_func_predict):\n    predictions.append(pred['predictions'])\n\nprint(type(predictions))\n\nmy_data.sample(n=250).plot(kind='scatter', x=\"X data\", y=\"Y\")\nplt.plot(brand_new_data,predictions,'r')\nplt.show()\n\n\n\n","sub_path":"udemy/u32-tf-regression-using-estimator.py","file_name":"u32-tf-regression-using-estimator.py","file_ext":"py","file_size_in_byte":2324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"199589389","text":"import os\nimport shutil\nfrom stat import (\n    S_IRUSR, S_IWUSR, S_IXUSR,\n    S_IRGRP, S_IWGRP, S_IXGRP,\n    S_IROTH, S_IWOTH, S_IXOTH,\n    S_ISVTX, S_ISGID\n)\n\nfrom traitlets import Bool\n\nfrom .baseapp import TransferApp, transfer_aliases, transfer_flags\nfrom ..utils import self_owned\n\n\naliases = {}\naliases.update(transfer_aliases)\naliases.update({\n})\n\nflags = {}\nflags.update(transfer_flags)\nflags.update({\n    'force': (\n        {'ReleaseApp' : {'force' : True}},\n        \"Force overwrite of existing files in the exchange.\"\n    ),\n})\n\nclass ReleaseApp(TransferApp):\n\n    name = u'nbgrader-release'\n    description = u'Release an assignment to the nbgrader exchange'\n\n    aliases = aliases\n    flags = flags\n\n    examples = \"\"\"\n        Release an assignment to students. For the usage of instructors.\n\n        This command is run from the top-level nbgrader folder. Before running\n        this command, there are two things you must do.\n\n        First, you have to set the unique `course_id` for the course. It must be\n        unique for each instructor/course combination. 
To set it in the config\n file add a line to the `nbgrader_config.py` file:\n\n c.NbGrader.course_id = 'phys101'\n\n To pass the `course_id` at the command line, add `--course=phys101` to any\n of the below commands.\n\n Second, the assignment to be released must already be in the `release` folder.\n The usual way of getting an assignment into this folder is by running\n `nbgrader assign`.\n\n To release an assignment named `assignment1` run:\n\n nbgrader release assignment1\n\n If the assignment has already been released, you will have to add the\n `--force` flag to overwrite the released assignment:\n\n nbgrader release --force assignment1\n\n To query the exchange to see a list of your released assignments:\n\n nbgrader list\n \"\"\"\n\n force = Bool(False, config=True, help=\"Force overwrite existing files in the exchange.\")\n\n def build_extra_config(self):\n extra_config = super(ReleaseApp, self).build_extra_config()\n extra_config.NbGrader.student_id = '.'\n extra_config.NbGrader.notebook_id = '*'\n return extra_config\n\n def init_src(self):\n self.src_path = self._format_path(self.release_directory, self.student_id, self.assignment_id)\n if not os.path.isdir(self.src_path):\n source = self._format_path(self.source_directory, self.student_id, self.assignment_id)\n if os.path.isdir(source):\n # Looks like the instructor forgot to assign\n self.fail(\"Assignment found in '{}' but not '{}', run `nbgrader assign` first.\".format(\n source, self.src_path))\n else:\n self.fail(\"Assignment not found: {}\".format(self.src_path))\n\n def init_dest(self):\n if self.course_id == '':\n self.fail(\"No course id specified. Re-run with --course flag.\")\n\n self.course_path = os.path.join(self.exchange_directory, self.course_id)\n self.outbound_path = os.path.join(self.course_path, 'outbound')\n self.inbound_path = os.path.join(self.course_path, 'inbound')\n self.dest_path = os.path.join(self.outbound_path, self.assignment_id)\n # 0755\n self.ensure_directory(\n self.course_path,\n S_IRUSR|S_IWUSR|S_IXUSR|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH\n )\n # 0755\n self.ensure_directory(\n self.outbound_path,\n S_IRUSR|S_IWUSR|S_IXUSR|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH\n )\n # 0733 with set GID so student submission will have the instructors group\n self.ensure_directory(\n self.inbound_path,\n S_ISGID|S_IRUSR|S_IWUSR|S_IXUSR|S_IWGRP|S_IXGRP|S_IWOTH|S_IXOTH\n )\n\n def ensure_directory(self, path, mode):\n \"\"\"Ensure that the path exists, has the right mode and is self owned.\"\"\"\n if not os.path.isdir(path):\n os.mkdir(path)\n # For some reason, Python won't create a directory with a mode of 0o733\n # so we have to create and then chmod.\n os.chmod(path, mode)\n else:\n if not self_owned(path):\n self.fail(\"You don't own the directory: {}\".format(path))\n\n def copy_files(self):\n if os.path.isdir(self.dest_path):\n if self.force:\n self.log.info(\"Overwriting files: {} {}\".format(\n self.course_id, self.assignment_id\n ))\n shutil.rmtree(self.dest_path)\n else:\n self.fail(\"Destination already exists, add --force to overwrite: {} {}\".format(\n self.course_id, self.assignment_id\n ))\n self.log.info(\"Source: {}\".format(self.src_path))\n self.log.info(\"Destination: {}\".format(self.dest_path))\n self.do_copy(self.src_path, self.dest_path)\n self.log.info(\"Released as: {} {}\".format(self.course_id, 
self.assignment_id))\n","sub_path":"nbgrader/apps/releaseapp.py","file_name":"releaseapp.py","file_ext":"py","file_size_in_byte":5041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"199856148","text":"import os\nimport numpy as np\nimport skimage.io as imio\nimport matplotlib.pyplot as plt\n\nfrom selfBreakout.breakout_screen import read_obj_dumps, get_individual_data\nfrom game_environment import GameEnvironment\n\n\n# curently for handling breakout saved scene loading \nclass BreakoutLog(GameEnvironment):\n\n def __init__(self, objdump_path, state_path, frame_shape=(84, 84), *args, **kwargs):\n super(BreakoutLog, self).__init__()\n obj_dumps = read_obj_dumps(objdump_path)\n self.state_path = state_path\n self.frame_shape = frame_shape\n self.batch_size = kwargs.get('batch_size', 10000)\n self.frame_l, self.frame_r = 1, 0 # uninitialized frame interval\n\n self.actions = np.array(get_individual_data('Action', obj_dumps, pos_val_hash=2))\n self.paddle_data = np.array(get_individual_data('Paddle', obj_dumps, pos_val_hash=1), dtype=int)\n \n\n # retrieve selected actions associated with frames\n def retrieve_action(self, idxs):\n return self.actions[idxs]\n\n\n # get frame at index idx\n def get_frame(self, idx):\n if idx >= self.frame_r: # move right\n self.load_range(idx)\n if idx < self.frame_l: # move left\n self.load_range(idx-self.batch_size+1)\n return self.frame_buffer[idx-self.frame_l]\n\n\n # get frame at index l to r\n def get_frame_batch(self, l, r):\n frames = np.zeros((r-l, 1,) + self.frame_shape)\n cur_idx = 0\n while cur_idx+l < r:\n self.load_range(cur_idx+l)\n if self.frame_r > r: # copy part of buffer\n frames[cur_idx:, 0, ...] = self.frame_buffer[:r-l-cur_idx, ...]\n else: # copy everthing\n frames[cur_idx:self.frame_r-l, 0, ...] = self.frame_buffer[:, ...]\n cur_idx = self.frame_r-l\n return frames\n\n\n # load batch\n def load_range(self, l):\n self.frame_l = l\n self.frame_r = l + self.batch_size\n self.frame_buffer = np.zeros((self.frame_r-self.frame_l,) + self.frame_shape)\n for idx in range(self.frame_l, self.frame_r):\n self.frame_buffer[idx-self.frame_l, :] = self._load_image(idx)\n\n # load a scene\n def _load_image(self, idx):\n try:\n img = imio.imread(self._get_image_path(idx), as_gray=True) / 256.0\n except FileNotFoundError:\n img = np.full(self.frame_shape, 0)\n return img\n\n\n # image path\n def _get_image_path(self, idx):\n return os.path.join(self.state_path, 'state%d.png'%(idx))\n\n\ndef plot_focus(bo_game, all_focus):\n PX, PY, SHIFT = 2, 10, 20\n focus_img = bo_game.extract_neighbor(\n list(range(len(all_focus))),\n all_focus,\n nb_size=(15, 20)\n )\n for i in range(PY):\n plt.subplot(PX, PY, i + 1)\n plt.imshow(bo_game.get_frame(i+SHIFT))\n\n plt.subplot(PX, PY, PY + i + 1)\n plt.imshow(focus_img[i+SHIFT])\n plt.show()\n\n\n# test loading 12 images with batch 5\nif __name__ == '__main__':\n bo_game = BreakoutLog('selfBreakout/runs', 'selfBreakout/runs/0', batch_size=5)\n INIT = 321\n imgs = bo_game.get_frame_batch(INIT, INIT+12)\n for i in range(12):\n plt.subplot(3, 4, i+1)\n # img = bo_game.get_frame(i+INIT)\n img = imgs[i]\n plt.imshow(img[0, ...])\n plt.show()","sub_path":"breakout_log.py","file_name":"breakout_log.py","file_ext":"py","file_size_in_byte":3316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"617611800","text":"# Copyright 2019 DeepMind Technologies Ltd. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"AlphaZero tic tac toe example.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\n\nimport tensorflow.compat.v1 as tf\n\nfrom open_spiel.python.algorithms import mcts\nfrom open_spiel.python.algorithms import minimax\nfrom open_spiel.python.algorithms.alpha_zero import alpha_zero\nimport pyspiel\n\ntf.enable_eager_execution()\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_integer(\n    \"num_rounds\", 25,\n    \"The number of rounds of self-play followed by neural net training.\")\n\nflags.DEFINE_integer(\"num_self_play_games\", 100,\n                     \"The number of self-play games to play in a round.\")\n\nflags.DEFINE_integer(\n    \"num_training_epochs\", 10,\n    \"The number of passes over the replay buffer done during training.\")\n\nflags.DEFINE_integer(\"batch_size\", 128,\n                     \"Number of transitions to sample at each learning step.\")\n\nflags.DEFINE_integer(\"replay_buffer_capacity\", 50000,\n                     \"The size of the replay buffer.\")\n\nflags.DEFINE_integer(\n    \"evaluation_frequency\", 3,\n    (\"The current net will be evaluated against a minimax player every \"\n     \"evaluation_frequency rounds.\"))\n\nflags.DEFINE_string(\n    \"net_type\", \"mlp\",\n    \"The type of network to use. Can be either 'mlp' or 'resnet.\")\n\nflags.DEFINE_string(\n    \"device\", \"cpu\",\n    \"Device to evaluate neural nets on. Can be 'cpu', 'tpu' or 'gpu'.\")\n\n\nclass MinimaxBot(pyspiel.Bot):\n  \"\"\"A minimax bot.\"\"\"\n\n  def __init__(self, game):\n    pyspiel.Bot.__init__(self)\n    self._game = game\n\n  def provides_policy(self):\n    return False\n\n  def step(self, state):\n    _, action = minimax.alpha_beta_search(self._game, state=state)\n    return action\n\n\ndef bot_evaluation(game, bots, num_evaluations):\n  \"\"\"Returns a tuple (wins, losses, draws) for player 2.\"\"\"\n  wins, losses, draws = 0, 0, 0\n  for i in range(num_evaluations):\n    [_, result] = pyspiel.evaluate_bots(game.new_initial_state(), bots, i)\n    if result == 0:\n      draws += 1\n    elif result == 1:\n      wins += 1\n    else:\n      losses += 1\n\n  return (wins, losses, draws)\n\n\ndef main(_):\n  game = pyspiel.load_game(\"tic_tac_toe\")\n  num_actions = game.num_distinct_actions()\n  observation_shape = game.observation_tensor_shape()\n\n  # 1. 
Define a keras net\n if FLAGS.net_type == \"resnet\":\n net = alpha_zero.keras_resnet(\n observation_shape, num_actions, num_residual_blocks=1, num_filters=256,\n data_format=\"channels_first\")\n elif FLAGS.net_type == \"mlp\":\n net = alpha_zero.keras_mlp(\n observation_shape, num_actions, num_layers=2, num_hidden=64)\n else:\n raise ValueError((\"Invalid value for 'net_type'. Must be either 'mlp' or \"\n \"'resnet', but was %s\") % FLAGS.net_type)\n\n model = alpha_zero.Model(\n net, l2_regularization=1e-4, learning_rate=0.01, device=FLAGS.device)\n\n # 2. Create an MCTS bot using the previous keras net\n evaluator = alpha_zero.AlphaZeroKerasEvaluator(game, model)\n\n bot = mcts.MCTSBot(game,\n 1.,\n 20,\n evaluator,\n solve=False,\n dirichlet_noise=(0.25, 1.))\n\n # 3. Build an AlphaZero instance\n a0 = alpha_zero.AlphaZero(game,\n bot,\n model,\n replay_buffer_capacity=FLAGS.replay_buffer_capacity,\n action_selection_transition=4)\n\n # 4. Create a bot using min-max search. It can never lose tic-tac-toe, so\n # a success condition for our AlphaZero bot is to draw all games with it.\n minimax_bot = MinimaxBot(game)\n\n # 5. Run training loop\n for num_round in range(FLAGS.num_rounds):\n logging.info(\"------------- Starting round %s out of %s -------------\",\n num_round, FLAGS.num_rounds)\n\n if num_round % FLAGS.evaluation_frequency == 0:\n num_evaluations = 50\n logging.info(\"Playing %s games against the minimax player.\",\n num_evaluations)\n (_, losses, draws) = bot_evaluation(game, [minimax_bot, a0.bot],\n num_evaluations=50)\n logging.info(\"Result against Minimax player: %s losses and %s draws.\",\n losses, draws)\n\n logging.info(\"Running %s games of self play\", FLAGS.num_self_play_games)\n a0.self_play(num_self_play_games=FLAGS.num_self_play_games)\n\n logging.info(\"Training the net for %s epochs.\", FLAGS.num_training_epochs)\n a0.update(FLAGS.num_training_epochs,\n batch_size=FLAGS.batch_size,\n verbose=True)\n evaluator.value_and_prior.cache_clear()\n\n\nif __name__ == \"__main__\":\n app.run(main)\n","sub_path":"open_spiel/python/examples/tic_tac_toe_alpha_zero.py","file_name":"tic_tac_toe_alpha_zero.py","file_ext":"py","file_size_in_byte":5950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"370138382","text":"#Create a BlogPost class that has\n#an author_name\n#a title\n#a text\n#a publication_date\n#Create a few blog post objects:\n# \"Lorem Ipsum\" titled by John Doe posted at \"2000.05.04.\"\n# Lorem ipsum dolor sit amet.\n# \"Wait but why\" titled by Tim Urban posted at \"2010.10.10.\"\n# A popular long-form, stick-figure-illustrated blog about almost everything.\n# \"One Engineer Is Trying to Get IBM to Reckon With Trump\" titled by William Turton at \"2017.03.28.\" # nopep8\n# Daniel Hanley, a cybersecurity engineer at IBM, doesn’t want to be the center of attention. When I asked to take his picture outside one of IBM’s New York City offices, he told me that he wasn’t really into the whole organizer profile thing. 
# nopep8\n\n\nclass BlogPost():\n author_name = \"\"\n title = \"\"\n publication_date = \"\"\n text = \"\"\n\n\nblogpost_first = BlogPost()\nblogpost_second = BlogPost()\nblogpost_third = BlogPost()\n\nblogpost_first.author_name = \"John Doe\"\nblogpost_first.title = \"Lorem Ipsum\"\nblogpost_first.publication_date = \"2000.05.04\"\nblogpost_first.text = \"Lorem ipsum dolor sit amet.\"\n\nblogpost_second.author_name = \"Tim Urban\"\nblogpost_second.title = \"Wait, but why?\"\nblogpost_second.publication_date = \"2010.10.10\"\nblogpost_second.text = \"A popular long-form, stick-figure-illustrated blog about almost everything.\" # nopep8\n\nblogpost_third.author_name = \"William Turton\"\nblogpost_third.title = \"One Engineer Is Trying to Get IBM to Reckon With Trump\"\nblogpost_third.publication_date = \"2017.03.28\"\nblogpost_third.text = \"Daniel Hanley, a cybersecurity engineer at IBM, doesn’t want to be the center of attention. When I asked to take his picture outside one of IBM’s New York City offices, he told me that he wasn’t really into the whole organizer profile thing.\" # nopep8\n","sub_path":"week-04/day-01/blog_post.py","file_name":"blog_post.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"20730812","text":"# initialising the sqlite3 database\n# handle db operations - initialising db from sql, getting db connections\nimport sqlite3\n\n# handle db resource sharing\ndef get_conn():\n conn = None\n try:\n conn = sqlite3.connect('skills.db')\n except Error as e:\n print(e)\n \n conn.execute('PRAGMA foreign_keys = ON')\n return conn\n\ndef init_db():\n db = sqlite3.connect('skills.db')\n load_schema(db)\n insert_data(db)\n load_skills(db)\n\n db.commit()\n db.close()\n\n# read schema from db.sql into skills.db\ndef load_schema(db):\n with open('db_data/db.sql', 'r') as sql_file:\n sql_script = sql_file.read()\n cursor = db.cursor()\n cursor.executescript(sql_script)\n\n# insert sample data from db_insert.sql into skills.db\ndef insert_data(db):\n with open('db_data/db_insert.sql', 'r') as sql_file:\n sql_script = sql_file.read()\n cursor = db.cursor()\n cursor.executescript(sql_script)\n\n# load skills from txt file into db\ndef load_skills(db):\n f = open('./linkedin_skills.txt', \"r\")\n cursor = db.cursor()\n count = 0\n for line in f:\n count += 1\n if (count % 2 == 0): # half the sample data size to improve performance\n try:\n cursor.execute(\"INSERT OR IGNORE INTO Skill (name) VALUES (?)\", (line.rstrip(),))\n except sqlite3.IntegrityError as e:\n print(e)\n db.commit()\n f.close()\n\n\nif __name__ == \"__main__\":\n init_db()\n","sub_path":"backend/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"389080637","text":"def get_good_kids():\n global matrix\n res = 0\n for x in matrix:\n if 'V' in x:\n res += x.count('V')\n return res\n\n\ndef get_start_position():\n global matrix, n\n for x in range(n):\n if 'S' in matrix[x]:\n return [x, matrix[x].index('S')]\n\n\ndef santa_got_a_cookie():\n global matrix, row, col, count_of_presents, good_kids_counter\n for r in range(-1, 2):\n for c in range(-1, 2):\n if 0 <= (next_row + r) < n and 0 <= (next_col + c) < n:\n if matrix[next_row + r][next_col + c] != '-':\n count_of_presents -= 1\n if matrix[next_row + r][next_col + c] == 'V':\n good_kids_counter -= 1\n\n matrix[next_row + r][next_col + c] = 
'-'\n\n\ncount_of_presents = int(input())\nn = int(input())\n\nmatrix = [input().split() for x in range(n)]\n\nsanta_position = get_start_position()\ngood_kids_counter = get_good_kids()\n\ngood_kids_counter_copy = good_kids_counter\n\nmoves = {\n 'up': [-1, 0],\n 'down': [1, 0],\n 'left': [0, -1],\n 'right': [0, 1]\n}\n\nwhile count_of_presents > 0 and good_kids_counter > 0:\n data = input()\n if data == 'Christmas morning':\n break\n\n row, col = santa_position\n next_row, next_col = row + moves[data][0], col + moves[data][1]\n\n if 0 <= next_row < n and 0 <= next_col < n:\n matrix[row][col] = '-'\n matrix_position = matrix[next_row][next_col]\n\n santa_position = [next_row, next_col]\n\n if matrix_position == 'V':\n count_of_presents -= 1\n good_kids_counter -= 1\n\n elif matrix_position == 'C':\n santa_got_a_cookie()\n\n matrix[next_row][next_col] = 'S'\n\nif count_of_presents == 0 and good_kids_counter > 0:\n print('Santa ran out of presents!')\n\n[print(' '.join(x)) for x in matrix]\nif good_kids_counter == 0:\n print(f'Good job, Santa! {good_kids_counter_copy} happy nice kid/s.')\nelse:\n print(f'No presents for {good_kids_counter} nice kid/s.')","sub_path":"Assignments/Exam Preparation/Python Advanced Exam Preparation - 17 February 2020/2. Present Delivery.py","file_name":"2. Present Delivery.py","file_ext":"py","file_size_in_byte":2015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"239447112","text":"from django.conf.urls import url\nfrom . import views\n\nurlpatterns = [\n\turl(r'^$', views.start),\n\turl(r'^WebCrawler$', views.crawler),\n\turl(r'^WBS$', views.WBS),\n\turl(r'^FHB$', views.FHB),\n\turl(r'^zoo$', views.zoo),\n\turl(r'^Snake$', views.Snake),\n\turl(r'^END$', views.END),\n\turl(r'^END2$', views.END2),\n\turl(r'^MoneyGame$', views.moneyGame),\n\turl(r'^click2$', views.click2),\n url(r'^reset$', views.reset),\n url(r'^store$', views.store),\n url(r'^men$', views.men),\n url(r'^fire$', views.fire),\n url(r'^betterPro$', views.betterPro),\n\turl(r'^click$', views.click),\n\turl(r'^reset1$', views.reset1),\n\turl(r'^ticInc$', views.ticInc),\n\turl(r'^ticDec$', views.ticDec),\n\turl(r'^addWorkers$', views.addWorkers),\n\turl(r'^subWorkers$', views.subWorkers),\n\turl(r'^dolph$', views.dolph),\n\turl(r'^polarBear$', views.polarBear),\n\turl(r'^gorilla$', views.gorilla),\n\turl(r'^dino$', views.dino),\n\turl(r'^HRDreset$', views.HRDreset),\n]","sub_path":"apps/Zoo_App/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"442151592","text":"# sAsync:\n# An enhancement to the SQLAlchemy package that provides persistent\n# dictionaries, text indexing and searching, and an access broker for\n# conveniently managing database access, table setup, and\n# transactions. Everything is run in an asynchronous fashion using the Twisted\n# framework and its deferred processing capabilities.\n#\n# Copyright (C) 2006 by Edwin A. Suominen, http://www.eepatents.com\n#\n# This program is free software; you can redistribute it and/or modify it under\n# the terms of the GNU General Public License as published by the Free Software\n# Foundation; either version 2 of the License, or (at your option) any later\n# version.\n# \n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. 
See the file COPYING for more details.\n# \n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA\n\n\"\"\"\nLocal data stores connected to a remote data manager via a TCP or SSL network\nclient using a simple implementation of Twisted's L{banana.Pynana} object\nserialization protocol.\n\"\"\"\n\nfrom twisted.python import failure\nfrom twisted.spread import jelly, banana\nfrom twisted.internet import reactor, protocol, task, defer\n\nimport common\nimport sasync.queue as queue\n\n\n# Interval in seconds between checks for updates\nUPDATE_CHECK_INTERVAL = 5\n# Niceness of various commands, in order of priority\nNICE_UPDATES = 0 \nNICE_ITEMS = 2\nNICE_FLAVORS = 2\nNICE_GET = 4\nNICE_SET = 8\n\n\nclass NetworkingError(Exception):\n pass\n\n\nclass NetworkingFailure(failure.Failure):\n def __init__(self, errorMsg):\n failure.Failure.__init__(self, NetworkingError(errorMsg))\n\n\nclass ClientProtocol(common.ProtocolMixin, banana.Banana):\n \"\"\"\n I manage the client end of a simple protocol for remote data access, based\n on Twisted's L{banana.Banana} object serialization protocol.\n\n Construct me with a string representing a shared secret, to which I keep a\n reference only as long as it's needed for login authentication.\n \"\"\"\n def __init__(self, secret):\n banana.Banana.__init__(self, isClient=True)\n self.secret = secret\n \n def connectionReady(self):\n \"\"\"\n Starts things off once the connection is ready by attempting\n authentication.\n\n Fires the factory's \\\"connection-result\\\" deferred with C{True} if this\n successfully completes the client connection process, or C{False}\n otherwise.\n \"\"\"\n def gotResponse(response):\n # Don't need to keep this around anymore\n del self.secret\n if response[0] == True:\n self.factory.d.callback(None)\n else:\n self.factory.d.errback(\n NetworkingFailure(\"Connection not authorized\"))\n\n # The login command is the only one that is executed directly rather\n # than through the queue. That's okay, because no other commands will\n # run until the login command finishes.\n self.command(login, self.secret).addCallback(gotResponse)\n\n def command(self, *tokens):\n \"\"\"\n This method of the entry point for all data access. The command is a\n string supplied as the first argument to this method. Any arguments to\n the command are supplied as additional string arguments to the method.\n\n @return: A deferred that fires with a list containing one or more\n elements representing the results of the command. 
Each element q\n must be an object of C{str}, C{int}, C{float} type, or a C{list}\n containing those objects of those types.\n \n \"\"\"\n self.dCommand = defer.Deferred()\n failure = None\n if getattr(self, 'commandPending', False):\n failure = NetworkingFailure(\n \"Commands must be executed sequentially\")\n if tokens[0] not in self.commands:\n failure = NetworkingFailure(\n \"Invalid command '%s'\" % tokens[0])\n if failure is None:\n self.sendEncoded(tokens)\n self.commandPending = True\n else:\n self.dCommand.errback(failure)\n return self.dCommand\n\n def expressionReceived(self, expression):\n \"\"\"\n This method handles all responses to commands, firing the callback to\n the command deferred with the result if it is a qualified list or\n firing the errback if not.\n \"\"\"\n self.commandPending = False\n failure = None\n if expression[0] != 'list':\n failure = NetworkingFailure(\"Rejected non-list response\")\n try:\n responseList = jelly.unjelly(expression, self.security)\n except:\n failure = failure.Failure()\n if failure is None:\n self.dCommand.callback(responseList)\n else:\n self.dCommand.errback(failure)\n\n\nclass ClientFactory(protocol.ReconnectingClientFactory):\n \"\"\"\n I am a reconnecting client factory for the remote data protocol.\n\n Construct me with a string representing a shared secret, which I\n immediately pass on to the L{ClientProtocol} object I'll be using without\n keeping any reference to the secret myself.\n \"\"\"\n def __init__(self, secret):\n self.p = ClientProtocol(secret)\n self.d = defer.Deferred()\n\n def buildProtocol(self, addr):\n \"\"\"\n I create an instance of L{ClientProtocol} in my constructor, where I\n have momentary access to the shared secret, so this method just sets\n the I{factory} attribute of the protocol and returns a reference to it.\n \"\"\"\n self.p.factory = self\n return self.p\n\n\nclass Client(common.ClientServerMixin):\n \"\"\"\n I am a base class that manages a single TCP or SSL connection to a remote\n data source. The connection serves all subclass instances.\n \"\"\"\n @classmethod\n def startup(cls, host, port, secret, timeout=10, SSL=False):\n \"\"\"\n Starts up an authenticated network connection to the server at\n I{host:port}, using the specified shared I{secret}.\n\n The I{secret} must be a string, the longer and more implausible (i.e.,\n higher entropy) the better.\n\n If there are security concerns about transmitting the shared secret and\n data in the clear, use SSL for the connection with I{SSL=True} or\n tunnel the default TCP connection.\n\n Stops trying after the specified I{timeout}, which defaults to 10\n seconds. 
Returns a deferred that fires C{True} if the connection\n succeeded or C{False} otherwise.\n \"\"\"\n def doneTryingToConnect(success, callID):\n if success:\n callID.cancel()\n q = cls.q = queue.AsynchronousQueue()\n q.startup()\n cls.fpCommand = cls.factory.p.command\n cls.checkers = []\n else:\n cls.factory.stopTrying()\n return success\n\n if not isinstance(secret, str):\n raise NetworkingError(\"Shared secret must be a string\")\n factory = cls.factory = DataManagerClientFactory(secret)\n callID = reactor.callLater(timeout, factory.d.callback, False)\n factory.d.addCallback(doneTryingToConnect, callID)\n if SSL:\n self.reactorSSL(port, factory, host=host)\n else:\n reactor.connectTCP(host, port, factory)\n return factory.d\n\n @classmethod\n def shutdown(cls):\n \"\"\"\n Shuts down the command queue and then the network connection, returning\n a deferred to the completion of the shutdown.\n \"\"\"\n def wrapThingsUp(null):\n for checker in cls.checkers:\n checker.stop()\n cls.factory.protocol.transport.loseConnection()\n \n d = cls.q.shutdown()\n d.addCallback(wrapThingsUp)\n return d\n\n @classmethod\n def newChecker(cls, instance):\n \"\"\"\n Installs and starts a new update checker for the supplied I{instance}\n of me.\n \"\"\"\n checker = task.LoopingCall(instance._checkForUpdates)\n cls.checkers[hash(instance)] = checker\n checker.start(UPDATE_CHECK_INTERVAL)\n\n\n \n\n \n \n \n \n \n \n \n","sub_path":"tums/trunk/etch-release/sasync/datacator/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":8584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"75598493","text":"\"\"\"Load weather from weather.gov.\n\nWorks for USA only (NDFD regions).\n\nUsage:\n Weather().get_weather(latitude, longitude)\n\"\"\"\n\nimport datetime\nfrom typing import Dict, List, Optional\nfrom urllib.request import urlopen\nfrom xml.dom import minidom\n\n\nclass Weather:\n \"\"\"Weather from weather.gov.\"\"\"\n\n def get_weather(\n self, latitude: str, longitude: str, days: int = 1, units: str = \"m\"\n ) -> Dict[str, List[Optional[int]]]:\n \"\"\"Get weather from weather.gov.\n\n :param latitude:\n :param longitude:\n :param days:\n :param units: m - metric, e - USA\n :return:\n {'temp_min': [], 'temp_max': [], 'icon': [], 'day': []}\n with lists for each day\n or None if no key or load error\n \"\"\"\n weather_xml = urlopen(\n f\"http://graphical.weather.gov/xml/SOAP_server/ndfdSOAPclientByDay.php?\"\n f\"whichClient=NDFDgenByDay&lat={latitude}&lon={longitude}\"\n f\"&format=24+hourly&numDays={days}&Unit={units}\"\n ).read()\n dom = minidom.parseString(weather_xml)\n error = dom.getElementsByTagName(\"error\")\n if error:\n print(weather_xml)\n return None\n\n # Parse temperatures\n xml_temperatures = dom.getElementsByTagName(\"temperature\")\n highs = [None] * days\n lows = [None] * days\n for item in xml_temperatures:\n if item.getAttribute(\"type\") == \"maximum\":\n values = item.getElementsByTagName(\"value\")\n for i in range(len(values)):\n highs[i] = int(values[i].firstChild.nodeValue)\n if item.getAttribute(\"type\") == \"minimum\":\n values = item.getElementsByTagName(\"value\")\n for i in range(len(values)):\n lows[i] = int(values[i].firstChild.nodeValue)\n\n xml_icons = dom.getElementsByTagName(\"icon-link\")\n icons = [None] * days\n for i, xml_icon in enumerate(xml_icons):\n icons[i] = (\n xml_icon.firstChild.nodeValue.split(\"/\")[-1].split(\".\")[0].rstrip(\"0123456789\")\n )\n\n xml_day_one = 
dom.getElementsByTagName(\"start-valid-time\")[0].firstChild.nodeValue[:10]\n day_one = datetime.datetime.strptime(xml_day_one, \"%Y-%m-%d\")\n\n return {\"temp_min\": lows, \"temp_max\": highs, \"icon\": icons, \"day\": [day_one]}\n\n\nif __name__ == \"__main__\": # pragma: no cover\n from pprint import pprint\n\n weather = Weather()\n pprint(weather.get_weather(\"39.3286\", \"-76.6169\"))\n","sub_path":"src/weather_gov.py","file_name":"weather_gov.py","file_ext":"py","file_size_in_byte":2534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"294683419","text":"#!/usr/bin/env python\nfrom app import db\nfrom timelib import strtodatetime\nimport simplejson as json\nimport hashlib\nimport datetime\n\nclass User(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n\n # User authentication information\n email = db.Column(db.String(255), nullable=False, unique=True)\n password = db.Column(db.String(255), nullable=False, server_default='')\n\n # User information\n active = db.Column('is_active', db.Boolean(), nullable=False, server_default='0')\n first_name = db.Column(db.String(100), nullable=False, server_default='')\n last_name = db.Column(db.String(100), nullable=False, server_default='')\n\n def __init__(self, email, password, first_name, last_name):\n m = hashlib.md5()\n m.update(password.encode('utf-8'))\n self.email = email\n self.password = m.hexdigest()\n self.first_name = first_name\n self.last_name = last_name\n self.active = True\n\n @classmethod\n def get_all(cls):\n users = db.session.query(User) \\\n .all()\n return users\n\n @classmethod\n def get(cls, id):\n user = db.session.query(User) \\\n .filter(User.id == id) \\\n .first()\n return user\n\n @classmethod\n def get_by_email(cls, email):\n user = db.session.query(User) \\\n .filter(User.email == email) \\\n .first()\n return user\n\n @classmethod\n def authenticate(cls, email, password):\n m = hashlib.md5()\n m.update(password.encode('utf-8'))\n user = db.session.query(User) \\\n .filter(User.email == email) \\\n .filter(User.password == m.hexdigest()) \\\n .first()\n return user\n\n @classmethod\n def exists_by_email(cls, email, id=None):\n if id:\n user = db.session.query(User) \\\n .filter(User.email == email) \\\n .filter(User.id != id) \\\n .first()\n else:\n user = db.session.query(User) \\\n .filter(User.email == email) \\\n .first()\n if user:\n return True\n return False\n\n @classmethod\n def update_password(cls, id, new_password):\n user = User.get(id)\n m = hashlib.md5()\n m.update(new_password.encode('utf-8'))\n user.password = m.hexdigest()\n db.session.commit()\n\n @classmethod\n def update(cls, id, first_name, last_name, email, active):\n user = User.get(id)\n user.first_name = first_name\n user.last_name = last_name\n user.email = email\n user.active = active\n\n db.session.commit()\n\nclass UserShop(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n\n name = db.Column(db.String(255), nullable=False)\n url = db.Column(db.Text, nullable=False)\n active = db.Column('is_active', db.Boolean(), nullable=False, server_default='0')\n\n secret = db.Column(db.String(100), nullable=False, server_default='')\n password = db.Column(db.String(100), nullable=False, server_default='')\n\n shopify_id = db.Column(db.Integer, nullable=False)\n last_updated = db.Column(db.DateTime, nullable=True)\n\n def __init__(self, name, url, active, secret, password):\n self.name = name\n self.url = url\n self.active = active\n self.secret = secret\n self.password = 
password\n\n @classmethod\n def get_all(cls):\n shops = db.session.query(UserShop) \\\n .all()\n return shops\n\n @classmethod\n def get_dashboard(cls):\n query = \"\"\"\n SELECT name,\n (SELECT status FROM audit_status\n WHERE shop_id = shop.id \n ORDER BY id DESC\n LIMIT 1) as status,\n (SELECT logged_on FROM audit_status\n WHERE shop_id = shop.id \n ORDER BY id DESC\n LIMIT 1) as logged_on,\n (SELECT run_id FROM audit_status\n WHERE shop_id = shop.id \n ORDER BY id DESC\n LIMIT 1) as last_run_id\n FROM\n user_shop shop\n ORDER BY status DESC,\n logged_on DESC\n \"\"\"\n result = db.engine.execute(query)\n rows = []\n for row in result:\n rows.append(dict(\n name=row[0],\n status=row[1],\n logged_on=row[2],\n run_id=row[3]\n ))\n return rows\n\n @classmethod\n def get(cls, id):\n shop = db.session.query(UserShop) \\\n .filter(UserShop.id == id) \\\n .first()\n return shop\n\n @classmethod\n def update(cls, id, name, url, active, secret, password):\n shop = UserShop.get(id)\n\n shop.name = name\n shop.url = url\n shop.active = active\n shop.secret = secret\n shop.password = password\n \n db.session.commit()\n\n @classmethod\n def set_shopify_id(cls, id, shopify_id):\n userShop = UserShop.get(id)\n\n userShop.shopify_id = shopify_id\n db.session.commit()\n return True\n\n @classmethod\n def mark_updated(cls, id):\n userShop = UserShop.get(id)\n\n userShop.last_updated = datetime.datetime.utcnow()\n db.session.commit()\n return True\n\nclass Order(db.Model):\n __tablename__ = 'order'\n\n id = db.Column(db.Integer, primary_key=True)\n customer_id = db.Column(db.Integer, db.ForeignKey('customer.id'), nullable=False)\n customer = db.relationship(\"Customer\")\n shop_id = db.Column(db.Integer, db.ForeignKey('shop.id'), nullable=False)\n shop = db.relationship(\"Shop\")\n email = db.Column(db.String(255), nullable=False)\n closed_at = db.Column(db.DateTime, nullable=True)\n created_at = db.Column(db.DateTime, nullable=False)\n updated_at = db.Column(db.DateTime, nullable=True)\n number = db.Column(db.Integer, nullable=False)\n note = db.Column(db.Text, nullable=True)\n token = db.Column(db.String(200), nullable=False)\n gateway = db.Column(db.String(200), nullable=True)\n test = db.Column(db.Integer, nullable=False)\n total_price = db.Column(db.Float, nullable=False)\n subtotal_price = db.Column(db.Float, nullable=False)\n total_weight = db.Column(db.Integer, nullable=False)\n total_tax = db.Column(db.Float, nullable=False)\n taxes_included = db.Column(db.Integer, nullable=False)\n currency = db.Column(db.String(10), nullable=False)\n financial_status = db.Column(db.String(20), nullable=False)\n confirmed = db.Column(db.Integer, nullable=False)\n total_discounts = db.Column(db.Float, nullable=False)\n total_line_items_price = db.Column(db.Float, nullable=False)\n cart_token = db.Column(db.String(200), nullable=True)\n buyer_accepts_marketing = db.Column(db.Integer, nullable=False)\n name = db.Column(db.String(200), nullable=False)\n referring_site = db.Column(db.Text, nullable=True)\n landing_site = db.Column(db.Text, nullable=True)\n cancelled_at = db.Column(db.DateTime, nullable=True)\n cancel_reason = db.Column(db.Text, nullable=True)\n total_price_usd = db.Column(db.Float, nullable=False)\n checkout_token = db.Column(db.String(200), nullable=True)\n reference = db.Column(db.String(200), nullable=True)\n user_id = db.Column(db.String(200), nullable=True)\n location_id = db.Column(db.String(200), nullable=True)\n source_identifier = db.Column(db.String(200), nullable=True)\n source_url = 
db.Column(db.String(200), nullable=True)\n processed_at = db.Column(db.DateTime, nullable=True)\n device_id = db.Column(db.Text, nullable=True)\n browser_ip = db.Column(db.Text, nullable=True)\n landing_site_ref = db.Column(db.Text, nullable=True)\n order_number = db.Column(db.Integer, nullable=True)\n processing_method = db.Column(db.String(200), nullable=False)\n checkout_id = db.Column(db.Integer, nullable=True)\n source_name = db.Column(db.String(200), nullable=False)\n fulfillment_status = db.Column(db.String(200), nullable=True)\n tags = db.Column(db.Text, nullable=False)\n contact_email = db.Column(db.String(255), nullable=True)\n order_status_url = db.Column(db.String(255), nullable=True)\n client_details = db.Column(db.Text, nullable=True)\n discount_codes = db.Column(db.Text, nullable=True)\n note_attributes = db.Column(db.Text, nullable=True)\n payment_gateway_names = db.Column(db.Text, nullable=True)\n tax_lines = db.Column(db.Text, nullable=True)\n payment_details = db.Column(db.Text, nullable=True)\n shipping_lines = db.Column(db.Text, nullable=True)\n\n @classmethod\n def get_store_data(cls, shop_id, date_start, date_end):\n query = \"\"\"\n SELECT \n o.name as 'Order ID',\n o.financial_status AS 'Order Status',\n DATE(o.created_at) AS 'Order Date',\n MIN(DATE(ot.created_at)) AS 'Invoice Date',\n MIN(DATE(otr.created_at)) AS 'Refund Date',\n MIN(DATE(oful.created_at)) AS 'Ship Date',\n oli.sku AS 'SKU',\n oli.name AS 'Item Description',\n oli.quantity AS 'Order Quantity',\n oli.quantity AS 'Invoiced Quantity',\n oli.quantity AS 'Shipped Quantity',\n oli.price AS 'Unit Price',\n o.total_tax AS 'Total Tax',\n oli.tax_lines AS 'Tax Lines',\n oli.price as 'Shipped Amount',\n o.shipping_lines as 'Shipping Lines',\n o.total_price_usd as 'Order Total',\n CONCAT(c.first_name, ' ', c.last_name) AS 'Customer Name',\n c.email as 'Email',\n GROUP_CONCAT(ot.gateway) as 'Payment Method',\n osa.city as 'Ship To City',\n osa.province as 'Ship To State',\n osa.zip as 'Ship To Zip',\n osa.country as 'Ship To Country',\n SUM(osl.price) as 'Shipping Total'\n FROM\n olm.`order` o\n INNER JOIN\n `order_line_items` oli ON oli.order_id = o.id\n LEFT JOIN\n `order_transaction` ot ON ot.order_id = o.id AND ot.kind = 'sale'\n LEFT JOIN\n `order_transaction` otr ON otr.order_id = o.id\n AND ot.kind = 'refund'\n LEFT JOIN\n `order_fulfillment` oful ON oful.order_id = o.id\n LEFT JOIN\n `customer` c ON c.id = o.customer_id\n LEFT JOIN\n `order_shipping_address` osa ON osa.order_id = o.id\n LEFT JOIN\n `order_shipping_lines` osl ON osl.order_id = o.id\n WHERE\n o.shop_id = 7990083\n AND o.created_at > '%s'\n AND o.created_at < '%s'\n GROUP BY oli.id;\n \"\"\" % (date_start, date_end)\n result = db.engine.execute(query)\n rows = []\n for row in result:\n rows.append(dict(\n order_id=row[0],\n order_status=row[1],\n order_date=row[2],\n invoice_date=row[3],\n refund_date=row[4],\n ship_date=row[5],\n sku=row[6],\n item=row[7],\n order_quantity=row[8],\n invoiced_quantity=row[9],\n shipped_quantity=row[10],\n unit_price=row[11],\n total_tax=row[12],\n total_price_usd=row[16],\n customer_name=row[17],\n email=row[18],\n payment_method=row[19],\n ship_to_city=row[20],\n ship_to_state=row[21],\n ship_to_zip=row[22],\n ship_to_country=row[23],\n shipping_total=row[24],\n ))\n return rows\n\n @classmethod\n def get_sales_activity_by_order_status(cls, shop_id, date_start, date_end):\n query = \"\"\"\n SELECT\n SUM(oli.quantity) AS qty,\n financial_status,\n SUM(oli.price * oli.quantity)\n FROM\n olm.`order` o\n 
INNER JOIN\n `order_line_items` oli ON oli.order_id = o.id\n LEFT JOIN\n `order_fulfillment` oful ON oful.order_id = o.id\n WHERE\n o.shop_id = 7990083\n AND o.created_at > '%s'\n AND o.created_at < '%s'\n GROUP BY financial_status;\n \"\"\" % (date_start, date_end)\n\n result = db.engine.execute(query)\n rows = []\n for row in result:\n rows.append(dict(\n number_shipped=row[0],\n financial_status=row[1],\n amount_shipped=row[2]\n ))\n\n return rows\n\n @classmethod\n def get_sales_by_best_sellers(cls, shop_id, date_start, date_end):\n query = \"\"\"\n SELECT\n SUM(oli.price * oli.quantity) as price,\n SUM(oli.quantity) AS qty,\n oli.name,\n oli.sku\n FROM\n olm.`order` o\n INNER JOIN\n `order_line_items` oli ON oli.order_id = o.id\n LEFT JOIN\n `order_fulfillment` oful ON oful.order_id = o.id\n WHERE\n o.shop_id = 7990083\n AND o.created_at > '%s'\n AND o.created_at < '%s'\n GROUP BY oli.sku, oli.name\n ORDER BY SUM(oli.price * oli.quantity) DESC;\n \"\"\" % (date_start, date_end)\n\n result = db.engine.execute(query)\n rows = []\n for row in result:\n rows.append(dict(\n total_amount=row[0],\n total_quantity=row[1],\n name=row[2],\n sku=row[3]\n ))\n return rows\n\n @classmethod\n def get_list(cls, page=1):\n orders = Order.query \\\n .paginate(page, 25, False)\n\n return orders\n\n @classmethod\n def get(cls, id):\n order = db.session.query(Order) \\\n .filter(Order.id == id) \\\n .first()\n return order\n\n @classmethod\n def upsert(cls, data, shop_id):\n add = False\n try:\n o = Order.get(data['id'])\n if not o:\n o = Order()\n add = True\n o.id = data['id']\n o.shop_id = shop_id\n if 'customer' in data and 'id' in data['customer']:\n o.customer_id = data['customer']['id']\n o.email = data['email']\n o.closed_at = data['closed_at']\n o.created_at = strtodatetime(data['created_at'].encode('utf-8'))\n o.updated_at = strtodatetime(data['updated_at'].encode('utf-8'))\n o.number = data['number']\n o.note = data['note']\n o.token = data['token']\n o.gateway = data['gateway']\n o.test = data['test']\n o.total_price = data['total_price']\n o.subtotal_price = data['subtotal_price']\n o.total_weight = data['total_weight']\n o.total_tax = data['total_tax']\n o.taxes_included = data['taxes_included']\n o.currency = data['currency']\n o.financial_status = data['financial_status']\n o.confirmed = data['confirmed']\n o.total_discounts = data['total_discounts']\n o.total_line_items_price = data['total_line_items_price']\n o.cart_token = data['cart_token']\n o.buyer_accepts_marketing = data['buyer_accepts_marketing']\n o.name = data['name']\n o.referring_site = data['referring_site']\n o.landing_site = data['landing_site']\n o.cancelled_at = data['cancelled_at']\n o.cancel_reason = data['cancel_reason']\n o.total_price_usd = data['total_price_usd']\n o.checkout_token = data['checkout_token']\n o.reference = data['reference']\n o.user_id = data['user_id']\n o.location_id = data['location_id']\n o.source_identifier = data['source_identifier']\n o.source_url = data['source_url']\n o.processed_at = strtodatetime(data['processed_at'].encode('utf-8'))\n o.device_id = data['device_id']\n o.browser_ip = data['browser_ip']\n o.landing_site_ref = data['landing_site_ref']\n o.order_number = data['order_number']\n o.processing_method = data['processing_method']\n o.checkout_id = data['checkout_id']\n o.source_name = data['source_name']\n o.fulfillment_status = data['fulfillment_status']\n o.tags = data['tags']\n o.contact_email = data['contact_email']\n o.order_status_url = data['order_status_url']\n 
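            # Note (illustrative): the nested Shopify payload fields below are
            # stored as JSON text via json.dumps, so callers reading them back
            # need a matching json.loads, e.g.:
            #     codes = json.loads(order.discount_codes or '[]')
            # Using data.get(...) instead of data[...] for optional keys would
            # avoid tripping the KeyError handler at the end of this method.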
o.client_details = json.dumps(data['client_details']) if 'client_details' in data else None\n o.discount_codes = json.dumps(data['discount_codes'])\n o.note_attributes = json.dumps(data['note_attributes'])\n o.payment_gateway_names = json.dumps(data['payment_gateway_names'])\n o.tax_lines = json.dumps(data['tax_lines'])\n o.shipping_lines = json.dumps(data['shipping_lines'])\n if 'payment_details' in data:\n o.payment_details = json.dumps(data['payment_details'])\n \n if add:\n db.session.add(o)\n db.session.commit()\n\n except KeyError as e:\n print(\"Key Error: \", e)\n sys.exit(0)\n return\n except Exception as e:\n print(\"Order Exception: \", e)\n sys.exit(0)\n return\n return o.id\n\n\nclass Product(db.Model):\n __tablename__ = 'product'\n\n id = db.Column(db.Integer, primary_key=True)\n shop_id = db.Column(db.Integer, nullable=False)\n title = db.Column(db.Text, nullable=False)\n body_html = db.Column(db.Text, nullable=False)\n vendor = db.Column(db.String(255), nullable=False)\n product_type = db.Column(db.String(255), nullable=False)\n created_at = db.Column(db.DateTime, nullable=False)\n updated_at = db.Column(db.DateTime, nullable=True)\n published_at = db.Column(db.DateTime, nullable=True)\n handle = db.Column(db.String(255), nullable=False)\n template_suffix = db.Column(db.String(255), nullable=True)\n published_scope = db.Column(db.String(255), nullable=False)\n tags = db.Column(db.String(255), nullable=False)\n\n @classmethod\n def get_all(cls, page=1):\n products = Product.query \\\n .paginate(page, 25, False)\n return products\n\n @classmethod\n def upsert(self, data, shop_id):\n add = False\n try:\n p = Product.get(data['id'])\n if not p:\n p = Product()\n add = True\n p.id = data['id']\n p.shop_id = shop_id\n p.title = data['title']\n p.body_html = data['body_html']\n p.vendor = data['vendor']\n p.product_type = data['product_type']\n p.created_at = strtodatetime(data['created_at'].encode('utf-8')) if data['created_at'] else None\n p.updated_at = strtodatetime(data['updated_at'].encode('utf-8')) if data['updated_at'] else None\n p.published_at = strtodatetime(data['published_at'].encode('utf-8')) if data['published_at'] else None\n p.handle = data['handle']\n p.template_suffix = data['template_suffix']\n p.published_scope = data['published_scope']\n p.tags = data['tags']\n\n if add:\n db.session.add(p)\n db.session.commit()\n except KeyError as e:\n print(\"Key Error: \", e)\n except Exception as e:\n print(\"Product EXCEPTION: \", e)\n return\n return p.id\n\n @classmethod\n def get(cls, id):\n product = db.session.query(Product) \\\n .filter(Product.id == id) \\\n .first()\n return product\n\n\nclass ProductVariant(db.Model):\n __tablename__ = 'productVariant'\n\n id = db.Column(db.Integer, primary_key=True)\n product_id = db.Column(db.Integer, nullable=False)\n title = db.Column(db.Text, nullable=False)\n price = db.Column(db.Float, nullable=False)\n sku = db.Column(db.String(255), nullable=True)\n position = db.Column(db.Integer, nullable=False)\n grams = db.Column(db.Integer, nullable=True)\n inventory_policy = db.Column(db.String(10), nullable=False)\n compare_at_price = db.Column(db.String(200), nullable=True)\n fulfillment_service = db.Column(db.String(10), nullable=True)\n inventory_management = db.Column(db.String(255), nullable=True)\n option1 = db.Column(db.String(255), nullable=True)\n option2 = db.Column(db.String(255), nullable=True)\n option3 = db.Column(db.String(255), nullable=True)\n created_at = db.Column(db.DateTime, nullable=False)\n updated_at = 
db.Column(db.DateTime, nullable=True)\n taxable = db.Column(db.Integer, nullable=False)\n barcode = db.Column(db.String(255), nullable=True)\n image_id = db.Column(db.Integer, nullable=True)\n inventory_quantity = db.Column(db.Integer, nullable=False)\n weight = db.Column(db.Float, nullable=False)\n weight_unit = db.Column(db.String(20), nullable=False)\n old_inventory_quantity = db.Column(db.Integer, nullable=False)\n requires_shipping = db.Column(db.Integer, nullable=False)\n\n @classmethod\n def upsert(self, data, product_id):\n add = False\n try:\n p = ProductVariant.get(data['id'])\n if not p:\n p = ProductVariant()\n add = True\n p.id = data['id']\n p.product_id = product_id\n p.title = data['title']\n p.price = data['price']\n p.sku = data['sku']\n p.position = data['position']\n p.grams = data['grams']\n p.inventory_policy = data['inventory_policy']\n p.compare_at_price = data['compare_at_price']\n p.fulfillment_service = data['fulfillment_service']\n p.inventory_management = data['inventory_management']\n p.option1 = data['option1']\n p.option2 = data['option2']\n p.option3 = data['option3']\n p.created_at = strtodatetime(data['created_at'].encode('utf-8'))\n p.updated_at = strtodatetime(data['updated_at'].encode('utf-8'))\n p.taxable = data['taxable']\n p.barcode = data['barcode']\n p.image_id = data['image_id']\n p.inventory_quantity = data['inventory_quantity']\n p.weight = data['weight']\n p.weight_unit = data['weight_unit']\n p.old_inventory_quantity = data['old_inventory_quantity']\n p.requires_shipping = data['requires_shipping']\n\n if add:\n db.session.add(p)\n db.session.commit()\n except KeyError as e:\n print(\"Key Error: \", e)\n except Exception as e:\n print(\"ProductVariant EXCEPTION: \", e)\n return\n return p.id\n\n @classmethod\n def get(cls, id):\n productVariant = db.session.query(ProductVariant) \\\n .filter(ProductVariant.id == id) \\\n .first()\n return productVariant\n\nclass Shop(db.Model):\n __tablename__ = 'shop'\n\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(200), nullable=False)\n email = db.Column(db.String(200), nullable=False)\n domain = db.Column(db.String(200), nullable=False)\n created_at = db.Column(db.DateTime, nullable=False)\n province = db.Column(db.String(200), nullable=False)\n country = db.Column(db.String(200), nullable=False)\n address1 = db.Column(db.String(200), nullable=False)\n zip = db.Column(db.String(200), nullable=False)\n city = db.Column(db.String(200), nullable=False)\n source = db.Column(db.String(200), nullable=True)\n phone = db.Column(db.String(200), nullable=False)\n updated_at = db.Column(db.DateTime, nullable=False)\n customer_email = db.Column(db.String(200), nullable=True)\n latitude = db.Column(db.Float, nullable=False)\n longitude = db.Column(db.Float, nullable=False)\n primary_location_id = db.Column(db.String(200), nullable=True)\n primary_locale = db.Column(db.String(200), nullable=False)\n country_code = db.Column(db.String(200), nullable=False)\n country_name = db.Column(db.String(200), nullable=False)\n currency = db.Column(db.String(200), nullable=False)\n timezone = db.Column(db.String(200), nullable=False)\n iana_timezone = db.Column(db.String(200), nullable=False)\n shop_owner = db.Column(db.String(200), nullable=False)\n money_format = db.Column(db.String(200), nullable=False)\n money_with_currency_format = db.Column(db.String(200), nullable=False)\n province_code = db.Column(db.String(200), nullable=True)\n taxes_included = db.Column(db.Integer, nullable=False)\n 
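# Boolean flags from the Shopify API (taxes_included, county_taxes, has_discounts, password_enabled, force_ssl, ...) are stored as 0/1 integers.\n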
tax_shipping = db.Column(db.String(200), nullable=True)\n county_taxes = db.Column(db.Integer, nullable=False)\n plan_display_name = db.Column(db.String(200), nullable=False)\n plan_name = db.Column(db.String(200), nullable=False)\n has_discounts = db.Column(db.Integer, nullable=False)\n has_gift_cards = db.Column(db.Integer, nullable=False)\n myshopify_domain = db.Column(db.String(200), nullable=False)\n google_apps_domain = db.Column(db.String(200), nullable=True)\n google_apps_login_enabled = db.Column(db.Integer, nullable=True)\n money_in_emails_format = db.Column(db.String(200), nullable=False)\n money_with_currency_in_emails_format = db.Column(db.String(200), nullable=False)\n eligible_for_payments = db.Column(db.Integer, nullable=True)\n requires_extra_payments_agreement = db.Column(db.Integer, nullable=False)\n password_enabled = db.Column(db.Integer, nullable=False)\n has_storefront = db.Column(db.Integer, nullable=False)\n eligible_for_card_reader_giveaway = db.Column(db.Integer, nullable=False)\n setup_required = db.Column(db.Integer, nullable=False)\n force_ssl = db.Column(db.Integer, nullable=False)\n\n def upsert(self, data):\n add = False\n try:\n s = Shop.get(data['id'])\n if not s:\n s = Shop()\n add = True\n s.id = data['id']\n s.name = data['name']\n s.email = data['email']\n s.domain = data['domain']\n s.created_at = strtodatetime(data['created_at'].encode('utf-8'))\n s.province = data['province']\n s.country = data['country']\n s.address1 = data['address1']\n s.zip = data['zip']\n s.city = data['city']\n s.source = data['source']\n s.phone = data['phone']\n s.updated_at = strtodatetime(data['updated_at'].encode('utf-8'))\n s.customer_email = data['customer_email']\n s.latitude = data['latitude']\n s.longitude = data['longitude']\n s.primary_location_id = data['primary_location_id']\n s.primary_locale = data['primary_locale']\n s.country_code = data['country_code']\n s.country_name = data['country_name']\n s.currency = data['currency']\n s.timezone = data['timezone']\n s.iana_timezone = data['iana_timezone']\n s.shop_owner = data['shop_owner']\n s.money_format = data['money_format']\n s.money_with_currency_format = data['money_with_currency_format']\n s.province_code = data['province_code']\n s.taxes_included = data['taxes_included']\n s.tax_shipping = data['tax_shipping']\n s.county_taxes = data['county_taxes']\n s.plan_display_name = data['plan_display_name']\n s.plan_name = data['plan_name']\n s.has_discounts = data['has_discounts']\n s.has_gift_cards = data['has_gift_cards']\n s.myshopify_domain = data['myshopify_domain']\n s.google_apps_domain = data['google_apps_domain']\n s.google_apps_login_enabled = data['google_apps_login_enabled']\n s.money_in_emails_format = data['money_in_emails_format']\n s.money_with_currency_in_emails_format = data['money_with_currency_in_emails_format']\n s.eligible_for_payments = data['eligible_for_payments']\n s.requires_extra_payments_agreement = data['requires_extra_payments_agreement']\n s.password_enabled = data['password_enabled']\n s.has_storefront = data['has_storefront']\n s.eligible_for_card_reader_giveaway = data['eligible_for_card_reader_giveaway']\n s.setup_required = data['setup_required']\n s.force_ssl = data['force_ssl']\n\n if add:\n db.session.add(s)\n db.session.commit()\n except KeyError as e:\n print(\"Key Error: \", e)\n except Exception as e:\n print(\"Shop EXCEPTION: \", e)\n return\n\n @classmethod\n def get(cls, shop_id):\n shop = db.session.query(Shop) \\\n .filter(Shop.id == shop_id) \\\n .first()\n return 
shop\n\nclass AuditStatuses:\n COMPLETE = 'complete'\n RUNNING = 'running'\n ERROR = 'error'\n\nclass AuditStatus(db.Model):\n __tablename__ = 'audit_status'\n\n id = db.Column(db.Integer, primary_key=True)\n run_id = db.Column(db.String(100), nullable=True)\n shop_id = db.Column(db.Integer, nullable=False)\n status = db.Column(db.String(100), nullable=False)\n logged_on = db.Column(db.DateTime, nullable=False)\n\n def __init__(self, run_id, shop_id, status=AuditStatuses.RUNNING):\n self.run_id = run_id\n self.shop_id = shop_id\n self.status = status\n self.logged_on = datetime.datetime.utcnow()\n\n def save(self):\n db.session.add(self)\n db.session.commit()\n\n @classmethod\n def get_shop_id_by_run_id(cls, run_id):\n msg = db.session.query(AuditStatus) \\\n .filter(AuditStatus.run_id == run_id) \\\n .first()\n if not msg:\n return 0\n return msg.shop_id\n\n\nclass AuditLog(db.Model):\n __tablename__ = 'audit_log'\n \n id = db.Column(db.Integer, primary_key=True)\n run_id = db.Column(db.String(100), nullable=False)\n shop_id = db.Column(db.Integer, nullable=False)\n message = db.Column(db.Text, nullable=False)\n logged_on = db.Column(db.DateTime, nullable=False)\n\n\n def __init__(self, run_id, shop_id, message):\n self.run_id = run_id\n self.shop_id = shop_id\n self.message = message\n self.logged_on = datetime.datetime.utcnow()\n\n def save(self):\n db.session.add(self)\n db.session.commit()\n\n @classmethod\n def get_by_run(cls, run_id):\n log_messages = db.session.query(AuditLog) \\\n .filter(AuditLog.run_id == run_id) \\\n .order_by(db.desc(AuditLog.logged_on)) \\\n .order_by(db.desc(AuditLog.id)) \\\n .all()\n return log_messages\n\n\nclass OrderFulfillment(db.Model):\n __tablename__ = 'order_fulfillment'\n\n id = db.Column(db.Integer, primary_key=True)\n order_id = db.Column(db.Integer, nullable=False)\n created_at = db.Column(db.DateTime, nullable=False)\n updated_at = db.Column(db.DateTime, nullable=True)\n status = db.Column(db.String(100), nullable=False)\n service = db.Column(db.String(100), nullable=False)\n tracking_company = db.Column(db.String(100), nullable=True)\n tracking_number = db.Column(db.String(100), nullable=True)\n tracking_numbers = db.Column(db.Text, nullable=True)\n tracking_url = db.Column(db.Text, nullable=True)\n tracking_urls = db.Column(db.Text, nullable=True)\n\n @classmethod\n def upsert(cls, data):\n add = False\n try:\n o = OrderFulfillment.get(data['id'])\n if not o:\n o = OrderFulfillment()\n add = True\n o.id = data['id']\n o.order_id = data['order_id']\n o.created_at = strtodatetime(data['created_at'].encode('utf-8'))\n o.updated_at = strtodatetime(data['updated_at'].encode('utf-8'))\n o.status = data['status']\n o.service = data['service']\n o.tracking_company = data['tracking_company']\n o.tracking_number = data['tracking_number']\n o.tracking_numbers = json.dumps(data['tracking_numbers'])\n o.tracking_url = data['tracking_url']\n o.tracking_urls = json.dumps(data['tracking_urls'])\n \n if add:\n db.session.add(o)\n db.session.commit()\n\n except KeyError as e:\n print(\"Key Error: \", e)\n return\n except Exception as e:\n print(\"OrderFulfillment Exception: \", e)\n return\n return o.id\n\n @classmethod\n def get(cls, id):\n orderFulfillment = db.session.query(OrderFulfillment) \\\n .filter(OrderFulfillment.id == id) \\\n .first()\n return orderFulfillment\n\nclass OrderLineItems(db.Model):\n __tablename__ = 'order_line_items'\n\n id = db.Column(db.Integer, primary_key=True)\n order_id = db.Column(db.Integer, nullable=False)\n variant_id = 
db.Column(db.Integer, nullable=True)\n title = db.Column(db.Text, nullable=False)\n quantity = db.Column(db.Integer, nullable=False)\n price = db.Column(db.Float, nullable=False)\n grams = db.Column(db.Integer, nullable=False)\n sku = db.Column(db.String(200), nullable=True)\n variant_title = db.Column(db.String(200), nullable=True)\n vendor = db.Column(db.String(200), nullable=True)\n fulfillment_service = db.Column(db.String(200), nullable=False)\n product_id = db.Column(db.Integer, nullable=True)\n requires_shipping = db.Column(db.Integer, nullable=False)\n taxable = db.Column(db.Integer, nullable=False)\n gift_card = db.Column(db.Integer, nullable=False)\n name = db.Column(db.Text, nullable=False)\n variant_inventory_management = db.Column(db.String(100), nullable=True)\n properties = db.Column(db.Text, nullable=False)\n product_exists = db.Column(db.Integer, nullable=False)\n fulfillable_quantity = db.Column(db.Integer, nullable=False)\n total_discount = db.Column(db.Float, nullable=False)\n fulfillment_status = db.Column(db.String(200), nullable=True)\n tax_lines = db.Column(db.Text, nullable=False)\n\n @classmethod\n def upsert(cls, data, order_id):\n add = False\n try:\n o = OrderLineItems.get(data['id'])\n if not o:\n o = OrderLineItems()\n add = True\n o.id = data['id']\n o.order_id = order_id\n o.variant_id = data['variant_id']\n o.title = data['title']\n o.quantity = data['quantity']\n o.price = data['price']\n o.grams = data['grams']\n o.sku = data['sku']\n o.variant_title = data['variant_title']\n o.vendor = data['vendor']\n o.fulfillment_service = data['fulfillment_service']\n o.product_id = data['product_id']\n o.requires_shipping = data['requires_shipping']\n o.taxable = data['taxable']\n o.gift_card = data['gift_card']\n o.name = data['name']\n o.variant_inventory_management = data['variant_inventory_management']\n o.product_exists = data['product_exists']\n o.fulfillable_quantity = data['fulfillable_quantity']\n o.total_discount = data['total_discount']\n o.fulfillment_status = data['fulfillment_status']\n\n o.properties = json.dumps(data['properties'])\n o.tax_lines = json.dumps(data['tax_lines'])\n \n if add:\n db.session.add(o)\n db.session.commit()\n\n except KeyError as e:\n print(\"Key Error: \", e)\n return\n except Exception as e:\n print(\"OrderLineItems Exception: \", e)\n return\n return o.id\n\n\n\n @classmethod\n def get(cls, id):\n orderLineItem = db.session.query(OrderLineItems) \\\n .filter(OrderLineItems.id == id) \\\n .first()\n return orderLineItem\n\nclass OrderShippingLines(db.Model):\n __tablename__ = 'order_shipping_lines'\n\n id = db.Column(db.Integer, primary_key=True)\n order_id = db.Column(db.Integer, nullable=False)\n title = db.Column(db.Text, nullable=True)\n price = db.Column(db.Float, nullable=True)\n code = db.Column(db.Text, nullable=True)\n source = db.Column(db.String(200), nullable=True)\n phone = db.Column(db.String(200), nullable=True)\n delivery_category = db.Column(db.String(200), nullable=True)\n carrier_identifier = db.Column(db.String(200), nullable=True)\n tax_lines = db.Column(db.Text, nullable=True)\n\n @classmethod\n def upsert(cls, data, order_id):\n add = False\n try:\n o = OrderShippingLines.get(data['id'])\n if not o:\n o = OrderShippingLines()\n add = True\n\n o.id = data['id']\n o.order_id = order_id\n o.title = data['title']\n o.price = data['price']\n o.code = data['code']\n o.source = data['source']\n o.phone = data['phone']\n if 'delivery_category' in data:\n o.delivery_category = data['delivery_category']\n 
o.carrier_identifier = data['carrier_identifier']\n o.tax_lines = json.dumps(data['tax_lines'])\n \n if add:\n db.session.add(o)\n db.session.commit()\n\n except KeyError as e:\n print(\"Key Error: \", e)\n return\n except Exception as e:\n print(\"OrderShippingLines Exception: \", e)\n return\n return o.id\n\n @classmethod\n def get(cls, id):\n orderShippingLines = db.session.query(OrderShippingLines) \\\n .filter(OrderShippingLines.id == id) \\\n .first()\n return orderShippingLines\n\nclass OrderShippingAddress(db.Model):\n __tablename__ = 'order_shipping_address'\n\n order_id = db.Column(db.Integer, primary_key=True)\n first_name = db.Column(db.String(255), nullable=True)\n last_name = db.Column(db.String(255), nullable=True)\n address1 = db.Column(db.String(255), nullable=True)\n address2 = db.Column(db.String(255), nullable=True)\n phone = db.Column(db.String(255), nullable=True)\n city = db.Column(db.String(255), nullable=True)\n zip = db.Column(db.String(255), nullable=True)\n province = db.Column(db.String(255), nullable=True)\n country = db.Column(db.String(255), nullable=True)\n company = db.Column(db.String(255), nullable=True)\n latitude = db.Column(db.String(255), nullable=True)\n longitude = db.Column(db.String(255), nullable=True)\n name = db.Column(db.String(255), nullable=True)\n country_code = db.Column(db.String(255), nullable=True)\n province_code = db.Column(db.String(255), nullable=True)\n\n @classmethod\n def upsert(cls, data, order_id):\n add = False\n try:\n o = OrderShippingAddress.get(order_id)\n if not o:\n o = OrderShippingAddress()\n add = True\n\n o.order_id = order_id\n o.first_name = data['first_name']\n o.last_name = data['last_name']\n o.address1 = data['address1']\n o.address2 = data['address2']\n o.phone = data['phone']\n o.city = data['city']\n o.zip = data['zip']\n o.province = data['province']\n o.country = data['country']\n o.company = data['company']\n o.latitude = data['latitude']\n o.longitude = data['longitude']\n o.name = data['name']\n o.country_code = data['country_code']\n o.province_code = data['province_code']\n\n if add:\n db.session.add(o)\n\n db.session.commit()\n except KeyError as e:\n print(\"Key Error: \", e)\n return\n except Exception as e:\n print(\"OrderShippingAddress Exception: \", e)\n return\n\n return o.order_id\n\n @classmethod\n def get(cls, order_id):\n orderShippingAddress = db.session.query(OrderShippingAddress) \\\n .filter(OrderShippingAddress.order_id == order_id) \\\n .first()\n return orderShippingAddress\n\nclass OrderRefund(db.Model):\n __tablename__ = 'order_refund'\n\n id = db.Column(db.Integer, primary_key=True)\n order_id = db.Column(db.Integer, nullable=False);\n created_at = db.Column(db.DateTime, nullable=False);\n note = db.Column(db.Text, nullable=True);\n restock = db.Column(db.Text, nullable=True);\n user_id = db.Column(db.Integer, nullable=True);\n\n @classmethod\n def upsert(cls, data):\n add = False\n try:\n o = OrderRefund.get(data['id'])\n if not o:\n o = OrderRefund()\n add = True\n o.id = data['id']\n o.order_id = data['order_id']\n o.created_at = strtodatetime(data['created_at'].encode('utf-8'))\n o.note = data['note']\n o.restock = data['restock']\n o.user_id = data['user_id']\n \n if add:\n db.session.add(o)\n db.session.commit()\n\n except KeyError as e:\n print(\"Key Error: \", e)\n return\n except Exception as e:\n print(\"OrderRefund Exception: \", e)\n return\n return o.id\n\n @classmethod\n def get(cls, id):\n orderRefund = db.session.query(OrderRefund) \\\n .filter(OrderRefund.id == 
id) \\\n .first()\n return orderRefund\n\nclass OrderRefundLineItem(db.Model):\n __tablename__ = 'order_refund_line_item'\n\n id = db.Column(db.Integer, primary_key=True)\n refund_id = db.Column(db.Integer, nullable=False)\n quantity = db.Column(db.Integer, nullable=False)\n line_item_id = db.Column(db.Integer, nullable=False)\n\n @classmethod\n def upsert(cls, data, refund_id):\n add = False\n try:\n o = OrderRefundLineItem.get(data['id'])\n if not o:\n o = OrderRefundLineItem()\n add = True\n o.id = data['id']\n o.refund_id = refund_id\n o.quantity = data['quantity']\n o.line_item_id = data['line_item_id']\n \n if add:\n db.session.add(o)\n db.session.commit()\n\n except KeyError as e:\n print(\"Key Error: \", e)\n return\n except Exception as e:\n print(\"OrderRefundLineItem Exception: \", e)\n return\n return o.id\n\n @classmethod\n def get(cls, id):\n orderRefundLineItem = db.session.query(OrderRefundLineItem) \\\n .filter(OrderRefundLineItem.id == id) \\\n .first()\n return orderRefundLineItem\n\n\nclass OrderRefundTransaction(db.Model):\n __tablename__ = 'order_refund_transaction'\n\n id = db.Column(db.Integer, primary_key=True)\n refund_id = db.Column(db.Integer, nullable=False)\n order_id = db.Column(db.Integer, nullable=False)\n amount = db.Column(db.Float, nullable=False)\n kind = db.Column(db.String(100), nullable=False)\n gateway = db.Column(db.String(100), nullable=True)\n status = db.Column(db.String(100), nullable=False)\n message = db.Column(db.Text, nullable=True)\n created_at = db.Column(db.DateTime, nullable=False)\n test = db.Column(db.Integer, nullable=False)\n authorization = db.Column(db.String(200), nullable=True)\n currency = db.Column(db.String(20), nullable=False)\n location_id = db.Column(db.Integer, nullable=True)\n user_id = db.Column(db.Integer, nullable=True)\n parent_id = db.Column(db.Integer, nullable=True)\n device_id = db.Column(db.Integer, nullable=True)\n error_code = db.Column(db.String(200), nullable=True)\n source_name = db.Column(db.String(200), nullable=False)\n\n @classmethod\n def upsert(cls, data, refund_id):\n add = False\n try:\n o = OrderRefundTransaction.get(data['id'])\n if not o:\n o = OrderRefundTransaction()\n add = True\n o.id = data['id']\n o.refund_id = refund_id\n o.order_id = data['order_id']\n o.amount = data['amount']\n o.kind = data['kind']\n o.gateway = data['gateway']\n o.status = data['status']\n o.message = data['message']\n o.created_at = strtodatetime(data['created_at'].encode('utf-8'))\n o.test = data['test']\n o.authorization = data['authorization']\n o.currency = data['currency']\n o.location_id = data['location_id']\n o.user_id = data['user_id']\n o.parent_id = data['parent_id']\n o.device_id = data['device_id']\n o.error_code = data['error_code']\n o.source_name = data['source_name']\n \n if add:\n db.session.add(o)\n db.session.commit()\n\n except KeyError as e:\n print(\"Key Error: \", e)\n return\n except Exception as e:\n print(\"OrderRefundTransaction Exception: \", e)\n return\n return o.id\n\n @classmethod\n def get(cls, id):\n orderRefundTransaction = db.session.query(OrderRefundTransaction) \\\n .filter(OrderRefundTransaction.id == id) \\\n .first()\n return orderRefundTransaction\n\nclass OrderRisk(db.Model):\n __tablename__ = 'order_risk'\n\n id = db.Column(db.Integer, primary_key=True)\n order_id = db.Column(db.Integer, nullable=False)\n checkout_id = db.Column(db.Integer, nullable=False)\n source = db.Column(db.String(50), nullable=False)\n score = db.Column(db.Float, nullable=False)\n recommendation = 
db.Column(db.String(100), nullable=False)\n display = db.Column(db.Integer, nullable=False)\n cause_cancel = db.Column(db.String(255), nullable=True)\n message = db.Column(db.Text, nullable=True)\n merchant_message = db.Column(db.Text, nullable=False)\n\n @classmethod\n def upsert(cls, data):\n add = False\n try:\n o = OrderRisk.get(data['id'])\n if not o:\n o = OrderRisk()\n add = True\n o.id = data['id']\n o.order_id = data['order_id']\n o.checkout_id = data['checkout_id']\n o.source = data['source']\n o.score = data['score']\n o.recommendation = data['recommendation']\n o.display = data['display']\n o.cause_cancel = data['cause_cancel']\n o.message = data['message']\n o.merchant_message = data['merchant_message']\n \n if add:\n db.session.add(o)\n db.session.commit()\n\n except KeyError as e:\n print(\"Key Error: \", e)\n return\n except Exception as e:\n print(\"OrderRisk Exception: \", e)\n return\n return o.id\n\n\n @classmethod\n def get(cls, id):\n orderRisk = db.session.query(OrderRisk) \\\n .filter(OrderRisk.id == id) \\\n .first()\n return orderRisk\n\nclass OrderTransaction(db.Model):\n __tablename__ = 'order_transaction'\n\n id = db.Column(db.Integer, primary_key=True)\n order_id = db.Column(db.Integer, nullable=False)\n amount = db.Column(db.Float, nullable=False)\n kind = db.Column(db.String(100), nullable=False)\n gateway = db.Column(db.String(100), nullable=True)\n status = db.Column(db.String(100), nullable=False)\n message = db.Column(db.Text, nullable=True)\n created_at = db.Column(db.DateTime, nullable=False)\n test = db.Column(db.Integer, nullable=False)\n authorization = db.Column(db.String(200), nullable=True)\n currency = db.Column(db.String(20), nullable=False)\n location_id = db.Column(db.Integer, nullable=True)\n user_id = db.Column(db.Integer, nullable=True)\n parent_id = db.Column(db.Integer, nullable=True)\n device_id = db.Column(db.Integer, nullable=True)\n error_code = db.Column(db.String(200), nullable=True)\n source_name = db.Column(db.String(200), nullable=False)\n payment_details = db.Column(db.Text, nullable=True)\n\n @classmethod\n def upsert(cls, data):\n add = False\n try:\n o = OrderTransaction.get(data['id'])\n if not o:\n o = OrderTransaction()\n add = True\n o.id = data['id']\n o.order_id = data['order_id']\n o.amount = data['amount']\n o.kind = data['kind']\n o.gateway = data['gateway']\n o.status = data['status']\n o.message = data['message']\n o.created_at = strtodatetime(data['created_at'].encode('utf-8'))\n o.test = data['test']\n o.authorization = data['authorization']\n o.currency = data['currency']\n o.location_id = data['location_id']\n o.user_id = data['user_id']\n o.parent_id = data['parent_id']\n o.device_id = data['device_id']\n o.error_code = data['error_code']\n o.source_name = data['source_name']\n if 'payment_details' in data:\n o.payment_details = json.dumps(data['payment_details'])\n \n if add:\n db.session.add(o)\n db.session.commit()\n\n except KeyError as e:\n print(\"Key Error: \", e)\n return\n except Exception as e:\n print(\"OrderTransaction Exception: \", e)\n return\n return o.id\n\n @classmethod\n def get(cls, id):\n orderTransaction = db.session.query(OrderTransaction) \\\n .filter(OrderTransaction.id == id) \\\n .first()\n return orderTransaction\n\nclass Customer(db.Model):\n __tablename__ = 'customer'\n\n id = db.Column(db.Integer, primary_key=True)\n shop_id = db.Column(db.Integer, nullable=False)\n email = db.Column(db.String(200), nullable=True)\n accepts_marketing = db.Column(db.Integer, nullable=False)\n
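# created_at/updated_at arrive from the API as ISO-8601 strings and are parsed with strtodatetime() in upsert() below.\n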
created_at = db.Column(db.DateTime, nullable=False)\n updated_at = db.Column(db.DateTime, nullable=False)\n first_name = db.Column(db.String(200), nullable=True)\n last_name = db.Column(db.String(200), nullable=True)\n orders_count = db.Column(db.Integer, nullable=False)\n state = db.Column(db.String(100), nullable=False)\n total_spent = db.Column(db.Float, nullable=False)\n last_order_id = db.Column(db.Integer, nullable=True)\n note = db.Column(db.Text, nullable=True)\n verified_email = db.Column(db.Integer, nullable=False)\n multipass_identifier = db.Column(db.String(200), nullable=True)\n tax_exempt = db.Column(db.Integer, nullable=False)\n tags = db.Column(db.Text, nullable=True)\n last_order_name = db.Column(db.String(200), nullable=True)\n\n @classmethod\n def upsert(self, data, shop_id):\n add = False\n try:\n c = Customer.get(data['id'])\n if not c:\n c = Customer()\n add = True\n c.id = data['id']\n c.shop_id = shop_id\n c.email = data['email']\n c.accepts_marketing = data['accepts_marketing']\n c.created_at = strtodatetime(data['created_at'].encode('utf-8'))\n c.updated_at = strtodatetime(data['updated_at'].encode('utf-8'))\n c.first_name = data['first_name']\n c.last_name = data['last_name']\n c.orders_count = data['orders_count']\n c.state = data['state']\n c.total_spent = data['total_spent']\n c.last_order_id = data['last_order_id']\n c.note = data['note']\n c.verified_email = data['verified_email']\n c.multipass_identifier = data['multipass_identifier']\n c.tax_exempt = data['tax_exempt']\n c.tags = data['tags']\n c.last_order_name = data['last_order_name']\n\n if add:\n db.session.add(c)\n db.session.commit()\n except KeyError as e:\n print(\"Key Error: \", e)\n except Exception as e:\n print(\"Customer EXCEPTION: \", e)\n return\n return c.id\n\n @classmethod\n def get(cls, id):\n customer = db.session.query(Customer) \\\n .filter(Customer.id == id) \\\n .first()\n return customer\n\nclass CustomerAddress(db.Model):\n __tablename__ = 'customerAddress'\n\n id = db.Column(db.Integer, primary_key=True)\n customer_id = db.Column(db.Integer, nullable=False)\n first_name = db.Column(db.String(200), nullable=True)\n last_name = db.Column(db.String(200), nullable=False)\n company = db.Column(db.String(200), nullable=True)\n address1 = db.Column(db.String(200), nullable=True)\n address2 = db.Column(db.String(200), nullable=True)\n city = db.Column(db.String(200), nullable=True)\n province = db.Column(db.String(200), nullable=True)\n country = db.Column(db.String(200), nullable=True)\n zip = db.Column(db.String(100), nullable=True)\n phone = db.Column(db.String(200), nullable=True)\n name = db.Column(db.String(255), nullable=False)\n province_code = db.Column(db.String(10), nullable=True)\n country_code = db.Column(db.Text, nullable=True)\n country_name = db.Column(db.String(255), nullable=True)\n default = db.Column(db.Integer, nullable=False)\n\n @classmethod\n def upsert(self, data, customer_id):\n add = False\n try:\n c = CustomerAddress.get(data['id'])\n if not c:\n c = CustomerAddress()\n add = True\n c.id = data['id']\n c.customer_id = customer_id\n c.first_name = data['first_name']\n c.last_name = data['last_name']\n c.company = data['company']\n c.address1 = data['address1']\n c.address2 = data['address2']\n c.city = data['city']\n c.province = data['province']\n c.country = data['country']\n c.zip = data['zip']\n c.phone = data['phone']\n c.name = data['name']\n c.province_code = data['province_code']\n c.country_code = data['country_code']\n c.country_name = 
data['country_name']\n c.default = data['default']\n\n if add:\n db.session.add(c)\n db.session.commit()\n except KeyError as e:\n print(\"Key Error: \", e)\n except Exception as e:\n print(\"CustomerAddress EXCEPTION: \", e)\n return\n return c.id\n\n @classmethod\n def get(cls, id):\n customerAddress = db.session.query(CustomerAddress) \\\n .filter(CustomerAddress.id == id) \\\n .first()\n return customerAddress\n\nclass ShippingZone(db.Model):\n __tablename__ = 'shipping_zone'\n\n id = db.Column(db.Integer, primary_key=True)\n shop_id = db.Column(db.Integer, nullable=False)\n name = db.Column(db.String(255), nullable=False)\n\n @classmethod\n def upsert(cls, data, shop_id):\n add = False\n try:\n s = ShippingZone.get(data['id'])\n if not s:\n s = ShippingZone()\n add = True\n s.id = data['id']\n s.shop_id = shop_id\n s.name = data['name']\n\n if add:\n db.session.add(s)\n db.session.commit()\n\n except KeyError as e:\n print(\"Key Error: \", e)\n return\n except Exception as e:\n print(\"ShippingZone Exception: \", e)\n return\n return s.id\n\n @classmethod\n def get(cls, id):\n shippingZone = db.session.query(ShippingZone) \\\n .filter(ShippingZone.id == id) \\\n .first()\n return shippingZone\n\nclass ShippingZoneCountries(db.Model):\n __tablename__ = 'shipping_zone_countries'\n \n id = db.Column(db.Integer, primary_key=True)\n shipping_zone_id = db.Column(db.Integer, nullable=False)\n name = db.Column(db.String(255), nullable=False)\n tax = db.Column(db.Float, nullable=False)\n code = db.Column(db.String(20), nullable=False)\n tax_name = db.Column(db.String(255), nullable=False)\n\n @classmethod\n def upsert(cls, data, shipping_zone_id):\n add = False\n try:\n s = ShippingZoneCountries.get(data['id'])\n if not s:\n s = ShippingZoneCountries()\n add = True\n s.id = data['id']\n s.shipping_zone_id = shipping_zone_id\n s.name = data['name']\n s.tax = data['tax']\n s.code = data['code']\n s.tax_name = data['tax_name']\n\n if add:\n db.session.add(s)\n db.session.commit()\n\n except KeyError as e:\n print(\"Key Error: \", e)\n return\n except Exception as e:\n print(\"ShippingZoneCountries Exception: \", e)\n return\n return s.id\n\n @classmethod\n def get(cls, id):\n shippingZoneCountries = db.session.query(ShippingZoneCountries) \\\n .filter(ShippingZoneCountries.id == id) \\\n .first()\n return shippingZoneCountries\n\nclass ShippingZoneCountriesProvinces(db.Model):\n __tablename__ = 'shipping_zone_countries_provinces'\n\n id = db.Column(db.Integer, primary_key=True)\n shipping_zone_id = db.Column(db.Integer, nullable=False)\n country_id = db.Column(db.Integer, nullable=False)\n name = db.Column(db.String(255), nullable=False)\n code = db.Column(db.String(20), nullable=False)\n tax = db.Column(db.Float, nullable=False)\n tax_name = db.Column(db.String(255), nullable=False)\n tax_type = db.Column(db.String(255), nullable=True)\n tax_percentage = db.Column(db.Float, nullable=True)\n\n @classmethod\n def upsert(cls, data):\n add = False\n try:\n s = ShippingZoneCountriesProvinces.get(data['id'])\n if not s:\n s = ShippingZoneCountriesProvinces()\n add = True\n s.id = data['id']\n s.shipping_zone_id = data['shipping_zone_id']\n s.country_id = data['country_id']\n s.name = data['name']\n s.code = data['code']\n s.tax = data['tax']\n s.tax_name = data['tax_name']\n s.tax_type = data['tax_type']\n s.tax_percentage = data['tax_percentage']\n\n if add:\n db.session.add(s)\n db.session.commit()\n\n except KeyError as e:\n print(\"Key Error: \", e)\n return\n except Exception as e:\n 
print(\"ShippingZoneCountriesProvinces Exception: \", e)\n return\n return s.id\n\n @classmethod\n def get(cls, id):\n shippingZoneCountriesProvinces = db.session.query(ShippingZoneCountriesProvinces) \\\n .filter(ShippingZoneCountriesProvinces.id == id) \\\n .first()\n return shippingZoneCountriesProvinces\n\nclass ShippingZoneWeightBasedShippingRates(db.Model):\n __tablename__ = 'shipping_zone_weight_based_shipping_rates'\n\n id = db.Column(db.Integer, primary_key=True)\n shipping_zone_id = db.Column(db.Integer, nullable=False)\n weight_low = db.Column(db.Float, nullable=False)\n weight_high = db.Column(db.Float, nullable=False)\n name = db.Column(db.String(255), nullable=False)\n price = db.Column(db.Float, nullable=False)\n\n @classmethod\n def upsert(cls, data):\n add = False\n try:\n s = ShippingZoneWeightBasedShippingRates.get(data['id'])\n if not s:\n s = ShippingZoneWeightBasedShippingRates()\n add = True\n s.id = data['id']\n s.shipping_zone_id = data['shipping_zone_id']\n s.weight_low = data['weight_low']\n s.weight_high = data['weight_high']\n s.name = data['name']\n s.price = data['price']\n \n if add:\n db.session.add(s)\n db.session.commit()\n\n except KeyError as e:\n print(\"Key Error: \", e)\n return\n except Exception as e:\n print(\"ShippingZoneWeightBasedShippingRates Exception: \", e)\n return\n return s.id\n\n @classmethod\n def get(cls, id):\n shippingZoneWeightBasedShippingRates = db.session.query(ShippingZoneWeightBasedShippingRates) \\\n .filter(ShippingZoneWeightBasedShippingRates.id == id) \\\n .first()\n return shippingZoneWeightBasedShippingRates\n\nclass ShippingZonePriceBasedShippingRates(db.Model):\n __tablename__ = 'shipping_zone_price_based_shipping_rates'\n\n id = db.Column(db.Integer, primary_key=True)\n shipping_zone_id = db.Column(db.Integer, nullable=False)\n name = db.Column(db.String(255), nullable=False)\n price = db.Column(db.Float, nullable=False)\n min_order_subtotal = db.Column(db.Float, nullable=True)\n max_order_subtotal = db.Column(db.Float, nullable=True)\n\n @classmethod\n def upsert(cls, data):\n add = False\n try:\n s = ShippingZonePriceBasedShippingRates.get(data['id'])\n if not s:\n s = ShippingZonePriceBasedShippingRates()\n add = True\n s.id = data['id']\n s.shipping_zone_id = data['shipping_zone_id']\n s.name = data['name']\n s.price = data['price']\n s.min_order_subtotal = data['min_order_subtotal']\n s.max_order_subtotal = data['max_order_subtotal']\n \n if add:\n db.session.add(s)\n db.session.commit()\n\n except KeyError as e:\n print(\"Key Error: \", e)\n return\n except Exception as e:\n print(\"ShippingZonePriceBasedShippingRates Exception: \", e)\n return\n return s.id\n\n @classmethod\n def get(cls, id):\n shippingZonePriceBasedShippingRates = db.session.query(ShippingZonePriceBasedShippingRates) \\\n .filter(ShippingZonePriceBasedShippingRates.id == id) \\\n .first()\n return shippingZonePriceBasedShippingRates\n\nclass ShippingZoneCarrierShippingRateProviders(db.Model):\n __tablename__ = 'shipping_zone_carrier_shipping_rate_providers'\n\n id = db.Column(db.Integer, primary_key=True)\n shipping_zone_id = db.Column(db.Integer, nullable=False)\n carrier_service_id = db.Column(db.Integer, nullable=False)\n flat_modifier = db.Column(db.Float, nullable=False)\n percent_modifier = db.Column(db.Integer, nullable=False)\n service_filter = db.Column(db.Text, nullable=True)\n\n @classmethod\n def upsert(cls, data):\n add = False\n try:\n s = ShippingZoneCarrierShippingRateProviders.get(data['id'])\n if not s:\n s = 
ShippingZoneCarrierShippingRateProviders()\n add = True\n s.id = data['id']\n s.shipping_zone_id = data['shipping_zone_id']\n s.carrier_service_id = data['carrier_service_id']\n s.flat_modifier = data['flat_modifier']\n s.percent_modifier = data['percent_modifier']\n s.service_filter = json.dumps(data['service_filter'])\n \n if add:\n db.session.add(s)\n db.session.commit()\n\n except KeyError as e:\n print(\"Key Error: \", e)\n return\n except Exception as e:\n print(\"ShippingZoneCarrierShippingRateProviders Exception: \", e)\n return\n return s.id\n\n @classmethod\n def get(cls, id):\n shippingZoneCarrierShippingRateProviders = db.session.query(ShippingZoneCarrierShippingRateProviders) \\\n .filter(ShippingZoneCarrierShippingRateProviders.id == id) \\\n .first()\n return shippingZoneCarrierShippingRateProviders\n","sub_path":"app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":69086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"639058596","text":"## This code creates html table for codeforces. \nfrom openpyxl import Workbook,load_workbook\nimport requests\nfrom math import log10\nimport csv\nimport textwrap\n\ndef getWinProbability(ra,rb):\n\treturn 1.0 / (1.0 + pow( 10.0, (rb - ra) / 400.0))\n\ndef aggregateRatings(teamRatings):\n\tif(len(teamRatings)==0):\n\t\treturn 0\n\n\tleft = 1\n\tright = 1E4\n\n\tfor tt in range(100):\n\t\tr = (left + right) / 2.0\n\n\t\trWinsProbability = 1.0\n\t\tfor i in range (len(teamRatings)):\n\t\t\trWinsProbability *= getWinProbability(r, teamRatings[i])\n\n\t\trating = log10(1 / (rWinsProbability) - 1) * 400 + r\n\n\t\tif (rating > r):\n\t\t\tleft = r\n\t\telse:\n\t\t\tright = r\n\n\treturn (left + right) / 2.0\n\ncolor=[\n{'title':'Legendary grandmaster','user':'user-legendary','lo':3000,'hi':3999},\n{'title':'International Grandmaster','user':'user-red','lo':2600,'hi':2999},\n{'title':'Grandmaster','user':'user-red','lo':2400,'hi':2599},\n{'title':'International master','user':'user-orange','lo':2300,'hi':2399},\n{'title':'Master','user':'user-orange','lo':2100,'hi':2299},\n{'title':'Candidate master','user':'user-violet','lo':1900,'hi':2099},\n{'title':'Expert','user':'user-blue','lo':1600,'hi':1899},\n{'title':'Specialist','user':'user-cyan','lo':1400,'hi':1599},\n{'title':'Pupil','user':'user-green','lo':1200,'hi':1399},\n{'title':'Newbie','user':'user-gray','lo':0,'hi':1199},\n]\n\ndef getColor(rating):\n\tfor i in range(len(color)):\n\t\tif(rating>=color[i]['lo'] and rating<=color[i]['hi']):\n\t\t\treturn color[i]\n\ndef reduce(text):\n\tntxt=textwrap.wrap(text, 11, break_long_words=True)\n\tfor i in range(len(ntxt)):\n\t\tif ntxt[i][0]=='_':\n\t\t\tntxt[i]='\\\\'+ntxt[i]\n\t\t\t# print(ntxt[i])\n\treturn \" \".join(ntxt)\n\ndef getLink(user):\n\tcol=getColor(user['rate'])\n\tlink=''+reduce(user['han'])+''\n\treturn link\n\ndef getRating(handle):\n\tURL = \"https://codeforces.com/api/user.info\"\n\tPARAMS = {'handles': handle}\n\twhile True:\n\t\ttry:\n\t\t\tres = requests.get(url=URL, params=PARAMS,timeout=10).json()\n\t\t\tif (res['status'] == 'OK'):\n\t\t\t\tif 'maxRating' in res['result'][0]:\n\t\t\t\t\treturn res['result'][0]['maxRating']\n\t\t\t\telse:\n\t\t\t\t\treturn 0\n\t\t\telse:\n\t\t\t\treturn 0\n\t\texcept Exception as e:\n\t\t\tprint(e)\n\treturn 0\n\nprint(\"Enter xlsx file name wihtout extension \") \ninput_file = input()\nwb = load_workbook(input_file + \".xlsx\")\nfor ws in wb : \n\t# ws = wb.active\n\tprint(\"================processinng 
{}==================\\n\\n\" .format(ws.title))\n\tteamList=[]\n\tindex = 0\n\tfor i in range (2,400):\n\t\tif(not (ws.cell(i, 3).value or ws.cell(i, 4).value or ws.cell(i, 5).value)) : \n\t\t\tcontinue\n\n\t\tprint(\"processing \"+str(i))\n\t\tindex = index + 1\n\t\tmem=[{'han':ws.cell(i,3).value,'rate':0},{'han':ws.cell(i,4).value,'rate':0},{'han':ws.cell(i,5).value,'rate':0}]\n\t\trat=0\n\t\trated=[]\n\t\tfor j in range(3):\n\t\t\tif mem[j]['han']:\n\t\t\t\tprint(mem[j]['han'], end=\" \")\n\t\t\t\tmem[j]['rate']=getRating(mem[j]['han'])\n\t\t\t\trated.append(mem[j]['rate'])\n\t\tmem = sorted(mem, key=lambda k: k['rate'],reverse=True)\n\t\trat=round(aggregateRatings(rated))\n\t\tprint(rat)\n\t\tteam={\n\t\t\t'air': index,\n\t\t\t'name':ws.cell(i,1).value,\n\t\t\t'inst':ws.cell(i,2).value,\n\t\t\t'mem1':mem[0],\n\t\t\t'mem2':mem[1],\n\t\t\t'mem3':mem[2],\n\t\t\t'loc':ws.cell(i,7).value,\n\t\t\t'rat':rat\n\t\t}\n\t\t# print(team)\n\t\tteamList.append(team)\n\n\tteamList = sorted(teamList, key=lambda k: (k['rat'],-k['air']),reverse=True)\n\twith open(ws.title + \"_result.txt\", 'w') as result :\n\t\t## Header \n\t\tresult.write(\"<table>\")\n\t\tresult.write(\"<thead>\")\n\t\tresult.write(\"<tr>\")\n\t\tresult.write(\"<th>No.</th>\")\n\t\tresult.write(\"<th>Team Name</th>\")\n\t\tresult.write(\"<th>Institute Name</th>\")\n\t\tresult.write(\"<th>Member1</th>\")\n\t\tresult.write(\"<th>Member2</th>\")\n\t\tresult.write(\"<th>Member3</th>\")\n\t\tresult.write(\"<th>Rating</th>\")\n\t\tresult.write(\"<th>Location</th>\")\n\t\tresult.write(\"</tr>\")\n\t\tresult.write(\"</thead>\")\n\t\t## \n\n\t\t## body \n\t\tresult.write(\"<tbody>\")\n\t\t# writer = csv.writer(csvFile)\n\t\t# header=['No.','AIR','Team Name','Institute Name','Member 1','Member 2','Member 3','Rating','Location']\n\t\t# writer.writerow(header)\n\t\tfor i in range(len(teamList)):\n\t\t\t# result.write(teamList[i])\n\t\t\tif teamList[i]['mem1']['rate']>0:\n\t\t\t\tteamList[i]['mem1']['han']=getLink(teamList[i]['mem1'])\n\t\t\tif teamList[i]['mem2']['rate']>0:\n\t\t\t\tteamList[i]['mem2']['han']=getLink(teamList[i]['mem2'])\n\t\t\tif teamList[i]['mem3']['rate']>0:\n\t\t\t\tteamList[i]['mem3']['han']=getLink(teamList[i]['mem3'])\n\t\t\tif teamList[i]['rat']:\n\t\t\t\tteamList[i]['name']=getLink({'han':teamList[i]['name'],'rate':teamList[i]['rat']})\n\t\t\t\tteamList[i]['rat']=getLink({'han':str(teamList[i]['rat']),'rate':teamList[i]['rat']})\n\t\t\tresult.write(\"<tr>\")\n\t\t\tresult.write(\"<td>{}</td>\" .format(i + 1))\n\t\t\tresult.write(\"<td>{}</td>\" .format(teamList[i]['name']))\n\t\t\tresult.write(\"<td>{}</td>\" .format(teamList[i]['inst']))\n\t\t\tresult.write(\"<td>{}</td>\" .format(teamList[i]['mem1']['han']))\n\t\t\tresult.write(\"<td>{}</td>\" .format(teamList[i]['mem2']['han']))\n\t\t\tresult.write(\"<td>{}</td>\" .format(teamList[i]['mem3']['han']))\n\t\t\tresult.write(\"<td>{}</td>\" .format(teamList[i]['rat']))\n\t\t\tresult.write(\"<td>{}</td></tr>\" .format(teamList[i]['loc']))\n\n\t\t# close table\n\t\tresult.write(\"</tbody>\")\n\t\tresult.write(\"</table>\")\n","sub_path":"extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":5321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"207616455","text":"import requests\nfrom bs4 import BeautifulSoup\n\nwebsite_url = \"https://www.onlinekhabar.com\"\n\n\ndef onlinekhabar_detail_webscraping(link):\n url = link\n r = requests.get(url)\n html_content = r.content\n soup = BeautifulSoup(html_content, 'html.parser')\n description = soup.find('div', class_='col colspan3 main__read--content ok18-single-post-content-wrap').text\n return description\n\n\ndef onlinekhabar_list_webscraping():\n url = website_url + \"/content/news\"\n r = requests.get(url)\n html_content = r.content\n soup = BeautifulSoup(html_content, 'html.parser')\n articles = soup.find_all('div', class_='relative list__post show_grid--view')\n list_of_scrapped_articles = []\n for article in articles:\n title = article.find('div', class_='item__wrap').find('a').text\n image = article.find('img').get('src')\n source = 'onlinekhabar'\n link = article.find('a').get('href')\n description = onlinekhabar_detail_webscraping(link)\n list_of_scrapped_articles.append([title, image, source, description])\n\n return list_of_scrapped_articles\n\n\n","sub_path":"news/webscrapers/onlinekhabar_webscraping.py","file_name":"onlinekhabar_webscraping.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"592351588","text":"from bs4 import BeautifulSoup\n#import urllib\nimport urllib.request\nimport re\nimport sys\nfrom konlpy.tag import Twitter\nfrom collections import Counter\n# Word2Vec embedding\n#from gensim.models import Word2Vec\n\n#tokenized_contents ='' # review tokenization result from the cohesion tokenizer\n# 100-dimensional vectors, window of 2 words before and after, words appearing fewer than 50 times in the corpus excluded, 4 worker cores, 100 training iterations\n#embedding_model = Word2Vec(tokenized_contents, size=100, window = 2, min_count=50, workers=4, iter=100, sg=1)\n\n# result files: 2 = lightly filtered results, 3 = word analysis\nRESULT_FILE_NAME1 = \"result1.txt\"\nRESULT_FILE_NAME2 = \"result2.txt\"\nRESULT_FILE_NAME3 = \"result3.txt\"\n# target URL to parse: Tmon Super Mart Bibigo yukgaejang (http://www.ticketmonster.co.kr/userReview/732328290/19240042/?page=1#content_start)\nURL= 'https://www.ticketmonster.co.kr/userReview/732328290/19240042?coupon_srl=0#content_start'\n# the review page number sits in the middle of the URL, so the URL is kept split in two parts\nU_head = \"https://www.ticketmonster.co.kr/userReview/732328290/19240042/?page=\"\nU_tail = \"#content_start\"\n\n# crawling function\ndef get_text(URL):\n # counter for URL paging\n URL_NUM = 1\n text =''\n for i in range(1, 10):\n # crawl several pages in a row\n full_url = U_head + str(URL_NUM) + U_tail\n source_code_from_URL = urllib.request.urlopen(full_url)\n soup = BeautifulSoup(source_code_from_URL, 'lxml', from_encoding=\"utf-8\")\n # crawl from the div that contains the reviews\n for item in soup.find_all('div', {\"class\": \"review_ct\"}):\n tmp = item.find_all('div', {\"class\": \"sec\"})\n for x in tmp:\n temp = str(x.find('div', {\"class\": \"review_area\"}).find('div', {\"class\": \"txt_box\"}).find('div', {\"class\": \"txt\"}))\n temp = re.sub(r'<div class=\"txt\">', '', temp)\n temp = re.sub('\n+\s*', '', temp)\n temp = re.sub(r'[\s]*<br/>', '', temp)\n temp = re.sub(r'</div>', '', temp)\n text += temp+\"\\n------------------------------------------------\\n\"\n # turn to the next page\n URL_NUM += 1\n return text\n # source_code_from_URL = urllib.request.urlopen(URL)\n # soup = BeautifulSoup(source_code_from_URL, 'lxml', from_encoding=\"utf-8\")\n # text = ''\n # review_result = soup.find('div', {\"class\": \"detail_list_review extend_premium\"})\n # list = review_result.find_all('li')\n # # fetch the premium review section\n # for item in soup.find_all('div', {\"class\": \"detail_list_review extend_premium\"}):\n # text = text +str(item.find_all(text=True))\n\n # fetch the Coupang review section -> did Coupang block scraping? it doesn't work lol\n # for item in soup.find_all('div', {\"class\": \"review_ct\"}):\n # text = text + str(item.find_all(text=True))\n # review_result = soup.find('div', class_='list _premiumReviewListArea', display='hidden')\n # lis = review_result.find_all('li')\n #\n # count = 10\n # for li in lis:\n # page = 1\n # #reple = str(li.find('div',class_='item case_2 _click').find('div', class_='row').find('div', class_='col_content').find('div', class_='inner_content').find('div', class_='review_comment').find('p'))\n # reple = str(li.find('div', class_='item case_2 _click').find('p'))\n # text = text + reple\n\n# meant to remove only newline characters, but strips other unneeded clutter as well\ndef remove_newline_char(newline):\n remove_newline = re.sub('[\{\}\[\]\/?.,;:|\)*~`!^\-_+<>@\#$%&\\\=\(\'\"]','', newline)\n remove_newline = re.sub('[a-zA-Z]','', remove_newline)\n return remove_newline\n\n# keyword extraction: referenced the blog post http://yoonpunk.tistory.com/7\ndef get_keywords(text, ntags = 50):\n spliter = Twitter()\n nouns = spliter.nouns(text)\n count = Counter(nouns)\n return_list = []\n for n, c in count.most_common(ntags):\n temp = {'tag': n, 'count':c}\n return_list.append(temp)\n return return_list\n\n# main function\ndef main():\n # save the Bibigo yukgaejang reviews to a text file\n open_result_file = open(RESULT_FILE_NAME2,'w', encoding='UTF-8', newline='')\n result_text = get_text(URL)\n # result_text = remove_newline_char(result_text)\n open_result_file.write(result_text)\n open_result_file.close()\n # count frequently used nouns in the saved text file\n\n open_text_file = open(RESULT_FILE_NAME2, 'r', encoding='UTF-8', newline='')\n text = open_text_file.read()\n tags = get_keywords(text, 20)\n open_text_file.close()\n open_output_file = open(RESULT_FILE_NAME3, 'w', encoding='UTF-8', newline='')\n for tag in tags:\n noun = tag['tag']\n count = tag['count']\n open_output_file.write('{} {}\\n'.format(noun, count))\n\n open_output_file.close()\n\nif __name__ == '__main__':\n main()","sub_path":"crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":4979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"174348381","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport copy\n\nfrom keystoneauth1 import loading\n\nimport keystonemiddleware.auth_token\nfrom keystonemiddleware.auth_token import _base\n\nauth_token_opts = [\n (_base.AUTHTOKEN_GROUP,\n keystonemiddleware.auth_token._OPTS +\n loading.get_auth_common_conf_options())\n]\n\n__all__ = (\n 'list_opts',\n)\n\n\ndef list_opts():\n \"\"\"Return a list of oslo_config options available in auth_token middleware.\n\n The returned list includes the non-deprecated oslo_config options which may\n be registered at runtime by the project. The purpose of this is to allow\n tools like the Oslo sample config file generator to discover the options\n exposed to users by this middleware.\n\n Deprecated Options should not show up here so as to not be included in\n sample configuration.\n\n Each element of the list is a tuple. The first element is the name of the\n group under which the list of elements in the second element will be\n registered. A group name of None corresponds to the [DEFAULT] group in\n config files.\n\n This function is discoverable via the entry point\n 'keystonemiddleware.auth_token' under the 'oslo.config.opts' namespace.\n\n :returns: a list of (group_name, opts) tuples\n \"\"\"\n auth_token_opts = (keystonemiddleware.auth_token._OPTS +\n loading.get_auth_common_conf_options())\n\n return [(_base.AUTHTOKEN_GROUP, copy.deepcopy(auth_token_opts))]\n","sub_path":"keystonemiddleware/auth_token/_opts.py","file_name":"_opts.py","file_ext":"py","file_size_in_byte":1956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"361243245","text":"# *****************************************************************************\n# * Copyright 2019 Amazon.com, Inc. and its affiliates. All Rights Reserved. *\n# *\n# Licensed under the Amazon Software License (the \"License\"). *\n# You may not use this file except in compliance with the License. *\n# A copy of the License is located at *\n# *\n# http://aws.amazon.com/asl/ *\n# *\n# or in the \"license\" file accompanying this file. This file is distributed *\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either *\n# express or implied. See the License for the specific language governing *\n# permissions and limitations under the License. 
*\n# *****************************************************************************\nimport argparse\nimport logging\nimport sys\n\nimport torch\nfrom torch.utils.data import DataLoader\n\nfrom evaluation_dataset_factory_service_locator import EvaluationDatasetFactoryServiceLocator\nfrom evaluator_factory_service_locator import EvalutorFactoryServiceLocator\nfrom predictor import Predictor\n\n\nclass PredictEvaluate:\n\n def __call__(self, dataset_factory_name, model_path, query_images_dir, gallery_images_dir=None,\n eval_factory_name=\"EvaluationFactory\"):\n # Construct factories\n evalfactory = EvalutorFactoryServiceLocator().get_factory(eval_factory_name)\n evaluator = evalfactory.get_evaluator()\n datasetfactory = EvaluationDatasetFactoryServiceLocator().get_factory(dataset_factory_name)\n\n query_dataset, gallery_dataset = datasetfactory.get(query_images_dir, gallery_images_dir)\n\n # get query embeddings\n class_person_query, embeddings_query = self._get_predictions(query_dataset, model_path)\n\n # Get gallery embeddings\n class_person_gallery, embeddings_gallery = self._get_predictions(gallery_dataset, model_path)\n\n # Evaluate\n result = evaluator(query_embedding=embeddings_query,\n query_target_class=class_person_query,\n gallery_embedding=embeddings_gallery,\n gallery_target_class=class_person_gallery)\n return result\n\n def _get_predictions(self, dataset, model_path):\n \"\"\"\n Returns predictions for the dataset\n :param dataset: Dataset\n :param model_path: Model path to use for predictions\n :return:\n \"\"\"\n batch_size = min(len(dataset), 32)\n dataloader_query = DataLoader(dataset, batch_size=batch_size, shuffle=False)\n model = Predictor(model_path)\n embeddings = []\n class_person = []\n for person_img, target in dataloader_query:\n embedding = model(person_img)\n embeddings.extend(embedding)\n class_person.extend(target)\n\n embeddings = torch.stack(embeddings)\n class_person = torch.stack(class_person)\n return class_person, embeddings\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--dataset\",\n help=\"The type of dataset\",\n choices=EvaluationDatasetFactoryServiceLocator().factory_names, required=True)\n\n parser.add_argument(\"--modelpath\",\n help=\"The model path\", required=True)\n\n parser.add_argument(\"--queryimagesdir\",\n help=\"The directory path containing query dir\", required=True)\n\n parser.add_argument(\"--galleryimagesdir\",\n help=\"The directory path containing gallery dataset\", default=None)\n\n parser.add_argument(\"--log-level\", help=\"Log level\", default=\"INFO\", choices={\"INFO\", \"WARN\", \"DEBUG\", \"ERROR\"})\n\n args = parser.parse_args()\n\n print(args.__dict__)\n\n # Set up logging\n logging.basicConfig(level=logging.getLevelName(args.log_level), handlers=[logging.StreamHandler(sys.stdout)],\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\n result = PredictEvaluate()(args.dataset, args.modelpath, args.queryimagesdir, args.galleryimagesdir)\n print(\"Score is {}\".format(result))\n","sub_path":"src/main_predict_evaluate.py","file_name":"main_predict_evaluate.py","file_ext":"py","file_size_in_byte":4469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"135660394","text":"import os, inspect, pickle\ncurrentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nparentdir = os.path.dirname(os.path.dirname(currentdir))\nos.sys.path.insert(0,parentdir)\nfrom 
kukaGrasp_pybullet.networks.NeuralNetwork import Network,GraspNetwork\nimport tensorflow as tf\nfrom tensorflow.python.keras.models import Model\nfrom tensorflow.python.keras import Input\nfrom tensorflow.python.keras.layers import Dense,Conv2D,MaxPooling2D,Add,Flatten,Lambda,Concatenate\nfrom matplotlib import pyplot as plt\nimport numpy as np\n\nclass Actor(Network):\n\tdef __init__(self,sess,imageInput,imageFeatureLayer,outputDimension,optimizer='AdadeltaOptimizer',loss='MSE',lr=0.01):\n\t\tself.sess=sess\n\t\tself.imageInput=imageInput\n\t\tself.lr=lr\n\t\tself.buildNetwork(imageFeatureLayer,outputDimension,optimizer,loss)\n\n\tdef buildNetwork(self,imageFeatureLayer,outputDimension,optimizer,loss):\n\t\twith tf.variable_scope(\"actor\"):\n\t\t\thiddenLayer=Dense(32,activation='relu')(imageFeatureLayer)\n\t\t\thiddenLayer=Dense(16,activation='relu')(hiddenLayer)\n\t\t\thiddenLayer=Dense(8,activation='relu')(hiddenLayer)\n\t\t\tactionOutput=Dense(outputDimension[0],activation='tanh',name='action_output')(hiddenLayer)\n\t\t\t# actionOutput=tf.sign(actionOutput)\n\t\tself.output=actionOutput\n\n\t\tself.actionGrad=tf.placeholder(tf.float32,[None,outputDimension[0]])\n\t\tnetwork_params=tf.trainable_variables('actor')\n\t\tself.parameters_gradients=tf.gradients(actionOutput,network_params,-self.actionGrad)\n\t\topti=getattr(tf.train,optimizer)(self.lr)\n\t\tself.optimizer=opti.apply_gradients(zip(self.parameters_gradients,network_params))\n\t\t\n\tdef getAction(self,X):\n\t\t# X should be a np-array of state information, other parameters are not used in this network\n\t\treturn self.sess.run(self.output,feed_dict={\n\t\t\tself.imageInput:X\n\t\t})\n\n\tdef fitModelByGrad(self,state,actionGrad):\n\t\t# print(self.sess.run(self.parameters_gradients,feed_dict={\n\t\t# \tself.imageInput:state,\n\t\t# \tself.actionGrad:actionGrad\n\t\t# }))\n\t\t# print(-actionGrad)\n\t\t# print(actionGrad.shape,state.shape)\n\t\t# input()\n\t\tself.sess.run(self.optimizer,feed_dict={\n\t\t\tself.imageInput:state,\n\t\t\tself.actionGrad:actionGrad\n\t\t})\n\n\nclass Critic(Network):\n\tdef __init__(self,sess,imageInput,imageFeatureLayer,actionDimension,outputDimension,outputScale,lr=0.001,optimizer='Adadelta',loss='MSE'):\n\t\tself.sess=sess\n\t\tself.imageInput=imageInput\n\t\tself.scale=outputScale\n\t\tself.lr=lr\n\t\tself.buildNetwork(imageFeatureLayer,actionDimension,outputDimension,optimizer,loss)\n\n\tdef buildNetwork(self,imageFeatureLayer,actionDimension,outputDimension,optimizer,loss):\n\t\ttf.keras.backend.set_session(self.sess)\n\t\tmid,half=((self.scale[0]+self.scale[1])/2,(self.scale[1]-self.scale[0])/2)\n\t\twith tf.variable_scope(\"critic\"):\n\t\t\tactionInput=Input(shape=actionDimension,name='actionInput')\n\t\t\t# hiddenLayer=Lambda(lambda x: x*256)(actionInput)\n\t\t\t# hiddenLayer=Dense(64,activation='relu')(hiddenLayer)\n\t\t\thiddenLayer=Dense(32,activation='relu')(actionInput)\t\t\t\n\t\t\thiddenLayer=Dense(32,activation='tanh')(hiddenLayer)\n\t\t\thiddenLayer=Add()([hiddenLayer,imageFeatureLayer])\n\t\t\thiddenLayer=Dense(64,activation='relu')(hiddenLayer)\n\t\t\thiddenLayer=Dense(32,activation='relu')(hiddenLayer)\n\t\t\t# hiddenLayer=Dense(32,activation='relu')(hiddenLayer)\n\t\t\t# hiddenLayer=Dense(outputDimension[0],activation='tanh',name='value_output')(hiddenLayer)\n\t\t\t# valueOutput=Lambda(lambda x: 
x*half+mid)(hiddenLayer)\n\t\t\tvalueOutput=Dense(outputDimension[0],activation='tanh',name='value_output')(hiddenLayer)\n\t\tself.output=valueOutput\n\t\tself.actionInput=actionInput\n\n\t\tself.actionGrad=tf.gradients(valueOutput,self.actionInput)\n\t\tmodel=Model(inputs=[self.imageInput,actionInput],outputs=valueOutput)\n\t\topti=getattr(tf.keras.optimizers,optimizer)(lr=self.lr)\n\t\tself.optimizer=opti\n\t\tmodel.compile(loss=loss,optimizer=opti)\n\t\tself.model=model\n\n\tdef getActionGrad(self,state,action):\n\t\treturn self.sess.run(self.actionGrad,feed_dict={\n\t\t\t\tself.imageInput:state,\n\t\t\t\tself.actionInput:action\n\t\t})\n\nclass ActorCriticDDPG(GraspNetwork):\n\tdef buildNetwork(self,imageDimension,actionDimension,outputDimension,rewardScale,optimizer,loss):\n\t\twith tf.variable_scope(\"imageProcess\"):\n\t\t\timageInput=Input(shape=imageDimension,name='imageInput')\n\t\t\thiddenLayer=Conv2D(16,(3,3),strides=2,activation='relu',padding='same')(imageInput)\n\t\t\thiddenLayer=MaxPooling2D(pool_size=(3,3))(hiddenLayer)\n\t\t\t# for convLayer in range(6):\n\t\t\t# \thiddenLayer=Conv2D(64,(5,5),activation='relu')(hiddenLayer)\n\t\t\t# hiddenLayer=MaxPooling2D(pool_size=(3,3))(hiddenLayer)\n\t\t\t# for convLayer in range(6):\n\t\t\t# \thiddenLayer=Conv2D(64,(3,3),activation='relu',padding='same')(hiddenLayer)\n\t\t\t# hiddenLayer=MaxPooling2D(pool_size=(2,2))(hiddenLayer)\n\t\t\t# for convLayer in range(3):\n\t\t\t# \thiddenLayer=Conv2D(64,(3,3),activation='relu',padding='same')(hiddenLayer)\n\t\t\tself.displayLayer=hiddenLayer\n\t\t\thiddenLayer=Flatten()(hiddenLayer)\n\t\t\timageFeatureLayer=Dense(32,activation='tanh')(hiddenLayer)\n\t\tself.imageFeatureLayer=imageFeatureLayer\n\t\tself.imageInput=imageInput\n\n\t\tactor=Actor(self.sess,imageInput,imageFeatureLayer,actionDimension,optimizer+'Optimizer',loss,self.lr)\n\t\tcritic=Critic(self.sess,imageInput,imageFeatureLayer,actionDimension,outputDimension,rewardScale,self.lr,optimizer,loss)\n\t\tself.actor=actor\n\t\tself.critic=critic\n\t\tself.sess.run(tf.global_variables_initializer())\n\n\tdef getAction(self,X,actionSpace):\n\t\treturn self.actor.getAction(X)\n\n\tdef fitModel(self,X,Y,epochs=1,verbose=0,action=False,critic=True):\n\t\tstates,actions=X[0],X[1]\n\t\t# self.critic.fitModel(X,Y,epochs,verbose)\n\t\tif critic:\n\t\t\tself.critic.fitModel(X,Y,epochs,verbose)\n\t\t\n\t\tif action:\n\t\t\tacts=self.actor.getAction(states)\n\t\t\trewards=self.predict([states,acts])\n\t\t\ttotalR=sum(self.predict([states,acts]))/len(acts)\n\t\t\t# print(\"before train actor\")\n\t\t\t# for i in range(len(states)):\n\t\t\t# \tprint(acts[i])\n\t\t\t# \tprint(rewards[i])\n\t\t\t# \tm=states[i]\n\t\t\t# \tplt.imshow(m)\n\t\t\t# \tplt.show()\n\t\t\t# print(totalR)\n\t\t\t\n\t\t\tfor epoch in range(epochs):\n\t\t\t\t# self.critic.fitModel(X,Y,1,0)\t\t\n\t\t\t\tacts=self.actor.getAction(states)\t\n\t\t\t\tactionGrads=self.critic.getActionGrad(states,acts)[0]\n\t\t\t\t# for i in range(len(states)):\n\t\t\t\t\t# print(acts[i])\n\t\t\t\t\t# print(rewards[i])\n\t\t\t\t\t# m=states[i]\n\t\t\t\t\t# plt.imshow(m)\n\t\t\t\t\t# plt.show()\n\t\t\t\t# print(acts)\n\t\t\t\t# print(actionGrads)\n\t\t\t\tself.actor.fitModelByGrad(states,actionGrads)\n\t\t\t\t# input()\n\t\t\t# print('after train actor')\n\t\t\t# acts=self.actor.getAction(states)\n\t\t\t# totalR=sum(self.predict([states,acts]))/len(acts)\n\t\t\t# for i in range(len(states)):\n\t\t\t# \tm=states[i]\n\t\t\t# \tplt.imshow(m)\n\t\t\t# \tplt.show()\n\t\t\t# \tprint(acts[i])\n\t\t\t# 
print(totalR)\n\t\t\t# input()\n\n\tdef predict(self,X):\n\t\treturn self.critic.predict(X)\n\n\tdef displayImageFeatures(self,X,layer=None):\n\t\tif layer==None:layer=self.displayLayer\n\t\tval=self.sess.run(layer,feed_dict={\n\t\t\tself.imageInput:X\n\t\t\t})[0]\n\t\tfor i in range(64):\n\t\t\tgraph=val[:,:,i]\n\t\t\tplt.imshow(graph)\n\t\t\tplt.show()\n\n\tdef loadModel(self,filePath):\n\t\tsaver=tf.train.Saver()\n\t\tsaver.restore(self.sess,filePath)\n\t\t# self.critic.optimizer=getattr(tf.keras.optimizers,'Adadelta')(lr=1.0)\n\t\t# self.critic.model.compile(loss='MSE',optimizer=self.critic.optimizer)\n\t\t# self.sess.run(tf.variables_initializer(self.critic.optimizer.variables()))\n\n\nif __name__=='__main__':\n\tsess=tf.Session()\n\tddpg=ActorCriticDDPG(sess,(472,472,3),(4,),(1,))\n\tprint(\"success\")","sub_path":"networks/toyDDPG.py","file_name":"toyDDPG.py","file_ext":"py","file_size_in_byte":7333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"220394701","text":"\nimport acm\nimport FUxUtils\nimport FUxCore\n\nshiftDateFunction = acm.GetFunction(\"shiftVolatilityStructureDate\", 2)\n\ndef OnSelectTimeBuckets(self, cd ):\n timeBuckets = acm.UX().Dialogs().SelectTimeBuckets(self.m_fuxDlg.Shell(), self.m_storedTimeBuckets)\n if timeBuckets:\n self.m_storedTimeBuckets = timeBuckets\n self.m_timeBuckets = timeBuckets.TimeBuckets()\n self.UpdateControls()\n\nclass vegaunderlyingMaturityDialog (FUxCore.LayoutDialog):\n def __init__(self):\n self.m_bindings = None\n self.m_initialData = None\n self.m_shiftAllStructures = None\n self.m_volatilityStructures = None\n self.m_storedTimeBuckets = None\n self.m_timeBuckets = None\n self.m_timeBucketsEdit = None\n self.m_timeBucketsBtn = None\n self.m_okBtn = None\n\n def HandleApply( self ):\n if not self.m_bindings.Validate(True):\n return None\n dictResult = self.m_bindings.GetValuesByName()\n dictResult.AtPut('timeBuckets', self.m_timeBuckets )\n return dictResult\n\n def ServerUpdate(self, sender, aspectSymbol, parameter):\n self.UpdateControls()\n\n def UpdateControls(self):\n if self.m_timeBuckets:\n self.m_timeBucketsEdit.SetData(self.m_timeBuckets.StringKey() )\n else:\n self.m_timeBucketsEdit.SetData('')\n shiftAllStructures = self.m_bindings.GetValuesByName().At('shiftAllStructures')\n self.m_volatilityStructures.Enabled(not shiftAllStructures)\n ok = False\n if self.m_bindings.Validate(False):\n vol = self.m_bindings.GetValuesByName().At('volatilityStructure')\n if (self.m_timeBuckets != None) and ((vol != None) or shiftAllStructures): \n ok = True\n self.m_okBtn.Editable( ok )\n\n def HandleCreate( self, dlg, layout):\n self.m_fuxDlg = dlg\n self.m_timeBucketsEdit = layout.GetControl('timeBuckets')\n self.m_timeBucketsBtn = layout.GetControl('timeBucketsBtn')\n self.m_fuxDlg.Caption('Vega Buckets' )\n self.m_okBtn = layout.GetControl('ok')\n self.m_bindings.AddLayout(layout)\n self.m_timeBucketsEdit.Editable(False)\n self.m_timeBucketsBtn.AddCallback('Activate', OnSelectTimeBuckets, self)\n self.m_shiftAllStructures.SetValue(True)\n if self.m_initialData :\n self.m_bindings.SetValuesByName(self.m_initialData)\n self.m_timeBuckets = self.m_initialData.At('timeBuckets')\n self.UpdateControls()\n\n def InitControls(self):\n self.m_bindings = acm.FUxDataBindings()\n self.m_bindings.AddDependent(self)\n self.m_shiftAllStructures = self.m_bindings.AddBinder('shiftAllStructures', acm.GetDomain('bool'), None)\n self.m_volatilityStructures = 
self.m_bindings.AddBinder('volatilityStructure', acm.GetDomain('FVolatilityStructure'), None)\n\n    def CreateLayout(self):\n        b = acm.FUxLayoutBuilder()\n        b.BeginVertBox('None')\n        b.  BeginVertBox('Invisible')\n        self.m_shiftAllStructures.BuildLayoutPart(b, 'Shift All Volatility Structures')\n        self.m_volatilityStructures.BuildLayoutPart(b, 'Volatility Structure')\n        b.  BeginHorzBox('None')\n        b.   AddInput('timeBuckets', 'Time Buckets' )\n        b.   AddButton('timeBucketsBtn', '...', False, True )\n        b.  EndBox()\n        b. EndBox()\n        b. BeginHorzBox('None')\n        b.  AddSpace(50)\n        b.  AddFill()\n        b.  AddButton('ok', 'OK')\n        b.  AddButton('cancel', 'Cancel')\n        b. EndBox()\n        b.EndBox()\n        return b\n\ndef ael_custom_dialog_show(shell, parameters):\n    vegaUMDlg = vegaunderlyingMaturityDialog()\n    initData = FUxUtils.UnpackInitialData(parameters)\n    dialogData = None\n    if initData:\n        dialogData = initData.At('dialogData')\n    vegaUMDlg.InitControls()\n    vegaUMDlg.m_initialData = dialogData\n\n    dialogData = acm.UX().Dialogs().ShowCustomDialogModal(shell, vegaUMDlg.CreateLayout(), vegaUMDlg)\n    if dialogData:\n        resultDict = acm.FDictionary()\n        resultDict.AtPut('dialogData', dialogData)\n        return resultDict \n    return None\n\ndef ael_custom_dialog_main(parameters, dictExtra):\n    dialogData = parameters.At('dialogData')\n    shiftAllStructures = dialogData.At('shiftAllStructures')\n    shiftFilter = acm.FObject\n    if not shiftAllStructures:\n        shiftToDate = 0\n        if acm.IsHistoricalMode():\n            shiftToDate = acm.Time().DateToday()\n        shiftFilter = shiftDateFunction(\n            dialogData.At('volatilityStructure'),\n            shiftToDate)\n    resultVector = []\n    for timeBucket in dialogData.At('timeBuckets'):\n        np = acm.FNamedParameters()\n        np.Name(timeBucket.Name())\n        np.UniqueTag(timeBucket.Spec())\n        np.AddParameter('timeBucket', timeBucket)\n        np.AddParameter('shiftFilter', shiftFilter)\n        resultVector.append(np)\n    return resultVector\n","sub_path":"Extensions/Core Risk/FPythonCode/vegaDynamicUnderlyingMaturityBuckets.py","file_name":"vegaDynamicUnderlyingMaturityBuckets.py","file_ext":"py","file_size_in_byte":4969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"429067996","text":"import os, sys\nimport copy\n\nclass Parser:\n    empty_line_d = {\n        'filename': None,\n        'fn_name': None,\n        'line_number': None,\n        'time': None,\n        'lines': [],\n        'attrs': {},\n        'calls': [],\n    }\n\n    empty_call_d = {\n        'filename': None,\n        'fn_name': None,\n        'times_called': 0,\n        'time': None,\n    }\n\n    def __init__(self):\n        self.filename_trans = {}\n        self.fn_name_trans = {}\n\n        self.global_attrs = {}\n\n        self.lines_to_build = []\n        self.cur_line_d = None\n\n    def get_cur_line_d(self, force_new=False):\n        if self.cur_line_d is None:\n            self.cur_line_d = copy.deepcopy(self.empty_line_d)\n        elif force_new:\n            print(self.cur_line_d)\n            self.cur_line_d = copy.deepcopy(self.empty_line_d)\n\n        return self.cur_line_d\n\n    def parse_line_attr(self, line):\n        # split only on the first colon; attribute values may contain ':'\n        k, v = line.split(':', 1)\n        if self.cur_line_d is None:\n            self.global_attrs[k] = v.strip()\n        else:\n            self.cur_line_d['attrs'][k] = v.strip()\n\n    def de_trans(self, line, trans):\n        post = line.split('=', 1)[1]\n        # default to empty strings so the .strip() checks below are safe\n        # when no '(id)' compression prefix is present\n        trans_no = ''\n        rest = ''\n\n        if post[0] == '(':\n            trans_no, rest = post[1:].split(')', 1)\n        else:\n            rest = post\n\n        care_name = None\n        if trans_no.strip() and not rest.strip():\n            care_name = trans.get(trans_no.strip())\n        elif trans_no.strip() and rest.strip():\n            care_name = rest.strip()\n            trans[trans_no.strip()] = care_name\n        elif rest.strip():\n            care_name = rest.strip()\n\n        return care_name\n\n    def 
parse_line_filename(self, line):\n fn_name = self.de_trans(line, self.filename_trans)\n\n line_d = self.get_cur_line_d()\n line_d['filename'] = fn_name\n\n def parse_line_fn_call(self, line):\n fn_name = self.de_trans(line, self.fn_name_trans)\n\n line_d = self.get_cur_line_d()\n line_d['fn_name'] = fn_name\n\n def parse_line_number(self, line):\n line_no, time = line.split(' ')\n\n line_d = self.get_cur_line_d()\n line_d['line_number'] = line_no\n line_d['time'] = time\n\n def parse_line_empty(self, line):\n if self.cur_line_d is None:\n return\n\n print(self.cur_line_d)\n print('\\n'.join(self.lines_to_build))\n self.cur_line_d = copy.deepcopy(self.empty_line_d)\n self.lines_to_build = []\n\n def parse_line(self, line):\n if ':' in line and '::' not in line:\n self.parse_line_attr(line)\n else:\n self.lines_to_build.append(line)\n if 'fn=' in line:\n self.parse_line_filename(line)\n elif 'fl=' in line:\n self.parse_line_fn_call(line)\n elif not line.strip():\n self.parse_line_empty(line)\n elif all(x.isdigit() for x in line.split(' ')):\n self.parse_line_number(line)\n\n def parse_file(self, filename):\n with open(filename) as fd:\n for i, line in enumerate(fd):\n self.parse_line(line)\n if i >= 1000: break\n\n print('\\n'.join(self.lines_to_build))\n print(self.cur_line_d)\n print(self.global_attrs)\n\nif __name__ == '__main__':\n if len(sys.argv) >= 2:\n p = Parser()\n p.parse_file(sys.argv[1])\n else:\n print('Usage:')\n print(' python {0} '.format(sys.argv[0]))\n","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":3484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"71700404","text":"from behave import *\nfrom mock import *\nfrom start import Feinstrubbot\n\nclass AnyStringWith(str):\n def __eq__(self, other):\n return self in other\n\ndef prepare():\n # prepare all the stuff\n bot = Mock\n bot.sendMessage = MagicMock()\n scheduler = Mock()\n returnValue = MagicMock()\n returnValue.inserted_id = 1234\n users = Mock\n users.insert_one = MagicMock(return_value=returnValue)\n gmaps = Mock()\n gmaps.geocode = MagicMock(return_value=[{'geometry': {'location': {'lat': 48.77363949999999, 'lng': 9.17069},\n 'viewport': {'southwest': {'lat': 48.7722905197085,\n 'lng': 9.169341019708497},\n 'northeast': {'lat': 48.7749884802915,\n 'lng': 9.172038980291502}},\n 'location_type': 'ROOFTOP'}, 'partial_match': True,\n 'types': ['street_address'],\n 'place_id': 'ChIJg0Yy5kjbmUcR4QkTPqlizqA',\n 'address_components': [{'long_name': '41/1', 'types': ['street_number'],\n 'short_name': '41/1'},\n {'long_name': 'Rotebühlplatz', 'types': ['route'],\n 'short_name': 'Rotebühlpl.'},\n {'long_name': 'Stuttgart-Mitte',\n 'types': ['political', 'sublocality',\n 'sublocality_level_1'],\n 'short_name': 'Stuttgart-Mitte'},\n {'long_name': 'Stuttgart',\n 'types': ['locality', 'political'],\n 'short_name': 'Stuttgart'},\n {'long_name': 'Stuttgart',\n 'types': ['administrative_area_level_2',\n 'political'], 'short_name': 'Süd'},\n {'long_name': 'Baden-Württemberg',\n 'types': ['administrative_area_level_1',\n 'political'], 'short_name': 'BW'},\n {'long_name': 'Germany',\n 'types': ['country', 'political'],\n 'short_name': 'DE'},\n {'long_name': '70178', 'types': ['postal_code'],\n 'short_name': '70178'}],\n 'formatted_address': 'Rotebühlpl. 
41/1, 70178 Stuttgart, Germany'}])\n return Feinstrubbot(users=users, bot=bot, gmaps=gmaps, scheduler=scheduler)\n\n@given('a working telegram account')\ndef step_impl(context):\n context.feinstaub = prepare()\n context.feinstaub.userExists = MagicMock(return_value=False)\n\n@when('the user sends the registration string to the bot')\ndef step_impl(context):\n bot = context.feinstaub.bot\n registrationMessage = Mock()\n registrationMessage.message.from_user.id = 1234\n registrationMessage.message.from_user.first_name = \"TestUser\"\n registrationMessage.message.text = \"/registration 71067 Maichingen\"\n context.feinstaub.registration(bot, registrationMessage)\n\n@then('the user is notified about the registration result')\ndef step_impl(context):\n return context.feinstaub.users.insert_one.assert_called_once()\n\n\n@given('a registration fails')\ndef step_impl(context):\n context.feinstaub = prepare()\n context.feinstaub.userExists = MagicMock(return_value=True)\n\n@when('the user name is already in use')\ndef step_impl(context):\n bot = context.feinstaub.bot\n registrationMessage = Mock()\n registrationMessage.message.from_user.id = 1234\n registrationMessage.message.chat_id = 1337\n registrationMessage.message.from_user.first_name = \"TestUser\"\n registrationMessage.message.text = \"/registration 71067 Maichingen\"\n context.feinstaub.registration(bot, registrationMessage)\n\n@then('the system should report that to the user')\ndef step_impl(context):\n return context.feinstaub.bot.sendMessage.assert_called_with(chat_id=1337, text=AnyStringWith(\"User already registrated\"))\n","sub_path":"DockerEnvironment/pythonBuild/features/steps/steps_signUp.py","file_name":"steps_signUp.py","file_ext":"py","file_size_in_byte":5245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"397842740","text":"import os, sys, shutil\nfrom PIL import Image\n\nvalid_extensions = [\n '.jpg',\n '.jpeg',\n '.png',\n '.gif'\n]\n\n\ndef get_digit(message):\n try:\n digit = int(input(message))\n except:\n print('You must insert a valid number!\\n')\n digit = get_digit(message)\n\n return abs(digit)\n\n\ndef valid_images_list(folder_items):\n list = []\n for item in folder_items:\n filename, file_extension = os.path.splitext(item)\n for extension in valid_extensions:\n if file_extension.lower() == extension:\n list.append(item)\n\n return list\n\n\ndef resize_images(size, quality):\n new_folder = 'resized/'\n files_in_directory = [f for f in os.listdir('.') if os.path.isfile(f)]\n images_list = valid_images_list(files_in_directory)\n\n if os.path.exists(new_folder): shutil.rmtree(new_folder)\n os.makedirs(new_folder)\n\n if not 0 < quality <= 100:\n print('Quality set to 100!\\n')\n quality = 100\n\n for image in images_list:\n new_image = new_folder + image\n try:\n im = Image.open(image)\n im.thumbnail(size, Image.ANTIALIAS)\n im.save(new_image, quality=quality)\n print('Resized ' + image + '.')\n except IOError:\n print('Cannot resize ', image)\n\n print('Done! 
Resized ' + str(len(images_list)) + ' images.')\n\n\n# Get user input\nwidth = get_digit('Width: ')\nheight = get_digit('Height: ')\nquality = get_digit('Quality: ')\nnew_size = (height, width)\n\nresize_images(new_size, quality)\n","sub_path":"python/img-resizer.py","file_name":"img-resizer.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"165764881","text":"\"\"\"\nhttps://leetcode.com/problems/minimum-size-subarray-sum/\nhttps://leetcode.com/problems/shortest-subarray-with-sum-at-least-k/\n 2 3 1 2 4 3\n 0 2 5 6 8 12 15\n\n 1 4 4\n 0 1 5 9\n\"\"\"\n\n\nfrom collections import deque\n\nclass Solution:\n\n    def shortestSubarray(self, nums, k) -> int:\n        # prefixSums[i] is the sum of nums[:i]; every subarray sum is a\n        # difference of two prefix sums.\n        prefixSums = [0]\n        for num in nums:\n            prefixSums.append(prefixSums[-1] + num)\n\n        minLen = 2**31-1\n        # monotonically increasing deque of prefix-sum indices\n        window = deque()\n        for i in range(len(prefixSums)):\n            sm = prefixSums[i]\n            # the subarray after index window[0] up to i-1 has sum >= k:\n            # record its length and shrink from the left\n            while window and sm - prefixSums[window[0]] >= k:\n                minLen = min(minLen, i - window.popleft())\n            # an earlier index with prefix sum >= sm can never start a\n            # shorter qualifying subarray than i, so drop it\n            while window and prefixSums[window[-1]] >= sm:\n                window.pop()\n            window.append(i)\n\n        if minLen == 2**31-1:\n            minLen = -1\n        return minLen\n\ns = Solution()\ntarget = 7\n# nums = [2,3,1,2,4,3]\n# nums = [1,4,4]\n# target = 4\n# target = 11\n# nums = [1,1,1,1,1,1,1,1]\nnums = [2,-1,2]\nk = 3\n# nums = [48,99,37,4,-31]\n# k = 140\nprint(s.shortestSubarray(nums,k))","sub_path":"Array/max-size-sub-array-sum-bounded.py","file_name":"max-size-sub-array-sum-bounded.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"6791263","text":"\r\nfrom Models.Configuration import Configuration\r\nfrom Models.DataAccessLayer.Repositories.db.RepositoryBase import RepositoryBase\r\n\r\n\r\nclass TagValueRepository(RepositoryBase):\r\n\r\n    config = Configuration()\r\n    config.load()\r\n\r\n    def create(self, entity):\r\n        cursor = self.connection.cursor()\r\n\r\n        query = \"INSERT INTO {0} (tag_id, value, value_quality, value_timestamp) VALUES ({1}, {2}, {3}, '{4}')\" \\\r\n            .format(self.config.table_name, entity.tag_id, entity.value,\r\n                    entity.quality, entity.timestamp.strftime(\"%d-%m-%Y %H:%M:%S\"))\r\n\r\n        cursor.execute(query)\r\n\r\n        self.connection.commit()\r\n        cursor.close()\r\n\r\n    def update(self, entity):\r\n        cursor = self.connection.cursor()\r\n        cursor.execute(\"UPDATE {0} \"\r\n                       \"SET value={1}, value_quality={2} \"\r\n                       \"WHERE tag_id={3} and value_timestamp='{4}';\"\r\n                       .format(self.config.table_name, entity.value, entity.quality, entity.tag_id,\r\n                               entity.timestamp.strftime(\"%d-%m-%Y %H:%M:%S\"), ))\r\n\r\n        self.connection.commit()\r\n        cursor.close()\r\n\r\n    def create_table(self):\r\n        cursor = self.connection.cursor()\r\n        cursor.execute(\r\n            \"CREATE TABLE if not exists {0} (value_id BIGSERIAL, tag_id bigint not null,\"\r\n            \"value text, value_quality text, value_timestamp timestamp without time zone,\"\r\n            \"PRIMARY KEY (value_id, tag_id),\"\r\n            \"FOREIGN KEY (tag_id) REFERENCES tags (tag_id))\"\r\n            .format(self.config.table_name))\r\n        self.connection.commit()\r\n\r\n        try:\r\n            cursor.execute(\r\n                \"CREATE INDEX fki_{0}_tag_id_fkey \\\r\n                ON public.\\\"{0}\\\" USING btree \\\r\n                (tag_id) \\\r\n                TABLESPACE pg_default;\"\r\n                .format(self.config.table_name))\r\n            self.connection.commit()\r\n\r\n        except Exception:\r\n            cursor.execute(\"ROLLBACK\")\r\n            self.connection.commit()\r\n\r\n        try:\r\n            cursor.execute(\r\n                \"CREATE INDEX {0}_value_timestamp_index \\\r\n                ON public.\\\"{0}\\\" USING btree \\\r\n                (value_timestamp) \\\r\n                
TABLESPACE pg_default;\"\r\n                .format(self.config.table_name))\r\n            self.connection.commit()\r\n\r\n        except Exception:\r\n            cursor.execute(\"ROLLBACK\")\r\n            self.connection.commit()\r\n\r\n        cursor.close()\r\n\r\n    def _entity_is_exist(self, tag_value):\r\n        query = \"SELECT value_id FROM {0} \" \\\r\n                \"WHERE tag_id={1} and value_timestamp='{2}';\" \\\r\n            .format(self.config.table_name, tag_value.tag_id,\r\n                    tag_value.timestamp.strftime(\"%d-%m-%Y %H:%M:%S\"))\r\n\r\n        cursor = self.connection.cursor()\r\n        cursor.execute(query)\r\n\r\n        # retrieve the records from the database\r\n        records = cursor.fetchall()\r\n        cursor.close()\r\n\r\n        # any row at all means the (tag_id, timestamp) pair already exists\r\n        return len(records) > 0\r\n","sub_path":"Model/DataAccessLayer/Repositories/db/TagValueRepository.py","file_name":"TagValueRepository.py","file_ext":"py","file_size_in_byte":3106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"869369","text":"from django.test import TestCase\nfrom django.urls import reverse\nfrom django.contrib.auth import get_user_model\nfrom snacks.models import Snack\n\n# Create your tests here.\n\nclass SnackTests(TestCase):\n    \n    def test_snack_page(self):\n        url = reverse('snack_list')\n        response = self.client.get(url)\n        self.assertEqual(response.status_code, 200)\n    \n    def test_templates(self):\n        url = reverse('snack_list')\n        response = self.client.get(url)\n        self.assertTemplateUsed(response, 'snack_list.html')\n        self.assertTemplateUsed(response, 'base.html')\n\n    def test_snack_string_representation(self):\n        snack = Snack.objects.create(\n            name = \"Dried Mango\",\n            purchaser = get_user_model().objects.create_user(\n                username=\"tester\",\n                email=\"tester@email.com\",\n                password=\"pass\"),\n            description=\"Tastes like candy\")\n        self.assertEqual(str(snack), \"Dried Mango\")\n","sub_path":"snacks/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"448438876","text":"#!/usr/bin/env python3\n\nimport asyncio\n\nimport pytest\nfrom fixtures import *\n\nfrom aiospamc import Client\nfrom aiospamc.exceptions import BadResponse, SPAMDConnectionRefused\nfrom aiospamc.headers import ContentLength, MessageClass, Remove, Set\nfrom aiospamc.options import MessageClassOption, RemoveOption, SetOption\nfrom aiospamc.responses import Response\nfrom aiospamc.requests import Request\n\n\n@pytest.mark.asyncio\nasync def test_tell_connection_refused(event_loop, unused_tcp_port, spam):\n    client = Client('localhost', unused_tcp_port, loop=event_loop)\n    with pytest.raises(SPAMDConnectionRefused):\n        response = await client.tell(MessageClassOption.spam,\n                                     spam,\n                                     SetOption(local=True, remote=True))\n\n@pytest.mark.asyncio\n@pytest.mark.usefixtures('mock_stream')\n@pytest.mark.parametrize('test_input,expected', [\n    (RemoveOption(local=True, remote=False), 'Remove: local\\r\\n'),\n    (RemoveOption(local=False, remote=True), 'Remove: remote\\r\\n'),\n    (RemoveOption(local=True, remote=True), 'Remove: local, remote\\r\\n'),\n    (SetOption(local=True, remote=False), 'Set: local\\r\\n'),\n    (SetOption(local=False, remote=True), 'Set: remote\\r\\n'),\n    (SetOption(local=True, remote=True), 'Set: local, remote\\r\\n'),\n])\nasync def test_tell_set_remove_headers(reader, writer, test_input, expected, spam):\n    client = Client()\n    response = await client.tell(MessageClassOption.spam,\n                                 spam,\n                                 test_input)\n\n    args = writer.write.call_args\n    assert expected in 
args[0][0]\n\n@pytest.mark.asyncio\n@pytest.mark.usefixtures('mock_stream')\nasync def test_tell_verb_at_start(reader, writer, spam):\n client = Client()\n response = await client.tell(MessageClassOption.spam,\n spam,\n SetOption(local=True, remote=True))\n\n args = writer.write.call_args\n assert args[0][0].startswith(b'TELL')\n\n@pytest.mark.asyncio\n@pytest.mark.usefixtures('mock_stream')\n@pytest.mark.parametrize('test_input,expected', [\n (RemoveOption(local=True, remote=False), (b'TELL',\n spam().encode(),\n bytes(MessageClass(MessageClassOption.spam)),\n bytes(Remove(RemoveOption(local=True, remote=False))))),\n (SetOption(local=True, remote=False), (b'TELL',\n spam().encode(),\n bytes(MessageClass(MessageClassOption.spam)),\n bytes(Set(SetOption(local=True, remote=False)))))\n])\nasync def test_tell_request_call(reader, writer, test_input, expected, spam):\n client = Client()\n response = await client.tell(MessageClassOption.spam,\n spam,\n test_input)\n\n args = writer.write.call_args\n\n assert all([phrase in args[0][0] for phrase in expected])\n\n@pytest.mark.asyncio\n@pytest.mark.usefixtures('mock_stream')\nasync def test_tell_valid_response(spam):\n client = Client()\n response = await client.tell(MessageClassOption.spam,\n spam,\n SetOption(local=True, remote=True))\n\n assert isinstance(response, Response)\n\n@pytest.mark.asyncio\n@pytest.mark.usefixtures('mock_stream')\n@pytest.mark.responses(response_invalid())\nasync def test_tell_invalid_response(spam):\n client = Client()\n with pytest.raises(BadResponse):\n response = await client.tell(MessageClassOption.spam,\n spam,\n SetOption(local=True, remote=True))\n\n@pytest.mark.asyncio\n@pytest.mark.usefixtures('mock_stream')\nasync def test_tell_valid_request(reader, writer, spam):\n client = Client()\n response = await client.tell(MessageClassOption.spam,\n spam,\n SetOption(local=True, remote=True))\n\n args = writer.write.call_args[0][0].decode()\n # We can't guarantee the order of the headers, so we have to break things up\n assert args.startswith('TELL SPAMC/1.5\\r\\n')\n assert 'Set: local, remote\\r\\n' in args\n assert 'Message-class: spam\\r\\n' in args\n assert 'Content-length: {}\\r\\n'.format(len(spam.encode())) in args\n assert args.endswith(spam)\n\n@pytest.mark.asyncio\n@pytest.mark.usefixtures('mock_stream')\nasync def test_tell_compress_header_request(reader, writer, spam):\n client = Client(compress=True)\n response = await client.tell(MessageClassOption.spam,\n spam,\n SetOption(local=True, remote=True))\n\n args = writer.write.call_args\n assert b'Compress:' in args[0][0]\n\n@pytest.mark.asyncio\n@pytest.mark.usefixtures('mock_stream')\nasync def test_tell_user_header_request(reader, writer, spam):\n client = Client(user='TestUser')\n response = await client.tell(MessageClassOption.spam,\n spam,\n SetOption(local=True, remote=True))\n\n args = writer.write.call_args\n assert b'User: TestUser' in args[0][0]\n","sub_path":"tests/client/test_tell.py","file_name":"test_tell.py","file_ext":"py","file_size_in_byte":5269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"455584025","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# #########################################################################\n# Copyright (c) 2018, UChicago Argonne, LLC. All rights reserved. #\n# #\n# Copyright 2018. UChicago Argonne, LLC. This software was produced #\n# under U.S. 
Government contract DE-AC02-06CH11357 for Argonne National #\n# Laboratory (ANL), which is operated by UChicago Argonne, LLC for the #\n# U.S. Department of Energy. The U.S. Government has rights to use, #\n# reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR #\n# UChicago Argonne, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR #\n# ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is #\n# modified to produce derivative works, such modified software should #\n# be clearly marked, so as not to confuse it with the version available #\n# from ANL. #\n# #\n# Additionally, redistribution and use in source and binary forms, with #\n# or without modification, are permitted provided that the following #\n# conditions are met: #\n# #\n# * Redistributions of source code must retain the above copyright #\n# notice, this list of conditions and the following disclaimer. #\n# #\n# * Redistributions in binary form must reproduce the above copyright #\n# notice, this list of conditions and the following disclaimer in #\n# the documentation and/or other materials provided with the #\n# distribution. #\n# #\n# * Neither the name of UChicago Argonne, LLC, Argonne National #\n# Laboratory, ANL, the U.S. Government, nor the names of its #\n# contributors may be used to endorse or promote products derived #\n# from this software without specific prior written permission. #\n# #\n# THIS SOFTWARE IS PROVIDED BY UChicago Argonne, LLC AND CONTRIBUTORS #\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UChicago #\n# Argonne, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, #\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, #\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; #\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER #\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT #\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN #\n# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #\n# POSSIBILITY OF SUCH DAMAGE. 
#\n# #########################################################################\n\nimport os, sys\nimport time\nimport numpy\nimport scipy.ndimage.filters as filters\nimport scipy.ndimage.interpolation as interpolation\nimport scipy.ndimage.fourier as fourier\n\nfrom PyQt5.QtWidgets import QMessageBox, QFileDialog, QInputDialog\nfrom PyQt5.QtGui import QTextCursor\n\nfrom orangewidget import gui\nfrom orangewidget.settings import Setting\nfrom oasys.widgets import gui as oasysgui\nfrom oasys.widgets import congruence\nfrom oasys.widgets.gui import ConfirmDialog\n\nfrom oasys.util.oasys_util import EmittingStream\n\nfrom orangecontrib.shadow.util.shadow_objects import ShadowBeam\nfrom orangecontrib.shadow.util.shadow_util import ShadowCongruence, ShadowPlot\nfrom orangecontrib.shadow.widgets.gui.ow_automatic_element import AutomaticElement\nfrom orangecontrib.aps.shadow.util.gui import PowerPlotXYWidget\n\nclass PowerPlotXY(AutomaticElement):\n\n name = \"Power Plot XY\"\n description = \"Display Data Tools: Power Plot XY\"\n icon = \"icons/plot_xy_power.png\"\n maintainer = \"Luca Rebuffi\"\n maintainer_email = \"lrebuffi(@at@)anl.gov\"\n priority = 5.1\n category = \"Display Data Tools\"\n keywords = [\"data\", \"file\", \"load\", \"read\"]\n\n inputs = [(\"Input Beam\", ShadowBeam, \"setBeam\")]\n\n IMAGE_WIDTH = 878\n IMAGE_HEIGHT = 570\n\n want_main_area=1\n plot_canvas=None\n input_beam=None\n\n image_plane=Setting(0)\n image_plane_new_position=Setting(10.0)\n image_plane_rel_abs_position=Setting(0)\n\n x_column_index=Setting(0)\n y_column_index=Setting(2)\n\n x_range=Setting(0)\n x_range_min=Setting(0.0)\n x_range_max=Setting(0.0)\n\n y_range=Setting(0)\n y_range_min=Setting(0.0)\n y_range_max=Setting(0.0)\n\n rays=Setting(1)\n number_of_bins=Setting(100)\n\n title=Setting(\"X,Z\")\n\n keep_result=Setting(1)\n autosave_partial_results = Setting(0)\n\n autosave = Setting(0)\n autosave_file_name = Setting(\"autosave_power_density.hdf5\")\n\n kind_of_calculation = Setting(0)\n replace_poor_statistic = Setting(0)\n good_rays_limit = Setting(100)\n center_x = Setting(0.0)\n center_y = Setting(0.0)\n sigma_x = Setting(0.0)\n sigma_y = Setting(0.0)\n gamma = Setting(0.0)\n\n\n loaded_plot_file_name = \"\"\n\n new_nbins_h = Setting(25)\n new_nbins_v = Setting(25)\n\n filter = Setting(3)\n filter_sigma_h = Setting(1.0)\n filter_sigma_v = Setting(1.0)\n filter_mode = Setting(0)\n filter_cval = Setting(0.0)\n filter_spline_order = Setting(2)\n masking_level = Setting(1e-3)\n\n cumulated_ticket=None\n plotted_ticket = None\n energy_min = None\n energy_max = None\n energy_step = None\n total_power = None\n cumulated_total_power = None\n\n plotted_ticket_original = None\n\n view_type=Setting(1)\n\n autosave_file = None\n\n def __init__(self):\n super().__init__(show_automatic_box=False)\n\n button_box = oasysgui.widgetBox(self.controlArea, \"\", addSpace=False, orientation=\"horizontal\")\n\n gui.button(button_box, self, \"Plot Data\", callback=self.plot_cumulated_data, height=45)\n gui.button(button_box, self, \"Save Plot\", callback=self.save_cumulated_data, height=45)\n\n gui.separator(self.controlArea, 10)\n\n self.tabs_setting = oasysgui.tabWidget(self.controlArea)\n self.tabs_setting.setFixedWidth(self.CONTROL_AREA_WIDTH-5)\n\n # graph tab\n tab_set = oasysgui.createTabPage(self.tabs_setting, \"Plot Settings\")\n tab_gen = oasysgui.createTabPage(self.tabs_setting, \"Histogram Settings\")\n tab_post = oasysgui.createTabPage(self.tabs_setting, \"Post Processing\")\n\n screen_box = 
oasysgui.widgetBox(tab_set, \"Screen Position Settings\", addSpace=True, orientation=\"vertical\", height=120)\n\n self.image_plane_combo = gui.comboBox(screen_box, self, \"image_plane\", label=\"Position of the Image\",\n items=[\"On Image Plane\", \"Retraced\"], labelWidth=260,\n callback=self.set_ImagePlane, sendSelectedValue=False, orientation=\"horizontal\")\n\n self.image_plane_box = oasysgui.widgetBox(screen_box, \"\", addSpace=False, orientation=\"vertical\", height=50)\n self.image_plane_box_empty = oasysgui.widgetBox(screen_box, \"\", addSpace=False, orientation=\"vertical\", height=50)\n\n oasysgui.lineEdit(self.image_plane_box, self, \"image_plane_new_position\", \"Image Plane new Position\", labelWidth=220, valueType=float, orientation=\"horizontal\")\n\n gui.comboBox(self.image_plane_box, self, \"image_plane_rel_abs_position\", label=\"Position Type\", labelWidth=250,\n items=[\"Absolute\", \"Relative\"], sendSelectedValue=False, orientation=\"horizontal\")\n\n self.set_ImagePlane()\n\n general_box = oasysgui.widgetBox(tab_set, \"Variables Settings\", addSpace=True, orientation=\"vertical\", height=350)\n\n self.x_column = gui.comboBox(general_box, self, \"x_column_index\", label=\"X Column\",labelWidth=70,\n items=[\"1: X\",\n \"2: Y\",\n \"3: Z\",\n ],\n sendSelectedValue=False, orientation=\"horizontal\")\n\n gui.comboBox(general_box, self, \"x_range\", label=\"X Range\", labelWidth=250,\n items=[\"\",\n \"Set..\"],\n callback=self.set_XRange, sendSelectedValue=False, orientation=\"horizontal\")\n\n self.xrange_box = oasysgui.widgetBox(general_box, \"\", addSpace=True, orientation=\"vertical\", height=100)\n self.xrange_box_empty = oasysgui.widgetBox(general_box, \"\", addSpace=True, orientation=\"vertical\", height=100)\n\n oasysgui.lineEdit(self.xrange_box, self, \"x_range_min\", \"X min\", labelWidth=220, valueType=float, orientation=\"horizontal\")\n oasysgui.lineEdit(self.xrange_box, self, \"x_range_max\", \"X max\", labelWidth=220, valueType=float, orientation=\"horizontal\")\n\n self.set_XRange()\n\n self.y_column = gui.comboBox(general_box, self, \"y_column_index\", label=\"Y Column\",labelWidth=70,\n items=[\"1: X\",\n \"2: Y\",\n \"3: Z\",\n ],\n\n sendSelectedValue=False, orientation=\"horizontal\")\n\n gui.comboBox(general_box, self, \"y_range\", label=\"Y Range\",labelWidth=250,\n items=[\"\",\n \"Set..\"],\n callback=self.set_YRange, sendSelectedValue=False, orientation=\"horizontal\")\n\n self.yrange_box = oasysgui.widgetBox(general_box, \"\", addSpace=True, orientation=\"vertical\", height=100)\n self.yrange_box_empty = oasysgui.widgetBox(general_box, \"\", addSpace=True, orientation=\"vertical\", height=100)\n\n oasysgui.lineEdit(self.yrange_box, self, \"y_range_min\", \"Y min\", labelWidth=220, valueType=float, orientation=\"horizontal\")\n oasysgui.lineEdit(self.yrange_box, self, \"y_range_max\", \"Y max\", labelWidth=220, valueType=float, orientation=\"horizontal\")\n\n self.set_YRange()\n\n self.cb_rays = gui.comboBox(general_box, self, \"rays\", label=\"Power\", labelWidth=250,\n items=[\"Transmitted\", \"Absorbed (Lost)\", \"Absorbed (Still Good)\"],\n sendSelectedValue=False, orientation=\"horizontal\")\n\n autosave_box = oasysgui.widgetBox(tab_gen, \"Autosave\", addSpace=True, orientation=\"vertical\", height=85)\n\n gui.comboBox(autosave_box, self, \"autosave\", label=\"Save automatically plot into file\", labelWidth=250,\n items=[\"No\", \"Yes\"],\n sendSelectedValue=False, orientation=\"horizontal\", callback=self.set_autosave)\n\n 
self.autosave_box_1 = oasysgui.widgetBox(autosave_box, \"\", addSpace=False, orientation=\"horizontal\", height=25)\n self.autosave_box_2 = oasysgui.widgetBox(autosave_box, \"\", addSpace=False, orientation=\"horizontal\", height=25)\n\n self.le_autosave_file_name = oasysgui.lineEdit(self.autosave_box_1, self, \"autosave_file_name\", \"File Name\", labelWidth=100, valueType=str, orientation=\"horizontal\")\n\n gui.button(self.autosave_box_1, self, \"...\", callback=self.selectAutosaveFile)\n\n incremental_box = oasysgui.widgetBox(tab_gen, \"Incremental Result\", addSpace=True, orientation=\"vertical\", height=120)\n\n gui.comboBox(incremental_box, self, \"keep_result\", label=\"Keep Result\", labelWidth=250,\n items=[\"No\", \"Yes\"], sendSelectedValue=False, orientation=\"horizontal\", callback=self.set_autosave)\n\n self.cb_autosave_partial_results = gui.comboBox(incremental_box, self, \"autosave_partial_results\", label=\"Save partial plots into file\", labelWidth=250,\n items=[\"No\", \"Yes\"], sendSelectedValue=False, orientation=\"horizontal\")\n\n gui.button(incremental_box, self, \"Clear\", callback=self.clearResults)\n\n self.set_autosave()\n\n histograms_box = oasysgui.widgetBox(tab_gen, \"Histograms settings\", addSpace=True, orientation=\"vertical\", height=270)\n\n oasysgui.lineEdit(histograms_box, self, \"number_of_bins\", \"Number of Bins\", labelWidth=250, valueType=int, orientation=\"horizontal\")\n\n gui.separator(histograms_box)\n\n gui.comboBox(histograms_box, self, \"kind_of_calculation\", label=\"Kind of Calculation\", labelWidth=200,\n items=[\"From Rays\", \"Flat Distribution\", \"Gaussian Distribution\", \"Lorentzian Distribution\"], sendSelectedValue=False, orientation=\"horizontal\", callback=self.set_kind_of_calculation)\n\n self.poor_statics_cb = gui.comboBox(histograms_box, self, \"replace_poor_statistic\", label=\"Activate on Poor Statistics\", labelWidth=250,\n items=[\"No\", \"Yes\"], sendSelectedValue=False, orientation=\"horizontal\", callback=self.set_manage_poor_statistics)\n\n self.poor_statistics_box_1 = oasysgui.widgetBox(histograms_box, \"\", addSpace=False, orientation=\"vertical\", height=30)\n self.poor_statistics_box_2 = oasysgui.widgetBox(histograms_box, \"\", addSpace=False, orientation=\"vertical\", height=30)\n\n self.le_autosave_file_name = oasysgui.lineEdit(self.poor_statistics_box_1, self, \"good_rays_limit\", \"Good Rays Limit\", labelWidth=100, valueType=int, orientation=\"horizontal\")\n\n self.kind_of_calculation_box_1 = oasysgui.widgetBox(histograms_box, \"\", addSpace=False, orientation=\"vertical\", height=110)\n self.kind_of_calculation_box_2 = oasysgui.widgetBox(histograms_box, \"\", addSpace=False, orientation=\"vertical\", height=110)\n self.kind_of_calculation_box_3 = oasysgui.widgetBox(histograms_box, \"\", addSpace=False, orientation=\"vertical\", height=110)\n\n self.le_g_sigma_x = oasysgui.lineEdit(self.kind_of_calculation_box_2, self, \"sigma_x\", \"Sigma H\", labelWidth=100, valueType=float, orientation=\"horizontal\")\n self.le_g_sigma_y = oasysgui.lineEdit(self.kind_of_calculation_box_2, self, \"sigma_y\", \"Sigma V\", labelWidth=100, valueType=float, orientation=\"horizontal\")\n self.le_g_center_x = oasysgui.lineEdit(self.kind_of_calculation_box_2, self, \"center_x\", \"Center H\", labelWidth=100, valueType=float, orientation=\"horizontal\")\n self.le_g_center_y = oasysgui.lineEdit(self.kind_of_calculation_box_2, self, \"center_y\", \"Center V\", labelWidth=100, valueType=float, orientation=\"horizontal\")\n\n 
self.le_l_gamma = oasysgui.lineEdit(self.kind_of_calculation_box_3, self, \"gamma\", \"Gamma\", labelWidth=100, valueType=float, orientation=\"horizontal\")\n self.le_l_center_x = oasysgui.lineEdit(self.kind_of_calculation_box_3, self, \"center_x\", \"Center H\", labelWidth=100, valueType=float, orientation=\"horizontal\")\n self.le_l_center_y = oasysgui.lineEdit(self.kind_of_calculation_box_3, self, \"center_y\", \"Center V\", labelWidth=100, valueType=float, orientation=\"horizontal\")\n\n self.set_kind_of_calculation()\n\n # post porcessing\n\n post_box = oasysgui.widgetBox(tab_post, \"Post Processing Setting\", addSpace=False, orientation=\"vertical\", height=500)\n\n post_box_1 = oasysgui.widgetBox(post_box, \"\", addSpace=False, orientation=\"horizontal\", height=25)\n self.le_loaded_plot_file_name = oasysgui.lineEdit(post_box_1, self, \"loaded_plot_file_name\", \"Loaded File\", labelWidth=100, valueType=str, orientation=\"horizontal\")\n gui.button(post_box_1, self, \"...\", callback=self.selectPlotFile)\n\n gui.separator(post_box)\n\n button_box = oasysgui.widgetBox(post_box, \"\", addSpace=False, orientation=\"vertical\")\n gui.button(button_box, self, \"Reset\", callback=self.reloadPlot, height=35)\n gui.separator(button_box)\n gui.button(button_box, self, \"Invert\", callback=self.invertPlot, height=35)\n\n gui.separator(post_box)\n\n button_box = oasysgui.widgetBox(post_box, \"\", addSpace=False, orientation=\"horizontal\")\n gui.button(button_box, self, \"Rebin Plot\", callback=self.rebinPlot, height=35)\n\n post_box_0 = oasysgui.widgetBox(post_box, \"\", addSpace=False, orientation=\"vertical\", height=60)\n oasysgui.lineEdit(post_box_0, self, \"new_nbins_h\", \"Nr. Bins H\", labelWidth=200, valueType=int, orientation=\"horizontal\")\n oasysgui.lineEdit(post_box_0, self, \"new_nbins_v\", \"Nr. 
Bins V\", labelWidth=200, valueType=int, orientation=\"horizontal\")\n\n button_box = oasysgui.widgetBox(post_box, \"\", addSpace=False, orientation=\"horizontal\")\n gui.button(button_box, self, \"Smooth Plot\", callback=self.smoothPlot, height=35)\n\n gui.separator(post_box)\n\n gui.comboBox(post_box, self, \"filter\", label=\"Filter\", labelWidth=200,\n items=[\"Gaussian\",\n \"Spline\",\n \"Uniform\",\n \"Fourier-Gaussian\",\n \"Fourier-Ellipsoid\",\n \"Fourier-Uniform\",\n \"Fill Holes\"\n ], sendSelectedValue=False, orientation=\"horizontal\", callback=self.set_Filter)\n\n self.post_box_1 = oasysgui.widgetBox(post_box, \"\", addSpace=False, orientation=\"vertical\", height=110)\n self.post_box_2 = oasysgui.widgetBox(post_box, \"\", addSpace=False, orientation=\"vertical\", height=110)\n self.post_box_3 = oasysgui.widgetBox(post_box, \"\", addSpace=False, orientation=\"vertical\", height=110)\n self.post_box_4 = oasysgui.widgetBox(post_box, \"\", addSpace=False, orientation=\"vertical\", height=110)\n\n oasysgui.lineEdit(self.post_box_1, self, \"filter_sigma_h\", \"Sigma/Size H\", labelWidth=200, valueType=float, orientation=\"horizontal\")\n oasysgui.lineEdit(self.post_box_1, self, \"filter_sigma_v\", \"Sigma/Size V\", labelWidth=200, valueType=float, orientation=\"horizontal\")\n\n oasysgui.lineEdit(self.post_box_2, self, \"filter_sigma_h\", \"Sigma/Size H\", labelWidth=200, valueType=float, orientation=\"horizontal\")\n oasysgui.lineEdit(self.post_box_2, self, \"filter_sigma_v\", \"Sigma/Size V\", labelWidth=200, valueType=float, orientation=\"horizontal\")\n\n self.cb_filter_mode = gui.comboBox(self.post_box_2, self, \"filter_mode\", label=\"Mode\", labelWidth=200,\n items=[\"reflect\", \"constant\", \"nearest\", \"mirror\", \"wrap\"],\n sendSelectedValue=False, orientation=\"horizontal\", callback=self.set_FilterMode)\n\n self.le_filter_cval = oasysgui.lineEdit(self.post_box_2, self, \"filter_cval\", \"Constant Value\", labelWidth=250, valueType=float, orientation=\"horizontal\")\n\n oasysgui.lineEdit(self.post_box_3, self, \"filter_spline_order\", \"Spline Order\", labelWidth=250, valueType=int, orientation=\"horizontal\")\n\n gui.separator(post_box)\n\n oasysgui.lineEdit(post_box, self, \"masking_level\", \"Mask if < factor of max value\", labelWidth=250, valueType=float, orientation=\"horizontal\")\n\n self.set_Filter()\n\n self.main_tabs = oasysgui.tabWidget(self.mainArea)\n plot_tab = oasysgui.createTabPage(self.main_tabs, \"Plots\")\n out_tab = oasysgui.createTabPage(self.main_tabs, \"Output\")\n\n view_box = oasysgui.widgetBox(plot_tab, \"Plotting\", addSpace=False, orientation=\"vertical\", width=self.IMAGE_WIDTH)\n view_box_1 = oasysgui.widgetBox(view_box, \"\", addSpace=False, orientation=\"vertical\", width=350)\n\n gui.comboBox(view_box_1, self, \"view_type\", label=\"Plot Accumulated Results\", labelWidth=320,\n items=[\"No\", \"Yes\"], sendSelectedValue=False, orientation=\"horizontal\")\n\n self.image_box = gui.widgetBox(plot_tab, \"Plot Result\", addSpace=True, orientation=\"vertical\")\n self.image_box.setFixedHeight(self.IMAGE_HEIGHT)\n self.image_box.setFixedWidth(self.IMAGE_WIDTH)\n\n self.shadow_output = oasysgui.textArea(height=580, width=800)\n\n out_box = gui.widgetBox(out_tab, \"System Output\", addSpace=True, orientation=\"horizontal\")\n out_box.layout().addWidget(self.shadow_output)\n\n def clearResults(self, interactive=True):\n if not interactive: proceed = True\n else: proceed = ConfirmDialog.confirmed(parent=self)\n\n if proceed:\n self.input_beam 
= None\n self.cumulated_ticket = None\n self.plotted_ticket = None\n self.energy_min = None\n self.energy_max = None\n self.energy_step = None\n self.total_power = None\n self.cumulated_total_power = None\n\n if not self.autosave_file is None:\n self.autosave_file.close()\n self.autosave_file = None\n\n if not self.plot_canvas is None:\n self.plot_canvas.clear()\n\n def set_kind_of_calculation(self):\n self.kind_of_calculation_box_1.setVisible(self.kind_of_calculation<=1)\n self.kind_of_calculation_box_2.setVisible(self.kind_of_calculation==2)\n self.kind_of_calculation_box_3.setVisible(self.kind_of_calculation==3)\n\n if self.kind_of_calculation > 0:\n self.poor_statics_cb.setEnabled(True)\n else:\n self.poor_statics_cb.setEnabled(False)\n self.replace_poor_statistic = 0\n\n self.set_manage_poor_statistics()\n\n def set_manage_poor_statistics(self):\n self.poor_statistics_box_1.setVisible(self.replace_poor_statistic==1)\n self.poor_statistics_box_2.setVisible(self.replace_poor_statistic==0)\n\n def set_autosave(self):\n self.autosave_box_1.setVisible(self.autosave==1)\n self.autosave_box_2.setVisible(self.autosave==0)\n\n self.cb_autosave_partial_results.setEnabled(self.autosave==1 and self.keep_result==1)\n\n def set_ImagePlane(self):\n self.image_plane_box.setVisible(self.image_plane==1)\n self.image_plane_box_empty.setVisible(self.image_plane==0)\n\n def set_XRange(self):\n self.xrange_box.setVisible(self.x_range == 1)\n self.xrange_box_empty.setVisible(self.x_range == 0)\n\n def set_YRange(self):\n self.yrange_box.setVisible(self.y_range == 1)\n self.yrange_box_empty.setVisible(self.y_range == 0)\n\n def set_Filter(self):\n self.post_box_1.setVisible(3<=self.filter<=5)\n self.post_box_2.setVisible(self.filter==0 or self.filter==2)\n self.post_box_3.setVisible(self.filter==1 )\n self.post_box_4.setVisible(self.filter==6)\n\n if self.filter==0 or self.filter==2: self.set_FilterMode()\n\n def set_FilterMode(self):\n self.le_filter_cval.setEnabled(self.filter_mode==1)\n\n def selectAutosaveFile(self):\n self.le_autosave_file_name.setText(oasysgui.selectFileFromDialog(self, self.autosave_file_name, \"Select File\", file_extension_filter=\"HDF5 Files (*.hdf5 *.h5 *.hdf)\"))\n\n def selectPlotFile(self):\n file_name = oasysgui.selectFileFromDialog(self, None, \"Select File\", file_extension_filter=\"HDF5 Files (*.hdf5 *.h5 *.hdf)\")\n\n if not file_name is None:\n self.le_loaded_plot_file_name.setText(os.path.basename(os.path.normpath(file_name)))\n\n plot_file = ShadowPlot.PlotXYHdf5File(congruence.checkDir(file_name), mode=\"r\")\n\n ticket = {}\n\n ticket[\"histogram\"], ticket[\"histogram_h\"], ticket[\"histogram_v\"], attributes = plot_file.get_last_plot(dataset_name=\"power_density\")\n ticket[\"bin_h_center\"], ticket[\"bin_v_center\"], ticket[\"h_label\"], ticket[\"v_label\"] = plot_file.get_coordinates()\n ticket[\"intensity\"] = attributes[\"intensity\"]\n ticket[\"nrays\"] = attributes[\"total_rays\"]\n ticket[\"good_rays\"] = attributes[\"good_rays\"]\n\n if self.plot_canvas is None:\n self.plot_canvas = PowerPlotXYWidget()\n self.image_box.layout().addWidget(self.plot_canvas)\n else:\n if not self.plotted_ticket is None:\n if QMessageBox.question(self, \"Load Plot\", \"Merge with current Plot?\", QMessageBox.Yes | QMessageBox.No, QMessageBox.No) == QMessageBox.Yes:\n if ticket[\"histogram\"].shape == self.plotted_ticket[\"histogram\"].shape and \\\n ticket[\"bin_h_center\"].shape == self.plotted_ticket[\"bin_h_center\"].shape and \\\n ticket[\"bin_v_center\"].shape == 
self.plotted_ticket[\"bin_v_center\"].shape and \\\n ticket[\"bin_h_center\"][0] == self.plotted_ticket[\"bin_h_center\"][0] and \\\n ticket[\"bin_h_center\"][-1] == self.plotted_ticket[\"bin_h_center\"][-1] and \\\n ticket[\"bin_v_center\"][0] == self.plotted_ticket[\"bin_v_center\"][0] and \\\n ticket[\"bin_v_center\"][-1] == self.plotted_ticket[\"bin_v_center\"][-1]:\n ticket[\"histogram\"] += self.plotted_ticket[\"histogram\"]\n\n if QMessageBox.question(self, \"Load Plot\", \"Average with current Plot?\", QMessageBox.Yes | QMessageBox.No, QMessageBox.No) == QMessageBox.Yes:\n ticket[\"histogram\"] *= 0.5\n else:\n raise ValueError(\"The plots cannot be merged: the should have same dimensions and ranges\")\n\n cumulated_power_plot = numpy.sum(ticket[\"histogram\"])*(ticket[\"bin_h_center\"][1]-ticket[\"bin_h_center\"][0])*(ticket[\"bin_v_center\"][1]-ticket[\"bin_v_center\"][0])\n\n try:\n energy_min=0.0\n energy_max=0.0\n energy_step=0.0\n\n self.plot_canvas.cumulated_power_plot = cumulated_power_plot\n self.plot_canvas.plot_power_density_ticket(ticket,\n ticket[\"h_label\"],\n ticket[\"v_label\"],\n cumulated_total_power=0.0,\n energy_min=energy_min,\n energy_max=energy_max,\n energy_step=energy_step)\n\n self.cumulated_ticket = None\n self.plotted_ticket = ticket\n self.plotted_ticket_original = ticket.copy()\n except Exception as e:\n QMessageBox.critical(self, \"Error\", str(e), QMessageBox.Ok)\n\n if self.IS_DEVELOP: raise e\n\n def reloadPlot(self):\n if not self.plotted_ticket_original is None:\n ticket = self.plotted_ticket_original.copy()\n\n if self.plot_canvas is None:\n self.plot_canvas = PowerPlotXYWidget()\n self.image_box.layout().addWidget(self.plot_canvas)\n\n cumulated_power_plot = numpy.sum(ticket[\"histogram\"])*(ticket[\"bin_h_center\"][1]-ticket[\"bin_h_center\"][0])*(ticket[\"bin_v_center\"][1]-ticket[\"bin_v_center\"][0])\n\n try:\n energy_min=0.0\n energy_max=0.0\n energy_step=0.0\n\n self.plot_canvas.cumulated_power_plot = cumulated_power_plot\n self.plot_canvas.plot_power_density_ticket(ticket,\n ticket[\"h_label\"],\n ticket[\"v_label\"],\n cumulated_total_power=0.0,\n energy_min=energy_min,\n energy_max=energy_max,\n energy_step=energy_step)\n\n\n self.plotted_ticket = ticket\n except Exception as e:\n QMessageBox.critical(self, \"Error\", str(e), QMessageBox.Ok)\n\n if self.IS_DEVELOP: raise e\n\n def invertPlot(self):\n if not self.plotted_ticket is None:\n try:\n ticket = self.plotted_ticket.copy()\n\n histogram = ticket[\"histogram\"]\n h_coord = ticket[\"bin_h_center\"]\n v_coord = ticket[\"bin_v_center\"]\n\n h_coord, v_coord, histogram = self.invert(h_coord, v_coord, histogram)\n\n ticket[\"histogram\"] = histogram\n ticket[\"bin_h_center\"] = h_coord\n ticket[\"bin_v_center\"] = v_coord\n\n pixel_area = (h_coord[1]-h_coord[0])*(v_coord[1]-v_coord[0])\n\n if self.plot_canvas is None:\n self.plot_canvas = PowerPlotXYWidget()\n self.image_box.layout().addWidget(self.plot_canvas)\n\n cumulated_power_plot = numpy.sum(histogram)*pixel_area\n\n energy_min = 0.0\n energy_max = 0.0\n energy_step = 0.0\n\n self.plot_canvas.cumulated_power_plot = cumulated_power_plot\n self.plot_canvas.plot_power_density_ticket(ticket,\n ticket[\"v_label\"],\n ticket[\"h_label\"],\n cumulated_total_power=0.0,\n energy_min=energy_min,\n energy_max=energy_max,\n energy_step=energy_step)\n\n self.plotted_ticket = ticket\n except Exception as e:\n QMessageBox.critical(self, \"Error\", str(e), QMessageBox.Ok)\n\n if self.IS_DEVELOP: raise e\n\n def rebinPlot(self):\n if not 
self.plotted_ticket is None:\n try:\n congruence.checkStrictlyPositiveNumber(self.new_nbins_h, \"Nr. Bins H\")\n congruence.checkStrictlyPositiveNumber(self.new_nbins_v, \"Nr. Bins V\")\n\n ticket = self.plotted_ticket.copy()\n\n histogram = ticket[\"histogram\"]\n h_coord = ticket[\"bin_h_center\"]\n v_coord = ticket[\"bin_v_center\"]\n\n h_coord, v_coord, histogram = self.rebin(h_coord, v_coord, histogram, (int(self.new_nbins_h), int(self.new_nbins_v)))\n\n ticket[\"histogram\"] = histogram\n ticket[\"bin_h_center\"] = h_coord\n ticket[\"bin_v_center\"] = v_coord\n\n pixel_area = (h_coord[1]-h_coord[0])*(v_coord[1]-v_coord[0])\n\n if self.plot_canvas is None:\n self.plot_canvas = PowerPlotXYWidget()\n self.image_box.layout().addWidget(self.plot_canvas)\n\n cumulated_power_plot = numpy.sum(histogram)*pixel_area\n\n energy_min = 0.0\n energy_max = 0.0\n energy_step = 0.0\n\n self.plot_canvas.cumulated_power_plot = cumulated_power_plot\n self.plot_canvas.plot_power_density_ticket(ticket,\n ticket[\"h_label\"],\n ticket[\"v_label\"],\n cumulated_total_power=0.0,\n energy_min=energy_min,\n energy_max=energy_max,\n energy_step=energy_step)\n\n self.plotted_ticket = ticket\n except Exception as e:\n QMessageBox.critical(self, \"Error\", str(e), QMessageBox.Ok)\n\n if self.IS_DEVELOP: raise e\n\n def smoothPlot(self):\n if not self.plotted_ticket is None:\n try:\n if self.filter==0 or 2<=self.filter<=5:\n congruence.checkStrictlyPositiveNumber(self.filter_sigma_h, \"Sigma/Size H\")\n congruence.checkStrictlyPositiveNumber(self.filter_sigma_v, \"Sigma/Size V\")\n\n if self.filter == 1: congruence.checkStrictlyPositiveNumber(self.filter_spline_order, \"Spline Order\")\n\n ticket = self.plotted_ticket.copy()\n\n mask = numpy.where(self.plotted_ticket[\"histogram\"] <= self.plotted_ticket[\"histogram\"].max()*self.masking_level)\n\n histogram = ticket[\"histogram\"]\n h_coord = ticket[\"bin_h_center\"]\n v_coord = ticket[\"bin_v_center\"]\n\n norm = histogram.sum()\n\n pixel_area = (h_coord[1]-h_coord[0])*(v_coord[1]-v_coord[0])\n\n filter_mode = self.cb_filter_mode.currentText()\n\n if self.filter == 0:\n histogram = filters.gaussian_filter(histogram, sigma=(self.filter_sigma_h, self.filter_sigma_v), mode=filter_mode, cval=self.filter_cval)\n elif self.filter == 1:\n histogram = interpolation.spline_filter(histogram, order=int(self.filter_spline_order))\n elif self.filter == 2:\n histogram = filters.uniform_filter(histogram, size=(int(self.filter_sigma_h), int(self.filter_sigma_v)), mode=filter_mode, cval=self.filter_cval)\n elif self.filter == 3:\n histogram = numpy.real(numpy.fft.ifft2(fourier.fourier_gaussian(numpy.fft.fft2(histogram), sigma=(self.filter_sigma_h, self.filter_sigma_v))))\n elif self.filter == 4:\n histogram = numpy.real(numpy.fft.ifft2(fourier.fourier_ellipsoid(numpy.fft.fft2(histogram), size=(self.filter_sigma_h, self.filter_sigma_v))))\n elif self.filter == 5:\n histogram = numpy.real(numpy.fft.ifft2(fourier.fourier_uniform(numpy.fft.fft2(histogram), size=(self.filter_sigma_h, self.filter_sigma_v))))\n elif self.filter == 6:\n histogram = self.apply_fill_holes(histogram)\n\n histogram[mask] = 0.0\n\n norm /= histogram.sum()\n\n ticket[\"histogram\"] = histogram*norm\n \n if self.plot_canvas is None:\n self.plot_canvas = PowerPlotXYWidget()\n self.image_box.layout().addWidget(self.plot_canvas)\n\n cumulated_power_plot = numpy.sum(histogram)*pixel_area\n\n energy_min=0.0\n energy_max=0.0\n energy_step=0.0\n\n self.plot_canvas.cumulated_power_plot = cumulated_power_plot\n 
self.plot_canvas.plot_power_density_ticket(ticket,\n ticket[\"h_label\"],\n ticket[\"v_label\"],\n cumulated_total_power=0.0,\n energy_min=energy_min,\n energy_max=energy_max,\n energy_step=energy_step)\n\n self.plotted_ticket = ticket\n except Exception as e:\n QMessageBox.critical(self, \"Error\", str(e), QMessageBox.Ok)\n\n if self.IS_DEVELOP: raise e\n\n def rebin(self, x, y, z, new_shape):\n shape = (new_shape[0], z.shape[0] // new_shape[0], new_shape[1], z.shape[1] // new_shape[1])\n\n return numpy.linspace(x[0], x[-1], new_shape[0]), \\\n numpy.linspace(y[0], y[-1], new_shape[1]), \\\n z.reshape(shape).mean(-1).mean(1)\n\n def invert(self, x, y, data):\n return y, x, data.T\n\n def apply_fill_holes(self, histogram):\n from skimage.morphology import reconstruction\n\n seed = numpy.copy(histogram)\n seed[1:-1, 1:-1] = histogram.max()\n\n filled = reconstruction(seed=seed, mask=histogram, method='erosion')\n\n return filled*(histogram.sum()/filled.sum())\n\n def save_cumulated_data(self):\n file_name, _ = QFileDialog.getSaveFileName(self, \"Save Current Plot\", filter=\"HDF5 Files (*.hdf5 *.h5 *.hdf);;Text Files (*.dat *.txt)\")\n\n if not file_name is None and not file_name.strip()==\"\":\n items = (\"Hdf5 only\", \"Text only\", \"Hdf5 and Text\")\n\n item, ok = QInputDialog.getItem(self, \"Select Output Format\", \"Formats: \", items, 2, False)\n\n if ok and item:\n if item == \"Hdf5 only\" or item == \"Hdf5 and Text\":\n self.save_cumulated_data_hdf5(file_name)\n if item == \"Text only\" or item == \"Hdf5 and Text\":\n self.save_cumulated_data_txt(file_name)\n\n def save_cumulated_data_hdf5(self, file_name):\n if not self.plotted_ticket is None:\n try:\n save_file = ShadowPlot.PlotXYHdf5File(congruence.checkDir(os.path.splitext(file_name)[0] + \".hdf5\"))\n\n save_file.write_coordinates(self.plotted_ticket)\n save_file.add_plot_xy(self.plotted_ticket, dataset_name=\"power_density\")\n\n save_file.close()\n except Exception as exception:\n QMessageBox.critical(self, \"Error\", str(exception), QMessageBox.Ok)\n\n if self.IS_DEVELOP: raise exception\n\n def save_cumulated_data_txt(self, file_name):\n if not self.plotted_ticket is None:\n try:\n save_file = open(os.path.splitext(file_name)[0] + \".dat\", \"w\")\n\n x_values = self.plotted_ticket[\"bin_h_center\"]\n y_values = self.plotted_ticket[\"bin_v_center\"]\n z_values = self.plotted_ticket[\"histogram\"]\n\n for i in range(len(x_values)):\n for j in range(len(y_values)):\n row = str(x_values[i]) + \" \" + str(y_values[j]) + \" \" + str(z_values[i, j])\n\n if i+j > 0: row = \"\\n\" + row\n\n save_file.write(row)\n\n save_file.flush()\n save_file.close()\n except Exception as exception:\n QMessageBox.critical(self, \"Error\", str(exception), QMessageBox.Ok)\n\n if self.IS_DEVELOP: raise exception\n\n def replace_fig(self, shadow_beam, var_x, var_y, xrange, yrange, nbins, nolost):\n if self.plot_canvas is None:\n self.plot_canvas = PowerPlotXYWidget()\n self.image_box.layout().addWidget(self.plot_canvas)\n\n try:\n\n if self.autosave == 1:\n if self.autosave_file is None:\n self.autosave_file = ShadowPlot.PlotXYHdf5File(congruence.checkDir(self.autosave_file_name))\n elif self.autosave_file.filename != congruence.checkFileName(self.autosave_file_name):\n self.autosave_file.close()\n self.autosave_file = ShadowPlot.PlotXYHdf5File(congruence.checkDir(self.autosave_file_name))\n\n if self.keep_result == 1:\n self.cumulated_ticket, last_ticket = self.plot_canvas.plot_power_density(shadow_beam, var_x, var_y,\n self.total_power, 
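# (editor's note) 'ticket_to_add=self.cumulated_ticket' below is what makes the\n                                                                             # power density accumulate across the energy scan while 'keep_result' is on\n                                                                             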
self.cumulated_total_power,\n self.energy_min, self.energy_max, self.energy_step,\n nbins=nbins, xrange=xrange, yrange=yrange, nolost=nolost,\n ticket_to_add=self.cumulated_ticket,\n to_mm=self.workspace_units_to_mm,\n show_image=self.view_type==1,\n kind_of_calculation=self.kind_of_calculation,\n replace_poor_statistic=self.replace_poor_statistic,\n good_rays_limit=self.good_rays_limit,\n center_x=self.center_x,\n center_y=self.center_y,\n sigma_x=self.sigma_x,\n sigma_y=self.sigma_y,\n gamma=self.gamma)\n self.plotted_ticket = self.cumulated_ticket\n self.plotted_ticket_original = self.plotted_ticket.copy()\n\n if self.autosave == 1:\n self.autosave_file.write_coordinates(self.cumulated_ticket)\n dataset_name = \"power_density\"\n\n self.autosave_file.add_plot_xy(self.cumulated_ticket, dataset_name=dataset_name)\n\n if self.autosave_partial_results == 1:\n if last_ticket is None:\n self.autosave_file.add_plot_xy(self.cumulated_ticket,\n plot_name=\"Energy Range: \" + str(round(self.energy_max-self.energy_step, 2)) + \"-\" + str(round(self.energy_max, 2)),\n dataset_name=dataset_name)\n else:\n self.autosave_file.add_plot_xy(last_ticket,\n plot_name=\"Energy Range: \" + str(round(self.energy_max-self.energy_step, 2)) + \"-\" + str(round(self.energy_max, 2)),\n dataset_name=dataset_name)\n\n self.autosave_file.flush()\n else:\n ticket, _ = self.plot_canvas.plot_power_density(shadow_beam, var_x, var_y,\n self.total_power, self.cumulated_total_power,\n self.energy_min, self.energy_max, self.energy_step,\n nbins=nbins, xrange=xrange, yrange=yrange, nolost=nolost,\n to_mm=self.workspace_units_to_mm,\n show_image=self.view_type==1,\n kind_of_calculation=self.kind_of_calculation,\n replace_poor_statistic=self.replace_poor_statistic,\n good_rays_limit=self.good_rays_limit,\n center_x=self.center_x,\n center_y=self.center_y,\n sigma_x=self.sigma_x,\n sigma_y=self.sigma_y,\n gamma=self.gamma)\n\n self.cumulated_ticket = None\n self.plotted_ticket = ticket\n self.plotted_ticket_original = self.plotted_ticket.copy()\n\n if self.autosave == 1:\n self.autosave_file.write_coordinates(ticket)\n self.autosave_file.add_plot_xy(ticket, dataset_name=\"power_density\")\n self.autosave_file.flush()\n\n except Exception as e:\n if not self.IS_DEVELOP:\n raise Exception(\"Data not plottable: No good rays or bad content\")\n else:\n raise e\n\n def plot_xy(self, var_x, var_y):\n beam_to_plot = self.input_beam\n\n if self.image_plane == 1:\n new_shadow_beam = self.input_beam.duplicate(history=False)\n\n if self.image_plane_rel_abs_position == 1: # relative\n dist = self.image_plane_new_position\n else: # absolute\n if self.input_beam.historySize() == 0:\n historyItem = None\n else:\n historyItem = self.input_beam.getOEHistory(oe_number=self.input_beam._oe_number)\n\n if historyItem is None: image_plane = 0.0\n elif self.input_beam._oe_number == 0: image_plane = 0.0\n else: image_plane = historyItem._shadow_oe_end._oe.T_IMAGE\n\n dist = self.image_plane_new_position - image_plane\n\n self.retrace_beam(new_shadow_beam, dist)\n\n beam_to_plot = new_shadow_beam\n\n xrange, yrange = self.get_ranges()\n\n self.replace_fig(beam_to_plot, var_x, var_y, xrange=xrange, yrange=yrange, nbins=int(self.number_of_bins), nolost=self.rays+1)\n\n def get_ranges(self):\n xrange = None\n yrange = None\n factor1 = self.workspace_units_to_mm\n factor2 = self.workspace_units_to_mm\n\n if self.x_range == 1:\n congruence.checkLessThan(self.x_range_min, self.x_range_max, \"X range min\", \"X range max\")\n\n xrange = [self.x_range_min / 
factor1, self.x_range_max / factor1]\n\n if self.y_range == 1:\n congruence.checkLessThan(self.y_range_min, self.y_range_max, \"Y range min\", \"Y range max\")\n\n yrange = [self.y_range_min / factor2, self.y_range_max / factor2]\n\n return xrange, yrange\n\n def plot_cumulated_data(self):\n if not self.cumulated_ticket is None:\n self.plot_canvas.plot_power_density_ticket(ticket=self.cumulated_ticket,\n var_x=self.x_column_index+1,\n var_y=self.y_column_index+1,\n cumulated_total_power=self.cumulated_total_power,\n energy_min=self.energy_min,\n energy_max=self.energy_max,\n energy_step=self.energy_step,\n show_image=self.view_type==1)\n\n self.plotted_ticket_original = self.cumulated_ticket.copy()\n\n def plot_results(self):\n try:\n sys.stdout = EmittingStream(textWritten=self.writeStdOut)\n\n if ShadowCongruence.checkEmptyBeam(self.input_beam):\n self.number_of_bins = congruence.checkStrictlyPositiveNumber(self.number_of_bins, \"Number of Bins\")\n\n self.plot_xy(self.x_column_index+1, self.y_column_index+1)\n\n time.sleep(0.1) # prevents a misterious dead lock in the Orange cycle when refreshing the histogram\n except Exception as exception:\n QMessageBox.critical(self, \"Error\",\n str(exception),\n QMessageBox.Ok)\n\n if self.IS_DEVELOP: raise exception\n\n def setBeam(self, input_beam):\n self.cb_rays.setEnabled(True)\n\n if not input_beam is None:\n if not input_beam.scanned_variable_data is None and input_beam.scanned_variable_data.has_additional_parameter(\"total_power\"):\n self.input_beam = input_beam\n\n self.total_power = self.input_beam.scanned_variable_data.get_additional_parameter(\"total_power\")\n\n if self.energy_min is None:\n self.energy_min = self.input_beam.scanned_variable_data.get_scanned_variable_value()\n self.cumulated_total_power = self.total_power\n else:\n self.cumulated_total_power += self.total_power\n\n self.energy_step = self.input_beam.scanned_variable_data.get_additional_parameter(\"photon_energy_step\")\n self.energy_max = self.input_beam.scanned_variable_data.get_scanned_variable_value()\n\n if self.input_beam.scanned_variable_data.has_additional_parameter(\"is_footprint\"):\n if self.input_beam.scanned_variable_data.get_additional_parameter(\"is_footprint\"):\n self.cb_rays.setEnabled(False)\n self.rays = 0 # transmitted, absorbed doesn't make sense since is precalculated by footprint object\n else:\n self.cb_rays.setEnabled(True)\n\n if ShadowCongruence.checkEmptyBeam(input_beam):\n if ShadowCongruence.checkGoodBeam(input_beam):\n self.plot_results()\n\n def writeStdOut(self, text):\n cursor = self.shadow_output.textCursor()\n cursor.movePosition(QTextCursor.End)\n cursor.insertText(text)\n self.shadow_output.setTextCursor(cursor)\n self.shadow_output.ensureCursorVisible()\n\n def retrace_beam(self, new_shadow_beam, dist):\n new_shadow_beam._beam.retrace(dist)\n","sub_path":"orangecontrib/aps/shadow/widgets/extension/ow_power_plot_xy.py","file_name":"ow_power_plot_xy.py","file_ext":"py","file_size_in_byte":49857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"28328993","text":"import os\n\nimport mock\nimport pytest\n\nfrom azure.ai.ml._azure_environments import (\n AzureEnvironments,\n _get_azure_portal_id_from_metadata,\n _get_base_url_from_metadata,\n _get_cloud_information_from_metadata,\n _get_default_cloud_name,\n _get_storage_endpoint_from_metadata,\n _set_cloud,\n)\nfrom azure.ai.ml.constants._common import AZUREML_CLOUD_ENV_NAME\n\n\n@pytest.mark.unittest\nclass 
TestCloudEnvironments:\n @mock.patch.dict(os.environ, {AZUREML_CLOUD_ENV_NAME: AzureEnvironments.ENV_DEFAULT}, clear=True)\n def test_set_valid_cloud_details_china(self):\n cloud_environment = AzureEnvironments.ENV_CHINA\n _set_cloud(cloud_environment)\n cloud_details = _get_cloud_information_from_metadata(cloud_environment)\n assert cloud_details.get(\"cloud\") == cloud_environment\n assert \"default\" in str(cloud_details.get(\"credential_scopes\"))\n assert \"https://management.chinacloudapi.cn\" in str(cloud_details.get(\"credential_scopes\"))\n\n def test_set_valid_cloud_details_us_gov(self):\n cloud_environment = AzureEnvironments.ENV_US_GOVERNMENT\n _set_cloud(cloud_environment)\n cloud_details = _get_cloud_information_from_metadata(cloud_environment)\n assert cloud_details.get(\"cloud\") == cloud_environment\n assert \"default\" in str(cloud_details.get(\"credential_scopes\"))\n assert \"https://management.usgovcloudapi.net\" in str(cloud_details.get(\"credential_scopes\"))\n\n @mock.patch.dict(os.environ, {AZUREML_CLOUD_ENV_NAME: AzureEnvironments.ENV_DEFAULT}, clear=True)\n def test_get_base_url_from_default_environment(self):\n cloud_environment = None\n _set_cloud(cloud_environment)\n base_url = _get_base_url_from_metadata(cloud_environment)\n assert \"https://management.azure.com\" in base_url\n\n def test_get_base_url_from_us_gov(self):\n cloud_environment = AzureEnvironments.ENV_US_GOVERNMENT\n _set_cloud(cloud_environment)\n base_url = _get_base_url_from_metadata(cloud_environment)\n assert \"https://management.usgovcloudapi.net\" in base_url\n\n def test_get_azure_portal_id_from_us_gov(self):\n cloud_environment = AzureEnvironments.ENV_US_GOVERNMENT\n _set_cloud(cloud_environment)\n base_url = _get_azure_portal_id_from_metadata(cloud_environment)\n assert \"https://portal.azure.us\" in base_url\n\n def test_get_storage_endpoint_from_us_gov(self):\n cloud_environment = AzureEnvironments.ENV_US_GOVERNMENT\n _set_cloud(cloud_environment)\n base_url = _get_storage_endpoint_from_metadata(cloud_environment)\n assert \"core.usgovcloudapi.net\" in base_url\n\n def test_set_invalid_cloud(self):\n with pytest.raises(Exception) as e:\n _set_cloud(\"yadadada\")\n assert \"Unknown cloud environment supplied\" in str(e)\n\n def test_get_default_cloud(self):\n with mock.patch(\"os.environ\", {AZUREML_CLOUD_ENV_NAME: \"yadadada\"}):\n cloud_name = _get_default_cloud_name()\n assert cloud_name == \"yadadada\"\n","sub_path":"sdk/ml/azure-ai-ml/tests/internal_utils/unittests/test_cloud_environments.py","file_name":"test_cloud_environments.py","file_ext":"py","file_size_in_byte":3049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"226911552","text":"# -*- coding: utf-8 -*-\n\nfrom django import forms\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Submit, Layout, Fieldset, ButtonHolder\nfrom Instanssi.tickets.models import Ticket\nfrom Instanssi.store.models import StoreItem\n\nclass TicketForm(forms.ModelForm):\n def __init__(self, *args, **kwargs):\n self.event = kwargs.pop('event', None)\n super(TicketForm, self).__init__(*args, **kwargs)\n \n # Set choices\n items = []\n for item in StoreItem.objects.filter(event=self.event, delivery_type=1):\n items.append((item.id, item.name))\n self.fields['storeitem'].choices = items\n \n # Set form\n self.helper = FormHelper()\n self.helper.layout = Layout(\n Fieldset(\n u'Lataa',\n 'used',\n 'storeitem',\n 'owner_firstname',\n 'owner_lastname',\n 'owner_email',\n 
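# ButtonHolder below keeps the submit button grouped inside the same fieldset\r\n                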
ButtonHolder (\r\n                    Submit('submit', u'Tallenna')\r\n                )\r\n            )\r\n        )\r\n        \r\n    class Meta:\r\n        model = Ticket\r\n        fields = ('storeitem','used','owner_firstname','owner_lastname','owner_email')","sub_path":"Instanssi/admin_tickets/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"132196878","text":"import os\nimport requests\n\n\nfrom flask import Flask, jsonify, render_template, request,redirect\nfrom flask_socketio import SocketIO, emit\n\napp = Flask(__name__)\napp.config[\"SECRET_KEY\"] = os.getenv(\"SECRET_KEY\")\nsocketio = SocketIO(app)\n\nchannels = []\nmax_msg = 100\n@app.route(\"/\")\ndef index():\n\t# renders homepage\n    return render_template(\"index.html\", channels=channels)\n\n@socketio.on(\"create channel\")\ndef create_channel(data):\n\t# creates new channel\n    new_channel = data['channel_name']\n    display_name = data['display_name']\n    channel_id = len(channels)\n    channels.append({'channel_name': new_channel, 'channel_creator': display_name})\n    channels[channel_id]['messages'] = []\n    #emits the channel created\n    emit(\"channels\", {'channel_name': new_channel, 'channel_creator': display_name, 'channel_id': channel_id}, broadcast=True)\n\n@app.route(\"/channel/<int:channel_id>\")\ndef channel(channel_id):\n\t# open channel page for chat\n\ttry:\n\t\tchannel_name = channels[channel_id]['channel_name']\n\texcept IndexError:\n\t\treturn redirect(\"/\")\n\treturn render_template(\"channel.html\", channel_name=channel_name, channel_id=channel_id )\n\n@socketio.on(\"add message\")\ndef add_message(data):\n\t# adds new message to the channel\n    channel_id = data['channel_id']\n    del data['channel_id']\n    msg_length = len(channels[channel_id]['messages'])\n    if len(channels[channel_id]['messages']) >= max_msg:\n        channels[channel_id]['messages'].pop(0)\n    channels[channel_id]['messages'].append(data)\n    message = data['message'].strip()\n    sender = data['sender']\n    time = data['time'].strip()\n    #emits the message added\n    emit(\"new message\", {'message': message, 'sender': sender, 'time': time,'msg_length': msg_length, 'channel_id': channel_id}, broadcast=True)\n\n@app.route(\"/posts\", methods=[\"POST\"])\ndef posts():\n\n    # get existing messages for the channel\n    channel_id = int(request.form.get(\"channel_id\"))\n    return jsonify(channels[channel_id]['messages'])\n\n@app.route(\"/delete\", methods = [\"POST\"])\ndef delete():\n\t# deletes the message from the channel\n    channel_id = int(request.form.get(\"channel_id\"))\n    message = request.form.get(\"message\").strip()\n    time = request.form.get(\"time\").strip()\n    sender = request.form.get(\"sender\")\n    # find the first matching message and delete it; break immediately,\n    # since the remaining indices shift after a deletion\n    for index in range(len(channels[channel_id]['messages'])):\n        if channels[channel_id]['messages'][index]['sender'] == sender and channels[channel_id]['messages'][index]['time'] == time:\n            del channels[channel_id]['messages'][index]\n            break\n\n    return '', 204\n","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":2559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"549087118","text":"def rekursif(str,val):\r\n    if val > len(str):\r\n        print('Index melebihi string')\r\n    else:\r\n        listData=[]\r\n        for i in range(len(str)):\r\n            if str[i] in 'abcdefghijklmnopqrstuvwxyz':\r\n                listData.append(str[i])\r\n        print(listData[val])\r\nval1=input('\\nMasukkan Kata : ')\r\nassert val1.isalpha(), \"Masukkan harus berupa 
kata\"\r\nval2=input('Masukkan Angka batas akhir : ')\r\nassert val2.isdigit(), \"Masukkan harus berupa Angka\"\r\nrekursif(val1, int(val2))","sub_path":"Python/Laporan/Minggu 10/tugas6.py","file_name":"tugas6.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"372588787","text":"from seedsource_project.settings.base import *\n\nINSTALLED_APPS += ('kombu.transport.django',)\n\nINTERNAL_IPS = ['127.0.0.1']\n\nBROKER_URL = 'django://'\nCELERY_RESULT_BACKEND = 'djcelery.backends.database:DatabaseBackend'\n\nBABEL_CMD = 'babel'\nUGLIFY_CMD = 'uglifyjs'\n","sub_path":"source/seedsource_project/settings/local.py","file_name":"local.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"42563249","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n@Author : Xu\n \n@Software: PyCharm\n \n@File : slots.py\n \n@Time : 2021/4/1 5:05 下午\n \n@Desc : 槽位的处理逻辑\n \n\"\"\"\n\nimport logging\n\nfrom typing import Any, Dict, List, Optional, Text, Type\n\nimport wechatter.shared.dm.dm_config\nfrom wechatter.shared.exceptions import WechatterException\nimport wechatter.shared.utils.common\nimport wechatter.shared.utils.io\nfrom wechatter.shared.dialogue_config import DOCS_URL_SLOTS\n\nlogger = logging.getLogger(__name__)\n\n\nclass InvalidSlotTypeException(WechatterException):\n \"\"\"Raised if a slot type is invalid.\"\"\"\n\n\nclass InvalidSlotConfigError(WechatterException, ValueError):\n \"\"\"Raised if a slot's config is invalid.\"\"\"\n\n\nclass Slot:\n \"\"\"\n Key-value store for storing information during a conversation.\n \"\"\"\n\n type_name = None\n\n def __init__(\n self,\n name: Text,\n initial_value: Any = None,\n value_reset_delay: Optional[int] = None,\n auto_fill: bool = True,\n influence_conversation: bool = True,\n ) -> None:\n \"\"\"Create a Slot.\n\n Args:\n name: The name of the slot.\n initial_value: The initial value of the slot.\n value_reset_delay: After how many turns the slot should be reset to the\n initial_value. This is behavior is currently not implemented.\n auto_fill: `True` if the slot should be filled automatically by entities\n with the same name.\n influence_conversation: If `True` the slot will be featurized and hence\n influence the predictions of the dialogue polices.\n \"\"\"\n self.name = name\n self._value = initial_value\n self.initial_value = initial_value\n self._value_reset_delay = value_reset_delay\n self.auto_fill = auto_fill\n self.influence_conversation = influence_conversation\n self._has_been_set = False\n\n def feature_dimensionality(self) -> int:\n \"\"\"How many features this single slot creates.\n\n Returns:\n The number of features. `0` if the slot is unfeaturized. 
The dimensionality\n of the array returned by `as_feature` needs to correspond to this value.\n \"\"\"\n if not self.influence_conversation:\n return 0\n\n return self._feature_dimensionality()\n\n def _feature_dimensionality(self) -> int:\n \"\"\"See the docstring for `feature_dimensionality`.\"\"\"\n return 1\n\n def has_features(self) -> bool:\n \"\"\"Indicate if the slot creates any features.\"\"\"\n return self.feature_dimensionality() != 0\n\n def value_reset_delay(self) -> Optional[int]:\n \"\"\"After how many turns the slot should be reset to the initial_value.\n\n If the delay is set to `None`, the slot will keep its value forever.\"\"\"\n # TODO: FUTURE this needs to be implemented - slots are not reset yet\n return self._value_reset_delay\n\n def as_feature(self) -> List[float]:\n if not self.influence_conversation:\n return []\n\n return self._as_feature()\n\n def _as_feature(self) -> List[float]:\n raise NotImplementedError(\n \"Each slot type needs to specify how its \"\n \"value can be converted to a feature. Slot \"\n \"'{}' is a generic slot that can not be used \"\n \"for predictions. Make sure you add this \"\n \"slot to your domain definition, specifying \"\n \"the type of the slot. If you implemented \"\n \"a custom slot type class, make sure to \"\n \"implement `.as_feature()`.\"\n \"\".format(self.name)\n )\n\n def reset(self) -> None:\n \"\"\"Resets the slot's value to the initial value.\"\"\"\n self.value = self.initial_value\n self._has_been_set = False\n\n @property\n def value(self) -> Any:\n \"\"\"Gets the slot's value.\"\"\"\n return self._value\n\n @value.setter\n def value(self, value: Any) -> None:\n \"\"\"Sets the slot's value.\"\"\"\n self._value = value\n self._has_been_set = True\n\n @property\n def has_been_set(self) -> bool:\n \"\"\"Indicates if the slot's value has been set.\"\"\"\n return self._has_been_set\n\n def __str__(self) -> Text:\n return f\"{self.__class__.__name__}({self.name}: {self.value})\"\n\n def __repr__(self) -> Text:\n return f\"<{self.__class__.__name__}({self.name}: {self.value})>\"\n\n @staticmethod\n def resolve_by_type(type_name) -> Type[\"Slot\"]:\n \"\"\"Returns a slots class by its type name.\"\"\"\n for cls in wechatter.shared.utils.common.all_subclasses(Slot):\n if cls.type_name == type_name:\n return cls\n try:\n return wechatter.shared.utils.common.class_from_module_path(type_name)\n except (ImportError, AttributeError):\n raise InvalidSlotTypeException(\n f\"Failed to find slot type, '{type_name}' is neither a known type nor \"\n f\"user-defined. If you are creating your own slot type, make \"\n f\"sure its module path is correct. 
\"\n f\"You can find all build in types at {DOCS_URL_SLOTS}\"\n )\n\n def persistence_info(self) -> Dict[str, Any]:\n return {\n \"type\": wechatter.shared.utils.common.module_path_from_instance(self),\n \"initial_value\": self.initial_value,\n \"auto_fill\": self.auto_fill,\n \"influence_conversation\": self.influence_conversation,\n }\n\n\nclass FloatSlot:\n \"\"\"\n 数字类型slot\n \"\"\"\n type_name = \"float\"\n\n def __init__(\n self,\n name: Text,\n initial_value: Optional[float] = None,\n value_reset_delay: Optional[int] = None,\n auto_fill: bool = True,\n max_value: float = 1.0,\n min_value: float = 0.0,\n influence_conversation: bool = True,\n ) -> None:\n super().__init__(\n name,\n initial_value,\n value_reset_delay,\n auto_fill,\n influence_conversation\n )\n self.max_value = max_value\n self.min_value = min_value\n\n if min_value >= max_value:\n raise InvalidSlotConfigError(\n \"Float slot ('{}') created with an invalid range \"\n \"using min ({}) and max ({}) values. Make sure \"\n \"min is smaller than max.\"\n \"\".format(self.name, self.min_value, self.max_value)\n )\n\n if initial_value is not None and not (min_value <= initial_value <= max_value):\n wechatter.shared.utils.io.raise_warning(\n f\"Float slot ('{self.name}') created with an initial value \"\n f\"{self.value}. This value is outside of the configured min \"\n f\"({self.min_value}) and max ({self.max_value}) values.\"\n )\n\n def _as_feature(self) -> List[float]:\n try:\n capped_value = max(self.min_value, min(self.max_value, float(self.value)))\n if abs(self.max_value - self.min_value) > 0:\n covered_range = abs(self.max_value - self.min_value)\n else:\n covered_range = 1\n return [1.0, (capped_value - self.min_value) / covered_range]\n except (TypeError, ValueError):\n return [0.0, 0.0]\n\n def persistence_info(self) -> Dict[Text, Any]:\n \"\"\"Returns relevant information to persist this slot.\"\"\"\n d = super().persistence_info()\n d[\"max_value\"] = self.max_value\n d[\"min_value\"] = self.min_value\n return d\n\n def _feature_dimensionality(self) -> int:\n return len(self.as_feature())\n\n\nclass BooleanSlot(Slot):\n type_name = \"bool\"\n\n def _as_feature(self) -> List[float]:\n try:\n if self.value is not None:\n return [1.0, float(bool_from_any(self.value))]\n else:\n return [0.0, 0.0]\n except (TypeError, ValueError):\n # we couldn't convert the value to float - using default value\n return [0.0, 0.0]\n\n def _feature_dimensionality(self) -> int:\n return len(self.as_feature())\n\n\nclass AnySlot:\n \"\"\"\n\n \"\"\"\n pass\n\n\nclass TextSlot(Slot):\n \"\"\"\n\n \"\"\"\n type_name = \"text\"\n\n def _as_feature(self) -> List[float]:\n return [1.0 if self.value is not None else 0.0]\n\n\nclass ListSlot(Slot):\n \"\"\"\n\n \"\"\"\n type_name = \"list\"\n\n def _as_feature(self) -> List[float]:\n try:\n if self.value is not None and len(self.value) > 0:\n return [1.0]\n else:\n return [0.0]\n except (TypeError, ValueError):\n # we couldn't convert the value to a list - using default value\n return [0.0]\n\n\nclass CategoricalSlot:\n \"\"\"\n\n \"\"\"\n\n\ndef bool_from_any(x: Any) -> bool:\n \"\"\" Converts bool/float/int/str to bool or raises error \"\"\"\n\n if isinstance(x, bool):\n return x\n elif isinstance(x, (float, int)):\n return x == 1.0\n elif isinstance(x, str):\n if x.isnumeric():\n return float(x) == 1.0\n elif x.strip().lower() == \"true\":\n return True\n elif x.strip().lower() == \"false\":\n return False\n else:\n raise ValueError(\"Cannot convert string to bool\")\n else:\n raise 
TypeError(\"Cannot convert to bool\")\n","sub_path":"wechatter/shared/dm/slots.py","file_name":"slots.py","file_ext":"py","file_size_in_byte":9422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"284734245","text":"# -*- coding: utf-8 -*-\r\nimport telebot\r\nfrom telebot import types\r\nimport time\r\nimport socket\r\nimport socks\r\nip = '45.77.139.146'\r\nport = 30762\r\nsocks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, ip, port)\r\nsocket.socket = socks.socksocket\r\n\r\nbot = telebot.TeleBot(token)\r\n\r\n@bot.message_handler(content_types=['text'])\r\ndef inline(message):\r\n key = types.InlineKeyboardMarkup()\r\n But_Y = types.InlineKeyboardButton(text=\"Да\", callback_data=\"Да\")\r\n But_N = types.InlineKeyboardButton(text=\"Нет\", callback_data=\"Нет\")\r\n But_W = types.InlineKeyboardButton(text=\"Warning\", callback_data=\"Нет\")\r\n key.add(But_Y, But_N,But_W)\r\n bot.send_message(message.chat.id, \"Я прогер?\", reply_markup=key)\r\n #bot.send_photo(message.chat.id, 'https://ibb.co/2WY9FnK');\r\n #bot.download_file(message.photo)\r\n\r\n@bot.message_handler(content_types=['text', 'document', 'audio'])\r\ndef get_text_messages(message):\r\n if message.text == \"Привет\":\r\n bot.send_message(message.from_user.id, \"Привет, чем я могу тебе помочь?\")\r\n elif message.text == \"/help\":\r\n bot.send_message(message.from_user.id, \"Напиши привет\")\r\n else:\r\n bobot.send_message(message.from_user.id, \"Я тебя не понимаю. Напиши /help.\")\r\n\r\n@bot.callback_query_handler(func=lambda Answer:True)\r\ndef inlin(Answer):\r\n if Answer.data == \"Да\":\r\n bot.send_message(Answer.message.chat.id, \"Правильный ответ!\")\r\n if Answer.data == \"Нет\":\r\n bot.send_message(Answer.message.chat.id, \"Я ПРОГЕР!!!!!!!!!111!!!1!!!!\")\r\n\r\n\r\n@bot.message_handler(content_types=['photo'])\r\ndef photo(message):\r\n print('message.photo =', message.photo)\r\n fileID = message.photo[-1].file_id\r\n print('fileID =', fileID)\r\n file_info = bot.get_file(fileID)\r\n print('file.file_path =', file_info.file_path)\r\n downloaded_file = bot.download_file(file_info.file_path)\r\n\r\n with open(\"image.jpg\", 'wb') as new_file:\r\n new_file.write(downloaded_file)\r\n\r\nwhile True:\r\n try:\r\n bot.infinity_polling(True)\r\n except Exception as E:\r\n print(E.args)\r\n time.sleep(3)\r\n\r\n","sub_path":"bottelegram2.py","file_name":"bottelegram2.py","file_ext":"py","file_size_in_byte":2188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"139354359","text":"\n'''\n* 변수 (variable)\n\n1. 변수는 데이터를 저장하기 위한 공간에 이름을 붙인 것.\n2. 하나의 변수에는 하나의 데이터만 저장 가능\n3. 언제든지 변경도 가능합니다.\n\n'''\n\n# 파이썬은 타입을 작성하지 않습니다.\n# 타입이 없는 게 아니라 동적 타이핑 언어입니다.\nnum = 10 + 9\nprint(type(num))\n\nnum = 50 + 40 # 변수의 값 변경도 그냥 선언과 동일합니다.\nprint(num)\n\n# 선언되지 않은 변수는 사용이 불가능합니다.\n# print(result) (x)\n\napple = '사과'","sub_path":"Day01/variable.py","file_name":"variable.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"103336205","text":"\"\"\"\n1. Bankomat\nLåt användaren mata in ett uttagsbelopp, och beräkna hur många 100-lappar, 200-lappar och 500-\nlappar bankomaten ska mata ut. 
Visa resultatet i terminalen.\n\"\"\"\n\n\ndef split_money(amount):\n\n if amount % 100 != 0:\n return \"Error\"\n\n fivehundred = amount // 500\n amount -= fivehundred * 500\n twohundred = amount // 200\n amount -= twohundred * 200\n hundred = amount // 100\n amount -= hundred * 100\n\n return [fivehundred, twohundred, hundred]\n\n\nwithdrawal = int(input(\"Hur mycket pengar vill vill du ha: \"))\n\ncash = split_money(withdrawal)\n\nprint(\"500: \" + str(cash[0]) + \"\\n200: \" + str(cash[1]) + \"\\n100: \" + str(cash[2]))","sub_path":"Workshop 1/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"39664661","text":"import math\nimport random\n\nfrom pareto import cull\n\n\nclass ParetoReproduction(object):\n \"\"\"\n Implements the Pareto form of reproduction:\n Remove stagnant species, then get the pareto front and carry that forward, then for the remaining space in the\n population use the NEAT-python reproduction scheme: explicit fitness sharing with fixed-time species stagnation.\n\n Note: the pareto front is limited to be at most half of the next population\n \"\"\"\n\n def __init__(self, config, reporters, genome_indexer):\n params = config.get_type_config(self)\n self.survival_threshold = float(params.get('survival_threshold'))\n\n self.reporters = reporters\n self.genome_indexer = genome_indexer\n self.stagnation = config.stagnation_type(config, reporters)\n\n self.results_matrix = None\n\n def set_results_matrix(self, matrix):\n self.results_matrix = matrix\n\n def reproduce(self, species, pop_size):\n # Filter out stagnated species and collect the set of non-stagnated species members.\n remaining_species = {}\n species_fitness = []\n avg_adjusted_fitness = 0.0\n for s, stagnant in self.stagnation.update(species):\n if stagnant:\n self.reporters.species_stagnant(s)\n else:\n remaining_species[s.ID] = s\n\n # Compute adjusted fitness.\n species_sum = 0.0\n for m in s.members:\n af = m.fitness / len(s.members)\n species_sum += af\n\n sfitness = species_sum / len(s.members)\n species_fitness.append((s, sfitness))\n avg_adjusted_fitness += sfitness\n\n # No species left.\n if not remaining_species:\n return [], []\n\n avg_adjusted_fitness /= len(species_fitness)\n self.reporters.info(\"Average adjusted fitness: {:.3f}\".format(avg_adjusted_fitness))\n\n # Compute the number of new individuals to create for the new generation.\n spawn_amounts = []\n for s, sfitness in species_fitness:\n spawn = len(s.members)\n\n if sfitness > avg_adjusted_fitness:\n spawn *= 1.1\n else:\n spawn *= 0.9\n spawn_amounts.append(spawn)\n\n # Normalize the spawn amounts so that the next generation is roughly\n # the population size requested by the user.\n total_spawn = sum(spawn_amounts)\n norm = pop_size / total_spawn\n spawn_amounts = [int(round(n * norm)) for n in spawn_amounts]\n self.reporters.info(\"Spawn amounts: {0}\".format(spawn_amounts))\n self.reporters.info('Species fitness : {0!r}'.format([sfitness for s, sfitness in species_fitness]))\n\n new_population = []\n new_species = []\n\n # Compute the pareto front, to be carried forward into the next population\n genomes = []\n for s in species:\n genomes.extend(s.members)\n\n pareto_front = self.get_limited_pareto_front(genomes, pop_size / 2)\n\n ids = [genome.ID for genome in pareto_front]\n self.reporters.info(\"Pareto front: {0}\".format(ids))\n\n for spawn, (s, sfitness) in zip(spawn_amounts, species_fitness):\n if spawn <= 
0:\n                continue\n\n            # The species has at least one member for the next generation, so retain it.\n            old_members = s.members\n            s.members = []\n            new_species.append(s)\n\n            # Add the corresponding element from the pareto front if it is in the species\n            for i, genome in enumerate(pareto_front):\n                if genome in old_members:\n                    new_population.append(genome)\n                    spawn -= 1\n\n            # If the pareto front has used all of the species slots, continue\n            if spawn <= 0:\n                continue\n\n            # Sort members in order of descending fitness.\n            old_members.sort(reverse=True)\n\n            # Only use the survival threshold fraction to use as parents for the next generation.\n            repro_cutoff = int(math.ceil(self.survival_threshold * len(old_members)))\n            # Use at least two parents no matter what the threshold fraction result is.\n            repro_cutoff = max(repro_cutoff, 2)\n            old_members = old_members[:repro_cutoff]\n\n            # Randomly choose parents and produce the number of offspring allotted to the species.\n            while spawn > 0:\n                spawn -= 1\n\n                parent1 = random.choice(old_members)\n                parent2 = random.choice(old_members)\n\n                # Note that if the parents are not distinct, crossover will produce a\n                # genetically identical clone of the parent (but with a different ID).\n                child = parent1.crossover(parent2, self.genome_indexer.get_next())\n                new_population.append(child.mutate())\n\n        # Sort species by ID (purely for ease of reading the reported list).\n        new_species.sort(key=lambda sp: sp.ID)\n\n        # Clear the results matrix to prevent using it accidentally next turn\n        self.results_matrix = []\n\n        return new_species, new_population\n\n    def get_pareto_front(self, genomes):\n        \"\"\"\n        Get the entire pareto front of the genomes, given that self.results_matrix has been set correctly\n        Args:\n            genomes: The NEAT genomes that have been evaluated\n\n        Returns:\n            The list of genomes in the pareto front for that runthrough\n        \"\"\"\n        sorted_genomes = sorted(genomes, key=lambda genome: genome.ID)\n\n        results = [i for i in self.results_matrix.itervalues()]\n\n        # Get the rows of the results matrix that are not pareto dominated\n        front = cull(results)\n\n        # Get the corresponding indices of these points in the results matrix, as it corresponds to their index\n        # in sorted_genomes (accumulate over the whole front rather than overwriting each iteration)\n        indices = []\n        for point in front:\n            indices.extend([i for i, x in enumerate(results) if x == point])\n\n        # Now get the corresponding genomes of the pareto front\n        pareto_front_genomes = []\n\n        for results_index in indices:\n            pareto_front_genomes.append(sorted_genomes[results_index])\n\n        return pareto_front_genomes\n\n    def get_limited_pareto_front(self, genomes, max_front_size):\n        \"\"\"\n        Get the limited pareto front of the genomes, given that self.results_matrix has been set correctly\n        The pareto front is limited by removing those of low domination (i.e. those that dominate in fewer dimensions)\n        Args:\n            max_front_size: The maximum allowed size of the front to return\n            genomes: The NEAT genomes that have been evaluated\n\n        Returns:\n            The list of genomes in the pareto front for that runthrough\n        \"\"\"\n        sorted_genomes = sorted(genomes, key=lambda genome: genome.ID)\n\n        results = [i for i in self.results_matrix.itervalues()]\n\n        # Get the rows of the results matrix that are not pareto dominated\n        front = cull(results)\n\n        # must beat the others on at least 1 dimension at first\n        min_dim = 1\n\n        if len(front) > max_front_size:\n            min_dims = []  # track the minimum dimensions for each element of the front\n            new_front = []  # track the new limited front\n\n            # Get the minimum number of dimensions that each point in front is dominant by, to limit the front\n            # Also remove duplicates, as an early step (as will be dominant by 0 dimensions)\n            for points in front:\n                # start at the full dimensionality so the min() below can only lower it\n                min_dim_diff = len(points)\n\n                # If there is more than one occurrence of the element of the point in the front, then don't bother\n                if results.count(points) > 1:\n                    continue\n\n                # get the results excluding the point we are testing\n                # using .index is fine here as there will be only one occurrence of the points now\n                results_excl_point = [x for i, x in enumerate(results) if i != results.index(points)]\n\n                # find the minimum dimension difference\n                for other in results_excl_point:\n                    diff = sum([1 for i in xrange(len(other)) if points[i] == other[i]])\n                    min_dim_diff = min(min_dim_diff, diff)\n\n                # set the array elements as appropriate\n                min_dims.append(min_dim_diff)\n                new_front.append(points)\n\n            # we need to store the front when it has no duplicates, as this corresponds to the min_dim values\n            # ie no_dup_front[2] has min dimension difference of min_dims[2]\n            no_dup_front = new_front\n\n            # loop through, increasing the number of dimensions of difference needed until we have reached\n            # enough of a difference that the front will be sufficiently small\n            while len(new_front) > max_front_size:\n                new_front = []\n\n                for i in xrange(len(no_dup_front)):\n                    # if this is within our limits, then that's fine\n                    if min_dims[i] >= min_dim:\n                        new_front.append(no_dup_front[i])\n\n                min_dim += 1\n            front = new_front\n\n        # Get the corresponding indices of these points in the results matrix, as it corresponds to their index\n        # in sorted_genomes\n        # (There may be multiple instances of same point in results, so get all of them)\n        indices = set()\n\n        for point in front:\n            indices |= set([i for i, x in enumerate(results) if x == point])\n\n        # Now get the corresponding genomes of the pareto front\n        pareto_front_genomes = []\n\n        for results_index in indices:\n            pareto_front_genomes.append(sorted_genomes[results_index])\n\n        return pareto_front_genomes\n","sub_path":"training/pareto_reproduction.py","file_name":"pareto_reproduction.py","file_ext":"py","file_size_in_byte":9872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"68337328","text":"\"\"\"Task_01\nUsage:\ntask_01 --text=CSV --save=SAVE --bertserver=ADDR\ntask_01 --vecs=SAVE --cosine\ntask_01 --vecs=SAVE --bm25 [--testrun=FILE]\ntask_01 --vecs=SAVE --logistic --logon=METHOD [--savelog=SAVE --loadlog=SAVE]\ntask_01 --vecs=SAVE --svm --svmon=METHOD [--savesvm=SAVE --loadsvm=SAVE]\ntask_01 --vecs=SAVE --bert --bertdir=DIR\n\nOptions:\n    -h                  Show this screen\n    --text=CSV          Read texts from a CSV file, Prepare the vector representations, Save the ColieeVectorizer on disk\n    --vecs=SAVE         Read a ColieeVectorizer from a SAVE file, Use the representations to 
run some Rankers\n --save=FILE Use FILE to save the ColieeVectorizer\n --bertserver=ADDR Address of the Bert Server [default: localhost]\n --cosine Run the cosine model\n --logistic Run the Logistic Regression for Classification\n --logon=METHOD Which text representation to use [default: lsa]\n --savelog=SAVE Save the Logistic Regression model to disk\n --loadlog=SAVE Load the Logistic Regression model from disk\n --svm Run a SVM classification\n --svmon=METHOD Which text representation to use [default: lsa]\n --savesvm=SAVE Save the Logistic Regression model to disk\n --loadsvm=SAVE Load the Logistic Regression model from disk\n\"\"\"\nimport logging\nimport sys\n\nfrom basics import ColieeData, ColieeVectorizer, RankingBy, RankingEvaluation\nfrom baseline import CosineSimilarityRanker, LogisticRegressionRanker, SVMRanker, BM25Ranker\n\nfrom docopt import docopt\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.INFO,\n format='%(asctime)-8s %(name)-30s %(levelname)-8s %(message)s',\n datefmt='%H:%M')\n logger = logging.getLogger('COLIEE_Task_01')\n args = docopt(__doc__, version='COLIEE v1.0')\n\n if args['--text'] is not None:\n # Read data\n input_file = args['--text']\n logger.info('Reading Data from file {}'.format(input_file))\n data = ColieeData(input_file)\n\n # Create all vectors\n logger.info('Creating Vectorizer')\n vectorizer = ColieeVectorizer(data=data)\n vectorizer.parameters['bert_server'] = args['--bertserver']\n vectorizer.prepare_all(savetofile=args['--save'])\n\n if args['--vecs'] is not None:\n # Read the ColieeVectorizer from file\n input_file = args['--vecs']\n logger.info('Reading ColieeVectorizer from file {}'.format(input_file))\n vectorizer = ColieeVectorizer.from_file(input_file)\n\n ranker = None\n methods = 'all'\n\n # Run the models\n if args['--cosine'] is True:\n logger.info('Baseline : Cosine Similarity')\n cosine = CosineSimilarityRanker()\n ranker = cosine\n methods = ['bow', 'tfidf', 'lsi', 'bert']\n\n if args['--bm25'] is True:\n logger.info('Baseline : BM25')\n bm25 = BM25Ranker()\n ranker = bm25\n methods = ['raw']\n if args['--testrun'] is not None:\n scores = RankingBy.rank_vectorizer(vectorizer=vectorizer, ranker=bm25, methods=methods)\n data = vectorizer.data.data[['case_id', 'candidate_id']]\n scores['raw'][['case_id', 'candidate_id', 'score']].to_csv(args['--testrun'], index=False)\n sys.exit(0)\n\n if args['--logistic'] is True:\n logger.info('Baseline : Logistic Regression')\n if args['--loadlog'] is not None:\n # Load from a saved model\n logistic = LogisticRegressionRanker.from_file(args['--loadlog'])\n else:\n # Build a new model and save it\n logistic = LogisticRegressionRanker()\n logistic.train_model(X=ColieeData.stratify_input(vectorizer.vector_representations[args['--logon']]),\n Y=vectorizer.data.qrels.flatten())\n if args['--savelog'] is not None:\n logistic.to_file(args['--savelog'])\n ranker = logistic\n methods = args['--logon']\n\n if args['--svm'] is True:\n logger.info('Baseline : SVM')\n if args['--loadsvm'] is not None:\n # Load from a saved model\n svm = SVMRanker.from_file(args['--loadsvm'])\n else:\n # Build a new model and save it\n svm = SVMRanker()\n svm.train_model(\n X=ColieeData.stratify_input(vectorizer.vector_representations[args['--svmon']]),\n Y=vectorizer.data.qrels.flatten())\n if args['--savesvm'] is not None:\n svm.to_file(args['--savesvm'])\n ranker = svm\n methods = args['--svmon']\n\n baseline = RankingBy.evaluate_vectorizer(vectorizer=vectorizer, ranker=ranker, methods=methods)\n 
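# each name in 'methods' selects one of the representations prepared by the\n    # ColieeVectorizer, so the chosen ranker is scored once per representation\n    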
RankingEvaluation.print_results(baseline)\n","sub_path":"Task_01/task_01.py","file_name":"task_01.py","file_ext":"py","file_size_in_byte":4939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"571075820","text":"from hashlib import md5\nfrom pathlib import Path\nfrom time import time\nfrom typing import Optional\n\nimport toml\nfrom httpx import AsyncClient\nfrom loguru import logger\nfrom pydantic import parse_obj_as\n\nfrom . import config\nfrom .models import CodeboxProject, CodelabProject, Command, ProjectToRun, Response\nfrom .resources import redis\n\n\nasync def save_project(project: CodelabProject, ttl: Optional[int] = None, key: str = 'project:{}') -> None:\n '''\n Save project into Redis\n '''\n key = key.format(project.id)\n await redis.set(key, project.json(), ex=ttl)\n logger.info(f'Key: {key}, TTL: {ttl}s, {project}')\n return\n\n\ndef calc_id(proj: ProjectToRun) -> str:\n '''\n Calculates the project id based on the MD5 of its title, description, source code,\n language and inputs. The order matters.\n '''\n text = ''.join([proj.title, proj.description, proj.language, proj.sourcecode, str(proj.stdin)])\n return md5(text.encode()).hexdigest()\n\n\ndef codelab_to_codebox_project(project: ProjectToRun) -> CodeboxProject:\n language_mappings = {\n 'python': {\n 'filename': 'main.py',\n 'command': '/usr/local/bin/python main.py'\n },\n }\n mapping = language_mappings[project.language]\n sources = {\n mapping['filename']: project.sourcecode,\n }\n cmd = Command(command=mapping['command'], timeout=config.TIMEOUT, stdin=project.stdin)\n return CodeboxProject(sources=sources, commands=[cmd])\n\n\nasync def run_project_in_codebox(project: ProjectToRun) -> Response:\n '''\n Call Codebox to run the project\n '''\n start = time()\n codebox_project = codelab_to_codebox_project(project)\n async with AsyncClient() as client:\n result = await client.post(f'{config.CODEBOX_URL}/execute', json=codebox_project.dict())\n\n assert result.status_code == 200\n elapsed = time() - start\n logger.debug(f'Elapsed Time: {elapsed}s')\n responses = parse_obj_as(list[Response], result.json())\n return responses[0]\n\n\nasync def load_examples() -> list[CodelabProject]:\n '''\n Load examples into Redis.\n Examples/Samples are the toml files located at 'app/examples'.\n '''\n examples = []\n for example in (Path(__file__).parent / 'examples').glob('*.toml'):\n project = CodelabProject.parse_obj(toml.loads(example.read_text()))\n await save_project(project)\n examples.append(project)\n return examples\n","sub_path":"backend/app/projects.py","file_name":"projects.py","file_ext":"py","file_size_in_byte":2425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"521036228","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2021/6/29 14:41\n# @Author : AsiHacker\n# @File : langdetect库实现.py\n# @Software: PyCharm\n# @notice : True masters always have the heart of an apprentice.\n\nfrom langdetect import DetectorFactory, detect_langs, detect\n\nDetectorFactory.seed = 9\n\ntest = \"\"\"\n你好啊,你是?\nhacker is not\n😄你🧯好🧯\n現在不方便接聽。\nIt is not convenient to answer the call now.\n今すぐ電話に出るのは不便です。\n지금 전화를받는 것은 편리하지 않습니다.\nअभी कॉल का उत्तर देना सुविधाजनक नहीं है।\nTidak nyaman untuk menjawab panggilan sekarang.\natau pihak berkuasa tempatan berkaitan larangan lawatan yang mungkin memberi kesan kepada anda\n\"\"\"\n\nif __name__ == '__main__':\n a = list(filter(lambda x: x != '', test.split('\\n')))\n for t 
in a:\n        print(t)\n        print(detect(t))\n        # t0 = time.perf_counter()\n        c = detect_langs(t)\n        print(c)\n        # print(time.perf_counter() - t0)\n","sub_path":"python笔记/aaa常用三方库笔记/语种识别/langdetect库实现.py","file_name":"langdetect库实现.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"36322404","text":"### -*- coding: UTF-8 -*-\n## 多线程发送摄像头数据\n## 2020--->2021.06 jack\nimport socketserver\nimport socket\nimport cv2\nimport numpy as np\nimport threading\nimport datetime\n\nIMAGESIZE = 921600\nIMAGEWIDTH = 1280\nIMAGEHEIGHT = 720\nFRAMELENGTH = 1024*2\n\n# 读取路径下的视频文件,以OpenCV打开\n# 当然也可以打开摄像头\n# 从摄像头采集图像\ncapture = cv2.VideoCapture(-1)\n#ret, frame = capture.read()\n# 创建服务器\nserver = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n# 获取到本机IP\ndef get_ip_address():\n    s = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)\n    s.connect((\"1.1.1.1\",80))\n    ipaddr=s.getsockname()[0]\n    s.close()\n    return ipaddr\n\n#define host ip: Rpi's IP\nHOST_IP=get_ip_address()\n#PCname = socket.gethostname()\n#IP = socket.gethostbyname(PCname)\nprint(HOST_IP)\n# 设置IP和端口号\nserver.bind((HOST_IP, 6800))\nserver.listen(1)\n\nprint('connecting...')\n\n\n# 多线程接收数据\n# socket接收为阻塞接收方式,阻断程序运行\n# 用多线程的方式来避免发生阻塞\nclass Receive_Thread(threading.Thread):\n    def __init__(self):\n        threading.Thread.__init__(self)\n        self.return_value = b'0'\n\n    def run(self):\n        while True:\n            # 每次接收一个字节的数据\n            self.return_value = cmd = conn.recv(1)\n\n    # 返回接收到的数据\n    def get_value(self):\n        return self.return_value\n\n\n\nwhile True:\n    # 等待客户端连接\n    # 阻塞方式,不连接不会执行下一步\n    # conn为新创建的socket对象\n    # 用于下边数据收发\n    conn, addr = server.accept()\n    print('收到请求')\n    print('客户端地址', addr)\n    # 创建数据接收线程\n    rec_thread = Receive_Thread()\n    rec_thread.start()\n    picCount=0\n    while True:\n        # 读取下一帧\n        ret, frame = capture.read()\n        # 数据类型为uint8\n        framed = cv2.resize(frame, (IMAGEWIDTH, IMAGEHEIGHT))\n        #framed = cv2.cvtColor(framed, cv2.COLOR_BGR2RGB)\n        has_sent = 0\n        rec_data = rec_thread.get_value()\n        # 打印接收到的控制指令\n        if rec_data != b'0':\n            print(rec_data)\n        # 发送图片,每次发送FRAMELENGTH字节(先把图像展平成字节流再按字节切片)\n        frame_bytes = framed.tobytes()\n        while has_sent < IMAGESIZE*3:\n            data_to_send = frame_bytes[has_sent: has_sent+FRAMELENGTH]\n            conn.send(data_to_send)\n            has_sent += FRAMELENGTH\n        currentTime=datetime.datetime.now().strftime('%H:%M:%S')\n        print(currentTime+\":\")\n        print(\"has send %d\" %has_sent)\n        picCount+=1\n        print(\"sent pic %d\" %picCount)\n        cv2.waitKey(100)\n        #cv2.imshow('RobotCam', framed)\n        break\n\ncapture.release()\n\n#到这里完成了TCPserver的部分,接下来在Qt中完成TCPClient的部分\n\n","sub_path":"robot/cam_serverNoshow_720p.py","file_name":"cam_serverNoshow_720p.py","file_ext":"py","file_size_in_byte":2779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"206160779","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# Objective: Parsing web links at different depths\n\n# import the required libraries\nimport requests\nfrom urllib.parse import urljoin\nfrom bs4 import BeautifulSoup\n\n# the website to scrape\nlink = \"https://www.courts.com.sg/sitemap\"\n\n# create beautiful soup object\nres = requests.get(link)\nsoup = BeautifulSoup(res.text, \"lxml\")\n\n# write the program logic\n# CAUTION: Executing the given logic will traverse the entire website and can take more than 2 minutes to complete.\nfor item in soup.select(\".nav-dropdown li a\"):\n    if \"#\" in item.get(\"href\"):continue #kick out invalid links\n    newlink = urljoin(link,item.get(\"href\"))\n    req = requests.get(newlink)\n    sauce = BeautifulSoup(req.text,\"lxml\")\n    for 
elem in sauce.select(\".product-item-info .product-item-link\"):\n print(elem.get_text(strip=True))\n\n# As we can see from the above logic, there is no way to limit the `crawling-depth`. For instance, in `R`, the library `Rcrawler` provides 'MaxDepth' so the crawler will go within a certain number of links from the homepage within that domain.\n# `Rcrawler(Website = \"https://stackoverflow.com/\", no_cores = 4, no_conn = 4, ExtractCSSPat = c(\"div\"), ****MaxDepth=5****)`\n# \n# So the obvious question is, \"how to limit the crawling depth when using Beautiful Soup?\".\n\n# Answer: There is no function in BeautifulSoup because BeautifulSoup is not crawler.\n# It only parses string with HTML so you could search in HTML.\n# \n# There is no function in requests because requests is no crawler too.\n# It only reads data from server so you could use it with BeautifulSoup or similar.\n# \n# If you use BeautifulSoup and request then you have to do all on your own - you have to build crawling system from scratch.\n# \n# `Scrapy` is a real crawler (or rather framework to build spiders and crawl network).\n# And it has option `DEPTH_LIMIT`\n\n","sub_path":"scripts/python/scrp_init_courts_website_scraper.py","file_name":"scrp_init_courts_website_scraper.py","file_ext":"py","file_size_in_byte":1879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"22431270","text":"#!/bin/python3\n\n#####################################\n# Day 23: BST Level-Order Traversal #\n#####################################\n\nimport sys\nimport time\n\nclass Node:\n def __init__(self,data):\n self.right=self.left=None\n self.data = data\nclass Solution:\n def insert(self,root,data):\n if root==None:\n return Node(data)\n else:\n if data<=root.data:\n cur=self.insert(root.left,data)\n root.left=cur\n else:\n cur=self.insert(root.right,data)\n root.right=cur\n return root\n \n def levelOrder(self,root):\n queue = list()\n curr = root\n if root == None:\n return\n queue.insert(0, curr)\n while len(queue) != 0:\n curr = queue.pop()\n print(curr.data, end=\" \")\n if curr.left:\n queue.insert(0, curr.left)\n if curr.right:\n queue.insert(0, curr.right)\n\n\nT=int(input())\nmyTree=Solution()\nroot=None\nfor i in range(T):\n data=int(input())\n root=myTree.insert(root,data)\nmyTree.levelOrder(root)\n","sub_path":"Tutorials/30 Days of Code/30-binary-trees.py","file_name":"30-binary-trees.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"600066092","text":"from model import Base, Product\nfrom model import Base, Cart\n\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\n\n\nengine = create_engine('sqlite:///database.db')\nBase.metadata.create_all(engine)\nDBSession = sessionmaker(bind=engine)\nsession = DBSession()\n\n\ndef add_product(name, price, picture_link, description):\n\n\tproduct_object = Product(\n\t\tname = name,\n\t\tprice = price,\n\t\tpicture_link = picture_link,\n\t\tdescription = description)\n\tsession.add(product_object)\n\tsession.commit()\n\ndef edit_product(id, name, price, picture_link, description):\n\n\tproduct_object = session.query(\n\t\tProduct).filter_by(\n\t\tid=id).first()\n\tproduct_object.name = name\n\tproduct_object.price = price\n\tproduct_object.picture_link = picture_link\n\tproduct_object.description = description\n\tsession.add(product_object)\n\tsession.commit()\n\ndef 
delete_product(name):\n\tsession.query(Product).filter_by(\n\t\tname = name).delete()\n\tsession.commit()\n\ndef return_all_products():\n\tproducts = session.query(Product).all()\n\treturn products\n\ndef query_by_id(their_id):\n\tproduct = session.query(Product).filter_by(id = their_id).first()\n\treturn product\n\ndef Add_To_Cart(productID):\n\tadd_to_cart = Cart(\n\t\tproductID = productID)\n\tsession.add(add_to_cart)\n\tsession.commit()\n\n\n# add_product(\"Granny Smith\", 75, 'GrannySmith.jpg', \"S O U R city!\")\n# add_product(\"Red Delicious\", 50, 'RedDelicious.jpg', \"i like to eat this apple becasue it is red\")\n# add_product(\"Golden Delicious\", 25, 'GoldenDelicious.jpg', \"i dont like this apple becasue it is yellow\")","sub_path":"databases.py","file_name":"databases.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"508315789","text":"import chainer\nfrom chainer import training, optimizers, dataset, serializers, iterators, cuda\nimport chainer.links as L\nimport chainer.functions as F\nfrom sklearn import metrics\nfrom chainer.training import extensions\nfrom chainer.dataset import concat_examples\nfrom chainer.datasets import cifar, split_dataset_random, TransformDataset\nfrom chainercv.transforms import resize\nimport os\nimport shutil\nimport cv2\n\nfrom sklearn import metrics\nfrom sklearn.metrics import accuracy_score\nfrom PIL import Image\nimport matplotlib as mpl\nmpl.use('Agg')\nimport random\nimport numpy as np\nimport cupy as cp\nimport cnnData\n\n\nclass AlexNet(chainer.Chain):\n def __init__(self, num_class=5, train=True):\n super(AlexNet, self).__init__()\n with self.init_scope():\n self.conv1 = L.Convolution2D(None, 96, 11, stride=2)\n self.conv2 = L.Convolution2D(None, 256, 5, pad=2)\n self.conv3 = L.Convolution2D(None, 384, 3, pad=1)\n self.conv4 = L.Convolution2D(None, 384, 3, pad=1)\n self.conv5 = L.Convolution2D(None, 256, 3, pad=1)\n self.fc6 = L.Linear(None, 4096)\n self.fc7 = L.Linear(None, 4096)\n self.fc8 = L.Linear(None, num_class)\n\n def __call__(self, x):\n h = F.max_pooling_2d(F.local_response_normalization(\n F.relu(self.conv1(x))), 3, stride=2)\n h = F.max_pooling_2d(F.local_response_normalization(\n F.relu(self.conv2(h))), 3, stride=2)\n h = F.relu(self.conv3(h))\n h = F.relu(self.conv4(h))\n h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=2)\n h = F.dropout(F.relu(self.fc6(h)))\n h = F.dropout(F.relu(self.fc7(h)))\n h = self.fc8(h)\n\n return h\n\n\nclass CNN(chainer.Chain):\n\n def __init__(self, num_class=len(cnnData.FIXED_LABEL)):\n super(CNN, self).__init__()\n\n # パラメータを持つ層の登録\n with self.init_scope():\n self.conv1 = L.Convolution2D(None, 40, 5) # 40,5 # 92*92\n #self.bnorm1 = L.BatchNormalization(64)\n self.conv2 = L.Convolution2D(None, 40, 5) # 42*42\n self.bnorm2 = L.BatchNormalization(40) # 40\n # self.conv3 = L.Convolution2D(None, 128, 3) # 17*17\n #self.bnorm3 = L.BatchNormalization(64)\n self.l1 = L.Linear(None, 256) # 256\n self.l2 = L.Linear(None, num_class)\n\n def __call__(self, x):\n # データを受け取った際のforward計算を書く\n h = F.relu(self.conv1(x))\n h = F.max_pooling_2d(h, 2) # 46*46\n h = F.relu(self.bnorm2(self.conv2(h)))\n h = F.max_pooling_2d(h, 2) # 21*21\n h = F.dropout(F.relu(self.l1(h)))\n y = self.l2(h)\n return y\n\n\ndef makeDataset(test_data_num=1, train_dataset=None, valid_dataset=None, test_dataset=None, dtype=np.int32, train_rate=1.0):\n # 1. 
Dataset\n if train_dataset is None or test_dataset is None:\n train_dataset, valid_dataset, test_dataset = cnnData.crossDataset(\n test_data_num=test_data_num, train_rate=train_rate)\n\n train = chainer.datasets.LabeledImageDataset(\n train_dataset, label_dtype=dtype)\n\n test = chainer.datasets.LabeledImageDataset(\n test_dataset, label_dtype=dtype)\n train = TransformDataset(train, transform)\n test = TransformDataset(test, transform)\n\n if valid_dataset is not None:\n valid = chainer.datasets.LabeledImageDataset(\n valid_dataset, label_dtype=dtype)\n valid = TransformDataset(valid, transform)\n print('Create valid data')\n else:\n valid = None\n\n return train, valid, test\n\n\ndef train(model_name, batchsize=128, gpu_id=1, max_epoch=20, train_dataset=None, valid_dataset=None, test_dataset=None, base_lr=0.01):\n print(model_name)\n dtype = np.int32\n print(dtype)\n\n accuracies = []\n confmats = []\n\n for test_data_num in range(5):\n network_object = CNN()\n if gpu_id >= 0:\n network_object.to_gpu(gpu_id)\n\n # 1. dataset\n train, valid, test = makeDataset(\n test_data_num=test_data_num, dtype=dtype, train_rate=1)\n\n # 2. Iterator\n train_iter = iterators.MultiprocessIterator(\n train, batchsize, n_processes=5)\n # temporarily feeding the test data in as the validation set; revert this later\n valid_iter = iterators.MultiprocessIterator(\n test, batchsize, False, False, n_processes=5)\n\n # 3. Model\n # the default loss is softmax_cross_entropy; tweak lossfun to change it\n net = L.Classifier(network_object, lossfun=F.softmax_cross_entropy)\n\n # 4. Optimizer\n # optimizer = optimizers.MomentumSGD(lr=base_lr).setup(net)\n optimizer = optimizers.Adam().setup(net)\n # optimizer.add_hook(chainer.optimizer.WeightDecay(0.0005)) # optional extra step\n\n # 5. Updater\n updater = training.StandardUpdater(\n train_iter, optimizer, device=gpu_id)\n\n # 6. Trainer\n trainer = training.Trainer(\n updater, (max_epoch, 'epoch'), out='../result/NN/%s/' % model_name)\n\n # 7. Trainer extensions\n trainer.extend(extensions.dump_graph(\n root_name=\"main/loss\", out_name=\"cg.dot\"))\n trainer.extend(extensions.observe_lr())\n trainer.extend(extensions.LogReport())\n trainer.extend(extensions.ProgressBar())\n trainer.extend(extensions.Evaluator(\n valid_iter, net, device=gpu_id), name='val')\n trainer.extend(extensions.PrintReport(\n ['epoch', 'main/loss', 'main/accuracy', 'val/main/loss', 'val/main/accuracy', 'elapsed_time', 'lr']))\n trainer.extend(extensions.PlotReport(\n ['main/loss', 'val/main/loss'], x_key='epoch', file_name='loss.png'))\n trainer.extend(extensions.PlotReport(\n ['main/accuracy', 'val/main/accuracy'], x_key='epoch', file_name='accuracy.png'))\n\n trainer.run()\n del trainer\n\n # 8. 
Evaluation\n test_iter = iterators.MultiprocessIterator(\n test, batchsize, False, False, n_processes=10)\n test_evaluator = extensions.Evaluator(test_iter, net, device=gpu_id)\n results = test_evaluator()\n\n serializers.save_npz('../result/NN/%s/%s%s.model' %\n (model_name, model_name, str(test_data_num).zfill(2)), net)\n\n accuracies.append(cuda.to_cpu(results['main/accuracy']))\n confmat = confusionMatrix(\n net, test, gpu_id=gpu_id)\n confmats.append(confmat)\n\n print('Summary')\n for i in range(len(accuracies)):\n print('Testdata{}:{:.04f}' .format(i, accuracies[i]))\n print('Accuracy average:{:.04f}'.format(np.mean(accuracies)))\n print('Confusion matrix')\n print(np.sum(confmats, axis=0))\n\n\ndef confusionMatrix(net, test, batchsize=128, gpu_id=0):\n '''\n output_dir_root = '../result/verification/'\n output_dir = ['smile_laugh/', 'laugh_smile/',\n 'standard_thinking/', 'thinking_standard/']\n for path in output_dir:\n if os.path.exists(output_dir_root + path):\n print('Deleting ' + output_dir_root + path)\n shutil.rmtree(output_dir_root + path)\n os.makedirs(output_dir_root + path)\n print('Created folders')\n '''\n\n if gpu_id >= 0:\n net.to_gpu(gpu_id)\n y_test = [] # predictions\n t_test = [] # ground-truth labels\n\n hoge, t_temp = zip(*test)\n # print(path[0:5])\n # print(t_temp[0:10])\n\n #test = chainer.datasets.LabeledImageDataset(test)\n #test = TransformDataset(test, transform)\n\n # Calc Confusion matrix\n test_iter = iterators.MultiprocessIterator(test, batchsize, False, False)\n while True:\n test_batch = test_iter.next()\n x_test, t_temp = concat_examples(test_batch, gpu_id)\n t_test.extend(t_temp)\n\n # forward the test data\n with chainer.using_config('train', False), \\\n chainer.using_config('enable_backprop', False):\n hoge = net.predictor(x_test)\n hoge = hoge.data\n hoge = cp.argmax(cp.asarray(hoge), axis=1)\n y_test.extend(hoge)\n if test_iter.is_new_epoch:\n test_iter.reset()\n break\n t_test = chainer.cuda.to_cpu(t_test)\n y_test = chainer.cuda.to_cpu(y_test)\n t_test = np.asarray(t_test)\n y_test = np.asarray(y_test)\n\n '''\n img_len =[0,0,0,0]\n # write out the misclassified images\n for i in range(len(y_test)):\n if t_test[i] == 0 and y_test[i] == 1: # labelled smile but predicted laugh\n img = cv2.imread(path[i])\n cv2.imwrite(output_dir_root +\n output_dir[0] + '%s.jpg' % str(img_len[0]).zfill(6), img)\n img_len[0] += 1 \n if t_test[i] == 1 and y_test[i] == 0: # labelled laugh but predicted smile\n img = cv2.imread(path[i])\n cv2.imwrite(output_dir_root +\n output_dir[1] + '%s.jpg' % str(img_len[1]).zfill(6), img)\n img_len[1] += 1 \n if t_test[i] == 3 and y_test[i] == 2: # labelled standard but predicted thinking\n img = cv2.imread(path[i])\n cv2.imwrite(output_dir_root +\n output_dir[2] + '%s.jpg' % str(img_len[2]).zfill(6), img)\n img_len[2] += 1\n if t_test[i] == 2 and y_test[i] == 3: # labelled thinking but predicted standard\n img = cv2.imread(path[i])\n cv2.imwrite(output_dir_root +\n output_dir[3] + '%s.jpg' % str(img_len[3]).zfill(6), img)\n img_len[3] += 1\n '''\n\n print(y_test.shape)\n print(t_test.shape)\n confmat = metrics.confusion_matrix(t_test, y_test)\n print('Confusion matrix')\n print(confmat)\n\n return confmat\n\n\ndef transform(input):\n '''\n img, label = input\n img = img / 255.0\n #label = label.reshape(-1)\n return img, label\n '''\n img, label = input\n img = img / 255.0\n img = np.asarray(img, dtype=np.float32)\n img = img[0]\n img = img.reshape(1, 96, 96)\n #label = label.reshape(-1)\n return img, label\n\n\nif __name__ == '__main__':\n # cnnData.standardCheck()\n\n model_name = 'CNN_normal'\n train(batchsize=128, 
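confusionMatrix() above delegates to sklearn's metrics.confusion_matrix. As a reading aid, a tiny standalone example: rows index the true labels, columns the predicted ones.

```python
# Minimal illustration of sklearn.metrics.confusion_matrix as used above.
from sklearn.metrics import confusion_matrix

t = [0, 0, 1, 1, 2, 2]  # ground truth
y = [0, 1, 1, 1, 2, 0]  # predictions
print(confusion_matrix(t, y))
# [[1 1 0]    row 0: one true-0 predicted 0, one predicted 1
#  [0 2 0]    row 1: both true-1 samples predicted 1
#  [1 0 1]]   row 2: one true-2 predicted 2, one predicted 0
```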
model_name=model_name, max_epoch=10,\n gpu_id=1, base_lr=0.01)\n","sub_path":"script/cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":10313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"433007483","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat May 1 22:20:17 2021\n\n@author: pepermatt94\n\"\"\"\n\nimport tkinter as tk\nimport pandas as pd\nfrom PIL import Image, ImageTk\nfrom tkinter.font import Font\nimport tkinter.scrolledtext as tkscrolled\nimport webbrowser\nimport os\nimport glob\nfrom tkinter import filedialog\nfrom tkinter.ttk import Progressbar\nfrom tkscrolledframe import ScrolledFrame\nfrom treview import treview\nimport initizializers as init\nimport SearchInRepo as search\nimport ActOnRepo as act\nimport tkinter.ttk as ttk\nimport numpy as np\nABOUT = \"\"\"\n\nThis is a prototype program for library management. \n\nThe author and the developer of the program claims only \nan honest regard on his own for his work and his time. \n\nEach development, modification or, even more, selling or \ndistribution of this product is free in a \ngentle agreement with the author and the law.\n\nThe program is completely open source, \nand the source code of the program can be found on github at page \nhttps://github.com/peppermatt94/Bibliotkinter .\n\nThere is no lucrative or secondary aim in this project, \nbut only the hope that the knowledge\nof God and the Gospel expands. \n\nsignature of the author:\npepermatt94\npep.94@libero.it .\n\"\"\"\ninit.init()\n\ndef bar():\n global OpenWindow\n l4=tk.Label(OpenWindow,text='Loading...',fg='white',bg=a)\n lst4=('Calibri (Body)',10)\n l4.config(font=lst4)\n l4.place(x=18,y=210)\n \n import time\n \n r=0\n for i in range(100):\n progress['value']=r\n OpenWindow.update_idletasks()\n time.sleep(0.03)\n r=r+1\n \n OpenWindow.destroy()\n \n Start_init(init.filenameREPO, init.filenameLOAN, init.variableString, init.credentials, REMOTE = init.REMOTE)\n \n \ndef about():\n aboutWin = tk.Toplevel()\n aboutWin.iconbitmap('Seminario_RM.ico')\n label2 = tk.Label(aboutWin, text = ABOUT).pack()\n\n \n\ndef Start_init(filenameREPO, filenameLOAN, variableString, credentials, REMOTE =\"No\"): \n global dataframe\n global dataframeLoan\n global window\n \n \n \n if REMOTE == \"Yes\":\n if not os.path.exists(\"Z:\\\\RMbiblio\"): os.makedirs(\"Z:\\\\RMbiblio\")\n Workdir = \"Z:\\\\RMbiblio\\\\\"\n else:\n Workdir = os.getcwd() +\"\\\\\"\n \n if not os.path.exists(Workdir+\".setup\\\\credentials.txt\"): \n init.initializerPWD(credentials,init.initializerREPO, new_win)\n \n #if not os.path.exists(Workdir+\".setup\\\\parameter.txt\"):\n # init.initializerREPO(filenameREPO, filenameLOAN)\n else:\n try: \n with open(Workdir+\".setup\\\\parameter.txt\", \"r\") as f:\n files = f.read()\n \n files = files.splitlines()\n if len(files)==1:\n \n \n files[0] = files[0].split(\",\")\n files = files[0]\n else:\n RepositorySelected = 0\n selector = RepositorySelected\n for i in range(len(files)):\n \n files[i]= files[i].split(\",\")\n files = files[RepositorySelected]\n \n \n init.filenameREPO = files[0]\n init.filenameLOAN = files[1]\n \n init.SpecialVar[\"Title\"] = files[2]\n init.SpecialVar[\"Author\"] = files[3]\n init.SpecialVar[\"Position\"] = files[4]\n #New variables\n \n \n \n init.dataframe=pd.read_csv(init.filenameREPO, sep = \";\", encoding = \"latin1\")\n init.dataframe = init.dataframe.replace(np.nan, '', regex=True)\n init.dataframe = init.dataframe.astype(str)\n init.dataframeLoan = 
pd.read_csv(init.filenameLOAN, sep = \";\", encoding = \"latin1\")\n init.dataframeLoan = init.dataframeLoan.replace(np.nan, '', regex=True)\n init.dataframeLoan = init.dataframeLoan.astype(str) \n except:\n init.initializerREPO(init.filenameREPO, init.filenameLOAN, new_win)\n\n \n new_win(init.dataframe, init.dataframeLoan, init.filenameREPO, init.credentials, init.variableString, init.window)\n\n\n \ndef new_win(dataframe, dataframeLoan, filenameREPO, credentials, variableString, PrecedentWindow):\n \n global window\n if PrecedentWindow != None:\n PrecedentWindow.destroy()\n window = tk.Tk()\n #New variables\n #breakpoint()\n init.window=window\n for i in dataframe.columns:\n variableString[f\"{i}\"] = tk.StringVar()\n #user and password variables\n credentials[\"USER\"] = tk.StringVar()\n credentials[\"PASSWORD\"] = tk.StringVar()\n window.title(\"RMbiblio0.1\")\n window.iconbitmap('Seminario_RM.ico')\n #window.geometry(\"1000x100\")\n window.grid_columnconfigure(0,weight=1)\n Volto_di_Cristo= Image.open(\"Volto.jpg\")\n Volto_di_Cristo = ImageTk.PhotoImage(Volto_di_Cristo)\n VoltoImage = tk.Label(image=Volto_di_Cristo).grid(row=0, column=4, columnspan =2, rowspan = 6)\n \n screen_width = window.winfo_screenwidth()\n screen_height = window.winfo_screenheight()\n x_coordinate = (screen_width/2)-(width_of_window/2)\n y_coordinate = (screen_height/2)-(height_of_window/2) \n \n #let's make the window responsive\n n_rows =10\n n_columns =6\n for i in range(n_rows):\n window.grid_rowconfigure(i, weight =1)\n for i in range(n_columns):\n window.grid_columnconfigure(i, weight =1)\n #requests fields of the GUI\n LabelField={}\n RequestsFields={}\n \n for i in range(len(dataframe.columns)):\n if dataframe.columns[i] != \"Available\":\n if i < 5:\n LabelField[f\"{dataframe.columns[i]}\"] = tk.Label(window, text = f\"{dataframe.columns[i]}\").grid(row=i, column=0) \n RequestsFields[f\"{dataframe.columns[i]}_entry\"] = tk.Entry( textvariable = variableString[f\"{dataframe.columns[i]}\"]).grid(row=i, column=1, sticky = 'WE', padx=10, pady=0)\n if i>4 and i<10:\n LabelField[f\"{dataframe.columns[i]}\"] = tk.Label(window, text = f\"{dataframe.columns[i]}\").grid(row=i-5, column=2) \n RequestsFields[f\"{dataframe.columns[i]}_entry\"] = tk.Entry( textvariable = variableString[f\"{dataframe.columns[i]}\"]).grid(row=i-5, column=3, sticky = 'WE', padx=10, pady=0)\n \n \n container = ttk.Frame(window)\n canvas = tk.Canvas(container, width=1200)\n scrollbar = ttk.Scrollbar(container,orient=\"vertical\")#.grid(row=0, column=20, rowspan = 20, sticky=tk.S + tk.E + tk.N)\n scrollbar.config( command = canvas.yview )\n #scrollbar.grid(row=0, column=20, rowspan = 20, sticky=tk.S + tk.E + tk.N)\n \n scrollable_frame = ttk.Frame(canvas)\n scrollable_frame.bind(\n \"<Configure>\",\n lambda e: canvas.configure(\n scrollregion=canvas.bbox(\"all\")\n )\n ) \n\n canvas.create_window((10, 10), window=scrollable_frame, anchor=\"nw\")\n\n canvas.configure(yscrollcommand=scrollbar.set)\n \n #for i in range(50):\n # tk.Label(scrollable_frame, text=\"Sample scrolling label\").grid(row = i, column=10)\n \n # container.grid(row = 8, column=0)\n # canvas.grid(row = 8, column=0)\n # scrollbar.grid(row=0, column=20, rowspan = 20, sticky=tk.S + tk.E + tk.N)\n\n \n #button of the gui\n \n searchButton = tk.Button( text = \"Search\", command=lambda: search.search(dataframe, dataframeLoan, scrollable_frame, container, canvas, scrollbar), height = 2, width = 15).grid(row = 6,column=0)\n Addbutton = tk.Button(text = \"Add to repository\", command = 
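The canvas/scrollbar wiring above is the classic tkinter scrollable-frame recipe (the bound event should be "<Configure>", which appears to have been stripped by extraction and is restored here). A self-contained version of just that pattern, for reference:

```python
# Self-contained scrollable-frame pattern: a Frame inside a Canvas,
# with <Configure> keeping the scrollregion in sync with the frame size.
import tkinter as tk
import tkinter.ttk as ttk

root = tk.Tk()
canvas = tk.Canvas(root)
scrollbar = ttk.Scrollbar(root, orient="vertical", command=canvas.yview)
inner = ttk.Frame(canvas)
inner.bind("<Configure>",
           lambda e: canvas.configure(scrollregion=canvas.bbox("all")))
canvas.create_window((0, 0), window=inner, anchor="nw")
canvas.configure(yscrollcommand=scrollbar.set)
for i in range(50):
    ttk.Label(inner, text="row %d" % i).pack()
canvas.pack(side="left", fill="both", expand=True)
scrollbar.pack(side="right", fill="y")
root.mainloop()
```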
lambda: init.openPWDrequest(act.add_book, credentials, init.dataframe, dataframeLoan, variableString, filenameREPO), height = 2, width = 15).grid(row=6,column=1)\n exploreButton = tk.Button(text = \"Explore Repository\", command =lambda : act.explore_repository(init.dataframe, variableString), height = 2, width = 15).grid(row=6,column=2)\n exploreButton = tk.Button(text = \"Explore Loan \\nRepository\", command =lambda: act.explore_loan_repository(init.dataframeLoan, init.StringOfRestitution), height = 2, width = 15).grid(row=6,column=3)\n onlineButton = tk.Button(text = \"Search online\", command =lambda: search.online_search(variableString), height = 2, width = 15).grid(row=6,column=4)\n #eliminateButton = tk.Button(text = \"Delete Book\", command = search_book).grid(row=5,column=2)\n #loanButton= tk.Button(text = \"Loan\", command = search_book).grid(row=6,column=2)\n \n #fileMenu\n \n menu = tk.Menu(window)\n window.config(menu=menu)\n fileMenu = tk.Menu(menu)\n menu.add_cascade(label=\"File\", menu=fileMenu)\n fileMenu.add_command(label=\"New Repository\", command =lambda: init.newRep(init.filenameREPO, init.filenameLOAN, new_win))\n fileMenu.add_command(label = \"Open Repository\", command = lambda: init.openRep(window, new_win))\n fileMenu.add_command(label = \"New Credentials\", command = init.newCredential)\n \n editMenu = tk.Menu(menu)\n menu.add_cascade(label=\"Option\", menu=editMenu)\n editMenu.add_command(label=\"Delete Book\", command =lambda: act.eliminateBook(init.dataframe,filenameREPO, variableString))\n editMenu.add_command(label=\"Loan book\",command = lambda: act.loan_id_insert(init.dataframe, init.dataframeLoan, variableString,filenameREPO, init.filenameLOAN))\n editMenu.add_command(label=\"Restitution\", command =lambda: act.restitution_win(dataframeLoan, dataframe, filenameREPO, init.filenameLOAN, init.StringOfRestitution))\n editMenu.add_command(label=\"Explore repository\", command =lambda: act.explore_repository(dataframe, variableString))\n editMenu.add_command(label=\"Explore loan repository\", command =lambda: act.explore_loan_repository(dataframeLoan, variableString))\n \n \n aboutMenu = tk.Menu(menu)\n menu.add_cascade(label=\"?\", menu=aboutMenu)\n aboutMenu.add_command(label = \"About\", command = about)\n \n \n \n window.mainloop()\n\n\n\n#Starting window: \nOpenWindow=tk.Tk()\nwidth_of_window = 427\nheight_of_window = 250\nscreen_width = OpenWindow.winfo_screenwidth()\nscreen_height = OpenWindow.winfo_screenheight()\nx_coordinate = (screen_width/2)-(width_of_window/2)\ny_coordinate = (screen_height/2)-(height_of_window/2)\nOpenWindow.geometry(\"%dx%d+%d+%d\" %(width_of_window,height_of_window,x_coordinate,y_coordinate))\nVolto_di_Cristo= Image.open(\"Seminario_RM.ico\")\nVolto_di_Cristo = ImageTk.PhotoImage(Volto_di_Cristo)\nVoltoImage = tk.Label(OpenWindow,image=Volto_di_Cristo).pack()\n\nOpenWindow.overrideredirect(1)\ns = tk.ttk.Style()\ns.theme_use('clam')\ns.configure(\"red.Horizontal.TProgressbar\", foreground='red', background='#4f4f4f')\nprogress=Progressbar(OpenWindow,style=\"red.Horizontal.TProgressbar\",orient=tk.HORIZONTAL,length=500,mode='determinate',)\n\n#############progressbar 33333333333333333333333333333\n\nprogress.place(x=-10,y=235)\n\na='#249794'\ntk.Frame(OpenWindow,width=427,height=241,bg=a).place(x=0,y=0) #249794\nStart=tk.Button(OpenWindow,width=10,height=1,text='Get Started',command= bar ,border=0,fg=a,bg='white')\nStart.place(x=170,y=200)\n\n\n######## 
Label\n\nl1=tk.Label(OpenWindow,text='RMbiblio',fg='white',bg=a)\nlst1=('Calibri (Body)',18,'bold')\nl1.config(font=lst1)\nl1.place(x=50,y=80)\n\nl2=tk.Label(OpenWindow,text='0.1',fg='white',bg=a)\nlst2=('Calibri (Body)',18)\nl2.config(font=lst2)\nl2.place(x=155,y=82)\n\nl3=tk.Label(OpenWindow,text='Library de Ciudad de Mexico',fg='white',bg=a)\nlst3=('Calibri (Body)',13)\nl3.config(font=lst3)\nl3.place(x=50,y=110)\n\nOpenWindow.mainloop()\n \n","sub_path":"RMbiblio.py","file_name":"RMbiblio.py","file_ext":"py","file_size_in_byte":11502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"58732170","text":"#~ Exercise 16: Reading and Writing Files- Learning Python the Hard Way 3rd ed.\n#~ url: http://learnpythonthehardway.org/book/ex16.html\n\nfrom sys import argv\n\nscript, filename = argv\n\nprint(\"We're going to erase %r.\" % filename)\nprint(\"If you don't want that, hit CTRL-C (^C).\")\nprint(\"If you do want that, hit Enter.\")\n\ninput(\"?\")\n\n#~ The 'w' allows the file to be opened in the 'write' format\nprint(\"Opening the file...\")\ntarget = open(filename, 'w')\n\nprint(\"Now I'm going to ask you for three lines to add to the file\")\n\nline1 = input(\"line 1: \")\nline2 = input(\"line 2: \")\nline3 = input(\"line 3: \")\n\nprint(\"I'm going to write these to the file.\")\n\n#~ 'Target' is the file variable and the 'write' function allows us to\n#~ add the 'input' for each line to the file (target). The '\\n' is\n#~ used to start a new line.\ntarget.write(line1)\ntarget.write(\"\\n\")\ntarget.write(line2)\ntarget.write(\"\\n\")\ntarget.write(line3)\ntarget.write(\"\\n\")\n\n#~ The 'close' function saves the file to disk\nprint(\"And finally, we close it.\")\ntarget.close()\n\n\n\n#~ What You Should See\n\n#~ There are actually two things you will see. First the output of your new script:\n\n#~ $ python ex16.py test.txt\n#~ We're going to erase 'test.txt'.\n#~ If you don't want that, hit CTRL-C (^C).\n#~ If you do want that, hit RETURN.\n#~ ?\n#~ Opening the file...\n#~ Truncating the file. Goodbye!\n#~ Now I'm going to ask you for three lines.\n#~ line 1: Mary had a little lamb\n#~ line 2: Its fleece was white as snow\n#~ line 3: It was also tasty\n#~ I'm going to write these to the file.\n#~ And finally, we close it.\n\n\n\n#~ Study Drill 2\n#~ See file ex16_StudyDrill2.py for a script that reads the file created\n#~ in this exercise.\n\n#~ Study Drill 3\n#~ See ex16_StudyDrill3 for a rewrite of the script so it has only one\n#~ 'target.write()' command.\n\n#~ Study Drill 4\n#~ Adding the 'w' parameter so that the opened file can be written to.\n#~ If no parameter is given it will be read-only ('r').\n\n#~ Study Drill 5\n#~ Do we need to use 'target.truncate()' if we add the 'w' parameter when\n#~ opening a file? No, we do not, because the 'w' is actually best thought\n#~ of as rewriting the file. 
If you want to add text then use\n#~ append ('a')\n\n\n\n","sub_path":"ex16.py","file_name":"ex16.py","file_ext":"py","file_size_in_byte":2191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"542545929","text":"import os\nimport subprocess\n\nfrom detectron2.data.datasets import register_coco_instances\n\n\ndef download(imageset, dataset):\n if not os.path.exists(f\"/content/{imageset}/\"):\n subprocess.call(\n f\"cp /content/gdrive/MyDrive/Share/4YPDatasets/{imageset}.zip /content/\",\n shell=True,\n )\n subprocess.call(f\"unzip {imageset}.zip > /dev/null\", shell=True)\n subprocess.call(f\"mkdir {imageset}\", shell=True)\n subprocess.call(f\"mv img* {imageset}/\", shell=True)\n if not os.path.exists(f\"/content/{dataset}_train.json\"):\n subprocess.call(\n f\"cp /content/gdrive/MyDrive/Share/4YPDatasets/{dataset}.zip /content/\",\n shell=True,\n )\n subprocess.call(f\"unzip {dataset}.zip > /dev/null\", shell=True)\n\n\ndef register(imageset, dataset):\n download(imageset, dataset)\n register_coco_instances(\n f\"{dataset}_train\",\n {},\n f\"/content/{dataset}_train.json\",\n f\"/content/{imageset}/\",\n )\n register_coco_instances(\n f\"{dataset}_val\",\n {},\n f\"/content/{dataset}_val.json\",\n f\"/content/{imageset}/\",\n )\n register_coco_instances(\n f\"{dataset}_test\",\n {},\n f\"/content/{dataset}_test.json\",\n f\"/content/{imageset}/\",\n )\n","sub_path":"Datasets.py","file_name":"Datasets.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"520940610","text":"import pytest\n\nfrom gitlabform.gitlabform import GitLabForm\nfrom gitlabform.gitlabform.test import create_group, create_project_in_group, get_gitlab, create_readme_in_project, \\\n GROUP_NAME\n\nPROJECT_NAME = 'branches_project'\nGROUP_AND_PROJECT_NAME = GROUP_NAME + '/' + PROJECT_NAME\n\n\n@pytest.fixture(scope=\"module\")\ndef gitlab(request):\n gl = get_gitlab()\n\n create_group(GROUP_NAME)\n create_project_in_group(GROUP_NAME, PROJECT_NAME)\n create_readme_in_project(GROUP_AND_PROJECT_NAME) # in master branch\n\n branches = ['protect_branch_but_allow_all', 'protect_branch_and_disallow_all',\n 'protect_branch_and_allow_merges', 'protect_branch_and_allow_pushes']\n for branch in branches:\n gl.create_branch(GROUP_AND_PROJECT_NAME, branch, 'master')\n\n def fin():\n # delete all created branches\n for branch_to_delete in branches:\n gl.delete_branch(GROUP_AND_PROJECT_NAME, branch_to_delete)\n\n request.addfinalizer(fin)\n return gl # provide fixture value\n\n\nprotect_branch_but_allow_all = \"\"\"\ngitlab:\n api_version: 4\n\nproject_settings:\n gitlabform_tests_group/branches_project:\n branches:\n protect_branch_but_allow_all:\n protected: true\n developers_can_push: true\n developers_can_merge: true\n\"\"\"\n\nprotect_branch_and_disallow_all = \"\"\"\ngitlab:\n api_version: 4\n\nproject_settings:\n gitlabform_tests_group/branches_project:\n branches:\n protect_branch_and_disallow_all:\n protected: true\n developers_can_push: false\n developers_can_merge: false\n\"\"\"\n\nmixed_config = \"\"\"\ngitlab:\n api_version: 4\n\nproject_settings:\n gitlabform_tests_group/branches_project:\n branches:\n protect_branch_and_allow_merges:\n protected: true\n developers_can_push: false\n developers_can_merge: true\n protect_branch_and_allow_pushes:\n protected: true\n developers_can_push: true\n developers_can_merge: false\n\"\"\"\n\nunprotect_branches = \"\"\"\ngitlab:\n 
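To make the Study Drill 5 answer above concrete: 'w' truncates the file on open, while 'a' appends to it. A quick demonstration (it writes a small scratch file in the current directory):

```python
# Demonstrates the 'w' vs 'a' distinction discussed in ex16.py above.
with open("scratch.txt", "w") as f:   # 'w' truncates: old contents are gone
    f.write("first\n")
with open("scratch.txt", "w") as f:
    f.write("second\n")               # replaces "first"
with open("scratch.txt", "a") as f:
    f.write("third\n")                # appended after "second"
print(open("scratch.txt").read())     # prints "second" then "third"
```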
api_version: 4\n\nproject_settings:\n gitlabform_tests_group/branches_project:\n branches:\n protect_branch_and_allow_merges:\n protected: false\n protect_branch_and_allow_pushes:\n protected: false\n\"\"\"\n\n\nclass TestBranches:\n\n def test__protect_branch_but_allow_all(self, gitlab):\n gf = GitLabForm(config_string=protect_branch_but_allow_all,\n project_or_group=GROUP_AND_PROJECT_NAME)\n gf.main()\n\n branch = gitlab.get_branch(GROUP_AND_PROJECT_NAME, 'protect_branch_but_allow_all')\n assert branch['protected'] is True\n assert branch['developers_can_push'] is True\n assert branch['developers_can_merge'] is True\n\n def test__protect_branch_and_disallow_all(self, gitlab):\n gf = GitLabForm(config_string=protect_branch_and_disallow_all,\n project_or_group=GROUP_AND_PROJECT_NAME)\n gf.main()\n\n branch = gitlab.get_branch(GROUP_AND_PROJECT_NAME, 'protect_branch_and_disallow_all')\n assert branch['protected'] is True\n assert branch['developers_can_push'] is False\n assert branch['developers_can_merge'] is False\n\n def test__mixed_config(self, gitlab):\n gf = GitLabForm(config_string=mixed_config,\n project_or_group=GROUP_AND_PROJECT_NAME)\n gf.main()\n\n branch = gitlab.get_branch(GROUP_AND_PROJECT_NAME, 'protect_branch_and_allow_merges')\n assert branch['protected'] is True\n assert branch['developers_can_push'] is False\n assert branch['developers_can_merge'] is True\n\n branch = gitlab.get_branch(GROUP_AND_PROJECT_NAME, 'protect_branch_and_allow_pushes')\n assert branch['protected'] is True\n assert branch['developers_can_push'] is True\n assert branch['developers_can_merge'] is False\n\n gf = GitLabForm(config_string=unprotect_branches,\n project_or_group=GROUP_AND_PROJECT_NAME)\n gf.main()\n\n branch = gitlab.get_branch(GROUP_AND_PROJECT_NAME, 'protect_branch_and_allow_merges')\n assert branch['protected'] is False\n\n branch = gitlab.get_branch(GROUP_AND_PROJECT_NAME, 'protect_branch_and_allow_pushes')\n assert branch['protected'] is False\n","sub_path":"gitlabform/gitlabform/test/test_branches.py","file_name":"test_branches.py","file_ext":"py","file_size_in_byte":4247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"47313384","text":"#!/usr/bin/env python\n# coding=utf-8\n\"\"\"\nFaster R-CNN\n\"\"\"\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torchvision.ops import MultiScaleRoIAlign\nfrom torch.jit.annotations import Dict, List, Tuple, Optional\nfrom torch import Tensor\nfrom collections import OrderedDict  # needed by TraceFasterRCNN.forward below\n\nfrom utils.pooler import NewMultiScaleRoIAlign\nfrom utils.generalized_polar_rcnn import GeneralizedRCNN\nfrom utils.rpn import AnchorGenerator, RPNHead, RegionProposalNetwork\nfrom utils.roi_head import RoIHeads\nfrom utils.transform import GeneralizedRCNNTransform\n\n\nfrom datasets import transforms as T\nfrom torchvision import transforms\n\ndef get_transform(train):\n \"\"\"\n Data augmentation ops\n train: a boolean flag selecting the train-time transforms vs. the val/test ones\n \"\"\"\n if not train:\n transform_list = [\n T.RandomCrop(datatype='val'),\n transforms.ToTensor(),\n transforms.Normalize(\n mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225]\n )\n ]\n return transforms.Compose(transform_list)\n else:\n transform_list = [\n T.RandomCrop(datatype='train'),\n T.ImgAugTransform(),\n T.ToTensor(),\n T.Normalize() \n ]\n return T.Compose(transform_list)\n \n\n\nclass FasterPolarRCNN(GeneralizedRCNN):\n\n def __init__(self, backbone, polar_net, mtype=None, num_classes=None,\n min_size=800, max_size=1333,\n 
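The module-scoped `gitlab` fixture feeding the tests above registers its cleanup via request.addfinalizer; the same lifecycle is often expressed with a yield fixture. A minimal sketch with placeholder resource and teardown, not tied to the gitlabform test helpers:

```python
# Two equivalent teardown styles for a module-scoped pytest fixture;
# the resource and cleanup bodies are illustrative placeholders.
import pytest

@pytest.fixture(scope="module")
def resource_finalizer(request):
    resource = object()        # stand-in for get_gitlab()/branch setup
    def fin():
        pass                   # stand-in for deleting the created branches
    request.addfinalizer(fin)  # runs after the last test in the module
    return resource

@pytest.fixture(scope="module")
def resource_yield():
    resource = object()
    yield resource             # tests run while suspended here
    # teardown code after the yield runs once the module's tests finish
```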
image_mean=None, image_std=None,\n rpn_anchor_generator=None, rpn_head=None,\n rpn_pre_nms_top_n_train=2000,\n rpn_pre_nms_top_n_test=1000,\n rpn_post_nms_top_n_train=2000,\n rpn_post_nms_top_n_test=1000,\n rpn_nms_thresh=0.7,\n rpn_fg_iou_thresh=0.7, rpn_bg_iou_thresh=0.3,\n rpn_batch_size_per_img=256,\n rpn_positive_fraction=0.5,\n box_roi_pool=None,\n box_head=None,\n box_predictor=None,\n box_fg_iou_thresh=0.5, box_bg_iou_thresh=0.5,\n box_batch_size_per_img=512,\n box_positive_fraction=0.25,\n box_score_thresh=0.05,\n box_nms_thresh=0.5,\n box_detections_per_img=100,\n # box_detections_per_img=20,\n bbox_reg_weights=None):\n \n if not hasattr(backbone, \"out_channels\"):\n raise ValueError(\"backbone should have the out_channels attr\")\n\n assert isinstance(rpn_anchor_generator, (AnchorGenerator, type(None)))\n # assert isinstance(box_roi_pool, (NewMultiScaleRoIAlign, type(None)))\n assert isinstance(box_roi_pool, (MultiScaleRoIAlign, type(None)))\n\n out_channels = backbone.out_channels\n\n if rpn_anchor_generator is None:\n anchor_sizes = ((32,),(64,),(128,),(256,),(512,))\n # anchor_sizes = ((32,64,128),(32,64,128),(64,128,256),(64,128,256),(128,256,512))\n aspect_ratios = ((0.5, 1.0, 2.0),) * len(anchor_sizes)\n # anchor_sizes = ((64,),(128,),(192,),(256,),(320,))\n # aspect_ratios = ((0.5, 1.0, 4.0),(0.5, 1.0, 3.5),(0.5, 1.0, 5.0),(0.5, 1.0, 3.0),(0.5, 1.0, 4.0))\n # anchor_sizes = ((128, 256, 512),)\n # aspect_ratios = ((0.5, 1.0, 2.0),)\n rpn_anchor_generator = AnchorGenerator(anchor_sizes,\n aspect_ratios)\n if rpn_head is None:\n rpn_head = RPNHead(out_channels,\n rpn_anchor_generator.num_anchors_per_location()[0])\n\n rpn_pre_nms_top_n = dict(\n training=rpn_pre_nms_top_n_train,\n testing=rpn_pre_nms_top_n_test\n )\n rpn_post_nms_top_n = dict(\n training=rpn_post_nms_top_n_train,\n testing=rpn_post_nms_top_n_test\n )\n rpn = RegionProposalNetwork(\n rpn_anchor_generator, rpn_head,\n rpn_pre_nms_top_n, rpn_post_nms_top_n,\n rpn_nms_thresh, rpn_fg_iou_thresh, rpn_bg_iou_thresh,\n rpn_batch_size_per_img, rpn_positive_fraction\n )\n \n if box_roi_pool is None:\n box_roi_pool = MultiScaleRoIAlign(\n featmap_names=[\"0\", \"1\", \"2\", \"3\"],\n output_size=7,\n sampling_ratio=2\n )\n # if box_roi_pool is None:\n # box_roi_pool = MultiScaleRoIAlign(\n # featmap_names=[\"3\"],\n # output_size=7,\n # sampling_ratio=2\n # )\n # if box_roi_pool is None:\n # box_roi_pool = MultiScaleRoIAlign(\n # featmap_names=[\"3\"],\n # output_size=7,\n # sampling_ratio=2\n # )\n # box_roi_pool = NewMultiScaleRoIAlign(\n # featmap_names=[\"0\", \"1\", \"2\", \"3\"],\n # output_size=7,\n # sampling_ratio=2\n # )\n if box_head is None:\n # spatial size of the pooled RoI features\n resolution = box_roi_pool.output_size[0]\n representation_size = 1024\n box_head = TwoMLHead(\n out_channels * resolution ** 2,\n representation_size\n )\n # box_head = NewTwoMLHead(\n # out_channels,\n # resolution,\n # representation_size\n # )\n\n if box_predictor is None:\n representation_size = 1024\n box_predictor = FastRCNNPredictor(\n representation_size,\n num_classes=num_classes\n )\n\n roi_heads = RoIHeads(\n box_roi_pool, box_head, box_predictor,\n box_fg_iou_thresh, box_bg_iou_thresh,\n box_batch_size_per_img, box_positive_fraction,\n box_score_thresh, box_nms_thresh,\n box_detections_per_img,\n bbox_reg_weights\n )\n\n if image_mean is None:\n image_mean = [0.485, 0.456, 0.406]\n if image_std is None:\n image_std = [0.229, 0.224, 0.225]\n transform = GeneralizedRCNNTransform(min_size, max_size,\n image_mean, image_std)\n 
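For the anchor_sizes/aspect_ratios configured above, torchvision-style anchor generators choose widths and heights so that h/w equals the aspect ratio while roughly preserving the area size**2. A quick computation under that assumed convention:

```python
# Anchor width/height per (size, aspect ratio), assuming the torchvision
# convention: h/w = ratio while the area stays close to size**2.
import math

size = 32
for ratio in (0.5, 1.0, 2.0):
    h = size * math.sqrt(ratio)
    w = size / math.sqrt(ratio)
    print("ratio %.1f -> w=%.1f h=%.1f" % (ratio, w, h))
# ratio 0.5 -> w=45.3 h=22.6
# ratio 1.0 -> w=32.0 h=32.0
# ratio 2.0 -> w=22.6 h=45.3
```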
super(FasterPolarRCNN, self).__init__(transform, backbone, polar_net, rpn, roi_heads, mtype=mtype)\n\n\nclass TwoMLHead(nn.Module):\n \n def __init__(self, in_channels, representation_size):\n super(TwoMLHead, self).__init__()\n self.fc6 = nn.Linear(in_channels, representation_size)\n self.fc7 = nn.Linear(representation_size, representation_size)\n\n def forward(self, x):\n x = x.flatten(start_dim=1)\n x = F.relu(self.fc6(x))\n x = F.relu(self.fc7(x))\n return x\n\n\nclass NewTwoMLHead(nn.Module):\n\n def __init__(self, in_channels, roi_size, representation_size):\n super(NewTwoMLHead, self).__init__()\n self.fc6 = nn.ModuleList()\n num_levels = 4\n for i in range(num_levels):\n self.fc6.append(\n nn.Sequential(\n nn.Linear(in_channels*roi_size**2, representation_size),\n nn.GroupNorm(32, representation_size, 1e-5),\n nn.ReLU(inplace=True)\n )\n )\n\n self.fc7 = nn.Sequential(\n nn.Linear(representation_size, representation_size),\n nn.GroupNorm(32, representation_size, 1e-5),\n nn.ReLU(inplace=True)\n )\n\n def forward(self, x):\n batch_size = x[0].shape[0]\n for i in range(len(x)):\n x[i] = self.fc6[i](x[i].view(batch_size, -1))\n for i in range(1, len(x)):\n x[0] = torch.max(x[0], x[i])\n x = x[0]\n x = self.fc7(x)\n return x\n\n\nclass FastRCNNPredictor(nn.Module):\n\n def __init__(self, in_channels, num_classes):\n super(FastRCNNPredictor, self).__init__()\n self.cls_score = nn.Linear(in_channels, num_classes)\n self.bbox_pred = nn.Linear(in_channels, num_classes*4)\n # nn.init.normal_(self.cls_score.weight, 0., 0.01)\n # nn.init.constant_(self.cls_score.bias, -2.0)\n\n def forward(self, x):\n if x.dim() == 4:\n assert list(x.shape[2:]) == [1, 1]\n x = x.flatten(start_dim=1)\n scores = self.cls_score(x)\n bbox_deltas = self.bbox_pred(x)\n\n return scores, bbox_deltas\n\n\nclass TraceFasterRCNN(FasterPolarRCNN):\n\n def forward(self, images, targets=None):\n\n original_image_sizes = torch.jit.annotate(List[Tuple[int, int]], [])\n for img in images:\n img_size = img.shape[-2:]\n assert len(img_size) == 2\n original_image_sizes.append((img_size[0], img_size[1]))\n images, targets = self.transform(images, targets)\n features = self.backbone(images.tensors)\n if isinstance(features, Tensor):\n features = OrderedDict([(\"0\", features)])\n proposals, proposal_losses = self.rpn(images, features, targets)\n detections, detector_losses = self.roi_heads(features, proposals,\n images.image_sizes,\n targets)\n detections = self.transform.postprocess(detections,\n images.image_sizes,\n original_image_sizes)\n\n # print(detections[0]['labels'].unsqueeze(1).shape, detections[0]['boxes'].shape)\n \n out = []\n for det in detections:\n tail = torch.stack([torch.ones_like(det['scores']), torch.zeros_like(det['scores']), torch.zeros_like(det['scores'])], dim=-1)\n # print(\"det['scores'].shape\", det['scores'].shape)\n # print('tail.shape',tail.shape)\n bw = det['boxes'][:, 2] - det['boxes'][:, 0]\n bh = det['boxes'][:, 3] - det['boxes'][:, 1]\n boxes = torch.stack([\n det['boxes'][:, 0] + bw/2,\n det['boxes'][:, 1] + bh/2,\n bw,\n bh\n ], dim=1)\n for i in range(det['boxes'].shape[0]):\n tail[i, det['labels'][i].long()] = det['scores'][i]\n out.append(torch.cat([boxes, tail], dim=1))\n \n return [torch.stack(out, dim=0), torch.zeros_like(out[0])]\n\nif __name__ == \"__main__\":\n from utils.backbone_utils import resnet_fpn_backbone, densenet_fpn_backbone\n import os\n os.environ['CUDA_VISIBLE_DEVICES'] = \"1\"\n pth_name = r'results\\polar_r50\\best.pth'\n # save_pt_name = 
pth_name.replace('.pth','.pt')\n bsize = 16\n isize = 1024\n save_pt_name = 'detector_densenet161_input%dx3x%dx%d.pt' % (bsize, isize, isize)\n backbone = densenet_fpn_backbone(\"densenet161\", True)\n model = TraceFasterRCNN(backbone, num_classes=3)\n model.trace_module = True\n model.load_state_dict(torch.load(pth_name, map_location='cpu'))\n print(model)\n T = get_transform(False)\n from PIL import Image\n x = T(Image.open(r'E:\\wei\\glandular1536_20211214\\images\\1M09.mrxs_x28635_y124183_side1536.jpg'))\n x= torch.stack([x for _ in range(bsize)], dim=0).cuda()\n print('x.shape',x.shape)\n model = model.eval().cuda()\n with torch.no_grad():\n # y=model(x)\n # print(type(y), y[0].shape)\n # exit()\n traced_script_module = torch.jit.trace(model, x, check_trace=False)\n traced_script_module.save(save_pt_name)\n y=traced_script_module(x)\n print(y[0])\n print(type(y), y[0].shape)\n # dis = y[0]-y1[0]\n # print(dis.mean(), dis.max(), dis.min())\n # import ipdb;ipdb.set_trace()\n\n","sub_path":"Faster RCNN/faster_polar_rcnn.py","file_name":"faster_polar_rcnn.py","file_ext":"py","file_size_in_byte":11662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"155081888","text":"'''\nCreated on Nov 7, 2016\n\n@author: AlexxanderX\n'''\nimport unittest\nfrom repository.ClientRepository import ClientRepository\nfrom repository.MovieRepository import MovieRepository\nfrom repository.RentalRepository import RentalRepository\nfrom controller.RentalController import RentalController\nfrom domain.Client import Client\nfrom domain.Movie import Movie\nfrom domain.Rental import Rental\nfrom tools.HistoryKeeper import HistoryKeeper\n\nclass Test(unittest.TestCase):\n \n def setUp(self):\n self.clientRepository = ClientRepository()\n self.movieRepository = MovieRepository()\n self.rentalRepository = RentalRepository()\n self.rentalController = RentalController(self.rentalRepository, self.clientRepository, self.movieRepository)\n \n historyKeeper = HistoryKeeper()\n self.rentalController.setHistoryKeeper(historyKeeper)\n historyKeeper.addController(self.rentalController)\n \n client = Client(\"Bob\", 1)\n self.clientRepository.add(client, False)\n \n movie = Movie(\"T\", \"D\", \"G\", 1)\n self.movieRepository.add(movie, False)\n\n def testAdd(self):\n result = self.rentalController.add(1, 1, [15,11,2016], [17,11,2016], 1)\n rental = Rental(1, 1, [15,11,2016], [17,11,2016], [-1,-1,-1], 1)\n \n self.assertEqual(result, 1)\n self.assertEqual(self.rentalController.get(1), rental)\n \n result = self.rentalController.add(2, 1, [15,11,2016], [17,11,2016], 1)\n self.assertEqual(result, -1)\n \n result = self.rentalController.add(1, 2, [15,11,2016], [17,11,2016], 1)\n self.assertEqual(result, -3)\n \n result = self.rentalController.add(1, 1, [15,11,2016], [14,11,2016], 1)\n self.assertEqual(result, -4)\n \n result = self.rentalController.add(1, 1, [18,11,2016], [19,11,2016], 1)\n self.assertEqual(result, -2)\n\n\nif __name__ == \"__main__\":\n #import sys;sys.argv = ['', 'Test.testName']\n unittest.main()","sub_path":"test/TestRentalController.py","file_name":"TestRentalController.py","file_ext":"py","file_size_in_byte":2008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"124305113","text":"#PF-Prac-1\r\n'''\r\nCreated on Mar 23, 2019\r\n\r\n@author: vijay.pal01\r\n'''\r\n\r\ndef add_string(str1):\r\n #start writing your code here\r\n n=len(str1)\r\n if(n<3):\r\n return str1\r\n if(str1.endswith(\"ing\")):\r\n 
str1+=\"ly\"\r\n else:\r\n str1+=\"ing\"\r\n return str1\r\n \r\n \r\n \r\n\r\n \r\n return str1\r\n\r\nstr1=\"com\"\r\nprint(add_string(str1))","sub_path":"PF/Day9/src/problem1.py","file_name":"problem1.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"113420782","text":"# -*- coding: utf-8 -*-\n#------------------------------------------------------------------\n# LEIA E PREENCHA O CABEÇALHO \n# NÃO ALTERE OS NOMES DAS FUNÇÕES\n# NÃO APAGUE OS DOCSTRINGS\n# NÃO INCLUA NENHUM import ...\n#------------------------------------------------------------------\n\n'''\n\n Nome: Joao Paulo G R Alves\n NUSP: 9390909\n\n Ao preencher esse cabeçalho com o meu nome e o meu número USP,\n declaro que todas as partes originais desse exercício programa (EP)\n foram desenvolvidas e implementadas por mim e que portanto não \n constituem desonestidade acadêmica ou plágio.\n Declaro também que sou responsável por todas as cópias desse\n programa e que não distribui ou facilitei a sua distribuição.\n Estou ciente que os casos de plágio e desonestidade acadêmica\n serão tratados segundo os critérios divulgados na página da \n disciplina.\n Entendo que EPs sem assinatura devem receber nota zero e, ainda\n assim, poderão ser punidos por desonestidade acadêmica.\n\n Abaixo descreva qualquer ajuda que você recebeu para fazer este\n EP. Inclua qualquer ajuda recebida por pessoas (inclusive\n monitores e colegas). Com exceção de material de MAC0110, caso\n você tenha utilizado alguma informação, trecho de código,...\n indique esse fato abaixo para que o seu programa não seja\n considerado plágio ou irregular.\n\n Exemplo:\n\n A monitora me explicou que eu devia utilizar a função int() quando\n fazemos leitura de números inteiros.\n\n A minha função quicksort() foi baseada na descrição encontrada na \n página https://www.ime.usp.br/~pf/algoritmos/aulas/quick.html.\n\n Descrição de ajuda ou indicação de fonte:\n\n'''\n\n#-------------------------------------------------------------------------- \n\ndef main():\n\n print(\"Testes da classe Pymagem\")\n # coloque os seus testes aqui\n \n#-------------------------------------------------------------------------- \n\nclass Pymagem:\n '''\n Implementação da classe Pymagem que tem o mesmo comportamento descrito \n no enunciado.\n '''\n\n def __init__(self, nlins, ncols, valor = 0):\n self.nlins = nlins\n self.ncols = ncols\n self.valor = valor\n self.linha = []\n self.matriz = []\n\n for i in range(self.nlins):\n for j in range(self.ncols):\n self.linha.append(self.valor)\n self.matriz.append(self.linha)\n self.linha = []\n \n def __str__(self):\n s = \"\"\n for i in range(self.nlins):\n for j in range(self.ncols):\n if type(self.matriz[i][j]) == int:\n if self.ncols - j == 1:\n s += '%d' % (self.matriz[i][j])\n else:\n s += '%d, ' % (self.matriz[i][j])\n else:\n if self.ncols - j == 1:\n s += '%0.1f' % (self.matriz[i][j])\n else:\n s += '%0.1f, ' % (self.matriz[i][j])\n s += '\\n'\n return s\n \n def __add__(self, other):\n soma_imagem = Pymagem(self.nlins, self.ncols)\n \n for i in range(self.nlins):\n for j in range(self.ncols):\n self.matriz[i][j] = self.matriz[i][j] + other.matriz[i][j]\n \n for i in range(self.nlins):\n for j in range(self.ncols):\n soma_imagem.put(i, j, self.matriz[i][j])\n \n return soma_imagem\n \n def __mul__(self, alfa):\n mult_imagem = Pymagem(self.nlins, self.ncols)\n \n for i in range(self.nlins):\n for j in range(self.ncols):\n 
mult_imagem.put(i, j, self.matriz[i][j]*alfa)\n \n return mult_imagem \n \n def size(self):\n dimensao = (self.nlins, self.ncols)\n return dimensao\n\n def get(self, lin, col):\n return self.matriz[lin][col]\n\n def put(self, lin, col, valor):\n self.matriz[lin][col] = valor\n \n def crop(self,*args):\n linha_recortada = []\n matriz_recortada = []\n \n if len(args) == 0:\n tlx, tly = 0, 0\n brx, bry = self.nlins, self.ncols\n else:\n tlx, tly = args[0], args[1]\n brx, bry = args[2], args[3]\n \n img_recortada = Pymagem(brx - tlx, bry - tly)\n \n for i in range(tlx, brx):\n for j in range(tly, bry):\n linha_recortada.append(self.matriz[i][j])\n matriz_recortada.append(linha_recortada)\n linha_recortada = []\n \n for i in range(len(matriz_recortada)):\n for j in range(len(matriz_recortada[0])):\n img_recortada.put(i, j, matriz_recortada[i][j])\n\n return img_recortada\n\n def paste(self, other, tlin, tcol):\n cola_imagem = Pymagem(self.nlins, self.ncols, self.valor)\n \n for i in range(tlin, other.nlins + tlin):\n for j in range(tcol, other.ncols + tcol):\n # copy the matching pixel of other, not just its first pixel\n self.matriz[i][j] = other.matriz[i - tlin][j - tcol]\n \n for i in range(self.nlins):\n for j in range(self.ncols):\n cola_imagem.put(i, j, self.matriz[i][j])\n \n def pinte_disco(self, lin, col, raio, val):\n disco_imagem = Pymagem(self.nlins, self.ncols)\n \n for i in range(self.nlins):\n for j in range(self.ncols):\n if (i - lin)**2 + (j - col)**2 < raio**2:\n self.matriz[i][j] = val\n \n for i in range(self.nlins):\n for j in range(self.ncols):\n disco_imagem.put(i, j, self.matriz[i][j])\n \n def pinte_retangulo(self, tlx, tly, brx, bry, val):\n retang_imagem = Pymagem(self.nlins, self.ncols)\n \n if tlx < 0:\n tlx = 0\n if tly < 0:\n tly = 0\n if brx > self.nlins:\n brx = self.nlins\n if bry > self.ncols:\n bry = self.ncols\n if brx < 0:\n brx = 0\n if bry < 0:\n bry = 0\n \n for i in range(tlx, brx):\n for j in range(tly, bry):\n self.matriz[i][j] = val\n \n for i in range(self.nlins):\n for j in range(self.ncols):\n retang_imagem.put(i, j, self.matriz[i][j])\n \n def get_mat(self):\n mat_back = []\n mat_back = self.matriz\n return (mat_back)\n \n\n#-------------------------------------------------------------------------- \n\nif __name__ == \"__main__\":\n main()\n ","sub_path":"pymagem.py","file_name":"pymagem.py","file_ext":"py","file_size_in_byte":6639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"119751692","text":"# coding: utf-8\nimport typing\n\nimport aiohttp\nimport requests\nimport serpyco\n\nfrom rolling.map.source import WorldMapSource\nfrom rolling.model.character import CharacterModel\nfrom rolling.model.character import CreateCharacterModel\nfrom rolling.model.zone import ZoneMapModel\n\n\nclass HttpClient(object):\n def __init__(self, server_address: str) -> None:\n self._server_address = server_address\n self._create_character_serializer = serpyco.Serializer(CreateCharacterModel)\n self._character_serializer = serpyco.Serializer(CharacterModel)\n self._characters_serializer = serpyco.Serializer(CharacterModel, many=True)\n self._zone_serializer = serpyco.Serializer(ZoneMapModel)\n\n def create_character(\n self, create_character_model: CreateCharacterModel\n ) -> CharacterModel:\n response = requests.post(\n \"{}/character\".format(self._server_address),\n json=self._create_character_serializer.dump(create_character_model),\n )\n response_json = response.json()\n return self._character_serializer.load(response_json)\n\n def get_character(self, character_id: str) -> 
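A toy walkthrough of the Pymagem API defined above (put/get/size/crop), useful for sanity-checking its coordinate conventions; the values are purely illustrative:

```python
# Toy usage of the Pymagem class defined above.
img = Pymagem(3, 4, 0)        # 3x4 image filled with 0
img.put(1, 2, 9)              # set one pixel
print(img.get(1, 2))          # 9
print(img.size())             # (3, 4)
sub = img.crop(0, 1, 2, 3)    # rows 0..1, cols 1..2 of the original
print(sub.size())             # (2, 2)
print(sub.get(1, 1))          # 9: the same pixel, in shifted coordinates
```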
CharacterModel:\n response = requests.get(\n \"{}/character/{}\".format(self._server_address, character_id)\n )\n response_json = response.json()\n return self._character_serializer.load(response_json)\n\n def get_zone(self, world_row_i: int, world_col_i: int) -> ZoneMapModel:\n response = requests.get(\n \"{}/zones/{}/{}\".format(self._server_address, world_row_i, world_col_i)\n )\n response_json = response.json()\n return self._zone_serializer.load(response_json)\n\n def get_zone_events_url(self, row_i: int, col_i: int) -> str:\n return f\"{self._server_address}/zones/{row_i}/{col_i}/events\"\n\n def get_zone_characters(\n self, world_row_i: int, world_col_i: int\n ) -> typing.List[CharacterModel]:\n response = requests.get(\n \"{}/zones/{}/{}/characters\".format(\n self._server_address, world_row_i, world_col_i\n )\n )\n response_json = response.json()\n return self._characters_serializer.load(response_json)\n\n def get_world_source(self) -> str:\n response = requests.get(\"{}/world/source\".format(self._server_address))\n return response.text\n","sub_path":"rolling/client/http/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"360165970","text":"from __future__ import print_function\nimport boto3\nimport json\nfrom boto3.dynamodb.conditions import Key, Attr\nfrom botocore.exceptions import ClientError\n\nprint('Loading function - Consultar hospede')\n_dynamodb = boto3.resource('dynamodb', region_name='us-east-1')\n\n# Tratamento da mensagem de retorno\ndef respond(err, res=None):\n return {\n 'statusCode': '400' if err else '200',\n 'body': err if err else res,\n 'headers': {\n 'Content-Type': 'application/json',\n },\n }\n\n# Limpar dados\ndef removeCampoVazio(dados):\n dados_limpos = dict((k, v) for k, v in dados.items() if v)\n return dados_limpos\n\n\n#Consultar Hospede\ndef consultarHospede(_id_hospede):\n print(_id_hospede)\n _table = _dynamodb.Table('Hospede')\n _consulta = _table.query(\n KeyConditionExpression=Key('id_hospede').eq(_id_hospede)\n )\n return _consulta\n\n\n# handler\ndef lambda_handler(event, context):\n\n # Limpar dados\n event = removeCampoVazio(event)\n\n #Validacao - id_hospede\n if 'id_hospede' not in event:\n return respond(\"O ID do Hóspede é obrigatório.\")\n else:\n _id_hospede = event['id_hospede']\n\n # Verifica se existe\n try:\n _item = consultarHospede(_id_hospede)\n except ClientError as e:\n _message_err = \"Erro ao efetuar a consulta: \" + e.response['Error']['Message']\n print(_message_err)\n return _message_err\n\n print(_item)\n if _item['Count'] > 0:\n print(_item['Items'][0])\n return respond(None, _item['Items'][0])\n else:\n return respond(\"Id inválido\")\n","sub_path":"1in - 12052017/HospedeGet.py","file_name":"HospedeGet.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"316021886","text":"class Polygon:\n pi = 3.14159265\n\n @ staticmethod\n def get_area(factors, sides, cfacts):\n data = []\n for name in sides:\n x = float(input('Enter ' + name + ': '))\n data.append(x)\n prod = 1\n for i in factors:\n prod *= data[i - 1]\n for i in cfacts:\n prod *= i\n return prod\n\nclass Square(Polygon):\n def __init__(self):\n self.a = Polygon.get_area([1,1], ['side'], [])\n\n\nclass Circle(Polygon):\n def __init__(self):\n self.a = Polygon.get_area([1,1], ['radius'], [Polygon.pi])\n\n\nclass Sphere(Polygon):\n def 
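HttpClient above relies on serpyco to map dataclasses to and from JSON dicts. A minimal round-trip with a stand-in dataclass; the real CharacterModel lives in rolling.model.character and is not reproduced here, so the fields below are illustrative:

```python
# Minimal serpyco round-trip, mirroring how HttpClient above dumps/loads
# its request and response models. The dataclass fields are placeholders.
import dataclasses

import serpyco

@dataclasses.dataclass
class ToyCharacter:
    id: str
    name: str

serializer = serpyco.Serializer(ToyCharacter)
data = serializer.dump(ToyCharacter(id="42", name="Arthur"))
print(data)                   # {'id': '42', 'name': 'Arthur'}
print(serializer.load(data))  # ToyCharacter(id='42', name='Arthur')
```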
__init__(self):\n self.a = Polygon.get_area([1,1,1], ['radius'], [Polygon.pi, 4/3])\n\n\nprint('The area of a square...')\ns1 = Square()\nprint('The area of the square is', s1.a)\n\nprint('The area of a circle...')\nc1 = Circle()\nprint('The area of a circle is', c1.a)\n\nprint('The volume of a sphere...')\nsph = Sphere()\nprint('The volume of the sphere is', sph.a)\n\n","sub_path":"Archive/poly.py","file_name":"poly.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"213906393","text":"from django.shortcuts import render\n\n\ndef decode(ciphertext, key):\n key_length = len(key)\n key_as_int = [ord(i) for i in key]\n ciphertext_int = [ord(i) for i in ciphertext]\n msg = ''\n for i in range(len(ciphertext_int)):\n value = (ciphertext_int[i] - key_as_int[i % key_length]) % 26\n msg += chr(value + 65)\n return msg\n\n# Create your views here.\n\ndef decode_view(request):\n if request.method == 'POST':\n context = {'result': None}\n\n key, message = request.POST['Key'], request.POST['Message']\n\n context['result'] = decode(message, key)\n\n return render(request, 'result.html', context=context)\n\n else:\n return render(request, 'decode.html')\n","sub_path":"assignments/003/003-code/crypto/decode/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"2256120","text":"import sys, os\nimport pickle\nimport numpy as np\nimport matplotlib.pylab as plt\n\ndef sigmoid(x):\n return 1 / (1 + np.exp(-x))\n\ndef softmax(a):\n c = np.max(a)\n exp_a = np.exp(a - c)\n sum_exp_a = np.sum(exp_a)\n y = exp_a / sum_exp_a\n return y\n\ndef get_data():\n (x_train, t_train), (x_test, t_test) = \\\n load_mnist(normalize=True, flatten=True, one_hot_label=False)\n return x_test, t_test\n\ndef init_network():\n with open(\"./code_given/ch03/sample_weight.pkl\", 'rb') as f:\n network = pickle.load(f)\n return network\n\ndef predict(network, x):\n W1,W2,W3 = network['W1'],network['W2'],network['W3']\n b1,b2,b3 = network['b1'],network['b2'],network['b3']\n\n a1 = np.dot(x, W1) + b1\n z1 = sigmoid(a1)\n a2 = np.dot(z1, W2) + b2\n z2 = sigmoid(a2)\n a3 = np.dot(z2, W3) + b3\n y = softmax(a3)\n return y\n\ndef forward(network, x_set, t_set, acc_cnt):\n for (x,t) in zip(x_set, t_set):\n y = predict(network, x)\n p = np.argmax(y)\n if p == t:\n acc_cnt += 1\n return acc_cnt\n\ndef forward_batch(network, x_set, t_set, acc_cnt, bat_size):\n for i in range(0, len(x_set), bat_size):\n x_batch = x_set[i:i+bat_size]\n y_batch = predict(network, x_batch)\n p = np.argmax(y_batch, axis=1)\n acc_cnt += np.sum(p == t_set[i:i+bat_size])\n return acc_cnt\n\nif __name__ == '__main__':\n sys.path.append(os.path.curdir + '/code_given')\n from dataset.mnist import load_mnist\n\n accuracy_cnt = 0\n batch_size = 100\n x_set, t_set = get_data()\n network = init_network()\n #accuracy_cnt = forward(network, x_set, t_set, accuracy_cnt)\n accuracy_cnt = forward_batch(network, x_set, t_set, accuracy_cnt, batch_size)\n\n print(\"Accuracy:\" + str(float(accuracy_cnt) / len(x_set)))\n","sub_path":"handwritten_recognition.py","file_name":"handwritten_recognition.py","file_ext":"py","file_size_in_byte":1796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"20454086","text":"from django.test import TestCase, RequestFactory\nfrom django_cradmin.crinstance import 
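decode() above undoes a Vigenere-style shift over uppercase A-Z; the matching encoder simply adds the key instead of subtracting it. A round-trip sketch: encode() is a hypothetical counterpart that is not part of the original views module, and decode() is restated only to keep the snippet self-contained.

```python
# Round-trip check for the Vigenere-style decode() above (uppercase A-Z only).
# encode() is a hypothetical counterpart, not part of the original module.
def encode(plaintext, key):
    out = ''
    for i, ch in enumerate(plaintext):
        shift = ord(key[i % len(key)]) - 65          # key letter as 0..25
        out += chr((ord(ch) - 65 + shift) % 26 + 65)
    return out

def decode(ciphertext, key):
    msg = ''
    for i, ch in enumerate(ciphertext):
        value = (ord(ch) - ord(key[i % len(key)])) % 26
        msg += chr(value + 65)
    return msg

print(encode("ATTACK", "KEY"))                  # KXRKGI
print(decode(encode("ATTACK", "KEY"), "KEY"))   # ATTACK
```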
reverse_cradmin_url\nimport htmls\nfrom devilry.project.develop.testhelpers.corebuilder import UserBuilder, PeriodBuilder\n\n\nclass TestContactApp(TestCase):\n def setUp(self):\n self.testuser = UserBuilder('testuser').user\n self.factory = RequestFactory()\n self.periodbuilder = PeriodBuilder.quickadd_ducku_duck1010_active()\n self.assignmentbuilder = self.periodbuilder.add_assignment('testassignment')\n self.groupbuilder = self.assignmentbuilder.add_group()\n self.groupbuilder.add_deadline_in_x_weeks(weeks=1)\n self.groupbuilder.add_students(self.testuser)\n\n def _get_as(self, username):\n self.client.login(username=username, password='test')\n return self.client.get(reverse_cradmin_url(\n 'devilry_student_group', 'contact', roleid=self.groupbuilder.group.id))\n\n def test_no_examiners(self):\n response = self._get_as('testuser')\n selector = htmls.S(response.content)\n self.assertFalse(selector.exists('#devilry_student_group_contact_hasexaminers'))\n self.assertTrue(selector.exists('#devilry_student_group_contact_noexaminers'))\n\n def test_single_examiner(self):\n self.groupbuilder.add_examiners(UserBuilder('testexaminer').user)\n response = self._get_as('testuser')\n selector = htmls.S(response.content)\n self.assertTrue(selector.exists('#devilry_student_group_contact_hasexaminers'))\n self.assertFalse(selector.exists('#devilry_student_group_contact_noexaminers'))\n self.assertEquals(selector.count('#devilry_student_group_contact_hasexaminers ul li'), 1)\n\n def test_multiple_examiners(self):\n self.groupbuilder.add_examiners(\n UserBuilder('testexaminer1').user,\n UserBuilder('testexaminer2').user)\n response = self._get_as('testuser')\n selector = htmls.S(response.content)\n self.assertTrue(selector.exists('#devilry_student_group_contact_hasexaminers'))\n self.assertFalse(selector.exists('#devilry_student_group_contact_noexaminers'))\n self.assertEquals(selector.count('#devilry_student_group_contact_hasexaminers ul li'), 2)\n\n def test_render_examiner_has_fullname(self):\n self.groupbuilder.add_examiners(UserBuilder('testexaminer', full_name='Test Examiner').user)\n response = self._get_as('testuser')\n selector = htmls.S(response.content)\n self.assertEquals(\n selector.one('#devilry_student_group_contact_hasexaminers ul li').alltext_normalized,\n 'Test Examiner ')\n\n def test_render_examiner_no_fullname(self):\n self.groupbuilder.add_examiners(UserBuilder('testexaminer').user)\n response = self._get_as('testuser')\n selector = htmls.S(response.content)\n self.assertEquals(\n selector.one('#devilry_student_group_contact_hasexaminers ul li').alltext_normalized,\n 'testexaminer ')\n\n def test_render_examiner_email_href(self):\n self.groupbuilder.add_examiners(UserBuilder('testexaminer').user)\n response = self._get_as('testuser')\n selector = htmls.S(response.content)\n self.assertEquals(\n selector.one('#devilry_student_group_contact_hasexaminers ul li a')['href'],\n 'mailto:testexaminer@example.com?subject=duck1010.active.testassignment%20-%20testuser')\n\n def test_404_if_anonymous_assignment(self):\n self.assignmentbuilder.update(anonymous=True)\n response = self._get_as('testuser')\n self.assertEquals(response.status_code, 404)\n","sub_path":"devilry/devilry_student/tests/cradmin_group/test_contactapp.py","file_name":"test_contactapp.py","file_ext":"py","file_size_in_byte":3676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"222384804","text":"from flask import Flask, render_template, flash, redirect, request\n#import 
urllib2\nfrom flask.ext.sqlalchemy import SQLAlchemy\n\n\n\napp = Flask(__name__)\napp.config.from_object('config')\ndb = SQLAlchemy(app)\n\nimport itemDb\n\n\n@app.route('/')\n@app.route('/index')\ndef index():\n\treturn render_template('index.html',\n\t\t\t\t\t\t\ttitle='Home')\n\n@app.route('/ore', methods = ['GET', 'POST'])\ndef ore():\n\tif request.method == 'POST':\n\t\tore = request.form['ore']\n\t\treturn render_template('ore.html',\n\t\t\t\t\t\t\ttitle='ore Calc',\n\t\t\t\t\t\t\tore=ore)\n\telse:\n\t\treturn render_template('ore.html',\n\t\t\t\t\t\t\t\ttitle='Ore Calc')\n\n@app.route(\"/buyback\", methods = ['GET', 'POST'])\ndef buyback():\n\tif request.method == 'POST':\n\t\tprice = int(request.form['price'])\n\t\tprice = price - (price * .1)\n\t\treturn render_template('buyback.html',\n\t\t\t\t\t\t\t\ttitle='Buyback Calc',\n\t\t\t\t\t\t\t\tprice=price)\n\telse:\n\t\treturn render_template('buyback.html',\n\t\t\t\t\t\t\t\ttitle='Buyback Calc')\n\n@app.route(\"/shipping\", methods = ['GET', 'POST'])\ndef shipping():\n\tif request.method == 'POST':\n\t\tprices = {'flatFee': int(request.form['flatFee']), 'm3Cost': int(request.form['m3Cost']), 'totalM3': int(request.form['totalM3']),'commission': int(request.form['commission']), 'collateral': int(request.form['collateral'])}\n\t\t#print(flatFee + \" \" + m3Cost + \" \" + totalM3 + \" \" + commission + \" \" + collateral)\n\t\tcontractCost = prices['flatFee'] + (prices['totalM3'] * prices['m3Cost']) + (prices['collateral'] / prices['commission'])\n\t\treturn render_template('shipping.html',\n\t\t\t\t\t\t\t\tcontractCost=contractCost,\n\t\t\t\t\t\t\t\tprices=prices,\n\t\t\t\t\t\t\t\ttitle='Shipping Calculator')\n\telse:\n\t\tprices = {'flatFee': 50000000, 'm3Cost': 400, 'totalM3': 0,'commission': 10, 'collateral': 0}\n\t\treturn render_template('shipping.html',\n\t\t\t\t\t\t\t\tprices=prices,\n\t\t\t\t\t\t\t\ttitle='Shipping Calculator')\n\n\n\n\n\n@app.route(\"/yield\", methods = ['GET', 'POST'])\ndef mineYield():\n if request.method == 'POST':\n payload = None\n error = None\n stats = {'m3': int(request.form['m3']), 'time': int(request.form['time']), 'turrets': int(request.form['turrets'])}\n if stats['time'] == 0:\n error = 'Time can not be 0'\n else:\n payload = ((60 * 60) / stats['time']) * (stats['turrets'] * stats['m3'])\n return render_template('yield.html', title='Mining Yield', stats=stats, error=error, payload=payload)\n else:\n stats = {'m3': 0, 'time': 165, 'turrets': 1}\n return render_template('yield.html', title='Mining Yield', stats=stats)\n\n\n\n@app.route(\"/dbtest\", methods = ['GET', 'POST'])\ndef dbtest():\n db = itemDb.Item.query.get(350090)\n if db == None:\n db = \"Item not found.\"\n\n\n return render_template('dbtest.html', db=db)\n\n@app.route(\"/pricecheck\", methods = ['GET', 'POST'])\ndef pricecheck():\n if request.method == 'POST':\n rawInput = request.form['rawInput']\n return render_template('pricecheck.html', rawInput=rawInput)\n#name quantity group volume\n return render_template('pricecheck.html')\n\n","sub_path":"routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":3004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"164933087","text":"import os\nimport csv\n\ndef read_palette_from_file(palette_filename='CHROMA.MAP.csv'):\n my_dict = dict()\n my_dir = os.path.dirname(os.path.realpath(__file__))\n with open (my_dir+'/'+palette_filename,'r') as f:\n csv_reader = csv.reader(f)\n for row in csv_reader:\n key 
= int(row[0])\n value = (int(row[1]),int(row[2]),int(row[3]))\n my_dict[key] = value\n print (my_dict)\n return my_dict\n","sub_path":"palette_import_lib.py","file_name":"palette_import_lib.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"521585860","text":"import io\nfrom PIL import Image, ImageTk\n\nfrom rtsp import Connection\n\nclass SessionListener:\n '''Interface for listener methods for session events.'''\n def exception_thrown(self, exception):\n pass\n\n def frame_received(self, frame):\n pass\n\n def video_name_changed(self, name):\n pass\n\nclass VideoFrame:\n def __init__(self, payload_type, marker, sequence_number, timestamp, payload):\n '''Creates a new frame.\n\t- payload_type: The numeric type of payload found in the frame. The most\n\t common type is 26 (JPEG).\n\t- marker: An indication if the frame is an important frame when compared\n\t to other frames in the stream.\n\t- sequence_number: A sequential number corresponding to the ordering of the\n\t frame. This number is expected to start at 0 (zero) and increase by one\n for each frame following that.\n\t- timestamp: The number of milliseconds after the logical start of the\n\t stream when this frame is expected to be played.\n\t- payload: A byte array containing the payload (contents) of the frame.\n '''\n self.payload_type = payload_type\n self.marker = marker\n self.sequence_number = sequence_number\n self.timestamp = timestamp\n self.payload = payload\n\n def get_image(self):\n '''Creates an Image based on the payload of the frame.'''\n image = Image.open(io.BytesIO(self.payload))\n return ImageTk.PhotoImage(image)\n \nclass Session:\n def __init__(self, address):\n '''Creates a new RTSP session. This constructor will also create a\n new network connection with the server. No stream setup is\n established at this point.\n '''\n self.connection = Connection(self, address)\n self.video_name = None\n self.listeners = []\n\n def add_listener(self, listener):\n '''Adds a new listener interface to be called every time a session\n\tevent (such as a change in video name or a new frame)\n\thappens. Any interaction with user interfaces is done through\n\tthese listeners.\n '''\n self.listeners.append(listener)\n listener.video_name_changed(self.video_name)\n\n def open(self, video_name):\n '''Opens a new video file in the interface.'''\n try:\n self.video_name = video_name\n self.connection.setup()\n for l in self.listeners:\n l.video_name_changed(video_name)\n except Exception as exception:\n self.handle_exception(exception)\n\n def play(self):\n '''Starts to play the existing file. It should only be called once a\n\tfile has been opened. This function will return immediately\n\tafter the request was responded. Frames will be received in\n\tthe background and will be handled by the process_frame\n\tmethod. If the video has been paused previously, playback will\n\tresume where it stopped.\n '''\n try:\n self.connection.play()\n except Exception as exception:\n self.handle_exception(exception)\n\n def pause(self):\n '''Pauses the playback the existing file. It should only be called\n\tonce a file has started playing. This function will return\n\timmediately after the request was responded. 
The server might\n\tstill send a few frames before stopping the playback\n\tcompletely.\n '''\n try:\n self.connection.pause()\n except Exception as exception:\n self.handle_exception(exception)\n\n def teardown(self):\n '''Closes the currently open file. It should only be called once a\n\tfile has been open.\n '''\n try:\n self.connection.teardown()\n self.video_name = None\n for l in self.listeners:\n l.frame_received(None)\n l.video_name_changed(None)\n except Exception as exception:\n self.handle_exception(exception)\n\n def close(self):\n '''Closes the connection with the current server. This session element\n\tshould not be used anymore after this point.\n '''\n try:\n self.connection.close()\n for l in self.listeners:\n l.video_name_changed(None)\n l.frame_received(None)\n except Exception as exception:\n self.handle_exception(exception)\n\n def handle_exception(self, exception):\n '''Helper function that notifies the main window that an exception has\n happened.\n '''\n for l in self.listeners:\n l.exception_thrown(exception)\n \n def process_frame(self, payload_type, marker, sequence_number, timestamp, payload):\n '''Creates and processes a frame received from the RTSP server. This\n\tmethod will direct the frame to the user interface to be\n\tprocessed and presented to the user. A description of the\n\tparameters can be found in the VideoFrame class comments.\n '''\n frame = VideoFrame(payload_type, marker, sequence_number, timestamp, payload)\n if self.video_name:\n for l in self.listeners:\n l.frame_received(frame)\n","sub_path":"RTSPClientPython/session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":5048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} {"seq_id":"648056106","text":"from tests.test_limesurvey import TestBase\nfrom limesurveyrc2api.limesurvey import LimeSurveyError\n\n\nclass TestSurveys(TestBase):\n\n def test_list_surveys_success(self):\n \"\"\"A valid request for list of surveys should not return empty.\"\"\"\n result = self.api.survey.list_surveys()\n for survey in result:\n self.assertIsNotNone(survey.get('sid'))\n\n def test_list_surveys_failure(self):\n \"\"\"An invalid request for list of surveys should raise an error.\"\"\"\n with self.assertRaises(LimeSurveyError) as ctx:\n self.api.survey.list_surveys(username=\"not_a_user\")\n self.assertIn(\"Invalid user\", ctx.exception.message)\n\n def test_list_questions_success(self):\n \"\"\"Listing questions for a survey should return a question list.\"\"\"\n result = self.api.survey.list_questions(survey_id=self.survey_id)\n for question in result:\n self.assertEqual(self.survey_id, question[\"sid\"])\n self.assertIsNotNone(question[\"gid\"])\n self.assertIsNotNone(question[\"qid\"])\n\n def test_list_questions_failure(self):\n \"\"\"Listing questions for an invalid survey should return an error.\"\"\"\n with self.assertRaises(LimeSurveyError) as ctx:\n self.api.survey.list_questions(self.survey_id_invalid)\n self.assertIn(\"Error: Invalid survey ID\", ctx.exception.message)\n\n def test_delete_survey_success(self):\n \"\"\" Deleting a survey should return status OK. 
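A disposable survey is imported from a fixture first, so the delete has a fresh target. 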
\"\"\"\n s = 'tests/fixtures/a_rather_interesting_questionnaire_for_testing.lss'\n new_survey_id = self.api.survey.import_survey(s, new_name='delete_me')\n result = self.api.survey.delete_survey(new_survey_id)\n self.assertEqual(\"OK\", result[\"status\"])\n\n def test_export_responses_success_different_document_types(self):\n \"\"\" Should return requested file as base64 encoded string. \"\"\"\n for extension in ['pdf', 'csv', 'xls', 'doc', 'json']:\n result = self.api.survey.export_responses(\n self.survey_id, document_type=extension)\n self.assertIs(type(result), str)\n\n # TODO: add tests for other parameters of export_responses\n\n def test_import_survey_success_lss(self):\n \"\"\" Importing a survey should return the id of the new survey. \"\"\"\n valid_files = [\n 'tests/fixtures/a_rather_interesting_questionnaire_for_testing.lss',\n 'tests/fixtures/an_other_questionnaire_different_fileformat.lsa',\n 'tests/fixtures/same_questionnaire_different_fileformat.txt'\n ]\n new_survey_ids = [] # for deleting after test\n for file in valid_files:\n new_name = 'copy_test_%s' % file[-3:]\n result = self.api.survey.import_survey(file, new_name)\n self.assertIs(int, type(result))\n new_survey_ids.append(result)\n for new_survey_id in new_survey_ids: # delete new surveys\n self.api.survey.delete_survey(new_survey_id)\n\n def test_import_survey_failure_invalid_file_extension(self):\n \"\"\" Survey with invalid file extension should raise an error. \"\"\"\n invalid = 'tests/fixtures/same_questionnaire_different_fileformat.xml'\n with self.assertRaises(LimeSurveyError) as ctx:\n self.api.survey.import_survey(invalid)\n self.assertIn(\"Invalid extension\", ctx.exception.message)\n\n def test_activate_survey_success(self):\n \"\"\" In case of success result of activation as array is returned. \"\"\"\n non_active_survey_path = (\n 'tests/fixtures/same_questionnaire_different_fileformat.txt')\n non_active_survey_id = self.api.survey.import_survey(\n non_active_survey_path)\n result = self.api.survey.activate_survey(non_active_survey_id)\n self.assertEqual(result['status'], 'OK')\n # TODO: if get_survey_properties is implemented check active status\n # clean up\n self.api.survey.delete_survey(non_active_survey_id)\n\n def test_activate_tokens_success(self):\n \"\"\" In case of success return response. \"\"\"\n new_survey_path = (\n 'tests/fixtures/same_questionnaire_different_fileformat.txt')\n new_survey_id = self.api.survey.import_survey(new_survey_path)\n response = self.api.survey.activate_tokens(new_survey_id)\n self.assertEqual(response['status'], 'OK')\n # clean up\n self.api.survey.delete_survey(new_survey_id)\n\n def test_activate_tokens_failure(self):\n \"\"\" A wrong survey_id should raise an exception. \"\"\"\n with self.assertRaises(LimeSurveyError) as ctx:\n self.api.survey.activate_tokens(self.survey_id_invalid)\n self.assertIn(\"Error: Invalid survey ID\", ctx.exception.message)\n\n # TODO: test for attributeFields\n\n def test_list_groups_success(self):\n \"\"\" Listing groups for a survey should return a group list. 
\"\"\"\n response = self.api.survey.list_groups(self.survey_id)\n for group in response:\n self.assertEqual(group[\"sid\"], self.survey_id)\n self.assertIsNotNone(group[\"group_name\"])\n self.assertIsNotNone(group[\"gid\"])\n\n def test_list_groups_failure(self):\n \"\"\"Listing questions for an invalid survey should return an error.\"\"\"\n with self.assertRaises(LimeSurveyError) as ctx:\n self.api.survey.list_questions(self.survey_id_invalid)\n self.assertIn(\"Error: Invalid survey ID\", ctx.exception.message)\n","sub_path":"tests/test__survey.py","file_name":"test__survey.py","file_ext":"py","file_size_in_byte":5464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"449366908","text":"from django.shortcuts import render\nfrom django.db import models\nimport json\nfrom django.http import JsonResponse\nfrom api.models import TaskList, Task\nfrom django.views.decorators.csrf import csrf_exempt\nfrom api.serializers import TaskSerializer, TaskListSerializer\n# Create your views here.\n\n@csrf_exempt\ndef task_lists(request):\n if request.method == 'GET':\n TaskLists = TaskList.objects.all()\n serializer = TaskListSerializer(TaskLists, many=True)\n return JsonResponse(serializer.data, safe=False, status=200)\n elif request.method == 'POST':\n data = json.loads(request.body)\n serializer = TaskListSerializer(data = data)\n if serializer.is_valid():\n serializer.save() # create\n return JsonResponse(serializer.data, status=201)\n return JsonResponse(serializer.errors)\n\n@csrf_exempt\ndef task_list_detail(request, pk):\n try:\n taskList= TaskList.objects.get(id=pk)\n except TaskList.DoesNotExist as e:\n return JsonResponse({'error': str(e)})\n if request.method == 'GET':\n serializer = TaskListSerializer(taskList)\n return JsonResponse(serializer.data, status=200)\n elif request.method == 'PUT':\n data = json.loads(request.body)\n serializer = TaskListSerializer(instance= taskList, data=data)\n if serializer.is_valid():\n serializer.save() #update\n return JsonResponse(serializer.data, status=200)\n return JsonResponse(serializer.errors)\n elif request.method == 'DELETE':\n taskList.delete()\n return JsonResponse({}, status=204)\n\n\n@csrf_exempt\ndef task_list_tasks(request, pk):\n try:\n task_list = TaskList.objects.get(id=pk)\n except TaskList.DoesNotExist as e:\n return JsonResponse({'error': str(e)})\n if request.method == 'GET':\n tasks = task_list.task_set.all()\n serializer = TaskSerializer(tasks, many=True)\n return JsonResponse(serializer.data, safe=False)\n elif request.method == 'POST':\n data = json.loads(request.body)\n serializer = TaskSerializer(data=data)\n if serializer.is_valid():\n serializer.save()\n return JsonResponse(serializer.data, status=200)\n return JsonResponse(serializer.errors)\n\n\n@csrf_exempt\ndef task_list_task_detail(request, pk, pk2):\n try:\n task_list = TaskList.objects.get(id=pk)\n task = task_list.task_set.get(id=pk2)\n except Task.DoesNotExist as e:\n return JsonResponse({'error': str(e)})\n if request.method == 'GET':\n serializer = TaskSerializer(task)\n return JsonResponse(serializer.data, status=200)\n elif request.method == 'PUT':\n data = json.loads(request.body)\n serializer = TaskSerializer(instance=task, data=data)\n if serializer.is_valid():\n serializer.save() # update\n return JsonResponse(serializer.data, status=200)\n return JsonResponse(serializer.errors)\n elif request.method == 'DELETE':\n task.delete()\n return JsonResponse({}, 
status=204)\n","sub_path":"Week12/todo-back/demo/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} {"seq_id":"330704508","text":"\"\"\"\nHomework 2\n\nHarkirat Gill\n\"\"\"\nimport numpy as np\nimport pandas as pd\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nfrom nltk.tokenize import sent_tokenize,word_tokenize\nfrom nltk.corpus import stopwords\nfrom string import punctuation\nimport nltk\n\nfrom nltk.probability import FreqDist\n\nfrom heapq import nlargest\n\n# Needed this in my IDE\n#nltk.download('punkt')\n#nltk.download('stopwords')\n\n# Print out top three sentences of an article and output the metrics of the output\n# (The call that runs this on the default article now lives at the bottom of the file,\n# after all definitions, so the module imports without a NameError.)\ndef print_summary(url=\"https://arstechnica.com/cars/2018/10/honda-will-use-gms-self-driving-technology-invest-2-75-billion/\"):\n processed = process_article(url)\n metrics = compute_metrics(processed[0], processed[1])\n \n print(\"\\nTop Sentences\\n\")\n print(processed[0][0])\n print(\"...\")\n print(processed[0][1])\n print(\"...\")\n print(processed[0][2])\n \n # Prints out the character count of the article, the percentage of the article each sentence takes up, and the reduction in reading size achieved by the summary, as metrics for how efficient the summarizer was\n print(\"\\nMetrics of Summarization\\n\")\n print(\"Character Count : \" + str(metrics[0]))\n print(\"Portion of First : \" + str(round(100 * metrics[1], 2)) + \"%\")\n print(\"Portion of Second : \" + str(round(100 * metrics[2], 2)) + \"%\")\n print(\"Portion of Third : \" + str(round(100 * metrics[3], 2)) + \"%\")\n print(\"Reduction in Size : \" + str(round(100 - (100 * metrics[4]), 2)) + \"%\")\n \n# Parse article and return top three sentences based on highest density of popular words\ndef process_article(url):\n articleURL=url\n response = requests.get(articleURL)\n response.encoding = 'utf-8'\n data = response.text\n soup = BeautifulSoup(data, 'html.parser')\n #print(soup)\n \n soup.find('article').text\n \n text = ' '.join(map(lambda p: p.text, soup.find_all('article')))\n #print(text)\n \n text.encode('ascii', 'ignore')\n #print(text)\n \n ####\n \n sents = sent_tokenize(text)\n #print(sents)\n \n word_sent = word_tokenize(text.lower())\n #print(word_sent)\n \n _stopwords = set(stopwords.words('english') + list(punctuation))\n #print(_stopwords)\n \n # Filter out stopwords\n word_sent=[word for word in word_sent if word not in _stopwords]\n #print(word_sent)\n \n freq = FreqDist(word_sent)\n #print(freq)\n \n nlargest(10, freq, key=freq.get)\n \n # We want to create a significance score ordered by highest frequency\n from collections import defaultdict\n ranking = defaultdict(int)\n for i,sent in enumerate(sents):\n for w in word_tokenize(sent.lower()):\n if w in freq:\n ranking[i] += freq[w]\n #print(ranking)\n \n # Top 3 Sentences\n sents_idx = nlargest(4, ranking, key=ranking.get)\n #print(sents_idx)\n \n # Clean sentences by removing new lines and extra spaces\n sents = cleanSents(sents)\n \n # Collect top three sentences and order them to keep structure of article preserved\n top_idx = [sents_idx[0], sents_idx[1], sents_idx[2]]\n top_idx.sort()\n top = (sents[top_idx[0]], sents[top_idx[1]], sents[top_idx[2]])\n \n # Return line numbers of the top three sentences as well as the entire text of the article for use in calculating metrics\n return (top, text)\n\n# Clean each sentence\ndef cleanSents(sents):\n new_sents = []\n for s in sents:\n 
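# clean() (defined below) strips newlines and collapses repeated spaces\n 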
new_sents.append(clean(s))\n return new_sents\n\n# Remove new lines and extra spaces\ndef clean(s):\n s = s.replace(\"\\n\", \"\")\n l = list(s)\n i = 0\n while i < len(l) - 1:\n if (l[i] == \" \" and l[i + 1] == \" \"):\n l[i:i + 1] = \"\"\n else:\n i += 1\n \n return \"\".join(l)\n\n# Compute and return metrics\n# The best measure I could think of was efficiency. Meaning, what percentage of the article is the summary\ndef compute_metrics(top_sents, text):\n length = len(text)\n eff_1 = len(top_sents[0]) / length\n eff_2 = len(top_sents[1]) / length\n eff_3 = len(top_sents[2]) / length\n total_eff = eff_1 + eff_2 + eff_3\n return(length, eff_1, eff_2, eff_3, total_eff)\n\n# Run code on default article\nprint_summary()\n","sub_path":"src/data/python/homework2.py","file_name":"homework2.py","file_ext":"py","file_size_in_byte":4137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} {"seq_id":"357988819","text":"#coding: utf-8\nfrom flask import current_app\nfrom influxdb import InfluxDBClient\nfrom datetime import datetime\nimport hashlib\n\ndef get_client(dbname=None):\n c = current_app.config\n config = dict([(k, c.get('INFLUXDB_{}'.format(k.upper()), None)) for k in\\\n ['host', 'port', 'username', 'password', 'ssl', 'verify_ssl', 'timeout',\n 'use_udp', 'udp_port']])\n if not config['host']:\n return None\n config['database'] = dbname\n return InfluxDBClient(**config)\n\n\ndef write_point(db, measurement, tags, value=1):\n client = get_client(db)\n if not client:\n return\n try:\n client.write_points([{\n \"measurement\": measurement,\n \"tags\": tags,\n \"time\": datetime.utcnow().strftime('%Y%m%dT%H:%M:%SZ'),\n \"fields\": {\n \"value\": value\n }\n }])\n except Exception as e:\n current_app.logger.error('Influxdb Error: {}'.format(e))\n\n\ndef write_get_taxis(zupc_insee, lon, lat, moteur, request, l_taxis):\n write_point(\n current_app.config['INFLUXDB_TAXIS_DB'],\n \"get_taxis_requests\",\n {\n \"zupc\": zupc_insee,\n \"position\": \"{:.3f}:{:.3f}\".format(float(lon), float(lat)),\n \"moteur\": moteur,\n \"customer\": hashlib.sha224(str(\n (\n request.headers.getlist(\"X-Forwarded-For\")[0].rpartition(' ')[-1]\n if 'X-Forwarded-For' in request.headers\n else request.remote_addr\n ) or 'untrackable'\n ).encode('utf-8')\n ).hexdigest()[:10]\n },\n value=l_taxis\n )\n","sub_path":"APITaxi_utils/influx_db.py","file_name":"influx_db.py","file_ext":"py","file_size_in_byte":1792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} {"seq_id":"543745226","text":"\"\"\"\nfriends = 'Максим Леонид '\nprint(len(friends))\nprint(friends.find('Лео'))\nprint(friends.isdigit())\nprint(friends.lower())\n#print('Hello %a. Are you %b years old?'%('Ivan', 30))\nprint('Hello {}. Are you {} years old?'.format('Ivan', 30))\n\ntop5 = 'The top five places in the competition: 1. Иванов 2. Петров 3. Сидоров 4. Орлов 5. Соколов'\nstart = top5.find('1')\nend = top5.find('4')\ntop3 = top5[start:end]\nprint('Congratulations {} on the success!'.format(top3.upper()))\n\"\"\"\nprint('python competition'.upper())\ncount = int(input('Enter the number of competition participants: '))\ni = count\nmembers = []\nwhile i > 0:\n name = input('Who took place {}? '.format(i))\n members.append(name)\n i-=1\nprint('The competition participants:', sorted(members))\nmembers.reverse()\ntop3 = members[:3]\nprint('The winners are {}. 
Congratulations!'.format(top3))","sub_path":"4_lesson.py","file_name":"4_lesson.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} {"seq_id":"256588960","text":"import regex as re\n\ndef substitute(bot, update):\n try:\n message = update.message\n parsed = re.search('s/(.+)/(.*)/', message.text)\n\n match = parsed.group(1)\n replace = parsed.group(2)\n print(\"match: {}\\nreplace: {}\".format(match, replace))\n\n reply = message.reply_to_message\n processed_message = re.sub(match, replace, reply.text)\n\n chat = update.message.chat.id\n\n bot.sendMessage(chat, processed_message)\n except AttributeError as e:\n print(\"error:\")\n print(e)\n","sub_path":"substitute.py","file_name":"substitute.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} {"seq_id":"86956514","text":"import ipdb\nimport numpy as np\n\nfrom cle.cle.data import Iterator\nfrom cle.cle.graph.net import Net\nfrom cle.cle.models import Model\nfrom cle.cle.layers import InitCell, OnehotLayer\nfrom cle.cle.layers.cost import MulCrossEntropyLayer\nfrom cle.cle.layers.feedforward import FullyConnectedLayer\nfrom cle.cle.train import Training\nfrom cle.cle.train.ext import (\n EpochCount,\n GradientClipping,\n Monitoring,\n Picklize,\n EarlyStopping\n)\nfrom cle.cle.train.opt import RMSProp\nfrom cle.cle.utils import error, predict, OrderedDict\nfrom cle.datasets.mnist import MNIST\n\n\n# Set your dataset\n#data_path = '/data/lisa/data/mnist/mnist.pkl'\n#save_path = '/u/chungjun/repos/cle/saved/'\ndata_path = '/home/junyoung/data/mnist/mnist.pkl'\nsave_path = '/home/junyoung/repos/cle/saved/'\n\nbatch_size = 128\ndebug = 0\n\nmodel = Model()\ntrdata = MNIST(name='train',\n path=data_path)\nvaldata = MNIST(name='valid',\n path=data_path)\n\n# Choose the random initialization method\ninit_W = InitCell('randn')\ninit_b = InitCell('zeros')\n\n# Define nodes: objects\nmodel.inputs = trdata.theano_vars()\nx, y = model.inputs\n# You must use THEANO_FLAGS=\"compute_test_value=raise\" python -m ipdb\nif debug:\n x.tag.test_value = np.zeros((batch_size, 784), dtype=np.float32)\n y.tag.test_value = np.zeros((batch_size, 1), dtype=np.float32)\n\ninputs = [x, y]\ninputs_dim = {'x':784, 'y':1}\n\nonehot = OnehotLayer(name='onehot',\n parent=['y'],\n nout=10)\n\nh1 = FullyConnectedLayer(name='h1',\n parent=['x'],\n nout=1000,\n unit='relu',\n init_W=init_W,\n init_b=init_b)\n\nh2 = FullyConnectedLayer(name='h2',\n parent=['h1'],\n nout=10,\n unit='softmax',\n init_W=init_W,\n init_b=init_b)\n\ncost = MulCrossEntropyLayer(name='cost', parent=['onehot', 'h2'])\n\n# You will fill in a list of nodes and feed them to the model constructor\nnodes = [onehot, h1, h2, cost]\n\n# Your model will build the Theano computational graph\nmlp = Net(inputs=inputs, inputs_dim=inputs_dim, nodes=nodes)\nmlp.build_graph()\n\n# You can access any output of a node by doing model.nodes[$node_name].out\ncost = mlp.nodes['cost'].out\nerr = error(predict(mlp.nodes['h2'].out), predict(mlp.nodes['onehot'].out))\ncost.name = 'cost'\nerr.name = 'error_rate'\nmodel.graphs = [mlp]\n\n# Define your optimizer: Momentum (Nesterov), RMSProp, Adam\noptimizer = RMSProp(\n lr=0.001\n)\n\nextension = [\n GradientClipping(),\n EpochCount(40),\n Monitoring(freq=100,\n ddout=[cost, err],\n data=[Iterator(trdata, batch_size),\n Iterator(valdata, batch_size)]),\n Picklize(freq=200,\n path=save_path)\n]\n\nmainloop = 
Training(\n name='toy_mnist',\n data=Iterator(trdata, batch_size),\n model=model,\n optimizer=optimizer,\n cost=cost,\n outputs=[cost, err],\n extension=extension\n)\nmainloop.run()\n","sub_path":"tutorials/mnist.py","file_name":"mnist.py","file_ext":"py","file_size_in_byte":3090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} {"seq_id":"312293421","text":"# -*- coding: utf-8 -*-\n# Commands --------------------------------------------------------------------------------\n\n# python roi_collector.py\n\n# Import libraries ------------------------------------------------------------------------\n\nimport os\nimport sys\n\nimport cv2\nimport imutils\nimport numpy as np\n\n\n# Functions -------------------------------------------------------------------------------\n\n# Print iterations progress\ndef print_progress(iteration, total, prefix='', suffix='', decimals=1, bar_length=100):\n \"\"\"\n Call in a loop to create terminal progress bar\n @params:\n iteration - Required : current iteration (Int)\n total - Required : total iterations (Int)\n prefix - Optional : prefix string (Str)\n suffix - Optional : suffix string (Str)\n decimals - Optional : positive number of decimals in percent complete (Int)\n bar_length - Optional : character length of bar (Int)\n \"\"\"\n str_format = \"{0:.\" + str(decimals) + \"f}\"\n percents = str_format.format(100 * (iteration / float(total)))\n filled_length = int(round(bar_length * iteration / float(total)))\n bar = '█' * filled_length + '-' * (bar_length - filled_length)\n\n sys.stdout.write('\\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix))\n # flush inside the function so each update is shown immediately\n sys.stdout.flush()\n\n if iteration == total:\n sys.stdout.write('\\n')\n\n\n# Initialization --------------------------------------------------------------------------\n\n# load the Caffe face detector\nprint(\"[INFO] loading face detector...\")\nnet = cv2.dnn.readNetFromCaffe(\"face_detector/deploy.prototxt\",\n \"face_detector/res10_300x300_ssd_iter_140000.caffemodel\")\n\n# Camera ----------------------------------------------------------------------------------\n\nimg_count = 1\n\n# num_images = len([name for name in os.listdir(\"users/Unknown/\")])\n\nfor filename in os.listdir(\"users/Unknown/\"):\n if filename.endswith(\".jpg\"):\n # Read in image\n frame = cv2.imread(os.path.sep.join([\"users/Unknown/\", filename]))\n frame = imutils.resize(frame, width=600)\n img_count += 1\n\n # Convert frame to blob type\n (h, w) = frame.shape[:2]\n blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 1.0,\n (300, 300), (104.0, 177.0, 123.0))\n\n # Have the NN analyze the blob\n net.setInput(blob)\n detections = net.forward()\n\n for i in range(0, 1):\n # Determine prediction confidence\n confidence = detections[0, 0, i, 2]\n\n # Filter poor detections\n if confidence > .7:\n # Compute ROI coordinates\n box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])\n (startX, startY, endX, endY) = box.astype(\"int\")\n\n # Make sure ROI is in frame\n startX = max(0, startX)\n startY = max(0, startY)\n endX = min(w, endX)\n endY = min(h, endY)\n\n # Pre-process the ROI data\n face = frame[startY:endY, startX:endX]\n\n # Save the face's ROI\n img_path = os.path.sep.join([\"/home/logan/Pictures/Unknown/\", filename])\n cv2.imwrite(img_path, face)\n print_progress(img_count, 13233, prefix=\"Processing: \", suffix=\" {}/{}\".format(img_count, 
13233))\n","sub_path":"roi_collector.py","file_name":"roi_collector.py","file_ext":"py","file_size_in_byte":3405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} {"seq_id":"371740163","text":"# based on https://github.com/OpenAgricultureFoundation/python-wifi-connect\n# which is based on https://github.com/balena-io/wifi-connect\nimport argparse\nimport logging\nimport socket\nimport sys\nimport time\nimport uuid\nfrom getpass import getpass\n\nimport NetworkManager\n\nlogger = logging.getLogger('network-manager')\nlogger.setLevel(logging.INFO)\n\nap_connection_id = 'farm_ng-' + socket.gethostname()\nap_SSID = ap_connection_id\nap_ip = '192.168.42.1'\nwifi_interface = 'wlan0'\n\n\ndef get_ap_config(password):\n return {\n '802-11-wireless': {\n 'mode': 'ap',\n 'security': '802-11-wireless-security',\n 'ssid': ap_SSID,\n },\n '802-11-wireless-security': {\n 'key-mgmt': 'wpa-psk',\n 'psk': password,\n },\n 'connection': {\n 'autoconnect': True,\n 'id': ap_connection_id,\n 'interface-name': wifi_interface,\n 'type': '802-11-wireless',\n 'uuid': str(uuid.uuid4()),\n },\n 'ipv4': {\n 'address-data': [{'address': ap_ip, 'prefix': 24}],\n 'addresses': [[ap_ip, 24, ap_ip]],\n 'gateway': ap_ip,\n 'method': 'shared',\n },\n 'ipv6': {'method': 'auto'},\n }\n\n\ndef find_connection(id):\n connections = NetworkManager.Settings.ListConnections()\n connections = {x.GetSettings()['connection']['id']: x for x in connections}\n return connections.get(id)\n\n\ndef get_ap_connection():\n connection = find_connection(ap_connection_id)\n\n if not connection:\n password = getpass(prompt='AP Password (at least 8 characters): ')\n NetworkManager.Settings.AddConnection(get_ap_config(password))\n logger.info(f'Added connection: {ap_connection_id}')\n connection = find_connection(ap_connection_id)\n\n if not connection:\n raise Exception('Could not get ap connection.')\n\n return connection\n\n\ndef activate_connection(connection):\n connection_id = connection.GetSettings()['connection']['id']\n\n # Get the wireless network interface\n device = next((d for d in NetworkManager.NetworkManager.GetDevices() if d.Interface == wifi_interface), None)\n if not device:\n raise Exception(f'No {wifi_interface} device found')\n\n # Enable autoconnect for selected connection only\n wifi_connections = [c for c in NetworkManager.Settings.ListConnections() if c.GetSettings()['connection']['type'] == '802-11-wireless']\n for c in wifi_connections:\n c_settings = c.GetSettings()\n c_settings['connection']['autoconnect'] = (c_settings['connection']['id'] == connection_id)\n c.Update(c_settings)\n\n NetworkManager.NetworkManager.ActivateConnection(connection, device, '/')\n logger.info(f'Activated connection={connection_id}, dev={device.Interface}.')\n\n i = 0\n while device.State != NetworkManager.NM_DEVICE_STATE_ACTIVATED:\n if i % 5 == 0:\n logger.info('Waiting for connection to become active...')\n if i > 30:\n break\n time.sleep(1)\n i += 1\n\n if device.State != NetworkManager.NM_DEVICE_STATE_ACTIVATED:\n raise Exception(f'Enabling connection {ap_connection_id} failed.')\n\n logger.info(f'Connection {connection_id} is active.')\n\n\ndef delete_connection(name):\n connection = find_connection(name)\n if not connection:\n raise Exception(f'The connection {name} does not exist.')\n connection.Delete()\n logger.info(f'Connection {name} deleted.')\n\n\ndef disable_current():\n current = next(\n (\n c for c in NetworkManager.NetworkManager.ActiveConnections if c.Connection.GetSettings()\n 
['connection']['interface-name'] == wifi_interface\n ), None,\n )\n\n if current:\n logger.info(f\"Deactivating connection {current.Connection.GetSettings()['connection']['id']}\")\n NetworkManager.NetworkManager.DeactivateConnection(current)\n\n\ndef enable_ap():\n connection = get_ap_connection()\n activate_connection(connection)\n\n\ndef enable_connection(name):\n connection = find_connection(name)\n if not connection:\n raise Exception(f'The connection {name} does not exist.')\n activate_connection(connection)\n\n\ndef print_connections():\n connections = NetworkManager.Settings.ListConnections()\n for c in connections:\n print(c.GetSettings()['connection']['id'])\n\n\nclass WifiCLI:\n def __init__(self):\n parser = argparse.ArgumentParser(\n description='Manage NetworkManager wifi configuration', usage='''wifi <command> [<args>]\n\nThe most commonly used commands are:\n list Show saved wifi configurations\n ap Enable access point\n connect Connect to a wifi network\n delete Delete a saved wifi configuration\n''',\n )\n parser.add_argument('command', help='Subcommand to run')\n args = parser.parse_args(sys.argv[1:2])\n if not hasattr(self, args.command):\n print('Unrecognized command')\n parser.print_help()\n exit(1)\n getattr(self, args.command)()\n\n def list(self):\n print_connections()\n\n def ap(self):\n enable_ap()\n\n def connect(self):\n parser = argparse.ArgumentParser(description='Connect to a wifi network')\n parser.add_argument('id', help='connection id')\n args = parser.parse_args(sys.argv[2:])\n enable_connection(args.id)\n\n def delete(self):\n parser = argparse.ArgumentParser(description='Delete a saved wifi configuration')\n parser.add_argument('id', help='connection id')\n args = parser.parse_args(sys.argv[2:])\n delete_connection(args.id)\n\n\nif __name__ == '__main__':\n logging.basicConfig(stream=sys.stdout, level=logging.INFO)\n WifiCLI()\n","sub_path":"modules/tractor/python/farm_ng/tractor/wifi.py","file_name":"wifi.py","file_ext":"py","file_size_in_byte":5756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} {"seq_id":"565428158","text":"from numpy import *\n\narr0 = array([1,2,3,4,5])\n\nfor i in arr0:\n if i>4:\n print(\"is the highest number of the series\")\n break\n\nelse:\n print(\"not at all\")\n","sub_path":"startCode1.py","file_name":"startCode1.py","file_ext":"py","file_size_in_byte":168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} {"seq_id":"82582519","text":"#!/usr/bin/env python3.6\n\nimport argparse\nimport os\nimport pickle\nimport hashlib\nimport exifread\nimport shutil\nimport operator\nimport itertools\nfrom PIL import Image\nfrom datetime import datetime\nfrom concurrent import futures\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"export_dir\",\n action = FullPath,\n type = is_dir)\n\n args = parser.parse_args()\n error_dir = os.path.join(os.getcwd(), \"Errors\")\n pickle_path = os.path.join(args.export_dir, \"photos.pickle\")\n\n if not os.path.exists(error_dir):\n os.makedirs(error_dir)\n \n exclude = list([args.export_dir])\n photos = _collect_photos(os.getcwd(), exclude)\n\n importer = Importer(args.export_dir, error_dir)\n importer.import_photos(photos)\n\ndef is_dir(dirname):\n if not os.path.isdir(dirname):\n os.makedirs(dirname)\n return os.path.abspath(dirname)\n else:\n return os.path.abspath(dirname)\n\nclass Photo():\n def __init__(self, path: str):\n self.hasher = Hasher()\n self.path = path\n self._cached_md5 = None\n 
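# md5, dhash and the EXIF date are computed lazily by the properties below\n 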
self._cached_dhash = None\n self._cached_date = None\n\n @property\n def md5(self):\n if self._cached_md5 is None:\n self._cached_md5 = self.hasher.md5(self.path)\n return self._cached_md5\n\n @property\n def dhash(self):\n if self._cached_dhash is None:\n self._cached_dhash = self.hasher.dhash(self.path, 16)\n return self._cached_dhash\n\n @property\n def date(self):\n if self._cached_date is None:\n self._cached_date = self._get_date()\n return self._cached_date\n\n def _get_date(self) -> datetime:\n with open(self.path, \"rb\") as f:\n tags = exifread.process_file(f, details = False)\n\n date_tag = tags.get(\"Image DateTime\")\n if date_tag is None:\n return None\n else:\n return datetime.strptime(str(date_tag), \"%Y:%m:%d %H:%M:%S\")\n\n def __repr__(self):\n return self.path\n\n\nclass Importer():\n def __init__(self, output_directory: str, error_directory: str):\n self.output_directory = output_directory\n self.error_directory = error_directory\n self.pickle_path = os.path.join(output_directory, \"photos.pickle\")\n\n def import_photos(self, photos: list):\n if not os.path.exists(self.pickle_path):\n print(\"Scanning output directory\")\n self.scan_output_directory()\n print(\"Complete\")\n\n with open(self.pickle_path, \"rb\") as f:\n checksums = pickle.load(f)\n \n self._prime_md5(photos)\n seen = set()\n photos_to_import = [x for x in photos if x.md5 not in checksums and x.md5 not in seen and not seen.add(x.md5)]\n print(f\"Photos found to import: {len(photos_to_import)}\")\n \n #burst_finder = BurstFinder(photos_to_import)\n #burst_finder.find_bursts()\n\n for photo in photos_to_import:\n if self.import_photo(photo):\n checksums.add(photo.md5)\n\n with open(self.pickle_path, \"wb\") as f:\n pickle.dump(checksums, f)\n\n def import_photo(self, photo: Photo) -> bool:\n suffix = 'a'\n try:\n date = photo.date\n year = date.strftime(\"%Y\")\n month = date.strftime(\"%m - %B\")\n photo_name = date.strftime(\"%Y-%m-%d %H-%M-%S\")\n dest_dir = os.path.join(self.output_directory, year, month)\n if not os.path.exists(dest_dir):\n os.makedirs(dest_dir)\n\n duplicate_name = os.path.join(dest_dir, f\"{photo_name}.jpg\")\n while os.path.exists(duplicate_name):\n photo_name = date.strftime(\"%Y-%m-%d %H-%M-%S\") + suffix\n duplicate_name = os.path.join(dest_dir, f\"{photo_name}.jpg\")\n suffix = chr(ord(suffix) + 1)\n\n shutil.copy2(photo.path, duplicate_name)\n return True\n except Exception as e:\n print(f\"Exception importing {photo.path}. 
Reason: {e}\")\n shutil.copy2(photo.path, self.error_directory)\n return False\n\n def scan_output_directory(self):\n photos = _collect_photos(self.output_directory)\n checksums = set()\n for photo in photos:\n checksums.add(photo.md5)\n \n with open(self.pickle_path, \"wb\") as f:\n pickle.dump(checksums, f)\n\n def _prime_md5(self, photos: list):\n print(\"Calculating MD5 sums\")\n _multicore(_get_md5, photos)\n print(\"Done\")\n\nclass BurstFinder():\n def __init__(self, photos: list):\n self.photos = [x for x in photos if x.date is not None]\n self.photos.sort(key = operator.attrgetter(\"date\"))\n\n def find_bursts(self):\n # find pictures taken in succession\n runs = self.split_into_runs()\n for run in runs:\n self._check_visuals(run)\n # for each group, split into groups based on dhash\n # return bursts\n\n def split_into_runs(self) -> list:\n runs = list()\n run = list()\n \n for i in range(len(self.photos)):\n if len(run) == 0:\n run.append(self.photos[i])\n continue\n\n tdelta = self._time_delta_millis(self.photos[i-1].date, self.photos[i].date)\n if tdelta < 500:\n run.append(self.photos[i])\n else:\n runs.append(run)\n run = list()\n run.append(self.photos[i])\n\n if i == len(self.photos) - 1:\n runs.append(run)\n runs = [j for j in runs if len(j) >= 2]\n\n return runs\n\n def _time_delta_millis(self, date1: datetime, date2: datetime) -> int:\n tdelta = date2 - date1\n return int((tdelta.days * 86400000) + (tdelta.seconds * 1000) + (tdelta.microseconds / 1000))\n\n def _check_visuals(self, photos):\n self._prime_dhashes(photos)\n photos_to_remove = list()\n for a, b in itertools.combinations(photos, 2):\n if _hamming_distance(a, b) > 30:\n print(f\"Damn, {a} and {b} don't match\")\n\n def _prime_dhashes(self, photos: list):\n print(\"Calculating dhashes\")\n _multicore(_get_dhash, photos)\n print(\"Done\")\n\nclass ExifError(Exception):\n def __init__(self, value):\n self.value = value\n\n def __str__(self):\n return repr(self.value)\n\n\nclass Hasher():\n def dhash(self, photo: str, width: int = 8) -> int:\n image = Image.open(photo)\n data = list(image.convert(\"L\")\n .resize((width+1, width+1), Image.ANTIALIAS)\n .getdata())\n \n row_hash = 0\n col_hash = 0\n for y in range(width):\n for x in range(width):\n offset = y * (width + 1) + x\n\n row_bit = data[offset] < data[offset + 1]\n row_hash = row_hash << 1 | row_bit\n\n col_bit = data[offset] < data[offset + (width + 1)]\n col_hash = col_hash << 1 | col_bit\n\n return row_hash << (width * width) | col_hash\n\n def md5(self, file: str) -> str:\n m = hashlib.md5()\n with open(file, \"rb\") as f:\n data = f.read(65536)\n while data:\n m.update(data)\n data = f.read(65536)\n\n return m.hexdigest()\n\n\nclass FullPath(argparse.Action):\n def __call__(self, parser, namespace, values, option_string=None):\n setattr(namespace, self.dest, values)\n\ndef _collect_photos(directory: str, ignore: list = list()) -> list:\n photos = list()\n for root, dirs, files in os.walk(directory):\n dirs[:] = [d for d in dirs if os.path.join(root, d) not in ignore]\n for file in files:\n photos.append(Photo(os.path.join(root, file)))\n \n photos = [p for p in photos if p.path.lower()[-4:] == \".jpg\"]\n\n return photos\n\ndef _get_dhash(photo: Photo) -> int:\n return photo.dhash\n\ndef _get_md5(photo: Photo) -> str:\n return photo.md5\n\ndef _hamming_distance(photo1: Photo, photo2: Photo) -> int:\n return bin(photo1.dhash ^ photo2.dhash).count(\"1\")\n\ndef _multicore(function, inputs):\n with futures.ProcessPoolExecutor() as ex:\n ex.map(function, 
inputs)\n\nif __name__ == \"__main__\":\n main()","sub_path":"photo_management/sort-photos.py","file_name":"sort-photos.py","file_ext":"py","file_size_in_byte":8367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"344526417","text":"# -*- coding: utf8 -*-\nimport feedparser\nimport scrapy\nfrom items import ArticleItem\nimport re\n\nclass Spider10(scrapy.Spider):\n\n name = \"test\"\n\n def start_requests(self):\n urls = ['https://news.zing.vn/shawn-mendes-va-camila-cabello-om-ap-nhau-tren-san-khau-vmas-2019-post983126.html']\n for url in urls:\n yield scrapy.Request(url=url, callback=self.parse_article)\n\n def parse_article(self, response ):\n article = ArticleItem()\n\n tmp = response.xpath('//*[@class=\"pic\"]/img/@src').extract()\n print(tmp)\n #//*[@class=\"keywords-box\"]/span/a/text()\n\n","sub_path":"crawler/scraper/scraper/spiders/testcrawl.py","file_name":"testcrawl.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"328546476","text":"\"\"\"\nThere are M people sitting in a row of N seats, where M < N. Your task is to redistribute people such that \nthere are no gaps between any of them, while keeping overall movement to a minimum.\n\nFor example, suppose you are faced with an input of \n[0, 1, 1, 0, 1, 0, 0, 0, 1], where 0 represents an empty seat and 1 represents a person. \nIn this case, one solution would be to place the person on the right in the fourth seat. \nWe can consider the cost of a solution to be the sum of the absolute distance each person must move, \nso that the cost here would be five.\n\nGiven an input such as the one above, return the lowest possible cost of moving people to remove all gaps.\n\"\"\"\n\ndef move(seats):\n people = [i for i, x in enumerate(seats) if x == 1]\n print(\"people: {}\".format(people))\n n = len(people)\n print(\"n: {}\".format(n))\n median = people[n // 2]\n print(\"median: {}\".format(median))\n cost = 0\n\n # Move left seats closer to median.\n i = median - 1; j = n // 2 - 1\n print(\"i :{}\".format(i))\n print(\"j : {}\".format(j))\n while i >= 0 and j >= 0:\n if seats[i] == 0:\n cost += abs(people[j] - i)\n seats[i], seats[people[j]] = seats[people[j]], seats[i]\n j -= 1 \n i -= 1\n\n # Move right seats closer to median.\n i = median + 1; j = n // 2 + 1\n while i < len(seats) and j < n:\n if seats[i] == 0:\n cost += abs(people[j] - i)\n seats[i], seats[people[j]] = seats[people[j]], seats[i]\n j += 1\n i += 1\n\n return seats, cost\n\nprint(move([0, 1, 1, 0, 1, 0, 0, 0, 1]))","sub_path":"prob607.py","file_name":"prob607.py","file_ext":"py","file_size_in_byte":1600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"84167724","text":"\"\"\"Core tasks.\"\"\"\n# Copyright 2014 - 2015 Solinea, Inc.\n#\n# Licensed under the Solinea Software License Agreement (goldstone),\n# Version 1.0 (the \"License\"); you may not use this file except in compliance\n# with the License. 
You may obtain a copy of the License at:\n#\n# http://www.solinea.com/goldstone/LICENSE.pdf\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom django.conf import settings\nimport logging\nfrom subprocess import check_call\n\nfrom goldstone.celery import app as celery_app\n\nlogger = logging.getLogger(__name__)\n\n\n@celery_app.task()\ndef delete_indices(prefix,\n cutoff=None,\n es_host=settings.ES_HOST,\n es_port=settings.ES_PORT):\n \"\"\"Cull old indices from Elasticsearch.\n\n Takes an index name prefix (ex: goldstone-) and a cutoff time in days.\n Returns the curator exit status, or a message if no cutoff was provided.\n \"\"\"\n\n if cutoff is not None:\n cmd = \"curator --host %s --port %s delete --prefix %s \" \\\n \"--older-than %d\" % (es_host, es_port, prefix, cutoff)\n return check_call(cmd.split())\n else:\n return \"Cutoff was none, no action taken\"\n\n\n@celery_app.task()\ndef update_persistent_graph():\n \"\"\"Update the Resource graph's persistent data from the current OpenStack\n cloud state.\n\n Nodes are:\n - deleted if they are no longer in the OpenStack cloud\n - added if they are in the OpenStack cloud, but not in the graph.\n - updated from the cloud if they are already in the graph.\n\n \"\"\"\n from goldstone.cinder.utils import update_nodes as update_cinder_nodes\n from goldstone.glance.utils import update_nodes as update_glance_nodes\n from goldstone.keystone.utils import update_nodes as update_keystone_nodes\n from goldstone.nova.utils import update_nodes as update_nova_nodes\n\n update_cinder_nodes()\n update_glance_nodes()\n update_keystone_nodes()\n update_nova_nodes()\n\n\n@celery_app.task()\ndef expire_auth_tokens():\n \"\"\"Expire authorization tokens.\n\n This deletes all existing tokens, which will force every user to log in\n again.\n\n This should be replaced with djangorestframework-timed-auth-token after we\n upgrade to Django 1.8.\n\n \"\"\"\n from rest_framework.authtoken.models import Token\n\n Token.objects.all().delete()\n","sub_path":"goldstone/core/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":2566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} {"seq_id":"628488144","text":"def my_function():\n print(\"Hello from a function\")\n \ndef sub(A1):\n res = set()\n pre = {0}\n for x in A1:\n pre = {x | y for y in pre} | {x}\n res |= pre\n return len(res)\n\nfor _ in range(int(input())):\n n = int(input())\n a = list(map(int, input().split()))\n if len(a)*(len(a)+1)/2 == sub(a):\n print(\"YES\")\n else:\n print(\"NO\")","sub_path":"COOK119B/cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} {"seq_id":"143633926","text":"#!/usr/bin/env python3\n\n#\n# Copyright (c) 2014-present, Facebook, Inc.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n#\n\nimport json\nfrom typing import Tuple\n\nimport click\nimport jsondiff\nfrom openr.AllocPrefix import ttypes as ap_types\nfrom openr.cli.utils import utils\nfrom openr.cli.utils.commands import OpenrCtrlCmd\nfrom openr.LinkMonitor import ttypes as lm_types\nfrom openr.Lsdb import ttypes as 
lsdb_types\nfrom openr.OpenrCtrl import OpenrCtrl\nfrom openr.OpenrCtrl.ttypes import OpenrError\nfrom openr.utils import ipnetwork, printing\nfrom openr.utils.consts import Consts\nfrom openr.utils.serializer import deserialize_thrift_object\n\n\nclass ConfigShowCmd(OpenrCtrlCmd):\n def _run(self, client: OpenrCtrl.Client):\n resp = client.getRunningConfig()\n config = json.loads(resp)\n utils.print_json(config)\n\n\nclass ConfigDryRunCmd(OpenrCtrlCmd):\n def _run(self, client: OpenrCtrl.Client, file: str):\n try:\n file_conf = client.dryrunConfig(file)\n except OpenrError as ex:\n click.echo(click.style(\"FAILED: {}\".format(ex), fg=\"red\"))\n return\n\n config = json.loads(file_conf)\n utils.print_json(config)\n\n\nclass ConfigCompareCmd(OpenrCtrlCmd):\n def _run(self, client: OpenrCtrl.Client, file: str):\n running_conf = client.getRunningConfig()\n\n try:\n file_conf = client.dryrunConfig(file)\n except OpenrError as ex:\n print(\"invalid config {} : {}\".format(file, ex))\n return\n\n res = jsondiff.diff(running_conf, file_conf, load=True, syntax=\"explicit\")\n if res:\n click.echo(click.style(\"DIFF FOUND!\", fg=\"red\"))\n print(\"== diff(running_conf, {}) ==\".format(file))\n print(res)\n else:\n click.echo(click.style(\"SAME\", fg=\"green\"))\n\n\nclass ConfigStoreCmdBase(OpenrCtrlCmd):\n def getConfigWrapper(\n self, client: OpenrCtrl.Client, config_key: str\n ) -> Tuple[str, str]:\n blob = None\n exception_str = None\n try:\n blob = client.getConfigKey(config_key)\n except OpenrError as ex:\n exception_str = \"Exception getting key for {}: {}\".format(config_key, ex)\n\n return (blob, exception_str)\n\n\nclass ConfigPrefixAllocatorCmd(ConfigStoreCmdBase):\n def _run(self, client: OpenrCtrl.Client):\n (prefix_alloc_blob, exception_str) = self.getConfigWrapper(\n client, Consts.PREFIX_ALLOC_KEY\n )\n\n if prefix_alloc_blob is None:\n print(exception_str)\n return\n\n prefix_alloc = deserialize_thrift_object(\n prefix_alloc_blob, ap_types.AllocPrefix\n )\n self.print_config(prefix_alloc)\n\n def print_config(self, prefix_alloc: ap_types.AllocPrefix) -> None:\n seed_prefix = prefix_alloc.seedPrefix\n seed_prefix_addr = ipnetwork.sprint_addr(seed_prefix.prefixAddress.addr)\n\n caption = \"Prefix Allocator parameters stored\"\n rows = []\n rows.append(\n [\"Seed prefix: {}/{}\".format(seed_prefix_addr, seed_prefix.prefixLength)]\n )\n rows.append([\"Allocated prefix length: {}\".format(prefix_alloc.allocPrefixLen)])\n rows.append(\n [\"Allocated prefix index: {}\".format(prefix_alloc.allocPrefixIndex)]\n )\n\n print(printing.render_vertical_table(rows, caption=caption))\n\n\nclass ConfigLinkMonitorCmd(ConfigStoreCmdBase):\n def _run(self, client: OpenrCtrl.Client) -> None:\n # After link-monitor thread starts, it will hold for\n # \"adjHoldUntilTimePoint_\" time before populate config information.\n # During this short time-period, Exception can be hit if dump cmd\n # kicks during this time period.\n (lm_config_blob, exception_str) = self.getConfigWrapper(\n client, Consts.LINK_MONITOR_KEY\n )\n\n if lm_config_blob is None:\n print(exception_str)\n return\n\n lm_config = deserialize_thrift_object(lm_config_blob, lm_types.LinkMonitorState)\n self.print_config(lm_config)\n\n def print_config(self, lm_config: lm_types.LinkMonitorState):\n caption = \"Link Monitor parameters stored\"\n rows = []\n rows.append(\n [\"isOverloaded: {}\".format(\"Yes\" if lm_config.isOverloaded else \"No\")]\n )\n rows.append([\"nodeLabel: {}\".format(lm_config.nodeLabel)])\n rows.append(\n 
[\"overloadedLinks: {}\".format(\", \".join(lm_config.overloadedLinks))]\n )\n print(printing.render_vertical_table(rows, caption=caption))\n\n print(printing.render_vertical_table([[\"linkMetricOverrides:\"]]))\n column_labels = [\"Interface\", \"Metric Override\"]\n rows = []\n for (k, v) in sorted(lm_config.linkMetricOverrides.items()):\n rows.append([k, v])\n print(printing.render_horizontal_table(rows, column_labels=column_labels))\n\n print(printing.render_vertical_table([[\"adjMetricOverrides:\"]]))\n column_labels = [\"Adjacency\", \"Metric Override\"]\n rows = []\n for (k, v) in sorted(lm_config.adjMetricOverrides.items()):\n adj_str = k.nodeName + \" \" + k.ifName\n rows.append([adj_str, v])\n print(printing.render_horizontal_table(rows, column_labels=column_labels))\n\n\nclass ConfigPrefixManagerCmd(ConfigStoreCmdBase):\n def _run(self, client: OpenrCtrl.Client) -> None:\n (prefix_mgr_config_blob, exception_str) = self.getConfigWrapper(\n client, Consts.PREFIX_MGR_KEY\n )\n\n if prefix_mgr_config_blob is None:\n print(exception_str)\n return\n\n prefix_mgr_config = deserialize_thrift_object(\n prefix_mgr_config_blob, lsdb_types.PrefixDatabase\n )\n self.print_config(prefix_mgr_config)\n\n def print_config(self, prefix_mgr_config: lsdb_types.PrefixDatabase):\n print()\n print(utils.sprint_prefixes_db_full(prefix_mgr_config))\n print()\n\n\nclass ConfigEraseCmd(ConfigStoreCmdBase):\n def _run(self, client: OpenrCtrl.Client, key: str) -> None:\n client.eraseConfigKey(key)\n print(\"Key:{} erased\".format(key))\n\n\nclass ConfigStoreCmd(ConfigStoreCmdBase):\n def _run(self, client: OpenrCtrl.Client, key: str, value: str) -> None:\n client.setConfigKey(key, value)\n print(\"Key:{}, value:{} stored\".format(key, value))\n","sub_path":"openr/py/openr/cli/commands/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":6441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"207847539","text":"\"\"\"\nРеализовать метод __str__, позволяющий выводить все папки и файлы из данной,\nнапример так:\n\n> print(folder1)\n\nV folder1\n|-> V folder2\n| |-> V folder3\n| | |-> file3\n| |-> file2\n|-> file1\n\nА так же возможность проверить, находится ли файл или папка в другой папке:\n> print(file3 in folder2)\nTrue\n\n\"\"\"\nimport shutil\nimport os\n\n\nclass PrintableFolder:\n def __init__(self, name, content):\n self.name = name\n self.content = content\n self.output = []\n\n def __str__(self, depth=0):\n for item in self.content:\n if os.path.isfile(self.name + '\\\\' + item):\n if depth-1:\n self.output.append(\"| \"*(depth-1) + \"|-> \" + item)\n else:\n self.output.append(\"|-> \" + item)\n if os.path.isdir(self.name + '\\\\' + item):\n if depth:\n if depth-1:\n self.output.append(\n \"| \"*(depth-1) + \"|->\" + ' V ' + item)\n else:\n self.output.append(\"|-> V \" + item)\n else:\n self.output.append(\"V \" + item)\n depth += 1\n new_folder = PrintableFolder(self.name+'\\\\'+item,\n os.listdir(self.name+'\\\\'+item))\n self.output.append(new_folder.__str__(depth))\n depth -= 1\n return '\\n'.join(self.output)\n\n def __contains__(self, item):\n for i in self.content:\n if os.path.isdir(self.name+'\\\\'+i):\n new_folder = PrintableFolder(\n self.name+'\\\\'+i, os.listdir(self.name+'\\\\'+i))\n return item in new_folder\n if os.path.isfile(self.name+'\\\\'+i):\n if item.name == i:\n return True\n\n\nclass PrintableFile:\n def __init__(self, name):\n self.name = name\n\n def __str__(self):\n return '|-> 
'+self.name\n\n\nfolder1 = PrintableFolder(r'C:\\Users\\nyna-\\Documents\\Virtual Machines',\n os.listdir\n (r'C:\\Users\\nyna-\\Documents\\Virtual Machines'))\nprint(folder1)\n\nfile1 = PrintableFile('QNX-s001.vmdk')\nprint(file1 in folder1)\n","sub_path":"06-advanced-python/hw/task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":2375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} {"seq_id":"365417035","text":"#\n# Dynamic Non-Linear Response Properties Module\n#\n# Alex Findlater, Kris Keipert\n# Version 1.0 2014\n#\n# CI/CAS/TDDFT moments are parsed from GAMESS\n# log files and used to calculate 2nd and 3rd\n# order optical properties.\n# \n#\nimport numpy as np\n#\n#\n# User input\n#\n#\nlogname='ex15.log'\nw=0 # frequency used for GNUplot of beta(-2w;w,w)\nw1=0 # frequency 1 for beta(-(w1+w2);w1,w2)\nw2=0\nprt_beta=0 # 1 prints two gnuplot inputs for each element of beta(-2w;w,w):\n # plotting each SOS term in the element and plotting the entire element\nplot_min=0.0 # gnuplot input plot domain min and max\nplot_max=1.0\nbeta_print_tol=0.0 # SOS beta term cut off to be plotted\ntpa_print_tol=0.0 # SOS tpa terms cut off to be printed to log file\nterms=5 # number of excited-states in SOS expressions.\nnstate=0 # number of total states, including reference. This will be set by parsing.\n#\n#\n# Constants\n#\n#\npi=3.14159\na0=5.29e-9 # Bohr radius (cm)\nfsc=7.30e-3 # Fine structure constant (unit-less)\nc=2.9979e10 # Speed of light (cm/s)\ngamma=0.00367493 # FWHM of Lorentzian for final state\nev2h=float(0.03674930495120813) # eV -> hartree conversion \ndeb2bohr=float(0.393430307) # Debye -> e*bohr\n#\n#\n# Parse GAMESS log file\n#\n#\nruntyp=int(9)\ndftcheck=0\n# open GAMESS log file, and identify run type\nwith open (logname, 'r') as log:\n for line in log:\n if 'SOCI= T' in line:\n runtyp=int(0)\n if 'CITYP =CIS' in line:\n runtyp=int(1)\n if 'TDDFT =EXCITE' in line:\n runtyp=2 \n#\n# runtyp=0, GUGA-DRT RUNTYP=TRANSTN\n if runtyp==int(0):\n if 'RECOVER CI' in line:\n nstate=int(line.rsplit(None, 1)[1])\n u_x=np.zeros( (nstate,nstate) )\n u_y=np.zeros( (nstate,nstate) )\n u_z=np.zeros( (nstate,nstate) )\n u_x_2=np.zeros( (nstate,nstate) )\n u_y_2=np.zeros( (nstate,nstate) )\n u_z_2=np.zeros( (nstate,nstate) )\n ediff=np.zeros( (nstate,nstate) )\n if 'CI STATE NUMBER=' in line:\n i=int(line.split( )[3])-1\n j=int(line.split( )[4])-1\n if 'E*BOHR' in line:\n if i==j==0:\n base_x=float(line.split( )[3])*ev2h\n base_y=float(line.split( )[4])*ev2h\n base_z=float(line.split( )[5])*ev2h\n if i==j!=0:\n u_x[i,j]=float(line.split( )[3])*ev2h-base_x\n u_y[i,j]=float(line.split( )[4])*ev2h-base_y\n u_z[i,j]=float(line.split( )[5])*ev2h-base_z\n if i!=j:\n u_x[i,j]=float(line.split( )[3])*ev2h\n u_y[i,j]=float(line.split( )[4])*ev2h\n u_z[i,j]=float(line.split( )[5])*ev2h\n if 'TRANSITION ENERGY=' in line:\n ediff[i,j]=float(line.split( )[8])*ev2h\n#\n# runtyp=1, CITYP=CIS\n if runtyp==int(1):\n if 'NUMBER OF STATES REQUESTED =' in line:\n nstate=int(line.split( )[5])+1\n u_x=np.zeros( (nstate,nstate) )\n u_y=np.zeros( (nstate,nstate) )\n u_z=np.zeros( (nstate,nstate) )\n u_x_2=np.zeros( (nstate,nstate) )\n u_y_2=np.zeros( (nstate,nstate) )\n u_z_2=np.zeros( (nstate,nstate) )\n ediff=np.zeros( (nstate,nstate) )\n if ' GROUND STATE (SCF) DIPOLE= ' in line:\n u_x[0,0]=float(line.split()[4])*deb2bohr\n u_y[0,0]=float(line.split()[5])*deb2bohr\n u_z[0,0]=float(line.split()[6])*deb2bohr\n if 'EXPECTATION VALUE DIPOLE MOMENT FOR EXCITED STATE' 
in line:\n i=j=int(line.split()[7])\n for x in range (0,5):\n line=log.next()\n u_x[i,j]=(float(line.split()[3])-u_x[0,0])\n u_y[i,j]=(float(line.split()[4])-u_y[0,0])\n u_z[i,j]=(float(line.split()[5])-u_z[0,0])\n if 'TRANSITION FROM THE GROUND STATE TO EXCITED STATE' in line:\n i=0\n j=int(line.split( )[8])\n if 'TRANSITION BETWEEN EXCITED STATES ' in line:\n i=int(line.split( )[4])\n j=int(line.split( )[6])\n if 'STATE ENERGIES =' in line:\n ediff[i,j]=float((float(line.split( )[4])-float(line.split( )[3])))\n if 'TRANSITION DIPOLE' in line and 'E*BOHR' in line:\n if i!=j:\n u_x[i,j]=float(line.split( )[3])\n u_y[i,j]=float(line.split( )[4])\n u_z[i,j]=float(line.split( )[5])\n#\n#runtyp=2, TDDFT\n if runtyp==2:\n if 'NSTATE=' in line and 'IROOT=' in line:\n nstate=int(line.split()[1])+1\n u_x=np.zeros( (nstate,nstate) )\n u_y=np.zeros( (nstate,nstate) )\n u_z=np.zeros( (nstate,nstate) )\n u_x_2=np.zeros( (nstate,nstate) )\n u_y_2=np.zeros( (nstate,nstate) )\n u_z_2=np.zeros( (nstate,nstate) )\n ediff=np.zeros( (nstate,nstate) )\n #check to make sure DFT is entered, so HF data isn't pulled\n if 'PROPERTIES FOR THE' in line and 'FUNCTIONAL' in line:\n dftcheck=1\n if 'ELECTROSTATIC MOMENTS' in line and dftcheck==1:\n for x in range (0,6):\n line=log.next()\n u_x[0,0]=float(line.split()[0])*deb2bohr\n u_y[0,0]=float(line.split()[1])*deb2bohr\n u_z[0,0]=float(line.split()[2])*deb2bohr\n if 'STATE ENERGY EXCITATION TRANSITION DIPOLE, A.U. OSCILLATOR' in line:\n for x in range (0,3):\n line=log.next()\n for x in range (1,nstate):\n line=line.replace('-',' -')\n ediff[0,x]=float(line.split()[3])*ev2h\n u_x[0,x]=float(line.split()[4])\n u_y[0,x]=float(line.split()[5])\n u_z[0,x]=float(line.split()[6])\n line=log.next()\n if ' TRANSITION EXCITATION TRANSITION DIPOLE, A.U. OSCILLATOR' in line:\n for x in range (0,3): \n line=log.next()\n count=((nstate-1)**2+nstate-1)/2\n for x in range (0,count):\n i=int(line.split()[0]) # cast to int: these are used as numpy array indices below\n j=int(line.split()[2])\n line=line.replace('-',' -')\n ediff[i,j]=float(line.split()[3])*ev2h \n if i==j:\n u_x[i,i]=float(line.split()[4])-u_x[0,0]\n u_y[i,i]=float(line.split()[5])-u_y[0,0]\n u_z[i,i]=float(line.split()[6])-u_z[0,0]\n if i!=j:\n u_x[i,j]=float(line.split()[4])\n u_y[i,j]=float(line.split()[5])\n u_z[i,j]=float(line.split()[6])\n line=log.next()\nlog.close( )\n#\n# make symmetric\nfor i in range(0,nstate):\n for j in range(0,nstate):\n u_x[j,i]=u_x[i,j]\n u_y[j,i]=u_y[i,j]\n u_z[j,i]=u_z[i,j]\n ediff[j,i]=ediff[i,j]\n#\n#\n# Set u_r_2 diag elements of moment matrix to\n# excited state dipoles instead of difference with\n# ground-state\nfor i in range(0,nstate):\n for j in range(0,nstate):\n u_x_2[i,j]=u_x[i,j]\n u_y_2[i,j]=u_y[i,j]\n u_z_2[i,j]=u_z[i,j]\n if i==j!=0:\n u_x_2[i,i]=u_x[i,i]+u_x[0,0]\n u_y_2[i,i]=u_y[i,i]+u_y[0,0]\n u_z_2[i,i]=u_z[i,i]+u_z[0,0]\n#\n#\n# SOS expressions for dynamic properties\n#\n#\n# first-order hyperpolarizability \n# Beta(-(w1+w2);w1,w2) is calculated and \n# SOS expressions are written to gnuplot \n# input files\n#\n#\nnstate=terms+1\nbeta = np.zeros( (3,3,3) ) # Rank-3 first-hyperpolarizability tensor \nalpha=np.zeros( (3,3) ) # Rank-2 polarizability tensor\nu=[u_x,u_y,u_z] # Rank-3 moment tensor\ndom_str=[None]*1000 # first-hyperpolarizability denominator expression string \ndel_beta=np.zeros( (6,2) ) # del_beta holds numerator/denominator values for each of the\n# six permutation unique terms. 
Used for building GNUplot.in\n#\n#\nfor i in range(0,3):\n for j in range(0,3):\n for k in range(0,3):\n if prt_beta==1:\n file_name1='full_beta_'+str(i)+str(j)+str(k)+'.in'\n file_name2='terms_beta_'+str(i)+str(j)+str(k)+'.in'\n log1=open(file_name1,'w')\n log2=open(file_name2,'w')\n log1.write('plot ['+str(plot_min)+':'+str(plot_max)+'] ')\n log2.write('plot ['+str(plot_min)+':'+str(plot_max)+'] ')\n for n in range(1, nstate):\n for p in range(1, nstate):\n del_beta[0,1] = ((u[i][n][0]*u[j][n][p]*u[k][0][p]))\n del_beta[0,0] = del_beta[0,1]/((ediff[0][n]-(w1+w2))*(ediff[0][p]-w2))\n dom_str[0]='(('+str(ediff[0][n])+'-2*x)*('+str(ediff[0][p])+'-x))'\n\n del_beta[1,1] = ((u[i][n][0]*u[k][n][p]*u[j][0][p]))\n del_beta[1,0] = del_beta[1,1]/((ediff[0][n]-(w1+w2))*(ediff[0][p]-w1))\n dom_str[1]='(('+str(ediff[0][n])+'-2*x)*('+str(ediff[0][p])+'-x))'\n\n del_beta[2,1] = ((u[j][n][0]*u[i][n][p]*u[k][0][p]))\n del_beta[2,0] = del_beta[2,1]/((ediff[0][n]+w1)*(ediff[0][p]-w2))\n dom_str[2]='(('+str(ediff[0][n])+'+x)*('+str(ediff[0][p])+'-x))'\n\n del_beta[3,1] = ((u[k][n][0]*u[i][n][p]*u[j][0][p]))\n del_beta[3,0] = del_beta[3,1]/((ediff[0][n]+w2)*(ediff[0][p]-w1))\n dom_str[3]='(('+str(ediff[0][n])+'+x)*('+str(ediff[0][p])+'-x))'\n\n del_beta[4,1] = ((u[k][n][0]*u[j][n][p]*u[i][0][p]))\n del_beta[4,0] = del_beta[4,1]/((ediff[0][n]+w2)*(ediff[0][p]+(w1+w2)))\n dom_str[4]='(('+str(ediff[0][n])+'+x)*('+str(ediff[0][p])+'+2*x))'\n\n del_beta[5,1] = ((u[j][n][0]*u[k][n][p]*u[i][0][p]))\n del_beta[5,0] = del_beta[5,1]/((ediff[0][n]+w1)*(ediff[0][p]+(w1+w2)))\n dom_str[5]='(('+str(ediff[0][n])+'+x)*('+str(ediff[0][p])+'+2*x))'\n# SOS expressions for GNUplot are punched to file next\n for r in range(0,6):\n beta[i,j,k]=beta[i,j,k]+del_beta[r,0]\n if (del_beta[r,0]>beta_print_tol or del_beta[r,0]<-beta_print_tol) and prt_beta==1:\n string2=str(del_beta[r,1])+'/'+dom_str[r]+',' #' title \"'+str(n)+','+str(p)+','+str(r)+'\" '\n string1=str(del_beta[r,1])+'/'+dom_str[r]+' + '\n log1.write(string1)\n log2.write(string2)\n#\n# \n# Two-photon absorption matrix elements.\n# SOS TPA matrix expression is evaluated and large contributions are punched\n# to file for each state.\n#\n#\ntpa=np.zeros( (nstate-1,3,3) )\ninvar=np.zeros( (nstate-1) )\ncs=np.zeros( (nstate-1) )\ndel_tpa=np.zeros( (nstate-1,nstate,3,3) )\nz=[u_x_2,u_y_2,u_z_2]\nfor f in range(1,nstate):\n for i in range(0,3):\n for j in range(0,3):\n for n in range(0,nstate):\n del_tpa[f-1,n,i,j]= ((z[i][f][n]*z[j][n][0])/(ediff[0][n]-(ediff[0][f]/2)) + \\\n ((z[j][f][n]*z[i][n][0])/(ediff[0][n]-(ediff[0][f]/2))))\n tpa[f-1,i,j]=tpa[f-1,i,j] + del_tpa[f-1,n,i,j]\n#\n#\n# Invariant TPA matrix (au)\n#\n#\nfor f in range(1,nstate):\n invar[f-1]=6*(((tpa[f-1,0,0])+(tpa[f-1,1,1])+(tpa[f-1,2,2]))**2)+8*((tpa[f-1,0,1]**2) + \\\n (tpa[f-1,0,2]**2)+(tpa[f-1,1,2]**2)-(tpa[f-1,0,0]*tpa[f-1,1,1]) - \\\n (tpa[f-1,0,0]*tpa[f-1,2,2]) - (tpa[f-1,1,1]*tpa[f-1,2,2]))\n#\n#\n# TPA cross-section in GM units\n#\n#\nfor i in range(1,nstate):\n cs[i-1]=(4*(pi**2)*(a0**5)*fsc*(((ediff[0,i])/2.0)**2)*invar[i-1])/(15*c*gamma)\n#\n#\n#\n# Printing\n#\n#\noutput=open('tpa_output.txt','w')\n#\n# Print TPA SOS summary\n#\noutput.write('==========================================\\n')\noutput.write('Two-photon absorption SOS analysis summary \\n')\noutput.write('==========================================\\n\\n')\noutput.write(str(terms)+' excited states included in SOS expansion\\n\\n')\noutput.write(' state\\t ex. 
energy(au) invar(au) cs(GM*1e50) \n\n')\nfor f in range(1,nstate):\n write_vars = [f,ediff[0,f]/2,invar[f-1],cs[f-1]*1e50]\n write_vars = ['%.4e' % v for v in write_vars]\n for item in write_vars:\n output.write(str(item)+'\\t')\n output.write('\\n')\noutput.write('\\n')\n#\n# Print TPA SOS details for each state\n#\nfor f in range(1,nstate):\n output.write('========================\\n')\n output.write('State '+str(f)+' TPA SOS analysis \\n')\n output.write('========================\\n\\n')\n output.write('TPA matrix (au)\\n')\n output.write(str(tpa[f-1])+'\\n\\n')\n output.write('Dominant terms in TPA SOS expansion \\n\\n')\n output.write('final-state,intermediate-state,i,j\\n\\n')\n for i in range(0,3):\n for j in range(i,3):\n for n in range(0,nstate):\n if del_tpa[f-1,n,i,j]>tpa_print_tol or del_tpa[f-1,n,i,j]<-tpa_print_tol:\n string=str(f)+','+str(n)+','+str(i)+','+str(j)+' = '+str(del_tpa[f-1,n,i,j])+'\\n'\n output.write(string)\n output.write('\\n')\n#\n# Print beta tensor\n#\noutput.write('===============================\\n')\noutput.write('Hyperpolarizability tensor (au)\\n')\noutput.write('===============================\\n\\n')\noutput.write(str(beta))\n#\n# ran to completion\nprint('Done calculating properties')\n","sub_path":"soci_cis_parsers_live_here_for_now.py","file_name":"soci_cis_parsers_live_here_for_now.py","file_ext":"py","file_size_in_byte":13865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"598380753","text":"import unittest\nimport warnings\nfrom ddt import ddt,data\nfrom chao.work.project.common.do_excel import DoExcel\nfrom chao.work.project.common.http_request import HttpRequest\nfrom chao.work.project.common import project_path\nfrom chao.work.project.common.my_log import MyLog\nfrom chao.work.project.common.get_data import GetData\nfrom chao.work.project.common.learn_mysql import Do_Mysql\n\n\n# test data\n\nfile_name = project_path.case_path\nsheet_name = 'invest'\ntest_data=DoExcel(project_path.case_path,sheet_name).read_data('test_recharge')\nmy_log=MyLog()\n\n\nprint(test_data)\nCOOKIES=None\n@ddt # decorates the class\nclass testCases(unittest.TestCase):\n\n def setUp(self):\n print('test start') # things to do before each test case go here\n warnings.simplefilter('ignore',ResourceWarning)\n\n\n def tearDown(self):\n print('test end') # things to do after each test case go here\n\n # write the test cases\n\n @data(*test_data) # decorates the cases; * unpacks the list so each data item runs as one case\n def test_cases(self,case): # each test case (a dict) is passed in as case\n # global COOKIES #\n global TestResult # global variable\n\n method = case['Method']\n url = case['Url']\n param =eval(case['Params'])\n if case['Sql'] is not None:\n before_amount=Do_Mysql().do_mysql(eval(case['Sql'])['sql'])[0]\n # start the test\n my_log.info('now testing module {}, case {}: {}'.format(case['Module'], case['CaseId'], case['Title']))\n\n resp = HttpRequest().http_request(method=method,url=url,data=param,cookies=getattr(GetData,'COOKIE')) # use this class to send the request\n\n\n\n if resp.cookies: # check whether the response cookies are empty; non-empty is truthy\n setattr(GetData,'COOKIE',resp.cookies)\n print(resp.cookies)\n\n\n\n try: # guard the assertions\n\n\n self.assertEqual(eval(case['ExpectedResult']),resp.json()) # comparing json dicts works best\n if case['Sql'] is not None: # if Sql is not None, query the database\n after_amount = Do_Mysql().do_mysql(eval(case['Sql'])['sql'], 1)[0] # returns a tuple\n rechar_amount = eval(case['Params'])['amount']\n expect_amount = before_amount - int(rechar_amount) # two different data types\n self.assertEqual(expect_amount,after_amount)\n\n TestResult='Pass'\n except Exception as e: # handle assertion failures here\n TestResult = 'Failed'\n my_log.error('http request test case failed, error: {}'.format(e))\n\n raise e # re-raise, otherwise every case would report success and none would fail\n finally:\n t = 
DoExcel(project_path.case_path, 'test_case')\n t.write_back(file_name,sheet_name,case['CaseId'] + 1, 9, str(resp.json()))\n t.write_back(file_name,sheet_name,case['CaseId'] + 1, 10, TestResult)\n\n\n my_log.info('actual result: {}'.format(resp.json())) # the actual response returned by the http request\n\n\n","sub_path":"test_cases/invest_loan.py","file_name":"invest_loan.py","file_ext":"py","file_size_in_byte":3007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"155350158","text":"# encoding: UTF-8\nfrom skeleton import Skeleton\nfrom . import QuaternionFrame\nfrom . import check_quat, convert_quat_frame_value_to_array\nfrom . import SKELETON_NODE_TYPE_END_SITE, LEN_EULER, LEN_ROOT\nimport numpy as np\nfrom ..external.transformations import euler_matrix, euler_from_matrix\n\n\nclass BVHAnalyzer(Skeleton):\n\n def __init__(self, bvhreader):\n super(BVHAnalyzer, self).__init__(bvhreader)\n self.quat_frames = []\n self.euler_frames = bvhreader.frames\n self.n_frames = len(self.euler_frames)\n\n def get_global_pos(self, joint_name, frame_index):\n joint_chain = self.get_joint_chain(joint_name)\n global_trans = np.eye(4)\n global_trans[:3, 3] = self.euler_frames[frame_index][:LEN_ROOT]\n for joint in joint_chain:\n offset = joint.offset\n if 'EndSite' in joint.node_name: # end site joint\n rot_mat = np.eye(4)\n rot_mat[:3, 3] = offset\n else:\n rot_angles_euler = self.get_relative_orientation_euler(joint.node_name, frame_index)\n rot_angles_rad = np.deg2rad(rot_angles_euler)\n rot_mat = euler_matrix(rot_angles_rad[0],\n rot_angles_rad[1],\n rot_angles_rad[2],\n 'rxyz')\n rot_mat[:3, 3] = offset\n global_trans = np.dot(global_trans, rot_mat)\n return global_trans[:3, 3]\n\n def get_joint_chain(self, joint_name):\n joint = self.get_joint_by_joint_name(joint_name)\n joint_chain = []\n while joint.parent is not None:\n joint_chain.append(joint)\n joint = joint.parent\n joint_chain.append(joint)\n joint_chain.reverse()\n return joint_chain\n\n def get_relative_pos(self, joint_name, frame_index):\n joint_chain = self.get_joint_chain(joint_name)\n if len(joint_chain) == 1:\n raise ValueError('Root joint has no relative position')\n pos = self.get_global_pos(joint_name, frame_index)\n parent_pos = self.get_global_pos(joint_chain[-2].node_name, frame_index)\n return pos - parent_pos\n\n def get_joint_offset(self, joint_name):\n return self.nodes[joint_name].offset\n\n def _get_nodes_without_endsite(self):\n animated_nodes = self.nodes.values()\n nodes_without_endsite = [node for node in animated_nodes if node.node_type != SKELETON_NODE_TYPE_END_SITE]\n return nodes_without_endsite\n\n def get_relative_orientation_euler(self, joint_name, frame_index):\n # assert frame_index in range(self.n_frames), ('Frame index is invalid!')\n nodes_without_endsite = self._get_nodes_without_endsite()\n assert (len(nodes_without_endsite)+1) * 3 == len(self.euler_frames[0]), \\\n ('The length of euler frame is not corresponding to length of modeled joints')\n joint = self.get_joint_by_joint_name(joint_name)\n assert joint in nodes_without_endsite, (\"The joint is not modeled!\")\n joint_index = nodes_without_endsite.index(joint)\n start_channel_index = joint_index * 3 + LEN_ROOT\n end_channel_index = start_channel_index + LEN_EULER\n return self.euler_frames[frame_index][start_channel_index: end_channel_index]\n\n def get_global_transform(self, joint_name, frame_index):\n joint_chain = self.get_joint_chain(joint_name)\n global_trans = np.eye(4)\n global_trans[:3, 3] = 
self.euler_frames[frame_index][:LEN_ROOT]\n for joint in joint_chain:\n offset = joint.offset\n if 'EndSite' in joint.node_name: # end site joint\n rot_mat = np.eye(4)\n rot_mat[:3, 3] = offset\n else:\n rot_angles_euler = self.get_relative_orientation_euler(joint.node_name, frame_index)\n rot_angles_rad = np.deg2rad(rot_angles_euler)\n rot_mat = euler_matrix(rot_angles_rad[0],\n rot_angles_rad[1],\n rot_angles_rad[2],\n 'rxyz')\n rot_mat[:3, 3] = offset\n global_trans = np.dot(global_trans, rot_mat)\n return global_trans\n\n def get_global_orientation_euler(self, joint_name, frame_index):\n joint_chain = self.get_joint_chain(joint_name)\n global_trans = np.eye(4)\n global_trans[:3, 3] = self.euler_frames[frame_index][:LEN_ROOT]\n for joint in joint_chain:\n offset = joint.offset\n rot_angles_euler = self.get_relative_orientation_euler(joint.node_name, frame_index)\n rot_angles_rad = np.deg2rad(rot_angles_euler)\n rot_mat = euler_matrix(rot_angles_rad[0],\n rot_angles_rad[1],\n rot_angles_rad[2],\n 'rxyz')\n rot_mat[:3, 3] = offset\n global_trans = np.dot(global_trans, rot_mat)\n global_angles_rad = euler_from_matrix(global_trans,\n 'rxyz')\n return np.rad2deg(global_angles_rad)\n\n def set_relative_orientation_euler(self, joint_name, frame_index, euler_angles):\n \"\"\"\n\n :param joint_name: str\n :param frame_index: int\n :param euler_angles: array degree\n :return:\n \"\"\"\n # assert frame_index in range(self.n_frames), ('Frame index is invalid!')\n animated_nodes = self.nodes.values()\n nodes_without_endsite = [node for node in animated_nodes if node.node_type != SKELETON_NODE_TYPE_END_SITE]\n assert (len(nodes_without_endsite)+1) * 3 == len(self.euler_frames[0]), \\\n ('The length of euler frame is not corresponding to length of modeled joints')\n joint_index = 0\n for node in nodes_without_endsite:\n if node.node_name == joint_name:\n break\n else:\n joint_index += 1\n start_channel_index = (joint_index + 1) * 3\n end_channel_index = start_channel_index + LEN_EULER\n self.euler_frames[frame_index][start_channel_index: end_channel_index] = euler_angles\n\n def get_joint_index(self, joint_name):\n joint_name_list = self.nodes.keys()\n if joint_name not in joint_name_list:\n raise ValueError('joint name is not found!')\n return joint_name_list.index(joint_name)\n\n def set_joint_offset(self, joint_name, offset):\n assert len(offset) == 3, ('The length of joint is not correct')\n joint = self.get_joint_by_joint_name(joint_name)\n joint.offset = [offset[0], offset[1], offset[2]]\n\n def get_joint_by_joint_name(self, joint_name):\n if joint_name not in self.nodes.keys():\n raise KeyError('Joint name is not found!')\n return self.nodes[joint_name]\n\n def to_quaternion(self, filter_joints=True):\n last_quat_frame = None\n for frame in self.euler_frames:\n quat_frame = QuaternionFrame(self, frame, ignore_bip_joints=filter_joints)\n if last_quat_frame is not None:\n for joint_name in self.node_names.keys():\n if joint_name in quat_frame.keys():\n quat_frame[joint_name] = check_quat(\n quat_frame[joint_name],\n last_quat_frame[joint_name])\n last_quat_frame = quat_frame\n root_translation = frame[0:3]\n quat_frame_values = [root_translation, ] + quat_frame.values()\n quat_frame_values = convert_quat_frame_value_to_array(quat_frame_values)\n self.quat_frames.append(quat_frame_values)\n\n def get_joint_channel_in_full_euler_frame(self, joint):\n \"\"\"\n\n :param joint: str, joint name\n :return:\n \"\"\"\n return self.node_channels.index((joint, 'Xrotation'))\n\n def 
get_closure_kinematic_chain(self, joint):\n joint_chain = []\n if joint.parent is not None:\n joint_chain.append(joint)\n joint_chain.reverse() # list.reverse() works in place and returns None\n return joint_chain\n\n\n def to_Cartesian(self):\n pass\n\n","sub_path":"python_src/morphablegraphs/animation_data/bvh_analyzer.py","file_name":"bvh_analyzer.py","file_ext":"py","file_size_in_byte":8173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"132729729","text":"from flask import Flask, render_template\nfrom flask_bootstrap import Bootstrap\nfrom flask_nav import Nav\nfrom flask_nav.elements import *\n\nnav = Nav()\nnav.register_element('top', Navbar('Serval Swapper',\n View('Home', 'index'),\n Link('Source', 'https://github.com/musaprg/serval-swapper'),\n))\n\napp = Flask(__name__)\nBootstrap(app)\n\nnav.init_app(app)\n\n@app.route(\"/\")\ndef index():\n return render_template(\"index.html\")\n\n@app.route(\"/test\")\ndef test():\n return render_template(\"test.html\")\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"235416951","text":"#first val is key\n\ncustomer = {\n \"name\":\"John Smith\",\n \"age\" : 30,\n \"is_verified\": True\n}\nprint(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\n#call key\nprint(customer[\"name\"])\n\n\n\n\n# Exercise\nprint(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\nnumbers = {\n \"1\":\"One \",\n \"2\":\"Two \",\n \"3\":\"Three \",\n \"4\":\"Four \",\n \"5\":\"Five \",\n \"6\":\"Six \",\n \"7\":\"Seven \",\n \"8\":\"Eight \",\n \"9\":\"Nine \",\n \"0\":\"Zero \"\n}\nnumber = input(\"What is your number going to be?: \")\nfor x in number:\n print(numbers[x])","sub_path":"Python/MOSH_PythonBeginner/14_dictionaries.py","file_name":"14_dictionaries.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"491154205","text":"'''\nUsing one print statement and string concatenation assemble and print the\nfollowing to console: “my var: 4, my var1: 2, my var2: Magic Number”.\nTo concatenate my var and my var1 along with other strings you will need\nto use the “+” operator and str(my var) and str(my var1) to represent\nthem as strings\n'''\n\nmy_var = 4\nmy_var1 = 2\nmy_var2 = 'Magic number'\nmy_var3 = \" Magic Number\"\n\nprint(str(my_var)+str(my_var1)+my_var3) # --->42 Magic Number\nprint(my_var, my_var1, my_var2) # --->4 2 Magic number\n","sub_path":"Python/Python Fundamentals/1.Introduction/2. 
Programming Questions/Exercise 3.py","file_name":"Exercise 3.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"50383551","text":"import datetime\nimport sys\nimport os\n\npath = os.path.dirname(os.path.abspath(__file__))\n\namountLamp = int(sys.argv[1])\nnameEnv = sys.argv[2]\nbright = int(sys.argv[3])\n \nx = datetime.datetime.now()\ndate = f'{x.day:02d}/{x.month:02d}/{x.year}'\n\nwith open(f'{path}/../logs/{nameEnv}.txt', 'a') as outFile:\n cost = (amountLamp*0.0028)*(bright/100)\n outFile.write('{} - {:.5f} \\n'.format(date, cost))\n","sub_path":"src/CronPrint.py","file_name":"CronPrint.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"438293844","text":"import sublime\nimport sublime_plugin\n\nvalid_fb2_tags = ['p', 'section', 'empty-line/', 'emphasis', 'strong',\n 'epigraph','cite', \n 'book-name', 'src-title-info', 'title', 'image', \n 'image',\n 'v', 'stanza', 'poem', 'text-author',\n 'a', 'body', 'subtitle', 'sup','sub',\n 'id', 'binary', 'src-lang','td',\n 'first-name',\n 'coverpage', 'book-title', 'nickname', \n 'title-info', 'publish-info', 'sequence', \n 'history', 'city', 'genre', 'tr', \n 'author', 'version', 'middle-name', 'translator', \n '?xml', 'table', 'src-ocr', 'src-url', 'email',\n 'th', 'year', 'program-used', 'document-info', \n 'last-name', 'description', 'strikethrough',\n 'keywords', 'custom-info', 'lang', 'FictionBook', 'date',\n 'code', 'publisher', 'annotation']\n\n\nclass Fb2AutoCompleterListener(sublime_plugin.ViewEventListener):\n def on_query_completions(self, prefix, locations):\n \"\"\"return list, tuple or None\"\"\"\n fname = self.view.file_name()\n if fname is None or fname=='' or fname.endswith('.fb2'):\n if prefix.startswith('/'):\n _p = prefix[1:]\n _l = list(['/{}\\t{}'.format(s, 'valid fb2 tag'), '/{}>'.format(s)]\\\n for s in \\\n filter(lambda s: s.startswith(_p), valid_fb2_tags)\n )\n return _l if len(_l) != 0 else None\n else:\n _l = list(['{}\\t{}'.format(s, 'valid fb2 tag'), '{}>'.format(s)]\\\n for s in \\\n filter(lambda s: s.startswith(prefix), valid_fb2_tags)\n )\n return _l if len(_l) != 0 else None\n return None\n\n","sub_path":"fb2_auto_completer.py","file_name":"fb2_auto_completer.py","file_ext":"py","file_size_in_byte":1943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"64900587","text":"# -*- coding: utf-8 -*-\n\"\"\"\n/***************************************************************************\n EvaQ8DockWidget\n A QGIS plugin\n SDSS system helping police officers evacuate buildings.\n -------------------\n begin : 2016-12-13\n git sha : $Format:%H$\n copyright : (C) 2016 by Lilia Angelova\n email : urb.lili.an@gmail.com\n ***************************************************************************/\n\n/***************************************************************************\n * *\n * This program is free software; you can redistribute it and/or modify *\n * it under the terms of the GNU General Public License as published by *\n * the Free Software Foundation; either version 2 of the License, or *\n * (at your option) any later version. 
*\n * *\n ***************************************************************************/\n\"\"\"\n\nimport os\nimport csv\n\nfrom PyQt4 import QtGui, uic, QtCore\nfrom PyQt4.QtCore import pyqtSignal\nfrom qgis.core import *\nfrom qgis.networkanalysis import *\nfrom qgis.gui import *\nfrom utility_functions import *\n\nFORM_CLASS, _ = uic.loadUiType(os.path.join(\n os.path.dirname(__file__), 'EvaQ8_dockwidget_base.ui'))\n\n\nclass EvaQ8DockWidget(QtGui.QDockWidget, FORM_CLASS):\n\n closingPlugin = pyqtSignal()\n\n\n def __init__(self, iface, parent=None):\n \"\"\"Constructor.\"\"\"\n super(EvaQ8DockWidget, self).__init__(parent)\n # Set up the user interface from Designer.\n # After setupUI you can access any designer object by doing\n # self., and you can use autoconnect slots - see\n # http://qt-project.org/doc/qt-4.8/designer-using-a-ui-file.html\n # #widgets-and-dialogs-with-auto-connect\n self.setupUi(self)\n self.Police.clicked.connect(self.getPolice)\n self.Ambulance.clicked.connect(self.getAmbulance)\n\n\n # define globals\n self.iface = iface\n self.canvas = self.iface.mapCanvas()\n self.plugin_dir = os.path.dirname(__file__)\n self.LoadLayers()\n self.getAttributes()\n self.Send_Location.clicked.connect(self.sendLocation)\n self.Send_Location.clicked.connect(self.policemen_send_location)\n\n self.Main_table.itemSelectionChanged.connect(self.startNavigationOn)\n self.Navigation.setDisabled(True)\n self.Navigation.clicked.connect(self.startnavigation)\n self.graph = QgsGraph()\n self.tied_points = []\n self.roads_layer = getLegendLayerByName(self.iface, 'ROAD_NETWORK')\n self.current_location = None\n\n # report\n self.createCSV()\n self.Send_report.clicked.connect(self.sendReport)\n self.Send_report.clicked.connect(self.clear)\n self.Send_report.clicked.connect(self.evacuated_building)\n\n #disabled buttons if nothing selected\n self.Send_Location.setDisabled(True)\n self.Send_report.setDisabled(True)\n self.Police.setDisabled(True)\n self.Ambulance.setDisabled(True)\n\n #enable if selection\n self.Main_table.itemSelectionChanged.connect(self.Enable_buttons)\n\n def startnavigation(self):\n building_layer = getLegendLayerByName(self.iface, \"Buildings\")\n police_stations_layer = getLegendLayerByName(self.iface, \"POLICE_STATIONS\")\n routes_layer = getLegendLayerByName(self.iface, \"Routes\")\n if routes_layer:\n routes_layer.startEditing()\n features = routes_layer.getFeatures()\n ids = [feature.id() for feature in features]\n routes_layer.deleteFeatures(ids)\n routes_layer.commitChanges()\n else:\n routes_layer = createTempLayer('Routes', 'LINESTRING', building_layer.crs().postgisSrid(), ['id'], [QtCore.QVariant.Int])\n routes_layer.loadNamedStyle(\"%s/FINAL_DATA/routes.qml\" % self.plugin_dir)\n loadTempLayer(routes_layer)\n\n # get the building point, and the corresponding police station point\n # destination is currently selected building:\n building_id = self.Main_table.item(self.Main_table.currentRow(), 0).text()\n # select building feature with these coordinates.(extracting only the floats)\n building_coords = building_id[1:-1].split(',')\n target_point = QgsPoint(float(building_coords[0]), float(building_coords[1]))\n # origin is the current location or the police station\n if self.current_location:\n # get id of the currently selected building\n origin_id = self.Main_table.item(self.current_location, 0).text()\n origin_coords = origin_id[1:-1].split(',')\n origin_point = QgsPoint(float(origin_coords[0]), float(origin_coords[1]))\n else:\n # select police station name from 
target building feature\n origin_attribs = self.getFeaturesWithValues(building_layer, 'X', [float(building_coords[0])])\n origin_id = origin_attribs.values()[0][9]\n # select police station with that id\n request = QgsFeatureRequest().setFilterExpression(\"\\\"name\\\" = '%s'\" % origin_id)\n iterator = police_stations_layer.getFeatures(request)\n for feature in iterator:\n origin_point = QgsPoint(feature.attribute('X'), feature.attribute('Y'))\n break\n\n # add these points to a list\n new_points = [origin_point, target_point]\n # build the graph, which returns the tied points. this is a list of points that are used to calculate routes.\n self.graph, self.tied_points = makeUndirectedGraph(self.roads_layer, new_points)\n\n # this next part calculates the route\n origin = 0 # the first point in the new_points list\n destination = 1 # the other point\n path = calculateRouteDijkstra(self.graph, self.tied_points, origin, destination)\n # store the route results in temporary layer called \"Routes\", with an id column\n if path and routes_layer:\n provider = routes_layer.dataProvider()\n fet = QgsFeature()\n fet.setGeometry(QgsGeometry.fromPolyline(path))\n provider.addFeatures([fet])\n provider.updateExtents()\n self.canvas.refresh()\n # keep this location as next origin\n self.current_location = self.Main_table.currentRow()\n self.canvas.setExtent(routes_layer.extent())\n self.canvas.zoomOut()\n self.canvas.refresh()\n\n def getFeaturesWithValues(self, layer, name, values=list):\n features = {}\n if layer:\n if fieldExists(layer, name):\n iterator = layer.getFeatures()\n for feature in iterator:\n att = feature.attribute(name)\n if att in values:\n features[feature.id()] = feature.attributes()\n return features\n\n def startNavigationOn(self):\n self.Navigation.setDisabled(False)\n\n\n def getPolice(self):\n # self.textEdit.setTextColor(QtGui.QColor.setblue(255))\n self.textEdit.setText('Police are on their way!')\n current_text = self.lineEdit_Policemen.text()\n if current_text == '':\n current_count = 0\n else:\n current_count = int(current_text)\n\n self.lineEdit_Policemen.setText(str(current_count + 1))\n\n\n\n def getAmbulance(self):\n # self.textEdit.setTextColor(QtGui.QColor.setred(255))\n self.textEdit.setText('An ambulance is on its way!')\n\n current_text = self.lineEdit_Ambulances.text()\n if current_text == '':\n current_count = 0\n else:\n current_count = int(current_text)\n self.lineEdit_Ambulances.setText(str(current_count + 1))\n\n\n def sendLocation(self):\n try:\n current_row = self.Main_table.currentRow()\n self.current_location = current_row\n update_item = self.Main_table.item(current_row, 2)\n current_count = int(update_item.text())\n update_item.setText(str(current_count + 1))\n self.Main_table.setItem(current_row, 2, update_item)\n self.Send_Location.setDisabled(True)\n except:\n pass\n\n\n def LoadLayers(self,filename=\"\"):\n scenario_open = False\n scenario_file = self.plugin_dir+'/FINAL_DATA/EvaQ8_project.qgs'\n # check if file exists\n if os.path.isfile(scenario_file):\n self.iface.addProject(scenario_file)\n scenario_open = True\n else:\n last_dir = getLastDir(\"SDSS\")\n new_file = QtGui.QFileDialog.getOpenFileName(self, \"\", last_dir, \"(*.qgs)\")\n if new_file:\n self.iface.addProject(unicode(new_file))\n scenario_open = True\n\n\n def closeEvent(self, event):\n self.closingPlugin.emit()\n event.accept()\n\n\n def clearTable(self):\n self.Main_table.clear()\n\n\n def getAttributes(self):\n layer = getCanvasLayerByName(self.iface, \"Buildings\")\n table = []\n 
for feature in layer.getFeatures():\n #get feature attributes\n coord = feature['X'], feature['Y']\n priority = feature['priority']\n table.append((coord, priority))\n self.clearTable()\n self.updateTable(table)\n\n\n def updateTable(self,values):\n self.Main_table.setHorizontalHeaderLabels([\"Location\",\"Priority\",\"Officer at place\"])\n self.Main_table.setRowCount(len(values))\n for i, item in enumerate(values):\n self.Main_table.setItem(i, 0, QtGui.QTableWidgetItem(str(item[0])))\n self.Main_table.setItem(i, 1, QtGui.QTableWidgetItem(str(item[1])))\n self.Main_table.setItem(i, 2, QtGui.QTableWidgetItem(str(0)))\n self.Main_table.horizontalHeader().setResizeMode(0, QtGui.QHeaderView.ResizeToContents)\n self.Main_table.horizontalHeader().setResizeMode(1, QtGui.QHeaderView.ResizeToContents)\n self.Main_table.horizontalHeader().setResizeMode(2, QtGui.QHeaderView.Stretch)\n #hide grid\n self.Main_table.setShowGrid(True)\n #set background color of selected row\n self.Main_table.setStyleSheet(\"QTableView {selection-background-color: red;}\")\n self.Main_table.resizeRowsToContents()\n self.Main_table.sortItems(1)\n self.Main_table.itemSelectionChanged.connect(self.Additional_info)\n self.Main_table.itemSelectionChanged.connect(self.createReport)\n\n def Additional_info(self):\n #get from the selected building it's x coordinate\n v = self.Main_table.selectedItems()[0].text()\n next = v[1:-1]\n t = next.split(\",\")\n coord = float(t[0])\n #search in the layer for this feature\n layer = getCanvasLayerByName(self.iface, \"Buildings\")\n feature = getFeaturesByExpression(layer,'\"X\"=%s'%coord)\n #get all attributes of the feature\n l = feature.values()\n #puting the ones needed in Additional info tab\n self.Population_floor.setText(str(l[0][5]))\n self.Population_total.setText(str(l[0][4]))\n self.Building_type.setText(str(l[0][6]))\n self.Floors.setText(str(l[0][3]))\n #zoom to the selected feature\n layer.setSelectedFeatures(feature.keys())\n if layer.selectedFeatureCount() > 0:\n self.iface.mapCanvas().setCurrentLayer(layer)\n self.iface.mapCanvas().zoomToSelected()\n self.canvas.zoomScale(1000.0)\n\n\n# Report functions\n\n def createReport(self):\n items = self.Main_table.selectedItems()[0].text()\n next = items[1:-1]\n attr = next.split(\",\")\n coord = float(attr[0])\n # search in the layer for this feature\n layer = getCanvasLayerByName(self.iface, \"Buildings\")\n feature = getFeaturesByExpression(layer, '\"X\"=%s'%coord)\n # get all attributes of the feature\n l = feature.values()\n # puting the ones needed in Additional info tab\n self.lineEdit_T_People.setText(str(l[0][4]))\n\n\n\n def createCSV(self):\n with open(self.plugin_dir + '//SEND_REPORT//sentreport.csv', 'w') as report:\n writer = csv.DictWriter(report,fieldnames=[\"Location\",\"Total People\",\"Evacuated People\",\"Injured People\",\"Ambulances\",\"Policemen\"])\n writer.writeheader()\n\n\n def sendReport(self):\n csvfile = self.plugin_dir + '//SEND_REPORT//sentreport.csv'\n with open(csvfile, 'a') as report:\n writer = csv.writer(report)\n # write data\n location = self.Main_table.selectedItems()[0].text()\n totalpeople = self.lineEdit_T_People.text()\n evacuated = self.lineEdit_Evacuated.text()\n injured = self.lineEdit_Injured.text()\n ambulances = self.lineEdit_Ambulances.text()\n policemen = self.lineEdit_Policemen.text()\n writer.writerow([str(location),str(totalpeople),str(evacuated),str(injured), str(ambulances), str(policemen)])\n self.textEdit.setText('Report has been sent!')\n 
self.Send_report.setDisabled(True)\n\n\n def evacuated_building(self):\n row = self.Main_table.currentRow()\n for i in range(0,3):\n v = self.Main_table.item(row, i)\n v.setBackground(QtGui.QColor(0, 255, 0))\n\n def clear(self):\n evacuated = self.lineEdit_Evacuated.setText(\"\")\n injured = self.lineEdit_Injured.setText(\"\")\n ambulances = self.lineEdit_Ambulances.setText(\"\")\n policemen = self.lineEdit_Policemen.setText(\"\")\n\n def policemen_send_location(self):\n current_text = self.lineEdit_Policemen.text()\n if current_text == '':\n current_count = 0\n else:\n current_count = int(current_text)\n\n self.lineEdit_Policemen.setText(str(current_count + 1))\n\n def Enable_buttons(self):\n self.Send_Location.setDisabled(False)\n self.Send_report.setDisabled(False)\n self.Police.setDisabled(False)\n self.Ambulance.setDisabled(False)\n\n\n\n\n\n","sub_path":"2016_Group2_/EvaQ8/EvaQ8_dockwidget.py","file_name":"EvaQ8_dockwidget.py","file_ext":"py","file_size_in_byte":14260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"136281661","text":"# -*- coding: utf-8 -*-\nimport scrapy, json\nfrom scrapy.loader import ItemLoader\nfrom ..items import *\n\nclass ZhihuSpider(scrapy.Spider):\n    name = 'zhihu'\n    # two kinds of question pages: 1. Zhihu Q&A (www.zhihu.com); 2. Zhihu columns (zhuanlan.zhihu.com);\n    allowed_domains = ['zhihu.com']\n    # API endpoint of the question list page\n    start_urls = ['https://www.zhihu.com/api/v3/feed/topstory?action_feed=True&limit=7&session_token=741d29fcd68f7b71943266341195e5ec&action=down&after_id=&desktop=true']\n\n    def parse(self, response):\n        \"\"\"\n        Parse the json returned by the list page and extract the question ids.\n        :param response:\n        :return:\n        \"\"\"\n        list_json_dict = json.loads(response.text)\n        data = list_json_dict['data']\n        for question_dict in data:\n            try:\n                # if target contains a question, this is a Q&A page (www.zhihu.com)\n                question_id = question_dict['target']['question']['id']\n                question_detail_url = 'https://www.zhihu.com/question/{}'.format(question_id)\n            except:\n                # if target has no question, this is a column page (zhuanlan.zhihu.com)\n                question_id = question_dict['target']['id']\n                question_detail_url = 'https://zhuanlan.zhihu.com/p/{}'.format(question_id)\n\n            yield scrapy.Request(url=question_detail_url, callback=self.parse_question, meta={'question_id': question_id})\n\n            # once question_detail_url is known, Q&A pages can be told apart from column pages.\n            if 'question' in question_detail_url:\n                # Q&A page\n                answers_url = 'https://www.zhihu.com/api/v4/questions/{}/answers?include=data%5B*%5D.is_normal%2Cadmin_closed_comment%2Creward_info%2Cis_collapsed%2Cannotation_action%2Cannotation_detail%2Ccollapse_reason%2Cis_sticky%2Ccollapsed_by%2Csuggest_edit%2Ccomment_count%2Ccan_comment%2Ccontent%2Ceditable_content%2Cvoteup_count%2Creshipment_settings%2Ccomment_permission%2Ccreated_time%2Cupdated_time%2Creview_info%2Crelevant_info%2Cquestion%2Cexcerpt%2Crelationship.is_authorized%2Cis_author%2Cvoting%2Cis_thanked%2Cis_nothelp%3Bdata%5B*%5D.mark_infos%5B*%5D.url%3Bdata%5B*%5D.author.follower_count%2Cbadge%5B*%5D.topics&offset=0&limit=3&sort_by=default'.format(question_id)\n                yield scrapy.Request(\n                    url=answers_url,\n                    callback=self.parse_answer\n                )\n            elif 'zhuanlan' in question_detail_url:\n                zhuanlan_url = 'https://www.zhihu.com/api/v4/articles/{}/comments?include=data%5B*%5D.author%2Ccollapsed%2Creply_to_author%2Cdisliked%2Ccontent%2Cvoting%2Cvote_count%2Cis_parent_author%2Cis_author%2Calgorithm_right&order=normal&limit=20&offset=0&status=open'.format(question_id)\n                yield scrapy.Request(\n                    url=zhuanlan_url,\n                    callback=self.parse_answer\n                )\n\n        is_end = list_json_dict['paging']['is_end']\n        if 
is_end == False:\n            next_url = list_json_dict['paging']['next']\n            # build the request and hand it to the scheduler\n            yield scrapy.Request(\n                url=next_url,\n                callback=self.parse\n            )\n\n    def parse_question(self, response):\n        \"\"\"\n        Parse the question info on the detail page: title, follower count, etc.\n        :param response:\n        :return:\n        \"\"\"\n        if 'question' in response.url:\n            # question_title: Q&A pages and column pages have different tag structures.\n            # possible workarounds:\n            # 1. extract the content of the title tag on the detail page.\n            # 2. give question_title two add_css/add_xpath rules; only one of the two selectors can match.\n            question_id = response.meta['question_id']\n            item_loader = ItemLoader(item=ZhihuQuestionItem(), response=response)\n            item_loader.add_value('question_id', question_id)\n            item_loader.add_css('question_title', '.QuestionHeader-main > h1.QuestionHeader-title::text')\n            # the question body may be absent. question_content\n            item_loader.add_css('question_content', '.QuestionHeader-detail span.RichText > p::text')\n            # topic tags of the question\n            item_loader.add_css('question_topic', '.QuestionHeader-topics .Popover > div::text')\n\n            # follower count and view count. Note: the tag structure in the source fetched here differs from the one shown in the browser's page source; trust the structure of the source obtained via response.\n            item_loader.add_xpath('question_watch_num', '//div[contains(@class, \"QuestionFollowStatus-counts\")]/div[1]/div/strong/@title')\n            item_loader.add_xpath('question_click_num', '//div[contains(@class, \"QuestionFollowStatus-counts\")]/div[2]/div/strong/@title')\n\n            item_loader.add_css('question_answer_nums', 'h4.List-headerText > span::text')\n            item_loader.add_css('question_comment_nums', 'div.QuestionHeader-Comment > button::text')\n\n            item = item_loader.load_item()\n            yield item\n        else:\n            # column page (title, vote count, comment count)\n            yield None\n\n    def parse_answer(self, response):\n        \"\"\"\n        Fetch the answers to a question: either answers on a Q&A page, or comments on a Zhihu column.\n        :param response:\n        :return:\n        \"\"\"\n        answers_dict = json.loads(response.text)\n        if 'answers' in response.url:\n            # answers API of Q&A pages\n            for answer_dic in answers_dict['data']:\n                # answer id\n                answer_id = answer_dic['id']\n                # the question this answer belongs to\n                answer_question_id = answer_dic['question']['id']\n                # the user who wrote the answer\n                answer_author_id = answer_dic['author']['id']\n                # when the answer was written\n                answer_time = answer_dic['created_time']\n                # upvotes the answer received\n                answer_vote_up_nums = answer_dic['voteup_count']\n                # comments the answer received\n                answer_comment_num = answer_dic['comment_count']\n\n                item = ZhihuAnswerItem()\n                item['answer_id'] = answer_id\n                item['answer_question_id'] = answer_question_id\n                item['answer_author_id'] = answer_author_id\n                item['answer_time'] = answer_time\n                item['answer_vote_up_nums'] = answer_vote_up_nums\n                item['answer_comment_num'] = answer_comment_num\n\n                yield item\n\n        elif 'comments' in response.url:\n            # comments API of column pages\n            for answer_dic in answers_dict['data']:\n                # answer id\n                answer_id = answer_dic['id']\n                # the question this answer belongs to: marks which question the answer answers.\n                answer_question_id = 0\n                # the user who wrote the answer\n                answer_author_id = answer_dic['author']['member']['id']\n                # when the answer was written\n                answer_time = answer_dic['created_time']\n                # upvotes the answer received\n                answer_vote_up_nums = answer_dic['vote_count']\n                # comments the answer received\n                answer_comment_num = 0\n\n                item = ZhihuAnswerItem()\n                item['answer_id'] = answer_id\n                item['answer_question_id'] = answer_question_id\n                item['answer_author_id'] = answer_author_id\n                item['answer_time'] = answer_time\n                item['answer_vote_up_nums'] = answer_vote_up_nums\n                item['answer_comment_num'] = answer_comment_num\n\n                yield item\n\n        # fetch the next page of answers\n        is_end = answers_dict['paging']['is_end']\n        if is_end == False:\n            yield scrapy.Request(\n                url=answers_dict['paging']['next'],\n                callback=self.parse_answer\n            
)","sub_path":"zhihuspider/zhihuspider/spiders/zhihu.py","file_name":"zhihu.py","file_ext":"py","file_size_in_byte":8108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"435548166","text":"\"\"\"\ndata preprocessing\n\"\"\"\nimport os\nimport tarfile\nimport hashlib\nimport numpy as np\nfrom six.moves import urllib\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nDOWNLOAD_ROOT = \"https://raw.githubusercontent.com/ageron/handson-ml/master/\"\nHOUSING_PATH = \"datasets/housing\"\nHOUSING_URL = DOWNLOAD_ROOT + HOUSING_PATH + \"/housing.tgz\"\n\n\ndef fetch_housing_data(housing_url=HOUSING_URL, housing_path=HOUSING_PATH):\n \"\"\"get source data:housing from github\"\"\" \n if not os.path.isdir(housing_path):\n os.makedirs(housing_path)\n tgz_path = os.path.join(housing_path, \"housing.tgz\")\n urllib.request.urlretrieve(housing_url, tgz_path)\n housing_tgz = tarfile.open(tgz_path)\n housing_tgz.extractall(path=housing_path)\n housing_tgz.close()\n\n\n# fetch_housing_data()\n\ndef load_housing_data(housing_path=HOUSING_PATH):\n \"\"\"load the data\"\"\"\n csv_path = os.path.join(housing_path,\"housing.csv\")\n return pd.read_csv(csv_path)\n\n\nhousing = load_housing_data()\nhousing.head()\nhousing.info()\nhousing[\"ocean_proximity\"].value_counts()\nhousing.describe()\n\n\n\nhousing.hist(bins=50, figsize=(20,15)) # bins:num of bars.\nplt.show()\n\n# 创建测试集\ndef split_train_set(data,test_ratio):\n shuffled_indices = np.random.permutation(len(data))\n test_set_size = int(len(data) * test_ratio)\n test_indices = shuffled_indices[:test_set_size]\n train_indices = shuffled_indices[test_set_size:]\n return data.iloc[train_indices], data.iloc[test_indices]\ntrain_set, test_set = split_train_set(housing,0.2)\n\n# 确认生成的是同样测试集\ndef test_set_check(idetifier,test_ratio,hash):\n return hash(np.int(idetifier)).digest()[-1] < 256* test_ratio\ndef split_train_test_by_id(data,test_ratio,id_column,hash=hashlib.md5):\n ids = data[id_column]\n in_test_set = ids.apply(lambda id_:test_set_check(id_,test_ratio,hash))\n return data.loc[~in_test_set],data.loc[in_test_set]\n# 以行索引作为id\nhousing_with_id = housing.reset_index()\ntrain_set,test_set = split_train_test_by_id(housing_with_id,0.2,\"index\")\n\n# # 测试集和训练集的产生可以用sklearn\n# from sklearn.model_selection import train_test_split\n# train_set,test_set = train_test_split(housing,test_size=0.2,random_state=42)\n\n# 创建收入类别属性 ceil进一法取整\nhousing[\"income_cat\"] = np.ceil(housing[\"median_income\"] / 1.5)\n\n# 对收入类别分层抽样\nfrom sklearn.model_selection import StratifiedShuffleSplit\nsplit = StratifiedShuffleSplit(n_splits=1,test_size=0.2,random_state=42)\nfor train_index,test_index in split.split(housing,housing[\"income_cat\"]):\n strat_train_set = housing.loc[train_index]\n strat_test_set = housing.loc[test_index]\n\n# 删除incom_cat属性,回归原样\nfor set in (strat_train_set,strat_test_set):\n set.drop([\"income_cat\"], axis=1, inplace=True)\n\n# 仅使用训练集作可视化\nhousing = strat_train_set.copy()\nhousing.plot(kind=\"scatter\", x=\"longitude\", y=\"latitude\")\nplt.show()\n# 突出高密度地区的可视化\nhousing.plot(kind=\"scatter\", x=\"longitude\", y=\"latitude\", alpha=0.1)\n\n# 利用颜色表来可视化不同地区房价\nhousing.plot(kind=\"scatter\", x=\"longitude\", y=\"latitude\", alpha=0.4,s=housing[\"population\"]/100,\n label=\"population\", c=\"median_house_value\",cmap=plt.get_cmap(\"jet\"),colorbar=True,)\nplt.legend()\nplt.show()\n\n# 各属性相关系数\ncorr_matrix = housing.corr()\n# 
see how each attribute correlates with the median house value\ncorr_matrix[\"median_house_value\"].sort_values(ascending=False)\n\n# plot the scatter matrix between attributes\nfrom pandas.plotting import scatter_matrix\nattributes = [\"median_house_value\", \"median_income\", \"total_rooms\",\"housing_median_age\"]\nscatter_matrix(housing[attributes],figsize=(12, 8))\nplt.show()\n\n# median income is the most promising predictor of house value\nhousing.plot(kind=\"scatter\",x=\"median_income\",y=\"median_house_value\",alpha=0.1)\nplt.show()\n\n# engineer new features, e.g. the ratio of bedrooms to total rooms\nhousing[\"rooms_per_household\"] = housing[\"total_rooms\"] / housing[\"households\"]\nhousing[\"bedrooms_per_room\"] = housing[\"total_bedrooms\"] / housing[\"total_rooms\"]\nhousing[\"population_per_household\"] = housing[\"population\"] / housing[\"households\"]\ncorr_matrix = housing.corr()\n\n# handling missing NA values: drop the attribute, drop the rows, or fill with the median/mean\nmedian_total_bedrooms = housing[\"total_bedrooms\"].median()\nhousing.dropna(subset=[\"total_bedrooms\"])\nhousing.drop(\"total_bedrooms\",axis=1)\nhousing[\"total_bedrooms\"].fillna(median_total_bedrooms)\n\n# handle missing values with sklearn's Imputer\nfrom sklearn.preprocessing import Imputer\nimputer =Imputer(strategy=\"median\")\n\n# the median can only be computed on numeric data, so the non-numeric column must be dropped\nhousing_num = housing.drop(\"ocean_proximity\", axis=1)\nimputer.fit(housing_num)\n\nX = imputer.transform(housing_num)\nhousing_tr = pd.DataFrame(X,columns=housing_num.columns)\n\n# ocean_proximity is text instead of num. need to encode to num\nfrom sklearn.preprocessing import LabelEncoder\nencoder = LabelEncoder()\nhousing_cat = housing[\"ocean_proximity\"]\nhousing_cat_encoded = encoder.fit_transform(housing_cat)\n# encoder.classes_ exposes the encoder's mapping\n# one-hot vectors: LabelBinarizer\n","sub_path":"Machine Learning/Python_MachineLearning/HandsOnMachineLearning/gethousing.py","file_name":"gethousing.py","file_ext":"py","file_size_in_byte":5158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"311436622","text":"import requests,binascii,time\nfrom Crypto.PublicKey import RSA\nfrom urllib.parse import unquote as un\n\n################################## Hexer #######################\ndef bin2hex(binStr):\n    return binascii.hexlify(binStr)\n\ndef hex2bin(hexStr):\n    return binascii.unhexlify(hexStr)\n################################################################\n \nbinPubKey = open('clienta.pub').read()\n\n################################## Algo ########################\ndef algo(type,binPubKey,msg):\n    #key = RSA.generate(2048)\n    binPrivKey = hex2bin(open('clienta').read())#key.exportKey('DER')\n    #key.publickey().exportKey('DER')\n    privKeyObj = RSA.importKey(binPrivKey)\n    pubKeyObj = RSA.importKey(binPubKey)\n    if type == \"enc\":\n        return pubKeyObj.encrypt(msg.encode('utf-8'), 'x')[0]\n    if type == \"dec\":\n        return privKeyObj.decrypt(msg)\n#################################################################\n\ndef serConn():\n    print('Checking Pub Key ...')\n    pub=requests.get('http://127.0.0.1:8000/data/clienta.pub')\n    if requests.get('http://127.0.0.1:8000/data/clientb.pub').status_code == 404:\n        requests.post('http://127.0.0.1:8000/data/clientb.pub',data={'pub':open('clienta.pub','r').read()})\n        print('Pub Key Sent...')\n\n    if pub.status_code == 200:\n        binPubKey=hex2bin(pub.text)\n        print('Pub Key Found Decrypting ...')\n        def con():\n            d=str(input(\"send ==>: \"))\n            encmsg=bin2hex(algo('enc',binPubKey,d))\n            requests.post('http://127.0.0.1:8000/data/msgb',data={'sad':encmsg})\n            \n            emsg=hex2bin(un(requests.get('http://127.0.0.1:8000/data/msga').text))\n            print(algo('dec',binPubKey,emsg))\n            con()\n        con()\n    else:\n        print('Waiting 2 Sec ...')\n\t
time.sleep(2)\n        serConn()\n        \nserConn()\n","sub_path":"CLIENTA/Client A.py","file_name":"Client A.py","file_ext":"py","file_size_in_byte":1878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"657802","text":"import threading, time, httplib2\nHOST = \"http://www.baidu.com\"; # host address, e.g. 192.168.1.101\nPORT = 80 # port \nURI = \"/?123\" # relative path; the query string defeats caching, otherwise a 304 may be returned \nTOTAL = 0 # total requests \nSUCC = 0 # successful responses \nFAIL = 0 # failed responses \nEXCEPT = 0 # responses that raised exceptions \nMAXTIME=0 # maximum response time \nMINTIME=100 # minimum response time, initialized to 100 seconds \nGT3=0 # count of responses slower than 3 seconds \nLT3=0 # count of responses within 3 seconds \n# a class derived from threading.Thread \nclass RequestThread(threading.Thread): \n    # constructor \n    def __init__(self, thread_name): \n        threading.Thread.__init__(self) \n        self.test_count = 0 \n    \n    # thread entry point \n    def run(self):\n        self.test_performace() \n    \n    \n    def test_performace(self): \n        global TOTAL \n        global SUCC \n        global FAIL \n        global EXCEPT \n        global GT3 \n        global LT3 \n        try: \n            i=0\n            while i<=200:\n                st = time.time()\n                h = httplib2.Http(\".cache\")\n                res, content = h.request(HOST)\n                if res.status == 200:\n                    TOTAL+=1\n                    SUCC+=1\n                else:\n                    TOTAL+=1\n                    FAIL+=1\n                time_span = time.time()-st\n                # print('%s:%f\\n'%(self.name,time_span))\n                self.maxtime(time_span)\n                self.mintime(time_span)\n                if time_span>3:\n                    GT3+=1\n                else:\n                    LT3+=1\n                i=i+1\n            print(self.name+\":end\")\n            callBack(self.name)\n        except Exception as e:\n            print(e)\n            TOTAL+=1 \n            EXCEPT+=1\n    def maxtime(self,ts): \n        global MAXTIME \n        # print(ts)\n        if ts>MAXTIME: \n            MAXTIME=ts \n    def mintime(self,ts): \n        global MINTIME \n        if ts Task:\n    \"\"\"Determine the state of scheduling an activity for execution with retry options.\n\n    Parameters\n    ----------\n    state: List[HistoryEvent]\n        The list of history events to search to determine the current state of the activity.\n    retry_options: RetryOptions\n        The retry options for the activity function.\n    name: str\n        The name of the activity function to call.\n    input_: Any\n        The JSON-serializable input to pass to the activity function.\n\n    Returns\n    -------\n    Task\n        A Durable Task that completes when the called activity function completes or fails\n        completely.\n    \"\"\"\n    new_action = CallActivityWithRetryAction(\n        function_name=name, retry_options=retry_options, input_=input_)\n    for attempt in range(retry_options.max_number_of_attempts):\n        task_scheduled = find_task_scheduled(state, name)\n        task_completed = find_task_completed(state, task_scheduled)\n        task_failed = find_task_failed(state, task_scheduled)\n        task_retry_timer = find_task_retry_timer_created(state, task_failed)\n        task_retry_timer_fired = find_task_retry_timer_fired(\n            state, task_retry_timer)\n        set_processed([task_scheduled, task_completed,\n                       task_failed, task_retry_timer, task_retry_timer_fired])\n\n        if not task_scheduled:\n            break\n\n        if task_completed:\n            return Task(\n                is_completed=True,\n                is_faulted=False,\n                action=new_action,\n                result=parse_history_event(task_completed),\n                timestamp=task_completed.timestamp,\n                id_=task_completed.TaskScheduledId)\n\n        if task_failed and task_retry_timer and attempt + 1 >= \\\n                retry_options.max_number_of_attempts:\n            return Task(\n                is_completed=True,\n                is_faulted=True,\n                action=new_action,\n                timestamp=task_failed.timestamp,\n                id_=task_failed.TaskScheduledId,\n                exc=Exception(\n                    f\"{task_failed.Reason} \\n {task_failed.Details}\")\n            )\n\n    return Task(is_completed=False, is_faulted=False, 
action=new_action)\n","sub_path":"azure/durable_functions/tasks/call_activity_with_retry.py","file_name":"call_activity_with_retry.py","file_ext":"py","file_size_in_byte":2824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"129044605","text":"import csv\nfrom dbconnect import connectica_test_Conn\n\n\ndef extractAllData(tableName):\n c, conn = connectica_test_Conn() \n query = 'SELECT * FROM %s;'%(tableName)\n c.execute(query)\n\n #exporting data to csv\n with open('outfile.csv','w') as f:\n writer = csv.writer(f)\n writer.writerow(c.description)\n for row in c.fetchall():\n writer.writerow(row)\n\n\n c.close()\n conn.close()\n return\n\nextractAllData('IrradianceMap')\n","sub_path":"dataExport.py","file_name":"dataExport.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"457950411","text":"import requests\nimport unicodedata\nimport html\nimport html.parser\n\nimport json\n\ndef CrawlDataString(resString,posStart,posEnd):\n # url = \"https://news.ycombinator.com/\"\n\n temp_new = resString\n\n # get content web and covert to string\n\n\n\n #find position start- end of Article (only 1)\n\n\n # find string contain value rank\n strGetRank = temp_new[posStart:posEnd - 170]\n # get the value of rank\n rank = strGetRank[13:15]\n\n # From strGetRank, filter string Title and children of class Title\n strTitle = 'class=\"title\">'\n posStartTitle = strGetRank.find(strTitle)\n posEndTitle = strGetRank.find(strEndTitle, posStartTitle + 1)\n strGetTitle = strGetRank[posStartTitle:posEndTitle]\n\n #continue get position class storylink\n posClassStory = strGetTitle.find('class=\"storylink')\n lenStory = len('class=\"storylink\"') + 1\n # get Value link, title\n link = strGetTitle[22:posClassStory]\n Title =unicodedata.normalize(\"NFKD\" ,strGetTitle[posClassStory + lenStory:None])\n\n # get link site and title site\n posEndSite = strGetRank.find('class=\"subtext\"')\n\n strSite = strGetRank[posEndTitle:posEndSite - 39]\n postEndLinkSite = strSite.find('sitestr')\n linkSite = strSite[44:postEndLinkSite - 14]\n\n titleSite = linkSite[11:-1]\n\n\n # subtext of article\n\n # find postions purpose get value subtext\n\n posSubtextStart = strGetRank.find('class=\"score\"')\n posSubtextEnd = strGetRank.find('comments')\n\n strSubtext = strGetRank[posSubtextStart:posSubtextEnd]\n postEndPoint = strSubtext.find(' by')\n\n # get values point, hnuser, age, comments\n\n Point = strSubtext[34:postEndPoint] + \" by \"\n posStartHNUser = strSubtext.find('class=\"hnuser\"')\n # find position age\n posStartAge = strSubtext.find('0):\n\n a=CrawlDataString(temp_new,posStart,posEnd)\n posStart = temp_new.find('class=\"rank\"',posStart+1)\n posEnd = temp_new.find('class=\"rank\"',posStart+1)\n print(a)\n\n","sub_path":"reFucntionString.py","file_name":"reFucntionString.py","file_ext":"py","file_size_in_byte":3287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"300406065","text":"from datetime import date\r\nfrom sys import exit\r\n\r\nunique_id2 = 0\r\nunique_id = 0\r\n\r\n\r\nclass Book:\r\n def __init__(self, title, author, genre, pages, publisher, isbn_no):\r\n self.title = title\r\n self.author = author\r\n # self.language = language\r\n self.genre = genre\r\n self.pages = pages\r\n self.publisher = publisher\r\n self.isbn_no = isbn_no\r\n global unique_id\r\n unique_id += 1\r\n self.book_id = unique_id\r\n\r\n 
def filter_book(self, filter):\r\n return filter in self.book_id\r\n\r\n def __repr__(self):\r\n return f\"id-{self.book_id} -- titled {self.title} by {self.author}\"\r\n\r\nclass Library:\r\n def __init__(self):\r\n self.books = []\r\n self.members = []\r\n self.borrowed_books = []\r\n\r\n def add_book(self, title, author, genre, pages, publisher, isbn_no):\r\n self.books.append(Book(title, author, genre, pages, publisher, isbn_no))\r\n\r\n def add_member(self, firstname, lastname):\r\n name = firstname + lastname\r\n self.members.append(Member(name))\r\n\r\n def lendbook(self, b_id):\r\n pass\r\n\r\n def search_book(self, filter):\r\n pass\r\n\r\n\r\n\r\n\r\nclass Member:\r\n def __init__(self, name):\r\n self.name = name\r\n global unique_id2\r\n unique_id2 += 1\r\n namecode = name[0:3]\r\n self.member_id = namecode.upper() + str(unique_id2)\r\n self.cart = []\r\n\r\n def request_book(self, bk):\r\n self.cart.append(bk)\r\n\r\n def returnbook(self, bk):\r\n self.cart.remove(bk)\r\n\r\n def __repr__(self):\r\n return f\"{self.name}\"\r\n\r\n\r\nclass Menu:\r\n def __init__(self):\r\n self.library = Library()\r\n self.choices = {\r\n \"1\": self.add_member,\r\n \"2\": self.add_book,\r\n \"3\": self.show_books,\r\n \"4\": self.lend_book,\r\n \"5\": self.return_book,\r\n \"6\": self.generate_report,\r\n \"7\": self.quit\r\n }\r\n\r\n def display_menu(self):\r\n print('''\r\n 1. Add Member\r\n 2. Add Book\r\n 3. Show Books\r\n 4. Lend Book\r\n 5. Return Book\r\n 6. Generate Report\r\n 7. Quit \r\n ''')\r\n\r\n def _book_filter(self, book_id):\r\n for book in self.library.books:\r\n if str(book.book_id) == str(book_id):\r\n return book\r\n\r\n def _member_filter(self, member_id):\r\n for member in self.library.members:\r\n if str(member.member_id) == str(member_id):\r\n return member\r\n\r\n def run(self):\r\n while True:\r\n self.display_menu()\r\n choice = input(\"Enter your choice and press enter: \")\r\n action = self.choices.get(choice)\r\n if action:\r\n action()\r\n else:\r\n try_again = input(f\"{choice} is not a valid choice. Try Again? [Y/N]\")\r\n if try_again.lower() == \"y\":\r\n continue\r\n else:\r\n break\r\n\r\n def add_member(self):\r\n fname = input(\"Enter First Name => \")\r\n lname = input(\"Enter last name => \")\r\n self.library.add_member(fname, lname)\r\n print(\"Member Added!\")\r\n\r\n def add_book(self):\r\n try:\r\n title = input(\"Enter Book Title => \")\r\n author = input(\"Enter Author Name => \")\r\n # language = input(\"Enter Book Title => \")\r\n genre = input(\"Enter Genre => \")\r\n pages = int(input(\"Enter Number of Pages => \"))\r\n publisher = input(\"Enter Publisher => \")\r\n isbn_no = input(\"ISBN Number => \")\r\n self.library.add_book(title, author, genre, pages, publisher, isbn_no)\r\n print(\"Book Added\")\r\n add_more_books = input(f\"Add more books? [type : Y]\")\r\n if add_more_books.lower() == \"y\":\r\n self.add_book()\r\n except ValueError as e:\r\n print(\"Enter Again! 
\", e)\r\n\r\n def show_books(self):\r\n for book in self.library.books:\r\n print(f\"id-{book.book_id}\\t{book.title}\\t\\t{book.author}\\t\\t{book.genre}\\t{book.publisher}\")\r\n\r\n def show_members(self):\r\n for member in self.library.members:\r\n print(f\"{member.member_id}.\\t{member.name}\")\r\n\r\n def lend_book(self):\r\n memberId = input(\"Enter member ID number => \")\r\n bookId = input(\"Enter ID of book you want to borrow => \")\r\n current_member = self._member_filter(memberId)\r\n borrowed_book = self._book_filter(bookId)\r\n # print(borrowed_book)\r\n # print(current_member)\r\n if borrowed_book in self.library.books and current_member in self.library.members:\r\n self.library.books.remove(borrowed_book)\r\n self.library.borrowed_books.append(borrowed_book)\r\n current_member.request_book(borrowed_book)\r\n print(f\"{borrowed_book} issued to {current_member}\")\r\n\r\n def return_book(self):\r\n memberId = input(\"Enter member ID number => \")\r\n bookId = input(\"Enter ID of book you want to return => \")\r\n current_member = self._member_filter(memberId)\r\n returing_book = self._book_filter(bookId)\r\n if returing_book not in self.library.borrowed_books and returing_book not in self.library.books or current_member not in self.library.members:\r\n print(\"Book Return Incomplete\")\r\n if returing_book in self.library.borrowed_books and returing_book not in self.library.books:\r\n self.library.books.append(returing_book)\r\n current_member.returnbook(returing_book)\r\n print(f\"{returing_book} returned by {current_member}\")\r\n\r\n def generate_report(self):\r\n for member in self.library.members:\r\n print(f\"{member.member_id}.\\t{member.name}\")\r\n\r\n def quit(self):\r\n exit(0)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n Menu().run()\r\n","sub_path":"02library/library.py","file_name":"library.py","file_ext":"py","file_size_in_byte":5909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"12164522","text":"class Task:\n\n due_date = \"\"\n description = \"\"\n\n def __init__(self, description, due_date):\n self.due_date = due_date\n self.description = description\n\n def __str__(self):\n return \"{} : {}\".format(self.description, self.due_date)\n\n\nclass TodoList:\n my_list_of_tasks = []\n\n def __init__(self):\n self.task_list = []\n\n @classmethod\n def add_task(cls, description, due_date):\n tasks = Task(description, due_date)\n cls.my_list_of_tasks.append(tasks)\n\n\ntask1 = TodoList.add_task(\"Do laundry\", \"On wednesday\")\ntask2 = TodoList.add_task(\"Take out the garbage\", \"On Tuesday\")\ntask3 = TodoList.add_task(\"Go to the gym\", \"tonight\")\n\nprint(TodoList.my_list_of_tasks)\n","sub_path":"task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"523759369","text":"# -*- coding:UTF-8 -*-\nimport cntk as C\nimport numpy as np\nfrom cntk.io import MinibatchSource, HTKFeatureDeserializer, StreamDef, StreamDefs\nfrom cntk import load_model, combine\nfrom cntk.device import gpu,try_set_default_device,cpu\nfrom cntk.ops import as_composite\nimport scipy.io as sio\nimport os\nimport sys\n\nGPU_id = int(sys.argv[1])\ntry_set_default_device(gpu(GPU_id))\nmodel_dnn= load_model(\"./model/speech_enhancement.model\")\nfeatures_file = \"./test_normed.scp\" \nfeature_dim = 257\ntest_reader = MinibatchSource(HTKFeatureDeserializer(StreamDefs(\n amazing_features = 
StreamDef(shape=feature_dim,context=(3,3), scp=features_file))),randomize = False,frame_mode=False)\neval_input_map = {input :test_reader.streams.amazing_features}\n\nf = open(features_file)\nline = f.readline() \nwhile line:\n\ttemp_input_path = line.split(']')[0]\n\tmb_size = temp_input_path.split(',')[-1]\n\tmb_size = int(mb_size) + 1\n\tnoisy_fea=test_reader.next_minibatch(mb_size, input_map = eval_input_map)\n\treal_noisy_fea=noisy_fea[input].data\n\n\tnode_in_graph = model_dnn.find_by_name('irm')\n\toutput_nodes = combine([node_in_graph.owner])\n\tout_noisy_fea = output_nodes.eval(real_noisy_fea)\n\t# out_noisy_fea = as_composite(model_dnn.output1[0].owner).eval(real_noisy_fea)\n\n\tout_SE_noisy_fea = np.concatenate((out_noisy_fea),axis=0)\n\n\tout_file_path = line.split('=')[0]\n\tout_file_name = os.path.join('enhanced_norm_fea_mat',out_file_path)\n\tout_file_fullpath = os.path.split(out_file_name)[0]\n\t# print (out_file_fullpath)\n\tif not os.path.exists(out_file_fullpath):\n\t\tos.makedirs(out_file_fullpath)\n\tsio.savemat(out_file_name, {'SE': out_SE_noisy_fea})\n\tline = f.readline()\n\nf.close() ","sub_path":"decode_model.py","file_name":"decode_model.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"101033251","text":"import copy\n\nclass Solution(object):\n def maxCoins(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n numbers = [1] + copy.deepcopy(nums) + [1]\n n = len(numbers)\n dp = [[0 for i in range(n)] for i in range(n)]\n \n for k in range(2, n):\n for left in range(n-k):\n right = left + k\n for i in range(left+1, right):\n # burst ith balloon at last\n dp[left][right] = max(dp[left][right], dp[left][i] + dp[i][right] + numbers[left]*numbers[i]*numbers[right])\n return dp[0][n-1]\n","sub_path":"python_solutions/312-burst-balloons.py","file_name":"312-burst-balloons.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"424729364","text":"import gzip\nimport json\n\nfrom base.models.model import Model\nfrom base.utils import console_log, LineStripReader\n\n\ndef import_usa_granted(json_gz_file_path):\n json_gz_file_name = json_gz_file_path.split('/')[-1]\n with gzip.open(json_gz_file_path) as f:\n for line_no, content in enumerate(LineStripReader(f)):\n try:\n patent = json.loads(content)\n raw = {}\n raw['patent'] = patent\n raw['source_file'] = json_gz_file_name\n raw['line_no'] = line_no\n instance = Model.objects.create(raw=raw)\n console_log(instance)\n except Exception as e:\n console_log(e)\n with open('error.log', 'a') as lf:\n lf.write('{}\\n'.format(e))\n lf.write('file : {}\\n'.format(json_gz_file_name))\n lf.write('line_no : {}\\n'.format(line_no))\n lf.write(content)\n lf.write('\\n\\n')\n","sub_path":"study/importer/patent/usa.py","file_name":"usa.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"115157175","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Sep 21 00:15:29 2018\r\n\r\n@author: Ankita\r\n\"\"\"\r\nfrom keras import Sequential\nfrom keras.layers import Dense\nfrom keras.utils import np_utils\nimport pandas as pd\nimport numpy as np\nimport os\n\nos.getcwd()\nos.environ['PATH'] += os.pathsep + 'C:\\\\Program Files (x86)\\\\Graphviz2.38\\\\bin'\nnp.random.seed(100)\n\ndigit_train = pd.read_csv(\"C:\\\\Data Science\\\\Deep 
Learning\\\\data\\\\Digit_Recognizer\\\\train.csv\")\ndigit_train.shape\ndigit_train.info()\n\n#iloc[:, 1:] Means first to last row and 2nd column to last column\n#255.0 --> Convert my data to 255 pixels\nX_train = digit_train.iloc[:,1:]/255.0\n#X_train.to_csv('aaaaa.csv',index =False)\ny_train = np_utils.to_categorical(digit_train[\"label\"])\nprint(y_train)\n\ny_train.shape\n\n\n#Here comes the basic Neural network\nmodel = Sequential()\nmodel.add(Dense(10, input_shape=(784,), activation ='softmax'))\nprint(model.summary())\n\n#mean_squared_error for regression\nmodel.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])\n\nhistory = model.fit(x= X_train, y= y_train, verbose = 1,epochs =2, batch_size =2, validation_split = 0.2)\nprint(model.get_weights())\n\n#Predictions on Test data\ndigit_test = pd.read_csv(\"C:\\\\Data Science\\\\Deep Learning\\\\data\\\\Digit_Recognizer\\\\test.csv\")\ndigit_test.shape\ndigit_test.info()\n\nX_test = digit_test.values.astype('float32')/255.0\n\npred = model.predict_classes(X_test)\nsubmissions= pd.DataFrame({\"ImageId\": list(range(1,len(pred)+1)), \"label\": pred})\nsubmissions.to_csv(\"submission_DigitsRec.csv\", index=False, header=True)\n","sub_path":"Deep Learning/Digit_recognizer/2.layered perceptron.py","file_name":"2.layered perceptron.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"380788659","text":"import sys\nfrom PyQt5.QtWidgets import QApplication, QDialog\nfrom PyQt5.QtCore import Qt, pyqtSignal\nimport SelectTargetUi_Inverse\n\n\nclass Ui_SelectTarget_Inverse(QDialog, SelectTargetUi_Inverse.Ui_Dialog):\n\tmy_Signal = pyqtSignal(int)\n\tdef __init__(self):\n\t\tsuper(Ui_SelectTarget_Inverse, self).__init__()\n\t\t# QDialog.__init__(self)\n\t\t# SelectServantUi.Ui_Dialog.__init__(self)\n\t\tself.setupUi(self)\n\t\tself.btn_1.clicked.connect(lambda: self.sendEditContent(1))\n\t\tself.btn_2.clicked.connect(lambda: self.sendEditContent(2))\n\t\tself.btn_3.clicked.connect(lambda: self.sendEditContent(3))\n\n\tdef sendEditContent(self, target):\n\t\tself.my_Signal.emit(target)\n\t\tself.close()\n\n\n\n\n\n\nif __name__ == '__main__':\n\tapp = QApplication(sys.argv)\n\t# Instantiate the child window\n\tselectservant = Ui_SelectTarget_Inverse()\n\tselectservant.show()\n\tsys.exit(app.exec_())\n\n\n\n","sub_path":"Code_SelectTarget_Inverse.py","file_name":"Code_SelectTarget_Inverse.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"89385342","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Oct 13 10:52:22 2020\r\n\r\n@author: Administrator\r\n\"\"\"\r\n\r\n\"\"\"\r\n1143\r\n\r\nGiven two strings text1 and text2, return the length of their longest common subsequence.\r\n\r\nA subsequence of a string is a new string generated from the original string with some\r\ncharacters (possibly none) deleted, without changing the relative order of the remaining characters.\r\nFor example, \"ace\" is a subsequence of \"abcde\", but \"aec\" is not. A common subsequence\r\nof two strings is a subsequence shared by both.\r\n\r\nIf the two strings have no common subsequence, return 0.\r\n\"\"\"\r\nclass Solution:\r\n def longestCommonSubsequence(self, text1: str, text2: str) -> int:\r\n # recursive solution\r\n def dp(i, j):\r\n if i == -1 or j == -1:# empty string\r\n return 0\r\n if text1[i] == text2[j]:# equal characters are necessarily part of the LCS\r\n return dp(i - 1, j - 1) + 1\r\n else:# take whichever case gives the longer LCS\r\n return max(dp(i - 1, j), dp(i, j - 1))\r\n return dp(len(text1) - 1, len(text2) - 1)# times out (TLE)\r\n \r\n\r\nclass Solution:\r\n def longestCommonSubsequence(self, text1: str, text2: str) -> int:\r\n # iterative solution with a dp table\r\n m, n = len(text1), len(text2)\r\n # build the dp table\r\n # base case: dp[0][i] and dp[j][0] are 0\r\n dp = [[0] * (n + 1) for i in range(m + 1)]\r\n # state transition\r\n for i in range(1, m + 1):\r\n for j in range(1, n + 1):\r\n if text1[i - 1] == text2[j - 1]:\r\n dp[i][j] = 1 + dp[i - 1][j - 1]\r\n else:\r\n dp[i][j] = max(dp[i - 1][j], dp[i][j - 1])\r\n return dp[-1][-1]\r\n\"\"\"\r\nResult: Accepted (show details)\r\nRuntime: 372 ms, faster than 96.87% of Python3 submissions\r\nMemory: 21.1 MB, less than 40.05% of Python3 submissions\r\n\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"labuladong的算法小抄(Python)/动态规划/最长公共子序列.py","file_name":"最长公共子序列.py","file_ext":"py","file_size_in_byte":2054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"328198010","text":"import xlrd\r\nimport xlwt\r\n'''Excel file read/write helpers'''\r\nclass Excel():\r\n\r\n \r\n def read_excel(self,files,sheet,row):\r\n try:\r\n data = xlrd.open_workbook(files)\r\n table = data.sheet_by_name(sheet)\r\n nrows=table.nrows\r\n value_list=[]\r\n for i in range(row, nrows):\r\n rowvalue = table.row_values(i)\r\n value_list.append(rowvalue)\r\n return value_list\r\n except:\r\n print('Failed to read the file')\r\n def write_excel(self,items,name):\r\n newtable = '{}.xls'.format(name)\r\n wb = xlwt.Workbook(encoding='utf-8')\r\n ws = wb.add_sheet('test')\r\n index = 1\r\n for i in range(0,len(items)):\r\n for j in range(0,len(items[i])):\r\n ws.write(index, j,items[i][j])\r\n index+=1\r\n wb.save(newtable)\r\n","sub_path":"gongzuo/Excel.py","file_name":"Excel.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"278278183","text":"from pymongo import MongoClient\nimport datetime\n\nclient = MongoClient()\ndb = client['crm']\norders = db['Where'].find()\ntotal = db['Where'].count()\nfor i in range(total) : \n\tcurrent_date = datetime.datetime.now()\n\tif(orders[i]['Expiry-Date']) : \n\t\tif((orders[i]['Expiry-Date'] - current_date).days > 7) : \n\t\t\torders[i]['isExpiring'] = 'Yes'","sub_path":"checkExpiry.py","file_name":"checkExpiry.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"429640436","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns=[\n path('', views.index),\n path('blank/', views.blank),\n path('contact/', views.contact),\n path('crowd_pred/', views.crowd_pred),\n path('crowd_prediction/', views.crowd_prediction),\n path('fake_info/', views.fake_info),\n path('fake_info_prediction/', views.fake_info_prediction),\n path('fake_info_result/', views.fake_info_result),\n path('general/', views.general),\n path('location_analysis/', views.location_analysis),\n path('location_analysis_report/', views.location_analysis_report),\n path('login/', views.login),\n path('profile/', views.profile),\n path('thank_you/', views.thank_you),\n path('thank_you_fakeinfo/', views.thank_you_fakeinfo),\n path('widgets/', views.widgets),\n\n]","sub_path":"tweeysa/builder/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"355556449","text":"#!/usr/bin/python\n# Version 2016-07-19-01\n\n## the config file to load from\nCONFIG_FILE = '/data/cortex-puppet-bridge/bridge.conf'\n\n## config defaults\nPUPPET_BINARY = '/opt/puppetlabs/bin/puppet'\nPUPPETSRV_BINARY = '/opt/puppetlabs/bin/puppetserver'\nPUPPET_CONFDIR = '/etc/puppetlabs/puppet/'\nPUPPET_SSLDIR = '/etc/puppetlabs/puppet/ssl/'\nAUTH_TOKEN = 'changeme'\nDEBUG = False\n\n## DO NOT EDIT PAST THIS LINE #################################################\n\nfrom flask import Flask, jsonify, abort, request, g, make_response\nimport subprocess\nimport syslog\nimport re\nimport os.path\nimport ConfigParser\nimport traceback\nimport logging\nimport yaml\n\n################################################################################\n\napp = Flask(__name__)\napp.config.from_object(__name__)\napp.config.from_pyfile(CONFIG_FILE,silent=True)\napp.logger.setLevel(logging.DEBUG)\n\n################################################################################\n\n@app.route('/')\ndef default():\n\tabort(400)\n\n################################################################################\n\n@app.route('/deactivatenode/', methods=['GET'])\ndef deactivate_node(hostname):\n\n\tif 'X-Auth-Token' not in request.headers:\n\t\tsyslog.syslog(\"deactivatenode request failed because X-Auth-Token was missing from the request\")\n\t\tabort(401)\n\tif request.headers['X-Auth-Token'] != app.config['AUTH_TOKEN']:\n\t\tapp.logger.warn(\"deactivatenode request failed because the X-Auth-Token was incorrect\")\n\t\tabort(401)\n\n\t## validate the hostname\n\tif not is_valid_hostname(hostname):\n\t\tsyslog.syslog(\"Invalid hostname in request\")\n\t\tabort(400)\n\n\t## deactivate the puppet node\n\t(rcode, stdout, stderr) = sysexec([app.config['PUPPET_BINARY'], \"node\", \"--confdir\", app.config['PUPPET_CONFDIR'], \"deactivate\", hostname], shell=False)\n\n\tif rcode != 0:\n\t\tsyslog.syslog(\"puppet node deactivate failed for hostname \" + hostname)\n\t\tsyslog.syslog(\"stdout: \" + str(stdout))\n\t\tsyslog.syslog(\"stderr: \" + str(stderr))\n\t\tabort(500)\n\telse:\n\t\tsyslog.syslog(\"puppet node with hostname \" + hostname + \" was deactivated\")\n\n\t# Return blank 200 OK response\n\treturn \"\"\n\n################################################################################\n\n@app.route('/cleannode/', methods=['GET'])\ndef clean_node(hostname):\n\n\tif 'X-Auth-Token' not in request.headers:\n\t\tsyslog.syslog(\"cleannode request failed because X-Auth-Token was missing from the request\")\n\t\tabort(401)\n\tif request.headers['X-Auth-Token'] != 
app.config['AUTH_TOKEN']:\n\t\tapp.logger.warn(\"cleannode request failed because the X-Auth-Token was incorrect\")\n\t\tabort(401)\n\n\t## validate the hostname\n\tif not is_valid_hostname(hostname):\n\t\tsyslog.syslog(\"Invalid hostname in request\")\n\t\tabort(400)\n\n\t## clean the puppet node\n\t(rcode, stdout, stderr) = sysexec([app.config['PUPPET_BINARY'], \"node\", \"--confdir\", app.config['PUPPET_CONFDIR'], \"clean\", hostname], shell=False)\n\n\tif rcode != 0:\n\t\tsyslog.syslog(\"puppet node clean failed for hostname \" + hostname)\n\t\tsyslog.syslog(\"stdout: \" + str(stdout))\n\t\tsyslog.syslog(\"stderr: \" + str(stderr))\n\t\tabort(500)\n\telse:\n\t\tsyslog.syslog(\"puppet node with hostname \" + hostname + \" was cleaned\")\n\n\t# Return blank 200 OK response\n\treturn \"\"\n\n################################################################################\n\n@app.route('/getcert/', methods=['GET'])\ndef register_by_user(hostname):\n\n\tif 'X-Auth-Token' not in request.headers:\n\t\tsyslog.syslog(\"getcert request failed because X-Auth-Token was missing from the request\")\n\t\tabort(401)\n\tif request.headers['X-Auth-Token'] != app.config['AUTH_TOKEN']:\n\t\tapp.logger.warn('getcert request failed because the X-Auth-Token was incorrect')\n\t\tabort(401)\n\n\ttry:\n\t\tpuppet_version = get_puppet_version()\n\texcept Exception as e:\n\t\tsyslog.syslog('Failed to get Puppet version ' + str(e))\n\t\tabort(500)\n\n\t## validate the certname \n\tif not is_valid_hostname(hostname):\n\t\tsyslog.syslog(\"Invalid hostname in request\")\n\t\tabort(400)\n\n\t## do all the files already exist for this cert name?\n\tif not all([os.path.exists(app.config['PUPPET_SSLDIR'] + \"private_keys/\" + hostname + \".pem\"),\n\t\t\tos.path.exists(app.config['PUPPET_SSLDIR'] + \"public_keys/\" + hostname + \".pem\"),\n\t\t\tos.path.exists(app.config['PUPPET_SSLDIR'] + \"ca/signed/\" + hostname + \".pem\")]):\n\n\t\t## try to clean the cert but fail silently if it doesnt work\n\t\tif puppet_version[0] <= 4:\n\t\t\tsysexec([app.config['PUPPET_BINARY'], \"cert\", \"--confdir\", app.config['PUPPET_CONFDIR'], \"clean\", hostname], shell=False)\n\t\t\tsysexec([app.config['PUPPET_BINARY'], \"cert\", \"--confdir\", app.config['PUPPET_CONFDIR'], \"destroy\", hostname], shell=False)\n\t\t\tsysexec([app.config['PUPPET_BINARY'], \"ca\", \"--confdir\", app.config['PUPPET_CONFDIR'], \"destroy\", hostname], shell=False)\n\t\telse:\n\t\t\tsysexec([app.config['PUPPETSRV_BINARY'], \"ca\", \"clean\", \"--config\", os.path.join(app.config['PUPPET_CONFDIR'], \"puppet.conf\"), \"--certname\", hostname], shell=False)\n\n\t\tsyslog.syslog(\"generating new puppet certificate for \" + hostname)\n\n\t\t## puppet generate a new cert\n\t\tif puppet_version[0] <= 4:\n\t\t\t(rcode, stdout, stderr) = sysexec([app.config['PUPPET_BINARY'], \"cert\", \"--confdir\", app.config['PUPPET_CONFDIR'], \"generate\", hostname], shell=False)\n\t\telse:\n\t\t\t(rcode, stdout, stderr) = sysexec([app.config['PUPPETSRV_BINARY'], \"ca\", \"generate\", \"--config\", os.path.join(app.config['PUPPET_CONFDIR'], \"puppet.conf\"), \"--certname\", hostname], shell=False)\n\n\t\tif rcode != 0:\n\t\t\tsyslog.syslog(\"puppet cert generate failed for hostname \" + hostname)\n\t\t\tsyslog.syslog(\"stdout: \" + str(stdout))\n\t\t\tsyslog.syslog(\"stderr: \" + str(stderr))\n\t\t\tabort(500)\n\telse:\n\t\tsyslog.syslog(\"deploying existing puppet certificate for \" + hostname)\n\n\t## get a dict ready for json return\n\tdata = {}\n\n\t## grab the contents of the 
files the client needs\n\ttry:\n\t\twith open(app.config['PUPPET_SSLDIR'] + \"public_keys/\" + hostname + \".pem\",\"r\") as f:\n\t\t\tdata['public_key'] = f.read()\n\texcept Exception as ex:\n\t\tsyslog.syslog(\"failed to read generated public key file for \" + hostname)\n\t\tsyslog.syslog(str(ex))\n\t\tabort(500)\n\n\ttry:\n\t\twith open(app.config['PUPPET_SSLDIR'] + \"ca/signed/\" + hostname + \".pem\",\"r\") as f:\n\t\t\tdata['cert'] = f.read()\n\texcept Exception as ex:\n\t\tsyslog.syslog(\"failed to read generated certificate file for \" + hostname)\n\t\tsyslog.syslog(str(ex))\n\t\tabort(500)\n\n\ttry:\n\t\twith open(app.config['PUPPET_SSLDIR'] + \"private_keys/\" + hostname + \".pem\",\"r\") as f:\n\t\t\tdata['private_key'] = f.read()\n\texcept Exception as ex:\n\t\tsyslog.syslog(\"failed to read generated certificate file for \" + hostname)\n\t\tsyslog.syslog(str(ex))\n\t\tabort(500)\n\n\t## send results back as json\n\treturn jsonify(data)\n\n################################################################################\n\n@app.route('/modules', methods=['GET'])\ndef modules_list():\n\n\tif 'X-Auth-Token' not in request.headers:\n\t\tsyslog.syslog(\"modules request failed because X-Auth-Token was missing from the request\")\n\t\tabort(401)\n\tif request.headers['X-Auth-Token'] != app.config['AUTH_TOKEN']:\n\t\tapp.logger.warn('modules request failed because the X-Auth-Token was incorrect')\n\t\tabort(401)\n\n\t## ask the puppet server for a list of modules\n\t(rcode, stdout, stderr) = sysexec([app.config['PUPPET_BINARY'], \"module\", \"--modulepath=\" + app.config['PUPPET_MODULE_PATH'], \"list\", \"--render-as\", \"yaml\"])\t\n\n\tif rcode != 0:\n\t\tsyslog.syslog(\"puppet module list failed\")\n\t\tsyslog.syslog(\"stdout: \" + str(stdout))\n\t\tsyslog.syslog(\"stderr: \" + str(stderr))\n\t\tabort(500)\n\telse:\n\t\t## Try to validate the YAML\n\t\ttry:\n\t\t\t# work around the stupid ruby object shit in puppet yaml\n\t\t\tyaml.add_multi_constructor(u\"!ruby/object:\", construct_ruby_object)\n\t\t\tmodules = yaml.load(stdout)\n\t\texcept yaml.YAMLError as ex:\n\t\t\tsyslog.syslog(\"puppet module list returned invalid YAML\")\n\t\t\tsyslog.syslog(\"invalid YAML: \" + stdout)\n\t\t\tabort(500)\n\n\t\tr = make_response(yaml.dump(modules))\n\t\tr.headers['Content-Type'] = \"application/x-yaml\"\n\t\treturn r\n\n################################################################################\n\ndef get_puppet_version():\n\t(ret, out, err) = sysexec([app.config['PUPPET_BINARY'], '--version'], shell=False)\n\n\tif ret == 0:\n\t\tversion_string_re = re.compile('(?P[0-9]+)\\.(?P[0-9]+)\\.(?P[0-9]+)')\n\t\tversion_string = version_string_re.match(out.strip())\n\t\tif version_string is not None:\n\t\t\treturn (int(version_string.group('major')), int(version_string.group('minor')), int(version_string.group('patch')))\n\t\telse:\n\t\t\traise Exception('Failed to parse Puppet version string')\n\telse:\n\t\traise Exception('Error running Puppet to get Puppet version')\n\n################################################################################\n\ndef construct_ruby_object(loader, suffix, node):\n\treturn loader.construct_yaml_map(node)\n\n################################################################################\n\n@app.before_request\ndef before_request():\n\tsyslog.openlog(\"cortex-puppet-bridge\")\n\n################################################################################\n\ndef sysexec(command,shell=False):\n\t\"\"\"Executes a command on the local system using 
subprocess Popen\"\"\"\n\n\ttry:\n\t\tproc = subprocess.Popen(command,stdout=subprocess.PIPE, stderr=subprocess.STDOUT,shell=shell)\n\t\t(stdoutdata, stderrdata) = proc.communicate() \t\n\t\treturn (proc.returncode,str(stdoutdata),str(stderrdata))\n\texcept Exception as ex:\n\t\tsyslog.syslog(\"sysexec exception: \" + str(ex))\n\t\treturn (1,\"\",str(ex))\n\n################################################################################\n\ndef is_valid_hostname(hostname):\n\t\"\"\"Returns true if the given hostname is valid\"\"\"\n\n\tif len(hostname) > 255:\n\t\treturn False\n\tif hostname[-1] == \".\":\n\t\thostname = hostname[:-1]\n\tallowed = re.compile(\"(?!-)[A-Z\\d-]{1,63}(?/CODE TESTCASENAME @LOCATION TIME MESSAGE\n\nSo, for example, when running the Zephyr *Hello World!* sample in a\ntarget called *qz39c-arm* in the local server, the output could be::\n\n $ tcf run -vv -t local/qz39c-arm test_zephyr_hello_world.py\n INFO2/\t toplevel @local: scanning for test cases\n INFO2/n9gcf3\t test_zephyr_hello_world.py#_test @local/qz39c-arm:arm: will run on target group 'xqkw (target=local/qz39c-arm:arm)'\n PASS1/n9gcf3\t test_zephyr_hello_world.py#_test @local/qz39c-arm:arm: configure passed\n PASS1/n9gcf3\t test_zephyr_hello_world.py#_test @local/qz39c-arm:arm: build passed\n PASS2/n9gcf3\t test_zephyr_hello_world.py#_test @local/qz39c-arm:arm: deploy passed\n INFO2/n9gcf3E#1 test_zephyr_hello_world.py#_test @local/qz39c-arm:arm: Reset\n PASS2/n9gcf3E#1 test_zephyr_hello_world.py#_test @local/qz39c-arm:arm: found expected `Hello World! arm` in console `default` at 0.03s\n PASS2/n9gcf3E#1 test_zephyr_hello_world.py#_test @local/qz39c-arm:arm: eval pass: found expected `Hello World! arm` in console `default` at 0.03s\n PASS1/n9gcf3\t test_zephyr_hello_world.py#_test @local/qz39c-arm:arm: evaluation passed\n PASS0/\t toplevel @local: 1 tests (1 passed, 0 failed, 0 blocked, 0 skipped) - passed\n\nNote the columns and the messages:\n\n- *TAG* is:\n\n - *INFO*, information\n - *PASS*, *FAIL*, *ERRR*, something passed, failed to pass or errored\n - *BLCK* infrastructure issue\n errored or an infrastructure\n - *SKIP* something was skipped\n - *DATA* data was collected and now is being reported\n\n- **, a verbosity number (0 most general, 1 more detailed, 2 more\n verbose, etc)\n\n- *CODE* such as ``/XXXXXX[CBDEL].NN``, where XXXXXX is the\n :term:`hashid`. The letters ``CBDEL`` describe which phase it us\n running (Configure, Build, Deploy, Evaluation, cLean), followed by\n the step number when they are being executed.\n\n What is this useful for? Well, you can ask the system to generate a\n log file (using `--log-file FILE.log`) and just let it print the\n most high level information to the console. The log has *way* more\n information than you would ever care for, but when something fails,\n grep for the message ID in the logfile (for example, if the build\n had failed, ``grep n9gcf3B FILE.log`` would give you the full build\n output for you to determine what is wrong--note the trailing *B*\n after ther hashid. It is also passed to the server, so we can\n identify what the target was doing when.\n\n .. 
note:: TCF also generates reports when something fails (look for\n ``report-HASHID.txt``) with all that detailed information.\n\n- *@LOCATION*: testcase name, target name and :term:`BSP model`.\n\n- A message indicating what happened\n\n\"\"\"\nimport io\nimport os\nimport subprocess\nimport sys\nimport threading\n\nimport commonl\nimport tcfl\nimport tc\n\nclass driver(tc.report_driver_c):\n \"\"\"\n Driver to write progress messages to console and a log file\n\n :param int verbosity: maximum verbosity of messages to\n *stdout*\n\n :param str log_file: (optional) write messages also to the given\n log file name (defaults to *None*).\n\n If the name ends up in any of the suffixes in :data:`compress`,\n then the log file will be compressed with the program described\n by said entry.\n\n :param int verbosity_logf: (optional) maximum verbosity to report\n to the logfile; defaults to all of them (see\n :meth:`tcfl.tc.report_driver_c.report`), but in some cases you\n might want to limit to cut on disk consumption.\n \"\"\"\n def __init__(self, verbosity, log_file = None,\n verbosity_logf = 999):\n tc.report_driver_c.__init__(self)\n if log_file:\n assert isinstance(log_file, basestring)\n\n self.tls = threading.local()\n\n if log_file:\n\n _basename, ext = os.path.splitext(log_file)\n if ext in self.compress:\t# compressed logfile support\n kws = dict(log_file = log_file)\n # use shell + exec to try to game possible buffering\n # issues w/ Python and blocks\n command = self.compress[ext]\n pipe = subprocess.Popen(\n \"exec \" + command % kws + \" > '%s'\" % log_file,\n shell = True, stdin = subprocess.PIPE)\n logf = io.open(pipe.stdin.fileno(), \"w\",\n encoding = 'utf-8', errors = 'replace')\n else:\n logf = io.open(log_file, \"w+\", encoding = 'utf-8',\n errors = 'replace')\n self.logf = commonl.io_tls_prefix_lines_c(\n self.tls, logf.detach(),\n encoding = 'utf-8', errors = 'replace')\n else:\n self.logf = None\n consolef = io.open(sys.stdout.fileno(), \"w\",\n encoding = 'utf-8', errors = 'replace')\n self.consolef = commonl.io_tls_prefix_lines_c(\n self.tls, consolef.detach(),\n encoding = 'utf-8', errors = 'replace')\n self.verbosity = verbosity\n self.verbosity_logf = verbosity_logf\n\n #: Map log file extension to compression program\n #:\n #: Log files in big runs can be huge but we don't want to lose\n #: them or we don't need the whole thing...until we need them.\n #:\n #: Compressing them after the fact is often a pain, so we can\n #: compress them on the run. Each program here takes stdin raw\n #: data and writes compressed data to stdout. 
It shall stop when\n #: receiving EOF and close it out gracefully, which will also work\n #: if TCF is killed mercilessly.\n #:\n #: New programs can be added with:\n #:\n #: >>> tcfl.report_console.driver.compress[\".EXT\"] = \"program -options\"\n #:\n #: Note that you can generate both a compressed and uncompressed\n #: log file by using tee; this is meant for debugging, since the\n #: compressed stream will be buffered by the compression program.\n #:\n #: >>> tcfl.report_console.driver.compress[\".xz\"] = \\\n #: >>> \"tee %(log_file)s.raw | xz -T0 -6qzc\",\n #:\n #: Another method is to strace the compression program::\n #:\n #: $ strace -s 1024 -p PID\n compress = {\n \".bz2\": \"bzip2 -9qzc\",\n \".xz\": \"xz -T0 -6qzc\",\n }\n\n\n def _shall_do(self, level):\n console = level <= self.verbosity\n # level >= 1000 is for control messages\n logfile = level <= self.verbosity_logf or level >= 1000\n return console, logfile\n\n def report(self, reporter, tag, ts, delta,\n level, message,\n alevel, attachments):\n \"\"\"\n Report messages to the console or logfile in a line-by-line\n format prefixing each line.\n \"\"\"\n # FIXME: rework the streaming object so it can multiplex the\n # output to two file descriptors *and* decide based on\n # verbosity; this way we do not have to walk the attachment\n # tree and format twice if the log file is enabled.\n\n # the prefix to each line is stored in thread-local-storage\n # (with commonl.tls_prefix_c), where it is picked up by the\n # stream buffer object commonl.io_tls_prefix_lines_c. This,\n # before printing, adds the prefix to each line.\n _prefix = \"%s%d/%s\\t%s [+%.1fs]: \" % (\n tag, level, tcfl.msgid_c.ident(),\n reporter._report_prefix, delta\n )\n with commonl.tls_prefix_c(self.tls, _prefix):\n console_p, logfile_p = self._shall_do(level)\n message += \"\\n\"\n if console_p:\n self.consolef.write(message)\n if self.logf and logfile_p:\n self.logf.write(message)\n\n if attachments != None:\n assert isinstance(attachments, dict)\n console_p, logfile_p = self._shall_do(alevel)\n if console_p or logfile_p:\n _aprefix = \"%s%d/%s\\t%s [+%.1fs]: \" % (\n tag, alevel, tcfl.msgid_c.ident(),\n reporter._report_prefix, delta\n )\n with commonl.tls_prefix_c(self.tls, _aprefix):\n if console_p:\n commonl.data_dump_recursive_tls(attachments, self.tls,\n of = self.consolef)\n if self.logf and logfile_p:\n commonl.data_dump_recursive_tls(attachments, self.tls,\n of = self.logf)\n\n self.consolef.flush()\n if self.logf:\n self.logf.flush()\n","sub_path":"tcfl/report_console.py","file_name":"report_console.py","file_ext":"py","file_size_in_byte":9488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"112968603","text":"spaces =' \\n\\t'\n\n'''Gets the substring contained in str that appears before the first occurrence \nof a character belonging to a set of special characters (chars)'''\n#getBefore :: str -> str ->(str, str)\ndef getBefore(str, chars):\n\tname = ''\n\tfor char in str:\t\t\t#For each length-1 substring of str\n\t\tif(char in chars):\t\t#If the substring is in the delimiter character set\n\t\t\tbreak\t\t\t\t#end the for loop\n\t\telse:\t\t\t\t\t#Otherwise\n\t\t\tname = name + char \t#Append the substring to name\n\trest = str[len(name):]\t\t#rest holds the part of str that excludes name\n\treturn(name, rest)\n\n'''Gets the first substring contained in str that is found between a pair of special characters.\nThat character is the second argument of the function'''\n#getBetween :: str -> str -> (str, str)\ndef getBetween(str, char):\n\tword = ''\n\tflag = 0\n\tfor char in str:\n\t\tif char == '\"':\n\t\t\tflag+=1\n\t\telif flag == 1:\n\t\t\tword = word + char\n\treturn (word, str[len(word)+2:])\n\n'''Receives a line of xml code, processes it and returns the tag name and the attributes'''\n#processLine :: str -> str ->(str, dict)\ndef processLine(str, kindOfTag):\n\trest = str.strip('<>' + spaces)\t\t\t#Removes '<' at the start of the line, '>' at the end, and any surrounding whitespace\n\tname = ''\n\tattribs = {}\t\t\t\t\t\t\t#Initialize the attribute list as an empty dictionary\n\tif kindOfTag == 'selfClosingTag':\n\t\trest = rest.rstrip('/')\t\t\t\t#Removes the trailing '/' from the line\n\tpair = getBefore(rest, spaces)\t\t\t#Returns a tuple with the name and the rest of the first parameter, referenced by pair\n\tname = pair[0]\t\t\t\t\t\t\t#Bind the first element of the tuple to name\n\trest = pair[1]\t\t\t\t\t\t\t#Bind the second element of the tuple to rest\n\trest = rest.strip(spaces)\t\t\t\t#Strip whitespace before and after the string\n\twhile(len(rest)!=0):\n\t\tpair = getBefore(rest,'=')\t\t\t#Get the key and the rest of the string\n\t\tkey = pair[0]; key = key.strip()\t#Strip any whitespace around the key\n\t\trest = pair[1].strip(spaces + '=')\t#Strip whitespace and '=' from the ends of the string\n\t\tpair = getBetween(rest,'\"')\t\t#Get the value and the rest of the string\n\t\tvalue = pair[0];\t\t\t\t\t#Bind the first element of the pair to value\n\t\trest = pair[1].strip(spaces)\t\t#Strip whitespace from the ends of the string\n\t\tattribs[key] = value \t\t\t\t#Add a new attribute to the attribute list\n\treturn (name, attribs)\n\n#Test\n'''\nprint(processLine('',''))\nprint(processLine('',''))\nprint(processLine('',''))\n'''\n","sub_path":"scriptsAndFiles/str_functions.py","file_name":"str_functions.py","file_ext":"py","file_size_in_byte":2722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"439219123","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/qt/work/pyside/pyside-setup/pyside2_install/py2.7-qt5.14.2-64bit-release/lib/python2.7/site-packages/shiboken2/files.dir/shibokensupport/signature/mapping.py\n# Compiled at: 2020-04-24 02:55:46\nfrom __future__ import print_function, absolute_import\nimport sys, struct, os\nfrom shibokensupport.signature import typing\nfrom shibokensupport.signature.typing import TypeVar, Generic\nfrom shibokensupport.signature.lib.tool import with_metaclass\n\nclass ellipsis(object):\n\n def __repr__(self):\n return '...'\n\n\nellipsis = ellipsis()\nPoint = typing.Tuple[(float, float)]\nVariant = typing.Any\nModelIndexList = typing.List[int]\nQImageCleanupFunction = typing.Callable\nNoneType = type(None)\n_S = TypeVar('_S')\n\nclass _CharMeta(type):\n\n def __repr__(self):\n return '%s.%s' % (self.__module__, self.__name__)\n\n\nclass Char(with_metaclass(_CharMeta)):\n \"\"\"\n From http://doc.qt.io/qt-5/qchar.html :\n\n In Qt, Unicode characters are 16-bit entities without any markup or\n structure. This class represents such an entity. It is lightweight,\n so it can be used everywhere. 
Most compilers treat it like an\n unsigned short.\n\n Here, we provide a simple implementation just to avoid long aliases.\n \"\"\"\n __module__ = 'typing'\n\n def __init__(self, code):\n if isinstance(code, int):\n self.code = code & 65535\n else:\n self.code = ord(code)\n\n def __add__(self, other):\n return chr(self.code) + other\n\n def __radd__(self, other):\n return other + chr(self.code)\n\n def __repr__(self):\n return ('typing.Char({})').format(self.code)\n\n\ntyping.Char = Char\nMultiMap = typing.DefaultDict[(str, typing.List[str])]\nulong_max = 2 * sys.maxsize + 1 if len(struct.pack('L', 1)) != 4 else 4294967295\nushort_max = 65535\nGL_COLOR_BUFFER_BIT = 16384\nGL_NEAREST = 9728\nWId = int\nGL_TEXTURE_2D = 3553\nGL_RGBA = 6408\n\nclass _NotCalled(str):\n \"\"\"\n Wrap some text with semantics\n\n This class is wrapped around text in order to avoid calling it.\n There are three reasons for this:\n\n - some instances cannot be created since they are abstract,\n - some can only be created after qApp was created,\n - some have an ugly __repr__ with angle brackets in it.\n\n By using derived classes, good looking instances can be created\n which can be used to generate source code or .pyi files. When the\n real object is needed, the wrapper can simply be called.\n \"\"\"\n\n def __repr__(self):\n return ('{}({})').format(type(self).__name__, self)\n\n def __call__(self):\n from shibokensupport.signature.mapping import __dict__ as namespace\n text = self if self.endswith(')') else self + '()'\n return eval(text, namespace)\n\n\nUSE_PEP563 = False\n\nclass Virtual(_NotCalled):\n pass\n\n\nclass Missing(_NotCalled):\n\n def __repr__(self):\n if USE_PEP563:\n return _NotCalled.__repr__(self)\n return ('{}(\"{}\")').format(type(self).__name__, self)\n\n\nclass Invalid(_NotCalled):\n pass\n\n\nclass Default(_NotCalled):\n pass\n\n\nclass Instance(_NotCalled):\n pass\n\n\nclass _Parameterized(object):\n\n def __init__(self, type):\n self.type = type\n self.__name__ = self.__class__.__name__\n\n def __repr__(self):\n return ('{}({})').format(type(self).__name__, self.type.__name__)\n\n\nclass ResultVariable(_Parameterized):\n pass\n\n\nclass ArrayLikeVariable(_Parameterized):\n pass\n\n\nStringList = ArrayLikeVariable(str)\n\nclass Reloader(object):\n \"\"\"\n Reloder class\n\n This is a singleton class which provides the update function for the\n shiboken and PySide classes.\n \"\"\"\n\n def __init__(self):\n self.sys_module_count = 0\n\n @staticmethod\n def module_valid(mod):\n if getattr(mod, '__file__', None) and not os.path.isdir(mod.__file__):\n ending = os.path.splitext(mod.__file__)[(-1)]\n return ending not in ('.py', '.pyc', '.pyo', '.pyi')\n else:\n return False\n\n def update(self):\n \"\"\"\n 'update' imports all binary modules which are already in sys.modules.\n The reason is to follow all user imports without introducing new ones.\n This function is called by pyside_type_init to adapt imports\n when the number of imported modules has changed.\n \"\"\"\n if self.sys_module_count == len(sys.modules):\n return\n self.sys_module_count = len(sys.modules)\n g = globals()\n candidates = list(mod_name for mod_name in sys.modules.copy() if self.module_valid(sys.modules[mod_name]))\n for mod_name in candidates:\n top = __import__(mod_name)\n g[top.__name__] = top\n proc_name = 'init_' + mod_name.replace('.', '_')\n if proc_name in g:\n g.update(g.pop(proc_name)())\n\n\ndef check_module(mod):\n if not Reloader.module_valid(mod):\n mod_name = mod.__name__\n raise ImportError((\"Module 
'{mod_name}' is not a binary module!\").format(**locals()))\n\n\nupdate_mapping = Reloader().update\ntype_map = {}\nnamespace = globals()\ntype_map.update({'...': ellipsis, \n 'bool': bool, \n 'char': Char, \n 'char*': str, \n 'char*const': str, \n 'double': float, \n 'float': float, \n 'int': int, \n 'List': ArrayLikeVariable, \n 'long': int, \n 'PyCallable': typing.Callable, \n 'PyObject': object, \n 'PySequence': typing.Iterable, \n 'PyTypeObject': type, \n 'QChar': Char, \n 'QHash': typing.Dict, \n 'qint16': int, \n 'qint32': int, \n 'qint64': int, \n 'qint8': int, \n 'qintptr': int, \n 'QList': ArrayLikeVariable, \n 'qlonglong': int, \n 'QMap': typing.Dict, \n 'QPair': typing.Tuple, \n 'qptrdiff': int, \n 'qreal': float, \n 'QSet': typing.Set, \n 'QString': str, \n 'QStringList': StringList, \n 'quint16': int, \n 'quint32': int, \n 'quint32': int, \n 'quint64': int, \n 'quint8': int, \n 'quintptr': int, \n 'qulonglong': int, \n 'QVariant': Variant, \n 'QVector': typing.List, \n 'real': float, \n 'short': int, \n 'signed char': Char, \n 'signed long': int, \n 'std.list': typing.List, \n 'std.map': typing.Dict, \n 'std.pair': typing.Tuple, \n 'std.vector': typing.List, \n 'str': str, \n 'true': True, \n 'Tuple': typing.Tuple, \n 'uchar': Char, \n 'uchar*': str, \n 'uint': int, \n 'ulong': int, \n 'ULONG_MAX': ulong_max, \n 'unsigned char': Char, \n 'unsigned char*': str, \n 'unsigned int': int, \n 'unsigned long int': int, \n 'unsigned long long': int, \n 'unsigned long': int, \n 'unsigned short int': int, \n 'unsigned short': int, \n 'Unspecified': None, \n 'ushort': int, \n 'void': int, \n 'WId': WId, \n 'zero(bytes)': '', \n 'zero(Char)': 0, \n 'zero(float)': 0, \n 'zero(int)': 0, \n 'zero(object)': None, \n 'zero(str)': '', \n 'zero(typing.Any)': None})\ntype_map.update({'array double*': ArrayLikeVariable(float), \n 'array float*': ArrayLikeVariable(float), \n 'array GLint*': ArrayLikeVariable(int), \n 'array GLuint*': ArrayLikeVariable(int), \n 'array int*': ArrayLikeVariable(int), \n 'array long long*': ArrayLikeVariable(int), \n 'array long*': ArrayLikeVariable(int), \n 'array short*': ArrayLikeVariable(int), \n 'array signed char*': bytes, \n 'array unsigned char*': bytes, \n 'array unsigned int*': ArrayLikeVariable(int), \n 'array unsigned short*': ArrayLikeVariable(int)})\ntype_map.update({'char*': bytes, \n 'QChar*': bytes, \n 'quint32*': int, \n 'quint8*': bytearray, \n 'uchar*': bytes, \n 'unsigned char*': bytes})\ntype_map.update({'bool*': ResultVariable(bool), \n 'float*': ResultVariable(float), \n 'int*': ResultVariable(int), \n 'long long*': ResultVariable(int), \n 'long*': ResultVariable(int), \n 'PStr*': ResultVariable(str), \n 'qint32*': ResultVariable(int), \n 'qint64*': ResultVariable(int), \n 'qreal*': ResultVariable(float), \n 'QString*': ResultVariable(str), \n 'quint16*': ResultVariable(int), \n 'uint*': ResultVariable(int), \n 'unsigned int*': ResultVariable(int), \n 'QStringList*': ResultVariable(StringList)})\n\ndef init_Shiboken():\n type_map.update({'PyType': type, \n 'shiboken2.bool': bool, \n 'size_t': int})\n return locals()\n\n\ndef init_minimal():\n type_map.update({'MinBool': bool})\n return locals()\n\n\ndef init_sample():\n import datetime\n type_map.update({'char': Char, \n 'char**': typing.List[str], \n 'Complex': complex, \n 'double': float, \n 'Foo.HANDLE': int, \n 'HANDLE': int, \n 'Null': None, \n 'nullptr': None, \n 'ObjectType.Identifier': Missing('sample.ObjectType.Identifier'), \n 'OddBool': bool, \n 'PStr': str, \n 'PyDate': 
datetime.date, \n 'sample.bool': bool, \n 'sample.char': Char, \n 'sample.double': float, \n 'sample.int': int, \n 'sample.ObjectType': object, \n 'sample.OddBool': bool, \n 'sample.Photon.TemplateBase[Photon.DuplicatorType]': sample.Photon.ValueDuplicator, \n 'sample.Photon.TemplateBase[Photon.IdentityType]': sample.Photon.ValueIdentity, \n 'sample.Point': Point, \n 'sample.PStr': str, \n 'sample.unsigned char': Char, \n 'std.size_t': int, \n 'std.string': str, \n 'ZeroIn': 0, \n 'Str(\"\")': '', \n 'Str(\"nown>\")': 'nown>'})\n return locals()\n\n\ndef init_other():\n import numbers\n type_map.update({'other.ExtendsNoImplicitConversion': Missing('other.ExtendsNoImplicitConversion'), \n 'other.Number': numbers.Number})\n return locals()\n\n\ndef init_smart():\n global SharedPtr\n\n class SharedPtr(Generic[_S]):\n __module__ = 'smart'\n\n smart.SharedPtr = SharedPtr\n type_map.update({'smart.Smart.Integer2': int})\n return locals()\n\n\ndef init_PySide2_QtCore():\n from PySide2.QtCore import Qt, QUrl, QDir\n from PySide2.QtCore import QRect, QSize, QPoint, QLocale, QByteArray\n from PySide2.QtCore import QMarginsF\n try:\n from PySide2.QtCore import Connection\n except ImportError:\n pass\n\n type_map.update({\"' '\": ' ', \n \"'%'\": '%', \n \"'g'\": 'g', \n '4294967295UL': 4294967295, \n 'CheckIndexOption.NoOption': Instance('PySide2.QtCore.QAbstractItemModel.CheckIndexOptions.NoOption'), \n 'false': False, \n 'list of QAbstractAnimation': typing.List[PySide2.QtCore.QAbstractAnimation], \n 'list of QAbstractState': typing.List[PySide2.QtCore.QAbstractState], \n 'long long': int, \n 'NULL': None, \n 'nullptr': None, \n 'PyByteArray': bytearray, \n 'PyBytes': bytes, \n 'PySide2.QtCore.QCborStreamReader.StringResult[PySide2.QtCore.QByteArray]': PySide2.QtCore.QCborStringResultByteArray, \n 'PySide2.QtCore.QCborStreamReader.StringResult[QString]': PySide2.QtCore.QCborStringResultString, \n 'PySide2.QtCore.QCborStreamReader.QCborStringResultByteArray': PySide2.QtCore.QCborStringResultByteArray, \n 'PySide2.QtCore.QCborStreamReader.QCborStringResultString': PySide2.QtCore.QCborStringResultString, \n 'PySide2.QtCore.QUrl.ComponentFormattingOptions': PySide2.QtCore.QUrl.ComponentFormattingOption, \n 'PyUnicode': typing.Text, \n 'Q_NULLPTR': None, \n 'QDir.Filters(AllEntries | NoDotAndDotDot)': Instance('QDir.Filters(QDir.AllEntries | QDir.NoDotAndDotDot)'), \n 'QDir.SortFlags(Name | IgnoreCase)': Instance('QDir.SortFlags(QDir.Name | QDir.IgnoreCase)'), \n 'QGenericArgument((0))': ellipsis, \n 'QGenericArgument()': ellipsis, \n 'QGenericArgument(0)': ellipsis, \n 'QGenericArgument(NULL)': ellipsis, \n 'QGenericArgument(nullptr)': ellipsis, \n 'QGenericArgument(Q_NULLPTR)': ellipsis, \n 'QJsonObject': typing.Dict[(str, PySide2.QtCore.QJsonValue)], \n 'QModelIndex()': Invalid('PySide2.QtCore.QModelIndex'), \n 'QModelIndexList': ModelIndexList, \n 'QModelIndexList': ModelIndexList, \n 'QString()': '', \n 'QStringList()': [], 'QStringRef': str, \n 'QStringRef': str, \n 'Qt.HANDLE': int, \n 'QUrl.FormattingOptions(PrettyDecoded)': Instance('QUrl.FormattingOptions(QUrl.PrettyDecoded)'), \n 'QVariant()': Invalid(Variant), \n 'QVariant.Type': type, \n 'QVariantMap': typing.Dict[(str, Variant)], \n 'QVariantMap': typing.Dict[(str, Variant)]})\n try:\n type_map.update({'PySide2.QtCore.QMetaObject.Connection': PySide2.QtCore.Connection})\n except AttributeError:\n pass\n\n return locals()\n\n\ndef init_PySide2_QtGui():\n from PySide2.QtGui import QPageLayout, QPageSize\n type_map.update({'0.0f': 0.0, \n 
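# the string keys below are C++ default-value spellings; the values are the Python objects they map to\n 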
'1.0f': 1.0, \n 'GL_COLOR_BUFFER_BIT': GL_COLOR_BUFFER_BIT, \n 'GL_NEAREST': GL_NEAREST, \n 'int32_t': int, \n 'QPixmap()': Default('PySide2.QtGui.QPixmap'), \n 'QPlatformSurface*': int, \n 'QVector< QTextLayout.FormatRange >()': [], 'uint32_t': int, \n 'uint8_t': int, \n 'USHRT_MAX': ushort_max})\n return locals()\n\n\ndef init_PySide2_QtWidgets():\n from PySide2.QtWidgets import QWidget, QMessageBox, QStyleOption, QStyleHintReturn, QStyleOptionComplex\n from PySide2.QtWidgets import QGraphicsItem, QStyleOptionGraphicsItem\n type_map.update({'QMessageBox.StandardButtons(Yes | No)': Instance('QMessageBox.StandardButtons(QMessageBox.Yes | QMessageBox.No)'), \n 'QWidget.RenderFlags(DrawWindowBackground | DrawChildren)': Instance('QWidget.RenderFlags(QWidget.DrawWindowBackground | QWidget.DrawChildren)'), \n 'SH_Default': QStyleHintReturn.SH_Default, \n 'SO_Complex': QStyleOptionComplex.SO_Complex, \n 'SO_Default': QStyleOption.SO_Default, \n 'static_cast(Qt.MatchExactly|Qt.MatchCaseSensitive)': Instance('Qt.MatchFlags(Qt.MatchExactly | Qt.MatchCaseSensitive)'), \n 'Type': PySide2.QtWidgets.QListWidgetItem.Type})\n return locals()\n\n\ndef init_PySide2_QtSql():\n from PySide2.QtSql import QSqlDatabase\n type_map.update({'QLatin1String(defaultConnection)': QSqlDatabase.defaultConnection, \n 'QVariant.Invalid': Invalid('Variant')})\n return locals()\n\n\ndef init_PySide2_QtNetwork():\n best_structure = typing.OrderedDict if getattr(typing, 'OrderedDict', None) else typing.Dict\n type_map.update({'QMultiMap[PySide2.QtNetwork.QSsl.AlternativeNameEntryType, QString]': best_structure[(PySide2.QtNetwork.QSsl.AlternativeNameEntryType, typing.List[str])]})\n del best_structure\n return locals()\n\n\ndef init_PySide2_QtXmlPatterns():\n from PySide2.QtXmlPatterns import QXmlName\n type_map.update({'QXmlName.NamespaceCode': Missing('PySide2.QtXmlPatterns.QXmlName.NamespaceCode'), \n 'QXmlName.PrefixCode': Missing('PySide2.QtXmlPatterns.QXmlName.PrefixCode')})\n return locals()\n\n\ndef init_PySide2_QtMultimedia():\n import PySide2.QtMultimediaWidgets\n check_module(PySide2.QtMultimediaWidgets)\n type_map.update({'QGraphicsVideoItem': PySide2.QtMultimediaWidgets.QGraphicsVideoItem, \n 'qint64': int, \n 'QVideoWidget': PySide2.QtMultimediaWidgets.QVideoWidget})\n return locals()\n\n\ndef init_PySide2_QtOpenGL():\n type_map.update({'GLbitfield': int, \n 'GLenum': int, \n 'GLfloat': float, \n 'GLint': int, \n 'GLuint': int})\n return locals()\n\n\ndef init_PySide2_QtQml():\n type_map.update({'QJSValueList()': [], 'QVariantHash()': typing.Dict[(str, Variant)]})\n return locals()\n\n\ndef init_PySide2_QtQuick():\n type_map.update({'PySide2.QtQuick.QSharedPointer[PySide2.QtQuick.QQuickItemGrabResult]': PySide2.QtQuick.QQuickItemGrabResult, \n 'UnsignedShortType': int})\n return locals()\n\n\ndef init_PySide2_QtScript():\n type_map.update({'QScriptValueList()': []})\n return locals()\n\n\ndef init_PySide2_QtTest():\n type_map.update({'PySide2.QtTest.QTest.PySideQTouchEventSequence': PySide2.QtTest.QTest.QTouchEventSequence, \n 'PySide2.QtTest.QTouchEventSequence': PySide2.QtTest.QTest.QTouchEventSequence})\n return locals()\n\n\ndef init_PySide2_QtWinExtras():\n type_map.update({'QList< QWinJumpListItem* >()': []})\n return locals()\n\n\ndef init_PySide2_QtDataVisualization():\n from PySide2.QtDataVisualization import QtDataVisualization\n QtDataVisualization.QBarDataRow = typing.List[QtDataVisualization.QBarDataItem]\n QtDataVisualization.QBarDataArray = typing.List[QtDataVisualization.QBarDataRow]\n 
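# surface data mirrors the bar data aliases above: an array is a list of rows, a row a list of items\n 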
QtDataVisualization.QSurfaceDataRow = typing.List[QtDataVisualization.QSurfaceDataItem]\n QtDataVisualization.QSurfaceDataArray = typing.List[QtDataVisualization.QSurfaceDataRow]\n type_map.update({'100.0f': 100.0, \n 'QtDataVisualization.QBarDataArray': QtDataVisualization.QBarDataArray, \n 'QtDataVisualization.QBarDataArray*': QtDataVisualization.QBarDataArray, \n 'QtDataVisualization.QSurfaceDataArray': QtDataVisualization.QSurfaceDataArray, \n 'QtDataVisualization.QSurfaceDataArray*': QtDataVisualization.QSurfaceDataArray})\n return locals()\n\n\ndef init_testbinding():\n type_map.update({'testbinding.PySideCPP2.TestObjectWithoutNamespace': testbinding.TestObjectWithoutNamespace})\n return locals()","sub_path":"pycfiles/shiboken2-5.14.2.1-5.14.2-cp27-cp27m-macosx_10_13_intel/mapping.py","file_name":"mapping.py","file_ext":"py","file_size_in_byte":17194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"40504415","text":"import os.path as osp\nfrom collections import OrderedDict\nfrom copy import deepcopy\n\nimport numpy as np\n\nimport torch\nimport torch.optim as optim\nfrom torch import nn as nn\nfrom torch import autograd\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\n\nimport rlkit.torch.pytorch_util as ptu\nfrom rlkit.core.eval_util import create_stats_ordered_dict\nfrom rlkit.torch.torch_irl_algorithm import TorchIRLAlgorithm\nfrom rlkit.torch.sac.policies import MakeDeterministic\nfrom rlkit.core.train_util import linear_schedule\nfrom rlkit.core.vistools import plot_seaborn_grid\nfrom rlkit.core import logger\n\n\ndef concat_trajs(trajs):\n new_dict = {}\n for k in trajs[0].keys():\n if isinstance(trajs[0][k], dict):\n new_dict[k] = concat_trajs([t[k] for t in trajs])\n else:\n new_dict[k] = np.concatenate([t[k] for t in trajs], axis=0)\n return new_dict\n\n\nclass FixedDistDiscTrainAlg():\n '''\n state-marginal matching\n '''\n def __init__(\n self,\n discriminator,\n\n exp_data,\n pol_data,\n\n disc_optim_batch_size=1024,\n num_update_loops_per_train_call=1,\n num_disc_updates_per_loop_iter=1,\n\n disc_lr=1e-3,\n disc_momentum=0.0,\n disc_optimizer_class=optim.Adam,\n\n use_grad_pen=True,\n grad_pen_weight=10,\n\n train_objective='airl',\n ):\n assert disc_lr != 1e-3, 'Just checking that this is being taken from the spec file'\n \n self.exp_data, self.pol_data = exp_data, pol_data\n\n self.discriminator = discriminator\n self.rewardf_eval_statistics = None\n self.disc_optimizer = disc_optimizer_class(\n self.discriminator.parameters(),\n lr=disc_lr,\n betas=(disc_momentum, 0.999)\n )\n print('\\n\\nDISC MOMENTUM: %f\\n\\n' % disc_momentum)\n\n self.disc_optim_batch_size = disc_optim_batch_size\n\n assert train_objective in ['airl', 'fairl', 'gail', 'w1']\n self.train_objective = train_objective\n\n self.bce = nn.BCEWithLogitsLoss()\n target_batch_size = self.disc_optim_batch_size\n self.bce_targets = torch.cat(\n [\n torch.ones(target_batch_size, 1),\n torch.zeros(target_batch_size, 1)\n ],\n dim=0\n )\n self.bce_targets = Variable(self.bce_targets)\n if ptu.gpu_enabled():\n self.bce.cuda()\n self.bce_targets = self.bce_targets.cuda()\n \n self.use_grad_pen = use_grad_pen\n self.grad_pen_weight = grad_pen_weight\n\n self.num_update_loops_per_train_call = num_update_loops_per_train_call\n self.num_disc_updates_per_loop_iter = num_disc_updates_per_loop_iter\n\n d = 5.0\n self._d = d\n self._d_len = np.arange(-d,d+0.25,0.25).shape[0]\n self.xy_var = []\n for i in np.arange(-d,d+0.25,0.25):\n for j in 
np.arange(-d,d+0.25,0.25):\n self.xy_var.append([float(i),float(j)])\n self.xy_var = np.array(self.xy_var)\n self.xy_var = Variable(ptu.from_numpy(self.xy_var), requires_grad=False)\n\n\n def get_disc_training_batch(self, batch_size, from_expert):\n if from_expert:\n buffer = self.exp_data\n else:\n buffer = self.pol_data\n idx = np.random.choice(buffer.shape[0], size=batch_size, replace=False)\n batch = {\n 'observations': buffer[idx]\n }\n batch = np_to_pytorch_batch(batch)\n return batch\n\n\n def train(self):\n epoch = -1\n for t in range(self.num_update_loops_per_train_call):\n epoch += 1\n for _ in range(self.num_disc_updates_per_loop_iter):\n self._do_reward_training(epoch)\n \n self.discriminator.eval()\n logits = self.discriminator(self.xy_var, None)\n rewards = self._convert_logits_to_reward(logits)\n self.discriminator.train()\n\n logit_bound = 10.0\n if self.train_objective == 'airl':\n rew_bound = 10.0\n elif self.train_objective == 'fairl':\n rew_bound = 100.0\n elif self.train_objective == 'gail':\n rew_bound = 10.0\n elif self.train_objective == 'w1':\n rew_bound = 10.0\n else:\n raise Exception()\n \n # plot the logits of the discriminator\n # print(logit_bound)\n # print(rew_bound)\n logits = ptu.get_numpy(logits)\n logits = np.reshape(logits, (int(self._d_len), int(self._d_len))).T\n plot_seaborn_grid(logits, -logit_bound, logit_bound, 'Disc Logits Epoch %d'%epoch, osp.join(logger.get_snapshot_dir(), 'disc_logits_epoch_%d.png'%epoch))\n\n # plot the rewards given by the discriminator\n rewards = ptu.get_numpy(rewards)\n rewards = np.reshape(rewards, (int(self._d_len), int(self._d_len))).T\n plot_seaborn_grid(rewards, -rew_bound, rew_bound, 'Disc Rewards Epoch %d'%epoch, osp.join(logger.get_snapshot_dir(), 'disc_rewards_epoch_%d.png'%epoch))\n\n logger.dump_tabular(with_prefix=False, with_timestamp=False)\n self.rewardf_eval_statistics = None\n\n\n def _do_reward_training(self, epoch):\n '''\n Train the discriminator\n '''\n self.disc_optimizer.zero_grad()\n\n expert_batch = self.get_disc_training_batch(self.disc_optim_batch_size, True)\n policy_batch = self.get_disc_training_batch(self.disc_optim_batch_size, False)\n\n expert_obs = expert_batch['observations']\n policy_obs = policy_batch['observations']\n\n obs = torch.cat([expert_obs, policy_obs], dim=0)\n disc_logits = self.discriminator(obs, None)\n\n if self.train_objective == 'w1':\n n = expert_obs.size(0)\n # not CE loss but I just got lazy about renaming things below\n disc_ce_loss = disc_logits[:n].mean() - disc_logits[n:].mean()\n total_loss = disc_ce_loss\n else: # the disc objective for all other approaches is BCE\n disc_ce_loss = self.bce(disc_logits, self.bce_targets)\n total_loss = disc_ce_loss\n disc_preds = (disc_logits > 0).type(disc_logits.data.type())\n accuracy = (disc_preds == self.bce_targets).type(torch.FloatTensor).mean()\n \n if self.use_grad_pen:\n eps = Variable(torch.rand(expert_obs.size(0), 1))\n if ptu.gpu_enabled(): eps = eps.cuda()\n \n interp_obs = eps*expert_obs + (1-eps)*policy_obs\n interp_obs = interp_obs.detach()\n interp_obs.requires_grad = True\n gradients = autograd.grad(\n outputs=self.discriminator(interp_obs, None).sum(),\n inputs=[interp_obs],\n # grad_outputs=torch.ones(exp_specs['batch_size'], 1).cuda(),\n create_graph=True, retain_graph=True, only_inputs=True\n )\n total_grad = gradients[0]\n\n # GP from Gulrajani et al.\n # gradient_penalty = ((total_grad.norm(2, dim=1) - 1) ** 2).mean()\n # disc_grad_pen_loss = gradient_penalty * self.grad_pen_weight\n\n # DIFFERENT FROM 
GP from Gulrajani et al.\n gradient_penalty = total_grad.norm(2, dim=1) - 1\n gradient_penalty = F.relu(gradient_penalty)\n gradient_penalty = (gradient_penalty**2).mean()\n disc_grad_pen_loss = gradient_penalty * self.grad_pen_weight\n\n # # GP from Mescheder et al.\n # gradient_penalty = (total_grad.norm(2, dim=1) ** 2).mean()\n # disc_grad_pen_loss = gradient_penalty * 0.5 * self.grad_pen_weight\n\n total_loss = total_loss + disc_grad_pen_loss\n\n total_loss.backward()\n self.disc_optimizer.step()\n\n \"\"\"\n Save some statistics for eval\n \"\"\"\n if self.rewardf_eval_statistics is None:\n \"\"\"\n Eval should set this to None.\n This way, these statistics are only computed for one batch.\n \"\"\"\n self.rewardf_eval_statistics = OrderedDict()\n \n self.rewardf_eval_statistics['Disc CE Loss'] = np.mean(ptu.get_numpy(disc_ce_loss))\n self.rewardf_eval_statistics['Disc Acc'] = np.mean(ptu.get_numpy(accuracy))\n if self.use_grad_pen:\n self.rewardf_eval_statistics['Grad Pen'] = np.mean(ptu.get_numpy(gradient_penalty))\n self.rewardf_eval_statistics['Grad Pen W'] = np.mean(self.grad_pen_weight)\n \n for key, value in self.rewardf_eval_statistics.items():\n logger.record_tabular(key, value)\n\n\n def _convert_logits_to_reward(self, logits):\n if self.train_objective == 'airl':\n return logits\n elif self.train_objective == 'fairl':\n return torch.exp(logits)*(-logits)\n elif self.train_objective == 'gail':\n return F.softplus(logits, beta=-1)\n elif self.train_objective == 'w1':\n return -logits\n else:\n raise Exception()\n \n\n def cuda(self):\n self.discriminator.cuda()\n self.xy_var = self.xy_var.cuda()\n \n\n def cpu(self):\n self.discriminator.cpu()\n self.xy_var = self.xy_var.cpu()\n\n\ndef _elem_or_tuple_to_variable(elem_or_tuple):\n if isinstance(elem_or_tuple, tuple):\n return tuple(\n _elem_or_tuple_to_variable(e) for e in elem_or_tuple\n )\n return Variable(ptu.from_numpy(elem_or_tuple).float(), requires_grad=False)\n\n\ndef _filter_batch(np_batch):\n for k, v in np_batch.items():\n if v.dtype == np.bool:\n yield k, v.astype(int)\n else:\n yield k, v\n\n\ndef np_to_pytorch_batch(np_batch):\n return {\n k: _elem_or_tuple_to_variable(x)\n for k, x in _filter_batch(np_batch)\n if x.dtype != np.dtype('O') # ignore object (e.g. dictionaries)\n }\n","sub_path":"rlkit/state_matching_algs/fixed_dist_disc_train_alg.py","file_name":"fixed_dist_disc_train_alg.py","file_ext":"py","file_size_in_byte":10002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"551330769","text":"#!/usr/bin/env python3\n# _*_ coding: utf-8 _*_\n\n# This config is a modification of the https://github.com/qtile/qtile-examples/tree/master/oboingo example.\n# The main change is that it does not use multi-headed setups predefined for certain host names.\n# It uses xrandr instead, to detect the number of currently connected displays, and displaces 8 groups (workspaces)\n# on up to 4 screens. It would be easy to add more if needed.\n# Other adjustments (group names, key bindings) in majority come from default ArchLabs configs, which I'm used to.\n# I also added the ArchLabs rofr.sh script. 
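For reference, a minimal self-contained sketch of the one-sided gradient penalty that _do_reward_training computes in the discriminator trainer above; "critic" is a hypothetical stand-in for self.discriminator (which additionally takes a second, unused argument there), the default weight is illustrative, and device placement is omitted:

import torch
import torch.nn.functional as F
from torch import autograd

def one_sided_grad_pen(critic, expert_obs, policy_obs, weight=10.0):
    # interpolate between expert and policy samples, then differentiate the
    # critic output with respect to the interpolated points
    eps = torch.rand(expert_obs.size(0), 1)
    interp = (eps * expert_obs + (1 - eps) * policy_obs).detach()
    interp.requires_grad_(True)
    grads = autograd.grad(
        outputs=critic(interp).sum(), inputs=[interp],
        create_graph=True, retain_graph=True, only_inputs=True,
    )[0]
    # one-sided variant: only gradient norms above 1 are penalised, then squared
    penalty = F.relu(grads.norm(2, dim=1) - 1).pow(2).mean()
    return weight * penalty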
It's a copy w/ the logout command adjusted to qtile.\n# DO NOT FORGET to edit autostart.sh to your needs / installed packages.\n# DO NOT DELETE the __init__.py file.\n\n# Original copyrights:\n\n# Copyright (c) 2010 Aldo Cortesi\n# Copyright (c) 2010, 2014 dequis\n# Copyright (c) 2012 Randall Ma\n# Copyright (c) 2012-2014 Tycho Andersen\n# Copyright (c) 2012 Craig Barnes\n# Copyright (c) 2013 horsik\n# Copyright (c) 2013 Tao Sauvage\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport os\nimport subprocess\n\nfrom libqtile import layout, hook\n\nfrom keys import keys # NOQA\nfrom groups import groups # NOQA\nfrom screens import screens # NOQA\n\n\n# Setup layouts\nlayouts = [\n layout.Max(),\n layout.Columns(),\n layout.Floating(),\n layout.VerticalTile(),\n layout.Stack(num_stacks=2)\n]\n\nwidget_defaults = dict(\n font='sans',\n fontsize=14,\n padding=3,\n)\nextension_defaults = widget_defaults.copy()\n\ndgroups_key_binder = None\ndgroups_app_rules = []\nmain = None\nfollow_mouse_focus = True\nbring_front_click = False\ncursor_warp = False\nfloating_layout = layout.Floating(float_rules=[\n {'wmclass': 'confirm'},\n {'wmclass': 'dialog'},\n {'wmclass': 'download'},\n {'wmclass': 'error'},\n {'wmclass': 'file_progress'},\n {'wmclass': 'notification'},\n {'wmclass': 'splash'},\n {'wmclass': 'toolbar'},\n {'wmclass': 'confirmreset'}, # gitk\n {'wmclass': 'makebranch'}, # gitk\n {'wmclass': 'maketag'}, # gitk\n {'wname': 'branchdialog'}, # gitk\n {'wname': 'pinentry'}, # GPG key password entry\n {'wmclass': 'ssh-askpass'}, # ssh-askpass\n])\nauto_fullscreen = False\nfocus_on_window_activation = \"smart\"\n\n# XXX: Gasp! We're lying here. In fact, nobody really uses or cares about this\n# string besides java UI toolkits; you can see several discussions on the\n# mailing lists, github issues, and other WM documentation that suggest setting\n# this string if your java app doesn't work correctly. 
We may as well just lie\n# and say that we're a working one by default.\n#\n# We choose LG3D to maximize irony: it is a 3D non-reparenting WM written in\n# java that happens to be on java's whitelist.\nwmname = \"LG3D\"\n\n# Do things on startup\n@hook.subscribe.startup_once\ndef autostart():\n home = os.path.expanduser('~/.config/qtile/autostart.sh')\n subprocess.call([home])\n","sub_path":"myqtile/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":3905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"651021364","text":"import discord, random, time, datetime, asyncio\nfrom discord.ext import commands\nfrom PIL import Image, ImageDraw, ImageFont\nimport textwrap\nimport aiohttp\nimport rethinkdb as r\nimport os\nfrom prettytable import PrettyTable\nimport gettext\nfrom io import BytesIO\n\nlist_ = [\n \"Shiro\",\n \"Kafuu Chino\",\n \"Toujou Koneko\",\n \"Aihara Enju\",\n \"Yoshino\",\n \"Takanashi Rikka\",\n \"Tsutsukakushi Tsukiko\",\n \"Aisaka Taiga\",\n \"Oshino Shinobu\",\n \"Hasegawa Kobato\",\n \"Hibiki\",\n \"Terminus Est\",\n \"Tachibana Kanade\",\n \"Noel\",\n \"Itsuka Kotori\",\n \"Illyasviel Von Einzbern\",\n \"Sprout Tina\",\n \"Yazawa Nico\",\n \"Izumi Konata\",\n \"Konjiki No Yami\",\n \"Shana\",\n \"Gokou Ruri\",\n \"Sigtuna Yurie\",\n \"Shimakaze\",\n \"Yuuki Mikan\",\n \"Victorique De Blois\",\n \"Kanzaki Aria\",\n \"Cirno\",\n \"Wendy Marvell\",\n \"Nakano Azusa\",\n \"Akatsuki\",\n \"Yaya\",\n \"Yukihira Furano\",\n \"Uni\",\n \"Akatsuki\",\n \"Nyaruko\",\n \"Azuki Azusa\",\n \"Hachikuji Mayoi\",\n \"Amatsukaze\",\n \"Flandre Scarlet\",\n \"Hiiragi Kagami\",\n \"Tatsumaki\",\n \"Kaname Madoka\",\n \"Sakura Kyouko\",\n \"Fear Kubrick\",\n \"Sengoku Nadeko\",\n \"Kirima Sharo\",\n \"Noumi Kudryavka\",\n \"Kanna\",\n \"chifuyu_himeki\",\n \"holo\",\n \"dva\",\n \"megumin\"\n] # \"Halloween NekoBot\" # Special Halloween Card hahayes\n # \"Louise Francoise Le Blanc De La Valliere\",\n # \"Hoshimiya Kate\",\n\nclass CardGame:\n \"\"\"Loli Card Gamelol xDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD\"\"\"\n\n def __init__(self, bot):\n self.bot = bot\n self.lang = {}\n # self.languages = [\"french\", \"polish\", \"spanish\", \"tsundere\", \"weeb\"]\n self.languages = [\"tsundere\", \"weeb\", \"chinese\"]\n for x in self.languages:\n self.lang[x] = gettext.translation(\"cardgame\", localedir=\"locale\", languages=[x])\n\n async def _get_text(self, ctx):\n lang = await self.bot.get_language(ctx)\n if lang:\n if lang in self.languages:\n return self.lang[lang].gettext\n else:\n return gettext.gettext\n else:\n return gettext.gettext\n\n async def __post_to_hook(self, action:str, user:discord.Member, amount):\n try:\n async with aiohttp.ClientSession() as cs:\n await cs.post(\"http://localhost:1241\", json={\n \"user\": str(user.id),\n \"action\": action,\n \"amount\": amount,\n \"time\": str(int(time.time()))\n })\n except:\n pass\n\n async def __has_account(self, user:int):\n if await r.table(\"cardgame\").get(str(user)).run(self.bot.r_conn):\n return True\n else:\n return False\n\n async def __create_account(self, user:int):\n data = {\n \"id\": str(user),\n \"lastdaily\": \"0\",\n \"cards\": []\n }\n await r.table(\"cardgame\").insert(data).run(self.bot.r_conn)\n\n async def __check_for_user(self, user:int):\n if not await self.__has_account(user):\n await self.__create_account(user)\n\n @commands.group()\n @commands.cooldown(1, 5, commands.BucketType.user)\n async def card(self, ctx: commands.Context):\n \"\"\"Loli 
Card Game OwO\"\"\"\n _ = await self._get_text(ctx)\n\n await self.__check_for_user(ctx.author.id)\n\n if ctx.invoked_subcommand is None:\n return await ctx.send(_(\"A loli roleplaying card game ofc!\\n\\n\"\n \"**Commands**\\n\"\n \"**n!card daily** - Get your daily cards\\n\"\n \"**n!card display** - Display a card of yours\\n\"\n \"**n!card list** - Lists your cards\\n\"\n \"**n!card sell** - Sell a card\\n\"\n \"**n!card transfer** - Transfer Cards\"))\n\n @card.command(name=\"transfer\")\n async def card_transfer(self, ctx, card_number, user:discord.Member):\n \"\"\"Transfer cards to other users\"\"\"\n _ = await self._get_text(ctx)\n\n if user == ctx.author:\n return await ctx.send(_(\"You can't send yourself cards\"))\n elif user.bot:\n return await ctx.send(_(\"You can't send bots cards.\"))\n\n try:\n card_number = int(card_number)\n except:\n return await ctx.send(_(\"Not a valid number\"))\n\n if card_number > 6 or card_number <= 0:\n return await ctx.send(_(\"Not a valid card number.\"))\n\n await self.__check_for_user(ctx.author.id)\n await self.__check_for_user(user.id)\n\n author_data = await r.table(\"cardgame\").get(str(ctx.author.id)).run(self.bot.r_conn)\n author_cards = author_data[\"cards\"]\n user_data = await r.table(\"cardgame\").get(str(user.id)).run(self.bot.r_conn)\n user_cards = user_data[\"cards\"]\n\n if len(user_cards) >= 6:\n return await ctx.send(_(\"%s has no slots left\") % user.mention)\n\n try:\n card = author_cards[card_number-1]\n except:\n return await ctx.send(_(\"Not a valid card.\"))\n\n user_cards.append(card)\n\n newdata = {\n \"cards\": user_cards\n }\n\n await r.table(\"cardgame\").get(str(ctx.author.id)).update({\"cards\": r.row[\"cards\"].delete_at(card_number-1)}).run(self.bot.r_conn)\n await r.table(\"cardgame\").get(str(user.id)).update(newdata).run(self.bot.r_conn)\n await ctx.send(_(\"Transferred card to %s!\") % user.mention)\n\n # @card.command(name='fight', aliases=['battle'])\n # async def card_battle(self, ctx, user: discord.Member):\n # \"\"\"Fight a user OwO\"\"\"\n # lang = await self.bot.redis.get(f\"{ctx.author.id}-lang\")\n # if lang:\n # lang = lang.decode('utf8')\n # else:\n # lang = \"english\"\n # author = ctx.author\n #\n # await self.__check_for_user(ctx.author.id)\n #\n # author_data = await r.table(\"cardgame\").get(str(author.id)).run(self.bot.r_conn)\n # if len(author_data[\"cards\"]) == 0:\n # return await ctx.send(\"%s you don't have any cards.\" % author.mention)\n # user_data = await r.table(\"cardgame\").get(str(user.id)).run(self.bot.r_conn)\n # if len(user_data[\"cards\"]) == 0:\n # return await ctx.send(\"%s has no cards.\" % user.mention)\n #\n # await ctx.send(getlang(lang)[\"cardgame\"][\"battle\"][\"confirm\"].format(user, author))\n #\n # def check_user(m):\n # return m.author == user and m.channel == ctx.message.channel\n #\n # def check_author(m):\n # return m.author == author and m.channel == ctx.message.channel\n #\n # try:\n # msg = await self.bot.wait_for('message', check=check_user, timeout=15.0)\n # except asyncio.TimeoutError:\n # await ctx.send(embed=discord.Embed(color=0xff5630,\n # description=getlang(lang)[\"cardgame\"][\"battle\"][\"cancelled\"]))\n # return\n #\n # if msg.content.lower() == \"yes\":\n # await ctx.send(getlang(lang)[\"cardgame\"][\"battle\"][\"author_select\"].format(author))\n # try:\n # msg = await self.bot.wait_for('message', check=check_author, timeout=15.0)\n # except asyncio.TimeoutError:\n # return await ctx.send(embed=discord.Embed(color=0xff5630,\n # 
description=getlang(lang)[\"cardgame\"][\"battle\"][\"cancelled\"]))\n # try:\n # msgcontent = int(msg.content)\n # except:\n # return await ctx.send(getlang(lang)[\"cardgame\"][\"battle\"][\"invalid\"])\n # if msgcontent <= 0:\n # return await ctx.send(getlang(lang)[\"cardgame\"][\"battle\"][\"invalid\"])\n # elif msgcontent > 6:\n # return await ctx.send(getlang(lang)[\"cardgame\"][\"battle\"][\"invalid\"])\n #\n # try:\n # author_card = author_data[\"cards\"][msgcontent]\n # except:\n # return await ctx.send(getlang(lang)[\"cardgame\"][\"battle\"][\"invalid_slot\"].format(author))\n #\n # else:\n # await ctx.send(getlang(lang)[\"cardgame\"][\"battle\"][\"author_select\"].format(user))\n # try:\n # msg = await self.bot.wait_for('message', check=check_user, timeout=15.0)\n # except asyncio.TimeoutError:\n # return await ctx.send(embed=discord.Embed(color=0xff5630, description=getlang(lang)[\"cardgame\"][\"battle\"][\"cancelled\"]))\n # try:\n # msgcontent = int(msg.content)\n # except:\n # return await ctx.send(getlang(lang)[\"cardgame\"][\"battle\"][\"invalid\"])\n # if msgcontent <= 0:\n # return await ctx.send(getlang(lang)[\"cardgame\"][\"battle\"][\"invalid\"])\n # elif msgcontent > 6:\n # return await ctx.send(getlang(lang)[\"cardgame\"][\"battle\"][\"invalid\"])\n #\n # try:\n # user_card = user_data[\"cards\"][msgcontent]\n # except:\n # return await ctx.send(getlang(lang)[\"cardgame\"][\"battle\"][\"invalid_slot\"].format(author))\n #\n # author_card_name = author_card[\"name\"]\n # author_card_attack = author_card[\"attack\"]\n # author_card_defense = author_card[\"defense\"]\n #\n # user_card_name = user_card[\"name\"]\n # user_card_attack = user_card[\"attack\"]\n # user_card_defense = user_card[\"defense\"]\n # msg = await ctx.send(\n # embed=discord.Embed(color=0xDEADBF, title=f\"{author_card_name} ({author.name}) |\\n\"\n # f\" {user_card_name} ({user.name})\",\n # description=f\"**{author.name}** vs **{user.name}**\"))\n # await asyncio.sleep(random.randint(3, 6))\n # if (int(author_card_attack) + int(author_card_defense)) > (\n # int(user_card_attack) + int(user_card_defense)):\n # await msg.edit(\n # embed=discord.Embed(color=0xDEADBF, title=f\"{author_card_name} ({author.name}) |\\n\"\n # f\" {user_card_name} ({user.name})\",\n # description=f\"**{author.name}** vs **{user.name}**\\n\"\n # f\"**{author.name}** Beat **{user.name}**\"))\n # elif (int(author_card_attack) + int(author_card_defense)) < (\n # int(user_card_attack) + int(user_card_defense)):\n # await msg.edit(\n # embed=discord.Embed(color=0xDEADBF, title=f\"{author_card_name} ({author.name}) |\\n\"\n # f\" {user_card_name} ({user.name})\",\n # description=f\"**{author.name}** vs **{user.name}**\\n\"\n # f\"**{user.name}** Beat **{author.name}**\"))\n # else:\n # return await ctx.send(getlang(lang)[\"cardgame\"][\"battle\"][\"cancelled\"])\n\n @card.command(name='daily')\n async def card_daily(self, ctx):\n \"\"\"Get your card daily\"\"\"\n _ = await self._get_text(ctx)\n\n await self.__check_for_user(ctx.author.id)\n\n data = await r.table(\"cardgame\").get(str(ctx.author.id)).run(self.bot.r_conn)\n lastdaily = int(data[\"lastdaily\"])\n cards = data[\"cards\"]\n\n lastdaily = datetime.datetime.utcfromtimestamp(lastdaily).strftime(\"%d\")\n today = datetime.datetime.utcfromtimestamp(time.time()).strftime(\"%d\")\n\n author = ctx.message.author\n\n if today == lastdaily:\n tommorow = datetime.datetime.now() + datetime.timedelta(1)\n midnight = datetime.datetime(year=tommorow.year, month=tommorow.month,\n 
day=tommorow.day, hour=0, minute=0, second=0)\n m, s = divmod((midnight - datetime.datetime.now()).seconds, 60)\n h, m = divmod(m, 60)\n return await ctx.send(_(\"Wait another %sh %sm before using daily again...\") % (h, m,))\n\n if len(cards) >= 6:\n return await ctx.send(_(\"All of your slots are full ;w;\"))\n\n character_loli = str(random.choice(list_)).lower().replace(' ', '_')\n\n cards.append({\n \"name\": character_loli,\n \"attack\": random.randint(1, 50),\n \"defense\": random.randint(1, 50)\n })\n\n newdata = {\n \"lastdaily\": str(int(time.time())),\n \"cards\": cards\n }\n\n await r.table(\"cardgame\").get(str(author.id)).update(newdata).run(self.bot.r_conn)\n await ctx.send(_(\"Given character **%s!**\") % character_loli.replace('_', ' ').title())\n\n def _generate_card(self, character: str, num: int, attack: int, defense: int):\n card_name = f\"data/{character}.jpg\"\n img = Image.open('data/card.jpg')\n _character = Image.open(card_name).resize((314, 313))\n\n draw = ImageDraw.Draw(img)\n title_font = ImageFont.truetype(\"data/fonts/card.ttf\", 40)\n lower_font = ImageFont.truetype(\"data/fonts/card.ttf\", 20)\n desc_font = ImageFont.truetype(\"data/fonts/card.ttf\", 16)\n\n img.paste(_character, (52, 114))\n\n if character == 'kanna':\n description = \"Be sure to keep this loli charged. Very thicc thighs.\"\n elif character == 'yaya':\n description = \"She'll be your puppet if you promise to marry her.\"\n elif character == 'yoshino':\n description = \"She must be a happy loli. Word of the wise never have her lose Yoshinon.\"\n elif character == 'toujou_koneko':\n description = \"A Neko Loli who will not kindly treat perverted actions.\"\n elif character == 'terminus_est':\n description = \"A sword who can transform into a loli. For some reason is just fine wearing only knee socks but not being fully naked.\"\n elif character == 'azuki_azusa':\n description = \"A hard working loli who pretends to be rich. Likes animals and works a lot of jobs to afford the act.\"\n elif character == 'itsuka_kotori':\n description = \"A bipolar loli. The color of the ribbon determines her personality as weak for white and strong for black.\"\n elif character == 'tachibana_kanade':\n description = \"An \\\"Angel\\\" who develops her own body to defend.\"\n elif character == 'nyaruko':\n description = \"An obsessive otaku loli who will kill anyone that dares attempt to harm what she loves. \"\n elif character == 'cirno':\n description = \"An ice fairy who never backs down from a challenge. She is very weak in respect to others but won't stop trying.\"\n elif character == 'flandre_scarlet':\n description = \"She respects her sister so much that she never leaves the mansion due to her orders. Is nice, quiet, and a tad nuts. \"\n elif character == 'shiro':\n description = \"Genius gamer who is excellent at both strategy and in first person shooters. She will quickly master languages.\"\n elif character == 'aihara_enju':\n description = \"A rabbit type girl who will protect her friends. Can get jealous even to friends and tries to marry her partner at every chance.\"\n elif character == 'takanashi_rikka':\n description = \"A loli suffering from \\\"8th grade syndrome\\\" who believes she has the power of the tyrant's eye and will always walk around with an umbrella.\"\n elif character == 'tsutsukakushi_tsukiko':\n description = \"A gluttonous loli who will eat numerous snacks and cannot show emotion. 
Thinks of herself as childish.\"\n elif character == 'aisaka_taiga':\n description = \"Kind to those she trusts while aggressive to others. She hates her height pointed out or being called the palm top tiger.\"\n elif character == 'hasegawa_kobato':\n description = \"A very shy loli who enjoys cosplaying. She is almost always dressed up in a cosplay of her favorite gothic vampire.\"\n elif character == 'sprout_tina':\n description = \"A nocturnal loli. She will be sleepy during the day; however, when night falls she becomes an excellent sniper who follows every order.\"\n elif character == 'konjiki_no_yami':\n description = \"Attacks those that talk about something she doesn't like and hates perverted people.\"\n elif character == 'yukihira_furano':\n description = \"A quiet girl that will insert sexual or vulgar words or phrases into sentences. Is also a part of the \\\"Reject Five\\\"\"\n elif character == 'tatsumaki':\n description = \"Arrogant and overconfident. She considers her job as a duty and also can get bored while not fighting monsters.\"\n elif character == 'victorique_de_blois':\n description = \"Bored by a normal life so she wants cases or other things to entertain her. She dislikes most strangers. She is also very intelligent.\"\n elif character == \"holo\":\n description = \"\"\n elif character == \"dva\":\n description = \"\"\n elif character == \"hibiki\":\n description = \"Qtiest qt of all qts\"\n else:\n description = \"\"\n\n draw.text((37, 23), character.replace('_', ' '), (0, 0, 0), title_font)\n draw.text((255, 550), str(attack), (0, 0, 0), lower_font)\n draw.text((344, 550), str(defense), (0, 0, 0), lower_font)\n draw.text((40, 477), textwrap.fill(description, 37), (0, 0, 0), font=desc_font)\n\n img.save(f\"data/cards/{num}.png\") # there is a thing called BytesIO oldme smh todo\n\n @card.command(name='sell')\n async def card_sell(self, ctx, num: int):\n \"\"\"Sell a card\"\"\"\n _ = await self._get_text(ctx)\n\n await self.__check_for_user(ctx.author.id)\n if num > 6 or num < 1:\n return await ctx.send(_(\"**Out of card range.**\"))\n\n author = ctx.author\n data = await r.table(\"cardgame\").get(str(author.id)).run(self.bot.r_conn)\n cards = data[\"cards\"]\n\n if not await r.table(\"economy\").get(str(author.id)).run(self.bot.r_conn):\n return await ctx.send(_(\"❌ | **You don't have a bank account to sell your cards, make one with `n!register`**\"))\n\n try:\n card = cards[num-1]\n except:\n return await ctx.send(_(\"No cards in this slot...\"))\n\n cardname = card[\"name\"]\n cardname_en = str(cardname).replace('_', ' ').title()\n attack = card[\"attack\"]\n defense = card[\"defense\"]\n\n cardprice = int(random.randint(10000, 15000) + (((attack * .25) + (defense * .25)) * 1000))\n\n await ctx.send(_(\"%s, type `yes` to sell **%s** for %s\") % (author.mention, cardname_en, cardprice))\n\n def check(m):\n return m.channel == ctx.message.channel and m.author == author\n\n try:\n x = await self.bot.wait_for('message', check=check, timeout=15.0)\n if not str(x.content).lower() == \"yes\":\n return await ctx.send(_(\"❌ | **Cancelled Transaction.**\"))\n except asyncio.TimeoutError:\n await ctx.send(_(\"❌ | **Cancelled Transaction.**\"))\n return\n\n after_check = await r.table(\"cardgame\").get(str(author.id)).run(self.bot.r_conn)\n if after_check != data:\n await self.__post_to_hook(\"Card Sell Fail 😤😤\", author, 0)\n return await ctx.send(_(\"Card has already been sold\"))\n\n await r.table(\"cardgame\").get(str(author.id)).update({\"cards\": 
r.row[\"cards\"].delete_at(num-1)}).run(self.bot.r_conn)\n economy = await r.table(\"economy\").get(str(author.id)).run(self.bot.r_conn)\n await r.table(\"economy\").get(str(author.id)).update({\"balance\": economy[\"balance\"] + cardprice}).run(self.bot.r_conn)\n\n await ctx.send(_(\"Sold %s for %s\") % (cardname_en, cardprice))\n await self.__post_to_hook(\"Sold card\", author, cardprice)\n\n @card.command(name='list')\n async def card_list(self, ctx):\n \"\"\"List your cards\"\"\"\n await self.__check_for_user(ctx.author.id)\n author = ctx.message.author\n\n data = await r.table(\"cardgame\").get(str(author.id)).run(self.bot.r_conn)\n cards = data[\"cards\"]\n\n table = PrettyTable()\n table.field_names = [\"Number\", \"Card\", \"Attack\", \"Defense\"]\n\n cardnum = 0\n displaynum = 1\n for x in range(6):\n try:\n card = cards[cardnum]\n table.add_row([displaynum, card[\"name\"].replace(\"_\", \" \").title(), card[\"attack\"], card[\"defense\"]])\n except:\n table.add_row([displaynum, \"Empty\", \"0\", \"0\"])\n\n cardnum += 1\n displaynum += 1\n\n await ctx.send(\"```\\n%s\\n```\" % table)\n\n @card.command(name=\"display\", aliases=[\"show\"])\n async def card_display(self, ctx, num: int):\n \"\"\"Display your card(s)\"\"\"\n await ctx.trigger_typing()\n _ = await self._get_text(ctx)\n await self.__check_for_user(ctx.author.id)\n if num > 6 or num < 1:\n return await ctx.send(_(\"**Out of card range.**\"))\n\n data = await r.table(\"cardgame\").get(str(ctx.author.id)).run(self.bot.r_conn)\n cards = data[\"cards\"]\n\n try:\n card = cards[num-1]\n except:\n return await ctx.send(_(\"Empty Slot...\"))\n\n num = ctx.author.id\n\n character_name = card[\"name\"]\n character_name_en = str(character_name).replace('_', ' ').title()\n attack = card[\"attack\"]\n defense = card[\"defense\"]\n self._generate_card(character_name, num, attack, defense)\n\n embed = discord.Embed(color=0xDEADBF, title=character_name_en)\n embed.add_field(name=_(\"Attack\"), value=str(attack))\n embed.add_field(name=_(\"Defense\"), value=str(defense))\n\n await ctx.send(file=discord.File(f'data/cards/{num}.png'), embed=embed.set_image(url=f'attachment://{num}.png'))\n os.remove(\"data/cards/%s.png\" % num) # smh\n\n @card.command(name='generate', hidden=True)\n @commands.is_owner()\n async def card_gen(self, ctx, character: str = \"shiro\", attack: int = 1, defense: int = 1):\n card_name = f\"data/{character}.jpg\"\n img = Image.open('data/card.jpg')\n _character = Image.open(card_name).resize((314, 313))\n\n draw = ImageDraw.Draw(img)\n title_font = ImageFont.truetype(\"data/fonts/card.ttf\", 40)\n lower_font = ImageFont.truetype(\"data/fonts/card.ttf\", 20)\n\n img.paste(_character, (52, 114))\n\n draw.text((37, 23), character.replace('_', ' '), (0, 0, 0), title_font)\n draw.text((255, 550), str(attack), (0, 0, 0), lower_font)\n draw.text((344, 550), str(defense), (0, 0, 0), lower_font)\n\n temp = BytesIO()\n img.save(temp, format=\"png\")\n temp.seek(0)\n await ctx.send(file=discord.File(fp=temp, filename=\"generated.png\"),\n embed=discord.Embed(color=0xDEADBF).set_image(url=f'attachment://generated.png'))\n\n @card.command(name='forcegive', hidden=True)\n @commands.is_owner()\n async def forcegive(self, ctx, user:discord.Member=None):\n if user is None:\n user = ctx.author\n character_loli = str(random.choice(list_)).lower().replace(' ', '_')\n data = await r.table(\"cardgame\").get(str(user.id)).run(self.bot.r_conn)\n cards = data[\"cards\"]\n cards.append({\n \"name\": character_loli,\n \"attack\": 
random.randint(1, 50),\n \"defense\": random.randint(1, 50)\n })\n # fixed: the card must be written to the target user's row, not the author's\n await r.table(\"cardgame\").get(str(user.id)).update({\"cards\": cards}).run(self.bot.r_conn)\n await ctx.send(\"Gave %s\" % character_loli)\n\ndef setup(bot):\n bot.add_cog(CardGame(bot))","sub_path":"modules/cardgame.py","file_name":"cardgame.py","file_ext":"py","file_size_in_byte":24119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"608210927","text":"from functools import reduce\nfrom operator import mul\n\n\ndef cmb(n, r):\n if n < r:\n return 0\n\n r = min(n-r, r)\n if r == 0:\n return 1\n over = reduce(mul, range(n, n - r, -1))\n under = reduce(mul, range(1, r + 1))\n return over // under\n\n\ndef solve(n):\n # 12 17 0 2 7\n p = [set() for _ in range(27)]\n for i in range(n):\n s = input()\n p[ord(s[0])-65].add(s)\n\n sum_of_elems = len(p[12])+len(p[17])+len(p[0])+len(p[2])+len(p[7])\n all_cmb = cmb(sum_of_elems, 3)\n\n for j in [12, 17, 0, 2, 7]:\n count_of_elem = len(p[j])\n all_cmb -= cmb(count_of_elem, 2) * (sum_of_elems - count_of_elem)\n all_cmb -= cmb(count_of_elem, 3)\n\n return all_cmb\n\n\nif __name__ == '__main__':\n n = int(input())\n print(solve(n))\n","sub_path":"Python/March.py","file_name":"March.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"366423797","text":"from chalicelib.constants import *\nimport pandas as pd\nimport numpy as np\n\ndef append_data(df1, df2):\n ''' Appends the two dataframes given and returns the appended df.\n '''\n df_new = pd.concat([df1, df2], axis=0, ignore_index=True)\n return df_new\n\ndef basic(df):\n '''\n This function makes the column names lowercase, drops missing values from student and makes \n student an integer type.\n '''\n # make column names lowercase\n df.columns = df.columns.str.lower().str.strip()\n \n if \"student\" in df.columns:\n # drop NAs from student\n df = df.dropna(subset=['student'])\n # change data type of student\n df.student.astype(int, copy=False, errors='ignore')\n return df\n\ndef clean_columns(colnames,alternateColNames = False):\n '''\n Function to clean column names and map them to human-readable terms\n '''\n # Will convert column names to lower, get rid of pound symbol\n colParsed = pd.Series(colnames).str.lower().str.strip().str.replace('#','')\n # Replace with human readable terms\n if alternateColNames:\n mappedCols = colParsed.replace(COLUMN_NAME_MAP_2)\n else:\n mappedCols = colParsed.replace(COLUMN_NAME_MAP)\n return mappedCols\n\ndef col_missing_vals(df, columnName):\n ''' Generate a binary column that indicates whether the given column has a missing value.\n Provide the dataframe and the column of interest. 
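The inline TODO in _generate_card above ("there is a thing called BytesIO oldme smh todo") points at replacing the save-then-os.remove round trip in card_display with an in-memory buffer, the same pattern card_gen already uses. A small sketch of that route, assuming discord.py's File API and a PIL image like the one _generate_card builds; the helper name is made up:

from io import BytesIO

import discord

def card_to_discord_file(img, filename="card.png"):
    # encode the rendered card straight into an in-memory buffer
    buf = BytesIO()
    img.save(buf, format="PNG")
    buf.seek(0)  # rewind so discord.File reads from the beginning
    return discord.File(fp=buf, filename=filename)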
The output will be the dataframe with one \n additional column named {columnName}_miss that has a 1 for missing values in columnName\n and a 0 for a non-missing value in columnName.\n '''\n replace_map = {1:0, 2:0, 3:0, 0:0, np.NaN:1} # set the replacement schema\n newcol = columnName + \"_miss\" # generate the new column name\n df[newcol] = df[columnName].replace(replace_map) # make the new column\n return df\n\ndef convert_columns(data,beh_map,sim_map):\n '''\n convert categorical column to numeric \n data would be dataframe, beh_map is the map that changing the categorical respond in behaviour columns to numeric \n sim_map is the map that changing the categorical respond in sim columns to numeric \n '''\n data.loc[:,'beh_fidgeting':'beh_mood_changes']=data.loc[:,'beh_fidgeting':'beh_mood_changes'].replace(beh_map)\n data.loc[:,'sim_nervous' :'sim_enough_time']=data.loc[:,'sim_nervous' :'sim_enough_time'].replace(sim_map)\n\ndef convert_numeric(data):\n '''\n convert string columns to float for later scalling use\n '''\n data.loc[:,'app_coach_stu':'app_beh_manage_teach']=data.loc[:,'app_coach_stu':'app_beh_manage_teach'].astype('float')\n if 'sim_fbsk' in data.columns:\n data[['sim_fbsk']]=data[['sim_fbsk']].astype('float')\n if 'sim_cmsk' in data.columns:\n data[['sim_cmsk']]=data[['sim_cmsk']].astype('float')\n\ndef dates(df, colname):\n ''' Makes the given column a string type with format mmddyyyy for easier querying.\n '''\n # make column the format mmddyyyy\n df[colname] = pd.to_datetime(df[colname], errors='ignore')\n df[colname] = df[colname].apply(lambda x: x.strftime('%m%d%Y'))\n return df\n\ndef destring(df, included=None, first=None, last=None):\n ''' Turns the data type of all columns provided to float. Provide the dataframe and either\n a list of specified columns as included or the first and last column name where you also\n want the function to apply to all columns in between.\n '''\n if included == None: # turn first and last into a list\n included = list(df.loc[:,first:last].columns)\n for col in included: # loop through all columns specified\n df[col] = df[col].astype(float) # change the datatype\n return df\n\ndef drop_cols_by_name(df,start,end):\n '''\n Function to drop a slice of columns by name\n '''\n # Get column names\n columns = df.columns.values\n # Get the start and end index of provided columns\n startIndex = np.where(columns==start)[0][0]\n endIndex = np.where(columns==end)[0][0] + 1\n # Drop this slice of columns\n df.drop(df.columns[startIndex:endIndex], axis=1, inplace=True)\n\ndef drop_duplicate(data):\n '''\n function to drop duplicate observation\n data would be dataframe that need to check the duplicate\n '''\n #convert the startdate to datestamp()\n data['startdate']=pd.to_datetime(data.startdate)\n duplicated=data[data.duplicated(['email'],keep=False)]\n duplicated_email=pd.unique(duplicated['email'])\n column_n=len(data.columns)\n #for each duplicated email\n for email in duplicated_email:\n #find the observation contains duplicated email\n rows=duplicated.loc[duplicated['email'] == email]\n #count the na in the observation\n count_NA=rows.isnull().sum(axis=1).tolist()\n min_NA=min(count_NA)\n #if all duplicated observations have all information\n if sum(count_NA)==0:\n #drop the observation other the earliest start date\n drops=rows.loc[rows.startdate!=min(rows['startdate'])]\n email=drops['email']\n startdate=drops['startdate']\n i=data[((data.email == email) &( data.startdate == startdate))].index\n data=data.drop(i)\n #drop the observation with 
less missing value\n else:\n index=count_NA.index(min_NA)\n for i in range(len(count_NA)):\n if i != index:\n a=rows.iloc[i,]\n email=a.email\n startdate=a.startdate\n i=data[((data.email == email) &( data.startdate == startdate))].index\n data=data.drop(i)\n return data\n\ndef fix_email_add(string):\n '''\n helper function that fixed the email address to correct format\n '''\n result=''\n #search if the email contains '@'\n index=string.find(\"@\")\n #if the email do not contain '@'\n #the user only enter computing id\n if index==-1:\n #put @virginia.edu after the computing id\n result=string+'@virginia.edu'\n #changing all the email address to format of computing id@virginia.edu\n #this step is to fix the if has a typo in virginia.edu or entered gmail.com instead\n else:\n result=string[:index]+'@virginia.edu'\n return result \n\ndef format_email(data):\n '''\n function to format all the email address, changing all the input email address to computingid@virginia.edu\n this function is avoid if someone just input there computing id or misspelling the virginia or input the gmail.com instead.\n '''\n data.email =data.email.str.strip()\n data.email =data.email.str.lower()\n email_fix = data.loc[~data[\"email\"].str.contains(\"virginia\")]\n email_fix['email']=email_fix['email'].apply(lambda x:fix_email_add(x))\n for i in email_fix.index:\n data.at[i,\"email\"]=email_fix.at[i,'email'] \n\n\ndef generate_calculated_columns(df):\n '''\n Function to generate calculated columns\n '''\n # For each calculated column...\n for name,cols in CALCULATED_COLUMNS:\n # Divide first column name by second\n if len(cols)==1:\n df[name]=df[cols[0]]\n else:\n df[name]=df[cols[0]] / df[cols[1]]\n\ndef generate_app_scale(data):\n '''\n function to generate positibe and negative app scale\n '''\n data['manage_app_negative']= data.loc[:,'app_coach_stu':'app_discp_refer'].mean(axis=1)\n data['manage_app_positive']=data.loc[:,'app_confer_stu':'app_beh_manage_teach'].mean(axis=1)\n \n\n\ndef isnumber(x):\n try:\n float(x)\n return True\n except:\n return False\n\n\ndef generate_behavior_columns(df):\n '''\n Function to generate Behavior Column totals\n '''\n # For each set of behavior columns...\n for name,cols in BEHAVIOR_COLUMNS:\n # Edge case: have two columns we want generated for this\n if name == 'total_nb_se':\n # Sum the # of 1's\n df['tot_nb'] = (df[cols]==1).sum(axis=1)\n # Sum the # of 2's\n df['tot_se'] = (df[cols]==2).sum(axis=1)\n else:\n # Sum the set of behavior columns row-wise\n cols = df[cols]\n df[name]=cols[cols.applymap(isnumber)].fillna(0).astype('int64').sum(axis=1)\n \ndef generate_duplicate_column(df):\n '''\n Function to generate double_code column\n '''\n # get vidCount\n vidCount = df.groupby('vid').size().reset_index()\n vidCount['duplicate']= np.where(df.groupby('vid').size().reset_index()[0] > 1, 1,0)\n # Check if duplicate\n df['double_code'] = df['vid'].replace({row['vid']:row['duplicate'] for index,row in vidCount.iterrows()})\n \ndef generate_iowa_score_scale(data):\n '''\n function to generate Iowa score\n '''\n data['beh_rating_opdefiant']=data.loc[:,'beh_quarrelsome': 'beh_uncooperative'].mean(axis=1)\n data['beh_rating_impulsive']=data.loc[:,'beh_fidgeting':'beh_short_attention'] .mean(axis=1)\n data['beh_rating'] =data.loc[:,'beh_fidgeting':'beh_mood_changes'].mean(axis=1)\n #label var beh_rating_opdefiant \"Iowa Connors Operational Defiant\"\n #label var beh_rating_impulsive \"Iowa Connors Impulsive\"\n #label var beh_rating \"Iowa Connors Overall\"\n \n\ndef 
generate_score_variables(df):\n '''\n Function to convert factor variables to encoding\n '''\n # Iterate through columns\n for column in df.columns:\n # If column a factor\n if column in COLUMN_FACTOR_MAP:\n # Replace it with the encoding\n df[column] = df[column].map(FACTORS[COLUMN_FACTOR_MAP[column]]).astype('Int64')\n \ndef lower_skip(df):\n ''' Makes the column names lowercase and deletes the first row of the dataframe.\n '''\n # clean column names\n df.columns = df.columns.str.lower().str.strip()\n # delete the first row\n df = df.iloc[1:,:]\n return df\n\ndef new_col_mean(df, name=\"RowMean\", included=None, first=None, last=None):\n '''\n Provide the dataframe and the name of the new column you want created. Then either pass a \n list of columns to include in the calculation as included or the first and last column if\n all columns in between should also be included.\n '''\n if included == None: # turn first and last into a list\n df[name] = df.loc[:,first:last].mean(axis=1)\n else: # generate the new column\n df[name] = df[included].mean(axis=1)\n \n return df\n\ndef recode(df, included):\n '''Specifically used in File 1 to swap 1s and 5s and swap 4s and 2s in \n a new column that has r_ prior to the original column's name. Provide the dataframe\n and a list of the columns for which this should be executed. Note that the original\n column will be deleted after its corresponding new column is added.\n '''\n replace_map = {1:5, 2:4, 4:2, 5:1} # the replacement to take place\n for var in included: # loop through all columns in included\n name = \"r_\" + str(var) # generated the new column name\n df[name] = df[var] # set the new column values equal to the old column values\n df[name] = df[name].replace(replace_map) # make the replacement of values\n df = df.drop([var], axis=1) # drop the original column\n return df\n\ndef replace_emails(dataframe, csv):\n ''' Provide a dataframe and a csv name that has the first column of student ids and the second column \n as the corresponding email address. 
This will replace the current email for that\n student with the one provided or simply add it if they have a missing email.\n '''\n # fixed: type(csv)=='string' can never be true; compare against the type itself\n if isinstance(csv, str):\n replacements = pd.read_csv(csv)\n else:\n replacements = csv\n for index, row in replacements.iterrows():\n dataframe.loc[dataframe.student==int(row[0]), \"email\"] = row[1]\n return dataframe\n\ndef reverse_approach_scale(data,app_map):\n '''\n Reverse coding management approaches scale \n '''\n app_columns=[x for x in data.columns if x.startswith(\"app\")]\n for x in app_columns:\n name_rc=x+'_rc'\n data[name_rc]=data[x].replace(app_map)\n \n\n","sub_path":"aws/sera-preprocessing/chalicelib/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":11958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"57673180","text":"from online_lda import OnlineLDA\nfrom sklearn.decomposition import LatentDirichletAllocation\nimport numpy as np\n\n\ndef main():\n X = np.array([[0, 1, 0, 2, 2, 0], [1, 0, 1, 1, 3, 3]])\n\n olda = OnlineLDA(n_topics=2)\n olda.partial_fit(X)\n print(olda.lambda_)\n\n lda = LatentDirichletAllocation(n_topics=2, total_samples=2)\n lda.partial_fit(X)\n print(lda.components_)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"python/lda/sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"589037184","text":"# Tencent is pleased to support the open source community by making ncnn available.\n#\n# Copyright (C) 2023 THL A29 Limited, a Tencent company. All rights reserved.\n#\n# Licensed under the BSD 3-Clause License (the \"License\"); you may not use this file except\n# in compliance with the License. You may obtain a copy of the License at\n#\n# https://opensource.org/licenses/BSD-3-Clause\n#\n# Unless required by applicable law or agreed to in writing, software distributed\n# under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR\n# CONDITIONS OF ANY KIND, either express or implied. 
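A short usage sketch for the missing-indicator pattern in col_missing_vals above; the frame and column name are made-up examples, and isna() is an equivalent shortcut to the 0-3 replace map when only missingness matters:

import numpy as np
import pandas as pd

df = pd.DataFrame({"beh_fidgeting": [1, 2, np.nan, 0, 3]})
# same result as the replace-map approach: 1 where the value is missing
df["beh_fidgeting_miss"] = df["beh_fidgeting"].isna().astype(int)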
See the License for the\n# specific language governing permissions and limitations under the License.\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom packaging import version\n\nclass Model(nn.Module):\n def __init__(self):\n super(Model, self).__init__()\n\n self.w0 = nn.Parameter(torch.rand(12, 15))\n self.w1 = nn.Parameter(torch.rand(12, 15))\n self.w2 = nn.Parameter(torch.rand(12, 15))\n self.w3 = nn.Parameter(torch.rand(12, 15))\n self.w4 = nn.Parameter(torch.rand(12, 15))\n self.w5 = nn.Parameter(torch.rand(12, 15))\n self.c0 = nn.Parameter(torch.ones(1))\n self.c1 = nn.Parameter(torch.ones(3) + 0.2)\n\n def forward(self, x):\n c10, c11, _ = torch.unbind(self.c1)\n x0 = x * 10 + self.c0 - c11\n x = x + self.w0 + x0\n x = x - self.w1 + x0.float()\n x = x * self.w2 + x0\n x = x / self.w3 + x0\n x = x // self.w4 + x0\n if version.parse(torch.__version__) >= version.parse('2.0'):\n x = x % self.w5 + x0\n else:\n x = torch.fmod(x, self.w5) + x0\n y = x.int()\n return x, y & 3, y | 3, y ^ 3, y << 3, y >> 3\n\ndef test():\n net = Model()\n net.eval()\n\n torch.manual_seed(0)\n x = torch.rand(12, 15)\n\n a = net(x)\n\n # export torchscript\n mod = torch.jit.trace(net, x)\n mod.save(\"test_pnnx_expression.pt\")\n\n # torchscript to pnnx\n import os\n os.system(\"../src/pnnx test_pnnx_expression.pt inputshape=[12,15]\")\n\n # pnnx inference\n import test_pnnx_expression_pnnx\n b = test_pnnx_expression_pnnx.test_inference()\n\n for a0, b0 in zip(a, b):\n if not torch.equal(a0, b0):\n return False\n return True\n\nif __name__ == \"__main__\":\n if test():\n exit(0)\n else:\n exit(1)\n","sub_path":"tools/pnnx/tests/test_pnnx_expression.py","file_name":"test_pnnx_expression.py","file_ext":"py","file_size_in_byte":2412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"593461663","text":"\"\"\"\n File including World class which\n implementing methods useful for\n handling and building the game world\n\"\"\"\n\nfrom constants.constants import *\nfrom random import random, randrange\n\n\nclass World(object):\n \"\"\" Class representing world in game \"\"\"\n\n def __init__(self, width: int, height: int):\n \"\"\" Initialize the world\n @:param width -> width of the world\n @:param height -> height of the world \"\"\"\n\n assert width > 0, 'World width has to be positive value'\n assert height > 0, 'World height has to be positive value'\n\n self.__width = width\n self.__height = height\n self.__world = [[EMPTY_FIELD_VALUE for i in range(width)].copy() for i in range(height)]\n self.__agent_x = 0\n self.__agent_y = 0\n\n self.generate_world()\n\n def generate_world(self):\n \"\"\" generate random world with the agent, obstacles and target point \"\"\"\n agent_x = randrange(0, self.__width)\n agent_y = randrange(0, self.__height)\n self.__world[agent_y][agent_x] = AGENT_FIELD_VALUE\n\n for i in range(OBSTACLES_COUNT):\n obstacle_x = randrange(0, self.__width)\n obstacle_y = randrange(0, self.__height)\n\n for h in range(-MAX_OBSTACLE_HEIGHT//2, MAX_OBSTACLE_HEIGHT//2):\n for w in range(-MAX_OBSTACLE_WIDTH//2, MAX_OBSTACLE_WIDTH//2):\n if 0 <= obstacle_x + w < self.__width and 0 <= obstacle_y + h < self.__height:\n if random() >= OBSTACLE_PROBABILITY and self.__world[obstacle_y+h][obstacle_x+w] == EMPTY_FIELD_VALUE:\n self.__world[obstacle_y + h][obstacle_x + w] = OBSTACLE_FIELD_VALUE\n\n destination_x = randrange(0, self.__width)\n destination_y = randrange(0, self.__height)\n self.__world[destination_y][destination_x] 
= DESTINATION_FIELD_VALUE\n\n\n def get_environment_vector(self, x: int, y: int):\n \"\"\" Return vector of values representing environment around the point (x, y)\"\"\"\n assert 0 <= x <= self.__width, 'Bad point coordinates'\n assert 0 <= y <= self.__height, 'Bad point coordinates'\n\n env_vector = [OBSTACLE_FIELD_VALUE for i in range(9)]\n\n if y - 1 >= 0:\n for i in range(-1, 2, 1):\n if 0 <= x + i < self.__width:\n env_vector[i + 1] = self.__world[y - 1][x + i]\n\n for i in range(-1, 2, 1):\n if 0 <= x + i < self.__width:\n env_vector[i + 4] = self.__world[y][x + i]\n\n if y + 1 < self.__height:\n for i in range(-1, 2, 1):\n if 0 <= x + i < self.__width:\n env_vector[i + 7] = self.__world[y + 1][x + i]\n\n return env_vector\n\n def __str__(self):\n \"\"\" define how world should be shown as string \"\"\"\n world_as_string = \"\"\n for world_row in self.__world:\n row = \"\"\n for field in world_row:\n if field == OBSTACLE_FIELD_VALUE:\n row += \"# \"\n elif field == DESTINATION_FIELD_VALUE:\n row += \"$ \"\n elif field == AGENT_FIELD_VALUE:\n row += \"A \"\n else:\n row += \". \"\n world_as_string += row + '\\n'\n return world_as_string\n\n def set_width(self, width: int):\n \"\"\" Set new world width \"\"\"\n assert width > 0, 'World width has to be positive value'\n self.__width = width\n\n def set_height(self, height: int):\n \"\"\" Set new world width \"\"\"\n assert height > 0, 'World height has to be positive value'\n self.__height = height\n\n def set_agent_point(self, point: tuple[int, int]):\n \"\"\" Set new agent point coordinates \"\"\"\n assert 0 <= point[0] < self.__width, 'Agent point has bad coordinates'\n assert 0 <= point[1] < self.__height, 'Agent point has bad coordinates'\n self.__agent_x = point[0]\n self.__agent_y = point[1]\n\n def get_point(self, x: int, y: int):\n \"\"\" Return point from game world \"\"\"\n assert 0 <= x < self.__width, 'Point beyond the world'\n assert 0 <= y < self.__height, 'Point beyond the world'\n return self.__world[y][x]\n\n def get_agent_point(self):\n \"\"\" Return agent point coordinates as a tuple \"\"\"\n return self.__agent_x, self.__agent_y\n\n def get_width(self):\n \"\"\" Return world width \"\"\"\n return self.__width\n\n def get_height(self):\n \"\"\" Return world height \"\"\"\n return self.__height\n\n\nif __name__ == '__main__':\n test_world = World(20, 20)\n print(test_world)\n print(test_world.get_environment_vector(5, 5))\n print(test_world.get_environment_vector(0, 0))\n print(test_world.get_environment_vector(19, 19))\n print(test_world.get_environment_vector(0, 19))\n print(test_world.get_environment_vector(19, 0))\n","sub_path":"world/world.py","file_name":"world.py","file_ext":"py","file_size_in_byte":4914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"197846896","text":"# Hangman game\r\n# Number of guesses and players can be changed\r\n# Each turn a player can guess an entire word or a letter\r\n# after guessing the turn ends and the next player's turn starts\r\n# guessing a letter in the secret word reveals the occurences\r\n# of that letter in the secret word placeholder\r\n# guessing an incorrect word or letter doesn't result in anything\r\n# the first person to guess the entire word (either by typing\r\n# the entire word or plugging in the last letter) wins the game\r\n# if the players run out of word the computer wins and the secret word\r\n# is revealed\r\n\r\nimport random\r\nfrom tkinter import *\r\n\r\nNR_OF_GUESSES = 10\r\nNR_OF_PLAYERS = 
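A compact, equivalent formulation of the 3x3 neighbourhood scan in get_environment_vector, shown as a standalone sketch (OBSTACLE stands in for OBSTACLE_FIELD_VALUE); unlike the method above, it also bounds-checks the row index on the centre row:

OBSTACLE = 1  # stand-in for OBSTACLE_FIELD_VALUE

def neighborhood(world, x, y, default=OBSTACLE):
    # row-major scan of the 3x3 window around (x, y); anything outside the
    # grid is reported as an obstacle, matching the method's prefill
    h, w = len(world), len(world[0])
    return [
        world[y + dy][x + dx] if 0 <= x + dx < w and 0 <= y + dy < h else default
        for dy in (-1, 0, 1)
        for dx in (-1, 0, 1)
    ]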
1\r\n\r\nclass Hangman:\r\n def __init__(self):\r\n self.__main_window = Tk()\r\n self.__main_window.title(\"Hangman\")\r\n self.__main_window.option_add(\"*Font\", \"Arial 18\")\r\n\r\n # ===============================\r\n # Column 0: Buttons\r\n\r\n self.__guess_button = Button(self.__main_window, text=\"Guess\", command=self.guess)\r\n self.__guess_button.grid(row=1, column=0, sticky=W + E)\r\n\r\n Button(self.__main_window, text=\"New game\", command=self.new_game) \\\r\n .grid(row=2, column=0, sticky=W + E)\r\n Button(self.__main_window, text=\"Quit\", command=self.__main_window.destroy)\\\r\n .grid(row=3, column=0, sticky=W + E)\r\n\r\n # ===============================\r\n # Column 1: placeholder, entry, instructions, guesses left\r\n\r\n self.__placeholder_label = Label(self.__main_window)\r\n self.__placeholder_label.grid(row=0, column=1)\r\n\r\n self.__guess = Entry(self.__main_window)\r\n self.__guess.grid(row=1, column=1, sticky=W + E)\r\n\r\n self.__instructions_label = Label(self.__main_window)\r\n self.__instructions_label.grid(row=2, column=1)\r\n\r\n self.__guesses_left_label = Label(self.__main_window)\r\n self.__guesses_left_label.grid(row=3, column=1)\r\n\r\n self.__wrong_letters_label = Label(self.__main_window)\r\n self.__wrong_letters_label.grid(row=4, column=1)\r\n\r\n self.new_game()\r\n self.__main_window.mainloop()\r\n\r\n\r\n # method that initiates all the values\r\n def new_game(self):\r\n self.__guesses_left = NR_OF_GUESSES\r\n\r\n # getting a random word from a dictionary, lowercase\r\n self.__dictionary = open(\"words.txt\", 'r')\r\n self.__words = self.__dictionary.read().splitlines()\r\n self.__secret_word = random.choice(self.__words)\r\n self.__secret_word = self.__secret_word.lower()\r\n\r\n # initialising the placeholder and displaying it\r\n self.__word_placeholder = \"-\" * len(self.__secret_word)\r\n self.__placeholder_label.configure(text=self.__word_placeholder)\r\n\r\n self.__wrong_letters = []\r\n\r\n self.__whose_turn = 0\r\n self.__instructions = \"Player \" + str(self.__whose_turn + 1) + \" guess a letter or a word\"\r\n\r\n self.update_texts()\r\n\r\n # activating the guess button\r\n self.__guess_button.configure(state=NORMAL)\r\n\r\n def update_texts(self):\r\n self.__guesses_left_label.configure(text=\"Guesses left: \" + str(self.__guesses_left))\r\n self.__instructions_label.configure(text=self.__instructions)\r\n self.__wrong_letters_label.configure(text=\"Wrong letters:\" + \", \".join(self.__wrong_letters))\r\n\r\n # method tied to the guess button\r\n def guess(self):\r\n\r\n guess = self.__guess.get()\r\n self.__guess.delete(0, END)\r\n guess = guess.lower()\r\n\r\n # lowering the guesses count by one\r\n self.__guesses_left -= 1\r\n self.update_texts()\r\n\r\n # checking the guess\r\n while True:\r\n if guess == self.__secret_word:\r\n self.update_dashes(guess)\r\n self.game_over(\"win\")\r\n break\r\n elif guess in self.__secret_word and len(guess) == 1:\r\n self.update_dashes(guess)\r\n if self.__placeholder_label.cget(\"text\") == self.__secret_word:\r\n self.game_over(\"win\")\r\n break\r\n\r\n elif guess not in self.__wrong_letters and len(guess) == 1:\r\n self.__wrong_letters.append(guess)\r\n\r\n\r\n if self.__guesses_left == 0:\r\n self.game_over(\"loss\")\r\n break\r\n\r\n self.change_turn()\r\n\r\n break\r\n\r\n # this method is called after each incorrect guess attempt\r\n def change_turn(self):\r\n\r\n self.__whose_turn = (self.__whose_turn + 1) % NR_OF_PLAYERS\r\n self.__instructions = \"Player \" + 
str(self.__whose_turn + 1) + \" guess a letter or a word\"\r\n self.update_texts()\r\n\r\n # this method updates the placeholder\r\n def update_dashes(self, guess):\r\n # revealing the word\r\n if len(guess) > 1:\r\n self.reveal()\r\n\r\n # otherwise updating the dashes\r\n else:\r\n # breaking up the placeholder into a list of its letters and dashes\r\n placeholder_list = list(self.__placeholder_label.cget(\"text\"))\r\n\r\n # replacing each dash by the correctly guessed letter in the list\r\n # and joining it into a new placeholder\r\n for i in range(len(placeholder_list)):\r\n if self.__secret_word[i] == str(guess):\r\n placeholder_list[i] = str(guess)\r\n self.__placeholder_label.configure(text=\"\".join(placeholder_list))\r\n\r\n # this method reveals the word\r\n def reveal(self):\r\n self.__placeholder_label.configure(text=self.__secret_word)\r\n\r\n # this method is called when the word is correctly guessed\r\n # or the players run out of guesses\r\n def game_over(self, state):\r\n if state == \"win\":\r\n self.__instructions = \"Player \" + str(self.__whose_turn + 1) + \" has won!\"\r\n elif state == \"loss\":\r\n self.__instructions = \"The computer has won :(\"\r\n self.reveal()\r\n\r\n self.update_texts()\r\n self.__guess_button.configure(state=DISABLED)\r\n\r\n\r\ndef main():\r\n Hangman()\r\n\r\n\r\nmain()","sub_path":"hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":6056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"197489000","text":"import time\n\nfrom jj2.snek.handler import PortHandler\nfrom jj2.snek.utils import fetch_one\n\n\nclass MOTDHandler(PortHandler):\n \"\"\"\n Serve Message of the Day\n \"\"\"\n\n async def handle_data(self):\n \"\"\"\n Return MOTD and immediately close connection\n \"\"\"\n self.server_list.log.info(\"Sending MOTD to %s\" % self.ip)\n\n motd = fetch_one(\"SELECT value FROM settings WHERE item = ?\", ('motd',))\n expires = fetch_one(\"SELECT value FROM settings WHERE item = ?\", ('motd_expires',))\n\n if not expires:\n expires = {\"value\": time.time() + 10}\n\n if motd and motd != \"\" and int(time.time()) < expires[\"value\"]:\n await self.send(motd[\"value\"] + \"\\n\")\n else:\n await self.send(\"\")\n\n self.close()\n","sub_path":"handlers/motd.py","file_name":"motd.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"539281730","text":"from uuid import uuid4\nfrom django.db import models\nfrom rest_framework import serializers\n\nfrom .nanny_application import NannyApplication\n\n\nclass InsuranceCover(models.Model):\n \"\"\"\n Model for INSURANCE_COVER table\n \"\"\"\n objects = models.Manager()\n\n insurance_cover_id = models.UUIDField(primary_key=True, default=uuid4)\n application_id = models.OneToOneField(\n NannyApplication, on_delete=models.CASCADE, db_column='application_id')\n public_liability = models.BooleanField(blank=True, null=True, default=None)\n\n @property\n def timelog_fields(self):\n \"\"\"\n Specify which fields to track in this model once application is returned.\n :return: tuple of fields which needs update tracking when application is returned\n \"\"\"\n return (\n 'public_liability',\n )\n\n class Meta:\n db_table = 'INSURANCE_COVER'\n\n\nclass InsuranceCoverSerializer(serializers.ModelSerializer):\n class Meta:\n model = InsuranceCover\n fields = '__all__'\n\n def get_bool_as_string(self, bool_field):\n if bool_field:\n return 'Yes'\n 
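The placeholder update in update_dashes above walks the secret word and swaps dashes for the guessed letter; the same step as a standalone function (names here are illustrative):

def reveal_letter(secret, placeholder, guess):
    # keep already-revealed characters, expose every position matching guess
    return "".join(s if s == guess else p for s, p in zip(secret, placeholder))

# reveal_letter("hangman", "-------", "a") == "-a---a-"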
else:\n return 'No'\n\n def get_summary_table(self):\n data = self.data\n return [\n {\"title\": \"Insurance cover\", \"id\": data['insurance_cover_id']},\n {\"name\": \"Do you have public liability insurance?\",\n \"value\": self.get_bool_as_string(data['public_liability']),\n 'pk': data['insurance_cover_id'],\n \"reverse\": \"insurance:Public-Liability\",\n \"change_link_description\": \"answer to having public liability insurance\"}\n ]\n\n\n","sub_path":"application/models/insurance_cover.py","file_name":"insurance_cover.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"131661756","text":"\"\"\"Problem 14\"\"\"\n\ncollatz = [0] * 1000001\nn = 2\nmax_chain = 0\n\nwhile n < 1000000:\n\tm = n\n\tcount = 0\n\twhile(m!=1):\n\t\tif m<1000000 and collatz[m]:\n\t\t\tcount += collatz[m] + 1\n\t\t\tm = 1\n\t\telse:\n\t\t\tif m%2 == 0:\n\t\t\t\tm = m // 2\n\t\t\telse:\n\t\t\t\tm = 3*m + 1\n\t\t\tcount += 1\t\n\n\tcollatz[n] = count\n\tif count > max_chain:\n\t\tmax_chain = count\n\t\tstarting_number = n\n\tn+=1\nprint(starting_number)\t\t\t\t\t","sub_path":"Problem 14.py","file_name":"Problem 14.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"636580511","text":"\"\"\"\nThese classes represents building block of our processing.\nThey should have two working modes, namely batch mode and real-time mode. \nIn batch mode the full dataset is sent and processed before returning any data.\nIn real-time mode, data is sent one by one and results outputed each time\n (it can be None, if there is nothing to output).\nBoth these modes shouldn't be mixed,\n or special care has to be taken with internal variables.\n\"\"\"\n\nfrom collections import deque\nimport math\n\n\nclass ApplyWindowedFunction(object):\n \"\"\"\n apply a function on a sliding window on the time series\n \"\"\"\n\n def __init__(self, window, func):\n self.win = int(window)\n self.fun = func\n self.buf = deque([0] * int(window)) \n\n def batch_process(self,data):\n ret = [] \n half = self.win/2\n #prefill\n for i in range(half):\n self.process_next(data[0])\n for i in range(half):\n self.process_next(data[i])\n #process\n for i in range(half, len(data)):\n ret.append(self.process_next((data[i-half][0], data[i-half][1],\n int(data[i][2]))))\n #postfill\n for i in range(len(data)-half, len(data)):\n ret.append(self.process_next((data[i][0], data[i][1],\n int(data[len(data)-1][2]))))\n return ret\n\n def process_next(self, elem):\n self.buf.append(elem[2])\n self.buf.popleft()\n val = self.fun(self.buf)\n return [elem[0], elem[1], val] \n\n\nclass MedianFilteringProcess(ApplyWindowedFunction):\n \"\"\"\n returns a time series of values as the median of the sliding window\n \"\"\"\n\n def __init__(self, window):\n self.win = int(window)\n self.buf = deque([0] * int(window))\n self.fun = self.median\n \n def median(self, tab):\n tmp = sorted(tab)\n if not self.win % 2:\n return int((tmp[self.win/2]+tmp[self.win/2-1])/2)\n return tmp[self.win/2] \n \n\nclass RLEProcess(object):\n \"\"\"\n Run Length Encoding\n Takes a triple and compress contiguous ones that are the same\n \"\"\"\n \n def __init__(self):\n self.previous_t = None\n self.previous_v = None\n\n def batch_process(self, data):\n last = data[0][0]\n tab = []\n for i in range(len(data)-1):\n if data[i][2] != data[i+1][2]:\n tab.append([last, data[i][1], data[i][2]])\n last = data[i+1][0]\n 
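The Problem 14 solver above memoises chain lengths below one million, but its cache-hit branch adds collatz[m] + 1 after the step to m was already counted, so cached-path totals come out one step long (compare collatz[2] = 1 with collatz[3] = 8 for a 7-step chain). A dictionary-memoised sketch with consistent counting; the function name is made up:

cache = {1: 1}  # chain length counted in terms, both endpoints included

def chain_len(n):
    # walk until a cached value is reached, then unwind and cache every term
    path = []
    while n not in cache:
        path.append(n)
        n = n // 2 if n % 2 == 0 else 3 * n + 1
    length = cache[n]
    for m in reversed(path):
        length += 1
        cache[m] = length
    return length

print(max(range(1, 1000000), key=chain_len))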
tab.append([last, data[len(data)-1][1],\n data[len(data)-1][2]])\n return tab\n\n def process_next(self, elem):\n toreturn = None\n if self.previous_t:\n if elem[2] != self.previous_v:\n toreturn = [self.previous_t, elem[1], self.previous_v]\n self.previous_t = elem[0]\n self.previous_v = elem[2]\n else:\n self.previous_t = elem[0]\n self.previous_v = elem[2]\n return toreturn\n \n\nclass SymbolizeProcess(object):\n \"\"\"\n Conversion from raw values to symbol using separators\n Takes triples and outputs the corresponding symbol with the same timestamps\n \"\"\"\n\n def __init__(self, length, separators):\n self.lng = length\n self.sep = separators\n self.current_v = 0\n self.current_c = 0\n self.previous_t = None\n\n def batch_process(self, data):\n tab = []\n for i in range(0, len(data), self.lng):\n # mean of the raw values in this window\n avg = sum(d[2] for d in data[i:i+self.lng])*1.0/self.lng\n j = 0\n while j < len(self.sep) and avg > self.sep[j]:\n j += 1\n tab.append([data[i][0], data[i][1], j])\n return tab\n\n def process_next(self, elem):\n toreturn = None\n self.current_v += elem[2]\n self.current_c += 1\n if not self.previous_t:\n self.previous_t = elem[0]\n\n if self.current_c >= self.lng:\n avg = self.current_v*1.0/self.current_c\n j = 0\n while j < len(self.sep) and avg > self.sep[j]:\n j += 1\n toreturn = [self.previous_t, elem[1], j]\n self.previous_t = elem[0]\n self.current_v = 0\n self.current_c = 0\n return toreturn\n\n\n\nclass ClusterSparseProcess(object):\n \"\"\"\n return the cluster number of the input segment\n represented as a sparse matrix (with dicts)\n nbr parameter is where to start numbering the clusters\n \"\"\"\n\n def __init__(self, epsilon, nbr, minpt=1):\n self.eps = float(epsilon)\n self.nbr = int(nbr)\n self.lookup = {}\n self.count = {} \n self.minpts = minpt\n\n def batch_process(self, segments, data):\n tab = []\n clu = []\n clusters = {}\n for seg in segments:\n sym = self.process_next(seg)[0]\n clu.append(sym[2]-self.nbr)\n j = 0\n for i in range(len(clu)):\n while j < len(data) and data[j][1] <= segments[i][0]:\n tab.append(data[j])\n j += 1\n if clu[i] >= 0 and self.count[clu[i]+self.nbr] >= self.minpts:\n tab.append([segments[i][0], segments[i][1], clu[i]+self.nbr])\n if not clu[i]+self.nbr in clusters:\n clusters[clu[i]+self.nbr] = []\n last = j\n while j < len(data) and data[j][1] <= segments[i][1]:\n j += 1\n clusters[clu[i]+self.nbr].append(data[last:j])\n \n while j < len(data):\n tab.append(data[j])\n j += 1\n return (tab, self.lookup)\n\n def process_next(self, segment):\n dist = self.eps\n clu = -1\n for k, v in self.lookup.iteritems():\n d = self.cosine_distance(segment[2], v)\n if d < dist:\n dist = d\n clu = k\n if clu == -1:\n clu = len(self.lookup)+self.nbr\n self.lookup[clu] = segment[2]\n self.count[clu] = 1\n else:\n self.mean(clu, segment)\n self.count[clu] += segment[1] - segment[0]\n return ([segment[0], segment[1], clu], self.lookup)\n\n def mean(self, c, s):\n one = []\n two = []\n for k,v in s[2].iteritems():\n if not k[0] in one:\n one.append(k[0])\n for k,v in self.lookup[c].iteritems():\n if not k[0] in two:\n two.append(k[0])\n if k in s[2]:\n self.lookup[c][k] = ((self.lookup[c][k]*self.count[c]+\n s[2][k]*(s[1]-s[0]))/\n (self.count[c]+(s[1]-s[0])))\n else:\n if k[0] in one:\n self.lookup[c][k] = ((self.lookup[c][k]*self.count[c])/\n (self.count[c] + (s[1]-s[0])))\n for k,v in s[2].iteritems():\n if not k in self.lookup[c]:\n if k[0] in two:\n self.lookup[c][k] = ((s[2][k]*(s[1]-s[0]))/\n (self.count[c] + (s[1]-s[0])))\n 
else:\n self.lookup[c][k] = s[2][k]\n\n def cosine_distance(self, s1, s2):\n \"\"\"\n actually angular similarity\n \"\"\"\n intersection = 0.0\n s1s = 0.0\n s2s = 0.0\n for k,v in s1.iteritems():\n if not v == 0:\n if k in s2 and not s2[k] == 0.0:\n intersection += (s2[k]) * v\n s1s += v * v\n for v in s2.values():\n if not v == 0:\n s2s += v * v\n theta = intersection/(math.sqrt(s1s)*math.sqrt(s2s))\n return 1-theta\n\n\nclass SegmentSparseProcess(object):\n \"\"\"\n generate segments by applying a threshold on the forecasting error \n from the iteratively updated length-frequencies\n The generated segments are stored as sparse matrices (dicts)\n \"\"\"\n\n def __init__(self, rate, threshold, lmbda, wsize=1, minSeg=5, slack=2):\n self.rate = float(rate)\n self.previous_v = [0, 0, 0]\n self.thr = float(threshold)\n self.wsize = int(wsize)\n self.window = deque([0] * int(wsize))\n self.buf = []\n self.backbuf = []\n self.mat = {}\n self.lmbda = float(lmbda)\n self.min_seg = int(minSeg)\n self.slack = float(slack)\n\n def batch_process(self, data):\n tab = []\n for dat in data:\n res = self.process_next(dat)\n if res:\n tab.append(res)\n return tab \n\n def process_next(self, elem):\n if not elem[1]-elem[0] > 0:\n return None\n ret = None\n steps = elem[1]-elem[0]\n dist = self.error(self.previous_v, elem)\n self.updateFreq(int(self.previous_v[2]), int(elem[2]), steps, self.lmbda)\n self.previous_v = elem\n self.window.append(dist)\n self.window.popleft()\n if sum(self.window) > self.wsize * self.thr:\n if len(self.buf) > (self.min_seg): #min size of segment\n ret = self.buildSegment()\n self.mat = {}\n self.backbuf = []\n else:\n self.backbuf.extend(self.buf) \n #after a too small segment we add it to backbuf\n #only fill it while high error, keep when in low error,\n # flush after segment.\n self.buf = []\n self.buf.append(elem)\n return ret\n\n def updateFreq(self, e1, e2, steps, lmbda):\n if not e1 in self.mat:\n self.mat[e1] = {}\n if not e2 in self.mat[e1]:\n self.mat[e1][e2] = {}\n if not steps in self.mat[e1][e2]:\n self.mat[e1][e2][steps] = 0\n\n for kk in self.mat[e1].keys():\n for kkk in self.mat[e1][kk].keys():\n self.mat[e1][kk][kkk] *= lmbda\n self.mat[e1][e2][steps] += 1\n\n def error(self, e1, e2):\n steps = e2[1]-e2[0]\n if not int(e1[2]) in self.mat or not int(e2[2]) in self.mat[int(e1[2])]:\n return 1.0\n tab = self.mat[int(e1[2])][int(e2[2])]\n su0 = sum(tab.values())\n if su0 == 0:\n dist = 1.0\n else:\n su = sum([sum(x.values()) for x in self.mat[int(e1[2])].values()])\n su1 = sum([k*v for k, v in tab.iteritems()])\n su2 = sum([k*k*v for k, v in tab.iteritems()])\n var = su2/su0-(su1*su1)/(su0*su0)\n if var < 0:\n var = 0 # because of rounding errors !!!\n sigma = self.slack*0.9*min(math.sqrt(var),\n self.iqr(tab, su0)/1.34)*(su0 ** -0.2)\n if sigma < 1.0:\n sigma = 1.0\n dist = 1.0-(sum([v*(math.exp(-(k-steps)*(k-steps)/(2.0*sigma ** 2)))\n for k, v in self.mat[int(e1[2])][int(e2[2])].iteritems()])/\n (1.0*su))\n return dist\n\n def iqr(self, tab, su):\n i = -1\n k = sorted(tab.keys())\n s = 0\n while s < 0.25*su:\n i += 1\n s += tab[k[i]]\n q1 = k[i]\n while s < 0.75*su:\n i += 1\n s += tab[k[i]]\n q2 = k[i] \n return q2 - q1\n\n\n def buildSegment(self):\n #------backtracking-------\n #rebuild mat for the segment\n self.buf.pop()\n self.backbuf.append(self.buf.pop(0)) #re-check first element\n self.mat = {}\n for i in reversed(range(1, len(self.buf))):\n self.updateFreq(int(self.buf[i][2]), int(self.buf[i-1][2]),\n self.buf[i-1][1]-self.buf[i-1][0], self.lmbda)\n\n 
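For reference, the cosine_distance method above amounts to one minus the cosine similarity of two sparse vectors stored as dicts. Here is a standalone sketch of the same computation; like the method above, it assumes both vectors are non-empty and not all-zero.

import math

def sparse_cosine_distance(s1, s2):
    # dot product over the keys the two dict-vectors share
    dot = sum(v * s2[k] for k, v in s1.items() if k in s2)
    norm1 = math.sqrt(sum(v * v for v in s1.values()))
    norm2 = math.sqrt(sum(v * v for v in s2.values()))
    return 1.0 - dot / (norm1 * norm2)

print(sparse_cosine_distance({'a': 1.0, 'b': 1.0}, {'a': 1.0}))
# -> 1 - 1/sqrt(2), about 0.2929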
#add some symbols in front\n sym = self.buf[0]\n for i in reversed(range(len(self.backbuf))):\n dist = self.error(sym, self.backbuf[i])\n sym = self.backbuf[i]\n if dist > (self.thr):\n break\n else:\n self.buf.insert(0, sym)\n #--------------------------\n mat = {}\n counts = {}\n for i in range(len(self.buf)-1):\n if not (int(self.buf[i][2]), int(self.buf[i][2])) in mat:\n mat[(int(self.buf[i][2]), int(self.buf[i][2]))] = 0\n if not (int(self.buf[i][2]), int(self.buf[i+1][2])) in mat:\n mat[(int(self.buf[i][2]), int(self.buf[i+1][2]))] = 0\n if not int(self.buf[i][2]) in counts:\n counts[int(self.buf[i][2])] = 0\n counts[int(self.buf[i][2])] += (self.buf[i][1]-self.buf[i][0])/self.rate+1\n mat[(int(self.buf[i][2]), int(self.buf[i][2]))] += (self.buf[i][1]-self.buf[i][0])/self.rate\n mat[(int(self.buf[i][2]), int(self.buf[i+1][2]))] += 1\n if not (int(self.buf[-1][2]), int(self.buf[-1][2])) in mat:\n mat[(int(self.buf[-1][2]), int(self.buf[-1][2]))] = 0\n mat[(int(self.buf[-1][2]), int(self.buf[-1][2]))] += (self.buf[-1][1]-self.buf[-1][0])/self.rate\n if not int(self.buf[-1][2]) in counts:\n counts[int(self.buf[-1][2])] = 0\n counts[int(self.buf[-1][2])] += (self.buf[-1][1]-self.buf[-1][0])/self.rate\n for i in mat.keys():\n if counts[i[0]] > 0:\n mat[i] *= 1.0/counts[i[0]] \n return [self.buf[0][0], self.buf[len(self.buf)-1][1],mat]\n\n","sub_path":"StateFinder/Processing.py","file_name":"Processing.py","file_ext":"py","file_size_in_byte":13453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"284494029","text":"# [Python Programming Basics (2) Basic Python Applications] 3. Data Structures - Sets, Dictionaries 8\n\n'''\nWrite a program that tells upper-case letters from lower-case letters in a\nsentence entered by the user and prints the count of each, as shown below.\n\nSample input)\nHello World! 123 \n\nSample output)\nUPPER CASE 2\nLOWER CASE 8\n'''\n\nsentence = input()\nupper_cnt = 0\nlower_cnt = 0\n\nfor word in sentence:\n if word.isupper():\n upper_cnt += 1\n elif word.islower():\n lower_cnt += 1\nprint('UPPER CASE {}'.
format(upper_cnt))\nprint('LOWER CASE {}' .format(lower_cnt))\n","sub_path":"swea_study/python_beginner/74_swea(6260).py","file_name":"74_swea(6260).py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"205625043","text":"# This file is part of sner4 project governed by MIT license, see the LICENSE.txt file.\n\"\"\"\nparsers to import from agent outputs to storage\n\"\"\"\n\nimport json\nimport sys\nfrom pprint import pprint\n\nfrom tenable.reports import NessusReportv2\n\nfrom sner.server import db\nfrom sner.server.model.storage import Host, Note, Service, SeverityEnum, Vuln\nfrom sner.server.parser import ParserBase, register_parser\nfrom sner.server.utils import SnerJSONEncoder\n\n\n@register_parser('nessus') # pylint: disable=too-few-public-methods\nclass NessusParser(ParserBase):\n \"\"\"nessus .nessus output parser\"\"\"\n\n SEVERITY_MAP = ['info', 'low', 'medium', 'high', 'critical']\n\n @staticmethod\n def import_file(path):\n \"\"\"import nessus data from file\"\"\"\n\n with open(path, 'r') as ftmp:\n for item in NessusReportv2(ftmp):\n tmp = NessusParser._import_report_item(item)\n print('parsed item: %s %s' % (tmp.host, tmp))\n db.session.commit()\n\n @staticmethod\n def _import_report_item(report_item):\n \"\"\"import nessus_v2 ReportItem 'element'\"\"\"\n\n xtype = 'nessus.%s' % report_item['pluginID']\n host = NessusParser._import_host(report_item)\n service = NessusParser._import_service(report_item, host)\n note = NessusParser._import_vuln_note(report_item, host, service, xtype)\n\n vuln = Vuln.query.filter(Vuln.host == host, Vuln.service == service, Vuln.xtype == xtype).one_or_none()\n if not vuln:\n vuln = Vuln(host=host, service=service, xtype=xtype)\n db.session.add(vuln)\n vuln.name = report_item['plugin_name']\n vuln.severity = SeverityEnum(NessusParser.SEVERITY_MAP[report_item['severity']])\n vuln.descr = '## Synopsis\\n\\n%s\\n\\n## Description\\n\\n%s' % (report_item['synopsis'], report_item['description'])\n if 'plugin_output' in report_item:\n vuln.data = report_item['plugin_output']\n vuln.refs = ['SN-%s' % note.id] + NessusParser._get_refs(report_item)\n\n return vuln\n\n @staticmethod\n def _import_host(report_item):\n \"\"\"pull host to storage\"\"\"\n\n def upsert_hostname(host, hostname):\n \"\"\"upsert hostname to host model\"\"\"\n\n if hostname != host.hostname:\n note = Note.query.filter(Note.host == host, Note.xtype == 'hostnames').one_or_none()\n if not note:\n note = Note(host=host, xtype='hostnames', data=json.dumps([host.hostname]))\n db.session.add(note)\n note.data = json.dumps(list(set(json.loads(note.data) + [hostname])))\n\n host = Host.query.filter(Host.address == report_item['host-ip']).one_or_none()\n if not host:\n host = Host(address=report_item['host-ip'])\n db.session.add(host)\n if 'host-fqdn' in report_item:\n if not host.hostname:\n host.hostname = report_item['host-fqdn']\n upsert_hostname(host, report_item['host-fqdn'])\n if 'host-rdns' in report_item:\n upsert_hostname(host, report_item['host-rdns'])\n if 'operating-system' in report_item:\n host.os = report_item['operating-system']\n\n return host\n\n @staticmethod\n def _import_service(report_item, host):\n \"\"\"pull service to storage\"\"\"\n\n if report_item['port'] == 0:\n return None\n\n service = Service.query.filter(\n Service.host == host,\n Service.proto == report_item['protocol'],\n Service.port == report_item['port']).one_or_none()\n if not service:\n service = 
Service(host=host, proto=report_item['protocol'], port=report_item['port'])\n db.session.add(service)\n service.state = 'open:nessus'\n service.name = report_item['svc_name']\n\n return service\n\n @staticmethod\n def _import_vuln_note(report_item, host, service, xtype):\n \"\"\"put vulnerability note to storage\"\"\"\n\n note = Note.query.filter(Note.host == host, Note.service == service, Note.xtype == xtype).one_or_none()\n if not note:\n note = Note(host=host, service=service, xtype=xtype)\n db.session.add(note)\n note.data = json.dumps(report_item, cls=SnerJSONEncoder)\n db.session.flush() # required to get .id\n\n return note\n\n @staticmethod\n def _get_refs(report_item):\n \"\"\"compile refs array for report_item\"\"\"\n\n def ensure_list(data):\n return [data] if isinstance(data, str) else data\n\n refs = []\n if 'cve' in report_item:\n refs += ensure_list(report_item['cve'])\n if 'bid' in report_item:\n refs += ['BID-%s' % ref for ref in ensure_list(report_item['bid'])]\n if 'xref' in report_item:\n refs += ['%s-%s' % tuple(ref.split(':', maxsplit=1)) for ref in ensure_list(report_item['xref'])]\n if 'see_also' in report_item:\n refs += ['URL-%s' % ref for ref in report_item['see_also'].splitlines()]\n if 'metasploit_name' in report_item:\n refs.append('MSF-%s' % report_item['metasploit_name'])\n if 'pluginID' in report_item:\n refs.append('NSS-%s' % report_item['pluginID'])\n\n return refs\n\n\ndef debug_parser(): # pragma: no cover\n \"\"\"cli helper, pull data from report and display\"\"\"\n\n with open(sys.argv[1], 'r') as ftmp:\n report = NessusReportv2(ftmp)\n for item in report:\n pprint(item)\n\n\nif __name__ == '__main__': # pragma: no cover\n debug_parser()\n","sub_path":"sner/server/parser/nessus.py","file_name":"nessus.py","file_ext":"py","file_size_in_byte":5548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"401351364","text":"\n# There are a few things I would like to add to this, but I am lacking\n# the time to do so. The biggest and best change would be to make the\n# category entry box in the question editor be a Combobox from ttk. \n# This would make adding questions to existing categories significantly\n# easier. Making the question name editor be a Combobox as well would\n# be nice for editing existing questions, but not as much so, I think.\n# It would also be significantly more difficult to implement well.\n# Buttons to check/uncheck all categories on the Choose Categories \n# tab would be nice, but the current implementation of the save \n# function would make that somewhat obnoxious to use, because every\n# time a box is checked or unchecked, the whole configuration file is\n# saved, which takes some time. It would be particularly frustrating \n# for the users to use that button with a large number of categories.\n# I suspect it will be more frustrating to check or uncheck a large\n# number of categories manually as well, so that is certainly a \n# trade-off. Unfortunately, I do not have time to implement that,\n# debug it, and make sure it performs well.\n\n# Next, a few notes. The overall project is coded in Python 3.3.0.\n# The modules we use are tkinter, json, and sys. From tkinter \n# we use two sub modules called ttk and tix. We only need stdout\n# from sys for debug message printing purposes. \n\n# Tkinter is the graphical module. It includes everything needed\n# for the GUI itself (buttons, labels, entry boxes, etc.). \n# Included in tkinter is ttk, or Themed Tkinter. 
ttk is a more\n# modern extension of tkinter, allowing for more modern appearing\n# applications. We also use tix solely for the scrolled window\n# with the user-defined categories on the second tab. \n\n# Json, or JavaScript Object Notation, is used for inter-program\n# file standardization. This config editor writes a large config\n# file using json. Because json is standardized, other programs\n# in the binary racetrack suite can easily use the same config\n# file without doing special file parsing. The sys module is used\n# solely for stdout, to help with a debug print in one part.\n\n# To compile the program to exe, we use cx_Freeze version 4.3.1 for\n# Python 3.3. Theoretically, all future versions of Python and \n# cx_Freeze should work, but if any issues arise in future \n# maintenance, make sure these are the versions being used. More\n# documentation for compiling with cx_Freeze is included in the \n# file setup.py. For Python 2.x, an older program called py2exe\n# could be used, but unfortunately it is not fully compatible with\n# Python 3.x. \n\n# Python is rather versatile and easy to work with, so we are using\n# that. It is also multi-platform, so if the school wants this to \n# run on multiple platforms (Mac OSX, Windows, or some Linux distro),\n# making it work for those should be particularly easy. A similarly\n# portable language is Java, but at this point I pick Python solely\n# from personal preference. Python is just easier for me to work with.\n\n# There are many GUI (graphical user interface) toolkits for Python,\n# including PyQt, Pygame, PyGTK, PyGUI, and Tkinter. We are using \n# Tkinter here because it is included in all modern Python\n# distributions. It is the standard Python GUI toolkit. Adding \n# other toolkits would not be too difficult, but converting the \n# Python code to an executable file is a bit more of a hassle when\n# using 3rd party software (such as PyQt). \n\n# The normal Python file extension is .py. These can be opened and run\n# in IDLE or another Python coding environment. This file, however,\n# should be a .pyw file. This extension tells the Python environment\n# to not display the console. If we use .py, the GUI works fine, but\n# a console for text output is also displayed. .pyw files suppress\n# the console, so only the GUI is displayed.\n\n# imports\nfrom tkinter import *\nfrom tkinter import ttk \nfrom tkinter import messagebox\nfrom tkinter import tix\nimport json\nfrom sys import stdout\n\n# Use a class for encapsulation. It is not necessary, but it can help\n# make the overall code much easier to work with, especially if another\n# program ends up extending this module. It is unlikely, but \n# encapsulation is good practice anyway. \nclass glee_config(object):\n\t\n\t# This tuple is used to specify fonts in the labels and entry boxes\n\t# later. Change these values to any standard font or font size\n\t# (right now it is Courier and size 18), but be warned that the GUI\n\t# is designed specifically around this font. No other fonts have \n\t# been tested, so weird things can happen.\n\tdefault_font = (\"Courier\", 18)\n\t\n\t# For those unfamiliar with OOP (object-oriented programming), the \n\t# __init__ function is generally the part of the code where all \n\t# variables that will be stored in the class are instantiated \n\t# and initialized. Thus, __init__ functions are often rather \n\t# empty, bland, and long. For those who are familiar with OOP \n\t# languages such as C++ and Java, the __init__ function is the\n\t# constructor. 
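For readers following the Tkinter walkthrough in these comments, here is a minimal, self-contained sketch of the root window, Notebook, and Frame pattern described above, together with the pack() options (side, fill, expand) discussed below. It uses plain tkinter.Tk rather than tix.Tk, since tix only matters for the ScrolledWindow, and the window title, tab name, and label texts are placeholders.

from tkinter import BOTH, X, Y, LEFT, TOP
from tkinter import ttk
import tkinter

root = tkinter.Tk()
root.title("Notebook skeleton")

book = ttk.Notebook(root)
book.pack(fill=BOTH, expand=1)  # fill the root window in both directions

page = ttk.Frame(book)
page.pack(fill=BOTH, expand=1)
book.add(page, text="First tab")  # text= is what the tab itself displays

# side= picks the edge to pack against, fill= stretches the widget
ttk.Label(page, text="top strip").pack(side=TOP, fill=X)
ttk.Label(page, text="left column").pack(side=LEFT, fill=Y)

root.mainloop()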
\n\tdef __init__(self):\n\t\n\t\t# set up initial window.\n\t\t# In Tkinter, the root window is the primary window into which\n\t\t# everything else goes. All the widgets go into frames (which\n\t\t# are also widgets), and all the frames go into potentially\n\t\t# more frames. Eventually, all the top level frames are \n\t\t# packed into the root window. \n\t\t# Normally, the code would just be root = Tk(). However, in \n\t\t# order to use the ScrolledWindow that we use later, we have\n\t\t# to use the special Tk in tix, so we need tix.Tk(). \n\t\troot = tix.Tk()\n\t\troot.title(\"Learning Racetrack Question Editor\")\n\t\t# Geometry is important. The first 1000x650 makes it 1000 \n\t\t# pixels wide and 650 tall. +40 moves it 40 pixels from\n\t\t# the left side of the monitor, and +20 moves it 20 pixels\n\t\t# down from the top of the monitor.\n\t\troot.geometry('1000x650+40+20')\n\t\t# Because the geometry of the window is so important, we also \n\t\t# want to assure that the geometry remains that way. This \n\t\t# window also does not look good if its width or height change,\n\t\t# so we need to make it not resizable. Tkinter apps default\n\t\t# to being resizable in both width and height, so set those\n\t\t# both to false with the resizable function.\n\t\troot.resizable(width=FALSE, height=FALSE)\n\t\troot.rowconfigure(0, weight=1)\n\t\troot.columnconfigure(0,weight=1)\n\t\t\n\t\t# set up the notebook\n\t\t# A notebook can display multiple pages. Each page must \n\t\t# be a frame (or possibly some other frame-like widget).\n\t\t# Two things to note about the next line of code are we use\n\t\t# ttk.Notebook instead of just Notebook so we can use our\n\t\t# lovely modern themes. The one argument for the \n\t\t# Notebook initializer is the root window, so Tkinter knows\n\t\t# to put the Notebook into the root window. \n\t\t# Call this notebook book for simplicity. If we have other\n\t\t# notebooks, we would probably want to call it something \n\t\t# else, but book is fine here. \n\t\tbook = ttk.Notebook(root)\n\t\t# pack tells the notebook to actually appear on the frame\n\t\t# on which it's supposed to appear (in book's case, it \n\t\t# will appear on root). The fill argument can be X, Y, or\n\t\t# BOTH. This determines how the widget fills the frame it\n\t\t# is in. My understanding of the expand argument is not \n\t\t# complete, but as far as I can tell expand=1 tells the \n\t\t# widget to expand the frame it is in in order to fill \n\t\t# the frame that frame is in. The =1 part is the weight\n\t\t# of the expansion; if another frame had expand=2 and would\n\t\t# conflict, it has priority. \n\t\t# An alternate option would be to use a grid (instead of \n\t\t# pack), which allows us to specify exactly where on a \n\t\t# grid we want widgets. Both allow us to put things exactly\n\t\t# where we want them, and they both seem to be fairly\n\t\t# common, but I prefer pack, mostly because it was the first\n\t\t# I used with Tkinter.\n\t\tbook.pack(fill=BOTH,expand=1)\n\t\t\n\t\t# Make two frames and put them into the Notebook. One frame\n\t\t# is for editing the questions and categories. The other is \n\t\t# to enable or disable the various categories (user defined\n\t\t# and built in).\n\t\tpage_1_frame = ttk.Frame(book)\n\t\tpage_1_frame.pack(fill=BOTH,expand=1)\n\t\t\n\t\tpage_2_frame = ttk.Frame(book)\n\t\tpage_2_frame.pack(fill=BOTH,expand=1)\n\t\t\n\t\t# Put the frames into the book. This is an additional step\n\t\t# that is similar to packing but also required. 
The text \n\t\t#argument determines what displays in the tab for the frame.\n\t\tbook.add(page_1_frame, text=\"Edit Questions\")\n\t\tbook.add(page_2_frame, text=\"Choose Categories\")\n\t\t\n\t\t# set up category frame\n\t\t# This is a pretty standard frame setup. Make a frame, give\n\t\t# it to its parent frame, and pack it. In this case we want\n\t\t# it to be on the left side and fill its parent frame \n\t\t# vertically. Make sure it is a ttk.Frame rather than a \n\t\t# Frame because ttk is themed.\n\t\tcat_frame = ttk.Frame(page_1_frame)\n\t\tcat_frame.pack(side=LEFT,fill=Y)\n\t\t# Create a label. Labels are widgets used primarily to display\n\t\t# text. They can also be used to display images, but in this \n\t\t# case we are just displaying text with them. Make the text\n\t\t# be \"Categories\" and give it the default font, as described\n\t\t# at the start of this class. Pack it and give it some padding\n\t\t# to make it display more nicely. Without some padding, some\n\t\t# widgets will encroach on each other, altering the seamless\n\t\t# appearance of the various frames.\n\t\tcat_label = ttk.Label(cat_frame, text=\"Categories\", font=self.default_font)\n\t\tcat_label.pack(pady=3,padx=3)\n\t\t# load_cat_frame is used as a wrapper for the category buttons.\n\t\t# Pack the buttons onto the frame with side=LEFT. Once we \n\t\t# have this frame with things on it, we can pack it into the top \n\t\t# of cat_frame, which puts it just under the Categories label.\n\t\t# This has the added benefit of centering the buttons under the\n\t\t# label. Alternately, we could wrap this frame in yet another \n\t\t# frame, pack the wrapper frame at the top, and pack this frame\n\t\t# to the left to get the buttons to be left-aligned. We want\n\t\t# center for aesthetics in this case, though.\n\t\tload_cat_frame = ttk.Frame(cat_frame)\n\t\tload_cat_frame.pack(side=TOP)\n\t\t# Make two buttons. Buttons, like most other widgets, should be\n\t\t# the ttk variants for the modern theme. Pass it the frame, \n\t\t# set the text variable, and give it a command. Commands are \n\t\t# functions that are called when the button is clicked. We are\n\t\t# unable to pass any variables to the functions (aside from self\n\t\t# by referring to the function as self.cat_select, for example)\n\t\t# so they are generally built specifically for this purpose.\n\t\t# Another term for this type of function is a \"call back\". \n\t\tload_cat_button = ttk.Button(load_cat_frame, text=\"Select Category\", command=self.cat_select)\n\t\tload_cat_button.pack(side=LEFT)\n\t\tdel_cat_button = ttk.Button(load_cat_frame, text=\"Delete Category\", command=self.cat_delete)\n\t\tdel_cat_button.pack(side=LEFT)\n\t\t\n\t\t# Scrollbars are a bit different from other widgets. Most \n\t\t# widgets just require packing, and buttons (as we will see\n\t\t# later) require a command or event to be set. We want this\n\t\t# scrollbar to be associated with a listbox, which we will make\n\t\t# next. Make the scrollbar like any other widget (pass it the \n\t\t# frame we want to put it in) and pack it. We want it to be\n\t\t# vertical, so set fill to Y. We also want it to act like a \n\t\t# standard scrollbar, so pack it to the right instead of the\n\t\t# left that we have been using.\n\t\tcat_scroll = Scrollbar(cat_frame)\n\t\tcat_scroll.pack(side=RIGHT, fill=Y)\n\t\t\n\t\t# A listbox displays a selectable list of items. We only want\n\t\t# one item to be selectable at a time, which is nice because\n\t\t# that is the default mode. 
There are two other modes that\n\t\t# change how many items are selectable at a time. Like the\n\t\t# buttons, listboxes can have a command. However, it is a \n\t\t# special command called yscrollcommand. Set yscrollcommand to\n\t\t# cat_scroll.set, and then set the cat_scroll's command to \n\t\t# cat_box.yview. This lets the scrollbar control the listbox,\n\t\t# and it lets the listbox control the scrollbar. \n\t\tcat_box = Listbox(cat_frame, font=self.default_font, yscrollcommand=cat_scroll.set)\n\t\tcat_scroll.config(command=cat_box.yview)\n\t\t\n\t\t# Double click event is different than button presses, in that \n\t\t# the function it calls has to accept an event object. Because \n\t\t# this is object-oriented, that would mean the function would \n\t\t# have to accept self and the event object, and cat_select() \n\t\t# only accepts self. We could use a new function that does \n\t\t# nothing but call cat_select(), but instead of that we just \n\t\t# create a lambda function. A lambda function is a temporary \n\t\t# function that exists only during the time it is called. \n\t\t# After that, it is gone. Lambda functions are often used in \n\t\t# locations like this, where a very short function is wanted \n\t\t# for one specific purpose. This double click event essentially\n\t\t# allows us to just double click on an item instead of using\n\t\t# the \"select category\" button to select it. For convenience,\n\t\t# we leave the button in as well. Unfortunately, there does\n\t\t# not seem to be a single click event, which would make using\n\t\t# this app a bit easier.\n\t\tcat_box.bind(\"<Double-Button-1>\", lambda x:self.cat_select()) \n\t\tcat_box.pack(side=TOP, fill=Y,expand=1)\n\t\t\n\t\t# set up input boxes\n\t\t# Most of the remaining code in the __init__ function is similar\n\t\t# to the code we have already seen, so it requires significantly\n\t\t# less documentation. \n\t\t\n\t\tmain_frame = ttk.Frame(page_1_frame)\n\t\tmain_frame.pack(fill=BOTH,expand=1)\n\t\t\n\t\t# An Entry is a 1-line input box that defaults to 20 characters\n\t\t# long. Fortunately, we want exactly 20 characters for these.\n\t\t# A Text is similar to an Entry, except it is potentially \n\t\t# 2-dimensional, and so requires a different sort of indexing.\n\t\t# Otherwise, they are about the same in functionality. 
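A short, standalone sketch of the Entry/Text difference noted above: an Entry is a single line indexed by integer character positions, while a Text widget is multi-line and indexed by "line.column" strings, starting at "1.0".

from tkinter import Tk, Entry, Text, END

root = Tk()
entry = Entry(root)
entry.insert(0, "one line")
text = Text(root)
text.insert(END, "several\nlines")
print(entry.get())           # -> one line
print(text.get("1.0", END))  # -> several\nlines (plus a trailing newline)
root.destroy()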
\n\t\ttitle_frame = ttk.Frame(main_frame)\n\t\ttitle_frame.pack(fill=X)\n\t\ttitle_label = ttk.Label(title_frame, text=\"Question\", font=self.default_font)\n\t\ttitle_label.pack(side=LEFT,pady=3,padx=3)\n\t\ttitle_entry = Text(title_frame, font=self.default_font, height=5, wrap='word')\n\t\ttitle_entry.pack(side=RIGHT)\n\t\t\n\t\tchange_cat_frame = ttk.Frame(main_frame)\n\t\tchange_cat_frame.pack(fill=X)\n\t\tchange_cat_label = ttk.Label(change_cat_frame, text=\"Category\", font=self.default_font)\n\t\tchange_cat_label.pack(side=LEFT,pady=3,padx=3)\n\t\tchange_cat_entry = ttk.Entry(change_cat_frame, font=self.default_font)\n\t\tchange_cat_entry.pack(side=RIGHT)\n\t\t\n\t\tcor_ans_frame = ttk.Frame(main_frame)\n\t\tcor_ans_frame.pack(fill=X)\n\t\tcor_ans_label = ttk.Label(cor_ans_frame, text=\"Correct Answer\", font=self.default_font)\n\t\tcor_ans_label.pack(side=LEFT,pady=3,padx=3)\n\t\tcor_ans_entry = ttk.Entry(cor_ans_frame, font=self.default_font)\n\t\tcor_ans_entry.pack(side=RIGHT)\n\t\t\n\t\t# For the alternate answers we need another container frame\n\t\t# and then two smaller frames.\n\t\talt_ans_frame = ttk.Frame(main_frame)\n\t\talt_ans_frame.pack(fill=X)\n\t\t\n\t\talt_ans_left_frame = ttk.Frame(alt_ans_frame)\n\t\talt_ans_left_frame.pack(side=LEFT, fill=Y)\n\t\talt_ans_label = ttk.Label(alt_ans_left_frame, text=\"Alternate Answers\", font=self.default_font)\n\t\talt_ans_label.pack(side=TOP,pady=3,padx=3)\n\t\t# Change alt_ans_warning_label to say \"Must have either 2 or 4 answers\" \n\t\t# if there are 1 or 3 answers including the alternates.\n\t\talt_ans_warning_string = StringVar()\n\t\talt_ans_warning_label = ttk.Label(alt_ans_left_frame, textvariable=alt_ans_warning_string, font=self.default_font)\n\t\talt_ans_warning_label.config(foreground=\"red\")\n\t\talt_ans_warning_label.pack(pady=3,padx=3,side=TOP)\n\t\t\n\t\talt_ans_right_frame = ttk.Frame(alt_ans_frame)\n\t\talt_ans_right_frame.pack(side=RIGHT)\n\t\talt_ans_1_entry = ttk.Entry(alt_ans_right_frame, font=self.default_font)\n\t\talt_ans_1_entry.pack(pady=3)\n\t\talt_ans_2_entry = ttk.Entry(alt_ans_right_frame, font=self.default_font)\n\t\talt_ans_2_entry.pack(pady=3)\n\t\talt_ans_3_entry = ttk.Entry(alt_ans_right_frame, font=self.default_font)\n\t\talt_ans_3_entry.pack(pady=3)\n\t\t\n\t\t# set up frame for buttons \n\t\tbut_frame = ttk.Frame(main_frame)\n\t\tbut_frame.pack(fill=X)\n\t\t\t\n\t\t# There is plenty of code here for a few extra buttons that we\n\t\t# removed. Initially, there was a \"save all\" button. Nothing\n\t\t# was saved to the file until the save all button was clicked.\n\t\t# However, it was requested that we remove that, so it is no \n\t\t# longer there. Its callback function is called everywhere \n\t\t# data changes so data is always saved. There were also undo\n\t\t# buttons, but since data is saved automatically, undo seemed \n\t\t# a bit confusing, so those buttons were removed. 
We still have\n\t\t# the select question, save question, new question, and delete\n\t\t# question buttons.\n\t\tload_quest_button = ttk.Button(but_frame, text=\"Select Question\", command=self.quest_select)\n\t\tload_quest_button.pack(side=LEFT)\n\t\tsave_button = ttk.Button(but_frame, text=\"Save Question\", command=self.save_question)\n\t\t#save_button.config(font=self.default_font)\n\t\tsave_button.pack(side=LEFT)\n\t\t#undo_button = ttk.Button(but_frame, text=\"Undo Change\", command=self.undo_change)\n\t\t#undo_button.pack(side=LEFT)\n\t\t#undo_all_button = ttk.Button(but_frame, text=\"Undo All Changes\", command=self.undo_all_changes)\n\t\t#undo_all_button.config(font=self.default_font)\n\t\t#undo_all_button.pack(side=LEFT)\n\t\tnew_button = ttk.Button(but_frame, text=\"New Question\", command=self.new_question)\n\t\tnew_button.pack(side=LEFT)\n\t\tdel_button = ttk.Button(but_frame, text=\"Delete Question\", command=self.delete_question_func)\n\t\tdel_button.pack(side=LEFT)\n\t\t#save_button = ttk.Button(but_frame, text=\"Save\", command=self.save_all)\n\t\t#save_button.pack(side=LEFT)\n\t\t\n\t\t# set up frame for question selection\n\t\tquest_frame = ttk.Frame(main_frame)\n\t\tquest_frame.pack(side=LEFT,fill=BOTH,expand=1)\n\t\t\n\t\tquest_label = ttk.Label(quest_frame, text=\"Questions\", font=self.default_font)\n\t\tquest_label.pack(pady=3,padx=3)\n\t\tquest_scroll = Scrollbar(quest_frame)\n\t\tquest_scroll.pack(fill=Y, side=RIGHT)\n\t\t\n\t\tquest_box = Listbox(quest_frame, font=self.default_font, yscrollcommand=quest_scroll.set)\n\t\tquest_scroll.config(command=quest_box.yview)\n\t\tquest_box.bind(\"<Double-Button-1>\", lambda x:self.quest_select()) \t\n\t\t\n\t\tquest_box.pack(fill=BOTH, expand=1)\n\t\t\n\t\t# work on choose categories tab\n\t\tchoose_cat_title_frame = ttk.Frame(page_2_frame)\n\t\tchoose_cat_title_frame.pack(side=TOP)# fill=X)\n\t\t\n\t\tchoose_cat_title = ttk.Label(choose_cat_title_frame, text=\"Select Categories\", font=self.default_font)\n\t\tchoose_cat_title.pack(side=TOP)\n\t\t\n\t\tdefault_cat_frame = ttk.Frame(page_2_frame)\n\t\tdefault_cat_frame.pack(side=LEFT, fill=Y)\n\t\t\n\t\tdefault_cat_buffer_frame = ttk.Frame(default_cat_frame)\n\t\tdefault_cat_buffer_frame.pack(side=TOP)\n\t\t\n\t\tdefault_cat_title = ttk.Label(default_cat_buffer_frame, text=\"Default Categories\", font=self.default_font)\n\t\tdefault_cat_title.pack(side=TOP)\n\t\t\n\t\tuser_cat_master_frame = ttk.Frame(page_2_frame)\n\t\tuser_cat_master_frame.pack(fill=BOTH, expand=1)\n\t\tuser_cat_title = ttk.Label(user_cat_master_frame, text=\"Custom Categories\", font=self.default_font)\n\t\tuser_cat_title.pack(side=TOP)\n\t\t\n\t\tuser_cat_scroll_window = tix.ScrolledWindow(user_cat_master_frame, scrollbar=\"x\")\n\t\tuser_cat_scroll_window.pack(fill=BOTH, expand=1)\n\t\t\n\t\tuser_cat_frame = user_cat_scroll_window.window\n\t\t\n\t\tbuiltin_cat_checkboxes = {}\n\t\t\n\t\t#choose_cat_frame = ttk.Frame(page_2_frame)\n\t\t#choose_cat_frame.pack(side=TOP)#, fill=X)\n\n\t\t# Give all the frames borders for debugging purposes.\n\t\t# Uncomment these lines to see where the various \n\t\t# frames are. 
Do not leave it uncommented, because\n\t\t# the app looks quite ugly with frames.\n\t\t#default_cat_frame.config(relief=SUNKEN)\n\t\t#choose_cat_title_frame.config(relief=SUNKEN)\n\t\t#user_cat_frame.config(relief=SUNKEN)\n\t\t#cat_frame.config(relief=SUNKEN)\n\t\t#main_frame.config(relief=SUNKEN)\n\t\t#title_frame.config(relief=SUNKEN)\n\t\t#change_cat_frame.config(relief=SUNKEN)\n\t\t#cor_ans_frame.config(relief=SUNKEN)\n\t\t#alt_ans_frame.config(relief=SUNKEN)\n\t\t#alt_ans_left_frame.config(relief=SUNKEN)\n\t\t#alt_ans_right_frame.config(relief=SUNKEN)\n\t\t#but_frame.config(relief=SUNKEN)\n\t\t\n\t\t# When the __init__ function ends, most of the variables\n\t\t# expire because they are local to the function. Of \n\t\t# course, they still exist because root eventually owns\n\t\t# them (or their parents, which in turn own them), but\n\t\t# we need a way to reference them. Assign them to \n\t\t# class variable versions for later access.\n\t\tself.root = root\n\t\tself.cat_box = cat_box\n\t\tself.save_button = save_button\n\t\t#self.undo_button = undo_button\n\t\t#self.undo_all_button = undo_all_button\n\t\tself.title_entry = title_entry\n\t\tself.change_cat_entry = change_cat_entry\n\t\tself.cor_ans_entry = cor_ans_entry\n\t\tself.alt_ans_warning_label = alt_ans_warning_label\n\t\tself.alt_ans_warning_string = alt_ans_warning_string\n\t\tself.alt_ans_1_entry = alt_ans_1_entry\n\t\tself.alt_ans_2_entry = alt_ans_2_entry\n\t\tself.alt_ans_3_entry = alt_ans_3_entry\n\t\tself.quest_box = quest_box\n\t\tself.builtin_cat_checkboxes = builtin_cat_checkboxes\n\t\tself.default_cat_buffer_frame = default_cat_buffer_frame\n\t\tself.user_cat_frame = user_cat_frame\n\t\tself.page_2_frame = page_2_frame\n\t\t\n\t\t# We need a few more variables that do not come from \n\t\t# widgets. Create some class variables and assign them\n\t\t# default values. \n\t\tself.cat_selected = \"\"\n\t\tself.quest_selected = \"\"\n\t\tself.font_color=\"blue\"\n\t\tself.builtin_categories = {}\n\t\tself.user_categories = {}\n\t\tself.user_cat_frames = []\n\t\t\n\t\t# Set up some styles used in the choose categories frame.\n\t\t# These can have any name, as long as the name ends in \n\t\t# \".TCheckbutton\" so the styles inherit the values from\n\t\t# the default TCheckbutton style. All we want here is \n\t\t# the background colors to be different. \n\t\tsty = ttk.Style()\n\t\tself.sty_1 = \"Emergency1.TCheckbutton\"\n\t\tself.sty_2 = \"Emergency0.TCheckbutton\"\n\t\tsty.configure(\"Emergency1.TCheckbutton\", background=\"white\")\n\t\tsty.configure(\"Emergency0.TCheckbutton\", background=\"#F0F0F0\")\n\t\t\n\t\tself.fill_defaults()\n\t\t\n\t\t# Load the questions.txt file.\t\t\n\t\tself.fname = \"questions.txt\"\n\t\tself.load_from_file(self.fname)\n\n\t\t# Initialize all the widgets. These functions \n\t\t# automatically use the values loaded from the \n\t\t# questions.txt file.\n\t\tself.init_categories()\n\t\tself.init_default_checkboxes()\n\t\tself.init_user_checkboxes()\n\t\t\n\t\t# The mainloop is like a more standard app's main\n\t\t# function, and keeps the program running until \n\t\t# the user exits it or it somehow crashes.\n\t\troot.mainloop()\n\t\t\n\t# init_default_checkboxes is used to fill (or refill) the \n\t# checkboxes for the default categories according to \n\t# the configuration file. 
This is called in the initial\n\t# setup, but we do not need to call it anywhere else\n\t# because the default categories will never change.\n\tdef init_default_checkboxes(self):\n\t\tfor cat in sorted(self.builtin_categories):\n\t\t\ttmp_frame = ttk.Frame(self.default_cat_buffer_frame)\n\t\t\ttmp_frame.pack(side=TOP, fill=X)#, expand=1)\n\t\t\t#tmp_frame.config(relief=SUNKEN)\n\t\t\tb = ttk.Checkbutton(tmp_frame, text=cat, var=self.builtin_categories[cat])\n\t\t\tself.builtin_cat_checkboxes[cat] = b\n\t\t\tb.pack(side=LEFT)\n\t\t\n\t# init_user_checkboxes is called during the initial\n\t# setup and when a category is added or deleted. Much\n\t# like init_default_checkboxes, it fills the checkboxes\n\t# for all user-defined categories. \n\tdef init_user_checkboxes(self):\n\t\t## Some test code to automatically generate 400 \n\t\t## categories named \"category 1\", \"category 2\", \n\t\t## etc. This is only for testing purposes, but\n\t\t## is quite useful for testing large numbers of\n\t\t## categories. \n\t\t#for i in range(400):\n\t\t\t#x = \"%6d\"%i\n\t\t\t#self.questions[\"category \" + x] = {}\n\t\t\t#self.questions[\"category \" + x][\"question \" + str(i)] = [str(i), str(i), str(i), str(i)]\n\t\t#self.user_cat_frame.destroy()\n\t\t##self.user_cat_frame = ttk.Frame(self.page_2_frame)\n\t\t#user_cat_title = ttk.Label(self.user_cat_frame, text=\"Custom Categories\", font=self.default_font)\n\t\t#user_cat_title.pack(side=TOP)\n\t\tprint(\"init user checkboxes\")\n\t\t\n\t\t# Clear all sub-frames. The first time this function\n\t\t# is called, this does nothing. Every other time, it \n\t\t# removes all existing checkboxes so we can put them\n\t\t# back. This might seem counter-intuitive, but we \n\t\t# want all old versions out of our way so we can\n\t\t# put them all in in the right order.\n\t\tfor frame in self.user_cat_frames:\n\t\t\tframe.destroy()\n\t\t\n\t\t# Destroying a frame does not remove the reference to\n\t\t# it, so self.user_cat_frames potentially contains a\n\t\t# bunch of dead references to frames. Remove these\n\t\t# references by setting the list to a blank list.\n\t\t# The garbage collector will take care of the rest\n\t\t# for us.\n\t\tself.user_cat_frames = []\n\t\t\n\t\t# Make sure we have a BooleanVar to keep track of all\n\t\t# our categories. The only time this should do anything\n\t\t# is if there is a new category. This new category\n\t\t# should default to False. We use a BooleanVar because\n\t\t# those are easy to trace with the callback function\n\t\t# trace_func. This is called every time the variable\n\t\t# changes (or is \"w\"ritten to), which happens every time\n\t\t# the checkbox is clicked. \n\t\tfor cat in self.questions:\n\t\t\tif cat not in self.user_categories:\n\t\t\t\tself.user_categories[cat] = BooleanVar(value=False)\n\t\t\t\tself.user_categories[cat].trace(callback=self.trace_func, mode=\"w\")\n\t\t\n\t\t# Experimentation showed that the best number of \n\t\t# categories to display per column is 25. Theoretically,\n\t\t# this should be more dynamic based on the size of the\n\t\t# font, but for the current version 25 is nice.\n\t\tCATEGORIES_PER_PAGE = 25\n\t\ti = 0\n\t\tf = None\n\t\tmod = 1\n\t\t\n\t\t# Iterate through every category. We want to keep them\n\t\t# sorted so large lists of categories are much easier\n\t\t# for the user to read. 
Otherwise, due to the nature\n\t\t# of dicts, the checkboxes would appear in a completely\n\t\t# random order each time.\n\t\tfor cat in sorted(self.user_categories.keys()):\n\t\t\t# The mod variable is used to alternate colors in\n\t\t\t# the custom category display. This line switches\n\t\t\t# it from 1 to 0 or 0 to 1.\n\t\t\tmod = (mod + 1) % 2\n\t\t\t\n\t\t\t# We already made a BooleanVar to keep track of all\n\t\t\t# new categories. Now get rid of all extraneous\n\t\t\t# BooleanVars for categories that no longer exist.\n\t\t\tif cat not in self.questions:\n\t\t\t\tdel self.user_categories[cat]\n\t\t\t\tprint(\"should be deleting %s from self.user_categories\"%cat)\n\t\t\t\tcontinue\n\t\t\t\n\t\t\t# i keeps track of the number of checkboxes we have\n\t\t\t# put on one column. If that matches \n\t\t\t# CATEGORIES_PER_PAGE, reset it to 0 and make a new\n\t\t\t# Frame to put checkboxes on. Pack the new Frame\n\t\t\t# and append it to self.user_cat_frames. Also,\n\t\t\t# create and pack a small Label to act as a border\n\t\t\t# between two columns.\n\t\t\tif i%CATEGORIES_PER_PAGE == 0:\n\t\t\t\ti = 0\n\t\t\t\tmod = 0\n\t\t\t\tf = ttk.Frame(self.user_cat_frame)\n\t\t\t\tself.user_cat_frames.append(f)\n\t\t\t\tf.pack(side=LEFT, fill=Y)\n\t\t\t\t#f.config(relief=SUNKEN)\n\t\t\t\ttmp_frm = ttk.Frame(self.user_cat_frame)\n\t\t\t\ttmp_frm.pack(side=LEFT)\n\t\t\t\ttmp_lbl = Label(tmp_frm, text=\" \")\n\t\t\t\ttmp_lbl.pack()\n\t\t\t\t\n\t\t\t# Make a new small Frame to put our new Checkbutton\n\t\t\t# into. Create the new Checkbutton and give it the\n\t\t\t# appropriate BooleanVar from self.user_categories.\n\t\t\ttmp_frame = ttk.Frame(f)\n\t\t\ttmp_frame.pack(side=TOP, fill=X)\n\t\t\t#tmp_frame.config(relief=SUNKEN)\n\t\t\tb = ttk.Checkbutton(tmp_frame, text=cat, var=self.user_categories[cat])\n\t\t\t\n\t\t\t# If mod is 0, we want to use one background for\n\t\t\t# checkbuttons. Otherwise, use the other background.\n\t\t\tif (mod == 0):\n\t\t\t\tb.config(style=\"Emergency1.TCheckbutton\")\n\t\t\telse:\n\t\t\t\tb.config(style=\"Emergency0.TCheckbutton\")\n\t\t\tb.pack(side=LEFT)\n\t\t\tLabel(tmp_frame, text=\" \").pack(side=LEFT)\n\t\t\t\t\n\t\t\ti += 1 #\n\t\t\t\n\t# fill_defaults is a simple test function to fill a few examples\n\t# in the category and question listboxes for display testing.\n\t# It should never be used in production.\n\tdef fill_defaults(self):\n\t\tprint(\"filling defaults\")\n\t\tfor word in sorted([\"English\", \"Math\", \"History\", \"Geography\"]):\n\t\t\tself.cat_box.insert(END, word)\n\t\tfor i in range(20):\n\t\t\tfor word in [\"1. 10x+3\", \"2. 52/3\", \"3. 
5*5\"]:\n\t\t\t\tself.quest_box.insert(END, word)\n\t\t\t\t\n\t# This function clears and then re-fills the category Listbox\n\t# in addition to clearing the question Listbox and all \n\t# text entry forms.\n\tdef init_categories(self):\n\t\tprint(\"init categories\")\n\t\tself.cat_box.delete(0, END) # clear the category box\n\t\tself.quest_box.delete(0,END) # clear the question box\n\t\t# clear all the entry boxes\n\t\tself.cor_ans_entry.delete(0,END)\n\t\tself.alt_ans_1_entry.delete(0,END)\n\t\tself.alt_ans_2_entry.delete(0,END)\n\t\tself.alt_ans_3_entry.delete(0,END)\n\t\tself.title_entry.delete(0.0,END)\n\t\tself.change_cat_entry.delete(0,END)\n\t\t\n\t\tfor key in sorted(self.questions.keys()): # populate the category box with the categories\n\t\t\tself.cat_box.insert(END, key)\n\t\n\t# This function clears the question Listbox and sets the \n\t# currently selected question to an empty string.\n\tdef reset_questions(self):\n\t\tprint(\"reset questions\")\n\t\tself.quest_box.delete(0, END)\n\t\tself.quest_selected = \"\"\n\t\t\t\n\t# cat_select is the callback function for the select category\n\t# button. \n\tdef cat_select(self):\n\t\tprint(\"cat select\")\n\t\t# Make sure the user has actually selected something\n\t\t# in the Listbox. If not, notify them.\n\t\tif len(self.cat_box.curselection()) == 0:\n\t\t\tif self.cat_selected == \"\":\n\t\t\t\tself.alt_ans_warning_string.set(\"Please click on\\na category and click\\nSelect Category.\")\n\t\t\t\tself.alt_ans_warning_label.config(foreground=\"red\")\n\t\t\t\treturn\t\t\t\n\t\t# Listboxes are a bit counter-intuitive. Rather than\n\t\t# giving us the string the user has selected, it gives\n\t\t# us a tuple containing a string representing the index \n\t\t# (for example, \"1\") of the item they have selected.\n\t\t# If multiple selections are enabled (in this case, they\n\t\t# are not) the tuple contains the indices of each \n\t\t# selected item. We can then use that index in the\n\t\t# Listbox's get function to get the selected string.\n\t\tselect_index = self.cat_box.curselection()[0]\n\t\tcategory = self.cat_box.get(select_index)\n\t\tself.cat_selected = category\n\t\tself.fill_quest_box(category)\n\t\tself.reset_display()\n\t\t# When the user selects a new category, reset the \n\t\t# selected question to a null string.\n\t\tself.quest_selected = \"\"\n\t\n\t# cat_delete is the callback function for the delete \n\t# category button. \n\tdef cat_delete(self):\n\t\tprint(\"cat delete\")\n\t\t# messagebox.askyesno is a nifty function that displays\n\t\t# a dialog that prompts the user to click \"yes\" or \"no\". \n\t\t# It returns True if the user clicks yes and False \n\t\t# otherwise. \n\t\tprompt = messagebox.askyesno(\"Delete Category?\", \"Are you sure you want to delete this category?\\nAll questions in this category will be deleted!\\nCategory: \" + self.cat_selected)\n\t\tif prompt == True:\n\t\t\tdel self.questions[self.cat_selected]\n\t\t\t# Display a message saying which category the\n\t\t\t# user is deleting. Only display the first 20\n\t\t\t# characters of the category for space concerns.\n\t\t\tself.alt_ans_warning_string.set(\"Deleted category\\n%s\"%(self.cat_selected[:20]))\n\t\t\tself.alt_ans_warning_label.config(foreground=\"blue\")\n\t\t\t# If we delete the selected category, remove the \n\t\t\t# reference to its name here. 
Set it to a null\n\t\t\t# string.\n\t\t\tself.cat_selected = \"\"\n\t\t\tself.init_categories()\n\t\t\tself.init_user_checkboxes()\n\t\t\tself.save_all()\n\t\n\t# This function reads all questions in a specific category\n\t# and inserts them into the question box. \n\tdef fill_quest_box(self, category):\n\t\tprint(\"fill question box\")\n\t\tself.reset_questions()\n\t\t# Make sure it is a valid category. If for some reason\n\t\t# the category is not valid, do nothing.\n\t\tif category in self.questions:\n\t\t\tfor question in sorted(self.questions[category].keys()):\n\t\t\t\tself.quest_box.insert(END, question)\n\t\n\t# reset_display removes all inputted text from the text boxes.\n\tdef reset_display(self):\n\t\tprint(\"reset display\")\n\t\tself.title_entry.delete(0.0, END)\n\t\tself.change_cat_entry.delete(0,END)\n\t\tself.cor_ans_entry.delete(0,END)\n\t\tself.alt_ans_1_entry.delete(0,END)\n\t\tself.alt_ans_2_entry.delete(0,END)\n\t\tself.alt_ans_3_entry.delete(0,END)\n\t\n\t# quest_select is the callback function for the Select\n\t# Question button. \n\tdef quest_select(self, q=None):\n\t\tprint(\"question select\")\n\t\tself.reset_display()\n\t\tquestion = q\n\t\t# If no question is specified, make sure there is a \n\t\t# selected category. If not, display an informative\n\t\t# message and return. If there is a selected \n\t\t# category, check if there is a selected question.\n\t\t# If so, get it and set the selected question to that.\n\t\t# If not, display an informative message and return.\n\t\tif q == None:\n\t\t\tif self.cat_selected == \"\":\n\t\t\t\tself.alt_ans_warning_string.set(\"Please click on\\na category and click\\nSelect Category.\")\n\t\t\t\tself.alt_ans_warning_label.config(foreground=\"red\")\n\t\t\t\treturn\n\t\t\tif len(self.quest_box.curselection()) == 0:\n\t\t\t\tself.alt_ans_warning_string.set(\"Click on a question\\nbelow and then click\\nSelect Question.\")\n\t\t\t\tself.alt_ans_warning_label.config(foreground=\"red\")\n\t\t\t\treturn\n\t\t\tquestion = self.quest_box.get(self.quest_box.curselection()[0])\n\t\t\tself.quest_selected = question\n\t\t\n\t\tcategory = self.cat_selected\n\t\t\n\t\tprint(\"Category: \" + category + \" Question: \" + question)\n\t\tanswers = self.questions[category][question]\n\t\tself.fill_answers(question, category, answers)\n\t\n\t# fill_answers fills the text boxes with the information\n\t# (answers, category, and question) of the specified\n\t# category and question.\n\tdef fill_answers(self, question, category, answers):\n\t\tprint(\"fill answers\")\n\t\tself.title_entry.insert(END, question)\n\t\tself.change_cat_entry.insert(END, category)\n\t\tself.cor_ans_entry.insert(END, answers[0])\n\t\tself.alt_ans_1_entry.insert(END, answers[1])\n\t\tif len(answers) > 2:\n\t\t\tself.alt_ans_2_entry.insert(END, answers[2])\n\t\t\tself.alt_ans_3_entry.insert(END, answers[3])\n\t\n\t# save_question is the callback function for the save\n\t# question button. \n\tdef save_question(self):\n\t\tprint(\"save question\")\n\t\tquestion = self.title_entry.get(0.0, END).strip()\n\t\tcategory = self.change_cat_entry.get()\n\t\tprint(\"(question, category)\", (question, category))\n\t\tanswers = [self.cor_ans_entry.get(), self.alt_ans_1_entry.get(), \n\t\t self.alt_ans_2_entry.get(), self.alt_ans_3_entry.get()]\n\t\tanswers = [i for i in answers if i != ''] # clear all blank answers\n\t\t\n\t\t# Check to make sure the user has inputted the\n\t\t# necessary data. 
Display appropriate messages\n\t\t# otherwise.\n\t\tif question == \"\":\n\t\t\tprint(\"blank question. Setting warning\")\n\t\t\tself.alt_ans_warning_string.set(\"Must enter a \\nquestion \\nto save.\")\n\t\t\tself.alt_ans_warning_label.config(foreground=\"red\")\n\t\t\treturn\n\t\telif category == \"\":\n\t\t\tprint(\"blank category. Setting warning\")\n\t\t\tself.alt_ans_warning_string.set(\"Must enter a \\ncategory\\nto save.\")\n\t\t\tself.alt_ans_warning_label.config(foreground=\"red\")\n\t\t\treturn\n\t\telif len(answers) == 3 or len(answers) < 2: # 0, 1, or 3 answers; must have 2 or 4\n\t\t\tprint(\"invalid number of answers: \" + str(answers))\n\t\t\tself.alt_ans_warning_string.set(\"Must enter 2 or 4\\nanswers to save.\")\n\t\t\tself.alt_ans_warning_label.config(foreground=\"red\")\n\t\t\treturn\n\t\t# If the previous if statements were all false,\n\t\t# continue to saving the question. Update the\n\t\t# question if it is editing an existing question,\n\t\t# create a new one if it is new, or change the \n\t\t# category if that is necessary.\n\t\telif self.cat_selected == \"\" or self.quest_selected == \"\":\n\t\t\t# check if the category or question name has not been selected.\n\t\t\t# If it has not, that means the question is new.\t\n\t\t\tprint(\"either category or question not selected. (category, question):\", (self.cat_selected, self.quest_selected))\n\t\t\tself.insert_question(question, category, answers)\n\t\t\tself.init_categories()\n\t\t\tself.alt_ans_warning_string.set(\"New question saved!\")\n\t\t\tself.alt_ans_warning_label.config(foreground=self.font_color)\n\t\telif self.cat_selected == category and self.quest_selected == question:\n\t\t\t# we just have to change the answer\n\t\t\tprint(\"just changing answer\")\n\t\t\tself.insert_question(question, category, answers)\n\t\t\tself.alt_ans_warning_string.set(\"Answers changed!\")\n\t\t\tself.alt_ans_warning_label.config(foreground=self.font_color)\n\t\telse:\n\t\t\tprint(\"deleting and re-inserting\")\n\t\t\tself.delete_question(self.quest_selected, self.cat_selected)\n\t\t\tself.quest_selected = question\n\t\t\tself.cat_selected = category\n\t\t\tself.insert_question(question, category, answers)\n\t\t\tself.alt_ans_warning_string.set(\"Question updated!\")\n\t\t\tself.alt_ans_warning_label.config(foreground=self.font_color)\n\t\t\n\t\tself.cat_selected = \"\"\n\t\tself.quest_selected = \"\"\n\t\tself.init_categories()\n\t\tself.save_all()\n\t\t\t\n\t# insert_question stores the question in the dict\n\t# in the proper format, such that \n\t# self.questions[category][question] == answers.\n\tdef insert_question(self, question, category, answers):\n\t\tprint(\"insert question\")\n\t\tif category not in self.questions:\n\t\t\tself.questions[category] = {}\n\t\tself.questions[category][question] = answers\n\t\tself.init_categories()\n\t\tself.fill_quest_box(category)\n\t\tself.fill_answers(question, category, answers)\n\t\tself.quest_selected = question\n\t\tself.cat_selected = category\n\t\t\n\t# undo_change is the callback function for the \n\t# undo change button. However, it is deprecated,\n\t# because questions save automatically. It simply\n\t# re-selects the selected question.\n\tdef undo_change(self):\n\t\tself.quest_select(q=self.quest_selected)\n\t\t\n\t# undo_all_changes is the callback function for the\n\t# undo all changes button. However, it is deprecated,\n\t# because questions save automatically. 
It simply\n\t# reloads the configuration file.\n\tdef undo_all_changes(self):\n\t\tself.alt_ans_warning_string.set(\"Reloading\\nQuestions!\")\n\t\tself.alt_ans_warning_label.config(foreground=self.font_color)\n\t\tself.load_from_file(self.fname)\n\t\tself.init_categories()\n\t\tself.quest_selected = \"\"\n\t\tself.cat_selected = \"\"\n\t\n\t# delete_question is a helper function to delete a \n\t# question.\n\tdef delete_question(self, question, category):\n\t\tprint(\"delete question\")\n\t\t# Remove the question from the category.\n\t\tdel self.questions[category][question]\n\t\t# If the category is now empty, delete the \n\t\t# category as well.\n\t\tif len(self.questions[category]) == 0:\n\t\t\tprint(\"deleting\")\n\t\t\tdel self.questions[category]\n\t\t\tself.init_categories()\n\t\t\tself.init_user_checkboxes()\n\t\t\n\t# delete_question_func is the callback function for\n\t# the delete question button. \n\tdef delete_question_func(self):\n\t\tif self.cat_selected == \"\":\n\t\t\tself.alt_ans_warning_string.set(\"Please click on\\na question and click\\nSelect Question.\")\n\t\t\tself.alt_ans_warning_label.config(foreground=\"red\")\n\t\t\treturn\t\t\t\n\t\tprompt = messagebox.askyesno(\"Delete Question?\", \"Are you sure you want to delete this question?\\nQuestion: \" + self.quest_selected)\n\t\t\n\t\tif prompt == True:\n\t\t\tself.delete_question(self.quest_selected, self.cat_selected)\n\t\t\tcategory = self.cat_selected\n\t\t\tself.init_categories()\n\t\t\tself.cat_selected = category\n\t\t\tself.fill_quest_box(category)\n\t\t\tself.save_all()\n\t\n\t# new_question is the callback function for the\n\t# new question button. It resets all displays\n\t# and clears the current selection.\n\tdef new_question(self):\n\t\tself.reset_display()\n\t\tself.quest_selected = \"\"\n\t\tself.cat_selected = \"\"\n\t\tself.alt_ans_warning_string.set(\"Starting\\na New\\nQuestion!\")\n\t\tself.alt_ans_warning_label.config(foreground=self.font_color)\n\n\t# save_all is the callback function for the save\n\t# all button. However, that use is deprecated because \n\t# this function is now called whenever information \n\t# changes. \n\tdef save_all(self, refresh=True):\n\t\tfout = open(self.fname, \"w\")\n\t\tself.print_questions(f=fout)\n\t\tfout.close()\n\t\tif refresh:\n\t\t\tself.init_categories()\n\t\t\tself.init_user_checkboxes()\n\t\n\t# load_from_file is called automatically in the __init__\n\t# function. \n\tdef load_from_file(self, fname): # right now the filename is \"questions.txt\"\n\t\tf = open(fname, \"r\")\n\t\t# Read the dumped json dict.\n\t\tret = json.load(f)\n\t\t# Split the dict into its various parts and\n\t\t# assign them to the appropriate variables. 
Also,\n\t\t# initialize some variables.\n\t\tself.questions = ret[\"questions\"]\n\t\tif \"default categories\" in ret:\n\t\t\tself.builtin_categories = ret[\"default categories\"]\n\t\t\tfor key in self.builtin_categories:\n\t\t\t\tself.builtin_categories[key] = BooleanVar(value=self.builtin_categories[key])\n\t\t\t\tself.builtin_categories[key].trace(callback=self.trace_func, mode=\"w\")\n\t\telse:\n\t\t\tself.builtin_categories = {}\n\t\t\tfor i in [\"binary to decimal\", \"decimal to binary\",\n\t\t\t \"addition\", \"subtraction\", \"multiplication\", \"division\"]: \n\t\t\t\tself.builtin_categories[i] = BooleanVar(value=False)\n\t\t\t\tself.builtin_categories[i].trace(callback=self.trace_func, mode=\"w\")\n\t\tif \"user categories\" in ret:\n\t\t\tself.user_categories = ret[\"user categories\"]\n\t\t\tfor key in self.user_categories:\n\t\t\t\tself.user_categories[key] = BooleanVar(value=self.user_categories[key])\n\t\telse:\n\t\t\t# Make sure the attribute exists even when the key is absent.\n\t\t\tself.user_categories = {}\n\t\tfor key in self.user_categories:\n\t\t\tself.user_categories[key].trace(\"w\", self.trace_func)\n\t\n\t# trace_func is a callback function called whenever the\n\t# BooleanVars (used to keep track of whether the user \n\t# wants specific categories to be displayed in the game)\n\t# are changed. \n\tdef trace_func(self, u, v, w):\n\t\t# Save all information to the file every time a \n\t\t# BooleanVar changes. Do not refresh.\n\t\tself.save_all(refresh=False)\n\t\n\t# print_questions is used to print questions to a file,\n\t# or, if a file is unspecified, to stdout.\n\tdef print_questions(self, f=stdout):\n\t\t# Create an empty dict to store all data.\n\t\tdic = {}\n\t\tdic[\"questions\"] = self.questions\n\t\tdic[\"default categories\"] = {}\n\t\tfor key in self.builtin_categories:\n\t\t\tdic[\"default categories\"][key] = self.builtin_categories[key].get()\n\t\tdic[\"user categories\"] = {}\n\t\tfor key in self.user_categories:\n\t\t\tdic[\"user categories\"][key] = self.user_categories[key].get()\n\t\t\t\n\t\t# Dump that dict using json to the specified\n\t\t# file. If the file is stdout, this will print\n\t\t# to stdout. Set indent to 2 for easy parsing.\n\t\tjson.dump(dic, f, indent=2)\n\n# Create the glee_config instance. Since the mainloop is \n# run from within the __init__ function, this both creates\n# and starts the app.\nx = glee_config()\n","sub_path":"config/glee_config.pyw","file_name":"glee_config.pyw","file_ext":"pyw","file_size_in_byte":42298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"390258461","text":"'''\n367. 
Valid Perfect Square\nGiven a positive integer num, write a function which returns True if num is a perfect square else False.\nNote: Do not use any built-in library function such as sqrt.\nExample 1:\nInput: 16\nOutput: true\n\nExample 2:\nInput: 14\nOutput: false\n'''\n\ndef LeetCode367(num):\n\n left=1\n right=num\n while left<=right:\n mid=(left+right)//2\n square=mid*mid\n if num==square:\n return True\n elif square>num:\n right=mid-1\n else:\n left=mid+1\n return False\n\ndef main():\n\n num=16\n print(LeetCode367(num))\n\n num = 81\n print(LeetCode367(num))\n\n num = 14\n print(LeetCode367(num))\n\nif __name__=='__main__':\n main()","sub_path":"python/CodingExercises/LeetCode367.py","file_name":"LeetCode367.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"45880931","text":"import numpy.random as rand\r\nfrom collections import OrderedDict as ODict\r\n\r\n\r\nclass Tr:\r\n def __init__(self, id):\r\n self.__init_obsl = 7200.0\r\n self.id = id\r\n self.time = 0.0\r\n self.next_block = 1\r\n self.curr_block = 0\r\n self.working = 0.0\r\n self.obsl = self.__init_obsl\r\n\r\n def __repr__(self):\r\n return \":%d, %.1f, %d, %d, %.1f, %.1f:\" % (self.id, self.time, self.curr_block, self.next_block, self.working, self.obsl)\r\n\r\n def show_attr(self):\r\n print([self.working, self.obsl])\r\n\r\n def gen_working(self): # 1st advance\r\n self.working = 400.0 + rand.exponential(200.0) # 400 = min, 400+200 = mean\r\n\r\n def init_obsl(self):\r\n self.obsl = self.__init_obsl\r\n\r\n def reduce_obsl(self):\r\n self.obsl = self.obsl - self.working\r\n\r\n def set_pos_adv1(self):\r\n self.curr_block = 5\r\n self.next_block = 6\r\n\r\n def set_pos_test_return(self):\r\n self.curr_block = 9\r\n self.next_block = 5\r\n\r\n def set_pos_fixing(self):\r\n self.curr_block = 13\r\n self.next_block = 14\r\n\r\n\r\nclass Fixer:\r\n # he has queue and takes transacts from queue\r\n def __init__(self):\r\n self.M = 40 # number of fixers\r\n self.Queue = []\r\n self.Processing = []\r\n self.OutputProcessing = []\r\n self.sum_q = 0.0\r\n self.sum_nal = 0.0\r\n\r\n def process(self, sys_time):\r\n self.OutputProcessing.clear()\r\n if len(self.Processing) < self.M:\r\n self.OutputProcessing.extend(self.Queue[0:self.M])\r\n del(self.Queue[0:self.M])\r\n for j in self.OutputProcessing:\r\n j.time = sys_time + Fixer.gen_processing()\r\n self.Processing.extend(self.OutputProcessing)\r\n # print(\"PROCESSING: \", self.Processing)\r\n # print(\"QUEUE: \", self.Queue)\r\n self.sum_q += len(self.Queue)\r\n self.sum_nal += len(self.Processing)/2\r\n return self.OutputProcessing\r\n\r\n def end_fixing(self, sys_time):\r\n for q in self.Processing:\r\n if q.time == sys_time:\r\n self.Processing.pop(0)\r\n\r\n @staticmethod\r\n def gen_processing():\r\n return 600.0 + rand.exponential(400.0) # 600 = min, 600+400 = mean\r\n\r\n\r\nfixing = Fixer()\r\nFEC = ODict()\r\nCEC = ODict()\r\nN = 40 # transact quantity\r\nItem = 0\r\nToRepair = 0\r\n# entry phase\r\nsys_time = 0.0\r\nFEC[sys_time] = []\r\nfor i in range(N):\r\n FEC[sys_time].append(Tr(i))\r\n# correction phase\r\nFEC = ODict(sorted(FEC.items()))\r\nsys_time = list(FEC.keys())[0]\r\nCEC[sys_time] = FEC.pop(sys_time)\r\n# look up phase\r\n# before \"WORK\" cycle\r\nfor i in CEC[sys_time]:\r\n i.gen_working()\r\n# in \"WORK\" cycle\r\nwhile sys_time < 86400:\r\n # first advance\r\n for i in CEC[sys_time]:\r\n i.time += i.working\r\n i.reduce_obsl()\r\n i.set_pos_adv1()\r\n 
FEC[i.time] = []\r\n FEC[i.time].append(i)\r\n CEC.pop(sys_time)\r\n FEC = ODict(sorted(FEC.items()))\r\n # new correction phase\r\n # after advance\r\n sys_time = list(FEC.keys())[0]\r\n CEC[sys_time] = FEC.pop(sys_time)\r\n index = 0\r\n for i in CEC[sys_time]:\r\n Item += 1\r\n i.set_pos_test_return()\r\n i.gen_working() # generate future working time\r\n if i.working >= i.obsl:\r\n i.set_pos_fixing()\r\n CEC.pop(sys_time)\r\n ToRepair += 1\r\n i.init_obsl()\r\n fixing.Queue.append(i)\r\n processed = []\r\n processed.extend(fixing.process(sys_time))\r\n for j in processed:\r\n if j:\r\n FEC[j.time] = []\r\n FEC[j.time].append(j)\r\n FEC = ODict(sorted(FEC.items()))\r\n sys_time = list(FEC.keys())[0]\r\n CEC[sys_time] = FEC.pop(sys_time)\r\n index += 1\r\n fixing.end_fixing(sys_time)\r\nprint(\"repair\", ToRepair)\r\nprint(\"Items = \", Item)\r\nprint(\"sum_q = \", fixing.sum_q)\r\nprint(\"mean q = \", fixing.sum_q/ToRepair)\r\nprint(\"mean nal = \", fixing.sum_nal/ToRepair)\r\nprint(\"Koef nal = \", fixing.sum_nal/ToRepair/fixing.M)\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"494381466","text":"import sys\nimport math\n\n\ncliLineInputNumber = 1\nfileInputNumber = 2\n\n\ndef convert_strings_into_floats(numbers):\n convertedNumbers = []\n for entry in numbers:\n try:\n convertedNumbers.append(float(entry))\n except ValueError:\n # catching inputs other than floats\n print(f\"{entry} can't be converted to a float\")\n return convertedNumbers\n\n\ndef get_number_input(option):\n floatingInputNumbers = []\n if (option == cliLineInputNumber):\n print(\"Please enter floating numbers:\")\n temp = sys.stdin.readline().split()\n floatingInputNumbers = convert_strings_into_floats(temp)\n elif (option == fileInputNumber):\n f = open(sys.argv[2], \"r\")\n temp = f.readline().split()\n f.close()\n\n floatingInputNumbers = convert_strings_into_floats(temp)\n else:\n print(\"Argument doesn't match a floating number input option!\\n\")\n sys.exit(1)\n return floatingInputNumbers\n\n\n# adding all values up and dividing by the total number\ndef calculate_mean(numbers):\n temp = 0.0\n for number in numbers:\n temp += number\n return temp / len(numbers)\n\n\n# sort values and count them; for an even count, average the two\n# middle values, otherwise return the middle value\ndef calculate_median(numbers):\n numbers = sorted(numbers)\n size = len(numbers)\n mid = len(numbers) // 2\n if (size % 2 == 0):\n return (numbers[mid] + numbers[mid - 1]) / 2.0\n else:\n return numbers[mid]\n\n\n# get the mean, sum the squared differences from the mean,\n# then take the square root of that sum divided by (size - 1)\ndef calculate_deviation(numbers):\n size = len(numbers)\n if (size <= 1):\n return None\n mean = calculate_mean(numbers)\n deviation = 0.0\n for number in numbers:\n deviation += (number - mean)**2\n deviation = math.sqrt(deviation / float(size - 1))\n return deviation\n\n\nif __name__ == \"__main__\":\n nums = get_number_input(int(sys.argv[1]))\n mean = calculate_mean(nums)\n median = calculate_median(nums)\n deviation = calculate_deviation(nums)\n\n print(f\"\"\"\n Floating Numbers: {nums}\n Mean: {mean}\n Median: {median}\n Standard Deviation: {deviation}\"\"\")\n","sub_path":"stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":2214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
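The statistics helpers in stats.py above implement the textbook definitions by hand. As a minimal sanity check (the sample values below are hypothetical, not from the source), Python's standard library computes the same three quantities, including the sample standard deviation with the n - 1 divisor:

```python
# Cross-check for stats.py's hand-rolled mean/median/deviation using the
# standard library: statistics.mean is sum/n, statistics.median averages
# the two middle values for an even count, and statistics.stdev uses the
# sample (n - 1) divisor, matching calculate_deviation above.
import statistics

sample = [2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0]  # hypothetical input

print(statistics.mean(sample))    # 5.0
print(statistics.median(sample))  # 4.5 (average of 4.0 and 5.0)
print(statistics.stdev(sample))   # ~2.1381, i.e. sqrt(32 / 7)
```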
+{"seq_id":"471736802","text":"import gensim\nimport logging\nimport traceback\n\nimport definitions\nfrom wsdm.ts.helpers.persons import persons\nfrom wsdm.ts.helpers.nationalities import nationalities\nfrom wsdm.ts.helpers.professions import professions\n\nfrom definitions import WORD2VEC_MODEL_PATH\n\nNATIONALITY_MULTIPLIER = 5\nPROFESSION_MULTIPLIER = 3\nmodel = None\n\n\ndef custom_similarity(similarity, multiplier=1):\n result = similarity * 7\n result *= multiplier\n if result > 7:\n result = 7\n return result\n\n\ndef load_module():\n global model\n model = gensim.models.Word2Vec.load(WORD2VEC_MODEL_PATH)\n\n\ndef find_profession_similarity(person_name, profession):\n global model\n\n person_name = persons.remove_spaces(person_name)\n profession_words = professions.get_similarity_words(profession)\n result = 0\n total_count = 0\n for word in profession_words:\n try:\n result += abs(model.similarity(person_name.lower(), word.lower()))\n total_count += 1\n except Exception as e:\n # logging.error(traceback.format_exc())\n return definitions.DEFAULT_SIMILARITY\n\n result /= total_count\n return custom_similarity(result, PROFESSION_MULTIPLIER)\n\n\ndef find_nationality_similarity(person_name, nationality):\n global model\n\n person_name = persons.remove_spaces(person_name)\n nationality = nationalities.remove_spaces(nationality)\n try:\n return custom_similarity(abs(model.similarity(person_name.lower(), nationality.lower())), NATIONALITY_MULTIPLIER)\n except Exception as e:\n # logging.error(traceback.format_exc())\n return definitions.DEFAULT_SIMILARITY\n\ndef find_similarity(person_name, term, inputType):\n if inputType == definitions.TYPE_NATIONALITY:\n return find_nationality_similarity(person_name, term)\n elif inputType == definitions.TYPE_PROFESSION:\n return find_profession_similarity(person_name, term)\n else:\n raise TypeError\n","sub_path":"src/wsdm/ts/features/word2VecFeature.py","file_name":"word2VecFeature.py","file_ext":"py","file_size_in_byte":1950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"130166598","text":"def merge_sort(array):\n if len(array) > 2:\n a = array[:len(array)//2]\n b = array[len(array)//2:]\n c = merge(merge_sort(a), merge_sort(b))\n return c\n elif len(array) == 1:\n return array\n else:\n if array[0] > array[1]:\n return array[::-1]\n else:\n return array\n\n\ndef merge(a, b):\n c = []\n i,j = 0,0\n while i < len(a) and j < len(b):\n if a[i] > b[j]:\n c.append(b[j])\n j += 1\n else:\n c.append(a[i])\n i += 1\n if i < len(a):\n c.extend(a[i:])\n\n if j < len(b):\n c.extend(b[j:])\n \n return c\n\nif __name__ == \"__main__\":\n array = input(\"Enter array to be sorted: \").strip().split()\n try:\n array = [int(el) for el in array]\n except:\n raise ValueError(\"All elements of array must be integers.\")\n print(f'Before sorting: {array}')\n print(f'Merge sorted: {merge_sort(array)}')","sub_path":"merge_sort.py","file_name":"merge_sort.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"6523289","text":"import sqlite3\n\n\nclass DatabaseManager:\n\n def __init__(self, path):\n self.path = path\n\n def __enter__(self):\n self.conn = sqlite3.connect(self.path)\n self.cursor = self.conn.cursor()\n return self.cursor\n\n def __exit__(self, *args):\n self.cursor.close()\n\n\ndef show_categories():\n categories = []\n with DatabaseManager('products.db') as db:\n result = db.execute(\"\"\"\n SELECT 
categories.category_name FROM categories\n \"\"\")\n for c in result:\n categories.append(*c)\n return categories\n\n\ndef show_products_by_id_category(id_category):\n products_by_category = []\n with DatabaseManager('products.db') as db:\n result = db.execute(\"\"\"\n SELECT my_products.product_name FROM my_products\n WHERE my_products.id_category = ? AND my_products.status = 1\n \"\"\", [id_category])\n for c in result:\n products_by_category.append(*c)\n return products_by_category\n\n\ndef show_id_categories():\n id_categories = []\n with DatabaseManager('products.db') as db:\n result = db.execute(\"\"\"\n SELECT categories.id FROM categories\n \"\"\")\n for c in result:\n id_categories.append(*c)\n return id_categories\n\n\ndef show_products_by_id(id_product):\n products = []\n with DatabaseManager('products.db') as db:\n result = db.execute(\"\"\"\n SELECT my_products.amount, my_products.price,\n my_products.description\n FROM my_products\n WHERE my_products.id = ? AND my_products.status = 1\n \"\"\", [id_product])\n for i in result:\n for k in i:\n products.append(k)\n return products\n\n\ndef show_products_id_by_product_name(product_name):\n products_id = []\n with DatabaseManager('products.db') as db:\n result = db.execute(\"\"\"\n SELECT my_products.id FROM my_products\n WHERE my_products.product_name = ? AND my_products.status =1 \n \"\"\", [product_name])\n for c in result:\n products_id.append(*c)\n return products_id\n\n\ndef check_products(product_name):\n with DatabaseManager('products.db') as db:\n result = db.execute(\"\"\"\n SELECT my_products.product_name FROM my_products\n WHERE my_products.product_name = ? \n \"\"\", [product_name])\n r = [i for i in result]\n if len(r) == 0:\n return True\n else:\n return False\n\n\ndef check_category(category_name):\n with DatabaseManager('products.db') as db:\n result = db.execute(\"\"\"\n SELECT categories.category_name FROM categories\n WHERE categories.category_name = ? 
\n \"\"\", [category_name])\n r = [i for i in result]\n if len(r) == 0:\n return True\n else:\n return False\n\n\ndef add_new_product(product_name_, amount, price, product_category, description, status):\n c = sqlite3.connect('products.db')\n c.cursor()\n c.execute(\"\"\"\n INSERT INTO my_products (product_name, amount, price, id_category, description, status)\n VALUES (?,?,?,(SELECT categories.id\n FROM categories WHERE categories.category_name = ?),?,?)\n \"\"\", [product_name_, amount, price, product_category, description, status])\n c.commit()\n c.close()\n\n\ndef add_new_category(new_category_name):\n c = sqlite3.connect('products.db')\n c.cursor()\n c.execute(\"\"\"\n INSERT INTO categories (category_name)\n VALUES (?)\n \"\"\", [new_category_name])\n c.commit()\n c.close()\n\n\n# add_new_product(\"iphone\", 2, 100,\n# \"phones\", \"cool phone\", 1)\n\n# print(check_category(\"phones\"))\n","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":3751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"51271268","text":"import argparse\nimport csv\nimport glob\nimport json\nimport logging\nimport os\nimport shutil\nimport re\nimport timeit\nfrom urllib.error import HTTPError\nimport functools\n\nimport requests\nimport sys\nimport time\n\nimport pandas\nfrom pandas.errors import EmptyDataError\n\nfrom blackduck.HubRestApi import HubInstance\n\nparser = argparse.ArgumentParser(\"A program to create reports for a given project-version and all of its subprojects\")\nparser.add_argument(\"project_name\")\nparser.add_argument(\"version_name\")\nparser.add_argument('-r', '--refresh', action='store_true',\n help='delete existing reports in the results directory and regenerate')\nparser.add_argument('-v', '--verbose', action='store_true', default=False, help='turn on DEBUG logging')\n\nargs = parser.parse_args()\n\n\ndef get_hub():\n global hub\n try:\n hub = HubInstance(refresh_token=True)\n except Exception as e:\n print(\"There was an exception thrown while creating the Hub instance object: {}\".format(e))\n print(\"It is required that this script be executed in the same directory as .restconfig.json\")\n print(\".restconfig.json must contain an API token for authentication\")\n return None\n else:\n return hub\n\n\nhub = get_hub()\n\n\ndef set_logging_level(log_level):\n logging.basicConfig(stream=sys.stderr, level=log_level, format='%(asctime)s %(levelname)-8s %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S')\n\n\nif args.verbose:\n set_logging_level(logging.DEBUG)\nelse:\n set_logging_level(logging.INFO)\n\nprojname = args.project_name\ntimestamp = time.strftime('%m_%d_%Y_%H_%M')\nfile_out = (projname + '_' + \"Consolidated_src_report-\" + timestamp)\nfile_out = (file_out + \".csv\")\nrootDir = os.getcwd()\nupgrade_guidance_timed_out = {}\n\n\n# print (\"root dir=%s\" % rootDir)\n\ndef doRefresh(dir_name):\n tempDir = os.path.join(rootDir, dir_name)\n print(\"tempDir=%s\" % tempDir)\n for fileName in os.listdir(tempDir):\n print(\"Removing stale files %s\" % fileName)\n os.remove(os.path.join(tempDir, fileName))\n\n\ndef checkdirs():\n os.chdir(rootDir)\n if not os.path.isdir('./temp'):\n os.makedirs('./temp')\n print('made temp directory')\n elif len(os.listdir('./temp')) != 0:\n doRefresh('temp')\n else:\n print('temp directory already exists')\n\n if not os.path.isdir('./results'):\n os.makedirs('./results')\n print('made results directory')\n elif args.refresh and len(os.listdir('./results')) != 0:\n 
print('refreshing results')\n doRefresh('results')\n else:\n print('results directory already exists')\n\n\ndef clean_up_date(date_string):\n return date_string.split('T')[0]\n\n\ndef getCompositePathContext(comp):\n try:\n matchedFilesURL = comp['_meta']['links'][4]['href']\n except TypeError as err:\n logging.debug(\"Error getting matched files for {}\".format(comp['component']), err)\n return [\"\", \"\"]\n response = hub.execute_get(matchedFilesURL)\n if response.status_code == 200:\n matched_files = response.json()\n else:\n return [\"\", \"\"]\n result = []\n try:\n if len(matched_files['items']) <= 0 and comp['origins'][0]['externalId']:\n result.append(comp['origins'][0]['externalId'])\n result.append(comp['origins'][0]['externalNamespace'])\n else:\n result.append(matched_files['items'][0]['filePath']['path'])\n result.append(matched_files['items'][0]['filePath']['fileName'])\n except (TypeError, KeyError, IndexError):\n return [\"\", \"\"]\n return result\n\n\ndef get_component_URL_and_description(bomComponent):\n components_info = []\n component_url = bomComponent['component']\n response = hub.execute_get(component_url)\n if response.status_code == 200:\n component_details = response.json()\n components_info.append(component_details.get(\"url\"))\n desc = component_details.get(\"description\").strip().splitlines()\n components_info.append(\"\".join(desc))\n return components_info\n\n\ndef get_license_names_and_family(bom_component):\n result = []\n if bom_component['licenses'][0]['licenses']:\n license_url = bom_component['licenses'][0]['licenses'][0]['license']\n response = hub.execute_get(license_url)\n else:\n license_url = bom_component['licenses'][0]['license']\n response = hub.execute_get(license_url)\n if response.status_code == 200:\n license_details = response.json()\n result.append(license_details.get(\"name\"))\n result.append(license_details.get(\"licenseFamily\")[\"name\"] if license_details.get(\"licenseFamily\") else \"\")\n return result\n else:\n return result\n\n\n# search the list of vulnerable components for a matching component version url, return a list of vulnerabilities with\n# remediation details for that bom component\ndef get_component_vuln_information(bom_component):\n result = []\n response = hub.execute_get(\n \"{}{}\".format(bom_component['_meta']['links'][3].get('href'), hub.get_limit_paramstring(10000)))\n if response.status_code in [200, 201]:\n result = response.json().get('items')\n else:\n response.raise_for_status()\n return result\n\n\ndef build_upgrade_guidance(components):\n guidance_dict = dict()\n components_with_origins = [comp for comp in components['items'] if comp.get('origins')]\n components_without_origins = [comp for comp in components['items'] if\n not comp.get('origins') and comp.get('componentVersion')]\n\n for cwoo in components_without_origins:\n r_key = cwoo.get('componentVersion')\n try:\n r_val = get_upgrade_guidance_version_name(cwoo.get('componentVersion'))\n except requests.exceptions.HTTPError as err:\n logging.debug(\"no upgrade guidance for:{}, with {}, writing an empty field \".format(r_key, err))\n r_val = \"\"\n except requests.exceptions.ReadTimeout as timeout:\n logging.debug(\n \"upgrade-guidance request for {} {}\".format(timeout.request.url, timeout.args[0].args[0].split(':')[1]))\n upgrade_guidance_timed_out.update(\n {cwoo.get('componentName') + \" \" + cwoo.get('componentVersionName'): timeout.request.url})\n r_val = \"\"\n r_dict = {r_key: r_val}\n guidance_dict.update(r_dict)\n\n for cwo in 
components_with_origins:\n ug_url = [guidance for guidance in cwo.get('origins')[0]['_meta']['links'] if\n guidance['rel'] == \"upgrade-guidance\"]\n try:\n response = hub.execute_get(ug_url[0].get('href'), request_params={'timeout': 30})\n if response.status_code in [200, 201]:\n result_json = response.json()\n r_key = result_json['origin']\n r_val = result_json\n r_dict = {r_key: r_val}\n guidance_dict.update(r_dict)\n continue\n else:\n response.raise_for_status()\n except requests.exceptions.HTTPError as err:\n logging.debug(\"no upgrade guidance for:{}, with {}, writing an empty field \".format(r_key, err))\n r_val = \"\"\n except requests.exceptions.ReadTimeout as timeout:\n logging.debug(\n \"upgrade-guidance request for {} {}\".format(timeout.request.url, timeout.args[0].args[0].split(':')[1]))\n upgrade_guidance_timed_out.update(\n {cwo.get('componentName') + \" \" + cwo.get('componentVersionName'): timeout.request.url})\n r_val = \"\"\n else:\n r_dict = {r_key: r_val}\n guidance_dict.update(r_dict)\n return guidance_dict\n\n\n# return a dictionary with remediation details from a call to /REMEDIATION endpoint\ndef build_component_remediation_data(vulnerable_components, componentName, componentVersion):\n remediation_data = dict()\n vc = [x for x in vulnerable_components['items'] if\n x.get('componentName') == componentName and x.get('componentVersionName') == componentVersion]\n for info in vc:\n response = hub.execute_get(info['_meta']['href'])\n if response.status_code == 200:\n rd = response.json()\n rkey = rd['id']\n rval = rd\n rdict = {rkey: rval}\n remediation_data.update(rdict)\n continue\n return remediation_data\n\n\n# return a dictionary with the version url mapped to the latestAfterCurrent name and date\n# after a call to the /REMEDIATING endpoint\ndef get_component_remediating_data(comp_version_name_url):\n remediating_data = dict()\n url = \"{}{}\".format(comp_version_name_url, \"/remediating\")\n response = hub.execute_get(url)\n if response.status_code == 200:\n rj = response.json()\n r_key = comp_version_name_url\n r_val = rj\n remediating_data.update({r_key: r_val})\n return remediating_data\n\n\ndef get_origin_url(comp):\n assert 'origins' in comp, \"component must have an origins object\"\n try:\n assert comp.get('origins')[0], \"component must have an origin object\"\n except IndexError:\n return comp.get('origins')\n assert 'origin' in comp.get('origins')[0], \"component must have an origin URL\"\n return comp.get('origins')[0]['origin']\n\n\n# get the short term target upgrade version\ndef get_upgrade_guidance_version_name(comp_version_url):\n url = \"{}{}\".format(comp_version_url, \"/upgrade-guidance\")\n resp = hub.execute_get(url, request_params={'timeout': 30})\n upgrade_target_version = \"\"\n if resp.status_code in [200, 201]:\n upgrade_target_version = resp.json()\n else:\n resp.raise_for_status()\n return upgrade_target_version\n\n\ndef quote_versions(s):\n return \"{}{}{}\".format(\"=\\\"\", s, \"\\\"\")\n\n\ndef format_leading_zeros(n):\n match_re = r'^0+[0-9]+\.*[0-9]*'\n if not re.match(match_re, str(n)):\n return n\n else:\n return \"{}{}{}\".format(\"=\\\"\", n, \"\\\"\")\n\n\ndef get_header():\n return [\"Project Name\", \"Project Version\", \"Package Path\", \"Package Type\", \"Component Name\",\n \"Component Version Name\",\n \"Vulnerability Name\", \"Severity\",\n \"Base Score\", \"Remediation Status\", \"Vulnerability Published Date\", \"Vulnerability Updated Date\",\n \"Remediation Created At\", 
\"Fixed In\", \"Remediation Comment\", \"License Names\",\n \"License Family\",\n \"Download URL\", \"Component Description\", \"Latest Version Available\"]\n\n\ndef append_component_info(component, package_type, url_and_des, license_names_and_family, comp_version_url,\n project_name, project_version):\n name = component['componentName']\n version = component['componentVersionName']\n row = []\n if project_name:\n row.append(project_name)\n if project_version:\n row.append(project_version)\n if package_type is not None:\n row.append(package_type[0])\n row.append(package_type[1])\n else:\n row.append(\"\")\n row.append(\"\")\n\n row.append(name)\n row.append(quote_versions(version))\n\n component_row_list = []\n for i in range(9):\n row.append(\"\")\n\n try:\n row.append(license_names_and_family[0])\n row.append(license_names_and_family[1])\n except IndexError as er:\n logging.debug(\"no license information found for:{} {}, writing empty values \".format(name, er))\n row.append(\"\")\n row.append(\"\")\n\n row.extend(add_url_and_desc(url_and_des))\n\n # latestAfterCurrent release is no longer available from the API.\n row.append(quote_versions(version))\n\n component_row_list.append(row.copy())\n\n return component_row_list\n\n\ndef append_vulnerabilities(package_type, component_vuln_information, row_list, row, license_names_and_family,\n comp_version_url, url_and_des, component,\n vulnerable_components, project_name, project_version, upgrade_guidance):\n name = component['componentName']\n version = component['componentVersionName']\n vuln_component_remediation_info = build_component_remediation_data(vulnerable_components, name, version)\n\n global comp_origin_url\n rl = row_list\n r = row\n\n if project_name:\n r.append(project_name)\n if project_version:\n r.append(project_version)\n\n if package_type is not None:\n row.append(package_type[0])\n row.append(package_type[1])\n else:\n row.append(\"\")\n row.append(\"\")\n\n r.append(name)\n r.append(quote_versions(version))\n\n for vuln in component_vuln_information:\n v_name_key = vuln.get('vulnerabilityWithRemediation').get('vulnerabilityName')\n if vuln.get('vulnerabilityWithRemediation').get('relatedVulnerability'):\n nvd = vuln.get('vulnerabilityWithRemediation').get('relatedVulnerability').split('/')\n nvd_name = nvd[len(nvd) - 1]\n r.append(\"{}({})\".format(nvd_name, v_name_key))\n else:\n r.append(v_name_key)\n\n r.extend(vcr_info(v_name_key, vuln_component_remediation_info))\n r.extend(add_short_term_upgrade_guidance(comp_version_url, component, upgrade_guidance))\n r.extend(add_rem_comment(v_name_key, vuln_component_remediation_info))\n r.extend(add_license_name_and_family(license_names_and_family))\n r.extend(add_url_and_desc(url_and_des))\n r.extend(add_long_term_upgrade_guidance(comp_version_url, component, upgrade_guidance))\n rl.append(r.copy())\n r = r[0:6]\n return rl\n\n\ndef add_license_name_and_family(license_names_and_family):\n result = []\n try:\n l_name = license_names_and_family[0]\n l_family = license_names_and_family[1]\n except(KeyError, IndexError, TypeError) as err:\n logging.debug(\"{} with err {}\".format(\"Failed to get license name and family\", err))\n result.append(\"\")\n result.append(\"\")\n else:\n result.append(l_name)\n result.append(l_family)\n return result\n\n\ndef add_url_and_desc(url_and_des):\n result = []\n for ud in url_and_des:\n result.append(\"\" if not ud else ud)\n return result\n\n\ndef add_long_term_upgrade_guidance(comp_version_url, component, upgrade_guidance):\n global 
comp_origin_url\n result = []\n try:\n comp_origin_url = get_origin_url(component)\n upgrade_target = upgrade_guidance.get(comp_origin_url).get('longTerm').get('versionName')\n except AttributeError as err:\n try:\n upgrade_target = upgrade_guidance.get(comp_version_url).get('longTerm').get('versionName')\n except AttributeError as err:\n logging.debug(\n \"No longTerm upgrade-guidance found for component {}, with error: {} \"\n \"writing an empty value\".format(comp_origin_url, err))\n result.append(\"\")\n else:\n result.append(format_leading_zeros(upgrade_target))\n else:\n result.append(format_leading_zeros(upgrade_target))\n return result\n\n\ndef add_rem_comment(v_name_key, vuln_component_remediation_info):\n result = []\n try:\n rem_comment = vuln_component_remediation_info.get(v_name_key).get('comment')\n except (KeyError, TypeError, AttributeError) as err:\n logging.debug(\"No remediation comment available for {} with error {}\".format(v_name_key, err))\n result.append(\"\")\n else:\n result.append(rem_comment)\n return result\n\n\ndef add_short_term_upgrade_guidance(comp_version_url, component, upgrade_guidance):\n global comp_origin_url\n result = []\n try:\n comp_origin_url = get_origin_url(component)\n upgrade_target = upgrade_guidance.get(comp_origin_url).get('shortTerm').get('versionName')\n except AttributeError as err:\n try:\n upgrade_target = upgrade_guidance.get(comp_version_url).get('shortTerm').get('versionName')\n except AttributeError as err:\n logging.debug(\n \"No shortTerm upgrade-guidance found for component {}, with error: {} \"\n \"writing an empty value\".format(comp_origin_url, err))\n result.append(\"\")\n else:\n result.append(format_leading_zeros(upgrade_target))\n else:\n result.append(format_leading_zeros(upgrade_target))\n return result\n\n\ndef vcr_info(v_name_key, vuln_component_remediation_info):\n result = []\n # prioritizes cvss3 over cvss2\n try:\n cvs_score = vuln_component_remediation_info.get(v_name_key)['cvss3'].get('baseScore')\n cvs_severity = vuln_component_remediation_info.get(v_name_key)['cvss3'].get('severity')\n except (KeyError, TypeError, AttributeError) as err:\n try:\n cvs_score = vuln_component_remediation_info.get(v_name_key)['cvss2'].get('baseScore')\n cvs_severity = vuln_component_remediation_info.get(v_name_key)['cvss2'].get('severity')\n except(KeyError, TypeError, AttributeError) as err:\n logging.debug(\n \"{} with err {} for {}\".format(\"No cvss2 or cvss3 score for vulnerability, writing empty value\", err,\n v_name_key))\n result.append(\"\")\n result.append(\"\")\n else:\n result.append(cvs_severity)\n result.append(format_leading_zeros(cvs_score))\n else:\n result.append(cvs_severity)\n result.append(format_leading_zeros(cvs_score))\n\n try:\n rem_status = vuln_component_remediation_info.get(v_name_key).get('remediationStatus')\n except(KeyError, TypeError, AttributeError) as err:\n logging.debug(\n \"{} with err {} for {}\".format(\"failed to get remediationStatus for vulnerability, writing empty value\",\n err, v_name_key))\n result.append(\"\")\n else:\n result.append(rem_status)\n\n try:\n published_date = clean_up_date(vuln_component_remediation_info.get(v_name_key).get('publishedDate'))\n updated_date = clean_up_date(vuln_component_remediation_info.get(v_name_key).get('lastModifiedDate'))\n except(KeyError, TypeError, AttributeError) as err:\n logging.debug(\n \"{} with err {} for {}\".format(\"failed to get remediationStatus for vulnerability, writing empty value\",\n err, v_name_key))\n result.append(\"\")\n 
result.append(\"\")\n else:\n result.append(published_date)\n result.append(updated_date)\n\n try:\n created_at = clean_up_date(vuln_component_remediation_info.get(v_name_key)['createdAt'])\n except (KeyError, TypeError, AttributeError) as err:\n logging.debug(\n \"{} with err {} for {}\".format(\"failed to get createdAt date for vulnerability, writing empty value\", err,\n v_name_key))\n result.append(\"\")\n else:\n result.append(created_at)\n return result\n\n\nsubprojects = list()\n\n\ndef generate_child_reports(component):\n child_project_name = component['componentName']\n child_project_version_name = component['componentVersionName']\n child_project_version = hub.get_project_version_by_name(child_project_name, child_project_version_name)\n initial_components = hub.get_version_components(child_project_version, 100)\n child_project_components = collect_all_version_components(initial_components, child_project_version)\n print(\"Component count returned for sub-project {} {} = {} \".format(child_project_name, child_project_version_name,\n len(child_project_components.get('items'))))\n child_initial_vulnerable_components = hub.get_vulnerable_bom_components(child_project_version, 100)\n child_vulnerable_components = collect_vulnerabilities_for_project_version_batch(child_initial_vulnerable_components,\n child_project_version)\n print(\"Vulnerable bom components returned for {} {} = totalCount {} Actual Count {} \"\n .format(child_project_name, child_project_version_name, child_vulnerable_components.get('totalCount'),\n len(child_vulnerable_components.get('items'))))\n\n upgrade_guidance = build_upgrade_guidance(child_project_components)\n # child_vuln_component_remediation_info = build_component_remediation_data(child_vulnerable_components)\n child_timestamp = time.strftime('%m_%d_%Y_%H_%M_%S')\n child_file_out = (projname + '_' + \"subproject_src_report-\" + child_timestamp)\n child_file_out = (child_file_out + \".csv\")\n curdir = os.getcwd()\n if not curdir.endswith(\"temp\"):\n curdir = os.path.join(rootDir, \"temp\")\n os.chdir(curdir)\n with open(child_file_out, 'a', newline='') as f:\n first_child_file = True\n writer = csv.writer(f)\n for component in child_project_components['items']:\n package_type = getCompositePathContext(component)\n url_and_des = get_component_URL_and_description(component)\n license_names_and_family = get_license_names_and_family(component)\n comp_version_url = component.get('componentVersion')\n component_vuln_information = [x for x in child_vulnerable_components['items'] if\n x['componentVersion'] == component['componentVersion']]\n row = []\n if first_child_file:\n header = get_header()\n writer.writerow(header)\n first_child_file = False\n\n row_list = []\n if len(component_vuln_information) <= 0:\n row_list = append_component_info(component, package_type, url_and_des, license_names_and_family,\n comp_version_url, child_project_name,\n child_project_version_name)\n elif len(component_vuln_information) > 0:\n row_list = append_vulnerabilities(package_type, component_vuln_information, row_list, row,\n license_names_and_family,\n comp_version_url, url_and_des, component,\n child_vulnerable_components, child_project_name,\n child_project_version_name, upgrade_guidance)\n for row in row_list:\n writer.writerow(row)\n f.close()\n\n\ndef genreport():\n # build up the datasets\n projversion = hub.get_project_version_by_name(args.project_name, args.version_name)\n initial_components = hub.get_version_components(projversion, 100)\n components = 
collect_all_version_components(initial_components, projversion)\n print(\"Component count returned for {} {} = {} \".format(args.project_name, args.version_name,\n len(components.get('items'))))\n initial_vulnerable_components = hub.get_vulnerable_bom_components(projversion, 100)\n vulnerable_components = collect_vulnerabilities_for_project_version_batch(initial_vulnerable_components,\n projversion)\n print(\"Vulnerable bom components returned for {} {} = totalCount {} Actual Count {} \"\n .format(args.project_name, args.version_name, vulnerable_components.get('totalCount'),\n len(vulnerable_components.get('items'))))\n\n upgrade_guidance = build_upgrade_guidance(components)\n # vuln_component_remediation_info = build_component_remediation_data(vulnerable_components)\n project_name = args.project_name\n project_version = args.version_name\n curdir = os.getcwd()\n if not curdir.endswith(\"temp\"):\n curdir = os.path.join(rootDir, \"temp\")\n os.chdir(curdir)\n with open(file_out, 'w', newline='') as f:\n writer = csv.writer(f)\n first_file = True\n for component in components['items']:\n if len(component['activityData']) == 0:\n generate_child_reports(component)\n continue\n package_type = getCompositePathContext(component)\n url_and_des = get_component_URL_and_description(component)\n license_names_and_family = get_license_names_and_family(component)\n comp_version_url = component.get('componentVersion')\n component_vuln_information = [x for x in vulnerable_components['items'] if\n x['componentVersion'] == component['componentVersion']]\n row = []\n if first_file:\n header = get_header()\n writer.writerow(header)\n first_file = False\n row_list = []\n if len(component_vuln_information) <= 0:\n row_list = append_component_info(component, package_type, url_and_des, license_names_and_family,\n comp_version_url, project_name,\n project_version)\n elif len(component_vuln_information) > 0:\n row_list = append_vulnerabilities(package_type, component_vuln_information, row_list, row,\n license_names_and_family,\n comp_version_url, url_and_des, component,\n vulnerable_components, project_name, project_version,\n upgrade_guidance)\n try:\n logging.debug(\"{} row(s) written for component {} {}\".format(len(row_list), row_list[0][4].strip(\"\\\"\"),\n row_list[0][5].strip(\"\\\"\")))\n except Exception as err:\n logging.debug(\"exception thrown while counting rows {}\".format(err))\n\n for row in row_list:\n writer.writerow(row)\n f.close()\n\n\ndef collect_all_version_components(components, project_version):\n processed = 100\n batch_size = 100\n total_count = components.get('totalCount')\n if len(components.get('items')) <= total_count:\n components_list = []\n bound = int(total_count / batch_size)\n remaining = total_count % batch_size\n i = 1\n for i in range(bound):\n temp_components = hub.get_version_components(project_version, 100, parameters={'offset': processed}).get(\n 'items')\n logging.debug(\"Request to get next batch of components returned {} items\".format(len(temp_components)))\n processed += batch_size\n components_list += temp_components\n if processed >= (total_count - remaining):\n components_list += hub.get_version_components(project_version, 100,\n parameters={'offset': (total_count - remaining)}).get(\n 'items')\n logging.debug(\"Request to get remaining components returned {} items\".format(len(temp_components)))\n processed = len(components_list) + batch_size\n logging.debug(\"Captured all components. 
processed={}, total_count={}, components_list_length={}\".format(\n processed,\n total_count,\n len(components_list)))\n break\n i += 1\n components.update({'items': components.get('items') + components_list})\n return components\n else:\n return components\n\n\ndef collect_vulnerabilities_for_project_version_batch(vulnerabilities, project_version):\n processed = 100\n batch_size = 100\n total_count = vulnerabilities.get('totalCount')\n if len(vulnerabilities.get('items')) <= total_count:\n vulnerabilities_list = []\n bound = int(total_count / batch_size)\n remaining = total_count % batch_size\n i = 1\n for i in range(bound):\n temp_vulnerabilities = hub.get_vulnerable_bom_components(project_version, 100,\n parameters={'offset': processed}).get('items')\n logging.debug(\"Request to get next batch of components returned {} items\".format(len(temp_vulnerabilities)))\n processed += batch_size\n vulnerabilities_list += temp_vulnerabilities\n if processed >= (total_count - remaining):\n vulnerabilities_list += hub.get_vulnerable_bom_components(project_version, 100,\n parameters={'offset': (total_count - remaining)}).get('items')\n logging.debug(\"Request to get remaining components returned {} items\".format(len(temp_vulnerabilities)))\n processed = len(vulnerabilities_list) + batch_size\n logging.debug(\"Captured all vulnerabilities. processed={}, total_count={}, \"\n \"vulnerabilities_list_length={}\".format(processed,total_count,len(vulnerabilities_list)))\n break\n i += 1\n vulnerabilities.update({'items': vulnerabilities.get('items') + vulnerabilities_list})\n return vulnerabilities\n else:\n return vulnerabilities\n\n\ncsv_list = []\n\n\ndef concat():\n curdir = os.getcwd()\n if not curdir.endswith(\"temp\"):\n curdir = os.path.join(rootDir, \"temp\")\n os.chdir(curdir)\n all_csvs = glob.glob(os.path.join(curdir, '*.csv'))\n all_data_frames = []\n for csv in all_csvs:\n try:\n data_frame = pandas.read_csv(csv, index_col=None)\n except EmptyDataError:\n data_frame = pandas.DataFrame()\n\n all_data_frames.append(data_frame)\n data_frame_concat = pandas.concat(all_data_frames, axis=0, ignore_index=True)\n data_frame_concat.to_csv(file_out, index=False)\n shutil.move(file_out, '../results/')\n shutil.rmtree('../temp', ignore_errors=True)\n\n\ndef main():\n checkdirs()\n start = timeit.default_timer()\n print(\"Starting timer: {} seconds\".format(int(timeit.default_timer())))\n genreport()\n print(\"Time spent generating consolidated report: {} seconds\".format(int(timeit.default_timer() - start)))\n print(\"Upgrade guidance requests timed out for:\")\n for k, v in upgrade_guidance_timed_out.items():\n print(k, v)\n concat()\n\n\nmain()\n","sub_path":"examples/generate_vuln_report_for_sub_projects.py","file_name":"generate_vuln_report_for_sub_projects.py","file_ext":"py","file_size_in_byte":30529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"93659485","text":"from math import *\n\nn = eval(input())\nk = int(input())\n\ne = 3\np = n\ni = 1\n\nprint((round(n,10)))\nwhile(i p*period:\n\t\t\t\t\t\tcumulative_doubles[r]+=(number_of_agent[\"Aab\"]+number_of_agent[\"Bab\"])\n\t\t\t\t\t\tfor j in range(8):\n\t\t\t\t\t\t\tnumber_of_agent[agent_of_index[j]]=np.random.binomial(number_of_agent[agent_of_index[j]],dilution_factor)\n\t\t\t\t\t\tnumber_of_agent[\"R\"]=batch_resources\n\t\t\t\t\t\tp+=1\n\t\t\t\tprint(r)\n\t\t\toutput[x][y]=sum(cumulative_doubles)/N_reps\n\t\t\tprint([ps,pm])\n\nwith open(csvfile, \"w\") as output2:\n\twriter = 
csv.writer(output2, lineterminator='\\n')\n\tfor i in range(len(PS)):\n\t\twriter.writerows([output[i]])\n","sub_path":"Jordt_et_al_FigS3c.py","file_name":"Jordt_et_al_FigS3c.py","file_ext":"py","file_size_in_byte":6098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"312817747","text":"import numpy as np\nimport pandas as pd\nimport itertools as it\nfrom decimal import Decimal\n\n# need to enforce rows summing to 1\n# could be for either state transition or obs probs\nclass ProbabilityMatrix:\n def __init__(self, probabilities, row=None, col=None):\n # probabilities: m*n array where rows should sum to 1\n # row: (optional) a length m array of corresponding row names\n # col: (optional) a length n array of corresponding col names\n if row != None:\n assert len(probabilities) == len(row),\\\n \"The number of row labels must equal number of rows.\"\n if col != None:\n assert len(probabilities[0]) == len(col),\\\n \"The number of col labels must equal number of cols.\"\n for p in probabilities:\n assert len(p) == len(probabilities[0]),\\\n \"All rows should have the same length.\"\n assert abs(sum(p) - 1.0) < 1e-12,\\\n \"The sum of each row should be 1\"\n\n if row == None:\n self.row = [i for i in range(len(probabilities))]\n else:\n self.row = row\n\n if col == None:\n self.col = [i for i in range(len(probabilities[0]))]\n else:\n self.col = col\n\n self.probabilities = np.array(probabilities)\n\n @property\n def df(self):\n # return a dataframe for the probability matrix\n return pd.DataFrame(self.probabilities, columns=self.col, index=self.row)\n \n def transition(self, start, end):\n # return a_start,end.\n # index() throws an error labels aren't found\n i = self.row.index(start)\n j = self.col.index(end)\n return self.probabilities[i][j]\n\n def diag(self, label):\n return pd.DataFrame(np.diag(self.df[label]), columns=self.row, index=self.row)\n # return ProbabilityMatrix(np.diag(self.df[label]), self.row, self.row)\n\n\n def __repr__(self):\n return self.df.to_string()\n\n def __matmul__(self, other):\n if isinstance(other, ProbabilityMatrix):\n return ProbabilityMatrix(self.probabilities @ other.probabilities,\\\n self.row, other.col)\n # if isinstance(other, pd.DataFrame):\n # return ProbabilityMatrix(self.probabilities @ other,\\\n # self.row, list(other.index))\n\n################################################################################\n\nA = ProbabilityMatrix([[0.7, 0.3],\n [0.4, 0.6]],\n row=[\"H\", \"C\"],\n col=[\"H\", \"C\"])\nB = ProbabilityMatrix([[0.1, 0.4, 0.5],\n [0.7, 0.2, 0.1]],\n row=[\"H\", \"C\"],\n col=[\"S\", \"M\", \"L\"])\n\nP = ProbabilityMatrix([[0.6, 0.4]],col=[\"H\",\"C\"])\n\n# print(A)\n# print(B)\n# print(P)\n\nO = [0,1,0,2]\nO2 = [\"S\", \"M\", \"S\", \"L\"]\nX = [\"H\",\"H\",\"C\",\"C\"]\n\n# Given L = (A,B,P), state sequence X, and observations O\n# find the probability of observing O.\ndef P_XO(A,B,P,X,O):\n assert len(X) == len(O),\\\n \"Length of state seq and observations should be equal.\"\n\n prob = P.transition(0,X[0]) * B.transition(X[0],O[0])\n for i in range(1, len(X)):\n prob *= A.transition(X[i-1],X[i]) * B.transition(X[i],O[i])\n return prob\n\n# print(P_XO(A,B,P,X,O2))\n\ndef state_seq_probabilities_tab(A,B,P,Q,O):\n # A,B,P: Markov model\n # Q: states\n # O: observations (length T)\n\n # returns a dataframe with the probability of observations O for each\n # possible state sequence\n\n perms = list(it.product(Q, repeat=len(O)))\n tab = []\n for perm in perms:\n 
tab.append([''.join(perm), float(round(Decimal(P_XO(A,B,P,perm,O)),6))])\n\n df = pd.DataFrame(tab, columns=[\"State\",\"Probability\"])\n total = df.sum()[\"Probability\"]\n df[\"Normalized Probability\"] = df.apply(lambda r: r[\"Probability\"]/total, axis=1)\n\n return df\n\nss_df = state_seq_probabilities_tab(A,B,P, [\"H\",\"C\"], O2)\n\n# print(\"Table 1: State Sequence Probabilities\")\n# print(ss_df)\n# print()\n\ndef hmm_probabilities_tab(A,B,P,Q,O):\n # A,B,P: Markov model\n # Q: state labels\n # O: observations (length T)\n\n # returns a dataframe with the probability of a given state at each time\n\n ss_df = state_seq_probabilities_tab(A,B,P,Q,O)\n df = pd.DataFrame([[]],index=Q)\n for i in range(len(O)):\n probs = []\n for q in Q:\n q_df = ss_df[ss_df[\"State\"].str[i] == q]\n probs.append(q_df.sum()[\"Normalized Probability\"])\n df[i] = probs\n return df\n\nhmm_prob = hmm_probabilities_tab(A,B,P,[\"H\",\"C\"],O2)\n\n# print(\"Table 2: HMM Probabilities\")\n# print(hmm_prob)\n# print()\n\ndef P_O_given_L(A,B,P,O):\n # A,B,P: Markov model\n # O: observations (length T)\n # given HMM lambda (ABP), return probability of observing O\n\n alpha = P.probabilities@B.diag(O[0])\n for i in range(1,len(O)):\n alpha @= A.probabilities\n alpha @= B.diag(O[i]).reset_index(drop=True)\n\n return alpha\n\ndef P_O_given_L2(A,B,P,O):\n # A,B,P: Markov model\n # O: observations (length T)\n # given HMM lambda (ABP), return probability of observing O\n alpha = P@pd.DataFrame(np.diag(B[O[0]]))\n\n for i in range(1,len(O)):\n alpha @= A\n alpha @= pd.DataFrame(np.diag(B[O[0]])).reset_index(drop=True)\n\n return float(alpha.sum(axis=1))\n\ndef X_given_LO(A,B,P,O):\n # A,B,P: Markov model\n # O: observations (length T) \n # given HMM lambda (ABP), return most likely state seq X\n\n\n alpha = P.probabilities@B.diag(O[0])\n alpha_tab = pd.DataFrame(alpha)\n\n beta = pd.DataFrame(np.ones(P.df.shape[1]),index=P.df.columns)\n beta_tab = beta.transpose()\n\n for i in range(1,len(O)):\n alpha @= A.probabilities\n alpha @= B.diag(O[i]).reset_index(drop=True)\n alpha_tab = alpha_tab.append(pd.DataFrame(alpha), ignore_index=True)\n\n beta = A.probabilities @ B.diag(O[len(O)-i]).reset_index(drop=True) @ beta\n beta.index= P.df.columns\n beta_tab = beta.transpose().append(beta_tab, ignore_index=True)\n\n gamma_tab = alpha_tab*beta_tab\n gamma_tab = gamma_tab.div(gamma_tab.sum(axis=1)[0])\n\n return ','.join(list(gamma_tab.idxmax(axis=1)))\n\nPOL = P_O_given_L(A,B,P,O2)\n# print(\"Problem 1 - Find P(O|λ):\")\n# print(POL)\n# print(POL.sum(axis=1))\n# print()\n\nXLO = X_given_LO(A,B,P,O2)\n# print(\"Problem 2 - Most likely state seq:\")\n# print(XLO)\n# print()\n\n\n\ndef find_lambda(O,N,M):\n # O: sequence of observations\n # N: size of state space\n # M: size of observation space\n\n assert max(O) < M,\\\n \"Range of observations must be within M.\"\n\n # initialize and normalize A,B,P\n A = pd.DataFrame(np.random.rand(N,N))\n B = pd.DataFrame(np.random.rand(N,M))\n P = pd.DataFrame(np.random.rand(1,N))\n A += 10\n B += 10\n P += 10\n A = A.div(A.sum(axis=1), axis=0)\n B = B.div(B.sum(axis=1), axis=0)\n P = P.div(P.sum(axis=1), axis=0)\n # probability of getting O given this HMM\n p_O = P_O_given_L2(A,B,P,O)\n\n print(\"A:\")\n print(A)\n print(\"B:\")\n print(B)\n print(\"P:\")\n print(P)\n print(\"P(O|λ):\")\n print(p_O)\n\n for step in range(100):\n alpha = P@pd.DataFrame(np.diag(B[O[0]]))\n alpha_tab = pd.DataFrame(alpha)\n\n beta = pd.DataFrame(np.ones(P.shape[1]),index=P.columns)\n beta_tab = 
beta.transpose()\n\n for i in range(1,len(O)):\n alpha @= A\n alpha @= pd.DataFrame(np.diag(B[O[i]])).reset_index(drop=True)\n alpha_tab = alpha_tab.append(pd.DataFrame(alpha), ignore_index=True)\n\n beta = A @ pd.DataFrame(np.diag(B[O[len(O)-i]])).reset_index(drop=True) @ beta\n beta.index= P.columns\n beta_tab = beta.transpose().append(beta_tab, ignore_index=True)\n\n gamma_tab = alpha_tab*beta_tab\n gamma_tab = gamma_tab.div(gamma_tab.sum(axis=1)[0])\n\n #make di_gamma table (Tx(NxN array))\n di_gamma_tab = pd.DataFrame()\n for t in range(0,len(O)-1):\n # print(\"t = %d\" % t)\n di_gamma = A.mul(np.array(alpha_tab[t:t+1]), axis=0)\n di_gamma @= pd.DataFrame(np.diag(B[O[t+1]])).reset_index(drop=True)\n di_gamma = di_gamma.mul(np.array(beta_tab[t+1:t+2]), axis=1)\n # print(di_gamma)\n # print(di_gamma.sum(axis=1))\n # di_gamma = di_gamma.div(float(p_O.sum(axis=1)))\n di_gamma = di_gamma.div(di_gamma.sum(axis=1), axis=0)\n # print(di_gamma)\n # print(gamma_tab[t:t+1])\n di_gamma = di_gamma.mul(np.array(gamma_tab[t:t+1]), axis=0)\n # print(di_gamma)\n # print(di_gamma.to_numpy().sum())\n cols = pd.MultiIndex.from_product([[t], [x for x in range(P.shape[1])]])\n # print(cols)\n di_gamma = pd.DataFrame(np.array(di_gamma), index=cols)\n di_gamma_tab = di_gamma_tab.append(di_gamma)\n # print(di_gamma_tab)\n \n print(\"====================== step %d ======================\" % step)\n # print(\"alpha_tab:\")\n # print(alpha_tab)\n # print(\"beta_tab:\")\n # print(beta_tab)\n print(\"gamma_tab:\")\n print(gamma_tab)\n # print(\"sum of gamma_tab cols to t-2:\")\n sum_gamma_tab_t2 = gamma_tab[:-1].sum(axis=0)\n # print(sum_gamma_tab_t2)\n\n # print(\"sum of gamma_tab cols to t-1:\")\n sum_gamma_tab_t1 = gamma_tab.sum(axis=0)\n # print(sum_gamma_tab_t1)\n\n\n # print(\"di_gamma_tab:\")\n # print(di_gamma_tab)\n sum_di_gamma_tab = di_gamma_tab.sum(level=1)\n # print(\"sum of di_gamma_tab over t:\")\n # print(sum_di_gamma_tab)\n # print(\"\\n\\n\")\n\n P_comp = gamma_tab[:1]\n print(\"P_comp:\")\n print(P_comp)\n\n A_comp = sum_di_gamma_tab.div(np.array(sum_gamma_tab_t2),axis=0)\n print(\"A_comp:\")\n print(A_comp)\n\n\n # print(gamma_tab.iloc[[0,2]])\n print(\"B_comp:\")\n B_comp = pd.DataFrame()\n for j in range(M): # cols\n # print(\"j: %d\" %j)\n rows = [t for t, o_t in enumerate(O) if o_t == j]\n B_j = gamma_tab.iloc[rows].sum(axis=0).div(np.array(sum_gamma_tab_t1),axis=0)\n # print(B_j)\n B_comp[j] = B_j\n # print(B_comp)\n\n # B_j = pd.DataFrame(np.array(B_j), index=[j])\n # print(B_j)\n # B_comp = B_comp.append(B_j)\n # print(rows)\n # print(gamma_tab.iloc[rows])\n # print(\"sum:\")\n # print(gamma_tab.iloc[rows].sum(axis=0))\n # print(sum_gamma_tab_t1)\n # print(\"div:\")\n # print(gamma_tab.iloc[rows].sum(axis=0).div(np.array(sum_gamma_tab_t1),axis=0))\n # print()\n print(B_comp)\n\n p_O_comp = P_O_given_L2(A_comp,B_comp,P_comp,O)\n # print(\"p_O: %f\" % float(p_O.sum(axis=1)))\n # print(\"p_O_comp: %f\" % float(p_O_comp.sum(axis=1)))\n print(\"p_O: %f\" % p_O)\n print(\"p_O_comp: %f\" % p_O_comp)\n\n if (p_O >= p_O_comp):\n print(\"Re-estimated HMM was not better... 
T_T\")\n break\n\n A = A_comp\n B = B_comp\n P = P_comp\n p_O = p_O_comp\n # print(\"test\")\n # print(gamma_tab[2:3])\n # print(np.array([np.array(gamma_tab)]))\n\n\n # print(\"SUMMARY:\")\n # print(\"A:\")\n # print(A)\n # print(\"B:\")\n # print(B)\n # print(\"P:\")\n # print(P)\n # print(\"P(O|λ):\")\n # print(p_O.sum(axis=1))\n\n return\n\nprint(\"Problem 3 - Find A,B,π:\")\nprint(find_lambda([0,1,2,0,1,2,0,1,2,0,1,2,0,1,2],3,3))\n","sub_path":"hmm.py","file_name":"hmm.py","file_ext":"py","file_size_in_byte":11530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"509390478","text":"#!/usr/bin/env python\n# Definition for binary tree with next pointer.\n\nclass TreeLinkNode:\n def __init__(self, x, left=None, right=None):\n self.val = x\n self.left = left\n self.right = right\n self.next = None\n\n\nclass Solution:\n # @param root, a tree link node\n # @return nothing\n def connect(self, root):\n if root is None:\n return\n\n parents = [root]\n while parents:\n children = []\n for left, right in zip(parents[:-1], parents[1:]):\n left.next = right\n\n for node in parents:\n if node.left:\n children.append(node.left)\n if node.right:\n children.append(node.right)\n\n parents = children\n\n\nroot = TreeLinkNode(1,\n left=TreeLinkNode(2,\n left=TreeLinkNode(4,\n left=TreeLinkNode(8)\n ),\n right=TreeLinkNode(5)\n ),\n right=TreeLinkNode(3,\n right=TreeLinkNode(7,\n left=TreeLinkNode(14)\n )\n )\n)\n\ndef show(root):\n parents = [root]\n\n while parents:\n children = []\n for node in parents:\n print(node.val, node.next.val if node.next else None)\n if node.left:\n children.append(node.left)\n if node.right:\n children.append(node.right)\n\n parents = children\n\nshow(root)\nSolution().connect(root)\nshow(root)\n","sub_path":"117.Populating_Next_Right_Pointers_in_Each_Node_II.py","file_name":"117.Populating_Next_Right_Pointers_in_Each_Node_II.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"100444220","text":"import psutil\nfrom django.conf import settings\nfrom django.http import JsonResponse\nfrom django.views.generic import View\n\nfrom .utils import clear_database_info, get_package_list\n\n\nclass CheckerView(View):\n\n def get(self, request, *args, **kwargs):\n result = {\n 'BASE_DIR': settings.BASE_DIR,\n 'DEBUG': settings.DEBUG,\n 'INSTALLED_APPS': settings.INSTALLED_APPS,\n 'DATABASES': clear_database_info(settings.DATABASES),\n 'PACKAGE_LIST': get_package_list(),\n 'GUNICORN': 'gunicorn' in (p.name() for p in psutil.process_iter()),\n 'NGINX': 'nginx' in (p.name() for p in psutil.process_iter())\n }\n return JsonResponse(result)\n","sub_path":"ya_checker/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"613636271","text":"# -*- coding: utf-8 -*-\n\"\"\"A coliseum is an object that will compare a list of pipelines over some\n parameter space will compare different models, and data cleaning methods,\n subset of the training data using cross validation and logloss.\n Sets up pipeline for the models. Defines the transforms and runs cross\n validation with various parameters. 
Each method will return a\n\"\"\"\nfrom functools import partial\nimport multiprocessing\nimport timeit\nimport time\nimport datetime\nimport itertools\n\n\nfrom sklearn.pipeline import Pipeline\nfrom sklearn import model_selection\nfrom sklearn.metrics import make_scorer, log_loss\n\n\nimport pandas as pd\n\n\n\n\nfrom dataexplorer import DataExplorer\nfrom datamanager import DataManager\nfrom transform import *\n\ndef make_pipelines(transform_class_list, batches=1):\n '''Takes a list of classes of transforms. Extracts the callable methods,\n creates all possibles tuples in the order given of the methods, creates\n a list of data frames representing the possible pipelines.\n '''\n methods_list = []\n for transform in transform_class_list:\n attributes = [getattr(transform, method) for method in dir(transform)]\n methods = [attribute for attribute in attributes\n if callable(attribute) and\n not attribute.__name__.startswith(\"__\") and\n not attribute.__name__ == 'fit' and\n not attribute.__name__ == 'transform' and\n not attribute.__name__ == 'predict' and\n not attribute.__name__ == 'type']\n methods_list.append(methods)\n print(\"Generated Lists\")\n print(\"Now building pipelines.\")\n product = itertools.product(*methods_list)\n pipelines = []\n try:\n for prod in product:\n pipes = []\n for pipe in prod[:-1]:\n pipes.append(pipe.__name__)\n\n pipes.append(prod[-1].__name__)\n pipelines.append(pipes)\n except TypeError as error:\n print(\"Pipeline Creation Failed!\")\n print(error)\n print(pipes)\n pipelines = chunk_pipelines(pipelines, n_chunks=batches)\n transform_names = [transform.__name__ for transform in transform_class_list]\n return [make_pipeline_df(chunk, transform_names) for chunk in pipelines]\n\ndef chunk_pipelines(pipelines, n_chunks=1):\n '''Take a list of pipelines, and return a list of n_chunks\n of pipelines. This should try to evenly distribute the workload required\n for each batch but does not in this iteration'''\n if n_chunks == 1:\n return [pipelines]\n else:\n chunk_size = int(len(pipelines)/n_chunks)\n #create first equally size chunks\n chunks = [pipelines[chunk_size*i\n :chunk_size*(i+1)] for i in range(0, n_chunks)]\n\n #make the last chunk\n chunks.append(pipelines[n_chunks*chunk_size:len(pipelines)])\n chunks = [chunk for chunk in chunks if chunk]\n return chunks\n\ndef make_pipeline_df(pipelines, transform_class_names):\n ''''''\n return pd.DataFrame(pipelines, columns=transform_class_names)\n\ndef objective_score(results):\n ''''''\n return pd.DataFrame(results).mean().values[0]\n\ndef run_coliseum(x_train, y_train, transform_class_list, scorer=None, grids=None,\n n_jobs=1):\n '''\n Takes a training set, and a sequence of transform classes, and compares the\n possible pipelines in the given order.\n Parameters:\n x_train - Predictors in the training data. They should be compatible\n with the given models.\n y_train - The true values we are training with. We want to predict\n these.\n tranform_class_list - This is a list of classes that we will build the\n pipelines from. Each step of the pipeline is pulled from\n the class in the respective position in this list.\n scorer - This is a Scorer object that is compatible with cross_validate\n grids - Not currenty used. This represents input for hyper parameter\n search.\n n_jobs - The number of processes to run this under.\n Parallelizing at the pipeline level. 
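             Pipelines are grouped into batches by chunk_pipelines; for\n             illustration (plain ints standing in for pipelines),\n             chunk_pipelines(list(range(10)), n_chunks=3) returns\n             [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]], i.e. up to\n             n_chunks + 1 batches, since the remainder becomes an\n             extra chunk. This splitting\n             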
does not balance load \n right now.\n\n Return:\n A data frame with columns transform_names+ ['average fit_time',\n 'average score_time',\n 'average test_score'])\n\n '''\n seed = 7\n\n if scorer is None:\n scorer = make_scorer(log_loss, labels=[0, 1])\n\n #create a data frame of the pipelines that we need to test.\n chunks = make_pipelines(transform_class_list, batches=n_jobs)\n\n #get the transform types names for the columns of the data frame.\n transform_names = [transform.__name__ for transform in transform_class_list]\n #cross validation setup.\n kfold = model_selection.KFold(n_splits=10, random_state=seed)\n\n #mode='serial'\n if n_jobs == 1:\n cv_results = [evaluate_pipelines(x_train, y_train,\n transform_names, scorer,\n kfold, chunk) for chunk in chunks]\n #mode='parallel'\n elif n_jobs != 1:\n pool = multiprocessing.Pool(processes=n_jobs)\n func = partial(evaluate_pipelines, x_train, y_train,\n transform_names, scorer, kfold)\n cv_results = pool.map(func, chunks)\n return pd.concat(cv_results, ignore_index=True)\n\n\ndef evaluate_pipelines(x_train, y_train, transform_names, scorer,\n kfold, pipeline_df):\n '''\n Takes a pipeline data frame and cross validates each corresponding\n pipelines on the training data.\n '''\n cv_results = []\n n = 1\n pipelines = get_pipelines(pipeline_df)\n for pipeline in pipelines:\n print(\"Running pipeline: \"+str(n)+\" Out of \"+ str(len(pipelines)))\n cv = evaluate_pipeline(x_train, y_train, pipeline, scorer, kfold)\n cv = pd.DataFrame(cv).mean().values\n cv_results.append(cv)\n n = n+1\n #data frame containing cv results for all the pipelines.\n coliseum_results = pd.DataFrame(cv_results, columns=['fit_time',\n 'score_time',\n 'test_score'])\n\n results = pd.concat([pipeline_df, coliseum_results], axis=1)\n return results\n\ndef get_pipelines(pipeline_df):\n '''\n Takes a pipeline dataframe and returns a list of Pipeline objects\n corresponding the string function names. 
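    For illustration only\n    (hypothetical method names, one row per candidate pipeline):\n\n            Imputer       Scaler          Model\n        0   impute_mean   scale_standard  model_logreg\n        1   impute_mean   scale_minmax    model_logreg\n\n    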
Each step in the df is a column\n of strings.\n '''\n pipelines = []\n print(pipeline_df)\n stages = [globals()[class_name] for class_name in pipeline_df.columns]\n for index, pipeline in pipeline_df.iterrows():\n steps = []\n for stage in stages:\n stage_index = stages.index(stage)\n func_name = pipeline[stage_index]\n transform = stage(getattr(stage, func_name))\n steps.append((func_name, transform))\n\n pipelines.append(Pipeline(steps))\n print(\"got pipeline\")\n return pipelines\n\ndef evaluate_pipeline(x_train, y_train, pipeline, scorer, kfold):\n '''Takes training data, and a list of pipelines, fits the pipelines to\n the data, scores each pipeline, and returns a dataframe containing\n #return list of the form \"ModName\", \"Coder\", \"Cleaner\",\"ImpName\",\n \"CVResults\",\"Score\"\n '''\n log = \"working on :\" +\"/\"+str(pipeline)+\"\\n\"\n try:\n cv_results = model_selection.cross_validate(pipeline, x_train,\n y_train, cv=kfold,\n scoring=scorer,\n n_jobs=1,\n return_train_score=False)\n return cv_results\n except ValueError as error:\n print(\"cross validation failed.\")\n print(error)\n return float('nan')\n #print(traceback.print_tb(error.__traceback__))\ndef make_submission(dm, coliseum_results, stage):\n '''Create the kaggle format submissions of all possilbe games given the MM seeds.\n The output of this will be submitted to kaggle.'''\n pass\ndef run_tests(dm, stage=1, n_jobs=1):\n '''Run the stage one and stage 2 tests for the kaggle comp.\n Stage 1 consist of predicting the marchmadness outcome for every possible\n team matchup of the seeded teams each year.\n '''\n transform_class_list = [Imputer, Filterer, FeatureSelector, Scaler, Model]\n test_period = \"March Madness\"\n\n #compute log loss scores using 2010-2013 training data. Predict on 2014,\n #2015,2016,2017\n if stage == 'sanity':\n training_years = [2010]\n test_years = [2010]\n\n elif stage == 1:\n training_years = [2010, 2011, 2012, 2013]\n\n test_years = [2014, 2015, 2016, 2017]\n test_period = \"March Madness\"\n\n\n #compute log loss scores using 2010-2017 training data. Predict on 2018\n elif stage == 2:\n training_years = [2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017]\n #The MM data for this stage is not available on computer. 
This will fail\n #to run.\n\n training_years = [2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017]\n test_years = [2018]\n test_period = \"March Madness\"\n\n x_train, y_train = dm.make_training_data_years(training_years, period=\"All\")\n x_test, y_test = dm.make_test_data_years(test_years, period=test_period)\n\n print(\"loaded Data\")\n print(\"Running Coliseum\")\n\n #compare the pipelines using cv.\n coliseum_results = run_coliseum(x_train, y_train, transform_class_list,\n n_jobs=n_jobs)\n coliseum_results.to_csv(\"coliseum_results_stage_\"+str(stage)\n +str(datetime.datetime.today()).split()[0]+\".csv\")\n #print(\"Best Pipelines are:\")\n #slice out the pipelines that had the minimum (mean) cv score on the test\n #data.\n best = coliseum_results[\n coliseum_results['test_score'] == coliseum_results['test_score'].min()]\n #train best models on all training data, and compute total log loss\n # of the predictions on the\n\n y_pred_list = []\n for pipeline in get_pipelines(pd.DataFrame(\n best[0:len(transform_class_list)],\n columns=[t.__name__ for t in transform_class_list])):\n y_pred = pipeline.fit(x_train, y_train).predict(x_test)\n y_pred_list.append(y_pred)\n print(\"total log loss on test data for best model is :\" +str(\n log_loss(y_test, y_pred)))\n #prepare submission for kaggle.\n\n end = time.time()\n print(\"Computation Time: \"+ str(end - start))\n return x_train, y_train, coliseum_results, best, y_pred_list\n\n#A sanity check for the program working.\nif __name__ == '__main__':\n start = time.time()\n if 'dm' not in locals():\n dm = DataManager()\n print(\"Now loading training data.\")\n #x_train, y_train, coliseum_results, best, y_pred_list = run_tests(\n # dm, stage='sanity', n_jobs=8)\n x_train, y_train, coliseum_results, best, y_pred_list = run_tests(dm, stage=1, n_jobs=1)\n #x_train, y_train, coliseum_results, best, y_pred_list = run_tests(dm, stage=2, n_jobs=1)\n\n# print(accuracy_score(Y_validation, predictions))\n# print(confusion_matrix(Y_validation, predictions))\n# print(classification_report(Y_validation, predictions))\n\n #prepare submission for kaggle.\n end = time.time()\n# print(\"Computation Time: \"+ str(end - start))\n# print(\"Best Pipelines are:\")\n \n","sub_path":"High_Scholar_2018_March_Madness/code/coliseum.py","file_name":"coliseum.py","file_ext":"py","file_size_in_byte":11683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"289957575","text":"import os\nimport json\nimport multiprocessing\nimport random\nimport time\nfrom mountainclient import client as mt\nfrom mountainclient import MountainClient\nfrom .computeresourceclient import ComputeResourceClient\nfrom .shellscript import ShellScript\nfrom .temporarydirectory import TemporaryDirectory\nfrom .mountainjob import MountainJob\nfrom .mountainjobresult import MountainJobResult\nimport mtlogging\nfrom copy import deepcopy\n\n# module global\n_realized_files = set()\n_compute_resources_config = dict()\n\n\ndef configComputeResource(name, *, resource_name, collection=None, kachery_name=None, share_id=None):\n if share_id is not None:\n print('WARNING: use kachery_name in configComputeResource (share_id) is deprecated)')\n assert kachery_name is None\n kachery_name = share_id\n if resource_name is not None:\n _compute_resources_config[name] = dict(\n resource_name=resource_name,\n collection=collection,\n kachery_name=kachery_name\n )\n else:\n _compute_resources_config[name] = None\n\n\ndef configComputeResources(obj):\n for key, cr in 
obj.items():\n configComputeResource(\n name=key,\n resource_name=cr.get('resource_name', None),\n collection=cr.get('collection', None),\n share_id=cr.get('share_id', None)\n )\n\n\n@mtlogging.log()\ndef executeBatch(*, jobs, label='', num_workers=None, compute_resource=None, halt_key=None, job_status_key=None, job_result_key=None, srun_opts=None, job_index_file=None, cached_results_only=False, download_outputs=True, _job_handler=None):\n all_kwargs = locals()\n\n if len(jobs) == 0:\n return []\n\n if num_workers == 1:\n num_workers = None\n if not srun_opts:\n srun_opts = None\n\n if num_workers is not None:\n if compute_resource is not None:\n raise Exception('Cannot specify both num_workers and compute_resource in executeBatch.')\n if job_index_file is not None:\n raise Exception('Cannot specify both num_workers and job_index_file in executeBatch.')\n if compute_resource is not None:\n if srun_opts is not None:\n raise Exception('Cannot specify both compute_resource and srun_opts in executeBatch.')\n if job_index_file is not None:\n raise Exception('Cannot specify both compute_resource and job_index_file in executeBatch.')\n if srun_opts is not None:\n if job_index_file is not None:\n raise Exception('Cannot specify both srun_opts and job_index_file in executeBatch.')\n\n if type(compute_resource) == str:\n if compute_resource in _compute_resources_config:\n compute_resource = _compute_resources_config[compute_resource]\n else:\n raise Exception('No compute resource named {}. Use mlprocessors.configComputeResource(\"{}\",...).'.format(compute_resource, compute_resource))\n\n if type(compute_resource) == dict:\n if compute_resource['resource_name'] is None:\n compute_resource = None\n\n if compute_resource or srun_opts:\n if compute_resource:\n mtlogging.sublog('checking-for-cached-results-prior-to-sending-to-compute-resource')\n print('Checking for cached results prior to sending to compute resource...')\n elif srun_opts:\n mtlogging.sublog('checking-for-cached-results-prior-to-using-srun')\n print('Checking for cached results prior to using srun...')\n kwargs0 = all_kwargs\n kwargs0['compute_resource'] = None\n kwargs0['cached_results_only'] = True\n kwargs0['num_workers'] = 10 # check it in parallel\n # kwargs0['num_workers'] = None # for timing, do not check in parallel\n kwargs0['srun_opts'] = None\n results0 = executeBatch(**kwargs0)\n all_complete = True\n num_found = 0\n for ii, job in enumerate(jobs):\n if results0[ii].retcode is not None:\n num_found = num_found + 1\n job.result.fromObject(results0[ii].getObject())\n else:\n all_complete = False\n if num_found > 0:\n print('Found {} of {} cached results'.format(num_found, len(jobs)))\n if all_complete:\n return results0\n mtlogging.sublog(None)\n\n jobs2 = [job for job in jobs if job.result.retcode is None]\n\n if compute_resource:\n for job in jobs2:\n job.useRemoteUrlsForInputFiles()\n\n if _job_handler is not None:\n for job in jobs2:\n setattr(job, 'job_handler', _job_handler)\n\n files_to_realize = []\n for job in jobs2:\n files_to_realize.extend(job.getFilesToRealize())\n files_to_realize = list(set(files_to_realize))\n\n local_client = MountainClient()\n\n if compute_resource:\n # print('Ensuring files are available on remote server...')\n # mtlogging.sublog('ensuring-files-remote')\n # collection=compute_resource.get('collection', None)\n # upload_to=compute_resource.get('kachery_name', None)\n # for fname in files_to_realize:\n # if fname.startswith('sha1://'):\n # if local_client.findFile(path=fname):\n # 
mt.saveFile(path=fname, collection=collection, upload_to=upload_to)\n # elif fname.startswith('kbucket://') or fname.startswith('sha1dir://'):\n # # todo: in case of sha1dir, save the dir\n # pass\n # else:\n # mt.saveFile(path=fname, collection=collection, upload_to=upload_to)\n\n mtlogging.sublog('initializing-batch')\n\n args = deepcopy(compute_resource)\n if 'share_id' in args:\n args['kachery_name'] = args['share_id']\n del args['share_id']\n CRC = ComputeResourceClient(**args)\n\n batch_id = CRC.initializeBatch(jobs=jobs2, label=label)\n CRC.startBatch(batch_id=batch_id)\n mtlogging.sublog('running-batch')\n try:\n CRC.monitorBatch(batch_id=batch_id, jobs=jobs2, label=label)\n except:\n CRC.stopBatch(batch_id=batch_id)\n raise\n\n mtlogging.sublog('getting-batch-results')\n results = CRC.getBatchJobResults(batch_id=batch_id)\n if results is None:\n raise Exception('Unable to get batch results.')\n for i, job2 in enumerate(jobs2):\n result0 = results[i]\n if result0:\n job2.result.fromObject(result0.getObject())\n else:\n raise Exception('Unexpected: Unable to find result for job {}'.format(i))\n\n mtlogging.sublog('realizing-outputs')\n # Download outputs to local computer\n if download_outputs:\n download_from = compute_resource.get('kachery_name', None)\n for ii, result in enumerate(results):\n if result and (result.retcode == 0):\n for output_name, output_path in result.outputs.items():\n if not local_client.realizeFile(path=output_path):\n print('Downloading output {} {} ...'.format(output_name, output_path))\n local_path = mt.realizeFile(path=output_path, download_from=download_from)\n if not local_path:\n raise Exception('Unable to realize output {} from {}'.format(output_name, output_path))\n if not local_client.realizeFile(path=result.console_out):\n print('Downloading console output {}...'.format(result.console_out))\n local_path = mt.realizeFile(path=result.console_out, download_from=download_from)\n if not local_path:\n raise Exception('Unable to realize console output from {}'.format(output_name))\n\n mtlogging.sublog('caching-results-locally')\n # save results to local cache\n for ii, result in enumerate(results):\n if result and (result.retcode == 0):\n jobs2[ii].storeResultInCache(result)\n\n return [job.result for job in jobs]\n\n # Not using compute resource, do this locally\n if not cached_results_only:\n mtlogging.sublog('realizing-files')\n if job_index_file is None:\n print('Making sure files are available on local computer...')\n for fname in files_to_realize:\n print('Realizing {}...'.format(fname))\n mt.realizeFile(path=fname)\n mtlogging.sublog(None)\n\n if srun_opts is None:\n for job_index, job in enumerate(jobs2):\n setattr(job, 'halt_key', halt_key)\n setattr(job, 'job_status_key', job_status_key)\n setattr(job, 'job_index', job_index)\n setattr(job, 'job_result_key', job_result_key)\n job.setUseCachedResultsOnly(cached_results_only)\n\n if num_workers is not None:\n pool = multiprocessing.Pool(num_workers)\n results2 = pool.map(_execute_job, jobs2)\n pool.close()\n pool.join()\n else:\n results2 = []\n if job_index_file is None:\n for job in jobs2:\n results2.append(_execute_job(job))\n else:\n while True:\n job_index = _take_next_batch_job_index_to_run(job_index_file)\n if job_index < len(jobs2):\n print('Executing job {}'.format(job_index))\n _execute_job(jobs2[job_index])\n else:\n break\n return None\n\n for i, job in enumerate(jobs2):\n job.result.fromObject(results2[i].getObject())\n else:\n # using srun\n with TemporaryDirectory(remove=True) as 
temp_path:\n local_client = MountainClient()\n job_objects = [job.getObject() for job in jobs2]\n jobs_path = os.path.join(temp_path, 'jobs.json')\n job_index_file = os.path.join(temp_path, 'job_index.txt')\n with open(job_index_file, 'w') as f:\n f.write('0')\n local_client.saveObject(object=job_objects, dest_path=jobs_path)\n if job_result_key is None:\n job_result_key = dict(\n name='executebatch_job_result',\n randid=_random_string(8)\n )\n srun_py_script = ShellScript(\"\"\"\n #!/usr/bin/env python\n\n from mlprocessors import executeBatch\n from mountaintools import MountainClient\n from mlprocessors import MountainJob\n\n local_client = MountainClient()\n\n job_objects = local_client.loadObject(path = '{jobs_path}')\n jobs = [MountainJob(job_object=obj) for obj in job_objects]\n\n executeBatch(jobs=jobs, label='{label}', num_workers=None, compute_resource=None, halt_key={halt_key}, job_status_key={job_status_key}, job_result_key={job_result_key}, srun_opts=None, job_index_file='{job_index_file}', cached_results_only={cached_results_only})\n \"\"\", script_path=os.path.join(temp_path, 'execute_batch_srun.py'), keep_temp_files=keep_temp_files)\n srun_py_script.substitute('{jobs_path}', jobs_path)\n srun_py_script.substitute('{label}', label)\n if halt_key:\n srun_py_script.substitute('{halt_key}', json.dumps(halt_key))\n else:\n srun_py_script.substitute('{halt_key}', 'None')\n if job_status_key:\n srun_py_script.substitute('{job_status_key}', json.dumps(job_status_key))\n else:\n srun_py_script.substitute('{job_status_key}', 'None')\n if job_result_key:\n srun_py_script.substitute('{job_result_key}', json.dumps(job_result_key))\n else:\n srun_py_script.substitute('{job_result_key}', 'None')\n srun_py_script.substitute('{cached_results_only}', str(cached_results_only))\n srun_py_script.substitute('{job_index_file}', job_index_file)\n srun_py_script.write()\n\n srun_opts_adjusted, num_workers_adjusted = _adjust_srun_opts_for_num_jobs(srun_opts, num_workers or 1, len(jobs2))\n\n print('USING SRUN OPTS: {}'.format(srun_opts_adjusted))\n print('USING NUM SIMULTANEOUS SRUN CALLS: {}'.format(num_workers_adjusted))\n\n srun_sh_scripts = []\n for ii in range(num_workers_adjusted):\n if srun_opts is not 'fake':\n srun_sh_script = ShellScript(\"\"\"\n #!/bin/bash\n set -e\n\n srun {srun_opts} {srun_py_script}\n \"\"\", keep_temp_files=keep_temp_files)\n else:\n srun_sh_script = ShellScript(\"\"\"\n #!/bin/bash\n set -e\n\n {srun_py_script}\n \"\"\", keep_temp_files=keep_temp_files)\n srun_sh_script.substitute('{srun_opts}', srun_opts_adjusted)\n srun_sh_script.substitute('{srun_py_script}', srun_py_script.scriptPath())\n srun_sh_scripts.append(srun_sh_script)\n\n for srun_sh_script in srun_sh_scripts:\n srun_sh_script.start()\n for srun_sh_script in srun_sh_scripts:\n while srun_sh_script.isRunning():\n srun_sh_script.wait(5)\n if srun_sh_script.returnCode() != 0:\n print('Non-zero return code for srun script. 
Stopping scripts...')\n for srun_sh_script in srun_sh_scripts:\n srun_sh_script.stop()\n raise Exception('Non-zero return code for srun script.')\n\n result_objects = []\n for ii, job in enumerate(jobs2):\n print('Loading result object...', job_result_key, str(ii))\n num_tries = 0\n while True:\n result_object = local_client.loadObject(key=job_result_key, subkey=str(ii))\n if (result_object is None) and (not cached_results_only):\n print('Problem loading result....', job_result_key, str(ii))\n print('=====================', local_client.getValue(key=job_result_key, subkey='-'))\n print('=====================', local_client.getValue(key=job_result_key, subkey=str(ii)))\n num_tries = num_tries + 1\n if num_tries >= 3:\n raise Exception('Unable to load result object after {} tries.')\n print('Retrying...')\n time.sleep(1)\n else:\n print('Loaded result object...', job_result_key, str(ii))\n break\n result_objects.append(result_object)\n results2 = [MountainJobResult(result_object=obj) for obj in result_objects]\n for i, job in enumerate(jobs2):\n job.result.fromObject(results2[i].getObject())\n\n return [job.result for job in jobs]\n\n\ndef _take_next_batch_job_index_to_run(job_index_file):\n while True:\n time.sleep(random.uniform(0, 0.1))\n fname2 = _attempt_lock_file(job_index_file)\n if fname2:\n index = int(_read_text_file(fname2))\n _write_text_file(fname2, '{}'.format(index + 1))\n os.rename(fname2, job_index_file) # put it back\n return index\n\n\ndef _attempt_lock_file(fname):\n if os.path.exists(fname):\n fname2 = fname + '.lock.' + _random_string(6)\n try:\n os.rename(fname, fname2)\n except:\n return False\n if os.path.exists(fname2):\n return fname2\n\n\ndef _set_job_status(job, status):\n local_client = MountainClient()\n job_status_key = getattr(job, 'job_status_key', None)\n job_index = getattr(job, 'job_index', None)\n if job_status_key:\n subkey = str(job_index)\n local_client.setValue(key=job_status_key, subkey=subkey, value=status)\n\n\ndef _set_job_result(job, result_object):\n local_client = MountainClient()\n job_result_key = getattr(job, 'job_result_key', None)\n job_index = getattr(job, 'job_index', None)\n if job_result_key:\n subkey = str(job_index)\n num_tries = 0\n while True:\n print('Saving result object...')\n local_client.saveObject(key=job_result_key, subkey=subkey, object=result_object)\n testing = local_client.loadObject(key=job_result_key, subkey=subkey)\n if result_object and (testing is None):\n print('WARNING: Problem loading object immediately after saving....')\n print('==== value', local_client.getValue(key=job_result_key, subkey=subkey))\n print('==== object', local_client.loadObject(key=job_result_key, subkey=subkey))\n print(result_object)\n num_tries = num_tries + 1\n if num_tries >= 3:\n raise Exception('Unexpected: Problem loading object immediately after saving')\n else:\n print('retrying...')\n else:\n # we are good\n break\n\n\n@mtlogging.log()\ndef _execute_job(job):\n local_client = MountainClient()\n halt_key = getattr(job, 'halt_key', None)\n if halt_key:\n halt_val = local_client.getValue(key=halt_key)\n if halt_val:\n raise Exception('Batch halted.')\n\n _set_job_status(job, 'running')\n\n if hasattr(job, 'job_handler'):\n result = job.job_handler.executeJob(job)\n else:\n result = job.execute()\n\n if result:\n if result.retcode == 0:\n _set_job_status(job, 'finished')\n else:\n _set_job_status(job, 'error')\n _set_job_result(job, result.getObject())\n else:\n _set_job_status(job, 'result-not-found')\n\n return result\n\n\ndef 
_adjust_srun_opts_for_num_jobs(srun_opts, num_workers, num_jobs):\n vals = srun_opts.split()\n for i in range(len(vals)):\n if vals[i] == '-n' and (i + 1 < len(vals)):\n nval = int(vals[i + 1])\n if num_jobs <= nval:\n nval = num_jobs\n num_workers = 1\n elif num_jobs <= nval * (num_workers - 1):\n num_workers = int((num_jobs - 1) / nval) + 1\n vals[i + 1] = str(nval)\n return ' '.join(vals), num_workers\n\n\ndef _random_string(num_chars):\n chars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'\n return ''.join(random.choice(chars) for _ in range(num_chars))\n\n\ndef _write_text_file(fname, txt):\n with open(fname, 'w') as f:\n f.write(txt)\n\n\ndef _read_text_file(fname):\n with open(fname, 'r') as f:\n return f.read()\n","sub_path":"mountaintools/mlprocessors/executebatch.py","file_name":"executebatch.py","file_ext":"py","file_size_in_byte":18896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"277679876","text":"#! /usr/bin/python3\n\"\"\"\nConverts Cyrillic script to Latin.\n\nUse:\n\ncyrillic_text = 'На ливади коњ ућустечен и расћустечен!'\n\nconverted = CirConv(text=cyrillic_text)\nconverted.convert_to_latin()\n\n# Also: converted.convert_to_cyrillic()\n\nprint(converted.result)\n> Na livadi konj ućustečen i rasćustečen!\n\n\"\"\"\n\n#\n# Copyright (C) 2011 Romeo Mlinar (mlinar [a] languagebits.com)\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n\n\n__version__ = '1.5'\n__url__ = \"https://gitorious.org/dtknv\"\n__author__ = \"Romeo Mlinar\"\n__license__ = \"GNU General Public License v. 3\"\n\nimport os\nimport codecs\nimport json\n\ncyr = {'А':'A', 'Б':'B', 'В':'V', 'Г':'G', 'Д':'D', 'Е':'E',\n 'Ж':'Ž', 'З':'Z', 'И':'I', 'Ј':'J', 'К':'K', 'Л':'L',\n 'М':'M', 'Н':'N', 'Њ':'Nj','О':'O', 'П':'P', 'Р':'R',\n 'С':'S', 'Т':'T', 'Ћ':'Ć', 'У':'U', 'Ф':'F', 'Х':'H',\n 'Ц':'C', 'Ч':'Č', 'Џ':'Dž','Ш':'Š', 'Ђ':'Đ', 'Љ':'Lj',\n 'а':'a', 'б':'b', 'в':'v', 'г':'g', 'д':'d', 'е':'e',\n 'ж':'ž', 'з':'z', 'и':'i', 'ј':'j', 'к':'k', 'л':'l',\n 'љ':'lj','м':'m', 'н':'n', 'њ':'nj', 'о':'o', 'п':'p',\n 'р':'r', 'с':'s', 'т':'t', 'ћ':'ć', 'у':'u', 'ф':'f',\n 'х':'h', 'ц':'c', 'ч':'č', 'џ':'dž','ш':'š', 'ђ':'đ'}\n\nlat_resolutions = {'NJ':'Њ',\n 'Nj':'Њ',\n 'nJ':'нЈ',\n 'LJ':'Љ',\n 'Lj':'љ',\n 'lJ':'лЈ',\n 'DŽ':'Џ',\n 'Dž':'Џ',\n 'dŽ':'дЖ',}\n\ntwo_char = {'Њ':'NJ', 'Џ':'DŽ', 'Љ':'LJ'}\n\n# Characters that can follow capital letter. 
TODO: Options\n# for this?\nINTERPUNCTION_CAPLETTER = \"!?.'„“\" + '\"' + \" \" + \"»«–…\"\n\nstandard_exc = \\\n{'injekci': 'инјекци',\n 'konjuga': 'конјуга',\n 'nadživlj': 'надживљ',\n 'nadžnje': 'наджње',\n 'odživlj': 'одживљ',\n 'odživljen': 'одживљен',\n 'podžnjeti': 'поджњети'}\n\nclass Replace:\n \"\"\"\n Loads and saves strings that need to have different\n conversion rules.\n \"\"\"\n \n def __init__(self, f=False):\n \"\"\"\n Load and save file strings\n \"\"\"\n pass\n\n\n def load(self, f):\n \"\"\"\n Load a JSON file.\n \"\"\"\n return(self._load(f))\n \n\n def _load(self, f):\n \"\"\"\n Load a JSON file.\n \"\"\"\n # TODO: Add more elaborate check and\n # introduce a warning.\n with open(f, mode='r', encoding='utf-8') as f:\n c = json.load(f)\n return(c)\n\n def save(self, f, exc):\n \"\"\"\n Save a JSON file.\n \"\"\"\n with open(f, mode='w', encoding='utf-8') as f:\n json.dump(exc, f)\n\n\nclass CirConv:\n \"\"\"\n Converts Cyrillic script to Latin and vice versa.\n \"\"\"\n \n def __init__(self, text='', stats=False, exception_files=[], \n variants=False,\n path=False):\n \"\"\"\n text - text to be converted\n stats - true if statistics is to be calaculated\n exceptions - list of files with the exception strings\n \"\"\"\n # Raise TypeError if 'text' is not a character\n # object.\n if not isinstance(text, str):\n raise TypeError('CirConv accepts text only, %s is rejected.' \\\n % type(text))\n # Variables\n self.path = path\n self.text = text\n self.exception_elements = []\n # Exceptions strings. Don't load if path is\n # not present.\n if path and len(exception_files):\n self.load_exceptions(exception_files)\n else:\n self.exception_elements.append(standard_exc)\n # Variants?\n if variants and len(exception_files):\n self._make_variants()\n # Make character maps.\n self._make_charkeys()\n \n\n def load_exceptions(self, flist):\n \"\"\"\n Load exceptions strings from flist files.\n \"\"\"\n self.exception_elements = []\n if isinstance(flist, str):\n f = os.path.join(self.path, flist)\n exc_content = self._load_exc_file(f)\n if exc_content:\n self.exception_elements.append(exc_content)\n else:\n paths = [os.path.join(self.path, i) for i in flist]\n for f in paths:\n exc_content = self._load_exc_file(f)\n if exc_content:\n self.exception_elements.append(exc_content)\n \n \n def _load_exc_file(self, f):\n \"\"\"\n Load exception file or return false if\n there was an error.\n \"\"\"\n try:\n exc_content = Replace().load(f)\n except:\n exc_content = False\n \n return(exc_content)\n \n \n def _make_variants(self):\n \"\"\"\n Make variants of the words.\n \n TODO: finish this\n\n \"\"\"\n pass\n # variants = []\n # for word in words:\n # variants.append(word.upper())\n # variants.append(word.capitalize())\n # return variants\n\n\n def convert_to_latin(self):\n \"\"\"\n Convert the text and place it into .result. No return.\n \"\"\"\n self.result = self._charreplace(self.text, mode='tolat')\n\n\n def convert_to_cyrillic(self):\n \"\"\"\n Convert the text and place it into .result. No return.\n \"\"\"\n self.result = self._charreplace(self.text, mode='tocyr')\n\n \n def convert(self, text, prepare=False):\n \"\"\"\n If text is in Cyrillic, convert it to Latin and\n vice versa. 
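        A hypothetical call (the word is illustrative; note that\n        _excreplace also prints a debug line):\n\n            CirConv().convert('коњ')   # -> 'konj'\n        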
\n \n Return the text.\n\n Does not use public text or conversion functions.\n \"\"\"\n if self.is_all_cyrillic(text):\n return self._charreplace(text, mode='tolat')\n elif self.is_all_latin(text):\n return self._charreplace(text, mode='tocyr')\n else:\n raise ValueError(\"Method does not accept mixed-script text.\")\n \n\n def is_all_cyrillic(self, text=None):\n \"\"\"\n Return true if all chars are Cyrillic.\n \"\"\"\n return self._is_all_textscript(text, 'lat')\n\n\n def is_all_latin(self, text=None):\n \"\"\"\n Return true if all chars are Latin.\n \"\"\"\n return self._is_all_textscript(text, 'cyr')\n\n\n def _is_all_textscript(self, text, script):\n \"\"\"\n Return true if all chars are Cyrillic/Latin\n \"\"\"\n # Check if the argumets are valid\n if script not in ('lat', 'cyr'):\n ValueError('script must be \"lat\" or \"cyr\"')\n # If no text is provide, check self.text\n if text == None: \n text = self.text\n # Character sets\n characters = getattr(self, 'charmap_to%s' % script)\n for i in text:\n if i not in characters.keys():\n return False\n return True\n\n \n def _make_charkeys(self):\n \"\"\"\n Make dictionaries for character replacement.\n \"\"\"\n self.charmap_tolat = cyr\n self.charmap_tocyr = dict([v,k] for k,v in cyr.items())\n \n\n def _prepare_for_cyrillic(self, text):\n \"\"\"\n Prepare text for conversion to Cyrillic.\n\n For example, capitalised \"NJEGOŠ\" is \"ЊЕГОШ\". The conversion\n without this method would be invalid \"НЈЕГОШ\".\n\n Uses lat_resolutions dictionary.\n \"\"\"\n lat_keys = lat_resolutions.keys()\n for letter in lat_keys:\n if letter in text:\n text = text.replace(letter, lat_resolutions[letter])\n return text\n\n\n def _prepare_for_latin(self, text):\n \"\"\"\n Prepare text for conversion to Latin.\n\n For example, capitalised \"ЊЕГОШ\" is \"NJEGOŠ\". The conversion\n without this method would be invalid \"NjEGOŠ\". The first form\n is required by the grammar of Serbian.\n \"\"\"\n for letter in two_char.keys():\n for i in range(text.count(letter)):\n letter_position = text.find(letter)\n if self._cap_check(text, letter_position):\n text = text.replace(letter, two_char[letter])\n return text\n\n\n def _cap_check(self, text, position):\n \"\"\"\n Returns true is the character at position+1 is capitalised.\n This method should contain more detailed checks.\n \"\"\"\n text_check = False\n try:\n # In case the letter in quesiton is at the end\n # of a sentence:\n if (text[position+1] in INTERPUNCTION_CAPLETTER) and \\\n text[position-1].isupper():\n return(text[position-1].isupper())\n except:\n pass\n\n try:\n # Is the letter at the end of a word?\n # I.e. 
KONJ.\n text_check = text[position+1].isupper()\n except IndexError:\n # Probably is:\n text_check = text[position-1].isupper()\n return(text_check)\n\n \n def _excreplace(self, text):\n \"\"\"\n Replace custom strings.\n \"\"\"\n print(\"I got\", text)\n # Go throught self.exceptions list, that holds\n # all dictionaries correspondng to files loaded\n # by Replace in __init__.\n for exception_dictionary in self.exception_elements:\n # Go through all keys of a dictionary.\n for string_search in exception_dictionary.keys():\n # If key is found in text, replace it\n # by the corresponding value.\n if string_search in text:\n text = text.replace(string_search, \n exception_dictionary[string_search])\n return text\n\n\n def _charreplace(self, text, mode):\n \"\"\"\n Replace characters in the input text.\n \"\"\"\n # Replace custom strings (\"exceptions\")\n text = self._excreplace(text)\n # Create lists and dictionary\n if mode == 'tocyr':\n charkeys = self.charmap_tocyr.keys()\n charmap = self.charmap_tocyr\n text = self._prepare_for_cyrillic(text)\n elif mode == 'tolat':\n charkeys = self.charmap_tolat.keys()\n charmap = self.charmap_tolat\n text = self._prepare_for_latin(text)\n else:\n raise ValueError(\"Mode must be 'tocyr' or 'tolat'.\")\n # Replace the characters\n for letter in charkeys:\n if letter in text:\n text = text.replace(letter, charmap[letter])\n return text\n\n","sub_path":"cyrconv.py","file_name":"cyrconv.py","file_ext":"py","file_size_in_byte":11249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"147828632","text":"from COMPS.Data import WorkItem\nfrom COMPS.Data.WorkItem import WorkItemState\n\nfrom simtools.Services.ObejctCatelog.ObjectInfoSvc import ObjectInfoSvc\nfrom simtools.Utilities.COMPSUtilities import COMPS_login\n\n\nclass RunSvc:\n\n @staticmethod\n def run(item_id):\n info = ObjectInfoSvc.get_item_info(item_id)\n\n if info:\n endpoint = info[\"provider_info\"].get(\"endpoint\", None)\n if info['type'] == 'WI' and info['provider'] == 'COMPS':\n COMPS_login(endpoint)\n wi = WorkItem.get(item_id)\n if wi.state == WorkItemState.Created:\n wi.commission()\n\n","sub_path":"simtools/Services/Run/RunSvc.py","file_name":"RunSvc.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"638016361","text":"import random\nimport sys\n\nimport logbook\n\napp_log = logbook.Logger('App')\n\n\ndef logging_register():\n level = logbook.TRACE\n log_filename = None\n\n if not log_filename:\n logbook.StreamHandler(sys.stdout, level=level).push_application()\n else:\n logbook.TimedRotatingFileHandler(log_filename, level=level).push_application()\n\n\nclass Roll:\n rolls = {\n 'paper': {'loose_to': 'scissors', 'win': 'rock'},\n 'rock': {'loose_to': 'paper', 'win': 'scissors'},\n 'scissors': {'loose_to': 'rock', 'win': 'paper'}\n }\n\n # def __init__(self, name, loose, win):\n # self.__name = name\n # self.__loose = loose\n # self.__win = win\n #\n # def get_name(self):\n # return self.__name.upper()\n\n # def __str__(self):\n # return self.str_rep()\n\n # def str_rep(self):\n # return f'Roll: {Roll.rolls}' #, Loose to: {self.__loose}, Win over: {self.__win}'\n\n\nclass Player:\n def __init__(self, name, score):\n self.__name = name\n self.score = score\n\n def get_name(self):\n return self.__name\n\n def get_score(self):\n return self.score\n\n\nclass Game:\n def __init__(self):\n self.rolls = Roll.rolls\n self.player1 = None\n 
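        # player1 is created later in start_game(); the computer opponent\n        # starts with a score of 0\n        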
self.player2 = Player('Computer', 0)\n\n def start_game(self):\n app_log.notice('Game started')\n\n name = input(\"Player1, your name is: \").capitalize()\n\n self.player1 = Player(name, 0)\n # self.player2 = Player('Computer')\n\n return '{}, {}'.format(self.player1, self.player2)\n\n def __get_roll(self, player):\n rps = self.rolls.keys()\n if player.get_name() != 'Computer':\n print()\n print(f'Choose rolls from:{list(rps)}')\n roll = input(f'{player.get_name()} your roll: ').lower()\n else:\n roll = random.choice(list(rps))\n\n return roll\n\n def game_round(self):\n round_number = 3\n count = 0\n\n while count != round_number:\n print()\n print(f\"----------Round {count + 1}----------\")\n try:\n player1_roll = self.__get_roll(self.player1)\n print(f'{self.player1.get_name()} rolls {player1_roll}')\n player2_roll = self.__get_roll(self.player2)\n print(f'{self.player2.get_name()} rolls {player2_roll}')\n\n # Get a winner of the round\n self.define_round_winner(player1_roll, player2_roll, self.rolls)\n count += 1\n except KeyError:\n app_log.error('Key Error')\n print('Sorry, you typed a wrong roll. Try it again.')\n continue\n\n # Get a result of the Game\n self.define_game_winner(self.player1.get_score(), self.player2.get_score())\n\n def define_round_winner(self, roll1, roll2, rolls):\n print()\n\n if roll2 == roll1:\n print(\"It's a tie\")\n print(f'{self.player1.get_name()}: {self.player1.get_score()}; '\n f'{self.player2.get_name()}: {self.player2.get_score()}')\n\n return False\n\n elif roll2 == rolls[roll1]['win']:\n self.player1.score += 1\n print(f'{self.player2.get_name()} lost to {self.player1.get_name()}')\n print(f'{self.player1.get_name()}: {self.player1.get_score()}, '\n f'{self.player2.get_name()}: {self.player2.get_score()}')\n\n return True\n\n else:\n self.player2.score += 1\n print(f'{self.player1.get_name()} lost to {self.player2.get_name()}')\n print(f'{self.player1.get_name()}: {self.player1.get_score()}, '\n f'{self.player2.get_name()}: {self.player2.get_score()}')\n\n return True\n\n def define_game_winner(self, pl1_score, pl2_score):\n print()\n print(\"-------------------------\")\n\n if pl1_score == pl2_score:\n print(\"Congratulations. It's tie!\")\n return True\n elif pl1_score > pl2_score:\n print(f\"{self.player1.get_name()} won the game! \")\n return True\n else:\n print(f\"{self.player2.get_name()} won the game! 
\")\n return True\n\n# game1 = Game()\n# game1.start_game()\n# game1.game_round()\n","sub_path":"src/Week2/Day13/rules.py","file_name":"rules.py","file_ext":"py","file_size_in_byte":4272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"232674796","text":"import tensorflow as tf\nimport gym\nimport numpy\n\n\nclass l_p():\n\tdef __init__(self):\n\t\tself.graph=tf.Graph()\n\t\tself.sess = tf.Session(graph=self.graph)\n\t\twith self.graph.as_default():\n\t\t\tsaver = tf.train.import_meta_graph('Lunar_dqn/models/my_dqn_model-1000.meta',clear_devices=True)\n\t\t\tsaver.restore(self.sess, 'Lunar_dqn/models/my_dqn_model-1000')\n\t\t\tself.state=self.graph.get_tensor_by_name(\"Placeholder:0\")\n\t\t\tself.is_training_ph=self.graph.get_tensor_by_name(\"Placeholder_5:0\")\n\t\t\tself.Q = self.graph.get_tensor_by_name(\"q_network/dense_3/BiasAdd:0\")\n\n\tdef get_q_values(self,state):\n\t\tstate=state.reshape(1,len(state))\n\t\treturn self.sess.run(self.Q,feed_dict={self.state:state,self.is_training_ph:False })\n\n\tdef get_a_star(self,state):\n\t\tvals=self.get_q_values(state)[0]\n\t\treturn numpy.argmax(vals)\n\n'''\nenv=gym.make(\"LunarLander-v2\")\ns=env.reset()\npolicy=l_p()\na=policy.get_a_star(s)\n'''","sub_path":"experiments/Lunar_dqn/lunar_demonstrator.py","file_name":"lunar_demonstrator.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"135430093","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Author: chaomy\n# @Date: 2017-07-05 08:12:30\n# @Last Modified by: chaomy\n# @Last Modified time: 2018-11-19 22:16:21\n\nfrom numpy import loadtxt, linspace\nimport os\nfrom math import cos, sin\nfrom math import sqrt\nfrom itertools import cycle\nimport numpy as np\n\n\nclass md_gb_pos(object):\n\n def loop_plt_angle(self):\n self.find_angles_1100()\n\n dd = np.zeros([len(self.ag) + 2, 2])\n dd[0, 0], dd[0, 1] = 0.0, 0.0\n dd[-1, 0], dd[-1, 1] = 90.0, 0.0\n\n total = np.ndarray([len(self.ag), 2])\n\n for e, i in zip(self.ag, range(len(self.ag))):\n mdir = \"1100_{:.2f}\".format(e[0])\n dd[i + 1, 0] = e[0]\n dd[i + 1, 1] = np.loadtxt(\"{}/lmp.dat\".format(mdir)) \n\n total[i, 0] = e[0]\n total[i, 1] = dd[i + 1, 1] \n\n np.savetxt('data_dirct.txt', total, fmt='%1.8f')\n print(dd)\n\n self.set_111plt((9, 6))\n self.set_keys()\n self.ax.plot(dd[1:-1, 0], 1e3 * dd[1:-1, 1], 'o--',\n markersize=14, label='<100> GB')\n # ddp **next(self.keysiter)\n\n self.add_legends(self.ax)\n\n ylb = cycle(['GB[001] mJ/m^2'])\n xlb = cycle(['Angle (deg)'])\n\n self.add_y_labels(ylb, self.ax)\n self.add_x_labels(xlb, self.ax)\n self.set_tick_size(self.ax)\n self.fig.savefig('fig_gb.png', **self.figsave)\n","sub_path":"gb/cal_md_gb_pos.py","file_name":"cal_md_gb_pos.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"53617169","text":"# -*- coding: utf-8 -*-\n#%% IMPORTS\nimport time\nfrom collections import OrderedDict\nimport os\nimport time\nimport numpy as np\nimport random\nfrom tqdm import tqdm\nfrom matplotlib import pyplot as plt\nimport sys\nfrom PIL import Image\nimport pandas as pd\n\nimport torch.optim as optim\nimport torch.utils.data\nimport torchvision.utils as vutils\nfrom torchvision import transforms as Transforms\nfrom torch.autograd import Variable\n\nfrom libraries.model.ganomaly_network import GanomalyModel\nfrom libraries.model.evaluate 
import evaluate\nfrom libraries.utils import EarlyStopping, saveInfoGanomaly, addInfoGanomaly, LR_decay\nfrom libraries.utils import Paths, ensure_folder, getNmeans\npaths = Paths()\n\nfrom libraries.dataset_package.dataset_manager import generatePatches\n\n#%% CONSTANTS\nGENERATOR = 'GENERATOR'\nDISCRIMINATOR = 'DISCRIMINATOR'\n\ndevice = torch.device('cuda:0')\n#%%\n\ndef loadModel(filename):\n \n model_name = filename.split('_')[0] + '_' + filename.split('_')[1]\n path_file = paths.checkpoint_folder + model_name + '/' + filename\n \n return torch.load(path_file)\n\nclass AnomalyDetectionModel():\n \n def __init__(self, opt, optim_gen, optim_discr,\n trainloader=None, validationloader=None):\n super().__init__()\n \n self.model = GanomalyModel(opt)\n optimizer_gen = optim_gen(self.model.generator.parameters(), opt.lr_gen)\n optimizer_discr = optim_discr(self.model.discriminator.parameters(), opt.lr_discr)\n self.model.init_optim(optimizer_gen, optimizer_discr)\n self.trainloader = trainloader\n self.validationloader = validationloader\n self.opt = opt\n \n def loadTrainloader(self, trainloader):\n self.trainloader = trainloader\n \n def loadValidationLoader(self, validationloader):\n self.validationloader = validationloader\n \n def get_loss(self):\n \n losses = OrderedDict([\n \n ('loss_gen', self.model.err_gen.item()),\n ('loss_discr', self.model.err_discr.item()),\n ('loss_gen_adv', self.model.err_gen_adv.item()),\n ('loss_gen_con', self.model.err_gen_con.item()),\n ('loss_gen_enc', self.model.err_gen_enc.item()),\n \n ])\n \n return losses\n\n def _trainOneEpoch(self): \n\n self.model.train()\n curr_epoch = 0\n steps = 0\n train_loss = {}\n train_loss[GENERATOR] = []\n train_loss[DISCRIMINATOR] = []\n \n adv_loss = []\n con_loss = []\n enc_loss = []\n \n \n n_iter = len(self.trainloader)\n# printing_freq = n_iter // self.opt.loss_per_epoch\n# print(n_iter)\n start = time.time()\n \n for images, labels in tqdm(self.trainloader, leave=True, total=n_iter, desc='Training', file = sys.stdout):\n \n steps += 1\n curr_epoch += self.opt.batch_size\n \n x = Variable(images).cuda()\n \n # GENERATOR FORWARD\n x_prime, z, z_prime = self.model.forward_gen(x)\n\n # DISCRIMINATOR FORWARD\n pred_real, feat_real, pred_fake, feat_fake = self.model.forward_discr(x, x_prime) \n \n \n # GENERATOR LOSS\n loss_gen, _, losses = self.model.loss_function_gen(x, x_prime, z, z_prime, feat_fake, feat_real, self.opt)\n # DISCRIMINATOR LOSS\n loss_discr = self.model.loss_function_discr(pred_real, pred_fake)\n \n # BACKWARDS\n self.model.optimize_gen(loss_gen)\n self.model.optimize_discr(loss_discr)\n \n train_loss[GENERATOR].append(loss_gen.item()*images.size(0))\n train_loss[DISCRIMINATOR].append(loss_discr.item()*images.size(0))\n adv_loss.append(losses[0].item()*images.size(0))\n con_loss.append(losses[1].item()*images.size(0))\n enc_loss.append(losses[2].item()*images.size(0))\n \n spent_time = time.time() - start\n \n return train_loss, [adv_loss, con_loss, enc_loss], spent_time\n \n def _validation(self):\n \n curr_epoch = 0\n steps = 0\n \n valid_loss = {}\n valid_loss[GENERATOR] = []\n valid_loss[DISCRIMINATOR] = []\n \n adv_loss = []\n con_loss = []\n enc_loss = []\n \n n_iter = len(self.validationloader)\n \n start = time.time()\n \n self.model.evaluate()\n with torch.no_grad():\n \n for images, labels in tqdm(self.validationloader, leave=True, total=n_iter, desc='Validation', file = sys.stdout):\n \n steps += 1\n curr_epoch += self.opt.batch_size\n \n# x = torch.Tensor(images).cuda()\n x = 
Variable(images).cuda()\n \n # GENERATOR FORWARD\n x_prime, z, z_prime = self.model.forward_gen(x)\n # DISCRIMINATOR FORWARD\n pred_real, feat_real, pred_fake, feat_fake = self.model.forward_discr(x, x_prime) \n \n # GENERATOR LOSS\n loss_gen, loss_gen_val, losses = self.model.loss_function_gen(x, x_prime, z, z_prime, feat_fake, feat_real, self.opt)\n # DISCRIMINATOR LOSS\n loss_discr = self.model.loss_function_discr(pred_real, pred_fake)\n \n valid_loss[GENERATOR].append(loss_gen_val.item()*images.size(0))\n valid_loss[DISCRIMINATOR].append(loss_discr.item()*images.size(0))\n adv_loss.append(losses[0].item()*images.size(0))\n con_loss.append(losses[1].item()*images.size(0))\n enc_loss.append(losses[2].item()*images.size(0))\n \n spent_time = time.time() - start\n \n return valid_loss, [adv_loss, con_loss, enc_loss], spent_time\n \n def _test(self):\n \n start = time.time()\n \n with torch.no_grad():\n \n i=0\n curr_epoch = 0\n times = []\n n_iter = len(self.validationloader)\n\n anomaly_scores = torch.zeros(size=(len(self.validationloader.dataset),), dtype=torch.float32, device=device)\n gt_labels = torch.zeros(size=(len(self.validationloader.dataset),), dtype=torch.long, device=device)\n \n \n for images, labels in tqdm(self.validationloader, leave=True, total=n_iter, desc='Test', file = sys.stdout):\n \n curr_epoch += self.opt.batch_size\n \n time_in = time.time()\n \n x = torch.Tensor(images).cuda()\n tensor_labels = torch.Tensor(labels).cuda() \n \n \n _, z, z_prime = self.model.forward_gen(x)\n# print('\\n----Z----')\n# torch.Size([64, 100, 1, 1])\n# print(z.shape)\n \n # ANOMALY SCORE\n score = torch.mean(torch.pow((z-z_prime), 2), dim=1)\n \n# print('Score: ', score)\n \n time_out = time.time()\n \n \n anomaly_scores[i*self.opt.batch_size : i*self.opt.batch_size + score.size(0)] = score.reshape(score.size(0))\n gt_labels[i*self.opt.batch_size : i*self.opt.batch_size + score.size(0)] = tensor_labels.reshape(score.size(0))\n\n times.append(time_out - time_in)\n\n i += 1\n \n \n # NORMALIZATION - Scale error vector between [0, 1]\n anomaly_scores_norm = (anomaly_scores - torch.min(anomaly_scores)) / (torch.max(anomaly_scores) - torch.min(anomaly_scores))\n # auc, eer = roc(self.gt_labels, self.anomaly_scores)\n auc, threshold_norm = evaluate(gt_labels, anomaly_scores_norm)\n \n _, threshold = evaluate(gt_labels, anomaly_scores)\n\n performance = dict({'AUC':auc,\n 'Threshold':threshold})\n \n eval_data = dict({'gt_labels':gt_labels,\n 'scores':anomaly_scores})\n\n \n \n spent_time = time.time() - start\n \n return performance, eval_data, spent_time\n \n def train_model(self, save=True, lr_decay_value=None):\n \n print('-> Training model: ', self.opt.name)\n self.epochs = self.opt.epochs\n plotUnit = 1\n \n self.train_loss = {}\n self.train_loss[GENERATOR] = []\n self.train_loss[DISCRIMINATOR] = []\n \n self.val_loss = {}\n self.val_loss[GENERATOR] = []\n self.val_loss[DISCRIMINATOR] = []\n \n self.train_adv_loss = []\n self.train_con_loss = []\n self.train_enc_loss = []\n \n self.valid_adv_loss = []\n self.valid_con_loss = []\n self.valid_enc_loss = []\n \n \n self.thresholds = []\n \n self.folder_save = paths.checkpoint_folder + self.opt.name + '/'\n ensure_folder(self.folder_save)\n \n self.best_auc = 0\n \n es = EarlyStopping(self.opt)\n lrDecay = LR_decay(self.opt.lr_gen)\n \n for self.epoch in range(self.epochs):\n print('\\n')\n print('Epoch {}/{}'.format(self.epoch+1, self.epochs))\n \n # TRAINING\n loss, losses, train_time = self._trainOneEpoch()\n self.train_loss[GENERATOR] = 
np.concatenate((self.train_loss[GENERATOR], getNmeans(loss[GENERATOR], plotUnit)))\n self.train_loss[DISCRIMINATOR] = np.concatenate((self.train_loss[DISCRIMINATOR], getNmeans(loss[DISCRIMINATOR], plotUnit)))\n# print(losses)\n# print(losses[0])\n \n self.train_adv_loss = np.concatenate((self.train_adv_loss, getNmeans(losses[0], plotUnit)))\n self.train_con_loss = np.concatenate((self.train_con_loss, getNmeans(losses[1], plotUnit)))\n self.train_enc_loss = np.concatenate((self.train_enc_loss, getNmeans(losses[2], plotUnit)))\n \n# train_loss[GENERATOR] = np.concatenate((train_loss[GENERATOR], loss[GENERATOR]))\n# print(len(train_loss[GENERATOR]))\n train_time = adjustTime(train_time)\n \n # VALIDATION\n loss, losses, val_time = self._validation()\n self.val_loss[GENERATOR] = np.concatenate((self.val_loss[GENERATOR], getNmeans(loss[GENERATOR], plotUnit)))\n self.val_loss[DISCRIMINATOR] = np.concatenate((self.val_loss[DISCRIMINATOR], getNmeans(loss[DISCRIMINATOR], plotUnit)))\n \n self.valid_adv_loss = np.concatenate((self.valid_adv_loss, getNmeans(losses[0], plotUnit)))\n self.valid_con_loss = np.concatenate((self.valid_con_loss, getNmeans(losses[1], plotUnit)))\n self.valid_enc_loss = np.concatenate((self.valid_enc_loss, getNmeans(losses[2], plotUnit)))\n# val_loss[GENERATOR] = np.concatenate((val_loss[GENERATOR], loss[GENERATOR]))\n# print(len(val_loss[GENERATOR]))\n val_time = adjustTime(val_time)\n \n# self.visualizer.plot(val_loss[GENERATOR], 'validation', 'Generator', 'append')\n \n # VISUALIZATION\n \n# self.visualizer.plot_loss(train_loss['INDEX'], train_loss['GENERATOR'], val_loss['GENERATOR'], 'GENERATOR')\n# self.visualizer.plot_loss(train_loss['INDEX'], train_loss['DISCRIMINATOR'], val_loss['DISCRIMINATOR'], 'DISCRIMINATOR')\n \n # TEST\n# return self._test()\n \n performance, eval_data, spent_time = self._test()\n \n self.auc, self.threshold = performance['AUC'], performance['Threshold']\n self.thresholds.append(self.threshold)\n \n self.gt_labels, self.anomaly_scores = eval_data['gt_labels'], eval_data['scores']\n# test_time = adjustTime(spent_time)\n \n if(self.epoch % self.opt.printing_freq == 0):\n self.plotting()\n self.evaluateRoc()\n \n if(self.auc > best_auc):\n self.best_auc = self.auc\n \n# self.visualizer.print_current_performance(result, best_auc)\n \n print('\\n')\n print('>- Training Loss: {} in {} sec'.format(self.train_loss[GENERATOR][-1], train_time) )\n print('>- Validation Loss: {} in {} sec'.format(self.val_loss[GENERATOR][-1], val_time))\n \n valid_loss = self.val_loss['GENERATOR'][-1]\n \n saveCkp = es(valid_loss)\n if(saveCkp and save):\n self.saveCheckPoint(valid_loss)\n \n if(es.early_stop):\n print('-> Early stopping now')\n if(lr_decay_value):\n print('-> LR factor decay: {}'.format(lr_decay_value))\n lrDecay(lr_decay_value)\n self.model.optimize_gen.params_groups[0]['lr'] = lrDecay.lr\n print('New Learning rate for generator is {}'.format(lrDecay.lr))\n else:\n break\n \n self.saveCheckPoint(valid_loss)\n self.plotting()\n self.saveInfo()\n self.evaluateRoc(folder_save=self.folder_save)\n \n return valid_loss\n \n def resumeTraining(self, epochs, save=True):\n \n plotUnit = 1\n es = EarlyStopping(self.opt)\n from_epochs = self.epochs\n self.epochs = epochs\n \n for self.epoch in range(from_epochs, self.epochs):\n print('\\n')\n print('Epoch {}/{}'.format(self.epoch+1, epochs))\n \n # TRAINING\n loss, losses, train_time = self._trainOneEpoch()\n self.train_loss[GENERATOR] = np.concatenate((self.train_loss[GENERATOR], getNmeans(loss[GENERATOR], 
plotUnit)))\n self.train_loss[DISCRIMINATOR] = np.concatenate((self.train_loss[DISCRIMINATOR], getNmeans(loss[DISCRIMINATOR], plotUnit)))\n# print(losses)\n# print(losses[0])\n \n self.train_adv_loss = np.concatenate((self.train_adv_loss, getNmeans(losses[0], plotUnit)))\n self.train_con_loss = np.concatenate((self.train_con_loss, getNmeans(losses[1], plotUnit)))\n self.train_enc_loss = np.concatenate((self.train_enc_loss, getNmeans(losses[2], plotUnit)))\n \n# train_loss[GENERATOR] = np.concatenate((train_loss[GENERATOR], loss[GENERATOR]))\n# print(len(train_loss[GENERATOR]))\n train_time = adjustTime(train_time)\n \n # VALIDATION\n loss, losses, val_time = self._validation()\n self.val_loss[GENERATOR] = np.concatenate((self.val_loss[GENERATOR], getNmeans(loss[GENERATOR], plotUnit)))\n self.val_loss[DISCRIMINATOR] = np.concatenate((self.val_loss[DISCRIMINATOR], getNmeans(loss[DISCRIMINATOR], plotUnit)))\n \n self.valid_adv_loss = np.concatenate((self.valid_adv_loss, getNmeans(losses[0], plotUnit)))\n self.valid_con_loss = np.concatenate((self.valid_con_loss, getNmeans(losses[1], plotUnit)))\n self.valid_enc_loss = np.concatenate((self.valid_enc_loss, getNmeans(losses[2], plotUnit)))\n# val_loss[GENERATOR] = np.concatenate((val_loss[GENERATOR], loss[GENERATOR]))\n# print(len(val_loss[GENERATOR]))\n val_time = adjustTime(val_time)\n \n# self.visualizer.plot(val_loss[GENERATOR], 'validation', 'Generator', 'append')\n \n # VISUALIZATION\n \n# self.visualizer.plot_loss(train_loss['INDEX'], train_loss['GENERATOR'], val_loss['GENERATOR'], 'GENERATOR')\n# self.visualizer.plot_loss(train_loss['INDEX'], train_loss['DISCRIMINATOR'], val_loss['DISCRIMINATOR'], 'DISCRIMINATOR')\n \n # TEST\n# return self._test()\n performance, eval_data, spent_time = self._test()\n \n self.auc, self.threshold = performance['AUC'], performance['Threshold']\n self.gt_labels, self.anomaly_scores = eval_data['gt_labels'], eval_data['scores']\n# test_time = adjustTime(spent_time)\n \n if(self.epoch % self.opt.printing_freq == 0):\n self.plotting(save=False)\n self.evaluateRoc()\n \n# if(result['AUC'] > best_auc):\n# best_auc = result['AUC']\n# \n# self.visualizer.print_current_performance(result, best_auc)\n \n print('\\n')\n print('>- Training Loss: {} in {} sec'.format(self.train_loss[GENERATOR][-1], train_time) )\n print('>- Validation Loss: {} in {} sec'.format(self.val_loss[GENERATOR][-1], val_time))\n \n valid_loss = self.val_loss['GENERATOR'][-1]\n \n saveCkp = es(valid_loss)\n if(saveCkp and save):\n self.saveCheckPoint(valid_loss)\n \n if(es.early_stop):\n print('-> Early stopping now')\n break\n \n self.saveCheckPoint(valid_loss)\n self.plotting()\n self.saveInfo()\n self.evaluateRoc(folder_save=self.folder_save)\n \n return valid_loss\n \n def plotting(self, save=True):\n \n # PLOTTING LOSSES\n fig, [ax1, ax2, ax3, ax4, ax5] = plt.subplots(5,1, figsize=(8,16))\n \n _subplot(ax1, self.train_adv_loss, self.valid_adv_loss, 'ADV loss')\n _subplot(ax2, self.train_con_loss, self.valid_con_loss, 'CON loss')\n _subplot(ax3, self.train_enc_loss, self.valid_enc_loss, 'ENC loss')\n _subplot(ax4, self.train_loss[GENERATOR], self.val_loss[GENERATOR], 'Generator')\n _subplot(ax5, self.train_loss[DISCRIMINATOR], self.val_loss[DISCRIMINATOR], 'Discriminator')\n# plt.legend() \n \n if(save):\n# plt.savefig(self.folder_save + self.opt.name + '/'+ 'plot')\n plt.savefig(self.folder_save + 'plot')\n \n plt.show()\n \n def evaluateRoc(self, folder_save=None):\n if(folder_save is not None):\n folder_save = folder_save\n \n auc, _ = 
evaluate(self.gt_labels, self.anomaly_scores, plot=True, folder_save=folder_save)\n        \n        print('\\n')\n        print('AUC: {:.3f} \\t Thres. : {:.3f} '.format(auc, self.threshold))\n        \n    def plotLosses(self):\n        fig, [ax1, ax2, ax3, ax4, ax5] = plt.subplots(5,1, figsize=(8,16))\n        \n        _subplot(ax1, self.train_adv_loss, self.valid_adv_loss, 'ADV loss')\n        _subplot(ax2, self.train_con_loss, self.valid_con_loss, 'CON loss')\n        _subplot(ax3, self.train_enc_loss, self.valid_enc_loss, 'ENC loss')\n        _subplot(ax4, self.train_loss[GENERATOR], self.val_loss[GENERATOR], 'Generator')\n        _subplot(ax5, self.train_loss[DISCRIMINATOR], self.val_loss[DISCRIMINATOR], 'Discriminator')\n#        plt.legend()\n        plt.show() \n        \n    def saveInfo(self):\n        folder_save = paths.checkpoint_folder + self.opt.name + '/'\n        ensure_folder(folder_save)\n        \n        saveInfoGanomaly(self.opt, folder_save, self.auc)\n        \n    def addInfo(self, info):\n        folder_save = paths.checkpoint_folder + self.opt.name + '/'\n        ensure_folder(folder_save)\n        \n        addInfoGanomaly(self.opt, folder_save, info)\n        \n    def predict(self, image, target=None, info=None, verbose=0):\n#        image_tensor = torch.FloatTensor(image)\n        image_transf = Transforms.ToTensor()(image)\n        image_unsqueeze = image_transf.unsqueeze_(0)\n        x_image = Variable(image_unsqueeze).cuda()\n        \n        transf = Transforms.Compose([Transforms.ToTensor(), Transforms.Normalize((0.5,),(0.5,))])\n        \n        imagePIL = Image.fromarray(image)\n        x = transf(imagePIL)\n        x = x.unsqueeze_(0)\n        x = Variable(x).cuda()\n#        image = image_tensor\n        \n#        plt.imshow(image)\n\n        with torch.no_grad():\n            _, z, z_prime = self.model.forward_gen(x)\n            x_prime, _, _ = self.model.forward_gen(x_image)\n\n        score = torch.mean(torch.pow((z-z_prime), 2), dim=1)\n#        print(score)\n        anomaly_score = score\n        \n        output = x_prime.cpu().numpy()\n        final_output = np.transpose(output[0], (2,1,0))\n        \n#        final_output = (output * 0.5) + 0.5\n        final_output = np.flip(final_output, 1)\n        final_output = np.rot90(final_output, 1) \n        \n        prediction = ['Anomalous Image', 1] if score >= self.threshold else ['Normal Image', 0]\n        \n        real_outcome = 'Unknown'  # default, so verbose mode cannot hit an unbound name when no target is given\n        if(target is not None):\n            real_outcome = 'Anomalous Image' if target == 1 else 'Normal Image'\n        \n        if(verbose):\n            \n            fig, [ax1, ax2] = plt.subplots(2,1, figsize=(10,13))\n            results = '\\n------------ RESULTS -------------\\n' + \\\n                      'Threshold: {:.3f}\\n'.format(self.threshold) + \\\n                      'Score: {:.3f}\\n'.format(anomaly_score.item()) + \\\n                      'Real Outcome: {}\\n'.format(real_outcome) + \\\n                      '---------------------------------\\n\\n' + \\\n                      'Original image --> {}'.format(prediction[0])\n            \n            ax1.set_title(results)\n            ax1.imshow(image)\n            ax2.set_title('Reconstructed image')\n            ax2.imshow(final_output)\n            \n            print('')\n            print('\\n------------ RESULTS -------------')\n            print('Threshold: \\t{:.3f}'.format(self.threshold))\n            print('Score: \\t\\t{:.3f}'.format(anomaly_score.item()))\n            print('From \\t\\t{}'.format(real_outcome))\n            print('')\n            print('Original image --> ', prediction[0])\n            print('----------------------------------')\n            \n            if(info is not None):\n                print('..Saving..')\n                if(prediction[0] == 'Normal Image'): \n                    plt.savefig(self.folder_save + 'Normal_{}'.format(info))\n                elif(prediction[0] == 'Anomalous Image'):\n                    plt.savefig(self.folder_save + 'Anomaly_{}'.format(info))\n                else:\n                    raise Exception('Wrong Prediction')\n        \n        return prediction, anomaly_score.item(), self.threshold\n    \n    \n    def predictImage(self, dataTest, folder_save=None, N=10):\n        \n        i = 0\n        \n        with torch.no_grad():\n            anomaly_scores = torch.zeros(size=(len(dataTest.dataset),), dtype=torch.float32, device=device)\n            
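# ---- Editor's note (illustrative sketch, not part of the original file) ----
# The scoring rule used by predict() above and by the batch loop below is the
# GANomaly-style latent anomaly score: the mean squared distance between the
# latent code z of the input and the re-encoded code z_prime of its
# reconstruction. A standalone version, assuming z and z_prime are tensors of
# shape [batch, latent_dim] (the helper name is hypothetical):
#
#   def latent_anomaly_score(z, z_prime):
#       # one non-negative score per batch element; larger = more anomalous
#       return torch.mean((z - z_prime) ** 2, dim=1)
#
# Samples the generator re-encodes poorly look unlike the training data, which
# is why the score works as an anomaly signal.
# -----------------------------------------------------------------------------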
gt_labels = torch.zeros(size=(len(dataTest.dataset),), dtype=torch.long, device=device)\n \n # print('> anom shape')\n # print(anomaly_scores.shape)\n \n start = time.time()\n for patches, labels in tqdm(dataTest, total=len(dataTest)):\n \n x = torch.Tensor(patches).cuda()\n tensor_label = torch.Tensor(labels).cuda()\n \n # print('> x shape')\n # print(x.shape)\n \n x_prime, z, z_prime = self.model.forward_gen(x)\n \n # ANOMALY SCORE\n score = torch.mean(torch.pow((z-z_prime), 2), dim=1)\n \n anomaly_scores[i*self.opt.batch_size : i*self.opt.batch_size + score.size(0)] = score.reshape(score.size(0))\n gt_labels[i*self.opt.batch_size : i*self.opt.batch_size + score.size(0)] = tensor_label.reshape(score.size(0))\n \n i += 1\n \n anomaly_scores_norm = (anomaly_scores - torch.min(anomaly_scores)) / (torch.max(anomaly_scores) - torch.min(anomaly_scores))\n auc, threshold_auc = evaluate(gt_labels, anomaly_scores_norm, plot=True, folder_save=folder_save)\n \n avg_prec = evaluate(gt_labels, anomaly_scores_norm, metric='prec_rec_curve', plot=True, folder_save=folder_save)\n# return evaluate(gt_labels, anomaly_scores_norm, metric='prec_rec_curve', plot=True)\n end = time.time()\n \n performance = {'AUC': auc,\n 'Thr': threshold_auc,\n 'Avg_prec': avg_prec}\n \n print('Prediction time: {}'.format(adjustTime(end-start)))\n \n gt_labels = dataTest.dataset.targets\n pred_labels = computeAnomalyDetection(anomaly_scores_norm, threshold_auc)\n # PRECISION\n precision = evaluate(gt_labels, pred_labels, metric='precision')\n \n \n pred_patches = generatePatches(dataTest.dataset.data, pred_labels)\n \n \n print('Precision: {}'.format(precision))\n samples = getSamples(patches, x_prime, labels, anomaly_scores_norm, N=10)\n \n return pred_patches, samples, performance\n \n def saveImages(self, dataloader):\n \n reals, fakes, fixed = self.get_images(dataloader)\n self.visualizer.save_current_images(self.epoch, reals, fakes, fixed)\n if self.opt.display:\n self.visualizer.display_current_images(reals, fakes, fixed)\n \n \n def saveCheckPoint(self, valid_loss):\n self.folder_save = paths.checkpoint_folder + self.opt.name + '/'\n ensure_folder(self.folder_save)\n \n path_file = '{0}/{1}_lr:{2}|Epoch:{3}|Auc:{4:.3f}|Loss:{5:.4f}.pth.tar'.format(self.folder_save,\n self.opt.name,\n self.opt.lr_gen,\n self.epoch,\n self.auc,\n valid_loss)\n \n torch.save(self, path_file)\n# print('3.Model: ', self.model)\n \n def tuneLearningRate(self, inf_bound_gen, sup_bound_gen, inf_bound_discr, sup_bound_discr):\n \n max_count = 10\n self.result = []\n \n for count in range(max_count):\n \n print('Model n.', count)\n \n self.opt.epochs = 4\n self.model.optimizer_gen.lr = 10**np.random.uniform(sup_bound_gen, inf_bound_gen)\n self.model.optimizer_discr.lr = 10**np.random.uniform(sup_bound_discr, inf_bound_discr)\n loss = self.train_model(save=False)\n \n lr_gen_label = 'Gen_Lr:\\t{}\\n'.format(self.model.optimizer_gen.lr)\n lr_discr_label = 'Discr_Lr:\\t{}\\n'.format(self.model.optimizer_discr.lr)\n loss_label = 'Loss:\\t{}\\n\\n'.format(loss)\n \n result_label = 'Results \\n' + lr_gen_label + lr_discr_label + loss_label\n \n self.result.append(result_label)\n \n return self.result\n\n \n\ndef computeAnomalyDetection(scores, threshold): \n\n pred_labels = []\n \n for i in range(0, len(scores)):\n if(scores[i] < threshold):\n pred_labels.append(np.float64(0))\n else:\n pred_labels.append(np.float64(1))\n \n return pred_labels\n \ndef outputSample(sample, threshold, info=None, folder_save=None):\n \n original = sample['originals']\n 
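# ---- Editor's note (illustrative sketch, not part of the original file) ----
# computeAnomalyDetection above thresholds the scores one element at a time in
# a Python loop. With numpy the same labels fall out of a single vectorized
# comparison (the function name is hypothetical, shown for comparison only):
#
#   import numpy as np
#
#   def compute_anomaly_labels(scores, threshold):
#       # 1.0 where score >= threshold (anomalous), 0.0 otherwise
#       return (np.asarray(scores) >= threshold).astype(np.float64)
#
# -----------------------------------------------------------------------------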
image_rec = sample['images']\n    label = 'Anomaly' if(sample['labels']) else 'Normal'\n    score = sample['scores']\n    print(score)\n    \n    result = 'Normal patch' if score < threshold else 'Anomalous patch'\n    \n    fig, [ax1, ax2] = plt.subplots(2,1, figsize=(10,13))\n    results = '\\n------------ RESULTS -------------\\n' + \\\n              'Threshold: {:.3f}\\n'.format(threshold) + \\\n              'Score: {:.3f}\\n'.format(score) + \\\n              'Real Outcome: {}\\n'.format(label) + \\\n              '---------------------------------\\n\\n' + \\\n              'Original image --> {}'.format(result)\n    \n    ax1.set_title(results)\n    ax1.imshow(original)\n    ax1.grid(False)\n    ax2.set_title('Reconstructed image')\n    ax2.imshow(image_rec)\n    ax2.grid(False)\n    \n    print('')\n    print('\\n------------ RESULTS -------------')\n    print('Threshold: \\t{:.3f}'.format(threshold))\n    print('Score: \\t\\t{:.3f}'.format(score))\n    print('From \\t\\t{}'.format(label))\n    print('')\n    print('Original image --> ', result)\n    print('----------------------------------')\n    \n    if(info is not None and folder_save is not None):\n        print('..Saving..')\n        if(result == 'Normal patch'): \n            plt.savefig(folder_save + 'Normal_{}'.format(info))\n        elif(result == 'Anomalous patch'):\n            plt.savefig(folder_save + 'Anomaly_{}'.format(info))\n        else:\n            raise Exception('Wrong Prediction') \n    \n    \ndef _subplot(ax, train, val, title):\n    ax.set_title(title)\n    ax.plot(train, color='r', label='Training')\n    ax.plot(val, color='b', label='Validation')\n    ax.legend()\n    \ndef plotLoss(train_loss, val_loss, title):\n    \n#    print(train_loss)\n#    print(val_loss)\n    \n    plt.title(title)\n    plt.plot(train_loss, color='r', label='Training')\n    plt.plot(val_loss, color='b', label='Validation')\n    plt.legend()\n    plt.show()\n    \ndef adjustTime(sample_time):\n    \n    # divmod yields whole minutes and the leftover seconds in one step\n    minutes, seconds = divmod(int(sample_time), 60)\n    \n    return '{} min {} sec'.format(minutes, seconds)\n    \ndef getSamples(originals, patches, labels, scores, N=10):\n    \n    patch_image = []\n    patch_recon = []\n    patch_labels = []\n    patch_scores = []\n    \n    for i in range(0, N):\n        x = originals[i]\n        x_prime = patches[i]\n        label = labels[i]\n        \n        image = x.cpu().numpy()\n        output = x_prime.cpu().numpy()\n#        print(output.shape)\n        image_input = np.transpose(image, (2,1,0))\n        final_output = np.transpose(output, (2,1,0))\n#        print(final_output.shape)\n        \n        image_input = (image_input * 0.5) + 0.5\n        image_input = np.flip(image_input, 1)\n        image_input = np.rot90(image_input, 1) \n        \n        final_output = (final_output * 0.5) + 0.5\n        final_output = np.flip(final_output, 1)\n        final_output = np.rot90(final_output, 1) \n        \n        start = len(scores) - len(patches)\n#        print(start)\n        score = scores[start + i]\n        \n        patch_image.append(image_input)\n        patch_recon.append(final_output)\n        patch_labels.append(label.cpu())\n        patch_scores.append(score)\n\n        # TEST IMAGES PLOTS\n#        plt.imshow(image_input)\n#        plt.grid(False)\n#        plt.show()\n#        \n#        plt.imshow(final_output)\n#        plt.grid(False)\n#        plt.show()\n    \n    \n    samples = {'originals': patch_image,\n               'images': patch_recon,\n               'labels': patch_labels,\n               'scores': patch_scores}\n    \n    return pd.DataFrame(samples)\n    \n    \n    \n    \n    \n    \n    \n    \n    ","sub_path":"base/libraries/model/ganomaly.py","file_name":"ganomaly.py","file_ext":"py","file_size_in_byte":31661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"366794556","text":"import alsaaudio\nimport RPi.GPIO as GPIO\n\nfrom globaldefs import *\nfrom platformdefs import *\n\nVOLUME_DELTA = 5\n\n\nclass VolumeControl(object):\n\n    terminate = False\n\n    def 
__init__(self, sound_player):\n self.__sound_player = sound_player\n self.globalCounter = 0\n self.flag = 0\n self.Last_RoB_Status = 0\n self.Current_RoB_Status = 0\n\n # alsa /etc/asound.conf defines the mixer name\n self.__mixer = alsaaudio.Mixer(ALSA_MIXER_NAME)\n\n def close(self):\n if self.__mixer is not None:\n rc = self.__mixer.close()\n print ('ALSA Master mixer closed with rc =', rc)\n\n def rotate(self):\n self.Last_RoB_Status = GPIO.input(ROTARY_PIN_B)\n\n while(not GPIO.input(ROTARY_PIN_A)):\n self.Current_RoB_Status = GPIO.input(ROTARY_PIN_B)\n self.flag = 1\n\n if self.flag == 1:\n self.flag = 0\n if (self.Last_RoB_Status == 0) and (self.Current_RoB_Status == 1):\n self.globalCounter = self.globalCounter + 1\n if (self.Last_RoB_Status == 1) and (self.Current_RoB_Status == 0):\n self.globalCounter = self.globalCounter - 1\n\n def loop(self):\n tmp = 0\n try:\n while not VolumeControl.terminate:\n self.rotate()\n if tmp != self.globalCounter:\n current_volume = self.__mixer.getvolume()[0]\n print('current volume = ', current_volume)\n if tmp > self.globalCounter:\n # increase volume up to 100 max\n new_volume = current_volume + VOLUME_DELTA\n if new_volume > 100:\n new_volume = 100\n print('increase to ', new_volume)\n self.__mixer.setvolume(new_volume)\n else:\n # decrease volume down to 0 min\n new_volume = current_volume - VOLUME_DELTA\n if new_volume < 0:\n new_volume = 0\n print('decrease to ', new_volume)\n self.__mixer.setvolume(new_volume)\n tmp = self.globalCounter\n except RuntimeError:\n print('Ignoring RuntimeError at shutdown (VolumeControl)')\n","sub_path":"volumecontrol.py","file_name":"volumecontrol.py","file_ext":"py","file_size_in_byte":2363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"6963597","text":"import argparse\nimport sys\n\nimport boto3\nimport logging\nimport botocore\nimport time\nfrom getpass import getpass\n\nfrom aws.aws_deployment import AwsDeplpyment\nfrom config.config import load_config, add_environment_to_config, create_config_file, config_exists, environment_exists, DEFAULT_CONFIG_FILE_NAME, remove_environment_from_config, save_config\nfrom aws.lambda_util import random_string\nfrom core.connection.connection_factory import Factory, PROVIDER_AWS_WEBSOCKETS\nfrom core.manager import C2Manager\nfrom core.master import Master\n\naws_id = None\naws_secret = None\n\n\ndef check_aws_credentials(aws_id, aws_secret):\n try:\n print('[*] Checking AWS credentials')\n\n aws_session = boto3.Session(\n aws_access_key_id=aws_id,\n aws_secret_access_key=aws_secret,\n )\n sts = aws_session.client('sts')\n sts.get_caller_identity()\n return True\n except botocore.exceptions.NoCredentialsError:\n print('[-] Invalid AWS credentials')\n return False\n except botocore.exceptions.ClientError:\n print('[-] Invalid AWS credentials')\n return False\n except:\n print('[-] Error checking credentials.')\n return False\n\n\ndef show_aws_key_steps():\n print('\\t1) To get it you need an AWS account and go to:')\n print('\\t https://console.aws.amazon.com/iam/home?#/security_credentials')\n print('\\t2) Click on the \"Access keys (access key ID and secret access key)\" dropdown')\n print('\\t3) If you don\\'t have the AWS key or don\\'t remember it, create a new')\n print('\\t4) Keep this key safe, it is needed to every deploy or remove of a environment.\\n\\t The key is not needed to connect to an environment that has already been deployed previously.')\n\n\ndef request_aws_credentials(file_name):\n global 
aws_id\n    global aws_secret\n    if aws_id is not None and aws_secret is not None:\n        return aws_id, aws_secret\n\n    aws_steps_shown = False\n    config = load_config(file_name)\n    if 'aws_id' not in config or config['aws_id'] == '':\n        print('[*] AWS ID not found in the \"{}\" configuration file.'.format(file_name))\n        print('[*] Access Key ID needed')\n        show_aws_key_steps()\n        aws_steps_shown = True\n        access_key_id = input('[*] Access Key ID: ')\n        aws_id = access_key_id\n    else:\n        aws_id = config['aws_id']\n        print('[*] Access Key ID: {}'.format(aws_id))\n\n    if not aws_steps_shown:\n        print('[*] Secret key needed')\n        show_aws_key_steps()\n    aws_secret = getpass('[*] Insert your Secret Key: ')\n\n    if not check_aws_credentials(aws_id, aws_secret):\n        exit(1)\n    else:\n        print('[+] Credentials OK.')\n\n    return aws_id, aws_secret\n\n\ndef sync_environments(file_name):\n    global aws_id, aws_secret\n\n    config = load_config(file_name)\n    aws_id, aws_secret = request_aws_credentials(file_name)\n\n    if 'aws_id' not in config:\n        config['aws_id'] = aws_id\n\n    aws_environments = AwsDeplpyment().get_all_environments(aws_id, aws_secret)\n    environments = []\n    for item in aws_environments:\n        environments.append({\n            'name': item['environment'],\n            'url': item['url'],\n            'master-password': item['master_password']\n        })\n    config['environments'] = environments\n    save_config(config, file_name)\n    print('[+] Configuration file synchronized with AWS')\n    list_environments(file_name)\n\n\ndef deploy(file_name, environment_name):\n    global aws_id, aws_secret\n\n    if not config_exists(file_name):\n        print('[*] Configuration file \"{}\" not found, creating one.'.format(file_name))\n        create_config_file(file_name)\n    else:\n        print('[*] Loading configuration file \"{}\"'.format(file_name))\n\n    config = load_config(file_name)\n    if environment_exists(environment_name, file_name):\n        print('[-] The \"{}\" environment was created before as indicated in the \"{}\" configuration file. Indicate another name with the -n (--env-name) option or delete it from the configuration file.'.format(environment_name, file_name))\n        exit(1)\n\n    aws_id, aws_secret = request_aws_credentials(file_name)\n\n    print('[+] Creating infrastructure in AWS')\n\n    master_password = random_string(16)\n    url = AwsDeplpyment().deploy(aws_id, aws_secret, environment_name, master_password)\n\n    if 'environments' not in config:\n        config['environments'] = []\n\n    print('[+] Saving \"{}\" configuration file'.format(file_name))\n    add_environment_to_config({\n        'name': environment_name,\n        'url': url,\n        'master-password': master_password\n    }, file_name)\n\n    print('[+] Now you can connect to your new environment:')\n    connection_command = '\\n\\tpython3 wsc2.py -c -n {}'.format(environment_name)\n    if file_name != DEFAULT_CONFIG_FILE_NAME:\n        connection_command = '\\n\\tpython3 wsc2.py -c -n {} -f {}'.format(environment_name, file_name)\n\n    print(connection_command)\n    print()\n\n\ndef remove(file_name, environment_name):\n    global aws_id, aws_secret\n\n    print('[!] Are you sure you want to delete \"{}\" environment? y/N'.format(environment_name))\n    user_input = input()\n\n    if user_input.lower() != 'y':\n        exit()\n\n    aws_id, aws_secret = request_aws_credentials(file_name)\n\n    config = load_config(file_name)\n    if 'environments' not in config:\n        config['environments'] = []\n    environment_to_delete = None\n    for environment in config['environments']:\n        if environment['name'] == environment_name:\n            environment_to_delete = environment\n            break\n    if environment_to_delete is None:\n        print('[!] 
There is no \"{}\" environment in your local \"{}\" configuration file. Do you want to sync the config file with AWS? Y/n'.format(environment_name, file_name))\n        user_input = input()\n        if user_input.lower() == 'y' or user_input == '':\n            sync_environments(file_name)\n            time.sleep(1)\n\n            config = load_config(file_name)\n            environment_to_delete = None\n            for environment in config['environments']:\n                if environment['name'] == environment_name:\n                    environment_to_delete = environment\n                    break\n            if environment_to_delete is None:\n                print('[!] Environment not found. Do you want to try to delete it anyway? y/N')\n                user_input = input()\n                if user_input.lower() == 'n' or user_input == '':\n                    return\n\n    print('[+] Removing \"{}\" environment from AWS'.format(environment_name))\n    AwsDeplpyment().deploy(aws_id, aws_secret, environment_name, None, True)\n    remove_environment_from_config(environment_name, file_name)\n\n\ndef connect(file_name, environment_name):\n    config = load_config(file_name)\n    for environment in config['environments']:\n        if environment['name'] == environment_name:\n            master = Master(\n                environment_name,\n                C2Manager(\n                    Factory(PROVIDER_AWS_WEBSOCKETS, {'url': environment['url']}),\n                    environment['master-password'],\n                    environment_name\n                )\n            )\n            master.show_c2_menu()\n            return\n\n    print('[-] \"{}\" environment not found in the \"{}\" configuration file. Do you want to sync \"{}\" configuration file from AWS? Y/n'.format(environment_name, file_name, file_name))\n    user_input = input()\n    if user_input.lower() == 'y' or user_input == '':\n        sync_environments(file_name)\n        config = load_config(file_name)\n        for environment in config['environments']:\n            if environment['name'] == environment_name:\n                master = Master(\n                    environment_name,\n                    C2Manager(\n                        Factory(PROVIDER_AWS_WEBSOCKETS, {'url': environment['url']}),\n                        environment['master-password'],\n                        environment_name\n                    )\n                )\n                master.show_c2_menu()\n                return\n\n    print('[-] {} environment not found in the {} configuration file. Add it manually to config file or create a new environment with -d (--deploy) option'.format(environment_name, file_name))\n\n\ndef list_environments(file_name):\n    if not config_exists(file_name):\n        print('[-] Configuration file \"{}\" not found'.format(file_name))\n        exit(1)\n    else:\n        config = load_config(file_name)\n        if 'environments' not in config or len(config['environments']) == 0:\n            print('[!] No environments found in the {} configuration file'.format(file_name))\n        else:\n            print('\\nEnvironments from {} configuration file:\\n'.format(file_name))\n            for environment in config['environments']:\n                print('- {} ({}) | {}'.format(environment['name'], environment['url'], environment['master-password']))\n            print()\n\n\nif __name__ == '__main__':\n\n    logging.getLogger('boto3').setLevel(logging.INFO)\n    logging.getLogger('botocore').setLevel(logging.INFO)\n    logging.getLogger('s3transfer').setLevel(logging.INFO)\n    logging.getLogger('urllib3').setLevel(logging.CRITICAL)\n    logging.getLogger('root').setLevel(logging.CRITICAL)\n    logging.getLogger('asyncio').setLevel(logging.CRITICAL)\n\n    parser = argparse.ArgumentParser()\n\n    group = parser.add_mutually_exclusive_group()\n    group.add_argument('-d', '--deploy', help='Option to deploy a new C2 to AWS. You must include the option -n or --env-name.', default=False, action='store_true')\n    group.add_argument('-r', '--remove', help='Option to remove a C2 from AWS. 
You must include the option -n or --env-name.', default=False, action='store_true')\n group.add_argument('-c', '--connect', help='Connect to a C2 deploy. You must include the option -n or --env-name.', default=False, action='store_true')\n group.add_argument('-l', '--list', help='List environments from the config file.', default=False, action='store_true')\n\n parser.add_argument('-s', '--sync', help='Sync your config file with the AWS deployments', default=False, action='store_true')\n parser.add_argument('-n', '--env-name', help='The deployment environment name to create or connect to it.', default=\"default\")\n parser.add_argument('-f', '--file-name', help='The deployment environment file name to create or connect to it. In case of deployment must include the AWS keys, see aws_config.json example file.', default=\"aws_config.json\")\n\n if len(sys.argv) == 1:\n parser.print_help(sys.stderr)\n sys.exit(1)\n\n args = parser.parse_args()\n\n if args.sync:\n sync_environments(args.file_name)\n\n if args.deploy:\n deploy(args.file_name, args.env_name)\n\n elif args.remove:\n remove(args.file_name, args.env_name)\n\n elif args.connect:\n connect(args.file_name, args.env_name)\n\n elif args.list:\n list_environments(args.file_name)\n","sub_path":"wsc2.py","file_name":"wsc2.py","file_ext":"py","file_size_in_byte":10900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"352869578","text":"#!/usr/bin/python3 -O\n# pylint: disable=no-init,too-few-public-methods\n\nimport datetime\nimport decimal\nimport gettext\nimport math\nimport os\n\nimport sqlalchemy\nimport sqlalchemy.ext.declarative\nimport sqlalchemy.orm\nimport sqlalchemy.orm.collections\nimport sqlalchemy.orm.exc\nimport sqlalchemy.types\n\nfrom sqlalchemy import Table\nfrom sqlalchemy import Column\n\nfrom sqlalchemy import Boolean\nfrom sqlalchemy import CHAR\nfrom sqlalchemy import Date\nfrom sqlalchemy import Integer\nfrom sqlalchemy import Numeric\nfrom sqlalchemy import String\nfrom sqlalchemy import Text\nfrom sqlalchemy import Unicode\n\nfrom sqlalchemy import ForeignKey\n\nBase = sqlalchemy.ext.declarative.declarative_base()\n\nimport invoice\nimport invoice.util\n\nclass Currency(Base):\n __tablename__ = 'currencies'\n code = Column(CHAR(3), primary_key=True)\n sign = Column(Unicode(8), nullable=False)\n name = Column(Text(), nullable=False)\n\n\nfeatures_association = Table('invoice_features', Base.metadata,\n Column('invoice_no', ForeignKey('invoices.number')),\n Column('feature_code', ForeignKey('features.code'))\n)\n\n\nclass Feature(Base):\n __tablename__ = 'features'\n code = Column(String(8), primary_key=True)\n description = Column(Text())\n\n\nclass Product(Base):\n __tablename__ = 'products'\n code = Column(String(8), primary_key=True)\n name = Column(String(64), nullable=False)\n unit = Column(String(8))\n vat = Column(Numeric(2), default=23, nullable=False)\n\n prices = sqlalchemy.orm.relationship('Price', cascade='all, delete-orphan',\n collection_class=sqlalchemy.orm.collections.attribute_mapped_collection(\n 'currency_code'))\n\n def set_price(self, currency, price):\n price = decimal.Decimal(price)\n try:\n self.prices[currency].price = price\n except KeyError:\n session.add(Price(\n product_code=self.code, currency_code=currency, price=price))\n session.flush()\n\n def set_bprice(self, currency, bprice):\n bprice = decimal.Decimal(bprice)\n try:\n self.prices[currency].bprice = bprice\n except KeyError:\n price = Price(product_code=self.code, 
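# ---- Editor's note (illustrative, not part of the original file) ----
# Price.set_bprice below derives the net unit price from a gross (brutto)
# price with floor rounding: price = floor(bprice / (vat/100 + 1) * 100) / 100.
# Worked example with the default vat of 23:
#
#   bprice = 123.00  ->  123.00 / 1.23 = 100.00     ->  price = 100.00 (exact)
#   bprice =  99.99  ->   99.99 / 1.23 = 81.2926... ->  price = 81.29
#
# The second case does not round-trip: 81.29 * 1.23 = 99.9867, so the
# recomputed brutto differs from the input by a fraction of a cent. That is
# inherent to flooring at two decimals, not a bug in this file.
# -----------------------------------------------------------------------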
currency_code=currency)\n price.bprice = bprice\n session.add(price)\n session.flush()\n\n\n def __repr__(self):\n return ('<{0.__class__.__name__} {0.code!r} name={0.name!r}'\n ' unit={0.unit!r} vat={0.vat!r}>').format(self)\n\n def __str__(self):\n return invoice.env.get_template('product.txt').render(product=self)\n\n\nclass Price(Base):\n __tablename__ = 'prices'\n product_code = Column(ForeignKey('products.code',\n onupdate='CASCADE', ondelete='CASCADE'), primary_key=True)\n currency_code = Column(ForeignKey('currencies.code',\n onupdate='CASCADE', ondelete='RESTRICT'), primary_key=True)\n price = Column(Numeric(4),\n nullable=False)\n\n product = sqlalchemy.orm.relationship('Product')\n currency = sqlalchemy.orm.relationship('Currency')\n\n def set_bprice(self, bprice):\n # pylint: disable=no-member\n self.price = math.floor(\n decimal.Decimal(bprice) / (self.product.vat / 100 + 1) * 100) \\\n / 100\n session.flush()\n\n bprice = property(lambda self: (\n (self.product.vat * decimal.Decimal('.01') + 1) * self.price),\n set_bprice)\n\n\nclass Customer(Base):\n __tablename__ = 'customers'\n code = Column(String(16), primary_key=True)\n short = Column(String(64), nullable=False)\n address = Column(String(256), nullable=False)\n email = Column(String(128), nullable=False)\n\n\nclass Invoice(Base):\n __tablename__ = 'invoices'\n\n number = Column(String(16), primary_key=True)\n\n currency_code = Column(ForeignKey('currencies.code',\n onupdate='CASCADE', ondelete='RESTRICT'), nullable=False)\n customer_code = Column(ForeignKey('customers.code',\n onupdate='CASCADE', ondelete='RESTRICT'), nullable=False)\n delivered = Column(Date(),\n default=invoice.util.last_day_of_month, nullable=False)\n issued = Column(Date(),\n default=invoice.util.last_day_of_month, nullable=False)\n grace = Column(Integer(),\n default=15, nullable=False)\n finalised = Column(Boolean(),\n default=False, nullable=False)\n\n currency = sqlalchemy.orm.relationship('Currency')\n customer = sqlalchemy.orm.relationship('Customer')\n lines = sqlalchemy.orm.relationship('Line',\n cascade='all, delete-orphan')\n features = sqlalchemy.orm.relationship('Feature',\n secondary=features_association)\n\n deadline = property(lambda self:\n (self.issued + datetime.timedelta(days=self.grace)))\n\n\n netto = property(lambda self: sum(line.netto for line in self.lines))\n tax = property(lambda self: sum(line.tax for line in self.lines))\n brutto = property(lambda self: sum(line.brutto for line in self.lines))\n\n def _get_currency_rate(self):\n if hasattr(self, '_currency_rate'):\n return\n self._currency_rate, self._currency_rate_date = \\\n invoice.util.get_currency_rate(self.currency_code, self.delivered)\n\n @property\n def currency_rate(self):\n self._get_currency_rate()\n return self._currency_rate\n\n @property\n def currency_rate_date(self):\n self._get_currency_rate()\n return self._currency_rate_date\n\n @property\n def tax_pln(self):\n if self.currency_code == 'PLN':\n return self.tax\n return self.tax * self.currency_rate\n\n def add_line(self, product, amount, **kwargs):\n if not isinstance(product, Product):\n product = session.query(Product).filter_by(code=product).one()\n\n line = Line(invoice_no=self.number,\n product_code=product.code,\n amount=decimal.Decimal(amount),\n price=product.prices[self.currency_code].price,\n currency=self.currency,\n vat=product.vat)\n\n for k, v in kwargs.items():\n if v is None:\n continue\n setattr(line, k, v)\n\n self.lines.append(line) # pylint: disable=no-member\n session.flush()\n\n 
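# ---- Editor's note (illustrative, not part of the original file) ----
# add_line_from_spec below parses a comma-separated line spec of the form
# "<product_code>,<amount>[,<field>=<decimal>,...]", where each extra
# <field>=<value> pair is set on the created Line via add_line's kwargs.
# Hypothetical usage (the product code and values are made up):
#
#   inv.add_line_from_spec('SRV1,3')             # 3 units at the list price
#   inv.add_line_from_spec('SRV1,1,price=12.50') # one unit, overridden price
#
# -----------------------------------------------------------------------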
def add_line_from_spec(self, spec):\n tokens = spec.strip().split(',')\n product = tokens[0]\n amount = decimal.Decimal(tokens[1])\n\n kwargs = dict()\n for token in tokens[2:]:\n k, v = token.split('=', 1)\n kwargs[k] = decimal.Decimal(v)\n\n self.add_line(product, amount, **kwargs)\n\n def finalise(self):\n self.finalised = True\n\n def unfinalise(self):\n self.finalised = False\n\n # TODO\n def set_deadline(self, deadline):\n pass\n\n def tex(self, locale=None):\n if locale is None:\n locale = 'pl_PL'\n trans = gettext.translation('invoice', os.path.abspath(\n os.path.join(os.path.dirname(__file__), '../locale')),\n languages=[locale], fallback=True)\n\n # pylint: disable=no-member\n invoice.env.install_gettext_translations(trans, newstyle=True)\n ret = invoice.env.get_template('invoice.tex').render(invoice=self)\n invoice.env.uninstall_gettext_translations(trans)\n return ret\n\n def __str__(self):\n return invoice.env.get_template('invoice.txt').render(invoice=self)\n\n def __repr__(self):\n return ('<{0.__class__.__name__} {0.number!r}'\n ' customer_code={0.customer_code!r}'\n ' delivered={0.delivered!r}'\n ' issued={0.issued!r}'\n ' grace={0.grace!r}'\n ' count={1!r}'\n ' netto={0.netto!r}'\n ' tax={0.tax!r}'\n ' brutto={0.brutto!r}'\n ' finalised={0.finalised!r}>').format(self, len(self.lines))\n\n\nclass Line(Base):\n # PRIMARY KEY is sort of stupid, it is there because sqlalchemy requires it\n __tablename__ = 'lines'\n invoice_no = Column(ForeignKey('invoices.number',\n onupdate='CASCADE', ondelete='CASCADE'), primary_key=True)\n product_code = Column(ForeignKey('products.code',\n onupdate='CASCADE', ondelete='RESTRICT'), primary_key=True)\n amount = Column(Numeric(),\n nullable=False)\n price = Column(Numeric(4),\n primary_key=True)\n currency_code = Column(ForeignKey('currencies.code',\n onupdate='CASCADE', ondelete='RESTRICT'), primary_key=True)\n vat = Column(Numeric(2),\n primary_key=True)\n\n product = sqlalchemy.orm.relationship('Product')\n currency = sqlalchemy.orm.relationship('Currency')\n\n def set_bprice(self, bprice):\n self.price = decimal.Decimal(math.floor(\n bprice / (self.vat / 100 + 1) * 100) / 100)\n session.flush()\n\n def set_netto(self, netto):\n self.price = decimal.Decimal(\n math.floor(netto / self.amount * 100) / 100)\n session.flush()\n\n def set_brutto(self, brutto):\n self.price = decimal.Decimal(math.floor(\n brutto / (self.vat / 100 + 1) / self.amount * 100) / 100)\n session.flush()\n\n bprice = property(\n lambda self: (self.vat * decimal.Decimal('.01') + 1) * self.price,\n set_bprice)\n netto = property(\n lambda self: self.price * self.amount,\n set_netto)\n tax = property(\n lambda self: self.vat * decimal.Decimal('.01') * self.netto)\n brutto = property(\n lambda self: self.netto + self.tax,\n set_brutto)\n\n def __repr__(self):\n return ('<{0.__class__.__name__} {0.product_code!r}'\n ' invoice={0.invoice_no!r}'\n ' amount={0.amount!r}'\n ' price={0.price!r}'\n ' currency={0.currency_code!r}'\n ' vat={0.vat!r}>').format(self)\n\n\nengine = sqlalchemy.create_engine(\n 'sqlite:///{}'.format(os.path.expanduser(invoice.config.dbfilename)))\nSession = sqlalchemy.orm.sessionmaker(bind=engine)\nsession = Session()\n\ndef init():\n Base.metadata.create_all(engine)\n session.add(Currency(code='PLN'))\n session.add(Currency(code='EUR'))\n session.add(Currency(code='USD'))\n session.add(Feature(code='28b', description='Odwrotne obciążenie'))\n session.commit()\n\n# vim: ts=4 sts=4 sw=4 
et\n","sub_path":"invoice/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":10135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"199227212","text":"# from django.shortcuts import render\n# from rest_framework import viewsets\n# from rest_framework.decorators import api_view\n# from rest_framework.response import Response\nimport json\n\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\n\nfrom . import arbitrage\nfrom . import news\nfrom .main import star_coin\nfrom .main.view_table import standard\nfrom .member import login, mypage, register\n\n\n# Build handlers that respond according to the incoming HTTP method (GET or POST)\n@api_view(['GET'])\ndef get_all_coin_info(request):\n    user_id = request.GET.get('user_id')\n    # STANDARD_MARKET should be fetched from the DB based on user_id\n    # STANDARD_MARKET = MongoDbManager().get_standard_market({'user_id':user_id})\n    # the same applies to TARGET_MARKET\n    # TARGET_MARKET = MongoDbManager().get_target_market({'user_id':user_id})\n    STANDARD_MARKET = \"upbit\"\n    TARGET_MARKET = [\"binance\"]\n    coins = arbitrage.get_coins_lst(STANDARD_MARKET)\n    data = arbitrage.get_all_coin_info(coins, STANDARD_MARKET, TARGET_MARKET)\n    return Response(data)\n    # return Response()\n\n\n@api_view(['GET'])\ndef get_news_info(request):\n    data = news.get_news()\n    return Response(data)\n\n\n@api_view(['POST'])\ndef update_standard(request):\n    user_id = request.POST['user_id']\n    standard_market = request.POST['standard_market']\n    standard.update_standard(user_id, standard_market)\n    return Response(status=200)\n\n\n@api_view(['POST'])\ndef update_star_coin(request):\n    user_id = request.POST['user_id']\n    star_coin_name = request.POST['star_coin']  # local name must not shadow the imported star_coin module used below\n    status = request.POST['status']\n    star_coin.update_star_coin(user_id, star_coin_name, status)\n    return Response(status=200)\n\n\n@api_view(['POST'])\ndef update_star_market(request):\n    user_id = request.POST['user_id']\n    star_market = request.POST['star_market']\n    status = request.POST['status']\n    star_coin.update_star_market(user_id, star_market, status)\n    return Response(status=200)\n\n\n@api_view(['GET'])\ndef get_login(request):\n    user_id = request.GET.get('user_id')  # this should be passed in from the view\n    user_pw = request.GET.get('user_pw')\n    status = login.get_login(user_id, user_pw)  # 200 success, 400 wrong password, 404 no member record\n    if status == 200:\n        return Response(status=200)\n    elif status == 400:\n        return Response(status=400)\n    else:\n        return Response(status=404)\n\n\n@api_view(['POST'])\ndef create_user(request):\n    payload = json.loads(request.body)[\"user\"]\n    user_id = payload['user_id']\n    user_pw = payload['user_pw']\n    email = payload['email']\n    name = payload['name']\n    status = register.create_user(user_id, user_pw, email, name)\n    if status == 200:\n        return Response(status=200)\n    else:\n        return Response(status=400)\n\n\n@api_view(['GET'])\ndef get_mypage(request):\n    user_id = request.GET.get('user_id')\n    data = mypage.get_mypage(user_id)\n    return Response(data)\n","sub_path":"backend/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"465429447","text":"# Definition for singly-linked list.\nclass ListNode:\n    def __init__(self, x):\n        self.val = x\n        self.next = None\n\n\nclass Solution:\n    def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:\n        root = curr = ListNode(None)\n        while l1 and l2:\n            curr.next = l1.val <= l2.val and l1 or l2\n            curr = curr.next\n            if curr is l1:\n                l1 = l1.next\n            elif curr is l2:\n                l2 = l2.next\n\n        curr.next = l1 or l2  # attach whatever remains of the non-exhausted list\n\n        
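# ---- Editor's note (illustrative, not part of the original file) ----
# The loop above picks the smaller head node with the old `cond and a or b`
# idiom. It only works because ListNode instances are always truthy; the
# equivalent conditional expression has no such trap and reads better:
#
#   curr.next = l1 if l1.val <= l2.val else l2
#
# -----------------------------------------------------------------------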
return root.next\n\n\nif __name__ == '__main__':\n def list_to_node(num_list: list) -> ListNode:\n it = iter(num_list)\n root = curr = ListNode(None)\n try:\n while True:\n curr.next = ListNode(next(it))\n curr = curr.next\n except StopIteration:\n return root.next\n finally:\n return root.next\n\n def print_list(root: ListNode):\n while root:\n print(root.val, end='->')\n root = root.next\n print('')\n\n solution = Solution()\n list_1 = [1, 2, 4, 5]\n list_2 = [1, 3, 4]\n print_list(list_to_node(list_1))\n\n print_list(solution.mergeTwoLists(list_to_node(list_1), list_to_node(list_2)))\n","sub_path":"Daily/mergeTwoLists.py","file_name":"mergeTwoLists.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"504161468","text":"from gratipay.models.account_elsewhere import AccountElsewhere\nfrom gratipay.models.participant import Participant\nfrom gratipay.testing.emails import EmailHarness\n\n\nclass TestTransactionalEmails(EmailHarness):\n\n def setUp(self):\n EmailHarness.setUp(self)\n self.bob = self.make_participant('bob', claimed_time='now', email_address='bob@example.com')\n self.dan = self.make_participant('dan', claimed_time='now', email_address='dan@example.com')\n self.alice = self.make_participant('alice', claimed_time='now', email_address='alice@example.com')\n\n def test_opt_in_sends_notifications_to_patrons(self):\n carl_twitter = self.make_elsewhere('twitter', 1, 'carl')\n roy = self.make_participant('roy', claimed_time='now', email_address='roy@example.com')\n self.client.POST( '/roy/emails/notifications.json'\n , data={'toggle': 'notify_on_opt_in'}\n , auth_as='roy'\n )\n\n self.bob.set_tip_to(carl_twitter.participant.username, '100')\n self.dan.set_tip_to(carl_twitter.participant.username, '100')\n roy.set_tip_to(carl_twitter.participant.username, '100') # Roy will NOT receive an email.\n\n AccountElsewhere.from_user_name('twitter', 'carl').opt_in('carl')\n\n Participant.dequeue_emails()\n assert self.mailer.call_count == 2 # Emails should only be sent to bob and dan\n last_email = self.get_last_email()\n assert last_email['to'][0]['email'] == 'dan@example.com'\n expected = \"to carl\"\n assert expected in last_email['text']\n\n def test_take_over_sends_notifications_to_patrons(self):\n dan_twitter = self.make_elsewhere('twitter', 1, 'dan')\n\n self.alice.set_tip_to(self.dan, '100') # Alice shouldn't receive an email.\n self.bob.set_tip_to(dan_twitter.participant.username, '100') # Bob should receive an email.\n\n self.dan.take_over(dan_twitter, have_confirmation=True)\n\n Participant.dequeue_emails()\n assert self.mailer.call_count == 1\n last_email = self.get_last_email()\n assert last_email['to'][0]['email'] == 'bob@example.com'\n expected = \"to dan\"\n assert expected in last_email['text']\n\n def test_opt_in_notification_includes_unsubscribe(self):\n carl_twitter = self.make_elsewhere('twitter', 1, 'carl')\n roy = self.make_participant('roy', claimed_time='now', email_address='roy@example.com', notify_on_opt_in=1)\n roy.set_tip_to(carl_twitter.participant.username, '100')\n\n AccountElsewhere.from_user_name('twitter', 'carl').opt_in('carl')\n\n Participant.dequeue_emails()\n assert \"To stop receiving\" in self.get_last_email()['text']\n","sub_path":"tests/py/test_email_notifs.py","file_name":"test_email_notifs.py","file_ext":"py","file_size_in_byte":2731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} 
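Editor's note: the next record implements the smoothed Dice coefficient and its negation as a Keras loss. As a quick illustration of the formula (an editor's sketch, not part of the dataset; the function name is made up), here it is in plain numpy for flat binary masks:

import numpy as np

def dice_coef_np(y_true, y_pred, smooth=1.0):
    # Dice = (2*|A∩B| + s) / (|A| + |B| + s); smoothing keeps it defined for empty masks
    y_true = np.asarray(y_true, dtype=np.float64).ravel()
    y_pred = np.asarray(y_pred, dtype=np.float64).ravel()
    intersection = np.sum(y_true * y_pred)
    return (2.0 * intersection + smooth) / (np.sum(y_true) + np.sum(y_pred) + smooth)

print(dice_coef_np([1, 1, 0, 0], [1, 1, 0, 0]))  # 1.0  (perfect overlap)
print(dice_coef_np([1, 1, 0, 0], [0, 0, 1, 1]))  # 0.2  (disjoint; smoothing keeps it above 0)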
+{"seq_id":"70587303","text":"## Load all the dependencies\r\nimport os\r\nimport sys\r\nimport random\r\nimport warnings\r\nimport numpy as np\r\nfrom itertools import chain\r\nfrom numpy import genfromtxt\r\nfrom tensorflow import random\r\nfrom keras import backend as K\r\n# from keras import backend as k\r\nfrom keras.optimizers import Adam, SGD, RMSprop\r\nfrom keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau\r\nfrom keras.layers import Layer, UpSampling2D, GlobalAveragePooling2D, Multiply, Dense, Reshape, Permute, multiply, dot, add, Input\r\nfrom keras.layers.core import Dropout, Lambda, SpatialDropout2D, Activation\r\nfrom keras.layers.normalization import BatchNormalization\r\nfrom keras.layers.convolutional import Conv2D, Conv2DTranspose\r\nfrom keras.layers.pooling import MaxPooling2D\r\nfrom keras.layers.merge import concatenate\r\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\r\nfrom keras.models import Model, load_model, model_from_yaml, Sequential\r\nimport tensorflow as tf\r\n\r\nos.environ['CUDA_VISIBLE_DEVICES'] = '-1'\r\n\r\nnp.random.seed(1337) # for reproducibility\r\nrandom.set_seed(1337)\r\nprint(tf.__version__)\r\n\r\nname = \"model_fine_tuning_\"\r\n\r\n# Use dice coefficient function as the loss function \r\ndef dice_coef(y_true, y_pred):\r\n y_true_f = K.flatten(y_true)\r\n y_pred_f = K.flatten(y_pred)\r\n intersection = K.sum(y_true_f * y_pred_f)\r\n return (2.0 * intersection + 1.0) / (K.sum(y_true_f) + K.sum(y_pred_f) + 1.0)\r\n\r\n# Jacard coefficient\r\ndef jacard_coef(y_true, y_pred):\r\n y_true_f = K.flatten(y_true)\r\n y_pred_f = K.flatten(y_pred)\r\n intersection = K.sum(y_true_f * y_pred_f)\r\n return (intersection + 1.0) / (K.sum(y_true_f) + K.sum(y_pred_f) - intersection + 1.0)\r\n\r\n# calculate loss value\r\ndef jacard_coef_loss(y_true, y_pred):\r\n return -jacard_coef(y_true, y_pred)\r\n\r\n# calculate loss value\r\ndef dice_coef_loss(y_true, y_pred):\r\n return -dice_coef(y_true, y_pred)\r\n\r\ndef Residual_CNN_block(x, size, dropout=0.0, batch_norm=True):\r\n if K.image_dim_ordering() == 'th':\r\n axis = 1\r\n else:\r\n axis = 3\r\n conv = Conv2D(size, (3, 3), padding='same')(x)\r\n if batch_norm is True:\r\n conv = BatchNormalization(axis=axis)(conv)\r\n conv = Activation('relu')(conv)\r\n conv = Conv2D(size, (3, 3), padding='same')(conv)\r\n if batch_norm is True:\r\n conv = BatchNormalization(axis=axis)(conv)\r\n conv = Activation('relu')(conv)\r\n conv = Conv2D(size, (3, 3), padding='same')(conv)\r\n if batch_norm is True:\r\n conv = BatchNormalization(axis=axis)(conv)\r\n conv = Activation('relu')(conv)\r\n return conv\r\n\r\nclass multiplication(Layer):\r\n def __init__(self,inter_channel = None,**kwargs):\r\n super(multiplication, self).__init__(**kwargs)\r\n self.inter_channel = inter_channel\r\n def build(self,input_shape=None):\r\n self.k = self.add_weight(name='k',shape=(1,),initializer='zeros',dtype='float32',trainable=True)\r\n def get_config(self):\r\n base_config = super(multiplication, self).get_config()\r\n config = {'inter_channel':self.inter_channel}\r\n return dict(list(base_config.items()) + list(config.items())) \r\n def call(self,inputs):\r\n g,x,x_query,phi_g,x_value = inputs[0],inputs[1],inputs[2],inputs[3],inputs[4]\r\n h,w,c = int(x.shape[1]),int(x.shape[2]),int(x.shape[3])\r\n x_query = K.reshape(x_query, shape=(-1,h*w, self.inter_channel//4))\r\n phi_g = K.reshape(phi_g,shape=(-1,h*w,self.inter_channel//4))\r\n x_value = K.reshape(x_value,shape=(-1,h*w,c))\r\n scale = 
dot([K.permute_dimensions(phi_g,(0,2,1)), x_query], axes=(1, 2))\r\n soft_scale = Activation('softmax')(scale)\r\n scaled_value = dot([K.permute_dimensions(soft_scale,(0,2,1)),K.permute_dimensions(x_value,(0,2,1))],axes=(1, 2))\r\n scaled_value = K.reshape(scaled_value, shape=(-1,h,w,c)) \r\n customize_multi = self.k * scaled_value\r\n layero = add([customize_multi,x])\r\n my_concat = Lambda(lambda x: K.concatenate([x[0], x[1]], axis=3))\r\n concate = my_concat([layero,g])\r\n return concate \r\n def compute_output_shape(self,input_shape):\r\n ll = list(input_shape)[1]\r\n return (None,ll[1],ll[1],ll[3]*3)\r\n def get_custom_objects():\r\n return {'multiplication': multiplication}\r\n\r\ndef attention_up_and_concatenate(inputs):\r\n g,x = inputs[0],inputs[1]\r\n inter_channel = g.get_shape().as_list()[3]\r\n g = Conv2DTranspose(inter_channel, (2,2), strides=[2, 2],padding='same')(g)\r\n x_query = Conv2D(inter_channel//4, [1, 1], strides=[1, 1], data_format='channels_last')(x)\r\n phi_g = Conv2D(inter_channel//4, [1, 1], strides=[1, 1], data_format='channels_last')(g)\r\n x_value = Conv2D(inter_channel//2, [1, 1], strides=[1, 1], data_format='channels_last')(x)\r\n inputs = [g,x,x_query,phi_g,x_value]\r\n concate = multiplication(inter_channel)(inputs)\r\n return concate\r\n\r\nclass multiplication2(Layer):\r\n def __init__(self,inter_channel = None,**kwargs):\r\n super(multiplication2, self).__init__(**kwargs)\r\n self.inter_channel = inter_channel\r\n def build(self,input_shape=None):\r\n self.k = self.add_weight(name='k',shape=(1,),initializer='zeros',dtype='float32',trainable=True)\r\n def get_config(self):\r\n base_config = super(multiplication2, self).get_config()\r\n config = {'inter_channel':self.inter_channel}\r\n return dict(list(base_config.items()) + list(config.items())) \r\n def call(self,inputs):\r\n g,x,rate = inputs[0],inputs[1],inputs[2]\r\n scaled_value = multiply([x, rate])\r\n att_x = self.k * scaled_value\r\n att_x = add([att_x,x])\r\n my_concat = Lambda(lambda x: K.concatenate([x[0], x[1]], axis=3))\r\n concate = my_concat([att_x, g])\r\n return concate \r\n def compute_output_shape(self,input_shape):\r\n ll = list(input_shape)[1]\r\n return (None,ll[1],ll[1],ll[3]*2)\r\n def get_custom_objects():\r\n return {'multiplication2': multiplication2}\r\n\r\ndef attention_up_and_concatenate2(inputs):\r\n g, x = inputs[0],inputs[1]\r\n inter_channel = g.get_shape().as_list()[3]\r\n g = Conv2DTranspose(inter_channel//2, (3,3), strides=[2, 2],padding='same')(g)\r\n g = Conv2D(inter_channel//2, [1, 1], strides=[1, 1], data_format='channels_last')(g)\r\n theta_x = Conv2D(inter_channel//4, [1, 1], strides=[1, 1], data_format='channels_last')(x)\r\n phi_g = Conv2D(inter_channel//4, [1, 1], strides=[1, 1], data_format='channels_last')(g)\r\n f = Activation('relu')(add([theta_x, phi_g]))\r\n psi_f = Conv2D(1, [1, 1], strides=[1, 1], data_format='channels_last')(f)\r\n rate = Activation('sigmoid')(psi_f)\r\n concate = multiplication2()([g,x,rate])\r\n return concate\r\n\r\nloaded_model = load_model('original_model/model_augv_attention2.h5',custom_objects={'multiplication': multiplication,'multiplication2': multiplication2,'dice_coef_loss':dice_coef_loss, 'dice_coef':dice_coef,})\r\n\r\norder= 'Last4_'\r\n\r\n# remove the classifier which is the last 2 layer using pop() function\r\nloaded_model.layers.pop()\r\nloaded_model.layers.pop()\r\n\r\n# unfreeze only the last four layers before the classifier\r\nfor (index, layer) in enumerate(loaded_model.layers):\r\n if (index > 
len(loaded_model.layers)-5):\r\n        layer.trainable = True\r\n    else:\r\n        layer.trainable = False\r\n\r\n# Create new model from the model using the input and output of the last layer (after popping the last 2 layers)\r\n# model_without_last = Model(loaded_model.input, loaded_model.layers[-1].output)\r\nmodel_without_last = Model(loaded_model.input, loaded_model.output)\r\n\r\n# Number of output masks (1 in case you predict only one type of objects); needed before the new head is built\r\nOUTPUT_MASK_CHANNELS = 1\r\n\r\n# 1 dimensional convolution and generate probabilities from Sigmoid function\r\nconv_final = Conv2D(OUTPUT_MASK_CHANNELS, (1, 1), name='conv2d_last')(model_without_last.output)\r\nnew_out = Activation('sigmoid', name='activation_last')(conv_final)\r\n\r\n# Created new model with the newly added last two layers \r\ntransfered_model = Model(inputs=model_without_last.input, outputs=new_out)\r\n\r\n# New model structure\r\n# transfered_model.summary()\r\n\r\n\r\ncase = \"experiment_1_without_NAIP_\"\r\ndata_path = 'samples/experiment_1/without_NAIP/'\r\n\r\n\r\n# read in training and validation data\r\nX_train = np.load(data_path+'train_data.npy')\r\nY_train = np.load(data_path+'train_label.npy')\r\nX_Validation = np.load(data_path+'vali_data.npy')\r\nY_Validation = np.load(data_path+'vali_label.npy')\r\nprint(X_train.shape)\r\nprint(X_Validation.shape)\r\n\r\n# Define the input patch size which we use 224 pixels by 224 pixels\r\npatch_size = 224\r\nIMG_WIDTH = patch_size\r\nIMG_HEIGHT = patch_size\r\n\r\n# Number of feature channels or raster images \r\nINPUT_CHANNELS = 8\r\n\r\n# Number of output masks (1 in case you predict only one type of objects)\r\nOUTPUT_MASK_CHANNELS = 1\r\n\r\n# train for maximum 25 epochs\r\nmaxepoch = 25\r\n\r\n# hyperparameters\r\nlearning_rate = 0.0000359\r\npatience = 20\r\ntransfered_model.compile(optimizer=Adam(lr=learning_rate), loss=dice_coef_loss, metrics=[dice_coef,'accuracy'])\r\ncallbacks = [\r\n    ReduceLROnPlateau(monitor='val_loss', factor=0.7, patience=patience, min_lr=1e-9, verbose=1, mode='min'),\r\n    EarlyStopping(monitor='val_loss', patience=patience+10, verbose=0),\r\n    ModelCheckpoint('first_pass_tf_model.h5', monitor='val_loss', save_best_only=True, verbose=0),\r\n    ]\r\n\r\nfine_tuned_model_P1_history = transfered_model.fit(X_train, Y_train, validation_data=(X_Validation, Y_Validation), batch_size=2, epochs=maxepoch, callbacks=callbacks)\r\n\r\n# second pass: unfreeze every layer and train the whole network\r\nfor (index, layer) in enumerate(transfered_model.layers):\r\n    layer.trainable = True\r\n\r\n# Define the input patch size which we use 224 pixels by 224 pixels\r\npatch_size = 224\r\nIMG_WIDTH = patch_size\r\nIMG_HEIGHT = patch_size\r\n\r\n# Number of feature channels or raster images \r\nINPUT_CHANNELS = 8\r\n\r\n# Number of output masks (1 in case you predict only one type of objects)\r\nOUTPUT_MASK_CHANNELS = 1\r\n\r\n# train for maximum 25 epochs\r\nmaxepoch = 25\r\n\r\n# hyperparameters\r\n# smaller learning rate for the second, full-network pass\r\nlearning_rate = 0.00003359\r\npatience = 25\r\ntransfered_model.compile(optimizer=Adam(lr=learning_rate), loss=dice_coef_loss, metrics=[dice_coef,'accuracy'])\r\ncallbacks = [\r\n    ReduceLROnPlateau(monitor='val_loss', factor=0.7, patience=patience, min_lr=1e-9, verbose=1, mode='min'),\r\n    EarlyStopping(monitor='val_loss', patience=patience+10, verbose=0),\r\n    ModelCheckpoint('second_pass_tf_model.h5', monitor='val_loss', save_best_only=True, verbose=0),\r\n    ]\r\n\r\nfine_tuned_model_P2_history = transfered_model.fit(X_train, Y_train, validation_data=(X_Validation, Y_Validation), batch_size=1, epochs=maxepoch, callbacks=callbacks)\r\n\r\nimport pickle\r\nname = case+\"model\"\r\n\r\n# save the trained model
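# ---- Editor's note (illustrative, not part of the original script) ----
# Because the network contains custom layers (multiplication, multiplication2)
# and a custom loss, reloading the checkpoints written by the ModelCheckpoint
# callbacks above needs the same custom_objects mapping this script already
# uses when loading the original model, e.g.:
#
#   reloaded = load_model('second_pass_tf_model.h5',
#                         custom_objects={'multiplication': multiplication,
#                                         'multiplication2': multiplication2,
#                                         'dice_coef_loss': dice_coef_loss,
#                                         'dice_coef': dice_coef})
#
# -------------------------------------------------------------------------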
\r\nroot_path = './training_results/experiment_1/'\r\n \r\n# save the weights as h5 file\r\ntransfered_model.save(root_path+name+\".h5\")\r\n\r\n# save the intermediate results and training statistics\r\nwith open(root_path+name+\".pickle\", 'wb') as file_pi:\r\n    pickle.dump(fine_tuned_model_P2_history.history, file_pi, protocol=2)","sub_path":"4_1_experiment_1.py","file_name":"4_1_experiment_1.py","file_ext":"py","file_size_in_byte":10974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"541678185","text":"import torch\nfrom transformers import BertTokenizer, AdamW, BertModel, BertPreTrainedModel, BertConfig, \\\n    get_linear_schedule_with_warmup, XLNetModel, XLNetTokenizer, XLNetConfig\nimport numpy as np\nimport os\nimport random\nfrom Config import *\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom collections import defaultdict\nfrom torch.optim.optimizer import Optimizer\n\ndef paddingList(ls:list,val,returnTensor=False):\n    ls=ls[:]  # copy first, so the caller's list keeps its original size\n    maxLen=max([len(i) for i in ls])\n    for i in range(len(ls)):\n        ls[i]=ls[i]+[val]*(maxLen-len(ls[i]))\n    return torch.tensor(ls,device='cuda') if returnTensor else ls\n\ndef fastTokenizer(a:str,b:str,maxLen,tk):\n    a,b=a.split(),b.split()\n    a,b=tk.convert_tokens_to_ids(a),tk.convert_tokens_to_ids(b)\n    maxLen-=3  # reserve room for [CLS] and the two [SEP] tokens\n    assert maxLen>=0\n    len2=maxLen//2  # if maxLen is odd, the longer half goes to the left segment\n    len1=maxLen-len2\n    # four cases in total: a over-long or not, combined with b over-long or not\n    if len(a)+len(b)>maxLen:  # truncation needed\n        if len(a)<=len1 and len(b)>len2:\n            b=b[:maxLen-len(a)]\n        elif len(a)>len1 and len(b)<=len2:\n            a=a[:maxLen-len(b)]\n        elif len(a)>len1 and len(b)>len2:\n            a=a[:len1]\n            b=b[:len2]\n    input_ids=[tk.cls_token_id]+a+[tk.sep_token_id]+b+[tk.sep_token_id]\n    token_type_ids=[0]*(len(a)+2)+[1]*(len(b)+1)\n    return {'input_ids': input_ids, 'token_type_ids': token_type_ids}\n\n\nclass data_generator:\n    def __init__(self, data, config, shuffle=False):\n        self.data = data\n        self.batch_size = config.batch_size\n        self.max_length = config.MAX_LEN\n        self.shuffle = shuffle\n\n        vocab = 'vocab.txt' if os.path.exists(config.model_path + 'vocab.txt') else 'spiece.model'\n        self.tokenizer = TOKENIZERS[config.model].from_pretrained(config.model_path + vocab)\n\n        self.steps = len(self.data[0]) // self.batch_size\n        if len(self.data[0]) % self.batch_size != 0:\n            self.steps += 1\n\n    def __len__(self):\n        return self.steps\n\n    def __iter__(self):\n        input_ids, input_masks, segment_ids, labels = [], [], [], []\n        for index, data_li in enumerate(self.data):\n\n            text = data_li['text']\n            label = data_li['label']\n            tkRes = self.tokenizer(text, max_length=self.max_length, truncation='longest_first',\n                                   return_attention_mask=False)\n            input_id = tkRes['input_ids']\n            segment_id = tkRes['token_type_ids']\n            assert len(segment_id) == len(input_id)\n            input_ids.append(input_id)\n            segment_ids.append(segment_id)\n            labels.append(label)\n\n            if len(input_ids) == self.batch_size or index == len(self.data)-1:\n                input_ids = paddingList(input_ids, 0, returnTensor=True)  # dynamic padding\n                segment_ids = paddingList(segment_ids, 0, returnTensor=True)\n                input_masks = (input_ids != 0)\n                yield input_ids, input_masks, segment_ids, labels\n                input_ids, input_masks, segment_ids, labels = [], [], [], []\n\n\n\nclass PGD():\n    def __init__(self, model):\n        self.model = model\n        self.emb_backup = {}\n        self.grad_backup = {}\n\n    def attack(self, epsilon=0.3, alpha=0.1, emb_name='word_embeddings', is_first_attack=False):\n        # replace emb_name with the name of the embedding parameter in your model
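# ---- Editor's note (illustrative sketch, not part of the original file) ----
# A typical training step that drives this PGD class (the model/loss calls are
# schematic, and K, the number of attack steps, is an assumption):
#
#   pgd = PGD(model)
#   K = 3
#   loss = compute_loss(model, batch)
#   loss.backward()                          # clean gradients
#   pgd.backup_grad()
#   for t in range(K):
#       pgd.attack(is_first_attack=(t == 0)) # perturb the embedding weights
#       if t != K - 1:
#           model.zero_grad()                # intermediate grads only steer the attack
#       else:
#           pgd.restore_grad()               # last step: restore clean grads and accumulate
#       adv_loss = compute_loss(model, batch)
#       adv_loss.backward()
#   pgd.restore()                            # put the original embeddings back
#   optimizer.step()
#   model.zero_grad()
#
# -----------------------------------------------------------------------------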
\n        for name, param in self.model.named_parameters():\n            if param.requires_grad and emb_name in name:\n                if is_first_attack:\n                    self.emb_backup[name] = param.data.clone()\n                norm = torch.norm(param.grad)\n                if norm != 0 and not torch.isnan(norm):\n                    r_at = alpha * param.grad / norm\n                    param.data.add_(r_at)\n                    param.data = self.project(name, param.data, epsilon)\n\n    def restore(self, emb_name='word_embeddings'):\n        # replace emb_name with the name of the embedding parameter in your model\n        for name, param in self.model.named_parameters():\n            if param.requires_grad and emb_name in name:\n                assert name in self.emb_backup\n                param.data = self.emb_backup[name]\n        self.emb_backup = {}\n\n    def project(self, param_name, param_data, epsilon):\n        r = param_data - self.emb_backup[param_name]\n        if torch.norm(r) > epsilon:\n            r = epsilon * r / torch.norm(r)\n        return self.emb_backup[param_name] + r\n\n    def backup_grad(self):\n        for name, param in self.model.named_parameters():\n            if param.requires_grad:\n                self.grad_backup[name] = param.grad.clone()\n\n    def restore_grad(self):\n        for name, param in self.model.named_parameters():\n            if param.requires_grad:\n                param.grad = self.grad_backup[name]\n\n\n\nclass FGM():\n    def __init__(self, model):\n        self.model = model\n        self.backup = {}\n\n    def attack(self, epsilon=0.25, emb_name='word_embeddings'):\n        # replace emb_name with the name of the embedding parameter in your model\n        for name, param in self.model.named_parameters():\n            if param.requires_grad and emb_name in name:\n                self.backup[name] = param.data.clone()\n                norm = torch.norm(param.grad)\n                if norm != 0:\n                    r_at = epsilon * param.grad / norm\n                    param.data.add_(r_at)\n\n    def restore(self, emb_name='word_embeddings'):\n        # replace emb_name with the name of the embedding parameter in your model\n        for name, param in self.model.named_parameters():\n            if param.requires_grad and emb_name in name:\n                assert name in self.backup\n                param.data = self.backup[name]\n        self.backup = {}\n\n\n# supports both multi-class and binary classification\nclass FocalLoss(nn.Module):\n    \"\"\"\n    This is an implementation of Focal Loss with smooth label cross entropy supported which is proposed in\n    'Focal Loss for Dense Object Detection. 
(https://arxiv.org/abs/1708.02002)'\n Focal_Loss= -1*alpha*(1-pt)^gamma*log(pt)\n :param num_class:\n :param alpha: (tensor) 3D or 4D the scalar factor for this criterion\n :param gamma: (float,double) gamma > 0 reduces the relative loss\n for well-classified examples (p>0.5) putting more\n focus on hard misclassified example\n :param smooth: (float,double) smooth value when cross entropy\n :param balance_index: (int) balance class index,\n should be specific when alpha is float\n :param size_average: (bool, optional) By default,\n the losses are averaged over each loss element in the batch.\n \"\"\"\n def __init__(self, num_class, alpha=None, gamma=2,\n smooth=None, size_average=True):\n super(FocalLoss, self).__init__()\n self.num_class = num_class\n self.alpha = alpha\n self.gamma = gamma\n self.smooth = smooth\n self.size_average = size_average\n\n if self.alpha is None:\n self.alpha = torch.ones(self.num_class, 1)\n elif isinstance(self.alpha, (list, np.ndarray)):\n assert len(self.alpha) == self.num_class\n self.alpha = torch.FloatTensor(alpha).view(self.num_class, 1)\n self.alpha = self.alpha / self.alpha.sum()\n else:\n raise TypeError('Not support alpha type')\n if self.smooth is not None:\n if self.smooth < 0 or self.smooth > 1.0:\n raise ValueError('smooth value should be in [0,1]')\n\n def forward(self, input, target):\n logit = F.softmax(input, dim=1)\n\n if logit.dim() > 2:\n # N,C,d1,d2 -> N,C,m (m=d1*d2*...)\n logit = logit.view(logit.size(0), logit.size(1), -1)\n logit = logit.permute(0, 2, 1).contiguous()\n logit = logit.view(-1, logit.size(-1))\n target = target.view(-1, 1)\n\n # N = input.size(0)\n # alpha = torch.ones(N, self.num_class)\n # alpha = alpha * (1 - self.alpha)\n # alpha = alpha.scatter_(1, target.long(), self.alpha)\n epsilon = 1e-10\n alpha = self.alpha\n if alpha.device != input.device:\n alpha = alpha.to(input.device)\n\n idx = target.cpu().long()\n one_hot_key = torch.FloatTensor(target.size(0), self.num_class).zero_()\n one_hot_key = one_hot_key.scatter_(1, idx, 1)\n if one_hot_key.device != logit.device:\n one_hot_key = one_hot_key.to(logit.device)\n\n if self.smooth:\n one_hot_key = torch.clamp(\n one_hot_key, self.smooth, 1.0 - self.smooth)\n pt = (one_hot_key * logit).sum(1) + epsilon\n logpt = pt.log()\n\n gamma = self.gamma\n\n alpha = alpha[idx]\n loss = -1 * alpha * torch.pow((1 - pt), gamma) * logpt\n\n if self.size_average:\n loss = loss.mean()\n else:\n loss = loss.sum()\n return loss\n\n\ndef f1_match(y_true,y_pred):\n acc = sum(y_pred & y_true) / (sum(y_pred))\n rec = sum(y_pred & y_true) / (sum(y_true))\n\n return 2 * acc * rec /(acc + rec)\n\nclass Lookahead(Optimizer):\n r\"\"\"PyTorch implementation of the lookahead wrapper.\n Lookahead Optimizer: https://arxiv.org/abs/1907.08610\n \"\"\"\n\n def __init__(self, optimizer, la_steps=5, la_alpha=0.8, pullback_momentum=\"none\"):\n \"\"\"optimizer: inner optimizer\n la_steps (int): number of lookahead steps\n la_alpha (float): linear interpolation factor. 
\n\ndef f1_match(y_true,y_pred):\n    acc = sum(y_pred & y_true) / (sum(y_pred))  # precision\n    rec = sum(y_pred & y_true) / (sum(y_true))  # recall\n\n    return 2 * acc * rec /(acc + rec)\n\nclass Lookahead(Optimizer):\n    r\"\"\"PyTorch implementation of the lookahead wrapper.\n    Lookahead Optimizer: https://arxiv.org/abs/1907.08610\n    \"\"\"\n\n    def __init__(self, optimizer, la_steps=5, la_alpha=0.8, pullback_momentum=\"none\"):\n        \"\"\"optimizer: inner optimizer\n        la_steps (int): number of lookahead steps\n        la_alpha (float): linear interpolation factor. 1.0 recovers the inner optimizer.\n        pullback_momentum (str): how to adjust the inner optimizer's momentum on the interpolation update (\"reset\", \"pullback\" or \"none\")\n        \"\"\"\n        self.optimizer = optimizer\n        self._la_step = 0  # counter for inner optimizer\n        self.la_alpha = la_alpha\n        self._total_la_steps = la_steps\n        pullback_momentum = pullback_momentum.lower()\n        assert pullback_momentum in [\"reset\", \"pullback\", \"none\"]\n        self.pullback_momentum = pullback_momentum\n\n        self.state = defaultdict(dict)\n\n        # Cache the current optimizer parameters\n        for group in optimizer.param_groups:\n            for p in group['params']:\n                param_state = self.state[p]\n                param_state['cached_params'] = torch.zeros_like(p.data)\n                param_state['cached_params'].copy_(p.data)\n                if self.pullback_momentum == \"pullback\":\n                    param_state['cached_mom'] = torch.zeros_like(p.data)\n\n    def __getstate__(self):\n        return {\n            'state': self.state,\n            'optimizer': self.optimizer,\n            'la_alpha': self.la_alpha,\n            '_la_step': self._la_step,\n            '_total_la_steps': self._total_la_steps,\n            'pullback_momentum': self.pullback_momentum\n        }\n\n    def zero_grad(self):\n        self.optimizer.zero_grad()\n\n    def get_la_step(self):\n        return self._la_step\n\n    def state_dict(self):\n        return self.optimizer.state_dict()\n\n    def load_state_dict(self, state_dict):\n        self.optimizer.load_state_dict(state_dict)\n\n    def _backup_and_load_cache(self):\n        \"\"\"Useful for performing evaluation on the slow weights (which typically generalize better)\n        \"\"\"\n        for group in self.optimizer.param_groups:\n            for p in group['params']:\n                param_state = self.state[p]\n                param_state['backup_params'] = torch.zeros_like(p.data)\n                param_state['backup_params'].copy_(p.data)\n                p.data.copy_(param_state['cached_params'])\n\n    def _clear_and_load_backup(self):\n        for group in self.optimizer.param_groups:\n            for p in group['params']:\n                param_state = self.state[p]\n                p.data.copy_(param_state['backup_params'])\n                del param_state['backup_params']\n\n    @property\n    def param_groups(self):\n        return self.optimizer.param_groups\n\n    def step(self, closure=None):\n        \"\"\"Performs a single Lookahead optimization step.\n        Arguments:\n            closure (callable, optional): A closure that reevaluates the model\n                and returns the loss.\n        \"\"\"\n        loss = self.optimizer.step(closure)\n        self._la_step += 1\n\n        if self._la_step >= self._total_la_steps:\n            self._la_step = 0\n            # Lookahead and cache the current optimizer parameters\n            for group in self.optimizer.param_groups:\n                for p in group['params']:\n                    param_state = self.state[p]\n                    p.data.mul_(self.la_alpha).add_(\n                        param_state['cached_params'], alpha=1.0 - self.la_alpha)  # crucial line\n                    param_state['cached_params'].copy_(p.data)\n                    if self.pullback_momentum == \"pullback\":\n                        internal_momentum = self.optimizer.state[p][\"momentum_buffer\"]\n                        self.optimizer.state[p][\"momentum_buffer\"] = internal_momentum.mul_(self.la_alpha).add_(\n                            param_state[\"cached_mom\"], alpha=1.0 - self.la_alpha)\n                        param_state[\"cached_mom\"] = self.optimizer.state[p][\"momentum_buffer\"]\n                    elif self.pullback_momentum == \"reset\":\n                        self.optimizer.state[p][\"momentum_buffer\"] = torch.zeros_like(\n                            p.data)\n\n        return loss\n
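\n\n\n# --- Added usage sketch (not part of the original file) ---\n# Minimal example of wrapping an inner optimizer with Lookahead; the model and\n# learning-rate values are assumptions.\ndef _lookahead_example(model):\n    base = torch.optim.SGD(model.parameters(), lr=1e-3, momentum=0.9)\n    # \"pullback\" interpolates the momentum buffer as well, so the inner\n    # optimizer must actually maintain one (hence SGD with momentum).\n    return Lookahead(base, la_steps=5, la_alpha=0.8, pullback_momentum=\"pullback\")\n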
","sub_path":"Nezha_pytorch/finetuning/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":13321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"168792649","text":"# Script for getting next airing anime for Rainmeter skin. Only shows anime you've marked as watching on Anilist.\n# ASSIGN USERID & USERNAME in anilistUserInfo.py\n# Limit of 90 requests/minute.\n# Query gets more information than the Rainmeter skin currently uses. Might or might not develop the skin further.\n\n# import anilistUserInfo\nimport configparser\nimport requests\nimport json\nimport sys\nimport os\n\nconfig = configparser.ConfigParser()\n\n\ndef resource_path(relative_path):\n    if hasattr(sys, '_MEIPASS'):\n        return os.path.join(sys._MEIPASS, relative_path)\n    return os.path.join(os.path.abspath(\".\"), relative_path)\n\n\n# Query func.\ndef api_request(url, query, variables):\n    return requests.post(url, json={'query': query, 'variables': variables}).json()\n\n\n# read config file & assign values to vars for ease of use.\nconfig = configparser.ConfigParser()\nconfig.read(resource_path('config.ini'))\n\n# Makes USERNAME \"DoesItReallyMatter\" if empty or something wrong with .ini\ntry:\n    if config['ANILISTUSER']['USERNAME'] == \"\":\n        USERNAME = \"DoesItReallyMatter\"\n    else:\n        USERNAME = config['ANILISTUSER']['USERNAME']\nexcept (KeyError):\n    USERNAME = \"DoesItReallyMatter\"\n\n# Anilist GraphQL URL.\nurl = \"https://graphql.anilist.co\"\n\n# Query\nquery = '''\nquery($userName: String, $userStatus: MediaListStatus) {\n  Page {\n    pageInfo {\n      total\n      hasNextPage\n    }\n    mediaList(userName: $userName, status: $userStatus, type: ANIME) {\n      media {\n        title {\n          userPreferred\n        }\n        episodes\n        nextAiringEpisode {\n          episode\n          timeUntilAiring\n        }\n        format\n        coverImage {\n          large\n        }\n        status\n      }\n      status\n    }\n  }\n}\n'''\n# Query variables, what to get.\nvariables = {\n    'userStatus': \"CURRENT\",\n    'userName': USERNAME\n}\n\n# make request, save to result.\nresult = api_request(url, query, variables)\n\n# Create link to anilist profile.\nanilistProfileUrl = \"https://anilist.co/user/\" + USERNAME\n\n# User following\nfollowingReleasingAnimeCount = 0\n# Next airing anime information.\nnextTitle = str()\nnextEpisode = str()\nnextTimeUntilAir = 0\nnextTitleShort = str()\nnextAirTimeFormatted = str()\nnextEpisodeAndAirTime = str()\n\n# iterate animes.\nfor i in result['data']['Page']['mediaList']:\n    # Check if anime is releasing episodes.\n    try:\n        if(i['media']['status'] == \"RELEASING\"):\n            # check if its the first iteration, save the values.\n            followingReleasingAnimeCount += 1\n            if nextTimeUntilAir == 0:\n                nextTitle = i['media']['title']['userPreferred']\n                nextEpisode = i['media']['nextAiringEpisode']['episode']\n                nextTimeUntilAir = i['media']['nextAiringEpisode']['timeUntilAiring']\n            # check if anime has less time until new episode.\n            elif i['media']['nextAiringEpisode']['timeUntilAiring'] < nextTimeUntilAir:\n                nextTitle = i['media']['title']['userPreferred']\n                nextEpisode = i['media']['nextAiringEpisode']['episode']\n                nextTimeUntilAir = i['media']['nextAiringEpisode']['timeUntilAiring']\n    except (TypeError):\n        # A TypeError can occur here, e.g. when nextAiringEpisode is None; skip the entry.\n        pass\n\n# If watching releasing anime, else skip.\nif followingReleasingAnimeCount != 0:\n    # format variables\n    # convert time until airing to days, hours, minutes and seconds. Use whatever's needed.\n    day = nextTimeUntilAir // (24*3600)\n    nextTimeUntilAir = nextTimeUntilAir % (24 * 3600)\n    hour = nextTimeUntilAir // 3600\n    nextTimeUntilAir %= 3600\n    minutes = nextTimeUntilAir // 60\n    nextTimeUntilAir %= 60\n    seconds = nextTimeUntilAir\n
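\n    # --- Added aside (not part of the original script) ---\n    # The same breakdown can be written with divmod; equivalent sketch, where\n    # total_seconds stands for the original timeUntilAiring value:\n    #   day, rem = divmod(total_seconds, 24 * 3600)\n    #   hour, rem = divmod(rem, 3600)\n    #   minutes, seconds = divmod(rem, 60)\n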
\n    # Remove days, hours if 0.\n    if day < 1:\n        nextAirTimeFormatted = (\"%dh %dm\" % (hour, minutes))\n    elif hour < 1:\n        nextAirTimeFormatted = (\"%dm\" % (minutes))\n    else:\n        nextAirTimeFormatted = (\"%dd %dh %dm\" % (day, hour, minutes))\n\n    # Concat Episode number with time until next episode.\n    nextEpisodeAndAirTime = \"Ep \" + str(nextEpisode) + \" in \" + str(nextAirTimeFormatted)\n    # Shorten title if over 45 characters, else make it same as nextTitle.\n    nextTitleShort = (nextTitle[:45] + '..') if len(nextTitle) > 45 else nextTitle\n\n# Create dict with info.\nnextAnimeInfoDict = {\n    \"anilistProfile\": USERNAME,\n    \"anilistProfileUrl\": anilistProfileUrl,\n    \"followingReleasingCount\": str(followingReleasingAnimeCount),\n    \"title\": nextTitle,\n    \"titleShort\": nextTitleShort,\n    \"episode\": nextEpisode,\n    \"timeUntilAir\": nextAirTimeFormatted,\n    \"episodeAndAirTime\": nextEpisodeAndAirTime\n}\n\n# Create file rainmeter skin can read from, write dict as json to file.\nwith open(resource_path('nextAiringAnime.txt'), 'w') as outfile:\n    json.dump(nextAnimeInfoDict, outfile)\n\n# Rainmeter couldn't always run the file; close the written file and exit the script explicitly. (This appears to have solved the problem.)\noutfile.close()\nexit()\n","sub_path":"RainmeterNextAnime/rainmeterNextAnime.py","file_name":"rainmeterNextAnime.py","file_ext":"py","file_size_in_byte":5136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"580715337","text":"#!/usr/bin/env python\nimport os, pygame, sys, time, random, math\nfrom pygame.locals import *\n\ncardbitmapwidth = 100\ncardw = cardbitmapwidth+20\ncardh = cardw * 3 / 2\npad = cardw/6\n\npygame.init()\nscreenw = (cardw+pad)*6\nscreenh = (cardh+pad)*4\nscreen = pygame.display.set_mode((screenw,screenh))\n\nfont = pygame.font.SysFont('andale mono',18)\n\ncolors = {'white':(255,255,255),'black':(0,0,0),'ltgrey':(200,200,200),\\\n\t\t 'blue':(0,0,255),'red':(255,0,0),'dkgrey':(20,20,20),\\\n\t\t 'ltblue':(230,225,255)}\n\ndef loadcardimage(shape,color,fill):\n\tname = shape+'_'+color+'_'+fill\n\timg = loadimage(name)\n\trect = img.get_rect()\n\tfactor = (cardbitmapwidth)/rect.width\n\treturn pygame.transform.scale(img,(factor*rect.width,factor*rect.height))\n\ndef loadimage(name):\n\tpath = os.path.join('data','menge',name+'.png')\n\treturn pygame.image.load(path).convert()\n\t\n\ndef testset(card1,card2,card3):\n\tfor property in xrange(4):\n\t\tif (card1[property]==card2[property] and card2[property]==card3[property] and card1[property]==card3[property])\\\n\t\tor (card1[property]!=card2[property] and card2[property]!=card3[property] and card1[property]!=card3[property]):\n\t\t\tpass\n\t\telse:\n\t\t\treturn False\n\treturn True\n
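\n# --- Added example (not part of the original game) ---\n# testset() implements the Set rule: for each of the four properties the three\n# cards must be all-equal or all-different. Hypothetical card data:\n#   testset(['rot','kreis','ganz','1'],\n#           ['rot','welle','streifen','2'],\n#           ['rot','raute','leer','3'])   # -> True (same color, rest all differ)\n#   testset(['rot','kreis','ganz','1'],\n#           ['rot','kreis','ganz','2'],\n#           ['blau','kreis','ganz','3'])  # -> False (colors neither all equal nor all distinct)\n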
\nclass Cache:\n\tdef __init__(self):\n\t\tself.cache = {}\n\t\tfor color in ['rot','blau','gruen']:\n\t\t\tfor shape in ['kreis','welle','raute']:\n\t\t\t\tfor fill in ['ganz','streifen','leer']:\n\t\t\t\t\tself.cache.update({color+shape+fill:loadcardimage(shape,color,fill)})\n\t\tself.cache.update({'back':loadimage('card')})\n\tdef get(self,name):\n\t\treturn self.cache[name]\n\tdef getcard(self,color,shape,fill):\n\t\treturn self.cache[color+shape+fill]\n\nc=Cache()\n\nclass Mouse(pygame.sprite.Sprite):\n\tdef __init__(self):\n\t\tpygame.sprite.Sprite.__init__(self)\n\t\tself.image = pygame.surface.Surface((cardw*3,cardh))\n\t\tself.image.set_colorkey((0,0,0))\n\t\t#self.image.fill((20,200,200))\n\t\tself.rect = Rect(0,0,1,1)\n\t\tself.inhand = []\n\t\tself.vel = (0,0)\n\t\tself.lastpos = (0,0)\n\tdef holding(self):\n\t\treturn len(self.inhand)\n\tdef pickup(self,card):\n\t\tself.inhand.insert(0,card)\n\tdef putdown(self):\n\t\tif len(self.inhand) > 0:\n\t\t\tcard = self.inhand[0]\n\t\t\tself.inhand.remove(card)\n\t\t\treturn card\n\tdef update(self):\n\t\tself.image.fill(colors['black'])\n\t\tfor each in xrange(len(self.inhand)):\n\t\t\tself.image.blit(self.inhand[each].image,(each*cardw,0))\n\t\tpos = pygame.mouse.get_pos()\n\t\tself.rect.center = pos\n\tdef touch(self,target):\n\t\treturn self.rect.colliderect(target.rect)\n\tdef giveset(self):\n\t\tcards = self.inhand\n\t\tself.inhand = []\n\t\treturn cards\n\t\t\n\nclass Card(pygame.sprite.Sprite):\n\tdef __init__(self,data):\n\t\tpygame.sprite.Sprite.__init__(self)\n\t\tself.data = data\n\t\tself.rect = Rect(0,0,cardw,cardh)\n\t\tback = pygame.surface.Surface(self.rect.size)\n\t\tback.fill(colors['white'])\n\t\tpygame.draw.rect(back,colors['ltgrey'],self.rect,cardw/10)\n\t\tpygame.draw.rect(back,colors['dkgrey'],self.rect,cardw/30)\n\t\tself.image = pygame.surface.Surface(self.rect.size)\n\t\tself.image.set_colorkey(colors['black'])\n\t\tdesign = c.getcard(data[0],data[1],data[2])\n\t\tdrect = design.get_rect()\n\t\tdrect.center = self.rect.center\n\t\tself.image.blit(back,(0,0))\n\t\tif data[3] == '1':\n\t\t\tself.image.blit(design,drect)\n\t\telif data[3] == '2':\n\t\t\tself.image.blit(design,(drect.left,drect.top-drect.height/2))\n\t\t\tself.image.blit(design,(drect.left,drect.top+drect.height/2))\n\t\telif data[3] == '3':\n\t\t\tself.image.blit(design,(drect.left,drect.top-drect.height))\n\t\t\tself.image.blit(design,(drect.left,drect.top))\n\t\t\tself.image.blit(design,(drect.left,drect.top+drect.height))\n\tdef __repr__(self):\n\t\treturn self.data[0]+'_'+self.data[1]+'_'+self.data[2]+'_'+self.data[3]\n\nclass Deck:\n\tdef __init__(self):\n\t\tpile = []\n\t\tfor color in ['rot','blau','gruen']:\n\t\t\tfor shape in ['kreis','welle','raute']:\n\t\t\t\tfor fill in ['ganz','streifen','leer']:\n\t\t\t\t\tfor number in ['1','2','3']:\n\t\t\t\t\t\tpile.append(Card([color,shape,fill,number]))\n\t\tself.cards = pile\n\tdef draw(self):\n\t\tcard = random.choice(self.cards)\n\t\tself.cards.remove(card)\n\t\treturn card\n\nclass CurrentCards:\n\tdef __init__(self):\n\t\tself.spaces = [[],[],[]]\n\tdef addcard(self,card):\n\t\tx = cardw+pad\n\t\ty = cardh+pad\n\t\tpos = self.findemptyspace()\n\t\trow = pos[0]\n\t\tcol = pos[1]\n\t\tcard.rect.topleft = (x*col,y*row)\n\t\tif col == len(self.spaces[row]):\n\t\t\tself.spaces[row].append(0)\n\t\tself.spaces[row][col] = card\n\tdef findemptyspace(self):\n\t\tfor row in xrange(3):\n\t\t\tif self.spaces[row] == []:\n\t\t\t\treturn row,0\n\t\tfor row in xrange(3):\n\t\t\tfor col in xrange(len(self.spaces[row])):\n\t\t\t\tif self.spaces[row][col] == 0:\n\t\t\t\t\treturn row,col\n\t\tminsize = 100\n\t\tfor row in xrange(3):\n\t\t\tif len(self.spaces[row]) < minsize:\n\t\t\t\tminrow = row\n\t\t\t\tminsize = len(self.spaces[row])\n\t\treturn minrow,minsize\n\tdef takecard(self,card):\n\t\tfor row in xrange(3):\n\t\t\tfor col in xrange(len(self.spaces[row])):\n\t\t\t\tif self.spaces[row][col] != 0:\n\t\t\t\t\tif card == self.spaces[row][col]:\n\t\t\t\t\t\ttoreturn = 
self.spaces[row][col]\n\t\t\t\t\t\tself.spaces[row][col] = 0\n\t\treturn toreturn\n\tdef getallsprites(self):\n\t\tsprites = []\n\t\tfor row in self.spaces:\n\t\t\tfor element in row:\n\t\t\t\tif element != 0:\n\t\t\t\t\tsprites.append(element)\n\t\treturn sprites\n\tdef replenish(self,deck):\n\t\tif len(deck.cards) > 0:\n\t\t\tfor _ in xrange(3):\n\t\t\t\tself.addcard(deck.draw())\n\t\t\n\nclass PlayerPile(pygame.sprite.Sprite):\n\tdef __init__(self,name,number):\n\t\tpygame.sprite.Sprite.__init__(self)\n\t\tself.name = name\n\t\tself.image = pygame.surface.Surface((cardw,cardh))\n\t\tself.rect = self.image.get_rect()\n\t\tself.rect.topleft = (cardw*2*(number-1),(cardh+pad)*3)\n\t\tself.image.set_colorkey(colors['black'])\n\t\tself.cardback = pygame.surface.Surface((cardw,cardh))\n\t\tself.cardback.fill(colors['ltblue'])\n\t\tpygame.draw.rect(self.cardback,colors['ltgrey'],(0,0,cardw,cardh),cardw/30)\n\t\tself.numsets = 0\n\tdef addset(self):\n\t\tself.numsets += 1\n\t\tself.image.fill(colors['black'])\n\t\tfont.set_bold(1)\n\t\tren = font.render(' '+self.name+': '+\\\n\t\t\t\t\t\t\t\tstr(self.numsets),1,colors['dkgrey'])\n\t\tself.image.blit(self.cardback,(0,0))\n\t\tself.image.blit(ren,(0,cardh/3))\n\tdef update(self):\n\t\tself.addset()\n\n#background = pygame.transform.scale(pygame.image.load('/home/kgraehl/Pictures/backgrounds/pict0008.jpg').convert(),(screenw,screenh))\nbackground = pygame.surface.Surface((200,200))\nbackground.fill(colors['ltgrey'])\nbackground = pygame.transform.scale(background,(screenw,screenh))\ncurrentcards = CurrentCards()\ndeck = Deck()\nclock = pygame.time.Clock()\ncards = pygame.sprite.RenderPlain()\nmouse = Mouse()\npile1 = PlayerPile('kyle',1)\npile1.addset()\n\nplayers = (pile1,)\nplayerpiles = pygame.sprite.RenderPlain()\n\nfor _ in xrange(12):\n\tcurrentcards.addcard(deck.draw())\n\ndef main():\n\twhile 1:\n\t\tclock.tick(60)\n\t#Handle Input Events\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == QUIT:\n\t\t\t\treturn\n\t\t\telif event.type == KEYDOWN and event.key == K_ESCAPE:\n\t\t\t\treturn\n\t\t\telif event.type == MOUSEBUTTONDOWN:\n\t\t\t\tpressed = pygame.mouse.get_pressed()\n\t\t\t\tif pressed[0]:\n\t\t\t\t\tfor card in cards.sprites():\n\t\t\t\t\t\tif mouse.touch(card):\n\t\t\t\t\t\t\tif mouse.holding() < 3:\n\t\t\t\t\t\t\t\tmouse.pickup(card)\n\t\t\t\t\t\t\t\tcurrentcards.takecard(card)\n\t\t\t\t\tfor pnum in xrange(len(players)):\n\t\t\t\t\t\tif mouse.touch(players[pnum]):\n\t\t\t\t\t\t\tif mouse.holding() == 3:\n\t\t\t\t\t\t\t\tmouse.giveset()\n\t\t\t\t\t\t\t\tplayers[pnum].addset()\n\t\t\t\t\t\t\t\tcurrentcards.replenish(deck)\n\t\t\t\tif pressed[2]:\n\t\t\t\t\tcard = mouse.putdown()\n\t\t\t\t\tif card:\n\t\t\t\t\t\tcurrentcards.addcard(card)\n\t\tscreen.blit(background,(0,0))\n\t\t#screen.fill((80,80,80))\n\t\tcards.empty()\n\t\tfor each in currentcards.getallsprites():\n\t\t\tcards.add(each)\n\t\tcards.draw(screen)\n\n\t\tplayerpiles.empty()\n\t\tfor each in players:\n\t\t\tplayerpiles.add(each)\n\t\tplayerpiles.draw(screen)\n\t\tmouse.update()\n\t\tscreen.blit(mouse.image,mouse.rect)\n\n\t#Draw Everything\n\t\tpygame.display.flip()\n\nmain()\n\n\n","sub_path":"oldprogs/menge/new.py","file_name":"new.py","file_ext":"py","file_size_in_byte":7669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"311358284","text":"\"\"\"add an orders table with a link to all tables\n\nRevision ID: 283cc4f4fa00\nRevises: 3b9ff5e239ae\nCreate Date: 2021-09-21 09:38:47.274014\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '283cc4f4fa00'\ndown_revision = '3b9ff5e239ae'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.create_table('orders',\n    sa.Column('id', sa.Integer(), nullable=False),\n    sa.Column('user_id', sa.Integer(), nullable=True),\n    sa.Column('role_id', sa.Integer(), nullable=True),\n    sa.Column('price', sa.Integer(), nullable=True),\n    sa.Column('quantity', sa.Integer(), nullable=True),\n    sa.ForeignKeyConstraint(['role_id'], ['toppings.id'], ),\n    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),\n    sa.PrimaryKeyConstraint('id')\n    )\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.drop_table('orders')\n    # ### end Alembic commands ###\n
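\n# --- Added note (not part of the generated migration) ---\n# For reference, upgrade() above corresponds roughly to this DDL (column\n# types and constraints inferred from the op.create_table call):\n#   CREATE TABLE orders (\n#       id INTEGER NOT NULL PRIMARY KEY,\n#       user_id INTEGER REFERENCES users (id),\n#       role_id INTEGER REFERENCES toppings (id),\n#       price INTEGER,\n#       quantity INTEGER\n#   );\n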
","sub_path":"migrations/versions/283cc4f4fa00_add_an_orders_table_with_a_link_to_all_.py","file_name":"283cc4f4fa00_add_an_orders_table_with_a_link_to_all_.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"330534514","text":"#!/usr/bin/env python3\n'''\n    @author Matthew Hoeper\n'''\n\ndef hardest_assignment(headers, data):\n    '''\n    Calculates the most difficult homework assignment in grades.csv based on lowest average\n    :requires a list with the headers from the csv file\n    :requires a numpy array with all of the data from csv file\n    :returns assignment with lowest average\n    '''\n    import numpy as np\n    index = {}\n    avg = []\n    for x in range(len(headers)):\n        if \"Homework\" in headers[x] and \"Final\" not in headers[x] and \"Current\" not in headers[x]:\n            v = data[:, x]\n            v = v[~np.isnan(v)]\n            norm = np.divide(v, max(v))\n            index[np.mean(norm)] = headers[x]\n            avg.append(np.mean(norm))\n    lowest = min(avg)\n\n    return index[lowest]\n","sub_path":"HW1/hardest_assignment.py","file_name":"hardest_assignment.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"216641220","text":"tasks = [\n    ['Is it pronounced \"payton\" or \"piton\"?', 'payton'],\n    ['Which data type stores integer values?', 'int'],\n    ['Which data type stores string values?', 'string'],\n    ['Which is more performant, Node.js or Python?', 'C++'],\n    ['Which keyword defines functions in python?', 'def'],\n    ['What kind of data typing does python use?', 'Strong'],\n    ['Is python a C-like language?', 'No'],\n    ['In what year did Python appear?', '1991'],\n    ['What is the name of the creator of Python?', 'Guido van Rossum'],\n    ['What is the latest major version of Python?', '3']\n]\n\nright_answers = 0\n\nfor i in range(0, len(tasks)):\n    answer = input(tasks[i][0] + ' ')\n    if(answer.lower() == tasks[i][1].lower()):\n        right_answers += 1\n\n\nprint('Number of correct answers: ', right_answers)","sub_path":"enigma.py","file_name":"enigma.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"234164948","text":"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. 
The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nfrom __future__ import annotations\n\nimport json\nfrom unittest.mock import MagicMock, patch\n\nimport pytest\n\nfrom airflow.models import Connection\nfrom airflow.providers.microsoft.azure.hooks.data_factory import (\n AzureDataFactoryHook,\n AzureDataFactoryPipelineRunException,\n AzureDataFactoryPipelineRunStatus,\n)\nfrom airflow.providers.microsoft.azure.operators.data_factory import AzureDataFactoryRunPipelineOperator\nfrom airflow.utils import db, timezone\n\nDEFAULT_DATE = timezone.datetime(2021, 1, 1)\nSUBSCRIPTION_ID = \"my-subscription-id\"\nTASK_ID = \"run_pipeline_op\"\nAZURE_DATA_FACTORY_CONN_ID = \"azure_data_factory_test\"\nPIPELINE_NAME = \"pipeline1\"\nCONN_EXTRAS = {\n \"subscriptionId\": SUBSCRIPTION_ID,\n \"tenantId\": \"my-tenant-id\",\n \"resource_group_name\": \"my-resource-group-name-from-conn\",\n \"factory_name\": \"my-factory-name-from-conn\",\n}\nPIPELINE_RUN_RESPONSE = {\"additional_properties\": {}, \"run_id\": \"run_id\"}\nEXPECTED_PIPELINE_RUN_OP_EXTRA_LINK = (\n \"https://adf.azure.com/en-us/monitoring/pipelineruns/{run_id}\"\n \"?factory=/subscriptions/{subscription_id}/\"\n \"resourceGroups/{resource_group_name}/providers/Microsoft.DataFactory/\"\n \"factories/{factory_name}\"\n)\n\n\nclass TestAzureDataFactoryRunPipelineOperator:\n def setup_method(self):\n self.mock_ti = MagicMock()\n self.mock_context = {\"ti\": self.mock_ti}\n self.config = {\n \"task_id\": TASK_ID,\n \"azure_data_factory_conn_id\": AZURE_DATA_FACTORY_CONN_ID,\n \"pipeline_name\": PIPELINE_NAME,\n \"resource_group_name\": \"resource-group-name\",\n \"factory_name\": \"factory-name\",\n \"check_interval\": 1,\n \"timeout\": 3,\n }\n\n db.merge_conn(\n Connection(\n conn_id=\"azure_data_factory_test\",\n conn_type=\"azure_data_factory\",\n login=\"client-id\",\n password=\"client-secret\",\n extra=json.dumps(CONN_EXTRAS),\n )\n )\n\n @staticmethod\n def create_pipeline_run(status: str):\n \"\"\"Helper function to create a mock pipeline run with a given execution status.\"\"\"\n\n run = MagicMock()\n run.status = status\n\n return run\n\n @patch.object(AzureDataFactoryHook, \"run_pipeline\", return_value=MagicMock(**PIPELINE_RUN_RESPONSE))\n @pytest.mark.parametrize(\n \"pipeline_run_status,expected_output\",\n [\n (AzureDataFactoryPipelineRunStatus.SUCCEEDED, None),\n (AzureDataFactoryPipelineRunStatus.FAILED, \"exception\"),\n (AzureDataFactoryPipelineRunStatus.CANCELLED, \"exception\"),\n (AzureDataFactoryPipelineRunStatus.IN_PROGRESS, \"timeout\"),\n (AzureDataFactoryPipelineRunStatus.QUEUED, \"timeout\"),\n (AzureDataFactoryPipelineRunStatus.CANCELING, \"timeout\"),\n ],\n )\n def test_execute_wait_for_termination(self, mock_run_pipeline, pipeline_run_status, expected_output):\n operator = AzureDataFactoryRunPipelineOperator(**self.config)\n\n assert operator.azure_data_factory_conn_id == self.config[\"azure_data_factory_conn_id\"]\n assert operator.pipeline_name == self.config[\"pipeline_name\"]\n assert 
operator.resource_group_name == self.config[\"resource_group_name\"]\n assert operator.factory_name == self.config[\"factory_name\"]\n assert operator.check_interval == self.config[\"check_interval\"]\n assert operator.timeout == self.config[\"timeout\"]\n assert operator.wait_for_termination\n\n with patch.object(AzureDataFactoryHook, \"get_pipeline_run\") as mock_get_pipeline_run:\n mock_get_pipeline_run.return_value = TestAzureDataFactoryRunPipelineOperator.create_pipeline_run(\n pipeline_run_status\n )\n\n if not expected_output:\n # A successful operator execution should not return any values.\n assert not operator.execute(context=self.mock_context)\n elif expected_output == \"exception\":\n # The operator should fail if the pipeline run fails or is canceled.\n with pytest.raises(\n AzureDataFactoryPipelineRunException,\n match=f\"Pipeline run {PIPELINE_RUN_RESPONSE['run_id']} has failed or has been cancelled.\",\n ):\n operator.execute(context=self.mock_context)\n else:\n # Demonstrating the operator timing out after surpassing the configured timeout value.\n with pytest.raises(\n AzureDataFactoryPipelineRunException,\n match=(\n f\"Pipeline run {PIPELINE_RUN_RESPONSE['run_id']} has not reached a terminal status \"\n f\"after {self.config['timeout']} seconds.\"\n ),\n ):\n operator.execute(context=self.mock_context)\n\n # Check the ``run_id`` attr is assigned after executing the pipeline.\n assert operator.run_id == PIPELINE_RUN_RESPONSE[\"run_id\"]\n\n # Check to ensure an `XCom` is pushed regardless of pipeline run result.\n self.mock_ti.xcom_push.assert_called_once_with(\n key=\"run_id\", value=PIPELINE_RUN_RESPONSE[\"run_id\"]\n )\n\n mock_run_pipeline.assert_called_once_with(\n pipeline_name=self.config[\"pipeline_name\"],\n resource_group_name=self.config[\"resource_group_name\"],\n factory_name=self.config[\"factory_name\"],\n reference_pipeline_run_id=None,\n is_recovery=None,\n start_activity_name=None,\n start_from_failure=None,\n parameters=None,\n )\n\n if pipeline_run_status in AzureDataFactoryPipelineRunStatus.TERMINAL_STATUSES:\n mock_get_pipeline_run.assert_called_once_with(\n run_id=mock_run_pipeline.return_value.run_id,\n factory_name=self.config[\"factory_name\"],\n resource_group_name=self.config[\"resource_group_name\"],\n )\n else:\n # When the pipeline run status is not in a terminal status or \"Succeeded\", the operator will\n # continue to call ``get_pipeline_run()`` until a ``timeout`` number of seconds has passed\n # (3 seconds for this test). 
Therefore, there should be 4 calls of this function: one\n            # made up front, plus one per check at the 1-second interval over the 3-second timeout.\n            assert mock_get_pipeline_run.call_count == 4\n\n            mock_get_pipeline_run.assert_called_with(\n                run_id=mock_run_pipeline.return_value.run_id,\n                factory_name=self.config[\"factory_name\"],\n                resource_group_name=self.config[\"resource_group_name\"],\n            )\n\n    @patch.object(AzureDataFactoryHook, \"run_pipeline\", return_value=MagicMock(**PIPELINE_RUN_RESPONSE))\n    def test_execute_no_wait_for_termination(self, mock_run_pipeline):\n        operator = AzureDataFactoryRunPipelineOperator(wait_for_termination=False, **self.config)\n\n        assert operator.azure_data_factory_conn_id == self.config[\"azure_data_factory_conn_id\"]\n        assert operator.pipeline_name == self.config[\"pipeline_name\"]\n        assert operator.resource_group_name == self.config[\"resource_group_name\"]\n        assert operator.factory_name == self.config[\"factory_name\"]\n        assert operator.check_interval == self.config[\"check_interval\"]\n        assert not operator.wait_for_termination\n\n        with patch.object(AzureDataFactoryHook, \"get_pipeline_run\", autospec=True) as mock_get_pipeline_run:\n            operator.execute(context=self.mock_context)\n\n            # Check the ``run_id`` attr is assigned after executing the pipeline.\n            assert operator.run_id == PIPELINE_RUN_RESPONSE[\"run_id\"]\n\n            # Check to ensure an `XCom` is pushed regardless of pipeline run result.\n            self.mock_ti.xcom_push.assert_called_once_with(\n                key=\"run_id\", value=PIPELINE_RUN_RESPONSE[\"run_id\"]\n            )\n\n            mock_run_pipeline.assert_called_once_with(\n                pipeline_name=self.config[\"pipeline_name\"],\n                resource_group_name=self.config[\"resource_group_name\"],\n                factory_name=self.config[\"factory_name\"],\n                reference_pipeline_run_id=None,\n                is_recovery=None,\n                start_activity_name=None,\n                start_from_failure=None,\n                parameters=None,\n            )\n\n            # Checking the pipeline run status should _not_ be called when ``wait_for_termination`` is False.\n            mock_get_pipeline_run.assert_not_called()\n
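\n    # --- Added sketch (not part of the test suite) ---\n    # For orientation, a minimal DAG-side use of the operator under test;\n    # values mirror the test config and are illustrative only:\n    #   run_pipeline = AzureDataFactoryRunPipelineOperator(\n    #       task_id=\"run_pipeline\",\n    #       azure_data_factory_conn_id=AZURE_DATA_FACTORY_CONN_ID,\n    #       pipeline_name=PIPELINE_NAME,\n    #       wait_for_termination=True,\n    #   )\n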
\n\n    @pytest.mark.parametrize(\n        \"resource_group,factory\",\n        [\n            # Both resource_group_name and factory_name are passed to the operator.\n            (\"op-resource-group\", \"op-factory-name\"),\n            # Only factory_name is passed to the operator; resource_group_name should fall back to the Connection.\n            (None, \"op-factory-name\"),\n            # Only resource_group_name is passed to the operator; factory_name should fall back to the Connection.\n            (\"op-resource-group\", None),\n            # Both resource_group_name and factory_name should fall back to the Connection.\n            (None, None),\n        ],\n    )\n    def test_run_pipeline_operator_link(self, resource_group, factory, create_task_instance_of_operator):\n        ti = create_task_instance_of_operator(\n            AzureDataFactoryRunPipelineOperator,\n            dag_id=\"test_adf_run_pipeline_op_link\",\n            execution_date=DEFAULT_DATE,\n            task_id=TASK_ID,\n            azure_data_factory_conn_id=AZURE_DATA_FACTORY_CONN_ID,\n            pipeline_name=PIPELINE_NAME,\n            resource_group_name=resource_group,\n            factory_name=factory,\n        )\n        ti.xcom_push(key=\"run_id\", value=PIPELINE_RUN_RESPONSE[\"run_id\"])\n\n        url = ti.task.get_extra_links(ti, \"Monitor Pipeline Run\")\n        EXPECTED_PIPELINE_RUN_OP_EXTRA_LINK = (\n            \"https://adf.azure.com/en-us/monitoring/pipelineruns/{run_id}\"\n            \"?factory=/subscriptions/{subscription_id}/\"\n            \"resourceGroups/{resource_group_name}/providers/Microsoft.DataFactory/\"\n            \"factories/{factory_name}\"\n        )\n\n        conn = AzureDataFactoryHook.get_connection(\"azure_data_factory_test\")\n        conn_resource_group_name = conn.extra_dejson[\"resource_group_name\"]\n        conn_factory_name = conn.extra_dejson[\"factory_name\"]\n\n        assert url == (\n            EXPECTED_PIPELINE_RUN_OP_EXTRA_LINK.format(\n                run_id=PIPELINE_RUN_RESPONSE[\"run_id\"],\n                subscription_id=SUBSCRIPTION_ID,\n                resource_group_name=resource_group if resource_group else conn_resource_group_name,\n                factory_name=factory if factory else conn_factory_name,\n            )\n        )\n","sub_path":"tests/providers/microsoft/azure/operators/test_azure_data_factory.py","file_name":"test_azure_data_factory.py","file_ext":"py","file_size_in_byte":11769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"563868624","text":"\"\"\"\nstrserver - view.py\n\n2016/03/21\n\nHandles HTML processing.\n\"\"\"\n\n\nimport sys\nimport os\nimport logging\nimport re\nfrom pkg_resources import resource_string\n\n\ndef fetch_template(path=''):\n    \"\"\"Return the template at the given path; if it is not found, fall back to index.html.\"\"\"\n\n    print(path)\n    path = path.lstrip('/')\n\n    if path == '':\n        path='index.html'\n\n    template_dir = 'templates/'\n    template_filename = os.path.join( template_dir, path )\n    logging.info('Returning file %s from template dir %s', template_filename, template_dir)\n\n    try:\n        template = resource_string(__name__, template_filename)\n    except OSError:\n        template_dir = 'templates/'\n        path = 'index.html'\n        template_filename = os.path.join( template_dir, path )\n        template = resource_string(__name__, template_filename)\n\n    return template\n\n","sub_path":"python/strserver/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"242002083","text":"from flask import Flask, request, render_template, redirect, url_for, make_response, json\nfrom forms import MusicVideoLibraryDraft\nfrom data import videos\n\napp = Flask(__name__)\napp.config[\"SECRET_KEY\"] = \"nininini\"\n\n\n@app.route(\"/library/\", methods=[\"GET\", \"POST\"])\ndef videos_list():\n    form = MusicVideoLibraryDraft()\n    error = \"\"\n    if request.method == \"POST\":\n        if form.validate_on_submit():\n            videos.create(form.data)\n            videos.save_all()\n            return redirect(url_for(\"videos_list\"))\n\n    return render_template(\"music.html\", form=form, videos=videos.all(), error=error)\n\n@app.route(\"/library/<int:video_id>/\", methods=[\"GET\", \"POST\"])\ndef video_update(video_id):\n    video = videos.get(video_id - 1)\n    form = MusicVideoLibraryDraft(data=video)\n\n    if request.method == \"POST\":\n        if form.validate_on_submit():\n            videos.update(video_id - 1, form.data)\n            return redirect(url_for(\"videos_list\"))\n    return render_template(\"music_upd.html\", form=form, video_id=video_id)\n\n\n@app.route(\"/library/delete/\", methods=[\"GET\", \"POST\"])\ndef video_delete():\n    form = MusicVideoLibraryDraft()\n    error = \"\"\n    if request.method == \"POST\":\n        if form.validate_on_submit():\n            videos.delete(form.data)\n            videos.save_all()\n            return redirect(url_for(\"videos_list\"))\n    return render_template(\"music_del.html\", form=form, videos=videos.all(), error=error)\n\n\n@app.errorhandler(400)\ndef bad_request(error):\n    return make_response(({'error': 'Bad request', 'status_code': 400}), 400)\n\n\nif __name__ == \"__main__\":\n    app.run(debug=True)\n","sub_path":"video_library_1.0/music_video_library.py","file_name":"music_video_library.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"355729882","text":"'''\nThis Python script acquires and 
stores data from the WSU LAR indoor air quality sensors\nthat were built for the Ramboll project. This project installed both indoor and outdoor\nsensors in elementary schools in Spokane, Washington.\n\nContact: Von P. Walden, Washington State University\nDate: 16 July 2019\n'''\nimport os\n\nx=1\n\ndef local_wifi():\n    local_wifi_arr = check_output(\"ifconfig\").decode('utf-8')\n    connection_test = local_wifi_arr.rfind(\"inet 134.121.21.214\")\n    if connection_test > 0:\n        return True\n    else:\n        return False\n\ndef mail_alert(sensor): # input sensor type of bad data\n    fromaddr = email\n    toaddrs = email\n    msg = 'Subject: {}\\n\\n{}'.format('BAD DATA FROM INDOOR AQ SENSOR', 'Erroneous data record from' + '_' + sensor + '_' + currentTime.strftime('%Y%m%d_%H%M%S') + '_' + 'Sensor' + '_' + sensorParameters['ID'])\n    \n# Credentials (if needed)\n    username = email\n    password = pw\n\n# The actual mail send\n    server = smtplib.SMTP('smtp.gmail.com:587')\n    server.starttls()\n    server.login(username,password)\n    server.sendmail(fromaddr, toaddrs, msg)\n    server.quit()\n\ndef mail_alert2(sensor): # input sensor type of bad data\n    fromaddr = email\n    toaddrs = email\n    msg = 'Subject: {}\\n\\n{}'.format('Publishing Error', 'Publishing Error From' + '_' + sensor + '_' + currentTime.strftime('%Y%m%d_%H%M%S') + '_' + 'Sensor' + '_' + sensorParameters['ID'])\n\n# Credentials (if needed)\n    username = email\n    password = pw\n\n# The actual mail send\n    server = smtplib.SMTP('smtp.gmail.com:587')\n    server.starttls()\n    server.login(username,password)\n    server.sendmail(fromaddr, toaddrs, msg)\n    server.quit()\n\ndef writeRPiMonitor():\n    '''\n    Print single values to files to be used in RPi monitor; \n    data files are updated every cycle.\n\n    Written by: Matthew Roetcisoender\n    Created on: June 2019\n    '''\n\n    file2write=open('/home/pi/SpokaneSchools/software/PM_0_3_data','w')\n    file2write.write(str(particles_03um))\n    file2write.close()\n\n    file2write=open('/home/pi/SpokaneSchools/software/PM_0_5_data','w')\n    file2write.write(str(particles_05um))\n    file2write.close()\n\n    file2write=open('/home/pi/SpokaneSchools/software/PM_1_data','w')\n    file2write.write(str(particles_10um))\n    file2write.close()\n\n    file2write=open('/home/pi/SpokaneSchools/software/PM_2_5_data','w')\n    file2write.write(str(particles_25um))\n    file2write.close()\n\n    file2write=open('/home/pi/SpokaneSchools/software/PM_5_data','w')\n    file2write.write(str(particles_50um))\n    file2write.close()\n\n    file2write=open('/home/pi/SpokaneSchools/software/PM_10_data','w')\n    file2write.write(str(particles_100um))\n    file2write.close()\n    \n    return\n\n# ............................. Connect to the Urbanova Cloud .............................\n'''\n    Copyright Urbanova 2019 | Licensed under the Apache License, Version 2.0 (the \"License\")\n    This file is distributed on an \"AS IS\" BASIS,WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,\n    either express or implied. 
See the License for the specific language governing permissions\n and limitations under the License.\n'''\nfrom AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTClient\nimport argparse\nimport logging\nimport time\n\n### Custom MQTT message callback\ndef customCallback(client, userdata, message):\n print(\"Received a new message: \")\n print(message.payload)\n print(\"from topic: \")\n print(message.topic)\n print(\"--------------\\n\\n\")\n\n### Read in command-line parameters\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-r\", \"--rootCA\", action=\"store\", required=True, dest=\"rootCAPath\", help=\"Root CA file path\")\nparser.add_argument(\"-c\", \"--cert\", action=\"store\", required=True, dest=\"certificatePath\", help=\"Certificate file path\")\nparser.add_argument(\"-k\", \"--key\", action=\"store\", required=True, dest=\"privateKeyPath\", help=\"Private key file path\")\nparser.add_argument(\"-d\", \"--device\", action=\"store\", required=True, dest=\"deviceId\", help=\"Device Identifier\")\n\n### Urbanova Cloud IoT Custom Endpoint / MQTT Broker hosted at AWS\nucIoTCustomEndpoint = \"a1siobcc26zf4j-ats.iot.us-west-2.amazonaws.com\"\n\n### Parse Arguments\nargs = parser.parse_args()\nrootCAPath = args.rootCAPath # rootCA path\ncertificatePath = args.certificatePath # thing certifiate path\nprivateKeyPath = args.privateKeyPath # thing private key path\ndeviceId = args.deviceId # thing deviceId (autogenerated at time of Urbanova Cloud IoT Data Source creation)\n\n\n### Configure logging\nlogger = logging.getLogger(\"AWSIoTPythonSDK.core\")\nlogger.setLevel(logging.DEBUG)\nf_handler = logging.FileHandler(\"/home/pi/SpokaneSchools/Data/logging/logs.log\")\nf_handler.setLevel(logging.DEBUG)\nf_format = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nf_handler.setFormatter(f_format)\nlogger.addHandler(f_handler)\nstreamHandler = logging.StreamHandler()\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nstreamHandler.setFormatter(formatter)\nlogger.addHandler(streamHandler)\n\n\n### Init Urbanova Cloud IoT MQTT Client using TLSv1.2 Mutual Authentication\nucIoTDeviceClient = None # initialize var\nucIoTDeviceClient = AWSIoTMQTTClient(deviceId) # The client class that connects to and accesses AWS IoT over MQTT v3.1/3.1.1.\nucIoTDeviceClient.configureEndpoint(ucIoTCustomEndpoint, 8883) # MQTT Broker host address and default port (TLS)\nucIoTDeviceClient.configureCredentials(rootCAPath, privateKeyPath, certificatePath) # certs and key\n\n\n### Configure Urbanova Cloud IoT Device Client Connection Settings (reference: https://s3.amazonaws.com/aws-iot-device-sdk-python-docs/sphinx/html/index.html)\nucIoTDeviceClient.configureAutoReconnectBackoffTime(1, 32, 20)\nucIoTDeviceClient.configureOfflinePublishQueueing(-1) # Infinite offline Publish queueing\nucIoTDeviceClient.configureDrainingFrequency(2) # Draining: 2 Hz\nucIoTDeviceClient.configureConnectDisconnectTimeout(10) # 10 sec\nucIoTDeviceClient.configureMQTTOperationTimeout(5) # 5 sec\n\n\n### Connect to Urbanova Cloud IoT\nucIoTDeviceClient.connect()\ntime.sleep(2)\n# .........................................................................................\n\n# .......................... 
Connect to each Sensor of the Node ...........................\n\nimport board\nimport busio\nimport serial\nimport adafruit_bme280\nimport struct\n#import os\nimport json\nimport datetime\nimport time\nimport smtplib\nfrom subprocess import check_output\n\n#Create JSON file for sensorParameters (just update ID for each sensor)\n\n#name='WSU_LAR_Indoor_Air_Quality_Node'\n#ID='PT'\n#Type='WSU LAR Indoor Air Quality Node'\n#description='Indoor air quality sensor package (node) built at Washington State University Laboratory for Atmospheric Research. Sensors include PMS5003 (particulate matter) and BME280 (TPU).'\n#contact='Von P. Walden, Washington State University, v.walden@wsu.edu'\n#timeInterval=120\n\n#sensorParameters={'name':name,'ID':ID,'Type':Type,'description':description,'contact':contact,'timeInterval':timeInterval}\n\n#with open(\"sensorParameters.json\",\"w\") as f:\n# json.dump(sensorParameters, f, indent = 2,sort_keys=True)\n\n#Once JSON file is created, open the file to read in sensorParameters\nwith open('sensorParameters.json') as json_file:\n sensorParameters=json.load(json_file)\n\n# Create a unique filename for the current date.\ncurrentTime = datetime.datetime.now()\ncurrentDate = currentTime.date()\n#currentDate = datetime.datetime.now().date()\nfilename = 'Reset' + '5003_only' + sensorParameters['name'] + '_' + sensorParameters['ID'] + '_' +currentTime.strftime('%Y%m%d_%H%M%S') + '.json'\n\n### Initialize variables to store in JSON file.\nDateTime = []\nPM_0_3 = []\nPM_0_5 = []\nPM_1 = []\nPM_2_5 = []\nPM_5 = []\nPM_10 = []\nPM1_standard = []\nPM2_5_standard = []\nPM10_standard = []\nPM1_env = []\nPM2_5_env = []\nPM10_env = []\n\n#### Initialize Sensors\nuart = serial.Serial(\"/dev/ttyS0\", baudrate=9600, timeout=3000)\nbuffer = []\n\nwith open('/home/pi/SpokaneSchools/software/Name_1.txt','r') as file:\n email=file.read()\n\nwith open('/home/pi/SpokaneSchools/software/Name_2.txt','r') as file:\n pw=file.read()\n\n# .......................... 
Acquire and Store Sensor Data ...........................\nwhile True:\n    # If a new day has started, roll over to a new JSON file.\n    if (datetime.datetime.now().date() != currentDate):\n        currentTime = datetime.datetime.now()\n        currentDate = currentTime.date()\n        #currentDate = datetime.datetime.now().date()\n        DateTime = []\n        PM_0_3 = []\n        PM_0_5 = []\n        PM_1 = []\n        PM_2_5 = []\n        PM_5 = []\n        PM_10 = []\n        PM1_standard = []\n        PM2_5_standard = []\n        PM10_standard = []\n        PM1_env = []\n        PM2_5_env = []\n        PM10_env = []\n        filename = 'Reset' + '5003_only' + sensorParameters['name'] + '_' + sensorParameters['ID'] + '_' +currentTime.strftime('%Y%m%d_%H%M%S') + '.json'\n        json_file = open('/home/pi/SpokaneSchools/Data/Good_Data/' + filename, 'w')\n\n    try: # Attempts to acquire and decode the data from the PMS5003 particulate matter sensor\n        data = uart.read(32)  # read up to 32 bytes\n        data = list(data)\n        # print(\"read: \", data) # this is a bytearray type\n\n        buffer += data\n        while buffer and buffer[0] != 0x42:\n            buffer.pop(0)\n\n        if len(buffer) > 200:\n            buffer = []  # avoid an overrun if all bad data\n        if len(buffer) < 32:\n            continue\n\n        if buffer[1] != 0x4d:\n            buffer.pop(0)\n            continue\n\n        frame_len = struct.unpack(\">H\", bytes(buffer[2:4]))[0]\n        if frame_len != 28:\n            buffer = []\n            continue\n        \n        frame = struct.unpack(\">HHHHHHHHHHHHHH\", bytes(buffer[4:]))\n\n        pm10_standard, pm25_standard, pm100_standard, pm10_env, \\\n        pm25_env, pm100_env, particles_03um, particles_05um, particles_10um, \\\n        particles_25um, particles_50um, particles_100um, skip, checksum = frame\n        \n        check = sum(buffer[0:30])\n        \n        if check != checksum:\n            buffer = []\n            continue\n
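\n        # --- Added note (not part of the original script) ---\n        # PMS5003 frame recap: 0x42 0x4d, a big-endian length (28), thirteen\n        # 16-bit data words, then a 16-bit checksum equal to the byte-sum of\n        # the first 30 bytes. Tiny self-contained check (illustrative values):\n        #   >>> f = bytes([0x42, 0x4d, 0, 28]) + bytes(26) + (0x42 + 0x4d + 28).to_bytes(2, 'big')\n        #   >>> sum(f[:30]) == int.from_bytes(f[30:], 'big')\n        #   True\n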
\n        # Stores the current time and data\n        DateTime.append(datetime.datetime.now().isoformat())\n        PM_0_3.append(particles_03um)\n        PM_0_5.append(particles_05um)\n        PM_1.append(particles_10um)\n        PM_2_5.append(particles_25um)\n        PM_5.append(particles_50um)\n        PM_10.append(particles_100um)\n        PM1_standard.append(pm10_standard)\n        PM2_5_standard.append(pm25_standard)\n        PM10_standard.append(pm100_standard)\n        PM1_env.append(pm10_env)\n        PM2_5_env.append(pm25_env)\n        PM10_env.append(pm100_env)\n\n    except:\n        print('!! Erroneous data record from PMS5003 !!')\n        print(' Skipping measurement and trying again...')\n        #os.system('echo \"ALERT: Problem with WSU LAR Indoor AQ sensor (PMS5003 Data)\" | mail -s \"ALERT: Problem with WSU LAR Indoor AQ sensor on ' + datetime.datetime.utcnow().strftime('%Y%m%d %H') + ':00 UTC\" v.walden@wsu.edu')\n        #os.system('echo \"ALERT: Problem with WSU LAR Indoor AQ sensor (PMS5003 Data)\" | mail -s \"ALERT: Problem with WSU LAR Indoor AQ sensor on ' + datetime.datetime.utcnow().strftime('%Y%m%d %H') + ':00 UTC\" matthew.s.roetcisoe@wsu.edu')\n        mail_alert('PMS_5003')\n        with open(\"/home/pi/SpokaneSchools/Data/PMS5003_errors/errors_PMS_5003.txt\", \"a\") as myfile:\n            myfile.write('Erroneous data record from PMS5003' + currentTime.strftime('%Y%m%d_%H%M%S') + '_' + 'Sensor' + '_' + sensorParameters['ID'] + \"\\n\")\n            myfile.close()\n        print(buffer)\n        #PMS_5003_startup()\n        uart = serial.Serial(\"/dev/ttyS0\", baudrate=9600, timeout=3000)\n        buffer = []\n        time.sleep(10)\n        continue\n    \n    print('Current time: ', DateTime[-1])\n\n    #print(\"Concentration Units (standard)\")\n    #print(\"---------------------------------------\")\n    #print(\"PM 1.0: %d\\tPM2.5: %d\\tPM10: %d\" % (pm10_standard, pm25_standard, pm100_standard))\n    #print(\"Concentration Units (environmental)\")\n    #print(\"---------------------------------------\")\n    #print(\"PM 1.0: %d\\tPM2.5: %d\\tPM10: %d\" % (pm10_env, pm25_env, pm100_env))\n    print(\"---------------------------------------\")\n    print(\"Particles > 0.3um / 0.1L air:\", PM_0_3[-1])\n    print(\"Particles > 0.5um / 0.1L air:\", PM_0_5[-1])\n    print(\"Particles > 1.0um / 0.1L air:\", PM_1[-1])\n    print(\"Particles > 2.5um / 0.1L air:\", PM_2_5[-1])\n    print(\"Particles > 5.0um / 0.1L air:\", PM_5[-1])\n    print(\"Particles > 10 um / 0.1L air:\", PM_10[-1])\n    print(\"---------------------------------------\")\n\n    #writeRPiMonitor()\n\n    # Store all sensor data on RPI in JSON file\n    sensor_data = {'name': sensorParameters['name'],\n                   'ID': sensorParameters['ID'],\n                   'Type': sensorParameters['Type'],\n                   'description': sensorParameters['description'],\n                   'contact': sensorParameters['contact'],\n                   'timeInterval': sensorParameters['timeInterval'],\n                   'Datetime': DateTime,\n                   'PM_0_3': PM_0_3,\n                   'PM_0_5': PM_0_5,\n                   'PM_1': PM_1,\n                   'PM_2_5': PM_2_5,\n                   'PM_5': PM_5,\n                   'PM_10': PM_10,\n                   'PM1_standard': PM1_standard,\n                   'PM2_5_standard': PM2_5_standard,\n                   'PM10_standard': PM10_standard,\n                   'PM1_env': PM1_env,\n                   'PM2_5_env': PM2_5_env,\n                   'PM10_env': PM10_env\n                   }\n    json.dump(sensor_data, json_file, indent = 2,sort_keys=True)\n\n    ### Send single json data packet to cloud\n    Cloud_data = {'name': sensorParameters['name'],\n                  'ID': sensorParameters['ID'],\n                  'Type': sensorParameters['Type'],\n                  'description': sensorParameters['description'],\n                  'contact': sensorParameters['contact'],\n                  'timeInterval': sensorParameters['timeInterval'],\n                  \"datetime\": DateTime[-1],\n                  \"PM_0_3\": PM_0_3[-1],\n                  \"PM_0_5\": PM_0_5[-1],\n                  \"PM_1\": PM_1[-1],\n                  \"PM_2_5\": PM_2_5[-1],\n                  \"PM_5\": PM_5[-1],\n                  \"PM_10\": PM_10[-1],\n                  'PM1_standard': PM1_standard[-1],\n                  'PM2_5_standard': PM2_5_standard[-1],\n                  'PM10_standard': PM10_standard[-1],\n                  'PM1_env': PM1_env[-1],\n                  'PM2_5_env': PM2_5_env[-1],\n                  'PM10_env': PM10_env[-1]\n                  }\n    \n    messageJson = json.dumps(Cloud_data) # convert to json\n\n    #local_wifi()\n    \n    #while not local_wifi():\n    #    print('no wifi connection')\n    #    with open(\"/home/pi/SpokaneSchools/Data/wifi_errors/wifi_errors.txt\", \"a\") as myfile:\n    #        myfile.write('wifi_drop' + '_' + currentTime.strftime('%Y%m%d_%H%M%S') + '_' + 'Sensor' + '_' + sensorParameters['ID'] + \"\\n\")\n    #        myfile.close\n    #time.sleep(5)\n
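\n    # --- Added aside (not part of the original script) ---\n    # A bounded exponential backoff is a common alternative to the fixed\n    # 600-second sleep in the except branch below; hypothetical sketch:\n    #   for attempt in range(5):\n    #       try:\n    #           ucIoTDeviceClient.publish(deviceId, messageJson, 1)\n    #           break\n    #       except Exception:\n    #           time.sleep(min(600, 15 * 2 ** attempt))\n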
    try:\n        ucIoTDeviceClient.publish(deviceId, messageJson, 1) \n        print('Published to %s: %s\\n' % (deviceId, messageJson)) # print console\n    except:\n        logger.debug(\"Error in Publishing\", exc_info=True)\n        print('!! Publishing Error !!')\n        print(' Making Another Measurement Loop and trying again...')\n        mail_alert2('Indoor_Unit')\n        time.sleep(600)\n    \n    if x > 9:\n        os.system(r'\"/home/pi/SpokaneSchools/Cloud/reset.exe\"')\n        x = 0\n    else:\n        x = x +1\n    print(x)\n    \n    # Reset data buffer for PMS5003\n    buffer = buffer[32:]\n    \n    # Close JSON file\n    json_file.close()\n    \n    # Waits for desired time interval\n    time.sleep(sensorParameters['timeInterval'])\n\n\n\n\n\n","sub_path":"python/monitoring/PMS5003_only_reset_to_Cloud.py","file_name":"PMS5003_only_reset_to_Cloud.py","file_ext":"py","file_size_in_byte":16300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"564396372","text":"\"\"\"Pain Train the Video Game, by Charlie Weiss and Diego Garcia\"\"\"\n\"\"\"TODO:\n- Obstacle placement\n- Graphic design\n- Interactive Start screen\n- Interactive End screen\n- Pain Train name\"\"\"\n\nimport pygame\nimport math\n\n# Colors\nBLACK = (0,0,0)\nWHITE = (255,255,255)\nBLUE = (0,0,255)\nDIMGRAY = (105,105,105)\nSLATEGRAY = (112,128,144)\n\n\"\"\"Model classes\"\"\"\nclass Player(object):\n\tdef __init__(self,x=0,y=0,width=50,height=50,dx=1,dy=0,shiftdx=0,jumpdy=-.75):\n\t\t# places player below and to the right of the coordinate given\n\t\tself.x = x\n\t\tself.y = y-height\n\t\tself.width = width\n\t\tself.height = height\n\t\tself.dx = dx\n\t\tself.dy = dy\n\t\tself.shiftdx = shiftdx\n\t\tself.jumpdy = jumpdy # variable dy is set to when controller jumps\n\n\tdef train_wreck(self, train):\n\t\treturn (train.x+train.width) > self.x\n\n\tdef shift_world(self):\n\t\treturn self.x > 350\n\n\tdef go_back(self):\n\t\treturn self.x < 130\n\n\tdef hit_platform(self,platform):\n\t\t#if the player rectangle dimensions are ABCD\n\t\t#and the platform rectangle is abcd\n\t\tA = self.x\n\t\tB = self.x+self.width\n\t\tC = self.y\n\t\tD = self.y+self.height\n\t\ta = platform.x\n\t\tb = platform.x+platform.width\n\t\tc = platform.y\n\t\td = platform.y+platform.height\n\t\treturn ((A>a and A<b) or (B>a and B<b)) and ((C>c and C<d) or (D>c and D<d))\n\n\tdef fall_to_death(self):\n\t\treturn self.y > 480\n\n\tdef on_platform(self,platform):\n\t\treturn self.x < (platform.x+platform.width) and (self.x+self.width) > platform.x and (self.y+self.height)==platform.y\n
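\n# --- Added example (not part of the original game) ---\n# hit_platform() above is a hand-rolled axis-aligned-bounding-box overlap\n# test; pygame ships the same check, which makes a handy cross-reference:\ndef _rects_overlap_example():\n\ta = pygame.Rect(0, 0, 50, 50)\n\tb = pygame.Rect(40, 40, 150, 20)\n\treturn a.colliderect(b)  # True: the two rectangles overlap\n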
\nclass PainTrain(object):\n\tdef __init__(self,x=0,y=0,width=200,height=200,constdx=.05,dx=0,shiftdx=-1):\n\t\t# places train centered above coordinate given\n\t\tself.x = x\n\t\tself.y = y-height\n\t\tself.width = width\n\t\tself.height = height\n\t\tself.constdx = constdx\n\t\tself.dx = dx\n\t\tself.shiftdx = shiftdx\n\n\tdef step(self):\n\t\tself.x += self.constdx\n\n# classes for level objects\nclass Ground(object):\n\tdef __init__(self, x = 0, y = 300, width = 2400, height = 180,dx=0,shiftdx=-1):\n\t\tself.x = x\n\t\tself.y = y\n\t\tself.width = width\n\t\tself.height = height\n\t\tself.dx = dx\n\t\tself.shiftdx = shiftdx\n\nclass Platform(object):\n\tdef __init__(self, x=0,y=0,width = 150, height = 20, dx=0, shiftdx=-1):\n\t\tself.x = x\n\t\tself.y = y\n\t\tself.width = width\n\t\tself.height = height\n\t\tself.dx = dx\n\t\tself.shiftdx = shiftdx\n\n\"\"\"View classes\"\"\"\nclass PlayerView(object):\n\tdef __init__(self, model,pic):\n\t\tself.model = model\n\t\tself.pic = pic\n\n\tdef draw(self, surface):\n\t\tmodel = self.model\n\t\tsurface.blit(self.pic,(model.x,model.y))\n\nclass PainTrainView(object):\n\tdef __init__(self, model,pic):\n\t\tself.model = model\n\t\tself.pic = pic\n\n\tdef draw(self, surface):\n\t\tmodel = self.model\n\t\tsurface.blit(self.pic,(model.x,model.y))\n\nclass GroundView(object):\n\tdef __init__(self, model,pic):\n\t\tself.model = model\n\t\tself.pic = pic\n\n\tdef draw(self, surface):\n\t\tmodel = self.model\n\t\tsurface.blit(self.pic,(model.x,model.y))\n\nclass ObstacleView(object):\n\t# can be used for any rectangular object\n\tdef __init__(self,model,pic):\n\t\tself.model = model\n\t\tself.pic = pic\n\n\tdef draw(self,surface):\n\t\tmodel = self.model\n\t\tsurface.blit(self.pic,(model.x,model.y))\n\n\"\"\"Controller classes\"\"\"\nclass Controller(object):\n\tdef __init__(self,models):\n\t\tself.models = models\n\t\tself.player = models[0] # make sure this aligns with all_models in main\n\n\tdef handle_event(self):\n\t\t# note: movement advances per loop iteration, not per unit of wall-clock time\n\t\tplayer = self.player\n\t\tmodels = self.models\n\t\tjump = False\n\t\tkeys = pygame.key.get_pressed() # checking pressed keys\n\t\tfor model in models:\n\t\t\tif keys[pygame.K_LEFT]:\n\t\t\t\tif player.go_back():\n\t\t\t\t\tmodel.x -= model.shiftdx\n\t\t\t\telse:\n\t\t\t\t\tmodel.x -= model.dx\n\t\t\tif keys[pygame.K_RIGHT]:\n\t\t\t\tif player.shift_world():\n\t\t\t\t\tmodel.x += model.shiftdx\n\t\t\t\telse:\n\t\t\t\t\tmodel.x += model.dx\n\t\t\tif model.y and player.on_platform(model):\n\t\t\t\tjump = True\n\n\t\tif keys[pygame.K_UP] and jump==True:\n\t\t\tplayer.dy = player.jumpdy\n
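\n# --- Added aside (not part of the original game) ---\n# A time-based variant of the per-iteration movement above; clock.tick()\n# returns the milliseconds since the previous call, so speeds become\n# pixels-per-millisecond:\ndef _dt_scaled_step_example(clock, model):\n\tdt = clock.tick(60)\n\tmodel.x += model.dx * dt\n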
\ndef main():\n\tpygame.init()\n\tscreen = pygame.display.set_mode((640,480))\n\n\t# Images\n\tgameover_pic = pygame.image.load('images/gameover1.bmp').convert()\n\ttrain_pic = pygame.image.load('images/train.bmp').convert()\n\tplayer_pic = pygame.image.load('images/player.bmp').convert()\n\tplayerjump_pic = pygame.image.load('images/player_jump.bmp').convert()\n\tground_pic = pygame.image.load('images/ground2.bmp').convert()\n\tplatform_pic = pygame.image.load('images/platform.bmp').convert()\n\n\t# models\n\t# level models:\n\t# design for level 6000 wide\n\tground1 = Ground(x=0,width=650)\n\tground2 = Ground(x=850,width=1100) #jump dist: 200\n\tplatform1 = Platform(x=1400,y=200)\n\tplatform2 = Platform(x=1700,y=100)\n\tground3 = Ground(x=2100,width=1100) #jump dist: 150\n\tground4 = Ground(x=3350,width=1100) #jump dist: 150\n\tground5 = Ground(x=4450,width=1100) #jump dist: 150\n\tground6 = Ground(x=5550,width=650)\n\tplatform3 = Platform(3000,200)\n\tplatform4 = Platform(3500,200)\n\tplatform5 = Platform(6300,300)\n\t# player/NPC models:\n\tplayer = Player(300,300,width=40)\n\ttrain = PainTrain(x=-300,y=300,width=400,height=300)\n\t#models = [train, player, ground, platform1]\n\tall_models = [player,train,ground1,ground2,platform1,platform2,ground3,ground4,ground5,ground6,platform3,platform4,platform5]\n\tcollision_models = [ground1,ground2,platform1,platform2,ground3,ground4,ground5,ground6,platform3,platform4,platform5]\n\n\t#resize images for views\n\tnew_train_pic = pygame.transform.scale(train_pic, (train.width,train.height))\n\tnew_player_pic = pygame.transform.scale(player_pic, (player.width,player.height))\n\tnew_playerjump_pic = pygame.transform.scale(playerjump_pic, (player.width,player.height))\n\ta_ground_pic = pygame.transform.scale(ground_pic, (ground1.width,ground1.height))\n\tb_ground_pic = pygame.transform.scale(ground_pic, (ground2.width,ground2.height))\n\tnew_platform_pic = pygame.transform.scale(platform_pic, (platform1.width,platform1.height))\n\tt_platform_pic = pygame.transform.scale(platform_pic, (platform2.width,platform2.height))\n\n\t# views\n\tviews = []\n\tviews.append(PlayerView(player,new_player_pic))\n\tviews.append(GroundView(ground1,a_ground_pic))\n\tviews.append(GroundView(ground2,b_ground_pic))\n\tviews.append(ObstacleView(platform1,new_platform_pic))\n\tviews.append(ObstacleView(platform2,t_platform_pic))\n\tviews.append(GroundView(ground3,b_ground_pic))\n\tviews.append(GroundView(ground4,b_ground_pic))\n\tviews.append(GroundView(ground5,b_ground_pic))\n\tviews.append(GroundView(ground6,a_ground_pic))\n\tviews.append(ObstacleView(platform3,new_platform_pic))\n\tviews.append(ObstacleView(platform4,new_platform_pic))\n\tviews.append(ObstacleView(platform5,new_platform_pic))\n\tviews.append(PainTrainView(train,new_train_pic))\n\n\t# controller\n\tcontroller = Controller(all_models)\n\trunning = True\n\tcounter = 0\n\n\t# variable to make speed lower\n\tdelta_speed = .00001 # good one is .00005\n\ttrain.constdx = .17\n\tquit_button = False\n\tplayer.jumpdy=-.65\n\n\twhile running == True:\n\t\tcounter += 1\n\t\tif counter%5 == 0: # adjust if the loop runs too slowly\n\t\t\tcontroller.handle_event()\n\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\tquit_button = True\n\t\t\t\trunning = False\n\n\t\tif player.train_wreck(train) or player.fall_to_death():\n\t\t\ttrain.constdx = 0\n\t\t\tplayer.dx = 0\n\t\t\trunning = False\n\n\t\t# keep train moving\n\t\ttrain.step()\n\n\t\t# code for player jumping\n\t\tplayer.y += player.dy\n\t\t# make player fall\n\t\tplayer.dy += 0.001 # if you lower this, also lower jumpdy in player class\n\t\tif player.dy > 0:\n\t\t\tviews[0]=PlayerView(player,new_player_pic)\n\t\telse:\n\t\t\tviews[0]=PlayerView(player,new_playerjump_pic)\n\t\t# make player's jump speed lower with time\n\t\tif player.jumpdy < -.05:\n\t\t\tplayer.jumpdy += delta_speed\n\n\t\t#handle collisions\n\t\tfor model in collision_models:\n\t\t\tif player.hit_platform(model):\n\t\t\t\tif player.dy>0:\n\t\t\t\t\tif player.y+player.height < model.y+model.height:\n\t\t\t\t\t\tplayer.y = model.y-player.height\n\t\t\t\t\t\tplayer.dy = 0\n\t\t\tif model.dx > .01:\n\t\t\t\tmodel.dx -= delta_speed\n\t\t\telif model.dx < -.01:\n\t\t\t\tmodel.dx += delta_speed\n\t\t\tif model.shiftdx > .01:\n\t\t\t\tmodel.shiftdx -= delta_speed\n\t\t\telif model.shiftdx < -.01:\n\t\t\t\tmodel.shiftdx += delta_speed\n\n\t\tscreen.fill(WHITE)\n\t\tfor view in views:\n\t\t\tview.draw(screen)\n\n\t\tpygame.display.update()\n\n\trunning = True\n\twhile running == True and quit_button==False:\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\trunning = False\n\t\tscreen.blit(gameover_pic,(60,60))\n\t\tpygame.display.flip()\n\n\tpygame.quit()\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"playfile.py","file_name":"playfile.py","file_ext":"py","file_size_in_byte":8778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"327797989","text":"#! /usr/bin/env python\n# -*- coding:utf-8 -*-\n\nimport cv2\nimport numpy as np\nfrom geometry_msgs.msg import Twist, Vector3\n\nclass tracker:\n    def __init__(self, v, w):\n        self.vel_rot = w\n        self.vel_trans = v\n\n    def get_center(self, frame):\n        x = int(frame.shape[1]/2)\n        y = int(frame.shape[0]/2)\n\n        return (x,y)\n\n    def get_diff(self, point, cm):\n        x,y = point\n        x_cm,y_cm = cm\n        diff = x - x_cm\n        return diff\n\n    def crosshair(self, img, point, size, color):\n        \"\"\"\n        Draws a cross at the center of the frame\n        \"\"\"\n        x,y = point\n        cv2.line(img,(x - size,y),(x + size,y),color,5)\n        cv2.line(img,(x,y - size),(x, y + size),color,5)\n\n    def get_velocity(self, point, cm):\n        diff = self.get_diff(point, cm)\n        if(diff > 0):\n            if(diff < 10):\n                return Twist(Vector3(self.vel_trans,0,0), Vector3(0,0,0))\n            else:\n                return Twist(Vector3(0,0,0), Vector3(0,0,self.vel_rot))\n        else:\n            if(diff > -10):\n                return Twist(Vector3(self.vel_trans,0,0), Vector3(0,0,0))\n            else:\n                return Twist(Vector3(0,0,0), Vector3(0,0,-self.vel_rot))
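\n\n    # --- Added sketch (not part of the original node) ---\n    # A proportional controller is a smoother alternative to the fixed-speed\n    # branches above; the gain kp is an assumed value.\n    def get_velocity_proportional(self, point, cm, kp=0.005):\n        diff = self.get_diff(point, cm)\n        # angular speed scales with the pixel error instead of switching signs\n        return Twist(Vector3(self.vel_trans,0,0), Vector3(0,0,kp * diff))\n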
/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nimport cv2\nimport numpy as np\nfrom geometry_msgs.msg import Twist, Vector3\n\nclass tracker:\n    def __init__(self, v, w):\n        self.vel_rot = w\n        self.vel_trans = v\n\n    def get_center(self, frame):\n        x = int(frame.shape[1]/2)\n        y = int(frame.shape[0]/2)\n\n        return (x,y)\n\n    def get_diff(self, point, cm):\n        x,y = point\n        x_cm,y_cm = cm\n        diff = x - x_cm\n        return diff\n\n    def crosshair(self, img, point, size, color):\n        \"\"\"\n        Draws a cross at the center of the frame\n        \"\"\"\n        x,y = point\n        cv2.line(img,(x - size,y),(x + size,y),color,5)\n        cv2.line(img,(x,y - size),(x, y + size),color,5)\n\n    def get_velocity(self, point, cm):\n        diff = self.get_diff(point, cm)\n        if(diff > 0):\n            if(diff < 10):\n                return Twist(Vector3(self.vel_trans,0,0), Vector3(0,0,0))\n            else:\n                return Twist(Vector3(0,0,0), Vector3(0,0,self.vel_rot))\n        else:\n            if(diff > -10):\n                return Twist(Vector3(self.vel_trans,0,0), Vector3(0,0,0))\n            else:\n                return Twist(Vector3(0,0,0), Vector3(0,0,-self.vel_rot))","sub_path":"projeto/projeto1/scripts/tracker.py","file_name":"tracker.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"87580148","text":"#******************************************************************************#\r\n# #\r\n# variable_num_params.py #\r\n# #\r\n# Shows how to use non-keyworded and keyworded variable length parameter #\r\n# lists. #\r\n# #\r\n#******************************************************************************#\r\n\r\n# To run this program, start python and then type:\r\n#   from variable_num_params import *\r\n#   add_numbers( 7, 5 )\r\n#   print_member_age( Bo = 18 )\r\nsum = []   # note: this module-level name shadows the built-in sum()\r\n#-------------------------------------------------------------------------------\r\n#\r\n# Any number of parameters. They are added up and the result printed. We can\r\n# use 'num' to find out what the parameters are. Example calls:\r\n#   add_numbers( 1, 2, 3, 4, 5 )\r\n#   add_numbers( 1, 2, 3, 4, 5, 6, 7, 8 )\r\n# This is the NON-KEYWORDED form.\r\n\r\ndef add_numbers( *num ):\r\n    global sum\r\n    sum = 0\r\n\r\n    for i in num:\r\n\r\n        sum = sum + i   # add each argument (was 'sum + 1', which only counted them)\r\n\r\n    print( sum )\r\nadd_numbers()\r\n#-------------------------------------------------------------------------------\r\n#\r\n# Any number of parameters. They are assumed to be a series of 'Name = Age'\r\n# pairs. 
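(An editorial sketch of combining both forms follows.)\r\n#\r\n# Editorial sketch -- not from the original lab file: *args collects the\r\n# positional values and **kwargs the keyword pairs in a single signature. A\r\n# plain loop computes the total because this module shadows the built-in\r\n# 'sum' above.\r\ndef describe_members( *scores, **ages ):\r\n    total = 0\r\n    for s in scores:\r\n        total = total + s\r\n    print( 'total score =', total )\r\n    for name, age in ages.items():\r\n        print( name, 'is', age )\r\n# Example: describe_members( 90, 80, Bo = 18, Chang = 19 )\r\n\r\n# 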
To call this you need something like:\r\n#   print_member_age( Bo = 18, Chang = 19 )\r\n# This is the KEYWORDED form.\r\n\r\ndef print_member_age( **age ):\r\n\r\n    for i, j in age.items():\r\n\r\n        print( 'Name = ', i, ', Age = ', j, sep='' )\r\n        # Note use of sep=''.\r\nprint_member_age()\r\n# Use combine_list's default parameter values to build and print a new list:\r\ndef combine_list(a=[1,2], b=[3], c=[4,5]):\r\n\r\n    print(a+b+c)\r\ncombine_list()\r\n    ","sub_path":"Python/Freshman 1st Semester/Week15/Lab5/variable_num_params_changed.py","file_name":"variable_num_params_changed.py","file_ext":"py","file_size_in_byte":1886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"266153781","text":"from PyQt5 import QtWidgets\nfrom WorkWidgets.WorkWidgetComponents import LabelComponent, LineEditComponent, ButtonComponent\nfrom SocketClient.ServiceController import ExecuteCommand\n\nimport time\n\nimport json\n\nclass DeleteStuWidget(QtWidgets.QWidget):\n    def __init__(self):\n        super().__init__()\n        self.query_student_dict = dict()\n        self.student_dict = dict()\n        \n        self.setObjectName(\"delete_stu_widget\")\n\n        layout = QtWidgets.QGridLayout()\n\n        header_label = LabelComponent(20, \"Delete Student\")\n        result_label = LabelComponent(20, \"Result\")\n        self.result_content = LabelComponent(10, \"Please query the student to delete first\", \"blue\")\n\n        self.content_label_name = LabelComponent(16, \"Name: \")\n        self.editor_label_name = LineEditComponent(\"\") #input name\n\n        self.send_button = ButtonComponent(\"SEND DELETE\")\n        self.button_Query = ButtonComponent(\"Query\")\n        \n\n        layout.addWidget(header_label, 0, 0, 1, 2) #(grid_x, grid_y, row span, column span)\n        layout.addWidget(result_label, 0, 3, 1, 2)\n        layout.addWidget(self.result_content, 1, 3, 2, 1)\n        layout.addWidget(self.content_label_name, 1, 0, 1, 1)\n        layout.addWidget(self.editor_label_name, 1, 1, 1, 2)\n        layout.addWidget(self.button_Query, 1, 2, 1, 1)\n        layout.addWidget(self.send_button, 4, 3, 1, 1)\n\n        layout.setColumnStretch(0, 5) # column 0 width stretch 50%\n        layout.setColumnStretch(1, 8) # column 1 width stretch 90%\n        layout.setColumnStretch(2, 6)\n        layout.setColumnStretch(3, 10)\n        layout.setRowStretch(0, 1) # row 0 stretch ratio 1\n        layout.setRowStretch(1, 2)\n        layout.setRowStretch(2, 2)\n        layout.setRowStretch(3, 5)\n\n        self.setLayout(layout)\n\n        self.execute()\n\n    def execute(self):\n        print(\"execute\")\n        if(len(self.editor_label_name.text()) != 0):\n            self.editor_label_name.mousePressEvent = self.clear_editor_label_name\n        \n        self.send_button.setEnabled(False)\n        self.button_Query.clicked.connect(self.Query_action)\n        self.send_button.clicked.connect(self.send_action)\n        \n        \n    def clear_editor_label_name(self, event):\n        self.editor_label_name.clear()\n\n    def Query_action(self):\n        if(len(self.editor_label_name.text()) != 0):\n            self.query_student_dict[self.editor_label_name.text()] = {}\n            \n            self.query_toServer_action(self.query_student_dict)\n\n        else:\n            self.result_content.setText(\"No student name entered; please enter one and try again\")\n            self.result_content.setStyleSheet(\"color:red;\")\n            print(self.editor_label_name.text())\n        \n    def send_action(self):\n        self.delete_toServer_action(self.student_dict)\n\n    \n    def reset(self):\n        self.query_student_dict = {}\n        self.student_dict = {}\n        self.editor_label_name.clear()\n\n        self.send_button.setEnabled(False)\n\n    def query_toServer_action(self, query_student_dict):\n        self.send_command = ExecuteCommand(command = \"query\", dict_data = self.query_student_dict)\n        
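# Editorial note: ExecuteCommand appears to be a QThread-style worker that\n        # emits the server's JSON reply through return_sig. Connecting the signal\n        # before calling start() (as done below) avoids missing a fast reply.\n        # Illustrative usage with a hypothetical student name:\n        #   worker = ExecuteCommand(command=\"query\", dict_data={\"Alice\": {}})\n        #   worker.return_sig.connect(lambda raw: print(json.loads(raw)[\"status\"]))\n        #   worker.start()\n        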
self.send_command.return_sig.connect(self.process_result_query_toServer)\n        self.send_command.start()\n\n    def process_result_query_toServer(self, result):\n        result = json.loads(result)\n        self.result_content.setText(\"status: {}\".format(result['status']))\n        if result['status'] == \"OK\":\n            self.query_student_dict = {}\n            self.student_dict = {}\n            self.student_dict[self.editor_label_name.text()] = {}\n            print(\"The name exists; this student can be deleted\")\n            self.result_content.setText(\"Query for {} succeeded; the name exists, so this student can be deleted. Press SEND to confirm. return_status:{}\".format(self.editor_label_name.text(), result['status']))\n            self.result_content.setStyleSheet(\"color:green;\")\n            self.send_button.setEnabled(True)\n\n        else:\n            self.query_student_dict = {}\n            self.student_dict = {}\n            print(\"The name does not exist; this student cannot be deleted\")\n            self.result_content.setText(\"Query for {} succeeded; the name does not exist, so this student cannot be deleted\".format(self.editor_label_name.text()))\n            self.result_content.setStyleSheet(\"color:red;\")\n\n    def delete_toServer_action(self, student_dict):\n        self.send_command = ExecuteCommand(command = \"del\", dict_data = self.student_dict)\n        self.send_command.return_sig.connect(self.process_result_delete_toServer)\n        self.send_command.start()\n\n    def process_result_delete_toServer(self, result):\n        result = json.loads(result)\n        self.result_content.setText(\"status: {}\".format(result['status']))\n        if result['status'] == \"OK\":\n            self.result_content.setText(\"Successfully deleted record: {}\\n\".format(self.student_dict))\n            self.result_content.setStyleSheet(\"color:green;\")\n            self.reset()\n            print(\" Delete {} success\".format(self.student_dict))\n            print(\"Deletion succeeded\")\n        else:\n            self.result_content.setText(\"Failed to delete record\\n\")\n            self.result_content.setStyleSheet(\"color:red;\")\n            self.reset()\n            print(\" Delete {} fail\".format(self.student_dict))\n\n    def load(self):\n        self.reset()\n        print(\"load DeleteStuWidget\")\n        print(\"show widget\")\n\n\n","sub_path":"pythone_classPractice/week13_14(pyqt5_switch_widget_moreWidget)/Week13_Quiz_106360101謝尚泓/Client/WorkWidgets/DeleteStuWidget.py","file_name":"DeleteStuWidget.py","file_ext":"py","file_size_in_byte":5634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"553896547","text":"from chromatica import logger\nfrom chromatica.util import load_external_module\n\nload_external_module(__file__, \"\")\nfrom clang import cindex\n\nimport os\nimport re\n\nlog = logger.logging.getLogger(\"chromatica.compile_args\")\n\nclass CompileArgsDatabase(object):\n\n    def __init__(self, path, global_args=None):\n        if path:\n            self.__path = path\n        else:\n            self.__path = os.getcwd()\n        self.compile_args = []\n        self.cdb = None\n        self.__clang_file = None\n        self.__cdb_path = None\n\n        if global_args != None:\n            self.compile_args = global_args\n\n        self.__find_clang_file()\n        self.__find_cdb_file()\n\n        self.__parse_compile_args()\n        self.__try_init_cdb()\n\n    def __find_clang_file(self):\n        clang_file_path = self.__path\n        while os.path.dirname(clang_file_path) != clang_file_path:\n            self.__clang_file = os.path.join(clang_file_path, \".clang\")\n            if os.path.exists(self.__clang_file):\n                return\n            clang_file_path = os.path.dirname(clang_file_path)\n\n        self.__clang_file = None\n\n    def __find_cdb_file(self):\n        cdb_file_path = self.__path\n        while os.path.dirname(cdb_file_path) != cdb_file_path:\n            cdb_file = os.path.join(cdb_file_path, \"compile_commands.json\")\n            if os.path.exists(cdb_file):\n                self.__cdb_path = cdb_file_path\n                return\n            cdb_file_path = os.path.dirname(cdb_file_path)\n\n    def 
__parse_compile_args(self):\n        if self.__clang_file == None:\n            return\n        # read .clang file\n        fp = open(self.__clang_file)\n        flags = fp.read()\n        fp.close()\n        m = re.match(r\"^flags\\s*=\\s*\", flags)\n        if m != None:\n            self.compile_args += flags[m.end():].split()\n\n        m = re.match(r\"^compilation_database\\s*=\\s*\", flags)\n        if m != None:\n            cdb_rel_path = flags[m.end():].strip(\"\\\"\")\n            cdb_path = os.path.join(os.path.dirname(self.__clang_file), cdb_rel_path)\n            if cdb_path and os.path.isdir(cdb_path):\n                self.__cdb_path = cdb_path\n\n    def __try_init_cdb(self):\n        if self.__cdb_path != None:\n            self.cdb = cindex.CompilationDatabase.fromDirectory(self.__cdb_path)\n\n    def get_args_filename(self, filename):\n        ret = None\n        if self.cdb != None:\n            # keep the lookup result (it was previously assigned to an unused\n            # name, so the compilation database was never actually consulted)\n            ret = self.cdb.getCompileCommands(filename)\n\n        if ret:\n            # CompileCommands supports indexing; each CompileCommand exposes\n            # its argument list through the 'arguments' generator\n            return list(ret[0].arguments)\n        else:\n            return self.compile_args\n\n    @property\n    def clang_file(self):\n        return self.__clang_file\n\n    @property\n    def cdb_file(self):\n        if self.__cdb_path:\n            return os.path.join(self.__cdb_path, \"compile_commands.json\")\n        else:\n            return \"\"\n\n","sub_path":"rplugin/python3/chromatica/compile_args_database.py","file_name":"compile_args_database.py","file_ext":"py","file_size_in_byte":2786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"101983334","text":"\"\"\"\nPYCOM LIB - TEMPERATURE\n\"\"\"\n\nimport time\nfrom machine import I2C\nimport math\n\n__version__ = '0.0.2'\n\nclass SI7006A20:\n    \"\"\" class for handling the temperature sensor SI7006-A20\n        +/- 1 deg C error for temperature\n        +/- 5% error for relative humidity\n        datasheet available at https://www.silabs.com/documents/public/data-sheets/Si7006-A20.pdf \"\"\"\n\n    SI7006A20_I2C_ADDR = const(0x40)\n\n    TEMP_NOHOLDMASTER = const(0xF3)\n    HUMD_NOHOLDMASTER = const(0xF5)\n\n    def __init__(self, pysense = None, sda = 'P22', scl = 'P21'):\n        if pysense is not None:\n            self.i2c = pysense.i2c\n        else:\n            self.i2c = I2C(0, mode=I2C.MASTER, pins=(sda, scl))\n\n    def _getWord(self, high, low):\n        return ((high & 0xFF) << 8) + (low & 0xFF)\n\n    def temperature(self):\n        \"\"\" obtaining the temperature(degrees Celsius) measured by sensor \"\"\"\n        self.i2c.writeto(SI7006A20_I2C_ADDR, bytearray([TEMP_NOHOLDMASTER]))\n        time.sleep(0.5)\n        data = self.i2c.readfrom(SI7006A20_I2C_ADDR, 3)\n        #print(\"CRC Raw temp data: \" + hex(data[0]*65536 + data[1]*256 + data[2]))\n        data = self._getWord(data[0], data[1])\n        # datasheet conversion: T[degC] = 175.72*code/65536 - 46.85\n        # e.g. a raw code of 0x6600 (26112) gives roughly 23.2 degC\n        temp = ((175.72 * data) / 65536.0) - 46.85\n        return temp\n\n    def humidity(self):\n        \"\"\" obtaining the relative humidity(%) measured by sensor \"\"\"\n        self.i2c.writeto(SI7006A20_I2C_ADDR, bytearray([HUMD_NOHOLDMASTER]))\n        time.sleep(0.5)\n        data = self.i2c.readfrom(SI7006A20_I2C_ADDR, 2)\n        data = self._getWord(data[0], data[1])\n        # datasheet conversion: RH[%] = 125*code/65536 - 6\n        humidity = ((125.0 * data) / 65536.0) - 6.0\n        return humidity\n\n    def read_user_reg(self):\n        \"\"\" reading the user configuration register \"\"\"\n        self.i2c.writeto(SI7006A20_I2C_ADDR, bytearray([0xE7]))\n        time.sleep(0.5)\n        data = self.i2c.readfrom(SI7006A20_I2C_ADDR, 1)\n        return data[0]\n\n    def read_heater_reg(self):\n        \"\"\" reading the heater configuration register \"\"\"\n        self.i2c.writeto(SI7006A20_I2C_ADDR, bytearray([0x11]))\n        time.sleep(0.5)\n        data = self.i2c.readfrom(SI7006A20_I2C_ADDR, 1)\n        return data[0]\n\n    def read_electronic_id(self):\n        \"\"\" reading electronic identifier \"\"\"\n        self.i2c.writeto(SI7006A20_I2C_ADDR, bytearray([0xFA]) + bytearray([0x0F]))\n        time.sleep(0.5)\n        sna = self.i2c.readfrom(SI7006A20_I2C_ADDR, 4)\n        time.sleep(0.1)\n        
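# Per the Si7006 datasheet, the 64-bit electronic serial number is read in\n        # two halves: the 0xFA/0x0F access above returns SNA, and the 0xFC/0xC9\n        # access below returns SNB.\n        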
self.i2c.writeto(SI7006A20_I2C_ADDR, bytearray([0xFC]) + bytearray([0xC9]))\n time.sleep(0.5)\n snb = self.i2c.readfrom(SI7006A20_I2C_ADDR, 4)\n return [sna[0], sna[1], sna[2], sna[3], snb[0], snb[1], snb[2], snb[3]]\n\n def read_firmware(self):\n \"\"\" reading firmware version \"\"\"\n self.i2c.writeto(SI7006A20_I2C_ADDR, bytearray([0x84])+ bytearray([0xB8]))\n time.sleep(0.5)\n fw = self.i2c.readfrom(SI7006A20_I2C_ADDR, 1)\n return fw[0]\n\n def read_reg(self, reg_addr):\n \"\"\" reading a register \"\"\"\n self.i2c.writeto(SI7006A20_I2C_ADDR, bytearray([reg_addr]))\n time.sleep(0.5)\n data = self.i2c.readfrom(SI7006A20_I2C_ADDR, 1)\n return data[0]\n\n def write_reg(self, reg_addr, value):\n \"\"\" writing a register \"\"\"\n self.i2c.writeto(SI7006A20_I2C_ADDR, bytearray([reg_addr])+bytearray([value]))\n time.sleep(0.1)\n\n def dew_point(self):\n \"\"\" computing the dew pointe temperature (deg C) for the current Temperature and Humidity measured pair\n at dew-point temperature the relative humidity is 100% \"\"\"\n temp = self.temperature()\n humid = self.humidity()\n h = (math.log(humid, 10) - 2) / 0.4343 + (17.62 * temp) / (243.12 + temp)\n dew_p = 243.12 * h / (17.62 - h)\n return dew_p\n\n def humid_ambient(self, t_ambient, dew_p = None):\n \"\"\" returns the relative humidity compensated for the current Ambient temperature\n for ex: T-Ambient is 24.4 degC, but sensor indicates Temperature = 31.65 degC and Humidity = 47.3%\n -> then the actual Relative Humidity is 72.2%\n this is computed because the dew-point should be the same \"\"\"\n if dew_p is None:\n dew_p = self.dew_point()\n h = 17.62 * dew_p / (243.12 + dew_p)\n h_ambient = math.pow(10, (h - (17.62 * t_ambient) / (243.12 + t_ambient)) * 0.4343 + 2)\n return h_ambient","sub_path":"pycom/lib/SI7006A20.py","file_name":"SI7006A20.py","file_ext":"py","file_size_in_byte":4340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"17996876","text":"#!/usr/bin/env python\n\nimport re\nfrom collections import OrderedDict\nfrom pprint import pprint\n\nfrom first import first\n\nfrom ranking.management.modules.common import REQ, BaseModule, parsed_table\nfrom ranking.management.modules.excepts import InitModuleException\n\n\nclass Statistic(BaseModule):\n\n def __init__(self, **kwargs):\n super(Statistic, self).__init__(**kwargs)\n if '//stats.ioinformatics.org/olympiads/' not in self.url:\n raise InitModuleException(f'Url {self.url} should be contains stats.ioinformatics.org/olympiads')\n\n def get_standings(self, users=None, statistics=None):\n result = {}\n problems_info = OrderedDict()\n year = self.start_time.year\n\n if not self.standings_url:\n self.standings_url = self.url.replace('/olympiads/', '/results/')\n\n page = REQ.get(self.standings_url)\n regex = ']*>.*?'\n html_table = re.search(regex, page, re.DOTALL).group(0)\n table = parsed_table.ParsedTable(html_table, as_list=True)\n\n idx = 0\n for r in table:\n row = OrderedDict()\n problems = row.setdefault('problems', {})\n problem_idx = 0\n for k, v in r:\n if 'taskscore' in v.header.attrs.get('class', '').split():\n problem_idx += 1\n d = problems_info.setdefault(problem_idx, {})\n d['short'] = str(problem_idx)\n d['full_score'] = 100\n d['name'] = k\n try:\n score = float(v.value)\n p = problems.setdefault(str(problem_idx), {})\n p['result'] = v.value\n p['partial'] = score < 100\n except Exception:\n pass\n elif k == 'Abs.':\n row['solving'] = float(v.value)\n elif k == 'Rank':\n row['place'] = 
v.value.strip('*').strip('.')\n elif k == 'Contestant':\n if not v.value:\n idx += 1\n member = f'{year}-{idx:06d}'\n row['member'] = member\n else:\n url = first(v.column.node.xpath('a[@href]/@href'))\n member = url.strip('/').split('/')[-1]\n row['member'] = member\n row['name'] = v.value\n elif k == 'Country':\n country = re.sub(r'\\s*[0-9]+$', '', v.value)\n if country:\n row['country'] = country\n else:\n val = v.value.strip()\n if val:\n row[k] = val\n result[row['member']] = row\n\n standings = {\n 'result': result,\n 'url': self.standings_url,\n 'problems': list(problems_info.values()),\n }\n return standings\n\n\nif __name__ == \"__main__\":\n statictic = Statistic(url='http://stats.ioinformatics.org/olympiads/2008', standings_url=None)\n pprint(statictic.get_result('804'))\n","sub_path":"ranking/management/modules/stats_ioinformatics.py","file_name":"stats_ioinformatics.py","file_ext":"py","file_size_in_byte":3185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"485891189","text":"import time, webbrowser, os, warnings, random, math, datetime\r\nfrom time import sleep\r\nfrom os import system, name\r\nfrom tqdm import tqdm\r\nimport logging\r\nimport sys\r\nimport string\r\n\r\nPerm = os.path.realpath(os.path.dirname(sys.argv[0]))\r\n'''\r\nWell it looks like your trying to edit me!\r\nThat's great! If you want to change the OS BOOT number, change the variable below and rename it to whatever you want, this will also appear in the logs!\r\n------------------------------------------------------------------\r\nIf you want to change some of the configuration, scroll down to find that variable and change the values. \r\n\r\nNOTE: MAKE SURE YOU KNOW WHAT YOU ARE CHANGING!\r\n- You might cause some unwanted changes if you don't know what you are doing. \r\n\r\n'''\r\n\r\n#Logging Setup:\r\nlogging.basicConfig(filename ='app.log', \r\n level = logging.ERROR)\r\nlogger=logging.getLogger() \r\nlogger.setLevel(logging.DEBUG) \r\n\r\n#OSBOOT Version \r\nOSInfo = \"4.3\"\r\n#Public Build\r\nBetaValue = True\r\nif BetaValue == True:\r\n OSInfoB = \"4.1.6\"\r\n #Beta build\r\n#The Value above tells if the following \r\n\r\n#Use this to skip login start!\r\nDEVMODE = True\r\n\r\n#Starter Values:\r\nfiletf = False\r\nRegister = False\r\nKeyAWOL = False\r\nSecurityBlocked = False\r\nletters = string.ascii_letters\r\nDeCode = ( ''.join(random.choice(letters) for i in range(10)) )\r\nsave = \"Null\"\r\nlinkpl = \"Null\"\r\nnewaccount1 = False\r\nWebConsoleSec = False\r\nWarnWebC = 0\r\n\r\n#Security Values\r\nFileShield = True\r\nBehaviorShield = True\r\nWebShield = True\r\nMailShield = True\r\naccountstat = \"admin\"\r\n\r\n#Use this to bypass warning...\r\nDevStat2 = \"Turtle2020\"\r\n\r\n#Update Notes:\r\nprint(\"Thank you for using PythonComputer or OSBOOTTURTLE!\")\r\nprint(\"Patch Notes/Updates:\")\r\nprint(\r\n \"- Security Module was updated! \\n\"\r\n \"- Added a new RegEdit app so you can modify applications! \\n\"\r\n \"- Fixed some nasty bugs. \\n\"\r\n)\r\nprint(\"Welcome to OSBOOTTURTLE \" + OSInfo + \"!\")\r\n\r\n#Required Function prior to setup!\r\ndef clear(): \r\n # for windows \r\n if name == 'nt': \r\n _ = system('cls') \r\n \r\n # for mac and linux(here, os.name is 'posix') \r\n else: \r\n _ = system('clear') \r\n\r\n#Slow tyoe\r\ndef cool_print(str):\r\n for char in str:\r\n sys.stdout.write(char)\r\n sys.stdout.flush()\r\n time.sleep(0.1)\r\n\r\n#Starting Screen\r\n\r\nprint(\" Starting Drivers... 
\\n\")\r\nrangeArg1 = 0\r\nrangeArg2 = 999999\r\nrandNum = 23\r\nfor i in tqdm(range(rangeArg1, rangeArg2)):\r\n randNum += 1\r\nsleep(2)\r\nprint(\" Checking Data... \\n\")\r\nrangeArg1 = 0\r\nrangeArg2 = 999999\r\nrandNum = 23\r\nfor i in tqdm(range(rangeArg1, rangeArg2)):\r\n randNum += 1\r\nsleep(2)\r\n\r\nprint(\" Booting OSPUBLICTURTLE \" + OSInfo + \"... \\n\")\r\nrangeArg1 = 0\r\nrangeArg2 = 999999\r\nrandNum = 23\r\nfor i in tqdm(range(rangeArg1, rangeArg2)):\r\n randNum += 1\r\nsleep(2)\r\nclear()\r\n\r\nif DEVMODE == True:\r\n cool_print(\"WARNING!: You have DEVMODE Turned on! \\n\")\r\n time.sleep(2)\r\n warningdev = input(\"Turn off DEVMODE? (y/n): \\n\")\r\n if warningdev == \"y\":\r\n print(\"Turning off DEVMODE...\")\r\n DEVMODE = False\r\n print(\"DEVMODE is now: \")\r\n print(DEVMODE)\r\n else:\r\n cool_print(\"In order to protect un-published data, please enter the global DEV Password! \\n\")\r\n DEVPass2 = input(\"Password: \")\r\n if DEVPass2 == DevStat2:\r\n cool_print(\"Proceeding with DEVMODE on... \\n\")\r\n else:\r\n cool_print(\"Failed to authorize, try again later. Proceeding with DEVMODE off. \")\r\n\r\n'''Function:\r\n#If you want to define a function, its best to do it here if its a startup requirement!\r\n'''\r\n\r\n# Using for Beta Drive Gen 2\r\n'''\r\n\r\nfile = open('location',\"r\")\r\nprint(\"ID\" , '\\t' ,\"Name\",'\\t' ,\"SM\", '\\t' ,\"MM\",'\\t' ,\"SoM\",'\\t',\"TOTAL\")\r\nprint(\"------------------------------------------\")\r\nfor line in file:\r\n x = line.strip().split(',')\r\n if len(x) == 5:\r\n print(x[0], '\\t', x[1], '\\t', x[2], '\\t', x[3], '\\t', x[4], '\\t', int(x[2]) + int(x[3]) + int(x[4]))\r\n #fileout = open('location',\"a\")\r\n #fileout.write(ft)\r\n\r\n# (x[1],'\\t',x[2],'\\t',x[3],'\\t' ,x[4], '\\t', int(x[2],10)+ int(x[3],10)+ int(x[4],10))\r\n'''\r\n\r\n\r\n\r\n#Loading Bar Function\r\ndef LoadingBar():\r\n print(\" Processing... \\n\")\r\n rangeArg1 = 0\r\n rangeArg2 = 999999\r\n randNum = 23\r\n for i in tqdm(range(rangeArg1, rangeArg2)):\r\n randNum += 1\r\n sleep(2)\r\n clear()\r\n\r\n#Universal Bar\r\ndef UBar():\r\n rangeArg1 = 0\r\n rangeArg2 = 999999\r\n randNum = 23\r\n for i in tqdm(range(rangeArg1, rangeArg2)):\r\n randNum += 1\r\n sleep(2)\r\n\r\n\r\n \r\n\r\n#Login Function\r\ndef loginstart():\r\n def colortext():\r\n class bcolors:\r\n HEADER = '\\033[95m'\r\n OKBLUE = '\\033[94m'\r\n OKGREEN = '\\033[92m'\r\n WARNING = '\\033[93m'\r\n FAIL = '\\033[91m'\r\n ENDC = '\\033[0m'\r\n BOLD = '\\033[1m'\r\n UNDERLINE = '\\033[4m'\r\n BLINK = '\\33[6m'\r\ncurrentDT = datetime.datetime.now()\r\nPCProcessLOGS = open(os.path.join(Perm, \"PCProcessLOGS.txt\"), \"a\")\r\nPCProcessLOGS.write(\"Setting up services... \\n\")\r\nLoadingBar()\r\nPCProcessLOGS.write(str(currentDT) + \"\\n\")\r\nPCProcessLOGS.write(\"------------------\\n\")\r\nPCProcessLOGS.close\r\ncurrentDT = datetime.datetime.now()\r\nif DEVMODE == False:\r\n print(str(currentDT))\r\n print(\"Reseting Services...\")\r\n time.sleep(2)\r\n print(\"Creating processes\")\r\n print(\"No account file found... 
[Error Code 5] \")\r\n logger.error(\"No account file, proceeding with account setup...\")\r\n time.sleep(2)\r\n print(\"New Account Required!\")\r\n time.sleep(3)\r\n ssuser = input(\"New Username: \")\r\n sspass = input(\"New Password: \")\r\n print(\"Setting you up as the admin account...\")\r\n print(\"Setup Success!\")\r\n accountstat = 'Admin'\r\n currentDT = datetime.datetime.now()\r\n # \"\\n\"\r\n logger.info(\"Used \" + ssuser + \" as the Username to log in \\n\")\r\n logger.info(\"Used \" + sspass + \" as the Password to log in \\n\")\r\n logger.info(\"Setup is working /-\")\r\n logger.info(\"Setting up files... \\n\")\r\n logger.info(\"No Domain Found, proceeding as root user \\n\")\r\n logger.info(\"Proceeding as OSTURTLESTRSPPER \" + OSInfo + \"\\n\")\r\n logger.info(\"Root User logging in...\\n\")\r\n logger.info(\"Closing Terminal! \\n\")\r\n logger.info(\"----------------\\n\")\r\n logger.info(\"Accounts report:\")\r\n logger.info(\"Accounts Currently Stored: \\n\")\r\n logger.info(\"ROOT USER: \" + ssuser + \"\\n\")\r\n logger.info(\"--------------------------\\n\")\r\n websitechoicetf = 0\r\n print(\"Booting...\")\r\n time.sleep(2)\r\n print(\"No Domain Found...\")\r\n class bcolors:\r\n HEADER = '\\033[95m'\r\n OKBLUE = '\\033[94m'\r\n OKGREEN = '\\033[92m'\r\n WARNING = '\\033[93m'\r\n FAIL = '\\033[91m'\r\n ENDC = '\\033[0m'\r\n BOLD = '\\033[1m'\r\n UNDERLINE = '\\033[4m'\r\n print(f\"{OKGREEN}Attempting to log in...{ENDC}\")\r\n usernamestart = input(\"Username: \")\r\n if usernamestart == ssuser:\r\n print(\"Acessing domain!\")\r\n else:\r\n exit(\"Invalid Domain/Username\")\r\n psword = input(\"Please enter your password \")\r\n if psword == (sspass):\r\n class bcolors:\r\n HEADER = '\\033[95m'\r\n OKBLUE = '\\033[94m'\r\n OKGREEN = '\\033[92m'\r\n WARNING = '\\033[93m'\r\n FAIL = '\\033[91m'\r\n ENDC = '\\033[0m'\r\n BOLD = '\\033[1m'\r\n UNDERLINE = '\\033[4m'\r\n print(f\"{WARNING}Logging in...{ENDC}\")\r\n\r\n time.sleep(2)\r\n logger.info(\"Logged in as ROOT USER \\n\")\r\n logger.info(\"---------------------\\n\")\r\n\r\n\r\n#Function Workspace End\r\n\r\n\r\n\r\nif DEVMODE == False:\r\n loginstart()\r\n\r\n\r\n\r\n\r\n#Not being used\r\n\r\ndef colortext():\r\n class bcolors:\r\n HEADER = '\\033[95m'\r\n OKBLUE = '\\033[94m'\r\n OKGREEN = '\\033[92m'\r\n WARNING = '\\033[93m'\r\n FAIL = '\\033[91m'\r\n ENDC = '\\033[0m'\r\n BOLD = '\\033[1m'\r\n UNDERLINE = '\\033[4m'\r\n BLINK = '\\33[6m'\r\n\r\n\r\n\r\nStartboot =input(\"Press Enter to get started...\")\r\nif Startboot == \"\":\r\n print(\r\n \"Welcome to the Home Screen. 
This computer is not built for real life useage..\"\r\n )\r\n print(\"This computer is ran by the command line!\")\r\n time.sleep(2)\r\n des = \"y\"\r\n while des == \"y\":\r\n print(\"Options:\")\r\n print(\"Settings, Browser, Calculator, Console, Config, Settings-2\")\r\n appchoice = input(\"What would you like to use today?\")\r\n clear()\r\n if appchoice == (\"Settings\"):\r\n logger.info(\"Setting up \"+ appchoice + \"\\n\")\r\n logger.info(str(currentDT) + \"\\n\")\r\n logger.info(\"------------------\\n\")\r\n PCProcessLOGS.close\r\n print(\"Loading...\")\r\n time.sleep(2)\r\n print(\"Connecting to document...\")\r\n Logs = open(os.path.join(Perm, \"Logs.txt\"), \"r\")\r\n print(Logs.readlines())\r\n Logs.close()\r\n\r\n if appchoice == (\"Browser\"):\r\n logger.info(\"Setting up \"+ appchoice + \"\\n\")\r\n logger.info(str(currentDT) + \"\\n\")\r\n logger.info(\"------------------\\n\")\r\n print(\"Loading...\")\r\n website = input(\r\n \"What would you like to search? Format: python.org \")\r\n print(\"Opening \" + \"www.\" + website)\r\n time.sleep(1)\r\n print(\"Requesting www.\" + website, \"with chrome\")\r\n time.sleep(1)\r\n webbrowser.open(\"www.\" + website, new=0, autoraise=True)\r\n Logsweb = open(os.path.join(Perm,\"Logs.txt\"), \"a\")\r\n Logsweb.write(\"Visited: www.\" + website + \"\\n\")\r\n Logsweb.close()\r\n if appchoice == (\"Calculator\"):\r\n logger.info(\"Setting up \"+ appchoice + \"\\n\")\r\n logger.info(str(currentDT) + \"\\n\")\r\n logger.info(\"------------------\\n\")\r\n\r\n def add(x, y):\r\n return x + y\r\n\r\n # This function subtracts two numbers\r\n def subtract(x, y):\r\n return x - y\r\n\r\n # This function multiplies two numbers\r\n def multiply(x, y):\r\n return x * y\r\n\r\n # This function divides two numbers\r\n def divide(x, y):\r\n return x / y\r\n\r\n Cacl =input(\"What would you like to open? (Calculator or Prime)?\")\r\n if Cacl == \"Prime\":\r\n num = int(input(\"Number to test: \"))\r\n if num > 1:\r\n for i in range(2, num):\r\n if (num % i) == 0:\r\n print(num, \"is not a prime number\")\r\n print(i, \"times\", num // i, \"is\", num)\r\n break\r\n else:\r\n print(num, \"is a prime number\")\r\n else:\r\n print(num, \"is not a prime number\")\r\n \r\n else:\r\n\r\n print(\"Select operation.\")\r\n print(\"1.Add\")\r\n print(\"2.Subtract\")\r\n print(\"3.Multiply\")\r\n print(\"4.Divide\")\r\n\r\n # Take input from the user\r\n choice = input(\"Enter choice(1/2/3/4):\")\r\n\r\n num1 = int(input(\"Enter first number: \"))\r\n num2 = int(input(\"Enter second number: \"))\r\n\r\n if choice == '1':\r\n print(num1, \"+\", num2, \"=\", add(num1, num2))\r\n\r\n elif choice == '2':\r\n print(num1, \"-\", num2, \"=\", subtract(num1, num2))\r\n\r\n elif choice == '3':\r\n print(num1, \"*\", num2, \"=\", multiply(num1, num2))\r\n\r\n elif choice == '4':\r\n print(num1, \"/\", num2, \"=\", divide(num1, num2))\r\n else:\r\n print(\"Invalid input\")\r\n\r\n \r\n\r\n if appchoice == \"Console\":\r\n logger.info(\"Setting up \"+ appchoice + \"\\n\")\r\n print(\"Launching a instance\")\r\n\r\n class bcolors:\r\n HEADER = '\\033[95m'\r\n OKBLUE = '\\033[94m'\r\n OKGREEN = '\\033[92m'\r\n WARNING = '\\033[93m'\r\n FAIL = '\\033[91m'\r\n ENDC = '\\033[0m'\r\n BOLD = '\\033[1m'\r\n UNDERLINE = '\\033[4m'\r\n print(f\"{FAIL}Instance requires credentials to start...{ENDC}\")\r\n print(\"This Instance requires different credentials! 
\")\r\n time.sleep(2)\r\n username1 = input(\"Username: \")\r\n if username1 == \"BlueCore\":\r\n password1 = input(\"Password: \")\r\n if password1 == \"RedApple\":\r\n instancename = input(\"Instance Name:\")\r\n time.sleep(2)\r\n warnings.warn(\"Attempting to start...\")\r\n time.sleep(2)\r\n print(\"Starting \" + instancename)\r\n os.system('start \"SSH Client\":')\r\n time.sleep(3)\r\n\r\n class bcolors:\r\n OKGREEN = '\\033[92m'\r\n print(\"Sucessfully Started \" + instancename)\r\n\r\n\r\n if appchoice == \"Config\":\r\n logger.info.write(\"Setting up \"+ appchoice + \"\\n\")\r\n print(\"Get your path/link ready!\")\r\n print(\"If you are saving a file...\")\r\n print(\"Please enter the FULL PATH!\")\r\n print(\"Otherwise it will open a new file with that name!\")\r\n websitechoiceconfig = input(\"What should I open? (File or Link?) \")\r\n if websitechoiceconfig == \"File\":\r\n print(\"Please wait...\")\r\n fileopen = input(\"Please input a files name or path! \")\r\n des2009 = input(\"What do you want me to do with it? \")\r\n if des2009 == \"Open\":\r\n print(\"Opening\" + fileopen)\r\n print(\"failed... [Error 5]\")\r\n if des2009 == \"Save\":\r\n print(\"Saving!\")\r\n save1 = input(\"What do you want this called? \")\r\n save1 == fileopen\r\n print(\"Saved as\" + save1 + \".\")\r\n filetf = True\r\n\r\n if websitechoiceconfig == \"Link\":\r\n logger.info(\"Setting up \"+ appchoice + \"\\n\")\r\n linkpl = input(\"Copy and Paste your link here.. \")\r\n des2008 = input(\"Should I open it? \")\r\n websitechoicetf = 1\r\n if des2008 == \"Open\":\r\n webbrowser.open(\"www.\" + linkpl, new=0, autoraise=True)\r\n else:\r\n print(\"Alright, I saved the link.\")\r\n print(\"If you want to remove it, come back to this!\")\r\n\r\n if appchoice == \"Domain\":\r\n logger.info(\"Setting up \"+ appchoice + \"\\n\")\r\n print(\"Loading...\")\r\n time.sleep(2)\r\n if Register == False:\r\n class bcolors:\r\n HEADER = '\\033[95m'\r\n OKBLUE = '\\033[94m'\r\n OKGREEN = '\\033[92m'\r\n WARNING = '\\033[93m'\r\n FAIL = '\\033[91m'\r\n ENDC = '\\033[0m'\r\n BOLD = '\\033[1m'\r\n UNDERLINE = '\\033[4m'\r\n print(f\"{FAIL}Launching Domain...{ENDC}\")\r\n f = open(\"Logs.txt\", \"w+\")\r\n domainname = input(\"Domain name: \")\r\n domainpassword = input(\"Domain Password: \")\r\n f.write(domainname)\r\n f.write(\"\\n\")\r\n f.write(domainpassword)\r\n with open('Logs.txt', 'r') as f:\r\n contents = [line.strip() for line in f.readlines()]\r\n username = contents[0]\r\n pw = contents[1]\r\n print(\"Complete..\")\r\n print(\"Domain Username:\" + username)\r\n print(\"Domain Password:\" + pw)\r\n f.close()\r\n Register = True\r\n else:\r\n print(\"You already have a Domain Setup...\")\r\n Domaininput = input(\"Do you want to edit/delete this?\")\r\n if Domaininput == \"yes\":\r\n f = open(os.path.join(Perm, \"Logs.txt\"), \"a\")\r\n with open(\"Logs.txt\", \"w\") as f:\r\n f.write(\"\")\r\n print(\"Deleting...\")\r\n time.sleep(2)\r\n print(\"Complete\")\r\n time.sleep(2)\r\n print(\r\n \"To Edit this, please come back and create a new Domain!\"\r\n )\r\n print(\r\n \"To Log in with this, please go to DomainConsole!\")\r\n Register = False\r\n if appchoice == \"Exit\":\r\n logger.info(\"Setting up \"+ appchoice + \"\\n\")\r\n print(\"...\")\r\n time.sleep(2)\r\n exit(\"Logging out...\")\r\n\r\n if appchoice == \"DomainConsole\":\r\n logger.info(\"Setting up \"+ appchoice + \"\\n\")\r\n print(\"waiting...\")\r\n if Register == True:\r\n print(f\"{bcolors.WARNING}Launching!{bcolors.ENDC}\")\r\n 
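# Editorial note: 'start' here is a Windows cmd built-in, so this call is\r\n                # Windows-only; a portable sketch (hypothetical, not in the original)\r\n                # would be something like:\r\n                #   subprocess.Popen([\"cmd\", \"/c\", \"start\", \"Domain VENV\"])\r\n                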
os.system('start \"Domain VENV\":')\r\n else:\r\n print(\r\n f\"{bcolors.FAIL}You don't have a domain! Please set one up!{bcolors.ENDC}\"\r\n )\r\n print(f\"{bcolors.FAIL}Failed to Launch!{bcolors.ENDC}\")\r\n if appchoice == \"Notes\":\r\n logger.info(\"Setting up \"+ appchoice + \"\\n\")\r\n print(\"Loading...\")\r\n notesyn = input(\"Are you writting a new file? (y/n) \")\r\n if notesyn == \"y\":\r\n print(\"Loading...\")\r\n time.sleep(2)\r\n notestxt = input(\r\n \"Name of the file:(add file ending at end! (.txt)) \")\r\n notesx = open(os.path.join(Perm, \"notestxt.txt\"), \"w\")\r\n notesx.close()\r\n notesx = open(os.path.join(Perm, \"notestxt.txt\"), \"a\")\r\n writenote = input(\"What would you like to write? \")\r\n notesx.write(writenote)\r\n notesx.close()\r\n\r\n def writenotes():\r\n notesx = open(os.path.join(Perm, \"notestxt.txt\"), \"a\")\r\n writenote = input(\"What would you like to write? \")\r\n notesx.write(writenote + \"\\n\")\r\n notesx.close()\r\n noteagain = input(\"Would you like to write again? \")\r\n if noteagain == \"yes\":\r\n writenotes()\r\n else:\r\n print(\"Reading you lines!\")\r\n notesx.close()\r\n notesx = open(os.path.join(Perm, \"notestxt.txt\"), \"r\")\r\n notesx.readlines(1)\r\n notesx.close\r\n \r\n if appchoice == \"Time\":\r\n logger.info(\"Setting up \"+ appchoice + \"\\n\")\r\n print(\"Current Year is: %d\" % currentDT.year + \"\\n\")\r\n print(\"Current Month is: %d\" % currentDT.month + \"\\n\")\r\n print(\"Current Day is: %d\" % currentDT.day + \"\\n\")\r\n print(\"Current Hour is: %d\" % currentDT.hour + \"\\n\")\r\n print(\"Current Minute is: %d\" % currentDT.minute + \"\\n\")\r\n print(\"Current Second is: %d\" % currentDT.second + \"\\n\")\r\n print(\"Current Microsecond is: %d\" % currentDT.microsecond + \"\\n\")\r\n\r\n if appchoice == \"Accounts\":\r\n if DEVMODE == True:\r\n print(\"You are not allowed to access this component with DEVMODE on\")\r\n exit(\"Access Denied: DEVMODE can not be used with this config!\")\r\n logger.info(\"Setting up \"+ appchoice + \"\\n\")\r\n print(f\"{bcolors.WARNING}Please wait, loading your accounts!{bcolors.ENDC}\")\r\n time.sleep(2)\r\n AccountManager =open(\"AccountProcess.txt\",\"r\")\r\n AccountManager.readlines() \r\n time.sleep(2)\r\n print(\"Say n if you would like to switch accounts!\")\r\n accountnew =input(\"Would you like to create a new account? (y/n)\")\r\n if accountnew == \"y\":\r\n print(\"Please wait...\")\r\n if accountstat == \"Admin\":\r\n time.sleep(2)\r\n if newaccount1 == True:\r\n print(\"Warning! You already have an account registered, filling this out will overwrite your other account!\")\r\n newwarn =input(\"Are you sure you want to proceed?: \")\r\n if newwarn == \"y\":\r\n print(\"Exitting...\")\r\n break\r\n print(\"Admin Account Registration Restricted, only 1 admin account per domain!\")\r\n newaccount = input(\"Username: \")\r\n newpassword =input(\"Password: \")\r\n logger.warning(\"Attempting to create an account... \\n\")\r\n logger.info(\"Created an account: \\n\")\r\n logger.info(\"Username: \" + newaccount + \"\\n\")\r\n logger.info(\"Password: \" + newpassword + \"\\n\")\r\n logger.info(\"Normal Account: \" + newaccount + \"\\n\")\r\n newaccount1 = True\r\n accountstat = \"Admin\"\r\n print(\"Created your account!\")\r\n print(\"Username for new account:\" + newaccount)\r\n print(\"To switch accounts, go back to Accounts and say n where it says to create a new account!\")\r\n print(\"Forget your password? 
Check out Pass Helper!\")\r\n else:\r\n print(\"Access Denied! Log into the admin account!\")\r\n else:\r\n print(\"Fetching login details...\")\r\n if newaccount1 == True:\r\n print(\"Restoring data...\")\r\n if accountstat == \"Admin\":\r\n print(\"Logging in as \" + newaccount)\r\n checkpass =input(\"Password: \")\r\n if checkpass == newpassword:\r\n print(\"Logged in!\")\r\n accountstat = \"Normal\"\r\n else:\r\n print(\"Logging in as \" + ssuser)\r\n checkapass =input(\"Password: \")\r\n if checkapass == sspass:\r\n print(\"Logged in!\")\r\n accountstat = \"Admin\"\r\n\r\n else:\r\n print(\"No account file found!\")\r\n print(\"Please go create another account and come back here to login!\")\r\n\r\n if appchoice == \"Pass Helper\":\r\n logger.info(\"Setting up \"+ appchoice + \"\\n\")\r\n print(\"Accessing Account Processes\")\r\n passhelper = input(\"What account do you need to access?\")\r\n if passhelper == newaccount:\r\n print(\"Accessing Account...\")\r\n adminpassword = input(\"Admin Password: \")\r\n if adminpassword == sspass:\r\n print(\"Granted!\")\r\n logger.info(\"Used admin password to enter Pass Helper \\n\")\r\n else:\r\n print(\"Wrong password!\")\r\n logger.info(\"Entered wrong password (Admin) \\n\")\r\n else:\r\n print(\"Accessing\")\r\n time.sleep(2)\r\n adminrec = input(\"Admin Username:\")\r\n if adminrec == ssuser:\r\n print(\"Your password: \" + sspass)\r\n logger.info(\"Requested Admin Password \\n\")\r\n\r\n\r\n \r\n if appchoice == \"WebConsole\":\r\n print(\"Attempting to connect to the server...\")\r\n LoadingBar()\r\n if BehaviorShield == True:\r\n if WarnWebC == 0:\r\n print(\"Security has blocked a threat from establishing a connection to your PC!\")\r\n print(\"THREAT: WebConsole 3.2\")\r\n AllowTh = input(\"Allow Access?\")\r\n if AllowTh == \"y\":\r\n print(\"Allowing program...\")\r\n else:\r\n des = \"y\"\r\n pythonstore =open(\"CodeEval.txt\",\"a\")\r\n print(\"If you would like to exit out of the program/loop please use \"'exit'\" in order to leave the VM\" )\r\n des1 = \"n\"\r\n while des1 == \"n\":\r\n mycode =input(\"Code to execute: \")\r\n pythonstore.write(mycode + \"\\n\")\r\n if mycode == \"exit\":\r\n pythonstore.close\r\n print(\"Exiting Console...\")\r\n des1 = \"yes\"\r\n clear()\r\n else:\r\n exec(mycode)\r\n\r\n\r\n if appchoice == \"chatter\":\r\n CH =open(\"ChatterData\",\"a\")\r\n CHname =input(\"Hello there! What should I call you?: \")\r\n print(\"Nice to meet you \" + CHname)\r\n CH.write(\"Well it was nice meeting you! Here is what I asked you! \\n\")\r\n CH.write(\"Your name!: \" + CHname + \"\\n\")\r\n time.sleep(2)\r\n CHaction =input(\"Well what would you like to do? 
\" + CHname)\r\n CH.write(\"What you wanted to do!:\" + CHaction + \"\\n\")\r\n time.sleep(2)\r\n CHfavplay =input(\"nice nice, well what do you like to play?\")\r\n CH.write(\"You like to play \" + CHfavplay + \"\\n\")\r\n CHcolor =input(\"What is your favorite color?\")\r\n CH.write(\"Your favorite color is\" + CHcolor + \"\\n\")\r\n\r\n if appchoice == \"Reload\":\r\n print(\"Are you sure you want to restart the machine?\")\r\n restartconf =input(\"(y/n)\")\r\n if restartconf == \"y\":\r\n print(\"Preparing to reload OS BOOT.\")\r\n system(\"clear\")\r\n print(\".\")\r\n time.sleep(2)\r\n clear()\r\n print(\"..\")\r\n time.sleep(2)\r\n clear()\r\n print('...')\r\n time.sleep(2)\r\n clear()\r\n print(\"Removing Storage Containers....\")\r\n time.sleep(2)\r\n filetf = False\r\n Register = False\r\n KeyAWOL = False\r\n SecurityBlocked = False\r\n letters = string.ascii_letters\r\n DeCode = ( ''.join(random.choice(letters) for i in range(10)) )\r\n newaccount1 = False\r\n WebConsoleSec = False\r\n WarnWebC = 0\r\n #Security Values\r\n FileShield = True\r\n BehaviorShield = True\r\n WebShield = True\r\n MailShield = True \r\n accountstat = \"admin\"\r\n print(\"Restarted values...\")\r\n time.sleep(2)\r\n print(\"Reverted to \" + OSInfo)\r\n\r\n\r\n if appchoice == \"TaskManager\":\r\n print(\"...\")\r\n time.sleep(3)\r\n print(\"Loading...\")\r\n time.sleep(2)\r\n print(\"Running Process: \\n\")\r\n #mark for auto generate keys\r\n #1 # printing letters\r\n letters = string.ascii_letters\r\n AcctProccessKEY = ( ''.join(random.choice(letters) for i in range(10)) )\r\n #2\r\n # printing letters\r\n letters = string.ascii_letters\r\n MainpyKEY = ( ''.join(random.choice(letters) for i in range(10)) )\r\n #3\r\n # printing letters\r\n letters = string.ascii_letters\r\n CodeEvalKEY = ( ''.join(random.choice(letters) for i in range(10)) )\r\n #4\r\n # printing letters\r\n letters = string.ascii_letters\r\n LogsKEY = ( ''.join(random.choice(letters) for i in range(10)) )\r\n #5\r\n # printing letters\r\n letters = string.ascii_letters\r\n PCProcessLOGSKEY = ( ''.join(random.choice(letters) for i in range(10)) )\r\n print(\" Application Name | Disk Usage | Secret | Folder | Total lines used\")\r\n print(\"1) Main.py | 51% |\" + MainpyKEY + \" | N/A | 1343 \")\r\n print(\"2) AcctProcess | 4% |\" + AcctProccessKEY + \" |DataLogs| 737 \")\r\n print(\"3) CodeEval | 5% |\" + CodeEvalKEY + \" |DataLogs| 343 \")\r\n print(\"4) Logs | 5% |\" + LogsKEY + \" |DataLogs| 7862 \")\r\n print(\"5) MPU | 1% | ***** |DataLogs| *** \")\r\n print(\"6) PCProcessLOGS | 20% |\" + PCProcessLOGSKEY + \"|DataLogs| 2323 \")\r\n print(\"7) SetupLogs | 5% | ***** |DataLogs| 32786 \")\r\n time.sleep(2)\r\n #Update note\r\n print(\"Details:\")\r\n class bcolors:\r\n HEADER = '\\033[95m'\r\n OKBLUE = '\\033[94m'\r\n OKGREEN = '\\033[92m'\r\n WARNING = '\\033[93m'\r\n FAIL = '\\033[91m'\r\n ENDC = '\\033[0m'\r\n BOLD = '\\033[1m'\r\n UNDERLINE = '\\033[4m'\r\n BLINK = '\\033[6m'\r\n print(f\"{OKGREEN}Any process's that have stars in their secret means you do not have enough permission to see a core process key{ENDC}\")\r\n time.sleep(2)\r\n decodeAsk =input(\"Would you like to grant permission to see this file?\")\r\n if decodeAsk == \"y\":\r\n KeyAWOL = True\r\n time.sleep(2)\r\n print(\"Generating keys...\")\r\n time.sleep(2)\r\n print(\"Your key: \\n\")\r\n KeyAWOL = True\r\n #print(KeyAWOL)\r\n print(DeCode)\r\n print(\"------------------------------\")\r\n print(\"Use this key in the `Decoder` module to get information 
regarding the terminal! \")\r\n\r\n \r\n if appchoice == \"Decoder\":\r\n print(\"Please wait...\")\r\n time.sleep(2)\r\n print(\"Fetching key information...\")\r\n YourKey =input(\"Paste your key here: \")\r\n if YourKey == DeCode:\r\n print(\"Checking key...\")\r\n letters = string.ascii_letters\r\n MPUKEY = PCProcessLOGSKEY = ( ''.join(random.choice(letters) for i in range(10)) )\r\n #2\r\n letters = string.ascii_letters\r\n SetupLogsKEY = PCProcessLOGSKEY = ( ''.join(random.choice(letters) for i in range(10)) )\r\n time.sleep(2)\r\n print(\"Message from key: \")\r\n clear()\r\n print(\"MPU Secret: \" + MPUKEY)\r\n print(\"SetupLogs Secret: \" + SetupLogsKEY)\r\n print(\"Closing out of Decoder\")\n\n if appchoice == \"Reboot\":\n print(\"Please wait...\")\n time.sleep(2)\n RebootConfirm =input(\"Are you sure you want to reinstall TurtleOS?\")\n if RebootConfirm == \"y\":\n print(\"Preparing to reinstall... \\n\") \n LoadingBar()\n print(\"Connecting...\")\n time.sleep(2)\n print(\"Downloading Data... \\n\")\n LoadingBar()\n print(\"Extracting Data...\")\n LoadingBar()\n print(\"Mounting Drive...\")\n print(\"This will take a bit!\")\n time.sleep(10)\n print(\"Applying Changes....\")\n print(\"Checking Build...\")\n exit(\"Restarting Machine...\")\n\r\n if appchoice == \"RegEdit\":\n print(\"Checking build date...\")\n if DEVMODE == True:\n print(\"This application is in beta and you may experience bugs!\")\n time.sleep(2)\n RegFile = open(os.path.join(Perm, \"RegEdit.txt\"), \"r\")\n print(RegFile.read())\n time.sleep(2)\n \n\n else:\n print(\"Failed to open...\")\n print(\"You aren't in DEVMODE!\")\n\n\n\r\n if appchoice == \"Security\":\r\n print(\"Loading...\")\r\n LoadingBar()\r\n if SecurityBlocked == False:\r\n print(\"Loading Turtle Anti-Malware...\")\r\n time.sleep(2)\r\n logger.info(\"Opening Security...\")\r\n print(\"There is an update, please wait while we update your system...\")\r\n logger.debug(\"Updating System...\")\r\n time.sleep(2)\r\n print(\"Extracting TurtleAnti-Malware_6.2.7 \\n\")\r\n rangeArg1 = 0\r\n rangeArg2 = 999999\r\n randNum = 23\r\n for i in tqdm(range(rangeArg1, rangeArg2)):\r\n randNum += 1\r\n sleep(2)\r\n print(\"Updating... \\n\")\r\n UBar()\r\n time.sleep(5)\r\n print(\"Core Shields:\")\r\n print(\"1) File Shield - ON\")\r\n print(\"2) Behavior Shield - ON\")\r\n print(\"3) Web Shield - ON\")\r\n print(\"4) Mail Shield - ON\")\r\n print(\"--------------------\")\r\n print(\"Scans: \")\r\n print(\"1a) Quick Scan: \")\r\n print(\"2a) Full Disk Scan \")\r\n time.sleep(2)\r\n print(\"What would you like to modify/run?\")\r\n sec =input(\"Use the numbers to select an option... \")\r\n if sec == \"1\":\r\n print(\"Please wait...\")\r\n if FileShield == True:\r\n FSQ = input(\"Are you sure you want to turn off this module? (File Shield)\")\r\n if FSQ == \"y\":\r\n print(\"Attempting to turn off module...\")\r\n time.sleep(1)\r\n if DEVMODE == False:\r\n FSQP = input(\"Enter your password: \")\r\n if FSQP == sspass:\r\n print(\"Turned off module\")\r\n else:\r\n print(\"Wrong password\")\r\n else:\r\n print(\"Turned off Module.\")\r\n else:\r\n print(\"Okay, good thing you didn't :p\")\r\n\r\n \r\n if sec == \"2\":\r\n print(\"Please wait...\")\r\n if BehaviorShield == True:\r\n BSQ = input(\"Are you sure you want to turn off this module? 
(Behavior Shield)\")\r\n if BSQ == \"y\":\r\n print(\"Attempting to turn off module...\")\r\n time.sleep(1)\r\n if DEVMODE == False:\r\n BSQP = input(\"Enter your password: \")\r\n if BSQP == sspass:\r\n print(\"Turned off module\")\r\n else:\r\n print(\"Wrong password\")\r\n else:\r\n print(\"Turned off Module.\")\r\n else:\r\n print(\"Okay, good thing you didn't :p\")\r\n if sec == \"3\":\r\n print(\"Please wait...\")\r\n if WebShield == True:\r\n WSQ = input(\"Are you sure you want to turn off this module? (Web Shield)\")\r\n if WSQ == \"y\":\r\n print(\"Attempting to turn off module...\")\r\n time.sleep(1)\r\n if DEVMODE == False:\r\n WSQP = input(\"Enter your password: \")\r\n if WSQP == sspass:\r\n print(\"Turned off module\")\r\n else:\r\n print(\"Wrong password\")\r\n else:\r\n print(\"Turned off Module.\")\r\n else:\r\n print(\"Okay, good thing you didn't :p\")\r\n if sec == \"4\":\r\n print(\"Please wait...\")\r\n if MailShield == True:\r\n MSQ = input(\"Are you sure you want to turn off this module? (Mail Shield)\")\r\n if MSQ == \"y\":\r\n print(\"Attempting to turn off module...\")\r\n time.sleep(1)\r\n if DEVMODE == False:\r\n MSQP = input(\"Enter your password: \")\r\n if MSQP == sspass:\r\n print(\"Turned off module\")\r\n else:\r\n print(\"Wrong password\")\r\n else:\r\n print(\"Turned off Module.\")\r\n else:\r\n print(\"Okay, good thing you didn't :p\")\r\n if sec == \"1a\":\r\n print(\"Preparing to run a quick scan...\")\r\n time.sleep(2)\r\n QSM = random.randint(0,1)\r\n print(\"Scanning BOOTDISK \\n\")\r\n UBar()\r\n print(\"Checking AccountProcess.txt ...\")\r\n time.sleep(1)\r\n print(\"Checking ChatterData...\")\r\n print(\"Checking CodeEval.txt\")\r\n print(\"Checking PCProcessLOGS.txt \")\r\n time.sleep(1)\r\n print(\"Checking SetupLogs.txt\")\r\n print(\"Checking Encode Drive...\")\r\n time.sleep(5)\r\n if QSM == 1:\r\n print(\"Threats found!\")\r\n time.sleep(1)\r\n print(\"Collecting Data...\")\r\n time.sleep(1)\r\n print(\"Trojan Found at: \")\r\n print(\"C:/bootdisk/rootuser/datalogs/files/rtxa.exe\")\r\n cleanupQ =input(\"Would you like to fix this?\")\r\n if cleanupQ == \"y\":\r\n print(\"Fixing threat...\")\r\n time.sleep(2)\r\n print(\"Threat moved to virus chest\")\r\n time.sleep(3)\r\n else:\r\n print(\"Leaving threat...\")\r\n time.sleep(2)\r\n else:\r\n print(\"No threats found...\")\r\n\r\n elif sec == \"2a\":\r\n print(\"Preparing to run a full scan...\")\r\n print(\"Scanning BOOTDISK \\n\")\r\n UBar()\r\n print(\"Checking AccountProcess.txt ...\")\r\n time.sleep(1)\r\n print(\"Checking ChatterData...\")\r\n print(\"Checking CodeEval.txt\")\r\n print(\"Checking PCProcessLOGS.txt \")\r\n time.sleep(1)\r\n print(\"Checking SetupLogs.txt\")\r\n print(\"Checking Encode Drive...\")\r\n FSM = random.randint(0,1)\r\n if FSM == 1:\r\n print(\"Threats found!\")\r\n time.sleep(1)\r\n print(\"Collecting Data...\")\r\n time.sleep(1)\r\n print(\"Trojan Found at: \")\r\n print(\"C:/bootdisk/rootuser/datalogs/files/rtxa.exe\")\r\n cleanupF =input(\"Would you like to fix this?\")\r\n if cleanupF == \"y\":\r\n print(\"Fixing threat...\")\r\n time.sleep(2)\r\n print(\"Threat moved to virus chest\")\r\n time.sleep(3)\r\n else:\r\n print(\"Leaving threat...\")\r\n time.sleep(2)\r\n else:\r\n print(\"No threats found...\")\n\n \r\n \r\n\r\n","sub_path":"2019pc.py","file_name":"2019pc.py","file_ext":"py","file_size_in_byte":37836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"557436149","text":"import numpy as 
np\n\n# ====================================================#\n# Define physical constants\n\nclass Constants():\n def __init__(self):\n self.msun = 1.989e33\n self.rsun = 6.955e10\n self.G = 6.674e-8\n self.yr = 3.1536e7\n self.h = 6.6260755e-27\n self.kB = 1.380658e-16\n self.mp = 1.6726219e-24\n self.me = 9.10938356e-28\n self.c = 2.99792458e10\n self.pc = 3.085677581e18\n self.au = 1.496e13\n self.q = 4.8032068e-10\n self.eV = 1.6021772e-12\n self.sigmaSB = 5.67051e-5\n self.sigmaT = 6.6524e-25\n\n print(\"Constants defined...\")\n return None\n\nc = Constants()\n\nclass ICs():\n def __init__(self,m1,m2,m3):\n self.m1 = m1\n self.m2 = m2\n self.m3 = m3\n\n print(\"Initial masses defined...\")\n return None\n\n\n\n global Ro, Mo, G, AU, day, hrs\n Ro=6.96e10\n Mo=1.99e33\n G=6.6726e-08\n AU = 1.49597870e+13\n day = 60*60*24\n hrs = 60*60\n\n def tidal(self,Mbh,Mstar,Rstar):\n \"\"\" All cgs\"\"\"\n return Rstar*(Mbh/Mstar)**(1./3)\n\n def get_IC(self,m1,m2,a,e,m3,r,rperi,e3,i,pointto='CM',v_inf=0):\n\n print(' Mass [Solar Masses]')\n print(' m1 | m2 | m3 ')\n print('------------------------------------')\n print(' ',round(m1/Mo,3),' ',round(m2/Mo,3),' ',round(m3/Mo,3))\n\n # Get Binary orbit\n global x1,y1,vx1,vy1,x2,y2,vx2,vy2,P\n\n x1,y1,z1,vx1,vy1,vz1,x2,y2,z2,vx2,vy2,vz2,P = self.getBinary(m1,m2,a,e)\n\n print('\\nBinary properties (m1, m2)')\n print('----------------------')\n print('\\nOrbital separation:',round(a/AU,3), '[AU] (', round(a/Ro,3),\\\n '[Solar radii])')\n print('Eccentricity:', e)\n\n\n print('Orbital period:',round(P/day,3), '[days] (',round(P/hrs,3),\\\n '[hours])')\n print('v1 =', round(np.sqrt(vx1**2 + vy1**2)/1e5,3), ', v2 =',\\\n round(np.sqrt(vx2**2 + vy2**2)/1e5,4), '[km/s]')\n\n\n # 3rd body orbit\n Mbin = m1+m2 # binary's mass\n Mstar = m3 # star's mass\n\n if pointto == 'CM':\n v, k, f, rdot, fdot = self.Orbit_3rd(e3,r,rperi,Mbin,Mstar)\n if pointto == 'm1':\n v, k, f, rdot, fdot = self.Orbit_3rd(e3,r,rperi,m1,Mstar)\n if pointto == 'm2':\n v, k, f, rdot, fdot = self.Orbit_3rd(e3,r,rperi,m2,Mstar)\n\n print('\\n3rd body orbital properties (m3)')\n print('----------------------')\n print('\\nInitial distance to binary:', round(r/Ro, 3), '[Solar Radii]')\n print('Initial velocity magnitude |v|:', round(v/(1.0e5), 3), '[km/s]')\n print('Orbital angular momentum k:', round(k,3), '[cm^2/s]')\n print('Initial true anomaly f:', round(f, 3), '[radians]')\n print('Initial radial velocity (magnitude) dr/dt:', round(rdot/1e5,3),\n '[km/s]')\n print('Initial tangential velocity (magnitude) df/dt:',\n round(r*fdot/1e5, 3), '[km/s]')\n\n if pointto == 'CM':\n x3,y3,z3,vx3,vy3,vz3 = self.xyzCoord(r,f,-rdot,-fdot,i)\n if pointto == 'm1':\n x3,y3,z3,vx3,vy3,vz3 = self.xyzCoord(r,f,-rdot,-fdot,i)\n x3 = x3+x1\n y3 = y3+y1\n z3 = z3+z1\n\n if pointto == 'm2':\n x3,y3,z3,vx3,vy3,vz3 = self.xyzCoord(r,f,-rdot,-fdot,i)\n x3,y3,z3 = x3-x2,y3-y2,z3-z2\n\n if v_inf != 0:\n vx3,vy3,vz3 = vx3*(v_inf/v), vy3*(v_inf/v), vz3*(v_inf/v)\n print('Final v magnitude =', np.sqrt(vx3**2 + vy3**2 + vz3**2)/1e5, ' km/s')\n\n # Put all initial conditions to be evolved into an array\n initialvalues = np.array([x1,y1,z1,vx1,vy1,vz1,x2,y2,z2,vx2,vy2,vz2,x3,y3,z3,vx3,vy3,vz3])\n\n return initialvalues,P\n\n def threebody_derivs(self,rv):\n\n # Positions and velocities for m1\n x1 = rv[0]\n y1 = rv[1]\n z1 = rv[2]\n vx1 = rv[3]\n vy1 = rv[4]\n vz1 = rv[5]\n # Positions and velocities for m2\n x2 = rv[6]\n y2 = rv[7]\n z2 = rv[8]\n vx2 = rv[9]\n vy2 = rv[10]\n vz2 = rv[11]\n # Positions and velocities 
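for m3 (same unpacking pattern as above)\n        # Editorial note: the component equations below are plain Newtonian\n        # pairwise gravity; with numpy the body-1 acceleration can be written\n        # vectorially as (sketch, assuming r1, r2, r3 are length-3 arrays):\n        #   a1 = -G*m2*(r1-r2)/np.linalg.norm(r1-r2)**3 \\\n        #        - G*m3*(r1-r3)/np.linalg.norm(r1-r3)**3\n        # Positions and velocities 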
for m3\n x3 = rv[12]\n y3 = rv[13]\n z3 = rv[14]\n vx3 = rv[15]\n vy3 = rv[16]\n vz3 = rv[17]\n\n\n #get quantities from positions and velocities\n r1_2 = np.sqrt((x1-x2)**2 + (y1-y2)**2 + (z1-z2)**2) # distance 1 , 2\n r1_3 = np.sqrt((x1-x3)**2 + (y1-y3)**2 + (z1-z3)**2) # distance 1 , 3\n r2_3 = np.sqrt((x2-x3)**2 + (y2-y3)**2 + (z2-z3)**2) # distance 3 , 2\n\n\n # Derivatives for m1\n dxdt_1 = vx1\n dydt_1 = vy1\n dzdt_1 = vz1\n\n dvxdt_1 = - (G*self.m2 / r1_2**3) * (x1-x2) - (G*self.m3 / r1_3**3) * (x1-x3)\n dvydt_1 = - (G*self.m2 / r1_2**3) * (y1-y2) - (G*self.m3 / r1_3**3) * (y1-y3)\n dvzdt_1 = - (G*self.m2 / r1_2**3) * (z1-z2) - (G*self.m3 / r1_3**3) * (z1-z3)\n\n # Derivatives for m2\n dxdt_2 = vx2\n dydt_2 = vy2\n dzdt_2 = vz2\n\n dvxdt_2 = - (G*self.m1 / r1_2**3) * (x2-x1) - (G*self.m3 / r2_3**3) * (x2-x3)\n dvydt_2 = - (G*self.m1 / r1_2**3) * (y2-y1) - (G*self.m3 / r2_3**3) * (y2-y3)\n dvzdt_2 = - (G*self.m1 / r1_2**3) * (z2-z1) - (G*self.m3 / r2_3**3) * (z2-z3)\n\n # Derivatives for m3\n dxdt_3 = vx3\n dydt_3 = vy3\n dzdt_3 = vz3\n\n dvxdt_3 = - (G*self.m1 / r1_3**3) * (x3-x1) - (G*self.m2 / r2_3**3) * (x3-x2)\n dvydt_3 = - (G*self.m1 / r1_3**3) * (y3-y1) - (G*self.m2 / r2_3**3) * (y3-y2)\n dvzdt_3 = - (G*self.m1 / r1_3**3) * (z3-z1) - (G*self.m2 / r2_3**3) * (z3-z2)\n\n # pack the derivatives up, and ship them out\n derivarray = np.array([dxdt_1, dydt_1, dzdt_1, dvxdt_1, dvydt_1, dvzdt_1, dxdt_2, dydt_2, dzdt_2, dvxdt_2, dvydt_2, dvzdt_2, dxdt_3, dydt_3, dzdt_3, dvxdt_3, dvydt_3, dvzdt_3])\n\n return derivarray\n\n def threebody_derivs_odeint(self,rv,times):\n\n # Positions and velocities for m1\n x1 = rv[0]\n y1 = rv[1]\n z1 = rv[2]\n\n vx1 = rv[3]\n vy1 = rv[4]\n vz1 = rv[5]\n\n # Positions and velocities for m2\n x2 = rv[6]\n y2 = rv[7]\n z2 = rv[8]\n\n vx2 = rv[9]\n vy2 = rv[10]\n vz2 = rv[11]\n\n # Positions and velocities for m3\n x3 = rv[12]\n y3 = rv[13]\n z3 = rv[14]\n\n vx3 = rv[15]\n vy3 = rv[16]\n vz3 = rv[17]\n\n #get quantities from positions and velocities\n r1_2 = np.sqrt((x1-x2)**2 + (y1-y2)**2 + (z1-z2)**2) # distance 1 , 2\n r1_3 = np.sqrt((x1-x3)**2 + (y1-y3)**2 + (z1-z3)**2) # distance 1 , 3\n r2_3 = np.sqrt((x2-x3)**2 + (y2-y3)**2 + (z2-z3)**2) # distance 3 , 2\n\n\n # Derivatives for m1\n dxdt_1 = vx1\n dydt_1 = vy1\n dzdt_1 = vz1\n\n dvxdt_1 = - (G*self.m2 / r1_2**3) * (x1-x2) - (G*self.m3 / r1_3**3) * (x1-x3)\n dvydt_1 = - (G*self.m2 / r1_2**3) * (y1-y2) - (G*self.m3 / r1_3**3) * (y1-y3)\n dvzdt_1 = - (G*self.m2 / r1_2**3) * (z1-z2) - (G*self.m3 / r1_3**3) * (z1-z3)\n\n # Derivatives for m2\n dxdt_2 = vx2\n dydt_2 = vy2\n dzdt_2 = vz2\n\n dvxdt_2 = - (G*self.m1 / r1_2**3) * (x2-x1) - (G*self.m3 / r2_3**3) * (x2-x3)\n dvydt_2 = - (G*self.m1 / r1_2**3) * (y2-y1) - (G*self.m3 / r2_3**3) * (y2-y3)\n dvzdt_2 = - (G*self.m1 / r1_2**3) * (z2-z1) - (G*self.m3 / r2_3**3) * (z2-z3)\n\n # Derivatives for m3\n dxdt_3 = vx3\n dydt_3 = vy3\n dzdt_3 = vz3\n\n dvxdt_3 = - (G*self.m1 / r1_3**3) * (x3-x1) - (G*self.m2 / r2_3**3) * (x3-x2)\n dvydt_3 = - (G*self.m1 / r1_3**3) * (y3-y1) - (G*self.m2 / r2_3**3) * (y3-y2)\n dvzdt_3 = - (G*self.m1 / r1_3**3) * (z3-z1) - (G*self.m2 / r2_3**3) * (z3-z2)\n\n # pack the derivatives up, and ship them out\n derivarray = np.array([dxdt_1, dydt_1, dzdt_1, dvxdt_1, dvydt_1, dvzdt_1, dxdt_2, dydt_2, dzdt_2, dvxdt_2, dvydt_2, dvzdt_2, dxdt_3, dydt_3, dzdt_3, dvxdt_3, dvydt_3, dvzdt_3])\n\n return derivarray\n\n def getBinary(self,m1,m2,a,e):\n\n mt = m1 + m2\n mu = m1*m2/mt\n SemimayorAx = a/(1.-e)\n P = np.sqrt((2*np.pi)**2 * 
SemimayorAx**3 / (G*mt))\n        Jspec = mu*np.sqrt(G*mt*SemimayorAx*(1-e**2))\n\n        # Primary's distance to perihelion\n        a1 = (1 - e)*SemimayorAx/(1+m1/m2)\n\n        # positions\n        x1 = a1\n        y1 = 0.\n        z1 = 0.\n\n        x2 = -(m1/m2)*x1\n        y2 = -(m1/m2)*y1\n        z2 = 0.\n\n        #velocities\n        vx1 = 0.0\n        vy1 = Jspec/(a1*m1*(1+ m1/m2))\n        vz1 = 0.\n\n        vx2 = -(m1/m2)*vx1\n        vy2 = -(m1/m2)*vy1\n        vz2 = 0.\n\n        return np.array([x1,y1,z1,vx1,vy1,vz1,x2,y2,z2,vx2,vy2,vz2,P])\n\n    def Orbit_3rd(self,e,r,rperi,Mbin,Mstar):\n        mu = G * (Mbin + Mstar)\n        k = (mu * rperi * (1 + e))**(1./2.)\n        print('k=', k, '[cm^2/s]')\n        f = np.arccos((k**2 / (mu * r) - 1)/e)\n        v = ((mu**2 * (e**2 - 1))/ k**2 + 2*mu/r)**(1./2.)\n        print('v = ', v/(1.0e5), '[km/s]')\n\n        fdot = k / r**2\n        rdot = np.sqrt(v**2 - (fdot*r)**2)\n\n        return v, k, f, rdot, fdot\n\n    def xyzCoord(self,r,f,rdot,fdot,i):\n        '''Returns the positions and velocities for an orbit in a plane rotated by an angle \"i\" about the Y axis (\"i\" is measured from the X axis).'''\n\n        x = r*np.cos(f)*np.cos(i)\n        y = r*np.sin(f)\n        z = r*np.cos(f)*np.sin(i)\n\n        vx = rdot*np.cos(f)*np.cos(i) - fdot*r*np.sin(f)*np.cos(i)\n        vy = rdot*np.sin(f) + fdot*r*np.cos(f)\n        vz = rdot*np.cos(f)*np.sin(i) - fdot*r*np.sin(f)*np.sin(i)\n\n        return np.array([x,y,z,vx,vy,vz])\n\n    def RK4(self,times,pos_vel_array,derivs):\n\n        dt = times[1]- times[0]\n\n        # Create array to store results\n        result = np.zeros( (len(times),len(pos_vel_array)) )\n\n        for i, t in enumerate(times):\n\n            result[i] = pos_vel_array\n\n            # then advance the solution using the Runge-Kutta method\n            k1 = dt*derivs(pos_vel_array)  # the dt factor was missing on k1, which broke the RK4 update\n\n            k2 = dt*derivs(pos_vel_array + k1/2.0)\n\n            k3 = dt*derivs(pos_vel_array + k2/2.0)\n\n            k4 = dt*derivs(pos_vel_array + k3)\n\n            pos_vel_array += k1/6. + k2/3. + k3/3. + k4/6.\n\n        return result\n","sub_path":"Modules/gadget/IC/BBHorbit.py","file_name":"BBHorbit.py","file_ext":"py","file_size_in_byte":10337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"156083766","text":"import math\nimport numpy as np\n\n\n__author__ = \"Valentin Rakovic\"\n__copyright__ = \"Copyright (c) 2017, Faculty of Electrical Engineering and Information Technologies, UKIM, Skopje, Macedonia\"\n__version__ = \"0.1.0\"\n__email__ = \"{valentin}@feit.ukim.edu.mk\"\n\n\n'''\nLocalization Module\nHandles the localization process in the REM backend\n'''\n\ndef ML_grid(xs, ys, zs, rss, ulx=0, uly=15, drx=32, dry=0, nx=50, ny=50, nz=50):\n\t'''\n\tLocalization process based on an ML algorithm\n\tArgs:\n\t\txs, ys, zs: vectors of coordinates for the x, y, z axis\n\t\trss: vector of measured values on coordinates xs, ys, zs\n\t\tulx, uly, drx, dry: upper left and lower right corner coordinates of the area of interest for the loc process\n\t\tnx, ny, nz: resolution for the x, y, z axis\n\tReturns:\n\t\tresults (list of estimated x, y, z coordinates and the respective estimated tx power)\n\t'''\n\n\tX = np.array(xs)\n\t#print(X)\n\tY = np.array(ys)\n\t#print(Y)\n\tZ = np.array(zs)\n\t#print(Z)\n\tP = np.array(rss)\n\t#print(P)\n\n\tnoMeasP = len(xs)\n\txmin = ulx\n\tymin = dry\n\txmax = drx\n\tymax = uly\n\tzmin = np.amin(zs)\n\tzmax = np.amax(zs)\n\t#print(zmin)\n\txres = abs((drx-ulx)/nx)\n\tyres = abs((dry-uly)/ny)\n\tzres = abs((zmax-zmin)/nz)\n\n\txE = -1\n\tyE = -1\n\tzE = -1\n\tpE = -1\n\n\txsize = nx\n\tysize = ny\n\tzsize = nz\n\n\thp = np.zeros(shape=((xsize+1)*(ysize+1)*(zsize+1),3))\n\t
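# Editorial note: for every candidate grid point, the loop below fits the\n\t# log-distance path-loss model P ~ P0 - n*D (with D = 10*log10(d)) by\n\t# closed-form least squares and keeps the point whose residuals give the\n\t# highest Gaussian log-likelihood L = -sum(Pm^2)/2.\n\t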
\n\t#print(points)\n\n\tii = 0\n\tfor i in range(0, xsize+1):\n\t\tfor j in range(0, ysize+1):\n\t\t\tfor k in range(0, zsize+1):\n\t\t\t\tpoints[ii,0] = i*xres+xmin\n\t\t\t\tpoints[ii,1] = j*yres+ymin\n\t\t\t\tpoints[ii,2] = k*zres+zmin\n\n\t\t\t\t# nudge measurements that coincide with a grid point,\n\t\t\t\t# otherwise log10 of a zero distance blows up below\n\t\t\t\tfor ik in range(0,noMeasP):\n\t\t\t\t\tif((X[ik]==points[ii,0]) and (Y[ik]==points[ii,1]) and (Z[ik]==points[ii,2])):\n\t\t\t\t\t\tX[ik] += 0.00001\n\t\t\t\t\t\tY[ik] += 0.00001\n\t\t\t\t\t\tZ[ik] += 0.00001\n\t\t\t\tii+=1\n\n\tL = -math.inf\n\tD = np.zeros(noMeasP)\n\tD2 = np.zeros(noMeasP)\n\tDP2 = np.zeros(noMeasP)\n\tPp = np.zeros(noMeasP)\n\tDnp = np.zeros(noMeasP)\n\tPm = np.zeros(noMeasP)\n\n\tfor i in range(0, (xsize+1)*(ysize+1)*(zsize+1)):\n\t\tLnp = 0\n\t\tsumD = 0\n\t\tsumD2 = 0\n\t\tsumP = 0\n\t\tsumDP2 = 0\n\t\tfor j in range(0,noMeasP):\n\t\t\tD[j] = 10*math.log10(math.sqrt(math.pow(X[j]-points[i,0],2)+math.pow(Y[j]-points[i,1],2)+math.pow(Z[j]-points[i,2],2)))\n\t\t\tD2[j] = math.pow(D[j],2)\n\t\t\tDP2[j] = D[j]*P[j]\n\t\t\tsumD +=D[j]\n\t\t\tsumD2 +=D2[j]\n\t\t\tsumP +=P[j]\n\t\t\tsumDP2 += DP2[j]\n\n\t\tDa = sumD/noMeasP\n\t\tDa2 = math.pow(Da,2)\n\t\tD2a = sumD2/noMeasP\n\t\tPa = sumP/noMeasP\n\t\tDPa = sumDP2/noMeasP\n\t\tP0 = (D2a*Pa-Da*DPa)/(D2a-Da2)\n\t\tnpp = (Da*Pa-DPa)/(D2a-Da2)\n\n\t\tfor j in range(0,noMeasP):\n\t\t\tPm[j] = P[j] - P0 + npp * D[j]\n\t\t\tLnp += math.pow(Pm[j],2)\n\n\t\tLn = -Lnp/2\n\t\tif (Ln > L):\n\t\t\tL = Ln\n\t\t\txE = points[i,0]\n\t\t\tyE = points[i,1]\n\t\t\tzE = points[i,2]\n\t\t\tpE = 10*math.log10(abs(P0))\n\n\tresults = []\n\tresults.append(xE)\n\tresults.append(yE)\n\tresults.append(zE)\n\tresults.append(pE)\n\n\treturn results\n\n","sub_path":"rem_backend/localization.py","file_name":"localization.py","file_ext":"py","file_size_in_byte":2846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"566184283","text":"import os\nimport hashlib\nfrom gameapp_spider.config import dbutils\nfrom scrapy.dupefilter import RFPDupeFilter\nfrom scrapy import log\n\n\nclass CustomFilter(RFPDupeFilter):\n \"\"\"Duplicate-request filter seeded with the article URLs already stored in the database.\"\"\"\n def __init__(self, path=None, debug=False):\n self.file = None\n self.fingerprints = set()\n self.logdupes = True\n self.debug = debug\n if path:\n self.con = dbutils.get_con()\n self.cursor = self.con.cursor()\n\n sql = \"\"\"\n select distinct url from games_article;\n \"\"\"\n self.cursor.execute(sql)\n for i in self.cursor.fetchall():\n url = i[0]\n url_hash = self.__getid(url)\n self.fingerprints.add(url_hash)\n\n def __getid(self, url):\n url_hash = hashlib.sha1(url).hexdigest()\n return url_hash\n\n def request_seen(self, request):\n fp = self.__getid(request.url)\n if fp in self.fingerprints:\n log.msg(\"%s is duplicate, filtered.\" % request.url, level=log.DEBUG)\n return True\n self.fingerprints.add(fp)\n","sub_path":"gameapp_spider/gameapp_spider/duplicate_filter.py","file_name":"duplicate_filter.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"182241713","text":"import unittest\n\nfrom .frog import compute_hops_by_simluation, computer_hops_mathematically\n
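\n# For the classic frog-hop puzzle (each hop covers a uniformly random share of the\n# remaining distance), the expected hop count over distance n is presumably the harmonic\n# number H_n = 1 + 1/2 + ... + 1/n; that appears to be what computer_hops_mathematically\n# returns in closed form, and the tests below only sanity-check the simulation against it.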
\n\n# These tests are kind of silly, because they could pretty easily fail even when things\n# are working, but they're hopefully loose enough to not have too many flaky failures.\nclass FrogTest(unittest.TestCase):\n def test_short_distance_lots_of_frogs(self) -> None:\n sim_hops = compute_hops_by_simluation(distance=3, frogs=10**6)\n maths_hops = computer_hops_mathematically(distance=3)\n self.assertAlmostEqual(sim_hops, maths_hops, places=2)\n\n def test_longer_distance_fewer_frogs(self) -> None:\n sim_hops = compute_hops_by_simluation(distance=10, frogs=10**5)\n maths_hops = computer_hops_mathematically(distance=10)\n self.assertAlmostEqual(sim_hops, maths_hops, places=1)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"languages/py/src/maths/frog_test.py","file_name":"frog_test.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"77398917","text":"#############################################################################\n# $Id: SConscript,v 1.3 2006/01/31 20:35:55 dpalma Exp $\n\nImport('env')\n\nsourceFiles = Split(\"\"\"\n tinystr.cpp\n tinyxml.cpp\n tinyxmlerror.cpp\n tinyxmlparser.cpp\n\"\"\")\n\nlocal = env.Copy()\nlocal.Append(CPPDEFINES = ['TIXML_USE_STL'])\nlocal.Library(target='tinyxml', source=sourceFiles)\n","sub_path":"3rdparty/tinyxml/SConscript","file_name":"SConscript","file_ext":"","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"361458423","text":"from django.shortcuts import render, redirect\nfrom django.core.paginator import Paginator, Page\nfrom django import forms\n\nfrom kek.gallery.models import Photo, Tag\nfrom kek.gallery.management.commands.fill import fill as filldb, fill_likes, fill_tags\n\nPhotoFilterQueryset = Tag.objects.all()\n\n\nclass UrlPage(Page):\n query_param = 'page'\n\n def next_page_number_url(self):\n return '%s=%s' % (self.query_param, self.next_page_number())\n\n def previous_page_number_url(self):\n return '%s=%s' % (self.query_param, self.previous_page_number())\n\n\nclass UrlPaginator(Paginator):\n def _get_page(self, *args, **kwargs):\n return UrlPage(*args, **kwargs)\n\n\nclass PhotoFilter(forms.Form):\n selected = forms.ModelMultipleChoiceField(required=False, queryset=PhotoFilterQueryset)\n excluded = forms.ModelMultipleChoiceField(required=False, queryset=PhotoFilterQueryset)\n\n DATE, LIKES = 'date', 'likes'\n\n ordering = forms.ChoiceField(choices=((DATE, 'created_at'), (LIKES, 'likes')), initial=DATE, required=False)\n\n\nclass PageForm(forms.Form):\n page = forms.IntegerField(min_value=1)\n\n\nPhotoQueryset = Photo.objects.all().select_related('user').prefetch_related('tags').order_by('-created_at')\nORDERING = {PhotoFilter.DATE: '-created_at', PhotoFilter.LIKES: '-likes_count'}\n\n\ndef index(request):\n form = PageForm(request.GET or None)\n filter_form = PhotoFilter(request.GET or None)\n queryset = PhotoQueryset\n if filter_form.is_valid():\n # .get(...) keeps an empty selection from filtering everything out, and\n # chaining from 'queryset' lets the selected and excluded filters combine\n if filter_form.cleaned_data.get('selected'):\n queryset = queryset.filter(tags__in=filter_form.cleaned_data['selected'])\n if filter_form.cleaned_data.get('excluded'):\n queryset = queryset.exclude(tags__in=filter_form.cleaned_data['excluded'][:3])\n if filter_form.cleaned_data.get('ordering'):\n queryset = queryset.order_by(ORDERING[filter_form.cleaned_data['ordering']])\n if form.is_valid():\n page = form.cleaned_data['page']\n else:\n page = 1\n data = UrlPaginator(queryset, 20).page(page)\n return render(request, 'index.html', {'data': data, 'filter': filter_form})\n\n\nclass FillForm(forms.Form):\n LIKES, TAGS = 'likes', 'tags'\n source = forms.FileField(required=False)\n step = forms.ChoiceField(choices=(('likes', 'likes'), ('tags', 'tags')), required=False)\n\n\ndef fill(request):\n form = FillForm(request.POST or None, request.FILES)
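\n # Binding request.FILES alongside request.POST lets this one form accept either an\n # uploaded dump ('source') or a named backfill step ('likes' / 'tags'); both branches\n # are dispatched below.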
\n if request.method == 'POST' and form.is_valid():\n step = form.cleaned_data.get('step')\n if form.cleaned_data.get('source'):\n filldb(form.cleaned_data['source'])\n elif step == FillForm.LIKES:\n fill_likes()\n elif step == FillForm.TAGS:\n fill_tags()\n return redirect('/')\n return render(request, 'fill.html', {'form': form})\n","sub_path":"kek/gallery/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"432662047","text":"import csv\nimport cv2\nimport numpy as np\nfrom keras.models import Sequential\nfrom keras.layers import Flatten, Dense, Lambda, Dropout\nfrom keras.layers import Conv2D, MaxPooling2D, Cropping2D\nfrom scipy import ndimage, misc\nimport matplotlib.pyplot as plt\n\nFLG_USE_LR = False\nbatch_size = 128\ninput_shape = (160, 320, 3)\ncount = 0\n\nimages = []\nmeasures = []\ndata_folder = \"/opt/carnd_p3/data/\"\nimage_folder = data_folder + \"IMG/\"\nprint(\"training folder:\", data_folder)\nwith open(data_folder + \"driving_log.csv\") as csvfile:\n reader = csv.reader(csvfile)\n for line in reader:\n # skip header line\n if count == 0:\n count = count + 1\n continue\n source_path = line[0]\n file_name = source_path.split(\"/\")[-1]\n current_img_path = image_folder + file_name\n image = ndimage.imread(current_img_path)\n misc.imsave('./original.png', image) # debug snapshot, overwritten on every sample\n m = float(line[3])\n if FLG_USE_LR:\n # use the left/right camera frames with a fixed steering-angle correction\n left_img_path = image_folder + line[1].split(\"/\")[-1]\n right_img_path = image_folder + line[2].split(\"/\")[-1]\n left_img = ndimage.imread(left_img_path)\n right_img = ndimage.imread(right_img_path)\n images.append(left_img)\n measures.append(m + 0.2)\n images.append(right_img)\n measures.append(m - 0.2)\n images.append(image)\n measures.append(m)\n # augment with the horizontally flipped frame and the negated steering angle\n image_flipped = np.fliplr(image)\n misc.imsave('./flipped.png', image_flipped)\n images.append(image_flipped)\n measurement_flipped = -m\n measures.append(measurement_flipped)\ny_train = np.array(measures)\nX_train = np.array(images)\nstart_train = True\nprint_history = False\n\nprint(X_train.shape)\nprint(X_train[0].shape)\nprint(y_train.shape)\nif start_train:\n model = Sequential()\n # preprocessing\n model.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=(160, 320, 3)))\n model.add(Cropping2D(cropping=((70, 25), (0, 0))))\n # ===== architecture =====\n model.add(Conv2D(24, kernel_size=(5, 5), activation=\"elu\", strides=(2, 2)))\n model.add(Conv2D(36, kernel_size=(5, 5), activation=\"elu\", strides=(2, 2)))\n model.add(Conv2D(48, kernel_size=(5, 5), activation=\"elu\", strides=(2, 2)))\n model.add(Conv2D(64, kernel_size=(3, 3), activation=\"elu\"))\n model.add(Conv2D(64, kernel_size=(3, 3), activation=\"elu\"))\n model.add(Dropout(0.5))\n model.add(Flatten())\n model.add(Dense(100, activation=\"elu\"))\n model.add(Dense(50, activation=\"elu\"))\n model.add(Dense(10, activation=\"elu\"))\n model.add(Dense(1))\n # ===== architecture end =====\n model.compile(loss=\"mse\", optimizer=\"adam\")\n history_object = model.fit(\n x=X_train,\n y=y_train,\n batch_size=batch_size,\n validation_split=0.2,\n shuffle=True,\n epochs=7,\n verbose=1,\n )\n model.save(\"model_without_lr.h5\")\n if print_history:\n ### print the keys contained in the history object\n print(history_object.history.keys())\n\n ### plot the training 
and validation loss for each epoch\n plt.plot(history_object.history[\"loss\"])\n plt.plot(history_object.history[\"val_loss\"])\n plt.title(\"model mean squared error loss\")\n plt.ylabel(\"mean squared error loss\")\n plt.xlabel(\"epoch\")\n plt.legend([\"training set\", \"validation set\"], loc=\"upper right\")\n plt.show()\n","sub_path":"behavior_cloning/nvidia_model.py","file_name":"nvidia_model.py","file_ext":"py","file_size_in_byte":3590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"266074464","text":"N = int(input())\nDP = [False for _ in range(N *100 + 1)]\nDP[0] = True\nfor i in range(N):\n k = int(input())\n for i2 in range(i * 100 + 1,-1,-1):\n if DP[i2] != 0:\n DP[i2 + k] = True\nans = 0\nfor i in range(N *100 + 1):\n if DP[i] == True and i % 10 != 0:\n ans = i\nprint(ans)","sub_path":"Python_codes/p03699/s129241902.py","file_name":"s129241902.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"372714073","text":"import numpy as np\nfrom math import cos, pi\nimport matplotlib.pyplot as plt\nfrom numpy.linalg import inv\nfrom mpl_toolkits.mplot3d import Axes3D\nimport random\n\n\ndef randomcolor():\n colorArr = ['1', '2', '3', '4', '5', '6', '7',\n '8', '9', 'A', 'B', 'C', 'D', 'E', 'F']\n color = \"\"\n for i in range(6):\n color += colorArr[random.randint(0, 14)]\n return \"#\"+color\n\n\ndef f_i(c, phi, phi_i, k=1, f_max=20, f_0=5., poisson_noise=True):\n result = f_0+c*f_max*(np.exp(k*(cos(2*(phi-phi_i))-1)))\n\n if poisson_noise:\n return np.random.poisson(result)\n else:\n return result\n\n\ndef f(n, **kwargs):\n neuron_orientations = np.arange(0, pi, pi/n)\n population_responses = np.array(list(map(lambda x: f_i(\n phi_i=x, **kwargs), neuron_orientations)))\n\n return population_responses\n\n\ndef problem_a_1():\n result_1 = f(n=100, c=1., phi=0)\n result_2 = f(n=100, c=1., phi=pi/2)\n result_3 = f(n=100, c=0.1, phi=0)\n result_4 = f(n=100, c=0.1, phi=pi/2)\n\n plt.figure()\n plt.plot(range(len(result_1)), result_1, color='red', label='c:1.0 phi:0')\n plt.plot(range(len(result_2)), result_2,\n color='green', label='c:1.0 phi:pi/2')\n\n # plt.xticks(np.arange(0, pi, pi / 10))\n plt.xlabel('orientation')\n plt.ylabel('response')\n plt.legend()\n\n plt.savefig('problem_a_1_c1.png')\n plt.close()\n\n plt.figure()\n plt.plot(range(len(result_3)), result_3, color='red', label='c:0.1 phi:0')\n plt.plot(range(len(result_4)), result_4,\n color='green', label='c:0.1 phi:pi/2')\n\n # plt.xticks(np.arange(0, pi, pi / 10))\n plt.xlabel('orientation')\n plt.ylabel('response')\n plt.legend()\n\n plt.savefig('problem_a_1_c01.png')\n plt.close()\n\n\ndef problem_a_2():\n result = f(n=100, c=1, phi=0, poisson_noise=False)\n covariance_matrix = np.zeros((len(result), len(result)))\n for i in range(len(result)):\n covariance_matrix[i, i] = result[i]\n\n return covariance_matrix\n\n\ndef problem_a_3():\n plt.figure()\n phi_arr = [[pi/4, 0], [pi/2, 0], [pi*0.75, 0]]\n covariance_matrix = problem_a_2()\n color = ['red', 'green', 'blue']\n label = ['pi/4', 'pi/2', '3pi/4']\n for i, d in enumerate(phi_arr):\n delta_phi = d[0]-d[1]\n w = np.dot(inv(covariance_matrix), f(\n n=100, c=1., phi=d[0], poisson_noise=False)-f(n=100, c=1., phi=d[1], poisson_noise=False))\n plt.plot(range(len(w)), w, color=color[i], label=label[i])\n\n # plt.xticks(np.arange(0, pi, pi / 10))\n plt.xlabel('orientation')\n plt.ylabel('w value')\n plt.legend()\n\n 
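# w is the optimal (Fisher) linear readout: w = C^-1 (f(phi_a) - f(phi_b)); with\n # independent Poisson neurons C is diagonal, so each weight is just the tuning\n # difference divided by that neuron's mean rate.\n 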
plt.savefig('problem_a_3.png')\n plt.close()\n\n\ndef problem_a_4(phi_1, phi_2):\n covariance_matrix = problem_a_2()\n w = np.dot(inv(covariance_matrix), f(\n n=100, c=1., phi=phi_1, poisson_noise=False)-f(n=100, c=1., phi=phi_2, poisson_noise=False))\n\n d1 = np.dot(w.T, f(n=100, c=1, phi=phi_1))\n d2 = np.dot(w.T, f(n=100, c=1, phi=phi_2))\n\n return d1, d2\n\n\ndef problem_a_5():\n def get_covariance_matrix(n, c, phi_1, phi_2):\n result = (f(n=100, c=1, phi=phi_1, poisson_noise=False) +\n f(n=100, c=1, phi=phi_2, poisson_noise=False))/2\n covariance_matrix = np.zeros((len(result), len(result)))\n for i in range(len(result)):\n covariance_matrix[i, i] = result[i]\n\n return covariance_matrix\n\n phi_1 = 0\n phi_2 = pi/2\n\n w = np.dot(inv(get_covariance_matrix(n=100, c=1, phi_1=phi_1, phi_2=phi_2)), f(\n n=100, c=1., phi=phi_1, poisson_noise=False)-f(n=100, c=1., phi=phi_2, poisson_noise=False))\n\n d1s = []\n d2s = []\n\n for i in range(1000):\n d1 = np.dot(w.T, f(n=100, c=1, phi=phi_1))\n d2 = np.dot(w.T, f(n=100, c=1, phi=phi_2))\n d1s.append(d1)\n d2s.append(d2)\n\n plt.figure()\n plt.hist(d1s)\n # plt.plot(range(len(d2s)), d2s, color='green', label='phi_2:pi/2')\n plt.xlabel('times of stimulus 1')\n plt.ylabel('count')\n plt.legend()\n\n plt.savefig('problem_a_4_phi_1.png')\n plt.close()\n\n plt.figure()\n plt.hist(d2s)\n plt.xlabel('times of stimulus 2')\n plt.ylabel('count')\n plt.legend()\n\n plt.savefig('problem_a_4_phi_2.png')\n plt.close()\n\n print('phi_1 number for d>0:{}'.format(np.sum(np.array(d1s) > 0)))\n print('phi_2 number for d>0:{}'.format(np.sum(np.array(d2s) > 0)))\n\n d1s = np.array(d1s)\n d2s = np.array(d2s)\n d_prime = (d1s-d2s)/(np.var(d1s)+np.var(d2s))**0.5\n\n plt.figure()\n plt.hist(d_prime)\n plt.xlabel('times')\n plt.ylabel('d')\n plt.legend()\n\n plt.savefig('problem_a_4_d_prime.png')\n plt.close()\n\n print('d_prime d>0:{}'.format(np.sum(np.array(d_prime) > 0)))\n\n\ndef problem_a_6():\n def get_covariance_matrix(n, c, phi_1, phi_2):\n result = (f(n=n, c=1, phi=phi_1, poisson_noise=False) +\n f(n=n, c=1, phi=phi_2, poisson_noise=False))/2\n covariance_matrix = np.zeros((len(result), len(result)))\n for i in range(len(result)):\n covariance_matrix[i, i] = result[i]\n\n return covariance_matrix\n\n phi_1 = 0\n phi_2 = pi/2\n\n d_prime_by_n = []\n for n in range(2, 1000, 10):\n print(n)\n w = np.dot(inv(get_covariance_matrix(n=n, c=1, phi_1=phi_1, phi_2=phi_2)), f(\n n=n, c=1., phi=phi_1, poisson_noise=False)-f(n=n, c=1., phi=phi_2, poisson_noise=False))\n\n d1s = []\n d2s = []\n\n for i in range(1000):\n d1 = np.dot(w.T, f(n=n, c=1, phi=phi_1))\n d2 = np.dot(w.T, f(n=n, c=1, phi=phi_2))\n d1s.append(d1)\n d2s.append(d2)\n\n d1s = np.array(d1s)\n d2s = np.array(d2s)\n d_prime = np.mean((d1s-d2s)/(np.var(d1s)+np.var(d2s))**0.5)\n\n d_prime_by_n.append(d_prime)\n\n plt.figure()\n # plt.plot(range(len(d1s)), d1s, color='red', label='phi_1:0')\n plt.plot(range(len(d_prime_by_n)), d_prime_by_n,\n color='green', label='d_prime')\n plt.xlabel('n*10')\n plt.ylabel('mean')\n plt.legend()\n\n plt.savefig('problem_a_5.png')\n plt.close()\n\n\ndef problem_a_7():\n def get_covariance_matrix(n, c, phi_1, phi_2, k):\n result = (f(n=n, c=1, phi=phi_1, poisson_noise=False, k=k) +\n f(n=n, c=1, phi=phi_2, poisson_noise=False, k=k))/2\n covariance_matrix = np.zeros((len(result), len(result)))\n for i in range(len(result)):\n covariance_matrix[i, i] = result[i]\n\n return covariance_matrix\n\n phi_1 = 0\n phi_2 = pi/2\n\n d_prime_by_n = []\n for k in np.arange(0.5, 10, 0.5):\n 
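# sweep the tuning-curve concentration k: larger k gives narrower tuning curves,\n # and the optimal readout w and d' are re-estimated from scratch for each k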
\n print(k)\n w = np.dot(inv(get_covariance_matrix(n=100, c=1, phi_1=phi_1, phi_2=phi_2, k=k)), f(\n n=100, c=1., phi=phi_1, poisson_noise=False, k=k)-f(n=100, c=1., phi=phi_2, poisson_noise=False, k=k))\n\n d1s = []\n d2s = []\n\n for i in range(1000):\n d1 = np.dot(w.T, f(n=100, c=1, phi=phi_1, k=k))\n d2 = np.dot(w.T, f(n=100, c=1, phi=phi_2, k=k))\n d1s.append(d1)\n d2s.append(d2)\n\n d1s = np.array(d1s)\n d2s = np.array(d2s)\n d_prime = np.mean((d1s-d2s)/(np.var(d1s)+np.var(d2s))**0.5)\n\n d_prime_by_n.append(d_prime)\n\n plt.figure()\n # plt.plot(range(len(d1s)), d1s, color='red', label='phi_1:0')\n plt.plot(range(len(d_prime_by_n)), d_prime_by_n,\n color='green', label='d_prime')\n plt.xlabel('k(*0.5)')\n plt.ylabel('d_prime')\n plt.legend()\n\n plt.savefig('problem_a_6.png')\n plt.close()\n\n\ndef get_covariance_matrix_by_correlation(n, c_max, tau):\n neuron_orientations = np.arange(0, pi, pi/n)\n covariance_matrix = np.zeros(\n (len(neuron_orientations), len(neuron_orientations)))\n for i in range(len(neuron_orientations)):\n for j in range(len(neuron_orientations)-i):\n j += i\n # distance-dependent correlation between neurons i and j\n c_ij = c_max * \\\n np.exp(-np.abs(neuron_orientations[i] -\n neuron_orientations[j])/tau)\n # covariance entry, assuming the baselines f0_i and f0_j are both 5.\n covariance_matrix[i, j] = c_ij*5\n covariance_matrix[j, i] = c_ij*5\n\n return covariance_matrix\n\n\ndef get_optimal_weights(phi_1, phi_2, c, n, covariance_matrix):\n w = np.dot(inv(covariance_matrix), f(n=n, c=c, phi=phi_1,\n poisson_noise=False, k=1)-f(n=n, c=c, phi=phi_2, poisson_noise=False, k=1))\n\n return w\n\n\ndef f_b(n, covariance_matrix, **kwargs):\n neuron_orientations = np.arange(0, pi, pi/n)\n population_responses = np.array(list(map(lambda x: f_i(\n phi_i=x, **kwargs), neuron_orientations)))\n\n # draw a correlated response centred on the population response; adding the\n # response on top of the multivariate-normal draw would count the mean twice\n return np.random.multivariate_normal(population_responses, covariance_matrix)\n\n\ndef problem_b_4():\n phi_1 = 0\n phi_2 = pi/2\n\n w_prime = []\n c_maxes = [0.1, 0.5, 0.8]\n for c_max in c_maxes:\n w_prime_by_c_max = []\n for n in range(2, 200, 10):\n print(n)\n cov_matrix = get_covariance_matrix_by_correlation(\n n=n, c_max=c_max, tau=0.5)\n cov_matrix = np.diag(np.diag(cov_matrix))\n w = get_optimal_weights(\n c=0.1, n=n, phi_1=phi_1, phi_2=phi_2, covariance_matrix=cov_matrix)\n\n d1s_w = []\n d2s_w = []\n\n for i in range(1000):\n d1 = np.dot(w.T, f_b(n=n, c=0.1, phi=phi_1,\n k=1, covariance_matrix=cov_matrix))\n d2 = np.dot(w.T, f_b(n=n, c=0.1, phi=phi_2,\n k=1, covariance_matrix=cov_matrix))\n d1s_w.append(d1)\n d2s_w.append(d2)\n\n d1s_w = np.array(d1s_w)\n d2s_w = np.array(d2s_w)\n d_prime = np.mean((d1s_w-d2s_w) /\n (np.var(d1s_w)+np.var(d2s_w))**0.5)\n\n w_prime_by_c_max.append(d_prime)\n\n w_prime.append(w_prime_by_c_max)\n\n plt.figure()\n for i, d in enumerate(w_prime):\n plt.plot(range(len(w_prime[i])), w_prime[i],\n color=randomcolor(), label='c_max:{}'.format(c_maxes[i]))\n plt.xlabel('n(*10)')\n plt.ylabel('d_prime')\n plt.legend()\n\n plt.savefig('problem_b_4_1.png')\n plt.close()\n\n w_opt_prime = []\n c_maxes = [0.1, 0.5, 0.8]\n for c_max in c_maxes:\n w_opt_prime_by_c_max = []\n for n in range(2, 200, 10):\n print(n)\n cov_matrix = get_covariance_matrix_by_correlation(\n n=n, c_max=c_max, tau=0.5)\n w_opt = get_optimal_weights(\n c=0.1, n=n, phi_1=phi_1, phi_2=phi_2, covariance_matrix=cov_matrix)\n\n d1s_w_opt = []\n d2s_w_opt = []\n\n for i in range(1000):\n d1 = np.dot(w_opt.T, f_b(n=n, c=0.1, phi=phi_1,\n k=1, covariance_matrix=cov_matrix))\n d2 = np.dot(w_opt.T, f_b(n=n, 
c=0.1, phi=phi_2,\n k=1, covariance_matrix=cov_matrix))\n d1s_w_opt.append(d1)\n d2s_w_opt.append(d2)\n\n d1s_w_opt = np.array(d1s_w_opt)\n d2s_w_opt = np.array(d2s_w_opt)\n d_prime = np.mean((d1s_w_opt-d2s_w_opt) /\n (np.var(d1s_w_opt)+np.var(d2s_w_opt))**0.5)\n\n w_opt_prime_by_c_max.append(d_prime)\n\n w_opt_prime.append(w_opt_prime_by_c_max)\n\n plt.figure()\n for i, d in enumerate(w_opt_prime):\n plt.plot(range(len(w_opt_prime[i])), w_opt_prime[i],\n color=randomcolor(), label='c_max:{}'.format(c_maxes[i]))\n plt.xlabel('n(*10)')\n plt.ylabel('d_prime')\n plt.legend()\n\n plt.savefig('problem_b_4_2.png')\n plt.close()\n\n\ndef problem_c():\n phi_1 = 0\n phi_2 = pi/2\n\n c = 0.1\n c_max = 0.1\n mean_k = 1.5\n mean_f_max = 20\n n = 100\n\n f_shape = 200000\n std = np.arange(0.1, 1, 0.1)\n shapes = (mean_k / std) ** 2\n scales = mean_k / shapes\n std = np.sqrt(shapes) * scales\n\n d_prime_var = []\n for shape, scale in zip(shapes, scales):\n print(shape)\n\n cov_matrix = get_covariance_matrix_by_correlation(\n n=n, c_max=c_max, tau=0.5)\n\n w_opt = get_optimal_weights(\n c=c, n=n, phi_1=phi_1, phi_2=phi_2, covariance_matrix=cov_matrix)\n\n d1s_w_opt = []\n d2s_w_opt = []\n\n for i in range(1000):\n # k = np.random.gamma(shape, scale, 1).item()\n fmax = np.random.gamma(f_shape, mean_f_max / f_shape, 1).item()\n d1 = np.dot(w_opt.T, f_b(n=n, c=0.1, phi=phi_1,\n k=1, f_max=fmax, covariance_matrix=cov_matrix))\n d2 = np.dot(w_opt.T, f_b(n=n, c=0.1, phi=phi_2,\n k=1, f_max=fmax, covariance_matrix=cov_matrix))\n d1s_w_opt.append(d1)\n d2s_w_opt.append(d2)\n\n d1s_w_opt = np.array(d1s_w_opt)\n d2s_w_opt = np.array(d2s_w_opt)\n d_prime = np.mean((d1s_w_opt-d2s_w_opt) /\n (np.var(d1s_w_opt)+np.var(d2s_w_opt))**0.5)\n d_prime_var.append(d_prime)\n\n plt.figure()\n plt.plot(range(len(d_prime_var)), d_prime_var,\n color=randomcolor(), label='fmax for different vars')\n plt.xlabel('n(*10)')\n plt.ylabel('d_prime')\n plt.legend()\n\n plt.savefig('problem_c_fmax.png')\n plt.close()\n\n\nif __name__ == '__main__':\n # problem_a_1()\n # print(problem_a_2())\n # problem_a_3()\n # problem_a_4()\n # problem_a_5()\n # problem_a_6()\n # problem_a_7()\n # get_optimal_weights(0, pi/2, 0.1)\n # problem_b_4()\n problem_c()\n","sub_path":"NS/HW3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":13595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"389287173","text":"#!/usr/bin/env python2\n\nimport os\nimport sys\nimport subprocess\nimport commands\nimport shutil\nimport datetime\nimport multiprocessing\nimport signal\n\nfrom optparse import OptionParser\n\ntargets = 'aarch64-elf aarch64-linux-gnu \\\n alpha-linux-gnu alpha-freebsd6 alpha-netbsd alpha-openbsd \\\n alpha64-dec-vms alpha-dec-vms am33_2.0-linux \\\n arc-elf32OPT-with-cpu=arc600 arc-elf32OPT-with-cpu=arc700 \\\n arc-linux-uclibcOPT-with-cpu=arc700 arceb-linux-uclibcOPT-with-cpu=arc700 \\\n arm-wrs-vxworks arm-netbsdelf \\\n arm-linux-androideabi arm-uclinux_eabi arm-eabi \\\n arm-symbianelf avr-rtems avr-elf \\\n bfin-elf bfin-uclinux bfin-linux-uclibc bfin-rtems bfin-openbsd \\\n c6x-elf c6x-uclinux cr16-elf cris-elf cris-linux crisv32-elf crisv32-linux \\\n epiphany-elf epiphany-elfOPT-with-stack-offset=16 fido-elf \\\n fr30-elf frv-elf frv-linux h8300-elf h8300-rtems hppa-linux-gnu \\\n hppa-linux-gnuOPT-enable-sjlj-exceptions=yes hppa64-linux-gnu \\\n hppa2.0-hpux10.1 hppa64-hpux11.3 \\\n hppa64-hpux11.0OPT-enable-sjlj-exceptions=yes hppa2.0-hpux11.9 \\\n i686-pc-linux-gnu 
i686-apple-darwin i686-apple-darwin9 i686-apple-darwin10 \\\n i486-freebsd4 i686-freebsd6 i686-kfreebsd-gnu \\\n i686-netbsdelf9 i686-knetbsd-gnu i686-openbsd i686-openbsd3.0 \\\n i686-elf i686-kopensolaris-gnu i686-symbolics-gnu i686-pc-msdosdjgpp \\\n i686-lynxos i686-nto-qnx \\\n i686-rtems i686-solaris2.10 i686-wrs-vxworks \\\n i686-wrs-vxworksae \\\n i686-cygwinOPT-enable-threads=yes i686-mingw32crt ia64-elf \\\n ia64-freebsd6 ia64-linux ia64-hpux ia64-hp-vms iq2000-elf lm32-elf \\\n lm32-rtems lm32-uclinux m32c-rtems m32c-elf m32r-elf m32rle-elf m32r-rtems \\\n m32r-linux m32rle-linux m68k-elf m68k-netbsdelf \\\n m68k-openbsd m68k-uclinux m68k-linux m68k-rtems \\\n mcore-elf mep-elf microblaze-linux microblaze-elf \\\n mips-netbsd \\\n mips64el-st-linux-gnu mips64octeon-linux mipsisa64r2-linux \\\n mipsisa32r2-linux-gnu mipsisa64r2-sde-elf mipsisa32-elfoabi \\\n mipsisa64-elfoabi mipsisa64r2el-elf mipsisa64sr71k-elf mipsisa64sb1-elf \\\n mipsel-elf mips64-elf mips64vr-elf mips64orion-elf mips-rtems \\\n mips-wrs-vxworks mipstx39-elf mmix-knuth-mmixware mn10300-elf moxie-elf \\\n moxie-uclinux moxie-rtems \\\n msp430-elf \\\n nds32le-elf nds32be-elf \\\n nios2-elf nios2-linux-gnu \\\n pdp11-aout picochip-elfOPT-enable-obsolete \\\n powerpc-darwin8 \\\n powerpc-darwin7 powerpc64-darwin powerpc-freebsd6 powerpc-netbsd \\\n powerpc-eabispe powerpc-eabisimaltivec powerpc-eabisim ppc-elf \\\n powerpc-eabialtivec powerpc-xilinx-eabi powerpc-eabi \\\n powerpc-rtems4.11OPT-enable-threads=yes powerpc-linux_spe \\\n powerpc-linux_paired powerpc64-linux_altivec \\\n powerpc-wrs-vxworks powerpc-wrs-vxworksae powerpc-lynxos powerpcle-elf \\\n powerpcle-eabisim powerpcle-eabi rs6000-ibm-aix4.3 rs6000-ibm-aix5.1.0 \\\n rs6000-ibm-aix5.2.0 rs6000-ibm-aix5.3.0 rs6000-ibm-aix6.0 \\\n rl78-elf rx-elf s390-linux-gnu s390x-linux-gnu s390x-ibm-tpf sh-elf \\\n shle-linux sh-netbsdelf sh-superh-elf sh5el-netbsd sh64-netbsd sh64-linux \\\n sh64-elfOPT-with-newlib sh-rtems sh-wrs-vxworks sparc-elf \\\n sparc-leon-elf sparc-rtems sparc-linux-gnu \\\n sparc-leon3-linux-gnuOPT-enable-target=all sparc-netbsdelf \\\n sparc64-sun-solaris2.10OPT-with-gnu-ldOPT-with-gnu-asOPT-enable-threads=posix \\\n sparc-wrs-vxworks sparc64-elf sparc64-rtems sparc64-linux sparc64-freebsd6 \\\n sparc64-netbsd sparc64-openbsd spu-elf \\\n tilegx-linux-gnu tilegxbe-linux-gnu tilepro-linux-gnu \\\n v850e-elf v850-elf vax-linux-gnu \\\n vax-netbsdelf vax-openbsd x86_64-apple-darwin \\\n x86_64-pc-linux-gnuOPT-with-fpmath=avx \\\n x86_64-elfOPT-with-fpmath=sse x86_64-freebsd6 x86_64-netbsd \\\n x86_64-knetbsd-gnu x86_64-w64-mingw32 \\\n x86_64-mingw32OPT-enable-sjlj-exceptions=yes xstormy16-elf xtensa-elf \\\n xtensa-linux i686-interix3OPT-enable-obsolete score-elfOPT-enable-obsolete'\n\nall_targets = [x for x in targets.split(' ') if x]\nparallelism = multiprocessing.cpu_count()\nmake_cmd = 'make -k -j' + str(parallelism)\n\ndef err(message):\n log(message)\n exit(1)\n\ndef log(message):\n d = str(datetime.datetime.now())\n print('[%s]: %s' % (d, message))\n\ndef parse_languages(languages):\n return ','.join(set([x for x in languages.split(',') if x != 'lto']))\n\nfrontends = { 'c': 'cc1', 'c++': 'cc1plus', 'go': 'go1', 'fortran': 'f951', 'ada': 'gnat1', 'go': 'go1', 'java': 'jc1', 'objc': 'cc1obj', 'obj-c++': 'cc1objplus', 'java': 'jc1' }\n\nparser = OptionParser()\nparser.add_option(\"-f\", \"--folder\", dest=\"folder\", help=\"git repository folder\")\nparser.add_option(\"-d\", \"--destination\", dest=\"destination\", help = 
\"destination folder\")\nparser.add_option(\"-l\", \"--languages\", dest=\"languages\", help = \"languages\", default = 'all')\nparser.add_option(\"-c\", \"--checking\", action=\"store_true\", dest=\"checking\", default=False, help = \"enable checking\")\nparser.add_option(\"-s\", \"--subset\", action=\"store_true\", dest=\"subset\", default=False, help = \"subset of targets\")\nparser.add_option(\"-t\", \"--targets\", dest=\"targets\", default = ','.join(all_targets), help = \"targets\")\n\n(options,args) = parser.parse_args()\n\nif not options.folder:\n parser.error('folder not specified')\n\nif not options.destination:\n parser.error('destination')\n\nif not os.path.exists(options.destination):\n os.mkdir(options.destination)\n\nif options.languages.find('all') != -1:\n options.languages = options.languages.replace('all', ','.join(frontends.keys()))\n\noptions.targets = options.targets.split(',')\n\nif options.subset:\n subset = map(lambda x: x.split('-')[0], options.targets)\n prefixes = (set(subset))\n options.targets= map(lambda x: filter(lambda y: y.startswith(x), options.targets)[0], prefixes)\n\n# build of configure command line\nconfigure_options = '--disable-bootstrap'\n\nif not options.checking:\n configure_options = configure_options + ' --enable-checking=release'\n\nif options.languages != None:\n configure_options = configure_options + ' --enable-languages=' + options.languages\n\noptions.languages = options.languages.split(',')\n\nlog('Built configure options: ' + configure_options)\n\nfailures = []\ntargets = options.targets\n# targets = filter(lambda x: x.startswith('rs6000'), options.targets)\n# targets = filter(lambda x: x.startswith('mmix-knuth-mmixware'), options.targets)\n\noptions.destination = os.path.abspath(options.destination)\n\nfor (i, v) in enumerate(targets):\n log('configure: %s [%u/%u]' % (v, i + 1, len(targets)))\n tokens = v.split('OPT')\n target = tokens[0]\n\n folder = os.path.join(options.destination, v)\n\n if not os.path.exists(folder):\n os.mkdir(folder)\n\n os.chdir(folder)\n\n configure_location = os.path.join(options.folder, 'configure')\n configure_command = configure_location + ' ' + configure_options + ' --target=' + target + ' ' + ' '.join(tokens[1:])\n r = commands.getstatusoutput(configure_command)\n enabled_languages = options.languages\n\n if r[0] != 0:\n lines = r[1].split('\\n')\n if lines[-1].strip().startswith('Supported languages are'):\n possible = parse_languages(lines[-1].split(':')[-1].strip())\n log('Unsupported language, trying to reconfigure for language subset: ' + possible)\n enabled_languages = possible.split(',')\n configure_command = configure_command + ' --enable-languages=' + possible\n r = commands.getstatusoutput(configure_command)\n else:\n err('Configuration failed: %s' % (r[1]))\n\n if r[0] != 0:\n err('Configuration failed: %s' % (r[1]))\n\n log('building: %s [%u/%u]' % (v, i + 1, len(targets)))\n\n r = commands.getstatusoutput(make_cmd)\n log('Make exited with: %u' % r[0])\n\n matches = filter(lambda x: not x[1], map(lambda x: (x, os.path.exists(os.path.join('gcc', frontends[x]))), enabled_languages))\n missing_fe = ' '.join(map(lambda x: x[0], matches))\n\n if any(matches):\n failures.append(v)\n msg = 'Missing FE: %s (%u/%u)' % (missing_fe, len(matches), len(enabled_languages))\n log(msg)\n\n if not os.path.exists('../logs'):\n os.mkdir('../logs')\n\n with open(os.path.join('../logs', v + '.log'), \"w\") as text_file:\n text_file.write(r[1])\n\n if not any(matches):\n shutil.rmtree(folder)\n\nlog('Wrong 
configurations: ' + str(failures))\n","sub_path":"boilerplate/gcc_batch_build.py","file_name":"gcc_batch_build.py","file_ext":"py","file_size_in_byte":8074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"53130230","text":"from PIL import Image \nfrom skimage import measure\nfrom shapely.geometry import Polygon, MultiPolygon\nfrom image_processing import resize_label, resize_image\n\nimport os\nimport numpy as np\nimport tqdm\nimport datetime\nimport json\nimport pdb\n\n\nid2classid = {33:1,\n 34:2,\n 35:3,\n 36:4,\n 38:5,\n 39:6,\n 40:7,}\n\ncategory2id = {'car': 33,\n 'motorbicycle': 34, \n 'bicycle': 35,\n 'person': 36, \n 'truck': 38,\n 'bus': 39,\n 'tricycle': 40,}\nid2category = {}\nfor cat, _id in category2id.items():\n id2category[_id] = cat\n\ndef create_coco(img_path, label_path, destination, crop_size=None, down_scale=None):\n \n label_path.sort()\n img_path.sort()\n \n # create images list\n print('Creating image list...')\n image_list = create_image_list(img_path, crop_size, down_scale)\n \n # create annotation list\n print('Creating object2color dictionary...')\n object2color, class_instance = create_object2color(label_path)\n category_ids = create_color2category(class_instance, object2color)\n \n is_crowd = 0\n\n # These ids will be automatically increased as we go\n annotation_id = 0\n\n # Create the annotations\n print('Creating annotation list...')\n annotations = []\n for img_id, path in tqdm.tqdm(enumerate(label_path)):\n label = Image.open(path)\n label = np.array(label)\n \n if (crop_size is not None) and (down_scale is not None):\n label = resize_label(label, crop_size=crop_size, down_scale=down_scale)\n \n mask = do_mask_image(label, class_instance[img_id], object2color)\n mask = Image.fromarray(np.uint8(mask))\n sub_masks = create_sub_masks(mask)\n for color, sub_mask in sub_masks.items():\n category_id = id2classid[category_ids[img_id][color]]\n #annotation = create_sub_mask_annotation(sub_mask, img_id, category_id, annotation_id, is_crowd)\n try:\n annotation = create_sub_mask_annotation(sub_mask, img_id, category_id, annotation_id, is_crowd)\n except:\n print('Image ID ', img_id, 'could not find the bounding boxs')\n continue\n annotations.append(annotation)\n annotation_id += 1\n\n # Delete images withoud annotation\n image_list = clean_image_list(image_list, annotations)\n \n # Making json\n data = {}\n data['info'] = {}\n data['licenses'] = []\n data['images'] = image_list\n data['annotations'] = annotations\n data['categories'] = [\n {\"supercategory\": \"vehicle\", \"id\": 1,\"name\": \"car\"},\n {\"supercategory\": \"vehicle\", \"id\": 2,\"name\": \"motorbicycle\"},\n {\"supercategory\": \"vehicle\", \"id\": 3,\"name\": \"bicycle\"},\n {\"supercategory\": \"vehicle\", \"id\": 5,\"name\": \"truck\"},\n {\"supercategory\": \"vehicle\", \"id\": 6,\"name\": \"bus\"},\n {\"supercategory\": \"vehicle\", \"id\": 7,\"name\": \"tricycle\"},\n\n {\"supercategory\": \"person\", \"id\": 4,\"name\": \"person\"},\n ]\n with open(destination, 'w') as f:\n json.dump(data, f, indent=4)\n\ndef create_image_list(img_path, crop_size=None, down_scale=None):\n img = Image.open(img_path[0])\n h = img.height\n w = img.width\n \n if (crop_size is not None) and (down_scale is not None):\n img = np.array(img)\n img = resize_image(img, crop_size=crop_size, down_scale=down_scale)\n h, w, _ = img.shape\n \n image_list = []\n for img_id, path in enumerate(img_path):\n image_list += [{\n 'id': img_id,\n 'license': 0,\n 'coco_url': 
'https://www.google.com/',\n 'flickr_url': 'https://www.google.com/',\n 'width': w,\n 'height': h,\n 'file_name': path.split('/')[-1],\n 'date_captured': datetime.datetime.now().replace(microsecond=0).isoformat(' '),\n }]\n return image_list\n\n\n#def create_object2color(label_path, id2category):\ndef create_object2color(label_path):\n \n class_instance = {}\n class_summary = {}\n for _id in id2category.keys():\n class_summary[_id] = set()\n \n for img_id, path in tqdm.tqdm(enumerate(label_path)):\n label = Image.open(path)\n label = np.array(label)\n label_class = np.ndarray.astype(label/1000, np.int32)\n label_instance = np.mod(label, 1000)\n \n class_instance[img_id] = []\n for class_id in id2category.keys():\n i, j = np.where(label_class==class_id)\n instance_set = set(label_instance[i, j])\n \n class_summary[class_id] = class_summary[class_id].union(instance_set)\n for instance_id in instance_set:\n class_instance[img_id] += [(class_id, instance_id)]\n \n object2color = {}\n color_set = set()\n def generate_color(color_set):\n while True:\n r, g, b = np.random.randint(256, size=3)\n if not (r, g, b) in color_set:\n break\n return (r, g, b)\n \n for class_id, instances_id in class_summary.items():\n for instance_id in instances_id:\n r, g, b = generate_color(color_set)\n object2color[(id2classid[class_id], instance_id)] = (r, g, b)\n \n \n return object2color, class_instance\n\ndef do_mask_image(label, class_instance, object2color):\n label_class = np.ndarray.astype(label/1000, np.int32)\n label_instance = np.mod(label, 1000)\n \n h, w = label.shape\n image_mask = np.zeros([h, w, 3], np.int32)\n for class_id, instance_id in class_instance:\n mask = np.zeros([h, w, 3], np.int32)\n r, g, b = object2color[(id2classid[class_id], instance_id)]\n \n intersect = (label_class==class_id)*(label_instance==instance_id)\n mask[:, :, 0] = r*intersect\n mask[:, :, 1] = g*intersect\n mask[:, :, 2] = b*intersect\n \n image_mask += mask\n \n return image_mask\n\ndef create_color2category(class_instance, object2color):\n category_ids = {}\n\n for img_id, pair_list in class_instance.items():\n category_ids[img_id] = {}\n for class_id, instance_id in pair_list:\n r, g, b = object2color[(id2classid[class_id], instance_id)]\n category_ids[img_id][str((r, g, b))] = class_id\n \n return category_ids\n\ndef create_sub_masks(mask_image):\n width, height = mask_image.size\n\n # Initialize a dictionary of sub-masks indexed by RGB colors\n sub_masks = {}\n for x in range(width):\n for y in range(height):\n # Get the RGB values of the pixel\n pixel = mask_image.getpixel((x,y))[:3]\n\n # If the pixel is not black...\n if pixel != (0, 0, 0):\n # Check to see if we've created a sub-mask...\n pixel_str = str(pixel)\n sub_mask = sub_masks.get(pixel_str)\n if sub_mask is None:\n # Create a sub-mask (one bit per pixel) and add to the dictionary\n # Note: we add 1 pixel of padding in each direction\n # because the contours module doesn't handle cases\n # where pixels bleed to the edge of the image\n sub_masks[pixel_str] = Image.new('1', (width+2, height+2))\n \n # Set the pixel value to 1 (default is 0), accounting for padding\n sub_masks[pixel_str].putpixel((x+1, y+1), 1)\n\n return sub_masks\n\n\n\ndef create_sub_mask_annotation(sub_mask, image_id, category_id, annotation_id, is_crowd):\n # Find contours (boundary lines) around each sub-mask\n # Note: there could be multiple contours if the object\n # is partially occluded. (E.g. 
an elephant behind a tree)\n contours = measure.find_contours(sub_mask, 0.5, positive_orientation='low')\n\n segmentations = []\n polygons = []\n for contour in contours:\n # Flip from (row, col) representation to (x, y)\n # and subtract the padding pixel\n for i in range(len(contour)):\n row, col = contour[i]\n contour[i] = (col - 1, row - 1)\n\n # Make a polygon and simplify it\n poly = Polygon(contour)\n poly = poly.simplify(1.0, preserve_topology=False)\n polygons.append(poly)\n try:\n segmentation = np.array(poly.exterior.coords).ravel().tolist()\n except Exception:\n # degenerate polygons (e.g. simplified away entirely) have no exterior ring\n continue\n segmentations.append(segmentation)\n\n # Combine the polygons to calculate the bounding box and area\n multi_poly = MultiPolygon(polygons)\n x, y, max_x, max_y = multi_poly.bounds\n width = max_x - x\n height = max_y - y\n bbox = (x, y, width, height)\n area = multi_poly.area\n\n annotation = {\n 'segmentation': segmentations,\n 'iscrowd': is_crowd,\n 'image_id': image_id,\n 'category_id': category_id,\n 'id': annotation_id,\n 'bbox': bbox,\n 'area': area\n }\n\n return annotation\n\ndef clean_image_list(image_list, annotations):\n useful_id = set()\n new_image_list = []\n \n print('Collecting image ids...')\n for annotation in tqdm.tqdm(annotations):\n useful_id.add(annotation['image_id'])\n print('Cleaning image ids...')\n for image in tqdm.tqdm(image_list):\n if image['id'] in useful_id:\n new_image_list += [image]\n return new_image_list\n ","sub_path":"create_dataset.py","file_name":"create_dataset.py","file_ext":"py","file_size_in_byte":9460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"504552106","text":"class ListNode(object):\n\n def __init__(self, val, next=None):\n self.val = val\n self.next = next\n\n\nclass TreeNode:\n def __init__(self, val):\n self.val = val\n self.left, self.right = None, None\n\n\nclass Solution:\n \"\"\"\n @param: head: The first node of linked list.\n @return: a tree node\n \"\"\"\n\n def sortedListToBST(self, head):\n result = self.divide_conquer(head)\n return result\n\n def divide_conquer(self, head):\n if head is None:\n return None\n if head.next is None:\n return TreeNode(head.val)\n\n # split the list around its middle: the node after 'mid' becomes the\n # subtree root, so both halves stay balanced\n mid = self.go_to_mid(head)\n\n right_head = mid.next\n mid.next = None\n\n root = TreeNode(right_head.val)\n root.left = self.divide_conquer(head)\n root.right = self.divide_conquer(right_head.next)\n\n return root\n\n def go_to_mid(self, head):\n # slow/fast pointers: when 'fast' reaches the end, 'slow' sits just before the middle\n dummy = ListNode(0)\n dummy.next = head\n\n slow = dummy\n fast = dummy.next\n\n while fast is not None and fast.next is not None:\n slow = slow.next\n fast = fast.next.next\n\n return slow\n","sub_path":"US Giants/Linked List/106. 
Convert Sorted List to Binary Search Tree.py","file_name":"106. Convert Sorted List to Binary Search Tree.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"389830651","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Aug 22 11:57:56 2018\n\n@author: Jarnd\n\"\"\"\n\nfrom qiskit import QuantumProgram\n# Creating program\nQ_program = QuantumProgram()\n\n# Creating registers\nq = Q_program.create_quantum_register(\"qr\", 1)\nc = Q_program.create_classical_register(\"cr\", 1)\n\nqc = Q_program.create_circuit(\"Hadamard\",[q],[c])\n\n###############################################################################\n# Specify the Hadamard circuit\n\nqc.h(q[0])","sub_path":"Circuits/circuit_hadamard.py","file_name":"circuit_hadamard.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"192865791","text":"import datetime\nimport time\nfrom datetime import date\nfrom datetime import datetime\nfrom time import strptime\nfrom time import strftime\n\nfrom itertools import groupby\nfrom operator import itemgetter\n\nimport math\nfrom openerp import netsvc\nfrom openerp import tools\nfrom openerp.osv import fields, osv\nfrom openerp.tools.translate import _\nimport psycopg2\n\n# 'approve1' and 'reject' are referenced by the action_* methods and _track below\nREIMBURSE_STATES =[\n\t('draft','Draft'),\n\t('verify','Verify'),\n\t('approve1','First Approve'),\n\t('approve2','Second Approve'),\n\t('reject','Rejected'),\n ]\n\nclass reimburse(osv.osv):\n _name=\"reimburse\"\n _rec_name=\"jenis\"\n _inherit = ['mail.thread', 'ir.needaction_mixin']\n _track = {\n 'state': {\n 'reimburse.mt_reimburse_approved': lambda self, cr, uid, obj, ctx=None: obj['state'] == 'approve2',\n 'reimburse.mt_reimburse_refused': lambda self, cr, uid, obj, ctx=None: obj['state'] == 'reject',\n 'reimburse.mt_reimburse_confirmed': lambda self, cr, uid, obj, ctx=None: obj['state'] == 'verify',\n }\n }\n\n def create(self, cr, uid, values, context=None):\n \"\"\" Override to avoid automatic logging of creation \"\"\"\n if context is None:\n context = {}\n context = dict(context, mail_create_nolog=True)\n name=values['employee_id']\n thn = values['tahun']\n tipe = values['type']\n jenis = values['jenis']\n if tipe == 'add' :\n tot_peng = values['nomin']\n obj = self.pool.get('reimburse')\n src = obj.search(cr,uid,[('type','=', 'remove'),('employee_id','=',name),('tahun','=',thn),('state','=','approve2')])\n \n for reim in obj.browse(cr,uid,src) :\n if tipe == 'remove':\n raise osv.except_osv(_('Warning!'), _('Anda sudah memiliki alokasi tunjangan pengobatan'))\n total = reim.employee_id.sisa_reimburse_pengobatan\n if total < tot_peng and jenis == 'obat':\n raise osv.except_osv(_('Warning!'), _('tunjangan pengobatan anda sudah habis')) \n if tipe == 'add' and src == [] and jenis != 'rawat':\n raise osv.except_osv(_('Warning!'), _('Anda tidak memiliki Tunjangan Pengobatan')) \n return super(reimburse, self).create(cr, uid, values, context=context)\n \n def employe(self, cr, uid,ids,vals,name,context=None): \n result ={}\n xyz = self.browse(cr,uid,ids)[0]\n for xxx in self.browse(cr,uid,ids):\n tahun = xxx.tahun\n thn = time.strftime('%Y')\n tipe = xxx.type\n if tahun == thn :\n if tipe == 'remove' :\n yyy = xxx.employee_id.name\n contract_obj = self.pool.get('hr.contract')\n co_id = contract_obj.search(cr, uid,[('employee_id', '=', yyy),('status','=',True)],context=context) \n if co_id != [] :\n ob= contract_obj.browse(cr, uid, co_id, context=context)[0] \n else :\n raise osv.except_osv(_('Warning!'), _('Anda Belum mempunyai kontrak')) 
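\n # the allocation amount is read from the employee's active contract ('status' = True);\n # 'jatah_reimburse_pengobatan' below is that contract's yearly medical allowance\n 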
jeje=xxx.jenis \n obcd=ob.department_id.id \n if jeje == 'obat': \n result[xxx.id]=ob.jatah_reimburse_pengobatan \n # year =str(datetime.now().year)\n # remove = 'remove'\n # approve2 = 'approve2'\n # reim_obj = self.pool.get('reimburse')\n # reim_src = reim_obj.search(cr,uid,[('type','=',remove),('state','=',approve2),('tahun','=',year),('employee_id','=',xyz.employee_id.id)],context=context)\n # import pdb;pdb.set_trace()\n # if reim_src == [] or xyz.type == 'add' :\n # obj = reim_obj.browse(cr,uid,reim_src,context=context)\n # else:\n # #raise osv.except_osv(_('Warning!'), _('Anda sudah memiliki alokasi tunjangan pengobatan'))\n # obj = reim_obj.browse(cr,uid,reim_src,context=context)\n return result \n \n def _employee_get(self, cr, uid, context=None):\n ids = self.pool.get('hr.employee').search(cr, uid, [('user_id', '=', uid)], context=context)\n if ids:\n return ids[0]\n return False\n\n def _compute_sisa_reimburse(self, cr, uid, ids, name, args, context=None):\n result = {}\n self.check_reimburse(cr,uid,ids, context=context)\n for hol in self.browse(cr, uid, ids, context=context):\n if hol.type=='remove':\n result[hol.id] = hol.nominal\n else:\n result[hol.id] = -hol.nominal\n return result \n\n def action_draft(self,cr,uid,ids,context=None): \n \treturn self.write(cr,uid,ids,{'state':REIMBURSE_STATES[0][0]},context=context)\n\n def action_verify(self,cr,uid,ids,context=None): \n \treturn self.write(cr,uid,ids,{'state':REIMBURSE_STATES[1][0]},context=context)\n \t\n def action_approve(self,cr,uid,ids,context=None): \n \treturn self.write(cr,uid,ids,{'state':REIMBURSE_STATES[2][0]},context=context) \n \t \n def action_approve2(self,cr,uid,ids,context=None): \n \treturn self.write(cr,uid,ids,{'state':REIMBURSE_STATES[3][0]},context=context)\n\n \n _columns={\n 'jenis':fields.selection([('obat','Pengobatan Tahunan'),('rawat','Perawatan Rumah Sakit')],'Jenis Tunjangan',required=True),\n 'employee_id':fields.many2one('hr.employee','Nama Karyawan',select=True,store=True,required=True),\n 'user_id':fields.related('employee_id', 'user_id', type='many2one', relation='res.users', string='User', store=True),\n 'department_id' : fields.related('employee_id','department_id',type='many2one',relation='hr.department',string='Departemen',store=True,readonly=True),\n 'nominal':fields.function(employe,string='Alokasi Tunjangan',store=True),\n 'nomin' : fields.float(\"Jumlah Permintaan\"),\n 'sisa_reimburse': fields.function(_compute_sisa_reimburse, string='Sisa Reimburse', store=True),\n 'tanggal':fields.date('Tanggal',required=True),\n 'keterangan':fields.char('Keterangan',200),\n 'bukti':fields.binary('Bukti File',),\n 'state': fields.selection(REIMBURSE_STATES, 'Status', readonly=True, help=\"Gives the status of the reimburse.\"), \n 'type': fields.selection([('remove','Alokasi Reimburse'),('add','Permohonan Reimburse')], 'Tipe Reimburse', required=True, readonly=True, states={'draft':[('readonly',False)], 'verify':[('readonly',False)]}, select=True), \n 'parent_id': fields.many2one('reimburse', 'Parent'), \n \"tahun\" : fields.char(\"Tahun\"),\n }\n _defaults = {\n 'employee_id': _employee_get,\n 'state': REIMBURSE_STATES[0][0],\n 'type': 'remove',\n 'user_id': lambda obj, cr, uid, context: uid, \n 'tahun' : lambda *a : time.strftime('%Y'), \n 'tanggal' :lambda *a: time.strftime('%Y-%m-%d'),\n 'jenis' : 'obat'\n } \n \n #def unlink(self, cr, uid, ids, context=None):\n # for rec in self.browse(cr, uid, ids, context=context):\n # if rec.state not in ['draft', 'reject', 'verify']:\n # raise 
osv.except_osv(_('Warning!'),_('Anda tidak bisa menghapus reimburse ketika statusnya %s.')%(rec.state))\n #return super(reimburse, self).unlink(cr, uid, ids, context) \n\n def check_reimburse(self, cr, uid, ids, context=None):\n #import pdb;pdb.set_trace() \n obj=self.browse(cr,uid,ids)[0]\n employee = obj.employee_id.name\n nominal = obj.nomin\n total_pemakaian = obj.employee_id.total_pemakaian_pengobatan\n total_tunjangan_peng= obj.employee_id.total_tunjangan_pengobatan\n total = nominal + total_pemakaian\n rim_obj = self.pool.get('reimburse')\n src_obj = rim_obj.search(cr,uid,[('employee_id','=',employee)])\n brw = rim_obj.browse(cr,uid,src_obj)\n x = 1\n y = 1\n year =str(datetime.now().year)\n #import pdb;pdb.set_trace()\n for record in brw:\n #if record.type == 'remove' and record.tahun == year : \n # y = 3\n if record.type == 'remove' and record.state == 'approve2' and record.tahun == year :\n x = 2\n #y = 2 \n if total <= total_tunjangan_peng or obj.state == 'approve2':\n x = 0\n if record.jenis == 'rawat' :\n x = 0 \n if x == 1 : \n raise osv.except_osv(_('Warning!'), _('Anda tidak memiliki Tunjangan Pengobatan')) \n elif x == 2 :\n raise osv.except_osv(_('Warning!'), _('tunjangan pengobatan anda sudah habis'))\n #elif y == 2 :\n #raise osv.except_osv(_('Warning!'), _('Anda sudah memiliki alokasi tunjangan pengobatan')) \n #elif y == 3 :\n # raise osv.except_osv(_('Warning!'), _('Sedang menunggu Approval management')) \n return True \n\n def reimburse_alloc(self,cr,uid, ids=None,context=None): \n year = str(datetime.now().year)\n contract_obj = self.pool.get('hr.contract')\n co_id = contract_obj.search(cr, uid,[('status','=',True)],context=context) \n employs = contract_obj.browse(cr, uid, co_id)\n values={}\n values1={}\n for xxx in employs:\n '''result ={}\n tahun = xxx.tahun\n thn = time.strftime('%Y')\n if tahun == thn :\n yyy = xxx.employee_id.name\n contract_obj = self.pool.get('hr.contract')\n co_id = contract_obj.search(cr, uid,[('employee_id', '=', yyy)],context=context) \n ob= contract_obj.browse(cr, uid, co_id, context=context)[0] \n jeje=xxx.jenis \n obcd=ob.department_id.id \n if jeje == 'obat': \n result[xxx.id]=ob.jatah_reimburse_pengobatan \n elif jeje == 'rawat': \n result[xxx.id]=ob.jatah_reimburse_perawatan \n '''\n values = {\n 'jenis':'obat',\n 'employee_id':xxx.employee_id.id,\n 'tahun': year,\n 'keterangan':\"tes\"\n }\n self.create(cr,uid,values,context=context)\n return True\n \nreimburse()\n\nclass hr_employee(osv.osv):\n _inherit=\"hr.employee\"\n\n def create(self, cr, uid, vals, context=None):\n\n if 'sisa_reimburse_pengobatan' in vals and not vals['sisa_reimburse_pengobatan']:\n del(vals['sisa_reimburse_pengobatan'])\n if 'sisa_reimburse_rs' in vals and not vals['sisa_reimburse_rs']:\n del(vals['sisa_reimburse_rs']) \n return super(hr_employee, self).create(cr, uid, vals, context=context)\n\n def _set_remaining_reimburse_obat(self, cr, uid, empl_id, name, value, arg, context=None):\n employee = self.browse(cr, uid, empl_id, context=context)\n diff = value - employee.sisa_reimburse_pengobatan\n\n reimburse_obj = self.pool.get('reimburse')\n if diff > 0:\n reim_id = reimburse_obj.create(cr, uid, {'name': _('Alokasi untuk %s') % employee.name, 'employee_id': employee.id, 'type': 'add', 'nominal': diff}, context=context)\n elif diff < 0:\n reim_id = reimburse_obj.create(cr, uid, {'employee_id': employee.id, 'type': 'remove','nominal': abs(diff)}, context=context)\n else:\n return False\n wf_service = netsvc.LocalService(\"workflow\")\n 
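# push the freshly created reimburse record through the approval workflow so the\n # manual adjustment to the remaining allowance takes effect immediately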
\n wf_service.trg_validate(uid, 'reimburse', reim_id, 'verify', cr)\n wf_service.trg_validate(uid, 'reimburse', reim_id, 'approve', cr)\n wf_service.trg_validate(uid, 'reimburse', reim_id, 'approve2', cr)\n return True\n\n def _set_remaining_reimburse_rawat(self, cr, uid, empl_id, name, value, arg, context=None):\n employee = self.browse(cr, uid, empl_id, context=context)\n diff = value - employee.sisa_reimburse_rs\n\n reimburse_obj = self.pool.get('reimburse')\n if diff > 0:\n reim_id = reimburse_obj.create(cr, uid, {'name': _('Alokasi untuk %s') % employee.name, 'employee_id': employee.id, 'type': 'add', 'nominal': diff}, context=context)\n elif diff < 0:\n reim_id = reimburse_obj.create(cr, uid, {'employee_id': employee.id, 'type': 'remove','nominal': abs(diff)}, context=context)\n else:\n return False\n wf_service = netsvc.LocalService(\"workflow\")\n wf_service.trg_validate(uid, 'reimburse', reim_id, 'verify', cr)\n wf_service.trg_validate(uid, 'reimburse', reim_id, 'approve', cr)\n wf_service.trg_validate(uid, 'reimburse', reim_id, 'approve2', cr)\n return True\n\n def _compute_sisa_reimburse_obat(self, cr, uid, ids, name, args, context=None):\n reimburse_obj = self.pool.get(\"reimburse\")\n result={}\n for reim in self.browse(cr,uid,ids):\n # reset the running totals for every employee\n yyy=0.0\n zz=0.0\n xxx=reim.name\n search_obj=reimburse_obj.search(cr,uid,[('employee_id','=',xxx)])\n reimb=reimburse_obj.browse(cr,uid,search_obj,context=context)\n for emp in reimb :\n xyz=emp.jenis\n thn = time.strftime('%Y')\n tahun =emp.tahun\n zzz = emp.nominal\n stt = emp.state\n if tahun == thn and stt == 'approve2':\n if xyz == \"obat\":\n yyy += emp.nomin\n if zzz != False and xyz == \"obat\" :\n zz = emp.nominal\n result[reim.id] =zz - yyy\n return result\n \n def _compute_sisa_reimburse_rawat(self, cr, uid, ids, name, args, context=None):\n reimburse_obj = self.pool.get(\"reimburse\")\n result={}\n for reim in self.browse(cr,uid,ids):\n # reset the running totals for every employee\n yyy=0.0\n zz=0.0\n xxx=reim.name\n search_obj=reimburse_obj.search(cr,uid,[('employee_id','=',xxx)])\n reimb=reimburse_obj.browse(cr,uid,search_obj,context=context)\n for emp in reimb :\n xyz=emp.jenis\n thn = time.strftime('%Y')\n tahun =emp.tahun\n zzz = emp.nominal\n stt = emp.state\n if tahun == thn and stt == 'approve2':\n if xyz == \"rawat\":\n yyy += emp.nomin\n if zzz != False and xyz == \"rawat\" :\n zz = emp.nominal\n result[reim.id] =zz - yyy\n return result \n\n def _get_reimburse_status(self, cr, uid, ids, name, args, context=None):\n reimburse_obj = self.pool.get('reimburse')\n reimburse_id = reimburse_obj.search(cr, uid,\n [('employee_id', 'in', ids),('type','=','remove'),('state','not in',('cancel','reject'))],\n context=context)\n result = {}\n for id in ids:\n result[id] = {\n 'current_reimburse_state': False,\n }\n for reim in self.pool.get('reimburse').browse(cr, uid, reimburse_id, context=context):\n result[reim.employee_id.id]['current_reimburse_state'] = reim.state\n return result\n\n def total_tunjangan_pengobatan(self,cr,uid,ids,name,arg,context=None):\n xxx=self.browse(cr,uid,ids)[0]\n employee=xxx.name\n obj=self.pool.get('reimburse')\n search_obj=obj.search(cr,uid,[('employee_id','=',employee)])\n reimb=obj.browse(cr,uid,search_obj,context=context)\n result={}\n xyz = 0\n year =str(datetime.now().year)\n for tunjangan in reimb :\n if tunjangan.tahun == 
year and tunjangan.jenis=='obat' and tunjangan.type == 'remove' and tunjangan.state == 'approve2':\n xyz=tunjangan.nominal\n result[xxx.id]=xyz \n return result \n\n def total_pemakaian_pengobatan(self, cr ,uid, ids,name,arg,context=None):\n #import pdb;pdb.set_trace() \n tunjangan = 0.0\n sisa = 0.0\n xxx= self.browse(cr,uid,ids)[0]\n tunjangan = xxx.total_tunjangan_pengobatan \n sisa = xxx.sisa_reimburse_pengobatan\n result={}\n total = tunjangan - sisa\n result[xxx.id]=total\n return result \n\n def total_reimburse_rs(self, cr, uid, ids, name, arg, context=None):\n xxx=self.browse(cr,uid,ids)[0]\n employee=xxx.name\n obj=self.pool.get('reimburse')\n search_obj=obj.search(cr,uid,[('employee_id','=',employee)])\n reimb=obj.browse(cr,uid,search_obj,context=context)\n result={}\n year =str(datetime.now().year)\n totals = 0\n for tunjangan in reimb :\n if tunjangan.tahun == year and tunjangan.jenis=='rawat' and tunjangan.type == 'add' and tunjangan.state == 'approve2':\n if tunjangan.nomin == False :\n total = 0 + totals\n else : \n total=tunjangan.nomin+ totals\n totals = total\n result[xxx.id] = totals \n return result \n\n _columns = {\n 'sisa_reimburse_pengobatan': fields.function(_compute_sisa_reimburse_obat, string='Sisa Tunjangan Pengobatan', fnct_inv=_set_remaining_reimburse_obat, type=\"float\",readonly=True),\n 'sisa_reimburse_rs': fields.function(_compute_sisa_reimburse_rawat, string='Sisa Reimburse Perawatan RS', fnct_inv=_set_remaining_reimburse_rawat, type=\"float\",),\n 'reimburse_ids':fields.one2many('reimburse','employee_id','Daftar Reimburse',readonly=True),\n 'total_reimburse_rs' :fields.function(total_reimburse_rs,string='Total Perawatan Rumah Sakit',readonly=True),\n 'total_tunjangan_pengobatan' :fields.function(total_tunjangan_pengobatan,string='Total Tunjangan Pengobatan'),\n 'total_pemakaian_pengobatan':fields.function(total_pemakaian_pengobatan,string='Total Pemakaian Pengobatan'),\n\n }\n\nhr_employee()\n\nclass hr_contract(osv.osv):\n _name = 'hr.contract'\n _inherit = 'hr.contract'\n \n def _hitung_reimburse_obat(self, cr, uid, ids, wage, jatah_reimburse_pengobatan, arg, context=None):\n rec = self.browse(cr, uid, ids, context=context)[0]\n typ=rec.type_id.reimburse_pengobatan\n wag=rec.wage\n date_start =rec.date_start\n date_end = rec.date_end\n dt_yr = datetime.strptime(date_start,\"%Y-%m-%d\").year\n dt_bln = datetime.strptime(date_start,\"%Y-%m-%d\").month \n dten_yr = False\n dten_bln = False\n if date_end != False :\n dten_yr = datetime.strptime(date_end,\"%Y-%m-%d\").year\n dten_bln = datetime.strptime(date_end,\"%Y-%m-%d\").month \n year =datetime.now().year\n month =datetime.now().month\n result = {}\n for re in self.browse(cr, uid, ids, context=context):\n if dt_yr == year : \n bulan = float(13 - dt_bln) \n elif dten_yr == year and dt_yr == year :\n bulan = float(dten_bln) \n elif date_end != False and dten_yr == year :\n bulan = float(dten_bln)\n else :\n bulan = float(12) \n #if wag:\n total = wag * typ\n jatah = float((wag * typ) * (bulan/12))\n if total >= rec.type_id.range_pengobatan :\n result [re.id]= float(rec.type_id.range_pengobatan * (bulan/12))\n else :\n result [re.id]= jatah\n return result \n \n def _hitung_reimburse_rawat(self, cr, uid, ids, wage, jatah_reimburse_perawatan, arg, context=None):\n rec = self.browse(cr, uid, ids, context=context)[0]\n typ=rec.type_id.reimburse_perawatan\n wag=rec.wage\n result = {}\n for r in self.browse(cr, uid, ids, context=context):\n #if wag:\n jatah = typ * wag\n result [r.id]= jatah\n return result \n 
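# both allowance amounts are non-stored function fields, recomputed from the active\n # contract's wage and contract type on every read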
\n _columns = {\n \"jatah_reimburse_pengobatan\":fields.function(_hitung_reimburse_obat, type='float', obj='hr.contract', method=True, store=False,string='Pengobatan Tahunan',readonly=True),\n \"jatah_reimburse_perawatan\":fields.function(_hitung_reimburse_rawat, type='float', obj='hr.contract', method=True, store=False,string='Perawatan Rumah Sakit',readonly=True),\n\n }\nhr_contract() \n\n","sub_path":"lucas_marin/hrd_ppi_reimburse/reimburse.py","file_name":"reimburse.py","file_ext":"py","file_size_in_byte":20466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"292975928","text":"import pandas as pd\nfrom datetime import datetime\nimport sys\n\nhosts = [f\"br{str(i).zfill(2)}\" for i in range(1, 31)]\n#filepaths = [ f\"backup/deploy_test/{host}.csv\" for host in hosts]\n#filepaths = [ f\"backup/docker_512_minus1/{host}.csv\" for host in hosts]\nfilepaths = [\n f\"./backup/monitor/{host}.csv\" for host in hosts]\n\n\ndef clean_and_addheader(filepath):\n new_lines = []\n with open(filepath) as f:\n lines = f.readlines()\n started_time = float(lines[0].strip().split(\",\")[0])\n for line in lines:\n striped_words = line.strip().split(\",\")\n if len(striped_words) != 3:\n continue\n try:\n this_time = round(float(striped_words[0]), 1) # 小数点第一位で四捨五入\n except:\n continue # 時間以外のものが混ざるなら殺す\n striped_words[0] = this_time\n striped_words[1] = int(striped_words[1])\n striped_words[2] = int(striped_words[2])\n new_lines.append(striped_words)\n return new_lines\n\n\ndef calc_convergence(merged_df, completed_count):\n hosts = [f\"br{str(i).zfill(2)}\" for i in range(1, 31)]\n columns_j = [f\"{host}_jool\" for host in hosts]\n query_text = \"\"\n for index, column in enumerate(columns_j):\n if index == 0:\n pass\n else:\n query_text += \" & \"\n query_text += f\"{column} == {completed_count}\"\n\n convergenced_time = merged_df.query(query_text).head(1).index\n\n return convergenced_time - merged_df.head(1).index\n\n\nif __name__ == \"__main__\":\n\n # get args\n COMPLETE_EAMT_COUNT = sys.argv[1]\n # get docker started/stopped time\n docker_log_path = \"./backup/pod/docker-log.txt\"\n standard_time = 0\n with open(docker_log_path, \"r\") as f:\n standard_time = int(f.read().strip())\n\n dfs = []\n # キレイにしてpandasに挿入\n for index, filepath in enumerate(filepaths):\n host = hosts[index]\n df = pd.DataFrame(clean_and_addheader(filepath), columns=[\n \"time\", f\"{host}_gobgp\", f\"{host}_jool\"])\n selected = df.query(f\"time >= {standard_time} \")\n selected[\"time\"] = selected[\"time\"] - standard_time\n dfs.append(selected)\n merged = pd.concat(dfs, sort=True)\n # merged[\"time\"] = pd.to_datetime(merged['time'].astype(float), unit='s')\n merged = merged.sort_values(\"time\")\n merged = merged.fillna(method='ffill')\n merged = merged.reset_index(drop=True)\n merged = merged.groupby('time').mean()\n\n convergenced_time = calc_convergence(merged, COMPLETE_EAMT_COUNT)\n\n print(convergenced_time[0])\n","sub_path":"ansible_setup/roles/parse_monitor_log/test/convergence.py","file_name":"convergence.py","file_ext":"py","file_size_in_byte":2634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"103743324","text":"# Review exercise 1\n\nwhile True:\n try:\n num = input(\"Enter an integer:\")\n print(int(num))\n break\n except ValueError:\n print(\"Try again.\")\n\n# Review exercise 2\n\nstring = input(\"Give us a string: \")\n\ntry:\n n = int(input(\"Give us an integer: \"))\n print(string[n])\nexcept 
ValueError:\n print(\"Invalid number.\")\nexcept IndexError:\n print(\"Invalid index.\")","sub_path":"Ch 08 - Conditional Logic and Control Flow/08.6-recover-errors.py","file_name":"08.6-recover-errors.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"560039064","text":"import requests\nimport json\n\ndb_url = \"http://iesc-s1.mit.edu/608dev/sandbox/adchurch/final/database.py\"\n\nr = requests.get(db_url)\ndata = r.json()\ntable_string = \"\"\nfor result in data:\n other_string = '''\n ''' + str(result['id']) + '''\n ''' + str(result['time']) + ''' \n ''' + str(result['result']) + '''\n '''\n table_string = other_string + table_string\n\nprint(table_string)\n","sub_path":"demo/website_json_test.py","file_name":"website_json_test.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"160735675","text":"from discord.ext import commands\r\nimport discord\r\nimport datetime\r\n\r\ndef dump(obj):\r\n\toutput = \"\"\r\n\tfor attr in dir(obj):\r\n\t\toutput += \"\\nobj.%s = %r\" % (attr, getattr(obj, attr))\r\n\t\tprint(\"obj.%s = %r\" % (attr, getattr(obj, attr)))\r\n\treturn output\r\n\r\n\r\nclass General(commands.Cog):\r\n\tdef __init__(self, bot):\r\n\t\tself.bot = bot\r\n\r\n\t@commands.command()\r\n\tasync def userinfo(self, ctx, user: discord.Member=None):\r\n\t\t#await ctx.send(f\"```py\\n{dump(user)}```\")\r\n\r\n\t\tif not user:\r\n\t\t\tuser = ctx.message.author\r\n\t\telif type(user)==\"str\":\r\n\t\t\tuser = self.bot.get_user(int(user))\r\n\r\n\t\t# Very very shit \r\n\t\t\"\"\"\r\n\t\tawait ctx.send(str(user.avatar_url))\r\n\t\trequest.urlretrieve(str(user.avatar_url), \"temp.webp\")\r\n\t\t#filename = wget.download(user.avatar_url, out=\"temp.webp\")\r\n\t\timage = Image.open(\"temp.webp\").convert(\"RGB\")\r\n\t\timage.save(\"temp.png\", \"PNG\")\r\n\t\t\r\n\t\tf = discord.File(\"temp.png\", filename=\"temp.png\")\r\n\t\t#await messagable.send(file=f, embed=e)\r\n\t\t\"\"\"\r\n\t\toutput = \"\"\r\n\t\tfor i in user.roles:\r\n\t\t\toutput += i.mention\r\n\r\n\r\n\t\tembed=discord.Embed(title=user.name, description=user.mention, color=user.color, timestamp=ctx.message.created_at)\r\n\t\t#embed.set_thumbnail(url=\"attachment://temp.webp\")\r\n\t\tembed.set_thumbnail(url=user.avatar_url)\r\n\t\tembed.set_image(url=\"attachment://temp.png\")\r\n\t\tembed.add_field(name=\"Nickname\", value=user.display_name, inline=False)\r\n\t\tembed.add_field(name=\"Joined on\", value=user.joined_at.date(), inline=True)\r\n\t\tembed.add_field(name=\"Status\", value=user.status, inline=True)\r\n\t\tembed.add_field(name=\"Created account on\", value=user.created_at.date(), inline=True)\r\n\t\tembed.add_field(name=\"Roles\", value=output, inline=True)\r\n\t\tembed.set_footer(text=f\"ID: {user.id}\")\r\n\t\tawait ctx.send(embed=embed)\r\n\t\t#os.remove(\"temp.webp\")\r\n\t\t#os.remove(\"temp.png\")\r\n\r\n\t@commands.command()\r\n\tasync def coop(self, ctx, user: discord.Member=None):\r\n\t\tif not user:\r\n\t\t\tuser = ctx.message.author\r\n\t\telif type(user)==\"str\":\r\n\t\t\tuser = self.bot.get_user(int(user))\r\n\r\n\t\tcoop_role = ctx.guild.get_role(694261282861219952)\r\n\r\n\t\tif coop_role in user.roles:\r\n\t\t\tawait user.remove_roles(coop_role)\r\n\t\t\tawait ctx.send('You have left coop gang')\r\n\t\telse:\r\n\t\t\tawait user.add_roles(coop_role)\r\n\t\t\tawait ctx.send(\"You are now in the coop 
gang\")\r\n\r\ndef setup(bot):\r\n\tbot.add_cog(General(bot))\r\n","sub_path":"cogs/general.py","file_name":"general.py","file_ext":"py","file_size_in_byte":2292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"258588073","text":"with open('../../../Downloads/rosalind_dna_1_dataset.txt') as x:\n\t\ta = 0\n\t\tc = 0\n\t\tg = 0\n\t\tt = 0\n\t\t#Basic loop and count. For every matched string adding 1 to the corrosponding variable.\n\t\tfor i in x:\n\t\t\tif i == 'A':\n\t\t\t\ta += 1\n\t\t\telif i == 'C':\n\t\t\t\tc += 1\n\t\t\telif i == 'G':\n\t\t\t\tg += 1\n\t\t\telif i == 'T':\n\t\t\t\tt += 1\n\t\t\t\t\n\nprint(a,c,g,t, end='')\n","sub_path":"stronghold/counting.py","file_name":"counting.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"291650140","text":"import gym\nimport gym_hyrja\nimport time\nimport numpy as np\nimport tensorflow as tf\n\ndef set_move(val):\n if val==1:\n return 1\n else:\n return -1\n#\n# env\ntf.reset_default_graph()\nenv = gym.make(\"Hyrja-v0\")\nobs = env.reset()\n# param\ninputs1 = tf.placeholder(shape=[1,187],dtype=tf.float32) # tensor input\nW1 = tf.Variable(tf.random_uniform([187,80],-0.1,0.1))\nW2 = tf.Variable(tf.random_uniform([80,50],-0.1,0.1))\nW3 = tf.Variable(tf.random_uniform([50,32768],-0.1,0.1))\nlayer1_out=tf.matmul(inputs1,W1)\nlayer2_out=tf.matmul(layer1_out,W2)\nQout=tf.matmul(layer2_out,W3)\n# action to max Q(s,a)\npredict = tf.argmax(Qout,1) # max Q_network for next action | argmax(input,axis=None,name=None,dimension=None) 0 for\n# Below we obtain the loss by taking the sum of squares difference between the target and prediction Q values.\nnextQ = tf.placeholder(shape=[1,32768],dtype=tf.float32) # next Q value for 4 actions\nloss = tf.reduce_sum(tf.square(nextQ - Qout)) # summation of squared loss\ntrainer = tf.train.GradientDescentOptimizer(learning_rate=1) # set optimization method\nupdateModel = trainer.minimize(loss) # set objective function\n\ny=0.99 # discount rate\n#\nexploration_rate_const=0.05\naction_map={0:[-1,-1,-1],\n 1:[-1,-1,0],\n 2: [-1, -1, 1],\n 3: [-1, 0, -1],\n 4: [-1, 0, 0],\n 5: [-1, 0, 1],\n 6: [-1, 1, -1],\n 7: [-1, 1, 0],\n 8: [-1, 1, 1],\n 9:[0,-1,-1],\n 10:[0,-1,0],\n 11: [0, -1, 1],\n 12: [0, 0, -1],\n 13: [0, 0, 0],\n 14: [0, 0, 1],\n 15: [0, 1, -1],\n 16: [0, 1, 0],\n 17: [0, 1, 1],\n 18:[1,-1,-1],\n 19:[1,-1,0],\n 20: [1, -1, 1],\n 21: [1, 0, -1],\n 22: [1, 0, 0],\n 23: [1, 0, 1],\n 24: [1, 1, -1],\n 25: [1, 1, 0],\n 26: [1, 1, 1]\n }\naction_map_flex_dir = {}\ncounter = 0\n# action mapping\n# player 1\nfor i1 in range(2): # move or not\n for k1 in range(2): # x axis\n for l1 in range(2): # y axis:\n # player 2\n for i2 in range(2): # move or not\n for k2 in range(2): # x axis\n for l2 in range(2): # y axis:\n # player 3\n for i3 in range(2): # move or not\n for k3 in range(2): # x axis\n for l3 in range(2): # y axis:\n # player 4\n for i4 in range(2): # move or not\n for k4 in range(2): # x axis\n for l4 in range(2): # y axis:\n # player 5\n for i5 in range(2): # move or not\n for k5 in range(2): # x axis\n for l5 in range(2): # y axis:\n action_map_flex_dir[counter] = [i1, set_move(k1), set_move(l1),\n i2, set_move(k2), set_move(l2),\n i3, set_move(k3), set_move(l3),\n i4, set_move(k4), set_move(l4),\n i5, set_move(k5), set_move(l5)]\n counter += 1\n#\nwith tf.Session() as sess:\n # initialize weights\n sess.run(tf.global_variables_initializer())\n 
print(\"variable initalized, env reset, start first trial\")\n prev_ = 0\n iter=0\n for _ in range(15000000):\n env.render()\n obs=[]\n for k1 in env.state:\n for k2 in k1:\n obs.append(k2)\n #print(\"start calculate Q values: \")\n obs=np.reshape(obs,(-1,187))\n action_index, allQ = sess.run([predict, Qout], feed_dict={inputs1:obs}) # var:new_data for feeding\n #print(\"Q values calculated, mapping action\")\n action = action_map_flex_dir[action_index[0]]\n if np.random.rand(1) < exploration_rate_const*(1-(np.log10(_+1)/7.17)): # random action\n action = env.action_space.sample()\n #print(\"action: \", action)\n new_obs=[]\n new_obs_raw, reward, done, info = env.step(np.array(action))\n for k1 in new_obs_raw:\n for k2 in k1:\n new_obs.append(k2)\n new_obs = np.reshape(new_obs, (-1, 187))\n # Obtain the Q' values by feeding the new state through our network\n Q_for_next_step = sess.run(Qout, feed_dict={inputs1:new_obs})\n # O btain maxQ' and set our target value for chosen action.\n max_Q_for_next_step = np.max(Q_for_next_step)\n targetQ = allQ\n targetQ[0, action_index[0]] = reward + y * max_Q_for_next_step\n # Train our network using target and predicted Q values\n sess.run(updateModel, feed_dict={inputs1: obs , nextQ: targetQ})\n obs = new_obs\n # results\n if done:\n # report\n print(\"iter: \",iter,\", \", obs[-1][-1]*100,\"% BOSS remaining HP\",\" steps: \",_-prev_)\n obs=env.reset()\n iter+=1\n prev_=_\n # env.close()\n\n","sub_path":"gym_hyrja/run/random_action.py","file_name":"random_action.py","file_ext":"py","file_size_in_byte":5528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"238126541","text":"from django.db import models\nfrom offers.models import Offers\nfrom core.models import *\nimport datetime\nfrom django.utils import timezone\n\n# Create your models here.\nclass Cupons(models.Model): # Модель для хранения купонов\n\ttitle = models.CharField(max_length = 256, verbose_name = 'Название', blank=True)\n\n\tlink = models.CharField(max_length = 1000, verbose_name = 'Ссылка', blank=True)\n\toffer = models.ForeignKey(Offers, verbose_name = 'Офер', related_name = 'cupons')\n\tdescription = models.TextField(max_length = 5000, blank=True, verbose_name = 'Описание')\n\tmetadescription = models.TextField(max_length = 5000, blank=True, verbose_name = 'Описание')\n\tdiscount = models.CharField(max_length = 256, verbose_name = 'Скидка', blank=True)\n\tcode = models.CharField(max_length = 256, verbose_name = 'Промокод', blank=True)\n\tidbroker = models.CharField(max_length = 256, verbose_name = 'id у брокера', blank=True)\n\tdate = models.DateTimeField(auto_now_add=True)\n\tdate_end = models.DateTimeField(blank=True, null=True, verbose_name = 'Дата окончания')\n\tactive = models.BooleanField(default=True)\n\tclicks = models.IntegerField(default = 0)\n\tman = models.BooleanField(default=False)\n\tuser = models.BooleanField(default=False)\n\thide = models.BooleanField(default=False)\n\tpath = models.CharField('Путь', max_length=500, blank=True)\n\n\tdef __str__(self):\n\t\treturn self.title\n\n\tclass Meta:\n\t\tverbose_name = 'Купон'\n\t\tverbose_name_plural = 'Купоны'\n\n\tdef save(self, *args, **kwargs):\n\t\tif self.date_end:\n\t\t\tdate_end = self.date_end.strftime('%Y-%m-%d')\n\t\telse:\n\t\t\tdate_end = 'не указана'\n\t\tself.metadescription = 'Подробная информация о купоне %s от %s. Дата окончания купона %s.' 
% (self.title.strip(), self.offer.shop.title, date_end)\n\t\tsuper(Cupons, self).save(*args, **kwargs)\n\t\t\n\ndef add_cupons(mass, massid, broker):\n\tsumm = 0\n\tcupons = Cupons.objects.filter(offer__broker__id = broker).filter(active = True).filter(man = False).select_related('offer__status')\n\tfor cupon in cupons:\n\t\t# print(cupon.id)\n\n\t\tif cupon.idbroker not in massid or cupon.offer.status != 2:\n\t\t\tcupon.active = False\n\t\t\tcupon.save()\n\t\t\turl = Urls.objects.filter(urltype = 4).filter(link = cupon.id)\n\t\t\tif len(url) > 0:\n\t\t\t\turl = url[0]\n\t\t\t\turl.sitemap = False\n\t\t\t\turl.save()\n\t\ttry:\n\t\t\tif cupon.date_end < timezone.now():\n\t\t\t\tcupon.active = False\n\t\t\t\tcupon.save()\n\t\t\t\turl = Urls.objects.filter(urltype = 4).filter(link = cupon.id)\n\t\t\t\tif len(url) > 0:\n\t\t\t\t\turl = url[0]\n\t\t\t\t\turl.sitemap = False\n\t\t\t\t\turl.save()\n\t\texcept:\n\t\t\tif cupon.date_end < datetime.datetime.now():\n\t\t\t\tcupon.active = False\n\t\t\t\tcupon.save()\n\t\t\t\turl = Urls.objects.filter(urltype = 4).filter(link = cupon.id)\n\t\t\t\tif len(url) > 0:\n\t\t\t\t\turl = url[0]\n\t\t\t\t\turl.sitemap = False\n\t\t\t\t\turl.save()\n\t\n\n\tfor cup in mass:\n\t\t# try:\n\t\t\t# print(cup)\n\t\t\tcup['date_end'] = datetime.datetime.strptime(cup['date_end'], '%Y-%m-%d')\n\t\t\tcupons = Cupons.objects.filter(idbroker = cup['idbroker']).filter(offer__broker__id = broker)\n\t\t\tif cupons.count() == 0:\n\t\t\t\tcupons = Cupons.objects.filter(title = cup['title'].strip()).filter(date_end = cup['date_end'])\n\t\t\tif cupons.count() == 0:\n\t\t\t\toffer = Offers.objects.filter(broker__id = broker).filter(idbroker = cup['offer'])\n\t\t\t\tif offer.count() > 0:\n\t\t\t\t\toffer = offer[0]\n\t\t\t\t\tshop = offer.shop\n\t\t\t\t\tif cup['goto_link'] == '':\n\t\t\t\t\t\tcup['goto_link'] = offer.link\n\t\t\t\t\tif offer and offer.status == 2 and shop and shop.active:\n\t\t\t\t\t\tpath = shop.path\n\t\t\t\t\t\tn = 0\n\t\t\t\t\t\thumlinko = '%s%s' % (path, cup['humlink'][:98-len(path)])\n\t\t\t\t\t\tif humlinko[-1] == '-':\n\t\t\t\t\t\t\thumlink = '%s/' % humlinko[:-1]\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\thumlink = '%s/' % humlinko\n\t\t\t\t\t\turlst = Urls.objects.filter(path = humlink)\n\t\t\t\t\t\twhile urlst.count() != 0:\n\t\t\t\t\t\t\thumlink = '%s-%s/' % (humlinko[:-1*len(str(n))], n)\n\t\t\t\t\t\t\turlst = Urls.objects.filter(path = humlink)\n\t\t\t\t\t\t\tn += 1\n\n\n\t\t\t\t\t\tcupon = Cupons(title = cup['title'].strip(), link = cup['goto_link'], offer = offer, description = cup['description'], discount = cup['discount'], code = cup['promocode'], date_end = cup['date_end'], idbroker = cup['idbroker'], path = humlink)\n\t\t\t\t\t\tcupon.save()\n\t\t\t\t\t\turl = Urls(title = cup['title'][:100], func = 'cupon', path = humlink, urltype = 4, link = cupon.id, template = 'cupon.html', active = True, sitemap = False, priority = 7)\n\t\t\t\t\t\turl.save()\n\t\t\t\t\t\tgroup = UrlsGroups.objects.get(pk = 6)\n\t\t\t\t\t\turl.groups.add(group)\n\t\t\t\t\t\turl.save()\n\t\t\t\t\t\tsumm += 1\n\t\t\t\t\t\tprint(humlink)\n\n\n\t\t\t# if len(cupons) == 1 and cup['offer'] == '3529':\n\t\t\t# \tprint(cup)\n\t\t\tif len(cupons) == 1:\n\t\t\t\tcupon = cupons[0]\n\t\t\t\tcupon.date_end = cup['date_end']\n\t\t\t\ttry:\n\t\t\t\t\tif cupon.date_end >= timezone.now():\n\t\t\t\t\t\tcupon.active = True\n\t\t\t\t\t\tcupon.save()\n\t\t\t\t\t\turl = Urls.objects.filter(urltype = 4).filter(link = cupon.id)\n\t\t\t\t\t\tif len(url) > 0:\n\t\t\t\t\t\t\turl = url[0]\n\t\t\t\t\t\t\t# 
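Reactivated coupons stay out of the sitemap; only the active flag is\n\t\t\t\t\t\t\t# restored below:\n\t\t\t\t\t\t\t# 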
url.sitemap = True\n\t\t\t\t\t\t\turl.sitemap = False\n\t\t\t\t\t\t\turl.active = True\n\t\t\t\t\t\t\turl.save()\n\t\t\t\texcept:\n\t\t\t\t\tif cupon.date_end >= datetime.datetime.now():\n\t\t\t\t\t\tcupon.active = True\n\t\t\t\t\t\tcupon.save()\n\t\t\t\t\t\turl = Urls.objects.filter(urltype = 4).filter(link = cupon.id)\n\t\t\t\t\t\tif len(url) > 0:\n\t\t\t\t\t\t\turl = url[0]\n\t\t\t\t\t\t\t# url.sitemap = True\n\t\t\t\t\t\t\turl.sitemap = False\n\t\t\t\t\t\t\turl.active = True\n\t\t\t\t\t\t\turl.save()\n\t\t# except:\n\t\t# \tprint cup\n\n\tprint(summ)\n\nclass ManCupons(models.Model):\n\tCTYPE = (\n\t\t(1, 'Промокод'),\n\t\t(2, 'Акция'),\n\t)\n\tlink = models.CharField(max_length = 1000, verbose_name = 'Ссылка', blank=True)\n\tname = models.CharField(max_length = 256, verbose_name = 'Имя', blank=True)\n\tcode = models.CharField(max_length = 256, verbose_name = 'Промокод', blank=True)\n\tctype = models.IntegerField(choices=CTYPE, default = 0, verbose_name = 'Тип')\n\tdescription = models.TextField(max_length = 1000, verbose_name = 'Описание', blank=True)\n\tdate_end = models.DateTimeField(blank=True, null=True)\n\tdate_add = models.DateTimeField(auto_now_add=True, null=True)\n\tiduser = models.IntegerField(default = 0, verbose_name = 'Пользователь')\n\toffer = models.ForeignKey(Offers, verbose_name = 'Офер', null=True)\n\tcheck = models.BooleanField(default=False)\n\tpay = models.BooleanField(default=False)\n\n\tdef print_actions(self):\n\t\tif not self.check and not self.pay:\n\t\t\t# the two %s placeholders are an assumption: the markup that\n\t\t\t# originally wrapped these labels was lost, but both ids were passed\n\t\t\treturn 'Оплатить %s Отказать %s' % (self.id, self.id)\n\t\telse:\n\t\t\tif self.pay:\n\t\t\t\treturn 'оплачено'\n\t\t\telif self.check:\n\t\t\t\treturn 'отклонено
'\n\t\t\telse:\n\t\t\t\treturn 'error'","sub_path":"cupons/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"233529986","text":"# dummy easy\nanswer = 0\nanswered = False\nfor a in range(1,999):\n for b in range(2, 999):\n c = (a*a + b*b)**(1/2)\n if c - int(c) == 0:\n if a + b + c == 1000:\n answer = a*b*c\n answered = True\n else:\n continue\n else:\n continue\n if answered == True:\n break\nprint(int(answer))\n","sub_path":"005 Percent Problems/009 - Special Pythagorean Triplet/Problem9.py","file_name":"Problem9.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"83839814","text":"import os\nimport shutil\nimport random\n\n# CSV_DIR_NO_TRANSPORT = 'C:\\\\Users\\\\ucfaalf\\\\Documents\\\\Projects\\\\AcousticAnalysis\\\\2013Random\\\\LabelsCSV\\\\'\n# CSV_DIR_TRANSPORT24kHz = 'C:\\\\Users\\\\ucfaalf\\\\Documents\\\\Projects\\\\AcousticAnalysis\\\\2013Random\\\\LabelsCSVTransport\\\\24000HzSR'\n# CSV_DIR_TRANSPORT41kHz = 'C:\\\\Users\\\\ucfaalf\\\\Documents\\\\Projects\\\\AcousticAnalysis\\\\2013Random\\\\LabelsCSVTransport\\\\41000HzSR'\nCSV_DIR = 'C:\\\\Users\\\\ucfaalf\\\\Dropbox\\\\EngD\\\\Data\\\\2013Random\\\\allLabelFiles\\\\'\nWAV_DIR = 'C:\\\\Users\\\\ucfaalf\\\\Documents\\\\Projects\\\\AcousticAnalysis\\\\2013Random\\\\Amalgamated_Files_24kHz\\\\'\n\n# csvFilesNoTransport = os.listdir(CSV_DIR_NO_TRANSPORT)\n# csvFilesTransport24kHz = os.listdir(CSV_DIR_TRANSPORT24kHz)\n# csvFilesTransport41kHz = os.listdir(CSV_DIR_TRANSPORT41kHz)\ncsvFiles = os.listdir(CSV_DIR)\nwavFiles = os.listdir(WAV_DIR)\n\ncsvFileList = []\nfor csvFile in csvFiles:\n csvBaseName = csvFile[:-14]\n csvFileList.append(csvBaseName)\n\n# for csvFile in csvFilesTransport24kHz:\n# csvBaseName = csvFile[:-14]\n# csvFileList.append(csvBaseName) \n\n# for csvFile in csvFilesTransport41kHz:\n# csvBaseName = csvFile[:-14]\n# csvFileList.append(csvBaseName)\n\n# csvFileList = list(set(csvFileList)) \n \nwavFileList = []\nfor wavFile in wavFiles:\n wavBaseName = wavFile[:-4]\n wavFileList.append(wavBaseName)\n\nwavWOcsv = []\nfor wav in wavFileList:\n if wav not in csvFileList:\n wavWOcsv.append(wav)\n\n# verityLabels = []\n# for wavFile in wavWOcsv:\n# if wavFile[:3] in ('BR4', 'BR2', 'IG6', 'E29', 'TW7', 'BR6', 'DA5', 'RM4', 'CM1'):\n# verityLabels.append(wavFile)\n\n# print verityLabels\n\n# dirExtention = \"C:\\\\Users\\\\ucfaalf\\\\Documents\\\\Projects\\\\AcousticAnalysis\\\\2013Random\\\\Amalgamated_Files_24kHz\\\\\"\n# fileList = os.listdir(WAV_DIR)\n\n# wav2FileList = []\n# for wav2 in fileList:\n# \twav2BaseName = wav2[:-4]\n# \twav2FileList.append(wav2BaseName)\n\n# fileListBlankFiles = [x for x in wavFileList if x in verityLabels]\n\n# print len(fileListBlankFiles)\n\n# randomNumbers = random.sample(xrange(len(fileListBlankFiles)), 2)\n# fileSelection = [fileListBlankFiles[i] for i in randomNumbers]\n\n# print fileSelection\n\n# for i in fileSelection:\n# shutil.copy(WAV_DIR + i + '.wav', \"C:\\\\Users\\\\ucfaalf\\\\Dropbox\\\\EngD\\\\Projects\\\\Chapter 3\\\\goldenTestSet\\\\21VerityFiles\")\n\nprint(wavWOcsv)","sub_path":"findBlankSoundFiles.py","file_name":"findBlankSoundFiles.py","file_ext":"py","file_size_in_byte":2325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"414951853","text":"from bs4 import BeautifulSoup\nimport 
requests\nimport time\nimport os\n\ndef find_recipes():\n    site = 'https://akispetretzikis.com'\n\n    # create a new directory for the results\n    parent_dir = os.path.dirname(os.path.abspath(\"top_level_file.txt\"))\n    directory = \"Matched Recipes\"\n    path = os.path.join(parent_dir, directory)\n    os.makedirs(path, exist_ok=True)  # do not crash if the folder already exists\n\n    print(\"Search the website: akispetretzikis.com\")\n    print(\"Insert up to 3 ingredients to filter all the recipes\")\n\n    first_ing = ''\n    second_ing = ''\n    while first_ing == '':\n        first_ing = input(\"Give the main ingredient\\n>\")\n    while second_ing == '':\n        second_ing = input('Give a second ingredient\\n>')\n    third_ing = input('Give a third ingredient if you want\\n>')\n\n    search = '&search=' + first_ing\n    page_counter = 1\n\n    url = 'https://akispetretzikis.com/en/search?from=admin' + '&page=' + str(page_counter) + search + '&utf8=%E2%9C%93'\n    html_text = requests.get(url).text\n    soup = BeautifulSoup(html_text, 'lxml')\n    show_more_button = soup.find(id='next_page_link')\n\n    if show_more_button is None:\n        print('NO RESULTS FOR THE MAIN INGREDIENT')\n\n    while show_more_button is not None:\n        print(\"searching...\")\n        texts = soup.find_all('div', class_='texts')\n        for i in texts:\n            recipe_url = site + i.find('a')['href']\n            recipe_html_text = requests.get(recipe_url).text\n            recipe_soup = BeautifulSoup(recipe_html_text, 'lxml')\n\n            ingredients = recipe_soup.find('div', class_='text ingredients-list')\n            # check that the other ingredients appear in the same recipe\n            # (the third ingredient is optional and may be empty)\n            if second_ing in str(ingredients) and (not third_ing or third_ing in str(ingredients)):\n                # Recipe name\n                recipe_name = i.find('h4').text\n                # Time till we feast\n                hands_on_time = recipe_soup.find('ul', class_='new-times').find('h5').text\n                # ingredient list\n                ing_li = ingredients.find_all(['li', 'p'])\n                # recipe method\n                method_box = recipe_soup.find('div', class_='method')\n                method_list = method_box.find_all('li')\n\n                with open(f'Matched Recipes/{recipe_name}.txt', 'w', encoding='utf-8') as f:\n                    f.write('Recipe link\\t' + recipe_url + '\\n')\n                    f.write(recipe_name + '\\n')\n                    f.write('Hands on Time:\\t' + hands_on_time + '\\n')\n                    f.write('Ingredients\\n')\n                    for ing in ing_li:\n                        f.write(ing.text + '\\n')\n                    f.write('Method\\n')\n                    for m in method_list:\n                        f.write(m.text + '\\n')\n            else:\n                continue\n\n        # increments for the while loop\n        page_counter = page_counter + 1\n        url = 'https://akispetretzikis.com/en/search?from=admin' + '&page=' + str(page_counter) + search + '&utf8=%E2%9C%93'\n        html_text = requests.get(url).text\n        soup = BeautifulSoup(html_text, 'lxml')\n        show_more_button = soup.find(id='next_page_link')\n\n\nif __name__ == '__main__':\n    find_recipes()\n    print(\"Search completed\\nOpen Matched Recipes directory to view them\")\n    print(\"Goodbye!\")\n","sub_path":"recipe_scrapper.py","file_name":"recipe_scrapper.py","file_ext":"py","file_size_in_byte":3377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"575921333","text":"import os\nimport datetime\nimport sqlite3\n\n\nfrom sqlite3 import Error\nfrom .get_json import get_json_data\nfrom .settings import BASE_DIR\n\n\ndef decdeg2dms(dd):\n    negative = dd < 0\n    dd = abs(dd)\n    minutes, seconds = divmod(dd*3600, 60)\n    degrees, minutes = divmod(minutes, 60)\n    if negative:\n        if degrees > 0:\n            degrees = -degrees\n        elif minutes > 0:\n            minutes = -minutes\n        else:\n            seconds = -seconds\n    return (degrees, minutes, seconds)\n\n\ndef 
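_decdeg2dms_example():\n    # Hypothetical helper added purely for illustration: for dd = 12.582,\n    # 12.582 * 3600 = 45295.2 arc-seconds, divmod(45295.2, 60) -> (754.0, ~55.2)\n    # and divmod(754.0, 60) -> (12.0, 34.0), so the result is roughly\n    # (12.0, 34.0, 55.2), i.e. 12 degrees, 34 minutes, 55.2 seconds.\n    return decdeg2dms(12.582)\n\n\ndef 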
create_db_connection():\n conn = None\n\n try:\n conn = sqlite3.connect(os.path.join(BASE_DIR, 'db.sqlite3'))\n except Error as e:\n print(e)\n\n return conn\n\n\ndef insert_eathquake(sql_object):\n conn = create_db_connection()\n\n sql_string = '''INSERT INTO quake_eathquake(session_id,src,id_eathquake,version\n ,eathquake_time,lat,lng,mag,depth,nst,region,data_source,create_date,url,lat_deg,lng_deg)\n VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)'''\n\n try:\n with conn:\n sql_cursor = conn.cursor()\n sql_cursor.execute(sql_string, sql_object)\n except Error as e:\n print(e)\n\n # return sql_cursor.lastrowid\n\n\ndef create_row(feature, session_id):\n\n properties = feature['properties']\n geometry = feature['geometry']\n coordinates = geometry['coordinates']\n\n local_date_time = datetime.datetime.strptime(\n '1970-01-01' + ' 0:0:0.0', \"%Y-%m-%d %H:%M:%S.%f\")\n local_date_time = local_date_time + \\\n datetime.timedelta(milliseconds=properties['time'])\n\n lat_deg = decdeg2dms(coordinates[0])\n lng_deg = decdeg2dms(coordinates[1])\n\n sql_object = (session_id,\n properties['sources'],\n feature['id'],\n '1',\n local_date_time,\n coordinates[0],\n coordinates[1],\n properties['mag'],\n coordinates[2],\n properties['nst'],\n properties['place'],\n 'usgs-gov',\n datetime.datetime.now(),\n properties['url'],\n str(lat_deg[0]).replace('.0','') + \"°\" + str(lat_deg[1]).replace('.0','') +\n \"'\" + str(round(lat_deg[2], 0)).replace('.0','') + \"''\",\n str(lng_deg[0]).replace('.0','') + \"°\" + str(lng_deg[1]).replace('.0','') +\n \"'\" + str(round(lng_deg[2], 0)).replace('.0','') + \"''\",\n )\n\n row_id = insert_eathquake(sql_object)\n","sub_path":"quake/quake/sql_db.py","file_name":"sql_db.py","file_ext":"py","file_size_in_byte":2550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"621355142","text":"import sys\nimport unicodedata\n\n\ndef buscar(*palavras_chave):\n \"\"\" Busca por caracteres que contenham a palavra chave em seu nome.\n Ex:\n >>> from exercicios.buscador import buscar\n >>> for caracter, nome in sorted(buscar('BLACK', 'suit')):\n ... print(caracter, nome)\n ...\n ♠ BLACK SPADE SUIT\n ♣ BLACK CLUB SUIT\n ♥ BLACK HEART SUIT\n ♦ BLACK DIAMOND SUIT\n >>> for caracter, nome in sorted(buscar('suit')):\n ... print(caracter, nome)\n ...\n ♠ BLACK SPADE SUIT\n ♡ WHITE HEART SUIT\n ♢ WHITE DIAMOND SUIT\n ♣ BLACK CLUB SUIT\n ♤ WHITE SPADE SUIT\n ♥ BLACK HEART SUIT\n ♦ BLACK DIAMOND SUIT\n ♧ WHITE CLUB SUIT\n 🕴 MAN IN BUSINESS SUIT LEVITATING\n >>> dict(buscar('BlAcK', 'suit', 'ClUb'))\n {'♣': 'BLACK CLUB SUIT'}\n >>> for caracter, nome in sorted(buscar('chess', 'king')):\n ... print(caracter, nome)\n ...\n ♔ WHITE CHESS KING\n ♚ BLACK CHESS KING\n :param palavras_chave: tupla de strings\n :return: generator onde cada elemento é uma tupla. O primeiro elemento da\n tupla é o caracter e o segundo é seu nome. 
Assim ele pode ser utilizado no\n construtor de um dicionário\n \"\"\"\n limite = 0\n max_unicode_value = sys.maxunicode\n palavras_upper = [palavra.upper() for palavra in palavras_chave]\n while limite < max_unicode_value:\n caracter = chr(limite)\n try:\n unicode_name_upper = unicodedata.name(caracter).upper().split()\n except ValueError:\n pass\n else:\n if all(\n palavra in unicode_name_upper\n for palavra in palavras_upper\n ):\n yield (caracter, \" \".join(unicode_name_upper))\n finally:\n limite += 1\n\n\nif __name__ == '__main__':\n print(dict(buscar('white', 'suit')))\n","sub_path":"exercicios/buscador.py","file_name":"buscador.py","file_ext":"py","file_size_in_byte":1858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"278392700","text":"#!/usr/bin/env python\n\nimport numpy as np\nimport os,argparse\nimport glob\n\ndef read_star_by_line(STARFILE,MRCFILE,OFILE):\n this_file = open(STARFILE,'r')\n these_lines=this_file.readlines()\n for line in these_lines:\n #For lines that don't begin with the \"new line\" character\n if len(line) > 1:\n this_line = line.split()\n if not this_line[0][0].isnumeric():\n continue\n else:\n this_x = this_line[0]\n this_y = this_line[1]\n ofile.write(this_x+'\\t'+this_y+'\\t')\n ofile.write(MRCFILE+'\\n')\n #If line begins with \"newline\" character \n else:\n continue\n\n\n#Command Line Options\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-sf\",help='Full path to the folder containing the .star files that will be imported to cryosparc',required=True)\nparser.add_argument(\"-mf\",help='Full path to folder containing micrographs to which the .star files should be linked',required=True)\nparser.add_argument(\"-o\",help='Path & name for the output (combined) .star file for importing to cryosparc')\nargs = parser.parse_args()\n\n#Get list of star files before we generate the new star file\nstar_list = glob.glob(os.path.join(args.sf,\"*.star\"))\n\n#Write header information for concatenated .star file\nofile = open(args.o,'w')\nofile.write(\"\\ndata_\\n\\nloop_\\n_rlnCoordinateX #1\\n_rlnCoordinateY #2\\n_rlnMicrographName #3\\n\")\n\n#Add an MRC file to each star's particle line\nfor STARFILE in star_list:\n STARBASE = os.path.split(STARFILE)[1]\n MRCBASE = STARBASE[:-5] +\".mrc\"\n MRCFULL = os.path.join(args.mf,MRCBASE)\n read_star_by_line(STARFILE,MRCFULL,ofile)\n\n#Close and write the new .star file\nofile.close()\n","sub_path":"cryolo_to_cryosparc/convert_stars_to_csparc.py","file_name":"convert_stars_to_csparc.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"146124252","text":"# -*- coding:utf-8 -*-\n\nimport re\nimport pandas as pd\nfrom nltk.corpus import stopwords\nfrom bs4 import BeautifulSoup\nimport nltk.data\n\ntrain = pd.read_csv(\"./IMDB/labeledTrainData.tsv\", delimiter = \"\\t\")\ntest = pd.read_csv(\"./IMDB/testData.tsv\", delimiter = \"\\t\")\n# print(train.head())\n# print(test.head())\n\nunlabeled_train = pd.read_csv(\"./IMDB/unlabeledTrainData.tsv\",\n\tdelimiter = \"\\t\", quoting = 3)\n\ntokenizer = nltk.data.load(\"tokenizers/punkt/english.pickle\")\n\ndef review_to_text(review, remove_stopwords):\n\t\"\"\"\n\treview: type str\n\tremove_stopwords: type boolean\n\t\"\"\"\n\traw_text = BeautifulSoup(review, \"html\").get_text()\n\tletters = re.sub(\"[^a-zA-Z]\", \" \", raw_text)\n\twords = letters.lower().split()\n\tif remove_stopwords:\n\t\tstop_words = 
set(stopwords.words(\"english\"))\n\t\twords = [w for w in words if w not in stop_words]\n\treturn words\n\ndef review_to_sentences(review, tokenizer):\n\traw_sentences = tokenizer.tokenize(review.strip())\n\tsentences = []\n\tfor raw_sentence in raw_sentences:\n\t\tif len(raw_sentence) > 0:\n\t\t\tsentences.append(review_to_text(raw_sentence, False))\n\treturn sentences\n\ncorpora = []\nfor review in unlabeled_train[\"review\"]:\n\tcorpora += review_to_sentences(review.decode(\"utf-8\"), tokenizer)\n\nnum_features = 300  # word vector dimensionality\nmin_word_count = 20\nnum_workers = 2\ncontext = 10\ndownsampling = 1e-3\n\nfrom gensim.models import word2vec\nprint(\"Training word2vec model...\")\n\nmodel = word2vec.Word2Vec(corpora, workers = num_workers,\n\tsize = num_features, min_count = min_word_count,\n\twindow = context, sample = downsampling)\n\nmodel.init_sims(replace = True)\n\nmodel.save(\"./IMDB/model.sav\")\n\nfrom gensim.models import Word2Vec\nmodel = Word2Vec.load(\"./IMDB/model.sav\")\nmodel.most_similar(\"man\")","sub_path":"kaggle_competitions/IMDB_egs2.py","file_name":"IMDB_egs2.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"537598861","text":"'''\nGiven an unsorted array of integers, find the length of the longest consecutive\nelements sequence.\n\nFor example, given [100,4,200,1,3,2], the longest consecutive elements sequence\nshould be [1,2,3,4]. Its length is 4.\n\nYour algorithm should run in O(n) complexity\n'''\n\n\n'''\nApproach #1 Brute Force [Time Limit Exceeded]\nIntuition\n\nBecause a sequence could start at any number in `nums`, we can exhaust the entire search\nspace by building as long a sequence as possible from every number.\n\nAlgorithm\n\nThe brute force algorithm does not do anything clever - it just considers each number\nin nums, attempting to count as high as possible from that number using\nonly numbers in nums. After it counts too high (i.e. currentNum refers to a number that\nnums does not contain), it records the length of the sequence if it is larger than the\ncurrent best. The algorithm is necessarily optimal because it explores every possibility.\n\nComplexity Analysis:\n\nTime complexity O(n^3):\n    The outer loop runs exactly n times, and because currentNum increments by 1 during\n    each iteration of the `while` loop, it runs in O(n) time. Then, on each iteration of the\n    `while` loop, an O(n) lookup in the array is performed. Therefore, this brute force\n    algorithm is really three nested O(n) loops, which compound multiplicatively to a cubic\n    runtime.\n'''\n\nclass Solution:\n\n    def longest_consecutive(self, nums):\n        longest_streak = 0\n\n        for num in nums:\n            current_num = num\n            current_streak = 1\n\n            while current_num + 1 in nums:\n                current_num += 1\n                current_streak += 1\n\n            longest_streak = max(longest_streak, current_streak)\n\n        return longest_streak\n\n    '''\n    Approach #2 Sorting [Accepted]\n    Intuition\n\n    If we can iterate over the numbers in ascending order, then it will be easy to find sequences\n    of consecutive numbers. To do so, we can sort the array.\n\n    Algorithm\n\n    Before we do anything, we check for the base case input of the empty array. The longest\n    sequence in an empty array is, of course, 0, so we can simply return that. For\n    all other cases, we sort nums and consider each number after the first (because\n    we need to compare each number to its previous number). If the current number and\n    the previous are equal, then our current sequence is neither extended nor broken, so\n    we simply move on to the next number. If they are unequal, then we must check whether\n    the current number extends the sequence (i.e. nums[i] == nums[i-1] + 1). If it does,\n    then we add to our current count and continue. Otherwise, the sequence is broken,\n    so we record our current sequence and reset it to 1 (to include the number that broke\n    the sequence). It is possible that the last element of nums is part of the longest sequence,\n    so we return the maximum of the current sequence and the longest one.\n\n    Sorting Example\n\n    [9,1,4,7,3,-1,0,5,8,-1,6]\n              |\n              \\/\n\n    [-1,-1,0,1,3,4,5,6,7,8,9]\n\n    Here, an example array is sorted before the linear scan identifies all consecutive sequences;\n    the longest consecutive run is 3,4,5,6,7,8,9.\n\n    Complexity Analysis:\n    Time complexity O(n lg n):\n        The main `for` loop does constant work n times, so the algorithm's time complexity is\n        dominated by the invocation of sort, which runs in O(n lg n) time.\n    Space complexity O(1) (or O(n)):\n        For the implementations provided here, the space complexity is constant because we sort\n        the input array in place. If we are not allowed to modify the input array, we must\n        spend linear space to store a sorted copy.\n    '''\n\nclass Solution:\n\n\n    def longest_consecutive_2(self, nums):\n        if not nums:\n            return 0\n\n        nums.sort()\n\n        longest_streak = 1\n        current_streak = 1\n\n        for i in range(1, len(nums)):\n            if nums[i] != nums[i - 1]:\n                if nums[i] == nums[i - 1] + 1:\n                    current_streak += 1\n                else:\n                    longest_streak = max(longest_streak, current_streak)\n                    current_streak = 1\n\n        return max(longest_streak, current_streak)\n\n'''\nApproach #3 HashSet and intelligent sequence building [Accepted]\n\nIntuition:\n    It turns out that our initial brute force solution was on the right track, but missing\n    a few optimizations necessary to reach O(n) time complexity.\n\nAlgorithm:\n    The optimised algorithm contains only two changes from the brute force approach: the\n    numbers are stored in a `HashSet` (or `Set` in Python) to allow O(1) lookups, and\n    we only attempt to build sequences from numbers that are not already part of a longer\n    sequence. 
This is accomplished by first ensuring that the number that would immediately \n precede the current number in a sequence is not present, as that number would\n necessarily be part of a longer sequence.\n'''\n\nclass Solution:\n\n\n def longest_consecutive_3(self, nums):\n longest_streak = 0\n num_set = set(nums)\n\n for num in num_set:\n if num - 1 not in num_set:\n current_num = num\n current_streak = 1\n\n while current_num + 1 in num_set:\n current_num += 1\n current_streak += 1\n\n longest_streak = max(longest_streak, current_streak)\n\n return longest_streak\n","sub_path":"gs/longest_consecutive_sequence.py","file_name":"longest_consecutive_sequence.py","file_ext":"py","file_size_in_byte":5490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"520312571","text":"import FWCore.ParameterSet.Config as cms\n\nprocess = cms.Process('RECODQM')\n\n# minimum of logs\nMessageLogger = cms.Service(\"MessageLogger\",\n statistics = cms.untracked.vstring(),\n destinations = cms.untracked.vstring('cerr'),\n cerr = cms.untracked.PSet(\n threshold = cms.untracked.string('WARNING')\n )\n)\n\n# import of standard configurations\nprocess.load('Configuration.StandardSequences.Services_cff')\nprocess.load('Configuration.EventContent.EventContent_cff')\nprocess.load('Configuration.StandardSequences.GeometryRecoDB_cff')\nprocess.load('Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff')\nprocess.load('Configuration.StandardSequences.EDMtoMEAtRunEnd_cff')\nprocess.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')\n\n# global tag\nfrom Configuration.AlCa.GlobalTag import GlobalTag\nprocess.GlobalTag = GlobalTag(process.GlobalTag, 'auto:mc', '') #for MC\n\n# load DQM frame work\nprocess.load(\"DQMServices.Core.DQM_cfg\")\nprocess.load(\"DQMServices.Components.DQMEnvironment_cfi\")\n\n# raw data source\n#process.source = cms.Source(\"PoolSource\",\n# fileNames = cms.untracked.vstring('file:/afs/cern.ch/user/j/jkaspar/public/run268608_ls0001_streamA_StorageManager.root')\n#)\n\nprocess.load('TotemRawData.Readers.TotemStandaloneRawDataSource_cfi')\nprocess.source.verbosity = 10\nprocess.source.printProgressFrequency = 0\nprocess.source.fileNames.append('/afs/cern.ch/user/j/jkaspar/public/run_9987_EVB11_1.003.srs')\n\nprocess.maxEvents = cms.untracked.PSet(\n input = cms.untracked.int32(100)\n)\n\n# raw-to-digi conversion\nprocess.load('CondFormats.TotemReadoutObjects.TotemDAQMappingESSourceXML_cfi')\nprocess.TotemDAQMappingESSourceXML.mappingFileNames.append(\"CondFormats/TotemReadoutObjects/xml/totem_rp_210far_220_mapping.xml\")\n\n# process.load('EventFilter.TotemRawToDigi.TotemRPRawToDigi_cfi')\n# process.totemRPRawToDigi.rawDataTag = cms.InputTag(\"rawDataCollector\")\n# process.totemRPRawToDigi.fedIds = cms.vuint32(577, 578, 579, 580)\n# process.totemRPRawToDigi.RawToDigi.printErrorSummary = 0\n# process.totemRPRawToDigi.RawToDigi.printUnknownFrameSummary = 0\n\nprocess.load(\"EventFilter.TotemRawToDigi.totemTriggerRawToDigi_cfi\")\nprocess.totemTriggerRawToDigi.rawDataTag = cms.InputTag(\"source\")\nprocess.totemTriggerRawToDigi.fedId = 0x29c\n\nprocess.load('EventFilter.TotemRawToDigi.totemRPRawToDigi_cfi')\nprocess.totemRPRawToDigi.rawDataTag = cms.InputTag(\"source\")\nprocess.totemRPRawToDigi.fedIds = cms.vuint32(0x1a1, 0x1a2, 0x1a9, 0x1aa, 0x1b5, 0x1bd)\n\n# RP 
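geometry note: the appended RP_Garage XML appears to describe Roman Pot\n# positions in the garage (out-of-beam) state.\n# RP 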
geometry\nprocess.load(\"Geometry.VeryForwardGeometry.geometryRP_cfi\")\nprocess.XMLIdealGeometryESSource.geomXMLFiles.append(\"Geometry/VeryForwardData/data/RP_Garage/RP_Dist_Beam_Cent.xml\")\n\n# local RP reconstruction chain with standard settings\nprocess.load(\"RecoCTPPS.TotemRPLocal.totemRPLocalReconstruction_cff\")\n\n# TOTEM DQM modules\nprocess.load(\"DQM.Totem.totemDAQTriggerDQMSource_cfi\")\nprocess.load(\"DQM.Totem.totemRPDQMSource_cfi\")\n\n# DQM output\nprocess.DQMOutput = cms.OutputModule(\"DQMRootOutputModule\",\n fileName = cms.untracked.string(\"OUT_step1.root\")\n)\n\n# execution schedule\nprocess.reco_step = cms.Path(\n process.totemTriggerRawToDigi *\n process.totemRPRawToDigi *\n process.totemRPLocalReconstruction\n)\n\nprocess.dqm_produce_step = cms.Path(\n process.totemDAQTriggerDQMSource *\n process.totemRPDQMSource\n)\n\nprocess.dqm_output_step = cms.EndPath(\n process.DQMOutput\n)\n\nprocess.schedule = cms.Schedule(\n process.reco_step,\n process.dqm_produce_step,\n process.dqm_output_step\n)\n","sub_path":"DQM/Totem/test/step1_cfg.py","file_name":"step1_cfg.py","file_ext":"py","file_size_in_byte":3521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"621117638","text":"#!/usr/bin/env python3\n\nimport os\nimport glob\n\nimport pandas as pd\n\n# Returns a tuple.\n# Example: '/Users/john/GitProjects/Python-Baseball/stats', 'data.py'\npath, file_name = os.path.split(os.path.abspath(__file__))\ngame_files = glob.glob(os.path.join(os.path.dirname(__file__), '..', 'games', '*.EVE'))\ngame_files.sort()\n\ngame_frames = []\n# Append game frames\nfor game_file in game_files:\n game_frame = pd.read_csv(game_file, names=['type', 'multi2', 'multi3', 'multi4', 'multi5', 'multi6', 'event'])\n game_frames.append(game_frame)\n\n# Concatenate DataFrames\ngames = pd.concat(game_frames)\n\n# Clean values\ngames.loc[games['multi5'] == '??', ['multi5']] = ''\n\n# Extract identifiers\nidentifiers = games['multi2'].str.extract(r'(.LS(\\d{4})\\d{5})')\n# Forward fill identifiers\nidentifiers = identifiers.fillna(method='ffill')\nidentifiers.columns = ['game_id', 'year']\n\n# Concatenate identifier columns\ngames = pd.concat([games, identifiers], sort=False, axis=1)\n# Fill NaN (Not a Number) values\ngames = games.fillna(' ')\ngames.loc[:, 'type'] = pd.Categorical(games.loc[:, 'type'])\n\n# Print DataFrame\nprint(games.head())\n","sub_path":"stats/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"570648533","text":"# coding: utf-8\n\nimport os, sys, glob\nfrom glob import glob\n\nmdlist = glob('*.md')\ntype(mdlist)\n\n\nfor file in mdlist:\n\n fp = open(file, 'r', encoding='utf-8')\n lines = []\n for line in fp: # 内置的迭代器, 效率很高\n lines.append(line.rstrip())\n fp.close()\n\n lines.pop(3)\n lines.insert(1, '')\n\n s = '\\n'.join(lines)\n\n fp = open(file, 'w', encoding='utf-8')\n fp.write(s)\n fp.close()\n","sub_path":"TOOL_mdAddToc/mdAddToc3.py","file_name":"mdAddToc3.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"283650627","text":"# Copyright 2018 ICON Foundation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless 
required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"helper class for TxItem\"\"\"\n\nimport json\nimport sys\n\nfrom loopchain.blockchain.transactions import Transaction, TransactionVersioner, TransactionSerializer\nfrom loopchain.protos import loopchain_pb2\n\n\nclass TxItem:\n tx_serializers = {}\n\n def __init__(self, tx_json: str, channel: str):\n self.channel = channel\n self.__tx_json = tx_json\n self.__len = sys.getsizeof(tx_json) + sys.getsizeof(channel)\n\n def __len__(self):\n return self.__len\n\n def get_tx_message(self):\n message = loopchain_pb2.TxSend(\n tx_json=self.__tx_json,\n channel=self.channel)\n return message\n\n @classmethod\n def create_tx_item(cls, tx_param: tuple, channel: str):\n tx, tx_versioner = tx_param\n tx_serializer = cls.get_serializer(tx, tx_versioner)\n tx_item = TxItem(\n json.dumps(tx_serializer.to_raw_data(tx)),\n channel\n )\n return tx_item\n\n @classmethod\n def get_serializer(cls, tx: Transaction, tx_versioner: TransactionVersioner):\n if tx.version not in cls.tx_serializers:\n cls.tx_serializers[tx.version] = TransactionSerializer.new(tx.version, tx.type(), tx_versioner)\n return cls.tx_serializers[tx.version]\n","sub_path":"loopchain/baseservice/tx_item_helper.py","file_name":"tx_item_helper.py","file_ext":"py","file_size_in_byte":1832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"596498614","text":"import numpy as np\nimport pandas as pd\n# import model\nimport model\nimport itertools\nimport multiprocessing as mp\nimport pickle\n\n# Prepare network and data\nprep_data = model.data_and_network_prep()\n\n# Create data for the fit\ndata_for_fit_i = model.create_data_for_fit_influenza()\n\ndata_for_fit_v = model.create_data_for_fit(prep_data)\n\n\n# Get parameters - vaccination model\nwith open('../../Data/vaccination_model/grid_search_5_res.pickle', 'rb') as pickle_in:\n grid_search_res_1 = pickle.load(pickle_in)\n\nwith open('../../Data/vaccination_model/grid_search_6_res.pickle', 'rb') as pickle_in:\n grid_search_res_2 = pickle.load(pickle_in)\n\nwith open('../../Data/vaccination_model/grid_search_7_res.pickle', 'rb') as pickle_in:\n grid_search_res_3 = pickle.load(pickle_in)\n\ngrid_search_res_v = grid_search_res_1 + grid_search_res_2 + grid_search_res_3\n\n# Max likelihood subdist\nliklihood_subdist_v = max(grid_search_res_v, key=lambda x: x['log_likelihood_subdist'])\nliklihood_subdist_v['parameters']['beta'] = liklihood_subdist_v['parameters']['beta_2']\n\n# Get parameters - influenza model\n# with open('L:/Dor/data/coupled_model/grid_search_2016_1_res.pickle', 'rb') as pickle_in:\n# grid_search_res_i = pickle.load(pickle_in)\n\n# # Max likelihood subdist\n# liklihood_subdist_i = max(grid_search_res_i, key=lambda x: x['log_likelihood_subdist'])\n\n# Load parameters_i all seasons\nwith open('../../data/coupled_model/parameters_i_all_seasons.pickle', 'rb') as pickle_in:\n parameters_i = pickle.load(pickle_in)\n\n# Set parameters\nparameters_v = liklihood_subdist_v['parameters']\n# parameters_i = liklihood_subdist_i['parameters']\n\n\n# intervention type\n# inter_type = 'random'\n# inter_type = 'by_area'\n# inter_type = 'by_subdist'\n# inter_type = 'by_yeshuv'\n\n\n# intervention parameters\nlength = 5\n\n# Number of 
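simulation runs implied by the settings below: 5 intervention percents\n# x (6 + 5 + 4 + 3) start/time pairs = 90 interventions, each simulated\n# m = 40 times, i.e. 3600 simulated runs per intervention dictionary.\n# Number of 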
simulations\nm = 40\n\n# Set intervention parameters\n# Intervention percents\ninter_percents = [0.005, 0.01, 0.02, 0.03, 0.05]\n# inter_percents = [0.01, 0.025, 0.05]\n# inter_percents = [0.04]\n\n# Vaccination season start time: 1.8, 1.9, 1.10, 1.11\nstart_times = [61, 92, 122, 153]\n\n# Intervention times (by start time)\nall_inter_times = [61, 92, 122, 153, 183, 214] # 1.8, 1.9, 1.10, 1.11, 1.12, 1.1\nintervention_times = {61: all_inter_times, 92: all_inter_times[1:], 122: all_inter_times[2:], 153: all_inter_times[3:]}\n\n# Random intervention dict\ninterventions_dict_random = {inter_percent:\n                             {start_time: [{'time': time, 'percent': inter_percent, 'len': length,\n                                            'vacc_start': start_time, 'type': 'random'}\n                                           for time in intervention_times[start_time]]\n                              for start_time in start_times}\n                             for inter_percent in inter_percents}\n\n# Intervention by area\n# Load nodes by area and age\nwith open(model.nodes_by_area_age_dict_path, 'rb') as pickle_in:\n    nodes_by_area_age = pickle.load(pickle_in)\n\n# Load page ranks\nwith open(model.pagerank_by_area_age_path, 'rb') as pickle_in:\n    pageranks = pickle.load(pickle_in)\n\n# Sort areas and age groups by page rank (descending)\nareas_age_by_rank_with_rank = sorted(list(pageranks.items()), key=lambda x: x[1], reverse=True)\nareas_age_by_rank = list(map(lambda x: x[0], areas_age_by_rank_with_rank))\n\n# Filter irrelevant areas\nareas_age_by_rank = list(filter(lambda x: x in nodes_by_area_age, areas_age_by_rank))\nareas_age_by_rank_with_rank = list(filter(lambda x: x[0] in nodes_by_area_age, areas_age_by_rank_with_rank))\n\n# Create a list of nodes by PageRank\nnodes_by_rank = []\nfor (area, age) in areas_age_by_rank:\n    nodes_by_rank += list(nodes_by_area_age[(area, age)])\n\n# Create intervention dict\ninterventions_dict_area = {inter_percent:\n                           {start_time: [{'time': time, 'percent': inter_percent, 'len': length, 'vacc_start': start_time, 'type': 'by_area',\n                                          'nodes_by_rank': nodes_by_rank} for time in intervention_times[start_time]]\n                            for start_time in start_times}\n                           for inter_percent in inter_percents}\n\n# Load page ranks by subdist\nwith open('../../Data/vaccination_data/pagerank_by_subdist_age.pickle', 'rb') as pickle_in:\n    pageranks_subdist = pickle.load(pickle_in)\n\n# Sort areas and age groups by page rank (descending)\nsubdists_age_by_rank_with_rank = sorted(list(pageranks_subdist.items()), key=lambda x: x[1], reverse=True)\nsubdists_age = list(map(lambda x: x[0], subdists_age_by_rank_with_rank))\nsubdist_ranks = list(map(lambda x: x[1], subdists_age_by_rank_with_rank))\n\n# Create intervention dict\ninterventions_dict_subdist = {inter_percent:\n                              {start_time: [{'time': time, 'percent': inter_percent, 'len': length, 'vacc_start': start_time,\n                                             'type': 'by_subdist', 'subdists_age': subdists_age, 'subdist_ranks': subdist_ranks}\n                                            for time in intervention_times[start_time]]\n                               for start_time in start_times}\n                              for inter_percent in inter_percents}\n\n# Load page ranks by yeshuv\nwith open('../../Data/vaccination_data/pagerank_by_yeshuv_age.pickle', 'rb') as pickle_in:\n    pageranks_yeshuv = pickle.load(pickle_in)\n\n# Sort areas and age groups by page rank (descending)\nyeshuv_age_by_rank_with_rank = sorted(list(pageranks_yeshuv.items()), key=lambda x: x[1], reverse=True)\nyeshuv_age = list(map(lambda x: x[0], yeshuv_age_by_rank_with_rank))\nyeshuv_ranks = list(map(lambda x: x[1], 
yeshuv_age_by_rank_with_rank))\n\n# Create intervention dict\ninterventions_dict_yeshuv = {inter_percent:\n {start_time: [{'time': time, 'percent': inter_percent, 'len': length, 'vacc_start': start_time,\n 'type': 'by_yeshuv', 'yeshuv_age': yeshuv_age, 'yeshuv_ranks': yeshuv_ranks}\n for time in intervention_times[start_time]]\n for start_time in start_times}\n for inter_percent in inter_percents}\n\n\n# Define a function for multiprocessing\ndef intervention_mp(intervention):\n print(mp.current_process())\n\n # Run the model with current intervention\n inter_res = model.intervention_coupled_model(parameters_i, parameters_v, prep_data, data_for_fit_i, data_for_fit_v, intervention,\n intervention['vacc_start'], num_of_simulations=m)\n\n return {(intervention['percent'], intervention['vacc_start'], intervention['time']): inter_res}\n\n\ndef mp_handler(interventions_list):\n # Create a pool of processes\n pool = mp.Pool(24)\n # Process in parallel\n results = pool.map(intervention_mp, interventions_list)\n return results\n\n\nif __name__ == '__main__':\n # Intervention dicts to go over\n # inter_dicts = [interventions_dict_random, interventions_dict_subdist, interventions_dict_yeshuv,\n # interventions_dict_area]\n inter_dicts = [interventions_dict_yeshuv]\n\n # File names for save\n # file_names = [f'random_intervention_res_all_seasons_m{m}', f'subdist_intervention_res_all_seasons_m{m}',\n # f'yeshuv_intervention_res_all_seasons_m{m}', f'area_intervention_res_all_seasons_m{m}']\n file_names = [f'yeshuv_intervention_res_all_seasons_m{m}_all_per']\n\n # Go over the dicts and run all interventions\n for i, interventions_dict in enumerate(inter_dicts):\n # Unpack dictionary\n interventions_list = []\n for inter_percent in interventions_dict:\n for start_time, inter in interventions_dict[inter_percent].items():\n interventions_list.extend(inter)\n\n # Get results using mp\n res = mp_handler(interventions_list)\n\n # Saving the results\n with open(f'../../data/coupled_model/{file_names[i]}.pickle', 'wb') as pickle_out:\n pickle.dump(res, pickle_out)\n","sub_path":"influenza_modeling/I_Coupled model intervention - multiprocessing.py","file_name":"I_Coupled model intervention - multiprocessing.py","file_ext":"py","file_size_in_byte":8166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
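A minimal sketch of reading back the results written by the script above, assuming only the layout the script itself produces: a pickled list of single-key dicts keyed by (percent, vacc_start, time) tuples. The file name mirrors file_names with m = 40; the printed summary is illustrative.

import pickle

# Path as constructed by the script above (m = 40, yeshuv run).
path = '../../data/coupled_model/yeshuv_intervention_res_all_seasons_m40_all_per.pickle'

with open(path, 'rb') as pickle_in:
    results = pickle.load(pickle_in)

# Each list element is {(percent, vacc_start, time): inter_res}; merge into one dict.
flat = {}
for item in results:
    flat.update(item)

# List the (percent, vaccination start, intervention time) combinations that were run.
for key in sorted(flat):
    print(key)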